| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
# Copyright (c) 2008, 2010 Aldo Cortesi
# Copyright (c) 2009 Ben Duffield
# Copyright (c) 2010 aldo
# Copyright (c) 2010-2012 roger
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 Kenji_Takahashi
# Copyright (c) 2011-2015 Tycho Andersen
# Copyright (c) 2012-2013 dequis
# Copyright (c) 2012 Craig Barnes
# Copyright (c) 2013 xarvh
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Filipe Nepomuceno
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import itertools
from .. import bar, hook
from . import base
from typing import Any, List, Tuple # noqa: F401
class _GroupBase(base._TextBox, base.PaddingMixin, base.MarginMixin):
defaults = [
("borderwidth", 3, "Current group border width"),
("center_aligned", True, "center-aligned group box"),
] # type: List[Tuple[str, Any, str]]
def __init__(self, **config):
base._TextBox.__init__(self, width=bar.CALCULATED, **config)
self.add_defaults(_GroupBase.defaults)
self.add_defaults(base.PaddingMixin.defaults)
self.add_defaults(base.MarginMixin.defaults)
def box_width(self, groups):
width, _ = self.drawer.max_layout_size(
[i.label for i in groups],
self.font,
self.fontsize
)
return width + self.padding_x * 2 + self.borderwidth * 2
def _configure(self, qtile, bar):
base._Widget._configure(self, qtile, bar)
if self.fontsize is None:
calc = self.bar.height - self.margin_y * 2 - \
self.borderwidth * 2 - self.padding_y * 2
self.fontsize = max(calc, 1)
self.layout = self.drawer.textlayout(
"",
"ffffff",
self.font,
self.fontsize,
self.fontshadow
)
self.setup_hooks()
def setup_hooks(self):
def hook_response(*args, **kwargs):
self.bar.draw()
hook.subscribe.client_managed(hook_response)
hook.subscribe.client_urgent_hint_changed(hook_response)
hook.subscribe.client_killed(hook_response)
hook.subscribe.setgroup(hook_response)
hook.subscribe.group_window_add(hook_response)
hook.subscribe.current_screen_change(hook_response)
hook.subscribe.changegroup(hook_response)
def drawbox(self, offset, text, bordercolor, textcolor, highlight_color=None,
width=None, rounded=False, block=False, line=False, highlighted=False):
self.layout.text = text
self.layout.font_family = self.font
self.layout.font_size = self.fontsize
self.layout.colour = textcolor
if width is not None:
self.layout.width = width
if line:
pad_y = [
(self.bar.height - self.layout.height - self.borderwidth) / 2,
(self.bar.height - self.layout.height + self.borderwidth) / 2
]
else:
pad_y = self.padding_y
framed = self.layout.framed(
self.borderwidth,
bordercolor,
0,
pad_y,
highlight_color
)
y = self.margin_y
if self.center_aligned:
for t in base.MarginMixin.defaults:
if t[0] == 'margin':
y += (self.bar.height - framed.height) / 2 - t[1]
break
if block:
framed.draw_fill(offset, y, rounded)
elif line:
framed.draw_line(offset, y, highlighted)
else:
framed.draw(offset, y, rounded)
class AGroupBox(_GroupBase):
"""A widget that graphically displays the current group"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [("border", "000000", "group box border color")]
def __init__(self, **config):
_GroupBase.__init__(self, **config)
self.add_defaults(AGroupBox.defaults)
def button_press(self, x, y, button):
self.bar.screen.cmd_next_group()
def calculate_length(self):
return self.box_width(self.qtile.groups) + self.margin_x * 2
def draw(self):
self.drawer.clear(self.background or self.bar.background)
e = next(
i for i in self.qtile.groups
if i.name == self.bar.screen.group.name
)
self.drawbox(self.margin_x, e.name, self.border, self.foreground)
self.drawer.draw(offsetx=self.offset, width=self.width)
class GroupBox(_GroupBase):
"""
A widget that graphically displays the current group.
All groups are displayed by their label.
If the label of a group is the empty string, that group will not be displayed.
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
("active", "FFFFFF", "Active group font colour"),
("inactive", "404040", "Inactive group font colour"),
(
"highlight_method",
"border",
"Method of highlighting ('border', 'block', 'text', or 'line')"
"Uses `*_border` color settings"
),
("rounded", True, "To round or not to round box borders"),
(
"this_current_screen_border",
"215578",
"Border or line colour for group on this screen when focused."
),
(
"this_screen_border",
"215578",
"Border or line colour for group on this screen when unfocused."
),
(
"other_current_screen_border",
"404040",
"Border or line colour for group on other screen when focused."
),
(
"other_screen_border",
"404040",
"Border or line colour for group on other screen when unfocused."
),
(
"highlight_color",
["000000", "282828"],
"Active group highlight color when using 'line' highlight method."
),
(
"urgent_alert_method",
"border",
"Method for alerting you of WM urgent "
"hints (one of 'border', 'text', 'block', or 'line')"
),
("urgent_text", "FF0000", "Urgent group font color"),
("urgent_border", "FF0000", "Urgent border or line color"),
(
"disable_drag",
False,
"Disable dragging and dropping of group names on widget"
),
("invert_mouse_wheel", False, "Whether to invert mouse wheel group movement"),
("use_mouse_wheel", True, "Whether to use mouse wheel events"),
(
"visible_groups",
None,
"Groups that will be visible. "
"If set to None or [], all groups will be visible."
"Visible groups are identified by name not by their displayed label."
),
(
"hide_unused",
False,
"Hide groups that have no windows and that are not displayed on any screen."
),
(
"spacing",
None,
"Spacing between groups"
"(if set to None, will be equal to margin_x)")
]
def __init__(self, **config):
_GroupBase.__init__(self, **config)
self.add_defaults(GroupBox.defaults)
if self.spacing is None:
self.spacing = self.margin_x
self.clicked = None
@property
def groups(self):
"""
Returns the list of visible groups.
The existing groups are filtered by the visible_groups attribute and
their label. Groups with an empty string as label are never included.
Groups that are not named in visible_groups are not returned.
"""
if self.hide_unused:
if self.visible_groups:
return [g for g in self.qtile.groups
if g.label and (g.windows or g.screen) and
g.name in self.visible_groups]
else:
return [g for g in self.qtile.groups if g.label and
(g.windows or g.screen)]
else:
if self.visible_groups:
return [g for g in self.qtile.groups
if g.label and g.name in self.visible_groups]
else:
return [g for g in self.qtile.groups if g.label]
def get_clicked_group(self, x, y):
group = None
new_width = self.margin_x - self.spacing / 2.0
width = 0
for g in self.groups:
new_width += self.box_width([g]) + self.spacing
if width <= x <= new_width:
group = g
break
width = new_width
return group
def button_press(self, x, y, button):
self.clicked = None
group = None
current_group = self.qtile.current_group
if button == (5 if not self.invert_mouse_wheel else 4):
if self.use_mouse_wheel:
i = itertools.cycle(self.qtile.groups)
while next(i) != current_group:
pass
while group is None or group not in self.groups:
group = next(i)
elif button == (4 if not self.invert_mouse_wheel else 5):
if self.use_mouse_wheel:
i = itertools.cycle(reversed(self.qtile.groups))
while next(i) != current_group:
pass
while group is None or group not in self.groups:
group = next(i)
else:
group = self.get_clicked_group(x, y)
if not self.disable_drag:
self.clicked = group
if group:
self.bar.screen.set_group(group)
def button_release(self, x, y, button):
if button not in (5, 4):
group = self.get_clicked_group(x, y)
if group and self.clicked:
group.cmd_switch_groups(self.clicked.name)
self.clicked = None
def calculate_length(self):
width = self.margin_x * 2 + (len(self.groups) - 1) * self.spacing
for g in self.groups:
width += self.box_width([g])
return width
def group_has_urgent(self, group):
return len([w for w in group.windows if w.urgent]) > 0
def draw(self):
self.drawer.clear(self.background or self.bar.background)
offset = self.margin_x
for i, g in enumerate(self.groups):
to_highlight = False
is_block = (self.highlight_method == 'block')
is_line = (self.highlight_method == 'line')
bw = self.box_width([g])
if self.group_has_urgent(g) and self.urgent_alert_method == "text":
text_color = self.urgent_text
elif g.windows:
text_color = self.active
else:
text_color = self.inactive
if g.screen:
if self.highlight_method == 'text':
border = self.bar.background
text_color = self.this_current_screen_border
else:
if self.bar.screen.group.name == g.name:
if self.qtile.current_screen == self.bar.screen:
border = self.this_current_screen_border
to_highlight = True
else:
border = self.this_screen_border
else:
if self.qtile.current_screen == g.screen:
border = self.other_current_screen_border
else:
border = self.other_screen_border
elif self.group_has_urgent(g) and \
self.urgent_alert_method in ('border', 'block', 'line'):
border = self.urgent_border
if self.urgent_alert_method == 'block':
is_block = True
elif self.urgent_alert_method == 'line':
is_line = True
else:
border = self.background or self.bar.background
self.drawbox(
offset,
g.label,
border,
text_color,
highlight_color=self.highlight_color,
width=bw,
rounded=self.rounded,
block=is_block,
line=is_line,
highlighted=to_highlight
)
offset += bw + self.spacing
self.drawer.draw(offsetx=self.offset, width=self.width)
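# Hedged usage sketch (not part of the original file): a minimal qtile config
# fragment that places a GroupBox in a bar. The parameter values are
# illustrative, not defaults.
#
#     from libqtile import bar, widget
#     from libqtile.config import Screen
#
#     screens = [
#         Screen(top=bar.Bar([widget.GroupBox(highlight_method='line',
#                                             hide_unused=True)], 24)),
#     ]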
| soulchainer/qtile | libqtile/widget/groupbox.py | Python | mit | 13,561 |
import datetime
from will.plugin import WillPlugin
from will.decorators import respond_to, periodic, hear, randomly, route, rendered_template
class TimeTopicPlugin(WillPlugin):
# Disabled for now.
# @periodic(minute='0')
def set_topic_time(self):
now_pst = datetime.datetime.now()
now_bcn = now_pst + datetime.timedelta(hours=9)
topic = "PST: %s, Paris: %s" % (now_pst.strftime("%a %I:%M %p"), now_bcn.strftime("%a %I:%M %p"))
self.set_topic(topic)
@respond_to("^time$")
def tell_times(self, message):
now_pst = datetime.datetime.now()
now_bcn = now_pst + datetime.timedelta(hours=9)
topic = "PST: %s, Paris: %s" % (now_pst.strftime("%a %I:%M %p"), now_bcn.strftime("%a %I:%M %p"))
self.reply(message, topic)
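# Usage note (added, not part of the original file): with Will running, sending
# the bot the message "time" triggers tell_times(), which replies with e.g.
# "PST: Mon 09:30 AM, Paris: Mon 06:30 PM". The fixed +9 hour offset assumes
# the host clock is on US Pacific time.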
| skoczen/my-will | plugins/time_topic.py | Python | mit | 799 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.utils import new_arange
# -------------- Helper Functions --------------------------------------------------- #
def load_libnat():
try:
from fairseq import libnat_cuda
return libnat_cuda, True
except ImportError as e:
print(str(e) + "... fall back to CPU version")
try:
from fairseq import libnat
return libnat, False
except ImportError as e:
import sys
sys.stderr.write(
"ERROR: missing libnat_cuda. run `python setup.py build_ext --inplace`\n"
)
raise e
def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx):
libnat, use_cuda = load_libnat()
def _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx):
in_masks = in_tokens.ne(padding_idx)
out_masks = out_tokens.ne(padding_idx)
mask_ins_targets, masked_tgt_masks = libnat.generate_insertion_labels(
out_tokens.int(),
libnat.levenshtein_distance(
in_tokens.int(),
out_tokens.int(),
in_masks.sum(1).int(),
out_masks.sum(1).int(),
),
)
masked_tgt_masks = masked_tgt_masks.bool() & out_masks
mask_ins_targets = mask_ins_targets.type_as(in_tokens)[
:, 1 : in_masks.size(1)
].masked_fill_(~in_masks[:, 1:], 0)
masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx)
return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets
def _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx):
in_seq_len, out_seq_len = in_tokens.size(1), out_tokens.size(1)
in_tokens_list = [
[t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist())
]
out_tokens_list = [
[t for t in s if t != padding_idx]
for i, s in enumerate(out_tokens.tolist())
]
full_labels = libnat.suggested_ed2_path(
in_tokens_list, out_tokens_list, padding_idx
)
mask_inputs = [
[len(c) if c[0] != padding_idx else 0 for c in a[:-1]] for a in full_labels
]
# generate labels
masked_tgt_masks = []
for mask_input in mask_inputs:
mask_label = []
for beam_size in mask_input[1:-1]: # HACK 1:-1
mask_label += [0] + [1 for _ in range(beam_size)]
masked_tgt_masks.append(
mask_label + [0 for _ in range(out_seq_len - len(mask_label))]
)
mask_ins_targets = [
mask_input[1:-1]
+ [0 for _ in range(in_seq_len - 1 - len(mask_input[1:-1]))]
for mask_input in mask_inputs
]
# transform to tensor
masked_tgt_masks = torch.tensor(
masked_tgt_masks, device=out_tokens.device
).bool()
mask_ins_targets = torch.tensor(mask_ins_targets, device=in_tokens.device)
masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx)
return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets
if use_cuda:
return _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx)
return _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx)
def _get_del_targets(in_tokens, out_tokens, padding_idx):
libnat, use_cuda = load_libnat()
def _get_del_targets_cuda(in_tokens, out_tokens, padding_idx):
in_masks = in_tokens.ne(padding_idx)
out_masks = out_tokens.ne(padding_idx)
word_del_targets = libnat.generate_deletion_labels(
in_tokens.int(),
libnat.levenshtein_distance(
in_tokens.int(),
out_tokens.int(),
in_masks.sum(1).int(),
out_masks.sum(1).int(),
),
)
word_del_targets = word_del_targets.type_as(in_tokens).masked_fill_(
~in_masks, 0
)
return word_del_targets
def _get_del_targets_cpu(in_tokens, out_tokens, padding_idx):
out_seq_len = out_tokens.size(1)
with torch.cuda.device_of(in_tokens):
in_tokens_list = [
[t for t in s if t != padding_idx]
for i, s in enumerate(in_tokens.tolist())
]
out_tokens_list = [
[t for t in s if t != padding_idx]
for i, s in enumerate(out_tokens.tolist())
]
full_labels = libnat.suggested_ed2_path(
in_tokens_list, out_tokens_list, padding_idx
)
word_del_targets = [b[-1] for b in full_labels]
word_del_targets = [
labels + [0 for _ in range(out_seq_len - len(labels))]
for labels in word_del_targets
]
# transform to tensor
word_del_targets = torch.tensor(word_del_targets, device=out_tokens.device)
return word_del_targets
if use_cuda:
return _get_del_targets_cuda(in_tokens, out_tokens, padding_idx)
return _get_del_targets_cpu(in_tokens, out_tokens, padding_idx)
def _apply_ins_masks(
in_tokens, in_scores, mask_ins_pred, padding_idx, unk_idx, eos_idx
):
in_masks = in_tokens.ne(padding_idx)
in_lengths = in_masks.sum(1)
# HACK: hacky way to shift all the paddings to eos first.
in_tokens.masked_fill_(~in_masks, eos_idx)
mask_ins_pred.masked_fill_(~in_masks[:, 1:], 0)
out_lengths = in_lengths + mask_ins_pred.sum(1)
out_max_len = out_lengths.max()
out_masks = new_arange(out_lengths, out_max_len)[None, :] < out_lengths[:, None]
reordering = (mask_ins_pred + in_masks[:, 1:].long()).cumsum(1)
out_tokens = (
in_tokens.new_zeros(in_tokens.size(0), out_max_len)
.fill_(padding_idx)
.masked_fill_(out_masks, unk_idx)
)
out_tokens[:, 0] = in_tokens[:, 0]
out_tokens.scatter_(1, reordering, in_tokens[:, 1:])
out_scores = None
if in_scores is not None:
in_scores.masked_fill_(~in_masks, 0)
out_scores = in_scores.new_zeros(*out_tokens.size())
out_scores[:, 0] = in_scores[:, 0]
out_scores.scatter_(1, reordering, in_scores[:, 1:])
return out_tokens, out_scores
def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, unk_idx):
word_ins_masks = in_tokens.eq(unk_idx)
out_tokens = in_tokens.masked_scatter(word_ins_masks, word_ins_pred[word_ins_masks])
if in_scores is not None:
out_scores = in_scores.masked_scatter(
word_ins_masks, word_ins_scores[word_ins_masks]
)
else:
out_scores = None
return out_tokens, out_scores
def _apply_del_words(
in_tokens, in_scores, in_attn, word_del_pred, padding_idx, bos_idx, eos_idx
):
# apply deletion to a tensor
in_masks = in_tokens.ne(padding_idx)
bos_eos_masks = in_tokens.eq(bos_idx) | in_tokens.eq(eos_idx)
max_len = in_tokens.size(1)
word_del_pred.masked_fill_(~in_masks, 1)
word_del_pred.masked_fill_(bos_eos_masks, 0)
reordering = new_arange(in_tokens).masked_fill_(word_del_pred, max_len).sort(1)[1]
out_tokens = in_tokens.masked_fill(word_del_pred, padding_idx).gather(1, reordering)
out_scores = None
if in_scores is not None:
out_scores = in_scores.masked_fill(word_del_pred, 0).gather(1, reordering)
out_attn = None
if in_attn is not None:
_mask = word_del_pred[:, :, None].expand_as(in_attn)
_reordering = reordering[:, :, None].expand_as(in_attn)
out_attn = in_attn.masked_fill(_mask, 0.0).gather(1, _reordering)
return out_tokens, out_scores, out_attn
def _skip(x, mask):
"""
Getting sliced (dim=0) tensor by mask. Supporting tensor and list/dict of tensors.
"""
if isinstance(x, int):
return x
if x is None:
return None
if isinstance(x, torch.Tensor):
if x.size(0) == mask.size(0):
return x[mask]
elif x.size(1) == mask.size(0):
return x[:, mask]
if isinstance(x, list):
return [_skip(x_i, mask) for x_i in x]
if isinstance(x, dict):
return {k: _skip(v, mask) for k, v in x.items()}
raise NotImplementedError
def _skip_encoder_out(encoder, encoder_out, mask):
if not mask.any():
return encoder_out
else:
return encoder.reorder_encoder_out(
encoder_out, mask.nonzero(as_tuple=False).squeeze()
)
def _fill(x, mask, y, padding_idx):
"""
Filling tensor x with y at masked positions (dim=0).
"""
if x is None:
return y
assert x.dim() == y.dim() and mask.size(0) == x.size(0)
assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2))
n_selected = mask.sum()
assert n_selected == y.size(0)
if n_selected == x.size(0):
return y
if x.size(1) < y.size(1):
dims = [x.size(0), y.size(1) - x.size(1)]
if x.dim() == 3:
dims.append(x.size(2))
x = torch.cat([x, x.new_zeros(*dims).fill_(padding_idx)], 1)
x[mask] = y
elif x.size(1) > y.size(1):
x[mask] = padding_idx
if x.dim() == 2:
x[mask, : y.size(1)] = y
else:
x[mask, : y.size(1), :] = y
else:
x[mask] = y
return x
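# Hedged usage sketch (not part of the original module): _skip and _fill pair up
# so a decoding step runs only on the still-active rows of a batch, then writes
# the results back. The mask construction below is illustrative.
#
#     can_edit = output_tokens.ne(pad).sum(1) > 2   # boolean mask over the batch
#     active = _skip(output_tokens, can_edit)       # slice out rows to refine
#     ...                                           # refine `active` somehow
#     output_tokens = _fill(output_tokens, can_edit, active, pad)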
| pytorch/fairseq | fairseq/models/nat/levenshtein_utils.py | Python | mit | 9,508 |
'''
@author: Sergio Rojas
@contact: rr.sergio@gmail.com
--------------------------
Content released under the
Attribution-NonCommercial-ShareAlike 3.0 Venezuela license (CC BY-NC-SA 3.0 VE)
http://creativecommons.org/licenses/by-nc-sa/3.0/ve/
Created on April 21, 2016
'''
def fog(g,x):
return ((g(x))**2 + 3)
def g(x):
return (6.0*x**2 + 5.0*x + 8.0)
x = 0
print(fog(g,x))
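# Worked check (comment added for clarity): g(0) = 6.0*0**2 + 5.0*0 + 8.0 = 8.0,
# so fog(g, 0) = 8.0**2 + 3 = 67.0, which is what the print call outputs.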
| rojassergio/Aprendiendo-a-programar-en-Python-con-mi-computador | Programas_Capitulo_05/Cap05_pagina_115.py | Python | mit | 369 |
from office365.runtime.client_value import ClientValue
class SPSiteCreationRequest(ClientValue):
def __init__(self, title, url, owner=None):
super(SPSiteCreationRequest, self).__init__()
self.Title = title
self.Url = url
self.WebTemplate = "SITEPAGEPUBLISHING#0"
self.Owner = owner
self.Lcid = 1033
self.ShareByEmailEnabled = False
self.Classification = ""
self.Description = ""
self.SiteDesignId = "00000000-0000-0000-0000-000000000000"
self.HubSiteId = "00000000-0000-0000-0000-000000000000"
self.WebTemplateExtensionId = "00000000-0000-0000-0000-000000000000"
@property
def entity_type_name(self):
return "Microsoft.SharePoint.Portal.SPSiteCreationRequest"
| vgrem/Office365-REST-Python-Client | office365/sharepoint/portal/site_creation_request.py | Python | mit | 780 |
import mcpi.minecraft as minecraft
mc = minecraft.Minecraft.create()
def ClearLandscape():
# set air layer
# (x1, y1, z1, x2, y2, z2, blockID, blockState)
mc.setBlocks(-248, 1, -248, 248, 248, 248, 0)
# set layer of podzol (block ID 3 with data value 2)
mc.setBlocks(-248, 0, 0, 248, -3, 248, 3, 2)
# set layer of mushrooms
# mc.setBlocks(-248, 0, -248, 248, 0, 248, 40)
mc.postToChat("Altering landscape...")
ClearLandscape()
| cssidy/minecraft-hacks | building/mushroomFields.py | Python | mit | 414 |
#!/usr/bin/python3
import pygame
import os
import random
from Target import *
class Train(pygame.sprite.Sprite, Target):
maxWait = 30  # maximum time, in seconds, to wait for a new train after the old one is gone
def __init__(self, FPS):
pygame.sprite.Sprite.__init__(self)
Target.__init__(self, True,'train')
self.image= pygame.image.load(os.path.join('Images','train.png')).convert_alpha()
self.rect = self.image.get_rect()
self.rect.x, self.rect.y = Train.coord[0][0], Train.coord[0][1]
self.coord = Train.coord
self.moveStep = 0
self.speed = Train.getSpeed()
self.exits = True
Train.FPS = FPS
self.wait = Train.getWait()
self.targetName = 'train'
def getWait():
return int( random.random()*Train.FPS* Train.maxWait)
def kill(self):
self.wait = Train.getWait()
self.moveStep = 0
self.speed = Train.getSpeed()
def getSpeed():
return int(random.random()*3) + 1
def display(self, display):
display.blit(self.image, (self.rect.x, self.rect.y))
self.rect.x, self.rect.y = Train.coord[self.moveStep][0], Train.coord[self.moveStep][1]
def move(self):
#print(self.wait)
if self.wait < 0:
if self.moveStep < len(Train.coord) - self.speed:
self.moveStep += self.speed
else:
self.wait = Train.getWait()
self.moveStep = 0
self.speed = Train.getSpeed()
else:
self.wait -= 1
coord = [
[969, 133],
[968, 133],
[967, 133],
[966, 133],
[965, 133],
[964, 133],
[963, 133],
[962, 133],
[961, 133],
[960, 133],
[959, 133],
[958, 133],
[957, 133],
[956, 133],
[955, 133],
[954, 133],
[953, 133],
[952, 133],
[951, 133],
[950, 133],
[949, 133],
[948, 133],
[947, 133],
[946, 133],
[945, 133],
[944, 133],
[943, 133],
[942, 133],
[941, 133],
[940, 133],
[939, 133],
[938, 133],
[937, 133],
[936, 133],
[935, 133],
[934, 133],
[933, 133],
[932, 133],
[931, 133],
[930, 133],
[929, 133],
[928, 133],
[927, 133],
[926, 133],
[925, 133],
[924, 133],
[923, 133],
[922, 133],
[921, 133],
[920, 133],
[919, 133],
[918, 133],
[917, 133],
[916, 133],
[915, 133],
[914, 133],
[913, 133],
[912, 133],
[911, 133],
[910, 133],
[909, 133],
[908, 133],
[907, 133],
[906, 133],
[905, 133],
[904, 133],
[903, 133],
[902, 133],
[901, 133],
[900, 133],
[899, 133],
[898, 133],
[897, 133],
[896, 133],
[895, 133],
[894, 133],
[893, 133],
[892, 133],
[891, 133],
[890, 133],
[889, 133],
[888, 133],
[887, 133],
[886, 133],
[885, 133],
[884, 133],
[883, 133],
[882, 133],
[881, 133],
[880, 133],
[879, 133],
[878, 133],
[877, 133],
[876, 133],
[875, 133],
[874, 133],
[873, 133],
[872, 133],
[871, 133],
[870, 133],
[869, 133],
[868, 133],
[867, 133],
[866, 133],
[865, 133],
[864, 133],
[863, 133],
[862, 133],
[861, 133],
[860, 133],
[859, 133],
[858, 133],
[857, 133],
[856, 133],
[855, 133],
[854, 133],
[853, 133],
[852, 133],
[851, 133],
[850, 133],
[849, 133],
[848, 133],
[847, 133],
[846, 133],
[845, 132],
[844, 132],
[843, 132],
[842, 132],
[841, 132],
[840, 132],
[839, 132],
[838, 132],
[837, 132],
[836, 132],
[835, 132],
[834, 132],
[833, 132],
[832, 132],
[831, 132],
[830, 132],
[829, 132],
[828, 132],
[827, 132],
[826, 131],
[825, 131],
[824, 131],
[823, 131],
[822, 131],
[821, 131],
[820, 131],
[819, 131],
[818, 131],
[817, 131],
[816, 131],
[815, 131],
[814, 130],
[813, 130],
[812, 130],
[811, 130],
[810, 130],
[809, 130],
[808, 130],
[807, 130],
[806, 130],
[805, 130],
[804, 129],
[803, 129],
[802, 129],
[801, 129],
[800, 129],
[799, 129],
[798, 129],
[797, 129],
[796, 128],
[795, 128],
[794, 128],
[793, 128],
[792, 128],
[791, 128],
[790, 127],
[789, 127],
[788, 127],
[787, 127],
[786, 127],
[785, 127],
[784, 127],
[783, 126],
[782, 126],
[781, 126],
[780, 126],
[779, 126],
[778, 126],
[777, 125],
[776, 125],
[775, 125],
[774, 125],
[773, 125],
[772, 124],
[771, 124],
[770, 124],
[769, 124],
[768, 124],
[767, 123],
[766, 123],
[765, 123],
[764, 123],
[763, 123],
[762, 122],
[761, 122],
[760, 122],
[759, 122],
[758, 122],
[757, 121],
[756, 121],
[755, 121],
[754, 121],
[753, 120],
[752, 120],
[751, 120],
[750, 120],
[749, 119],
[748, 119],
[747, 119],
[746, 119],
[745, 118],
[744, 118],
[743, 118],
[742, 118],
[741, 117],
[740, 117],
[739, 117],
[738, 117],
[737, 116],
[736, 116],
[735, 116],
[734, 116],
[733, 115],
[732, 115],
[731, 115],
[730, 114],
[729, 114],
[728, 114],
[727, 113],
[726, 113],
[725, 113],
[724, 112],
[723, 112],
[722, 112],
[721, 112],
[720, 111],
[719, 111],
[718, 111],
[717, 110],
[716, 110],
[715, 110],
[714, 109],
[713, 109],
[712, 109],
[711, 108],
[710, 108],
[709, 108],
[708, 107],
[707, 107],
[706, 107],
[705, 106],
[704, 106],
[703, 105],
[702, 105],
[701, 105],
[700, 104],
[699, 104],
[698, 104],
[697, 103],
[696, 103],
[695, 103],
[694, 102],
[693, 102],
[692, 101],
[691, 101],
[690, 100],
[689, 100],
[688, 99],
[687, 99],
[686, 99],
[685, 98],
[684, 98],
[683, 97],
[682, 97],
[681, 96],
[680, 96],
[679, 95],
[678, 95],
[677, 95],
[676, 94],
[675, 93],
[674, 93],
[673, 92],
[672, 92],
[671, 91],
[670, 91],
[669, 90],
[668, 90],
[667, 89],
[666, 88],
[665, 88],
[664, 87],
[663, 87],
[662, 86],
[661, 85],
[660, 85],
[659, 84],
[658, 83],
[657, 83],
[656, 82],
[655, 81],
[654, 80],
[653, 80],
[652, 79],
[651, 78],
[650, 77],
[649, 76],
[648, 75],
[647, 75],
[646, 74],
[645, 73],
[644, 72],
[643, 71],
[642, 70],
[641, 69],
[640, 68],
[639, 67],
[638, 66],
[637, 65],
[636, 64],
[635, 63],
[634, 61],
[633, 60],
[632, 59],
[631, 58],
[630, 57],
[629, 55],
[628, 54],
[627, 53],
[626, 51],
[625, 50],
[624, 49],
[623, 47],
[622, 46],
[621, 44],
[620, 43],
[619, 41],
[618, 40],
[617, 38],
[616, 37],
[615, 35],
[614, 33],
[613, 32],
[612, 30],
[611, 29],
[610, 27],
[609, 25],
[608, 24],
[607, 22],
[606, 21],
[605, 19],
[604, 17],
[603, 16],
[602, 14],
[601, 13],
[600, 11],
[599, 10],
[598, 8],
[597, 6],
[596, 5],
[595, 3],
[594, 2],
[593, 1],
[592, -1],
[591, -2],
[590, -4],
[589, -5],
[588, -7],
[587, -8],
[586, -9],
[585, -11],
[584, -12],
[583, -13],
[582, -15],
[581, -16],
[580, -17],
[579, -18],
[578, -20],
[577, -21],
[576, -22],
[575, -23],
[574, -25],
[573, -26],
[572, -27],
[571, -28],
[570, -29],
[569, -30],
[568, -32],
[567, -33],
[566, -34],
[565, -35],
[564, -36],
[563, -37],
[562, -38],
[561, -39],
[560, -40],
[559, -41],
[558, -43],
[557, -44],
[556, -45],
[555, -46],
[554, -47],
[553, -48],
[552, -49],
[551, -50],
[550, -51],
[549, -52],
[548, -53],
[547, -54],
[546, -55],
[545, -55],
[544, -56],
[543, -57],
[542, -58],
[541, -59],
[540, -60],
[539, -61],
[538, -62],
[537, -63],
[536, -64],
[535, -65],
[534, -66],
[533, -66],
[532, -67],
[531, -68],
[530, -69],
[529, -70],
[528, -70],
[527, -71],
[526, -72],
[525, -73],
[524, -74],
[523, -75],
[522, -75],
[521, -76],
[520, -77],
[519, -78],
[518, -79],
[517, -79],
[516, -80],
[515, -81],
[514, -82],
[513, -82],
[512, -83],
[511, -84],
[510, -85],
[509, -85],
[508, -86],
[507, -87],
[506, -87],
[505, -88],
[504, -89],
[503, -90],
[502, -90],
[501, -91],
[500, -92],
[499, -92],
[498, -93],
[497, -94],
[496, -94],
[495, -95],
[494, -96],
[493, -96],
[492, -97],
[491, -98],
[490, -98],
[489, -99],
[488, -99],
[487, -100],
[486, -101],
[485, -101],
[484, -102],
[483, -103],
[482, -103],
[481, -104],
[480, -104],
[479, -105],
[478, -105],
[477, -106],
[476, -107],
[475, -107],
[474, -108],
[473, -108],
[472, -109],
[471, -109],
[470, -110],
[469, -111],
[468, -111],
[467, -112],
[466, -112],
[465, -113],
[464, -113],
[463, -114],
[462, -114],
[461, -115],
[460, -115],
[459, -116],
[458, -116],
[457, -117],
[456, -117],
[455, -118],
[454, -118],
[453, -119],
[452, -119],
[451, -120],
[450, -120],
[449, -121],
[448, -121],
[447, -122],
[446, -122],
[445, -122],
[444, -123],
[443, -123],
[442, -124],
[441, -124],
[440, -125],
[439, -125],
[438, -126],
[437, -126],
[436, -126],
[435, -127],
[434, -127],
[433, -128],
[432, -128],
[431, -128],
[430, -129],
[429, -129],
[428, -130],
[427, -130],
[426, -130],
[425, -131],
[424, -132],
[423, -133],
[422, -134],
[421, -135],
[420, -136],
[419, -137],
[418, -138],
[417, -139],
[416, -140],
[415, -141],
[414, -142],
[413, -143],
[412, -144],
[411, -145],
[410, -146],
[409, -147],
[408, -148],
[407, -149],
[406, -150],
[405, -151],
[404, -152],
[403, -153],
[402, -154],
[401, -155],
[400, -156],
[399, -157],
[398, -158],
[397, -159],
[396, -160],
[395, -161],
[394, -162],
[393, -163],
[392, -164],
[391, -165],
[390, -166],
[389, -167],
[388, -168],
[387, -169],
[386, -170]
]
| kubapok/tank-game | Train.py | Python | mit | 8,587 |
from urllib import request
from PyQt5.QtCore import QThread
class Downloader(QThread):
def __init__(self, wrapper, icon, path):
QThread.__init__(self)
self.wrapper = wrapper
self.icon = icon
self.path = path
def run(self):
try:
file_name, headers = request.urlretrieve(self.icon, self.path)
self.wrapper.icon = file_name
except:
pass
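# Hedged usage sketch (not part of the original file): `wrapper` is any object
# whose `icon` attribute should receive the local path of the downloaded file.
#
#     downloader = Downloader(wrapper, 'https://example.com/icon.png', '/tmp/icon.png')
#     downloader.start()   # executes run() on a Qt worker thread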
| raelgc/scudcloud | scudcloud/downloader.py | Python | mit | 429 |
from sqlalchemy import Column, ForeignKey, Integer, String, Float
from htsohm.db import Base
class GasLoading(Base):
__tablename__ = "gas_loadings"
id = Column(Integer, primary_key=True)
# relationship with `materials`
material_id = Column(Integer, ForeignKey("materials.id"))
# simulation input
adsorbate = Column(String(16))
pressure = Column(Float)
temperature = Column(Float)
# simulation output
absolute_volumetric_loading = Column(Float)
absolute_gravimetric_loading = Column(Float)
absolute_molar_loading = Column(Float)
excess_volumetric_loading = Column(Float)
excess_gravimetric_loading = Column(Float)
excess_molar_loading = Column(Float)
host_host_avg = Column(Float)
host_host_vdw = Column(Float)
host_host_cou = Column(Float)
adsorbate_adsorbate_avg = Column(Float)
adsorbate_adsorbate_vdw = Column(Float)
adsorbate_adsorbate_cou = Column(Float)
host_adsorbate_avg = Column(Float)
host_adsorbate_vdw = Column(Float)
host_adsorbate_cou = Column(Float)
# bin
bin_value = Column(Integer)
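# Hedged usage sketch (not part of the original file): persisting one simulation
# result. The `session` object is assumed to be a SQLAlchemy session created
# elsewhere in htsohm.db.
#
#     loading = GasLoading(material_id=1, adsorbate='CO2',
#                          pressure=1e5, temperature=298.0,
#                          absolute_volumetric_loading=42.0)
#     session.add(loading)
#     session.commit()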
| akaija/HTSOHM-dev | htsohm/db/gas_loading.py | Python | mit | 1,295 |
import os
ROOT = '/sdcard/realdata/'
SEGMENT_LENGTH = 60
| heidecjj/openpilot | selfdrive/loggerd/config.py | Python | mit | 58 |
from office365.runtime.client_value import ClientValue
class SecondaryAdministratorsInfo(ClientValue):
def __init__(self, email=None, loginName=None, userPrincipalName=None):
"""
:param str email:
:param str loginName:
:param str userPrincipalName:
"""
super(SecondaryAdministratorsInfo, self).__init__()
self.email = email
self.loginName = loginName
self.userPrincipalName = userPrincipalName
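# Hedged usage sketch (not part of the original file): a value object like this
# is typically sent when granting secondary site-collection administrators; the
# claims-style login name below is a placeholder.
#
#     admin = SecondaryAdministratorsInfo(
#         email="user@contoso.com",
#         loginName="i:0#.f|membership|user@contoso.com")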
| vgrem/Office365-REST-Python-Client | office365/sharepoint/tenant/administration/secondary_administrators_info.py | Python | mit | 474 |
"""
Django settings for creativejunkiez project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u&rtt(ezjj*)o&ef+_7dwrq*k*0=1(r1+1bpr=1yrxb+zju+ls'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'django.contrib.comments',
'django_yaba'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'creativejunkiez.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'creativejunkiez.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| anistark/creativejunkiez | creativejunkiez/settings.py | Python | mit | 2,716 |
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth import login as auth_login
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext as _
# Imports added so the view below is self-contained; the two helper locations
# are assumed from pinax's layout and are not shown in this snippet.
from account.utils import get_default_redirect
from signup_codes.models import check_signup_code
from account.forms import SignupForm
def signup(request, form_class=SignupForm,
template_name="account/signup.html", success_url=None):
if success_url is None:
success_url = get_default_redirect(request)
if request.method == "POST":
form = form_class(request.POST)
if form.is_valid():
username, password = form.save()
user = authenticate(username=username, password=password)
auth_login(request, user)
request.user.message_set.create(
message=_("Successfully logged in as %(username)s.") % {
'username': user.username
})
return HttpResponseRedirect(success_url)
else:
code = request.GET.get("code")
signup_code = check_signup_code(code)
if signup_code:
form = form_class(initial={"signup_code": code})
else:
if not settings.ACCOUNT_OPEN_SIGNUP:
# if account signup is not open, we want to fail when there is
# no signup code or the provided code was invalid.
return render_to_response("signup_code/failure.html", {
"code": code,
}, context_instance=RequestContext(request))
else:
form = form_class()
return render_to_response(template_name, {
"form": form,
}, context_instance=RequestContext(request))
| ingenieroariel/pinax | apps/signup_codes/views.py | Python | mit | 1,430 |
# -*- coding: utf-8 -*-
try:
from django.urls import get_script_prefix, set_script_prefix
except ImportError:
from django.core.urlresolvers import get_script_prefix, set_script_prefix
class script_prefix(object):
def __init__(self, newpath):
self.newpath = newpath
self.oldprefix = get_script_prefix()
def __enter__(self):
set_script_prefix(self.newpath)
def __exit__(self, type, value, traceback):
set_script_prefix(self.oldprefix)
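# Hedged usage sketch (not part of the original file): temporarily override the
# script prefix inside a test, restoring the previous prefix on exit.
#
#     with script_prefix('/myapp/'):
#         url = reverse('home')   # reversed URLs now start with '/myapp/'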
| ierror/django-js-reverse | django_js_reverse/tests/utils.py | Python | mit | 489 |
from builtins import map
from builtins import range
from builtins import object
import unittest
import ROOT
import os
from PyAnalysisTools.PlottingUtils import HistTools as ht
from PyAnalysisTools.base import InvalidInputError
cwd = os.path.dirname(__file__)
ROOT.gROOT.SetBatch(True)
class PlotConfig(object):
pass
class TestHistTools(unittest.TestCase):
def setUp(self):
self.hist = ROOT.TH1F('h', '', 10, -1., 1.)
self.hist.FillRandom('gaus', 10000)
self.plot_config = PlotConfig()
self.plot_config.axis_labels = None
def tearDown(self):
del self.hist
def test_normalise(self):
ht.normalise(self.hist)
self.assertAlmostEqual(self.hist.Integral(), 1., delta=1e-5)
def test_normalise_empty(self):
hist = ROOT.TH1F('h', '', 10, -1., 1.)
res = ht._normalise_1d_hist(hist)
self.assertEqual(0., res.Integral())
def test_normalise_asymmetric(self):
h = ht.rebin(self.hist, [-1, 0.4, 1.])
ht.normalise(h)
self.assertAlmostEqual(h.Integral(), 1., delta=1e-5)
def test_normalise_stack(self):
stack = ROOT.THStack()
stack.Add(self.hist)
res = ht._normalise_1d_hist(stack)
self.assertEqual(sum([i.Integral() for i in res.GetHists()]), self.hist.Integral())
def test_normalise_list(self):
ht.normalise([self.hist])
self.assertAlmostEqual(self.hist.Integral(), 1., delta=1.e-5)
def test_scale_hist(self):
ht.scale(self.hist, 10.)
self.assertEqual(self.hist.Integral(), 100000.)
def test_get_color(self):
self.hist.Draw('hist')
colors = ht.get_colors([self.hist])
self.assertTrue(colors == [602] or colors == [1])
def test_read_bin_from_label(self):
self.hist.GetXaxis().SetBinLabel(2, 'label')
self.assertEqual(ht.read_bin_from_label(self.hist, 'label'), 2)
def test_read_bin_from_label_non_existing(self):
self.assertEqual(ht.read_bin_from_label(self.hist, 'label'), None)
def test_read_bin_from_multi_label(self):
self.hist.GetXaxis().SetBinLabel(2, 'label')
self.hist.GetXaxis().SetBinLabel(3, 'label')
self.assertEqual(ht.read_bin_from_label(self.hist, 'label'), 2)
def test_set_axis_labels_no_labels(self):
self.assertEqual(ht.set_axis_labels(self.hist, self.plot_config), None)
def test_set_axis_labels(self):
self.plot_config.axis_labels = list(map(str, list(range(self.hist.GetNbinsX()))))
self.assertEqual(ht.set_axis_labels(self.hist, self.plot_config), None)
self.assertEqual(self.hist.GetXaxis().GetBinLabel(5), '4')
def test_rebin_const(self):
h = ht.rebin(self.hist, 5)
self.assertEqual(h.GetNbinsX(), 2)
def test_rebin_list(self):
h = ht.rebin(self.hist, [-1, 0.4, 1.])
self.assertEqual(h.GetNbinsX(), 2)
self.assertEqual(h.GetXaxis().GetBinLowEdge(1), -1.)
self.assertEqual(h.GetXaxis().GetBinLowEdge(2), 0.4)
self.assertEqual(h.GetXaxis().GetBinLowEdge(3), 1.0)
def test_rebin_const_list(self):
h = ht.rebin([self.hist], 5)
self.assertIsInstance(h, list)
self.assertEqual(h[0].GetNbinsX(), 2)
def test_rebin_list_list(self):
h = ht.rebin([self.hist], [-1, 0.4, 1.])
self.assertIsInstance(h, list)
self.assertEqual(h[0].GetNbinsX(), 2)
self.assertEqual(h[0].GetXaxis().GetBinLowEdge(2), 0.4)
def test_rebin_const_dict(self):
h = ht.rebin({'foo': self.hist}, 5)
self.assertIsInstance(h, dict)
self.assertEqual(h['foo'].GetNbinsX(), 2)
def test_rebin_list_dict(self):
h = ht.rebin({'foo': self.hist}, [-1, 0.4, 1.])
self.assertIsInstance(h, dict)
self.assertEqual(h['foo'].GetNbinsX(), 2)
self.assertEqual(h['foo'].GetXaxis().GetBinLowEdge(2), 0.4)
def test_rebin_const_dict_list(self):
h = ht.rebin({'foo': [self.hist]}, 5)
self.assertIsInstance(h, dict)
self.assertEqual(h['foo'][0].GetNbinsX(), 2)
def test_rebin_list_dict_list(self):
h = ht.rebin({'foo': [self.hist]}, [-1, 0.4, 1.])
self.assertIsInstance(h, dict)
self.assertEqual(h['foo'][0].GetNbinsX(), 2)
self.assertEqual(h['foo'][0].GetXaxis().GetBinLowEdge(2), 0.4)
def test_rebin_asymmetric(self):
h = ht.rebin(self.hist, [-1, 0.4, 1.])
self.assertEqual(h.GetNbinsX(), 2)
self.assertAlmostEqual(h.GetBinContent(1), self.hist.Integral(self.hist.FindBin(-1.),
self.hist.FindBin(0.39))/1.4, delta=1e-3)
def test_rebin_asymetric_disable_binwidth_division(self):
h = ht.rebin(self.hist, [-1, 0.4, 1.], disable_bin_width_division=True)
self.assertEqual(h.GetNbinsX(), 2)
self.assertEqual(h.GetNbinsX(), 2)
self.assertEqual(h.GetBinContent(1), self.hist.Integral(self.hist.FindBin(-1.), self.hist.FindBin(0.39)))
def test_rebin_invalid_dict(self):
self.assertRaises(InvalidInputError, ht.rebin, {'foo': tuple(self.hist)}, [-1, 0.4, 1.])
def test_rebin_invalid_factor(self):
self.assertRaises(InvalidInputError, ht.rebin, self.hist, (-1, 0.4, 1.))
def test_rebin_entity(self):
self.assertEqual(ht.rebin(self.hist, None), self.hist)
self.assertEqual(ht.rebin(self.hist, 1.), self.hist)
def test_has_asymmetric_binning(self):
h = ht.rebin(self.hist, [-1, -0.4, 1.])
self.assertTrue(ht.has_asymmetric_binning(h))
def test_has_asymmetric_binning_sym_binning(self):
self.assertFalse(ht.has_asymmetric_binning(self.hist))
def test_overflow_merge(self):
self.hist.Fill(100)
expected = self.hist.GetBinContent(10) + self.hist.GetBinContent(11)
ht.merge_overflow_bins(self.hist)
self.assertEqual(self.hist.GetBinContent(10), expected)
def test_overflow_merge_dict(self):
self.hist.Fill(100)
expected = self.hist.GetBinContent(10) + self.hist.GetBinContent(11)
ht.merge_overflow_bins({'foo': self.hist})
self.assertEqual(self.hist.GetBinContent(10), expected)
def test_underflow_merge(self):
self.hist.Fill(-1100)
expected = self.hist.GetBinContent(0) + self.hist.GetBinContent(1)
ht.merge_underflow_bins(self.hist)
self.assertEqual(self.hist.GetBinContent(1), expected)
def test_underflow_merge_list(self):
self.hist.Fill(-1100)
expected = self.hist.GetBinContent(0) + self.hist.GetBinContent(1)
ht.merge_underflow_bins({'foo': self.hist})
self.assertEqual(self.hist.GetBinContent(1), expected)
| morgenst/PyAnalysisTools | tests/unit/TestHistTools.py | Python | mit | 6,722 |
import sys
import argparse
import json
from controllers.TwitterDumpController import *
from controllers.SentiWordNetController import *
import dateutil.parser
def main():
parser = argparse.ArgumentParser(description="Generates sentiment analysis data")
parser.add_argument("file", type=str, default="", help="The Twitter dump's path.")
parser.add_argument("--output", type=argparse.FileType("w"), default=sys.stdout, help="The file to which the output should be redirected. Default is stdout.")
group = parser.add_mutually_exclusive_group()
group.add_argument("--sentiwordnet", type=str, help="Generate sentiment analysis with the SentiWordNet lexicon provided")
group.add_argument("--textblob", action="store_true", help="Generate sentiment analysis with the TextBlob module")
group.add_argument("--smileys", action="store_true", help="Generate sentiment analysis based on the presence of smileys")
global args
args = parser.parse_args()
# Load tweets
twitterDumpController = TwitterDumpController(args.file)
tweets = twitterDumpController.load()
# Analyze tweets
result = {}
if args.sentiwordnet != None:
print "Loading SentiWordNet lexicon.."
sentiWordNetController = SentiWordNetController(args.sentiwordnet)
sentiWordNetController.load()
print "Loaded SentiWordNet lexicon."
print "Analyzing tweets.."
count = 0
for tweet in tweets:
tweetAnalysis = sentiWordNetController.analyzeSentence(tweet["text"])
date = dateutil.parser.parse(tweet["created_at"]).strftime("%Y-%m-%d")
if date in result:
result[date].append(tweetAnalysis)
else:
result[date] = [tweetAnalysis]
count += 1
if count % 500 == 0:
print "Analyzed", count, "tweets.."
print "Analyzed", count, "tweets."
elif args.textblob == True:
from textblob import TextBlob
print "Analyzing tweets.."
count = 0
for tweet in tweets:
blob = TextBlob(tweet["text"])
tweetAnalysis = {"polarity": 0, "subjectivity": 0}
for sentence in blob.sentences:
tweetAnalysis["polarity"] += sentence.sentiment.polarity
tweetAnalysis["subjectivity"] += sentence.sentiment.subjectivity
if len(blob.sentences) > 0:
tweetAnalysis["polarity"] /= len(blob.sentences)
tweetAnalysis["subjectivity"] /= len(blob.sentences)
date = dateutil.parser.parse(tweet["created_at"]).strftime("%Y-%m-%d")
if date in result:
result[date].append(tweetAnalysis)
else:
result[date] = [tweetAnalysis]
count += 1
if count % 500 == 0:
print "Analyzed", count, "tweets.."
print "Analyzed", count, "tweets."
elif args.smileys == True:
print "Analyzing tweets.."
count = 0
for tweet in tweets:
tweetAnalysis = {"sentiment": 0}
if any([x in tweet["text"] for x in [":)", ":D", ":d", ";)", "=)", ":>"]]):
tweetAnalysis = {"sentiment": 1}
elif any([x in tweet["text"] for x in [":(", ";(", "=(", ":<"]]):
tweetAnalysis = {"sentiment": -1}
date = dateutil.parser.parse(tweet["created_at"]).strftime("%Y-%m-%d")
if date in result:
result[date].append(tweetAnalysis)
else:
result[date] = [tweetAnalysis]
count += 1
if count % 500 == 0:
print "Analyzed", count, "tweets.."
print "Analyzed", count, "tweets."
args.output.write(json.dumps(result))
args.output.close()
if __name__ == "__main__":
main()
| victorpopescu/TwitterStockAnalyzer | main.py | Python | mit | 3,882 |
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.jit.backend.llsupport.codemap import CodemapStorage, \
CodemapBuilder, unpack_traceback, find_codemap_at_addr
NULL = lltype.nullptr(rffi.CArray(lltype.Signed))
def test_register_codemap():
codemap = CodemapStorage()
codemap.setup()
codemap.register_codemap((100, 20, [13, 14, 15]))
codemap.register_codemap((300, 30, [16, 17, 18]))
codemap.register_codemap((200, 100, [19, 20, 21, 22, 23]))
#
raw100 = find_codemap_at_addr(100, NULL)
assert find_codemap_at_addr(119, NULL) == raw100
assert not find_codemap_at_addr(120, NULL)
#
raw200 = find_codemap_at_addr(200, NULL)
assert raw200 != raw100
assert find_codemap_at_addr(299, NULL) == raw200
#
raw300 = find_codemap_at_addr(329, NULL)
assert raw300 != raw100 and raw300 != raw200
assert find_codemap_at_addr(300, NULL) == raw300
#
codemap.free()
def test_free_with_alignment():
codemap = CodemapStorage()
codemap.setup()
builder = CodemapBuilder()
builder.enter_portal_frame(23, 34, 0)
builder.enter_portal_frame(45, 56, 20)
codemap.register_codemap(builder.get_final_bytecode(200, 100))
assert unpack_traceback(215) == [34]
assert unpack_traceback(225) == [34, 56]
codemap.free_asm_block(190, 310) # a bit larger
assert unpack_traceback(215) == []
assert unpack_traceback(225) == []
codemap.free()
| oblique-labs/pyVM | rpython/jit/backend/llsupport/test/test_codemap.py | Python | mit | 1,455 |
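# Explanatory note (comment added for clarity): this script prints the largest
# number divisible by 3 that can be formed from the digits of the input. It
# counts each digit, then, if the digit sum is not divisible by 3, drops the
# smallest single digit whose removal fixes the remainder; failing that, it
# drops up to two digits not divisible by 3. The surviving digits are printed
# in descending order.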
digits = [0 for i in range(10)]
for c in input():
digits[int(c)] += 1
sum = 0
for i in range(10):
sum += i * digits[i]
if sum % 3 != 0:
for i in range(10):
if digits[i] > 0 and (sum - i) % 3 == 0:
digits[i] -= 1
sum -= i
break
for step in range(2):
if sum % 3 != 0:
for i in range(10):
if digits[i] > 0 and i % 3 != 0:
digits[i] -= 1
sum -= i
break
for i in range(9, -1, -1):
print(str(i) * digits[i], end='')
print()
| dluschan/school | olymp/divisible.py | Python | mit | 554 |
#!/usr/bin/env python
import os
from setuptools import setup
import versioneer
from pip.download import PipSession
from pip.req import parse_requirements
def get_requirements(filename):
''' Parse a pip-style requirements.txt file to setuptools format '''
install_reqs = parse_requirements(filename, session=PipSession())
return [str(ir.req) for ir in install_reqs]
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
setup(name='django-namespaced',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='Automagic url namespace resolution',
long_description=README,
author='Ferrix Hovi',
author_email='ferrix@codetry.fi',
install_requires=get_requirements('requirements.txt'),
setup_requires=get_requirements('development.txt'),
packages=['namespaced'],
url='https://github.com/codetry/django_namespaced/',
license='MIT License',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
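# Hedged example (not part of the original file): with a requirements.txt of
#     Django>=1.8
#     requests
# get_requirements('requirements.txt') returns ['Django>=1.8', 'requests'].
# Note that pip.req.parse_requirements was removed in pip 10; this setup script
# targets the older pip API it imports above.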
| codetry/django_namespaced | setup.py | Python | mit | 1,442 |
import gc
import inspect
exclude = [
"function",
"type",
"list",
"dict",
"tuple",
"wrapper_descriptor",
"module",
"method_descriptor",
"member_descriptor",
"instancemethod",
"builtin_function_or_method",
"frame",
"classmethod",
"classmethod_descriptor",
"_Environ",
"MemoryError",
"_Printer",
"_Helper",
"getset_descriptor",
]
def dumpObjects():
gc.collect()
oo = gc.get_objects()
for o in oo:
if getattr(o, "__class__", None):
name = o.__class__.__name__
if name not in exclude:
filename = inspect.getabsfile(o.__class__)
print "Object of class:", name, "...",
print "defined in file:", filename
if __name__=="__main__":
class TestClass:
pass
testObject1 = TestClass()
testObject2 = TestClass()
dumpObjects()
| ActiveState/code | recipes/Python/457665_Debug_runtime_objects_using/recipe-457665.py | Python | mit | 952 |
import pytest
import sentlex.sentanalysis_potts as sentdoc
import sentlex
TESTDOC_ADJ = 'good/JJ good/JJ good/JJ good/JJ good/JJ good/JJ good/JJ good/JJ good/JJ good/JJ'
TESTDOC_UNTAGGED = 'this cookie is good. it is very good indeed'
TESTDOC_BADADJ = 'bad_JJ Bad_JJ bAd_JJ'
TESTDOC_NEGATED = 'not/DT bad/JJ movie/NN ./. blah/NN blah/NN not/DT really/RR good/JJ either/DT ./.'
TESTDOC_CORRUPT = 'this_DT doc_NN is_VB not_DT not_DT not_DT in great/JJ shape/JJ good_JJ good_JJ good_JJ'
TESTDOC_EMPTY = ''
@pytest.fixture(scope='module')
def moby():
return sentlex.MobyLexicon()
@pytest.fixture(scope='module')
def ds(moby):
ds = sentdoc.PottsDocSentiScore()
ds.verbose = False
ds.set_lexicon(moby)
ds.set_parameters(a=True, v=False, n=False, r=False, negation=True, negation_window=15, negation_adjustment=0.5)
return ds
def test_potts_parameters(ds):
assert ds.config.negation
assert ds.config.negation_window == 15
assert ds.config.negation_adjustment == 0.5
def test_atenuation(ds):
negated_sent = 'not/DT good/JJ'
(dpos, dneg) = ds.classify_document(negated_sent)
assert dneg > dpos
ds.set_parameters(negation=True, negation_window=15, atenuation=True, at_pos=0.5, at_neg=0.5)
(dpos, dneg) = ds.classify_document(negated_sent)
ds.set_parameters(negation=True, negation_window=15, atenuation=True, at_pos=1.0, at_neg=1.0)
(dposfull, dnegfull) = ds.classify_document(negated_sent)
assert dpos > dneg
assert dposfull > dpos
def test_score_potts(ds):
(dpos, dneg) = ds.classify_document(TESTDOC_ADJ)
assert dpos > 0.0
# again, for negative text
(dpos, dneg) = ds.classify_document(TESTDOC_BADADJ)
assert dneg > 0.0
# negated text
(dpos, dneg) = ds.classify_document(TESTDOC_NEGATED)
assert dpos > 0.0
# corrupt data - should still work
(dpos, dneg) = ds.classify_document(TESTDOC_CORRUPT)
assert dpos > dneg
def test_sample_classes(moby):
for algo in [sentdoc.AV_LightPottsSentiScore(moby),
sentdoc.A_LightPottsSentiScore(moby),
sentdoc.AV_AggressivePottsSentiScore(moby),
sentdoc.A_AggressivePottsSentiScore(moby)]:
algo.verbose = False
(p, n) = algo.classify_document(TESTDOC_NEGATED)
assert n > 0.0
| bohana/sentlex | tests/test_potts.py | Python | mit | 2,316 |
# django-salesforce
#
# by Phil Christensen
# (c) 2012-2013 Freelancers Union (http://www.freelancersunion.org)
# See LICENSE.md for details
#
import logging
from django.conf import settings
from django import template, shortcuts, http
from salesforce.testrunner.example import models, forms
log = logging.getLogger(__name__)
def list_accounts(request):
accounts = models.Account.objects.all()[0:5]
return shortcuts.render_to_response('list-accounts.html', dict(
title = "List First 5 Accounts",
accounts = accounts,
))
def search_accounts(request):
accounts = []
if(request.method == 'POST'):
form = forms.SearchForm(request.POST)
if(form.is_valid()):
accounts = models.Account.objects.filter(Name__icontains=form.cleaned_data['query'])
else:
form = forms.SearchForm()
return shortcuts.render_to_response('search-accounts.html', dict(
title = "Search Accounts by Email",
accounts = accounts,
form = form,
))
| chromakey/django-salesforce | salesforce/testrunner/example/views.py | Python | mit | 1,070 |
#! /usr/bin/python2
import json
import pycurl
from io import BytesIO
import time
import calendar
import datetime
import sys
import getopt
import socket
base_url = "http://" + socket.getfqdn() + ":19888"
begin_rel = 24 * 3600
end_rel = 0
utc = 0
debug = 0
try:
opts, args = getopt.getopt(sys.argv[1:], "hm:b:e:ud", ["help", "mapred-url=", "begin=", "end=", "utc", "debug"])
except getopt.GetoptError:
print 'Args error'
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
print('jobs.py [-h|--help] [-m|--mapred-url URL] [-b|--begin] [-e|--end] [-u|--utc] [-d|--debug]')
sys.exit(0)
elif opt in ('-m', '--mapred-url'):
base_url = arg
elif opt in ('-b', '--begin'):
begin_rel = int(arg)
elif opt in ('-e', '--end'):
end_rel = int(arg)
elif opt in ('-u', '--utc'):
utc = 1
elif opt in ('-d', '--debug'):
debug = 1
else:
print 'Args error'
sys.exit(2)
# epoch time of local date
# now = datetime.date.today().strftime('%s')
now0 = datetime.date.today()
if utc:
# epoch time of GMT date
now = calendar.timegm(datetime.datetime(now0.year, now0.month, now0.day, 0, 0).timetuple())
else:
# epoch time of local date
now = int(time.mktime(datetime.datetime(now0.year, now0.month, now0.day, 0, 0).timetuple()))
print '# ' + str(now0)
begin = now - begin_rel
end = now - end_rel
url = base_url + "/ws/v1/history/mapreduce/jobs?finishedTimeBegin=" + str(1000 * begin) + "&finishedTimeEnd=" + str(1000 * end)
print '# ' + url
b = BytesIO()
c = pycurl.Curl()
c.setopt(pycurl.URL, url)
# c.setopt(pycurl.WRITEDATA, b)
c.setopt(pycurl.WRITEFUNCTION, b.write)
c.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_GSSNEGOTIATE)
c.setopt(pycurl.USERPWD, ":")
c.perform()
s = b.getvalue().decode('utf-8')
if c.getinfo(c.RESPONSE_CODE) != 200:
print s
print 'Status: %d' % c.getinfo(c.RESPONSE_CODE)
c.close()
b.close()
raise Exception()
c.close()
j = json.loads(s)
if debug:
print json.dumps(j, indent=4)
class User:
jobs = 0
fails = 0
total = 0
completed = 0
wait = 0
time = 0
wait_min = -1
wait_max = -1
users = dict()
if j["jobs"]:
for job in j["jobs"]["job"]:
username = job["user"]
if username not in users:
users[username] = User()
user = users[username]
wait = job["startTime"] - job["submitTime"]
user.jobs += 1
if job['state'] != 'NEW' and job['state'] != 'INITED' and job['state'] != 'RUNNING' and job['state'] != 'SUCCEEDED':
user.fails += 1
user.total += job['reducesTotal'] + job['mapsTotal']
user.completed += job['reducesCompleted'] + job['mapsCompleted']
user.wait += wait
user.time += job["finishTime"] - job["startTime"]
if user.wait_min == -1 or wait < user.wait_min:
user.wait_min = wait
if user.wait_max == -1 or wait > user.wait_max:
user.wait_max = wait
# print '#[progress]', username, users[username].total, user.completed, user.wait, user.time
sql_begin = datetime.datetime.fromtimestamp(begin).strftime('%Y-%m-%d %H:%M:%S')
sql_end = datetime.datetime.fromtimestamp(end).strftime('%Y-%m-%d %H:%M:%S')
print "INSERT INTO measure (name, start, end) VALUES ('jobs', '%s', '%s');" % (sql_begin, sql_end)
for username, user in users.iteritems():
print "INSERT INTO jobs (id_measure, user, jobs, fails, subjobs, real_wait, real_time, wait_min, wait_max) VALUES (last_insert_id(), '%s', %d, %d, %d, %d, %d, %d, %d);" % (username, user.jobs, user.fails, user.completed, user.wait, user.time, user.wait_min, user.wait_max)
| MetaCenterCloudPuppet/cesnet-site_hadoop | files/accounting/jobs.py | Python | mit | 3,427 |
import numpy as np
import matplotlib.pyplot as plt
import kernels
import random
import utils
class Regression(object):
def __init__(self, Ytrain, kernel=kernels.RBF(), add_noise=0.001, print_jit=False, Ytest=None, Xtest=None, Xtrain=None, cent_threshold=None):
self.Xtest = Xtest
self.Xtrain = Xtrain
self.Ytest = Ytest
self.Ytrain = Ytrain
self.add_noise = add_noise
self.kernel = kernel
self.cent_threshold = cent_threshold
self.print_jit = print_jit
if len(self.Xtrain) == 2:
for n,item in enumerate(self.Xtrain):
if isinstance(item[0], str) == False:
Xtrain_num = np.asarray(item).reshape(-1,len(item[0]))
Xtest_num = np.asarray(self.Xtest[n]).reshape(-1,len(item[0]))
Xtest_num = utils.normalize_centre(Xtrain_num, Xtest_num)
Xtrain_num = utils.normalize_centre(Xtrain_num)
else:
Xtrain_smiles = item
Xtest_smiles = self.Xtest[n]
elif isinstance(self.Xtrain[0], str) == False:
Xtrain = np.asarray(self.Xtrain).reshape(-1,1)
Xtest = np.asarray(self.Xtest).reshape(-1,1)
Xtest_num = utils.normalize_centre(Xtrain, Xtest)
Xtrain_num = utils.normalize_centre(Xtrain)
# if self.pca == True:
# Xtrain_num, W = GPy.util.linalg.pca(Xtrain_num, self.latent_dim)
# jitter = 0.05*np.random.rand((Xtrain_num.shape[0]), (Xtrain_num.shape[1]))
# jitter -= 0.025
# Xtrain_num = Xtrain_num - jitter
#
# Xtest_num = np.dot(W,Xtest_num.T).T
# jitter = 0.05*np.random.rand((Xtest_num.shape[0]), (Xtest_num.shape[1]))
# jitter -= 0.025
# Xtest_num = Xtest_num - jitter
if len(self.Xtrain) == 2:
self.Xtrain = []
self.Xtrain.append(Xtrain_num)
self.Xtrain.append(Xtrain_smiles)
self.Xtest = []
self.Xtest.append(Xtest_num)
self.Xtest.append(Xtest_smiles)
        elif not isinstance(self.Xtrain[0], str):
            self.Xtrain = Xtrain_num
            self.Xtest = Xtest_num
        # SMILES-only inputs need no normalization and are left as-is.
# Compute posterior mean vector
Xtrain_cov = self.kernel.compute(self.Xtrain, self.Xtrain, noise=True)
train_test_cov = self.kernel.compute(self.Xtrain, self.Xtest)
# print train_test_cov
tr_chol, jitter = kernels.jit_chol(Xtrain_cov, print_jit=self.print_jit)
trtecov_div_trchol = np.linalg.solve(tr_chol,train_test_cov)
ytr_div_trchol = np.linalg.solve(tr_chol,self.Ytrain)
post_mean = (np.dot(trtecov_div_trchol.T, ytr_div_trchol)).reshape(-1,1)
noise = add_noise*np.reshape([random.gauss(0, np.sqrt(self.kernel.noise_var)) for i in range(0,post_mean.shape[0])],(-1,1))
self.post_mean = post_mean + noise
# Compute posterior standard deviation and uncertainty bounds
test_cov = self.kernel.compute(self.Xtest,self.Xtest)
self.cov_post = (test_cov) - np.dot(trtecov_div_trchol.T,trtecov_div_trchol)
self.post_s = np.sqrt(np.absolute(np.diag(self.cov_post))).reshape(-1,1)
def predict(self):
# Return the posterior mean and upper and lower 95% confidence bounds
return self.post_mean, self.post_mean+(2*self.post_s), self.post_mean-(2*self.post_s)
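    # Minimal usage sketch (array names are hypothetical):
    #   gp = Regression(Ytrain, Xtrain=Xtrain, Xtest=Xtest)
    #   mean, upper, lower = gp.predict()
    # The bounds are mean +/- 2 posterior standard deviations (~95% under the
    # Gaussian posterior).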
def plot_by_index(self):
upper = (self.post_mean + (2*self.post_s)).flat
lower = (self.post_mean - (2*self.post_s)).flat
index = np.arange(1,(self.Ytest.shape[0]+1),1)
colours = []
for i in xrange(len(self.Ytest)):
if len(self.Xtrain) == 2:
if self.Xtest[1][i] in self.Xtrain[1]:
print self.Xtest[i]
colours.append('y')
else:
colours.append('r')
else:
if self.Xtest[i] in self.Xtrain:
print self.Xtest[i]
colours.append('y')
else:
colours.append('r')
Y = self.Ytest
Y1 = self.Ytest
Y2 = self.Ytest
Y3 = self.Ytest
post_mean = self.post_mean
Ytest = np.sort(self.Ytest, axis=0)
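        # Sort the bounds, posterior means and colours so each stays paired with
        # its Ytest value; the Y/Y1/Y2/Y3 copies exist because each Python 2 list
        # comprehension below clobbers its loop variable.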
upper = (np.array([upper for Y,upper in sorted(zip(Y,upper))])).flat
lower = (np.array([lower for Y1,lower in sorted(zip(Y1,lower))])).flat
post_mean = (np.array([post_mean for Y2,post_mean in sorted(zip(Y2,post_mean))]))
cmap = [colours for Y3,colours in sorted(zip(Y3,colours))]
# Plot index against posterior mean function, uncertainty and true test values
fig = plt.figure()
plt.xlim(0, max(index)+1)
plt.xlabel('Compound')
plt.ylabel('Centred output')
plt.plot(index, post_mean, 'r--', lw=2)
plt.fill_between(index, lower, upper, color='#87cefa')
plt.scatter(index, Ytest, c=cmap, s=40)
if self.cent_threshold is not None:
plt.plot([0, max(index)+1],[self.cent_threshold, self.cent_threshold])
plt.show(block=True)
def r_squared(self):
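        # Coefficient of determination: R^2 = 1 - SS_res / SS_tot.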
obs_mean = np.mean(self.Ytest)
ss_tot = np.sum((self.Ytest-obs_mean)**2)
ss_res = np.sum((self.Ytest-self.post_mean)**2)
r_sq = 1 - (ss_res/ss_tot)
return r_sq
def classify(self): # ADD ROC PLOT, ENRICHMENT FACTORS
assert self.cent_threshold is not None, "An active/inactive threshold is required for classification."
true_positives, true_negatives = utils.classif(self.post_mean, self.Ytest, self.cent_threshold, roc=True)
enrichment_factors = []
Y2 = self.Ytest
post_mean = (np.sort(self.post_mean, axis=0))[::-1]
        Ytest = (np.array([y for _, y in sorted(zip(self.post_mean, Y2))]))[::-1]
tpr = [0.0]
fpr = [0.0]
actives = 0.0
inactives = 0.0
for index,value in enumerate(post_mean):
if Ytest[index] >= self.cent_threshold:
actives += 1.0
else:
inactives += 1.0
tpr.append(actives/float(true_positives))
fpr.append(inactives/float(true_negatives))
print true_positives
print actives
print true_negatives
print inactives
fig = plt.figure()
        x = [0.0, 1.0]  # endpoints of the diagonal chance line for the ROC plot
plt.plot(x,x, linestyle='dashed', color='red', linewidth=2)
plt.plot(fpr,tpr, 'g', linewidth=5)
plt.show()
# def plot_prior(self): # UPDATE FOR SMILES
# if self.Xtrain.shape[1] == 1:
# plotting.plot_prior_1D(self.Xtest, self.test_cov, Ytest=self.Ytest)
# elif self.Xtrain.shape[1] == 2:
# plotting.plot_prior_2D(self.Xtest, self.test_cov, Ytest=self.Ytest)
# else:
# print "The dimensionality of the input space is too high to visualize."
# def plot_posterior(self): # UPDATE FOR SMILES
# if self.Xtrain.shape[1] == 1:
# plotting.plot_posterior_1D(self.Xtest, self.Xtrain, self.Ytrain, self.post_mean, self.post_s, self.cov_post, Ytest=self.Ytest)
# elif self.Xtrain.shape[1] == 2:
# plotting.plot_posterior_2D(self.Xtest, self.Xtrain, self.Ytrain, self.post_mean, self.post_s, Ytest=self.Ytest)
# else:
# print "The dimensionality of the input space is too high to visualize. Use plot_by_index instead."
|
nafisa1/Gaussian_processes
|
regression.py
|
Python
|
mit
| 6,505
|
import factory
import factory.fuzzy
import pytz
from conferences.models import AudienceLevel, Conference, Deadline, Duration, Topic
from django.utils import timezone
from factory.django import DjangoModelFactory
from i18n.helpers.tests import LanguageFactory
from languages.models import Language
from pytest_factoryboy import register
from submissions.models import SubmissionType
@register
class ConferenceFactory(DjangoModelFactory):
name = LanguageFactory("name")
code = factory.Faker("text", max_nb_chars=10)
introduction = LanguageFactory("sentence")
start = factory.Faker("past_datetime", tzinfo=pytz.UTC)
end = factory.Faker("future_datetime", tzinfo=pytz.UTC)
timezone = pytz.timezone("Europe/Rome")
@classmethod
def _create(cls, model_class, *args, **kwargs):
specified_deadlines = {}
for deadline in Deadline.TYPES:
_type = deadline[0]
value = kwargs.pop(f"active_{_type}", None)
specified_deadlines[_type] = value
instance = super()._create(model_class, *args, **kwargs)
for _type, value in specified_deadlines.items():
if value is True:
instance.deadlines.add(DeadlineFactory(conference=instance, type=_type))
elif value is False:
instance.deadlines.add(
DeadlineFactory(
conference=instance,
type=_type,
start=timezone.now() - timezone.timedelta(days=10),
end=timezone.now() - timezone.timedelta(days=5),
)
)
return instance
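    # Usage sketch: assuming 'cfp' is one of the Deadline.TYPES codes,
    #   ConferenceFactory(active_cfp=True)   # attaches a currently open deadline
    #   ConferenceFactory(active_cfp=False)  # attaches an already closed deadline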
@factory.post_generation
def topics(self, create, extracted, **kwargs):
"""Accepts a list of topic names and adds each topic to the
Conference allowed submission topics.
        If a topic with that name doesn't exist, a new one is created.
        This fixture makes it easier to add allowed topics to a Conference in the tests.
"""
if not create:
return
if extracted:
for topic in extracted:
self.topics.add(Topic.objects.get_or_create(name=topic)[0])
@factory.post_generation
def languages(self, create, extracted, **kwargs):
"""Accepts a list of language codes and adds each language to the
Conference allowed languages.
        This fixture makes it easier to add allowed languages to a Conference in the tests.
"""
if not create:
return
if extracted:
for language_code in extracted:
self.languages.add(Language.objects.get(code=language_code))
@factory.post_generation
def submission_types(self, create, extracted, **kwargs):
"""Accepts a list of submission type names and adds
each submission type to the Conference allowed submission types.
        If a submission type with that name doesn't exist, a new one is created.
        This fixture makes it easier to add allowed submission types
        to a Conference in the tests.
"""
if not create:
return
if extracted:
for submission_type in extracted:
self.submission_types.add(
SubmissionType.objects.get_or_create(name=submission_type)[0]
)
@factory.post_generation
def durations(self, create, extracted, **kwargs):
"""Accepts a list of durations (in minutes) and creates a duration object to the
Conference allowed durations.
This fixture makes easier to add durations to a Conference in the tests
"""
if not create:
return
if extracted:
for duration in extracted:
duration, created = Duration.objects.get_or_create(
duration=duration,
conference=self,
defaults={"name": f"{duration}m"},
)
if created:
duration.allowed_submission_types.set(SubmissionType.objects.all())
self.durations.add(duration)
@factory.post_generation
def audience_levels(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for audience_level in extracted:
self.audience_levels.add(
AudienceLevel.objects.get_or_create(name=audience_level)[0]
)
class Meta:
model = Conference
django_get_or_create = ("code",)
@register
class TopicFactory(DjangoModelFactory):
name = factory.Faker("word")
class Meta:
model = Topic
django_get_or_create = ("name",)
@register
class DeadlineFactory(DjangoModelFactory):
conference = factory.SubFactory(ConferenceFactory)
type = factory.fuzzy.FuzzyChoice([deadline[0] for deadline in Deadline.TYPES])
name = LanguageFactory("sentence")
description = LanguageFactory("sentence")
start = factory.Faker("past_datetime", tzinfo=pytz.UTC)
end = factory.Faker("future_datetime", tzinfo=pytz.UTC)
class Meta:
model = Deadline
@register
class AudienceLevelFactory(DjangoModelFactory):
    name = factory.fuzzy.FuzzyChoice(("Beginner", "Intermediate", "Advanced"))
class Meta:
model = AudienceLevel
django_get_or_create = ("name",)
@register
class DurationFactory(DjangoModelFactory):
conference = factory.SubFactory(ConferenceFactory)
name = factory.Faker("word")
duration = factory.Faker("pyint")
notes = factory.Faker("text")
class Meta:
model = Duration
|
patrick91/pycon
|
backend/conferences/tests/factories.py
|
Python
|
mit
| 5,673
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from collections import OrderedDict
def slack_post(token, channel, blind=False):
    '''Prepare and return a function that posts a Slack message, optionally
    with a picture, to be called later by the main script.'''
slack_api_url = 'https://slack.com/api/{}'
params = {
'token': token,
'channel': channel,
}
if blind:
params.update({
'as_user': 'false',
'username': 'Mini-Sentry',
'icon_url': 'https://wiki.teamfortress.com/w/images/e/ea/Red_Mini_Sentry.png'
})
url = slack_api_url.format('chat.postMessage')
else:
params['channels'] = params.pop('channel')
url = slack_api_url.format('files.upload')
def make_request(*args):
'''Will make the request, use the prepared params.'''
request_args = OrderedDict(
url=url,
params=params,
)
if blind:
request_args['params'].update({
'text': args[0]
})
else:
request_args['params'].update({
'title': args[0],
'initial_comment': args[1],
})
request_args['files'] = dict(file=args[2])
        response = requests.post(**request_args)
        return response
return make_request
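# Usage sketch (token/channel values are placeholders):
#   post = slack_post('xoxb-fake-token', '#general', blind=True)
#   post('Sentry is up')                                   # text-only message
#   upload = slack_post('xoxb-fake-token', '#general')
#   upload('screenshot', 'latest state', open('shot.png', 'rb'))  # file upload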
|
nobe4/mini-sentry
|
slack.py
|
Python
|
mit
| 1,345
|
import rq
from rq_retry_scheduler import Queue, Worker
def noop_target_function(*args, **kwargs):
pass
def fail_target_function(*args, **kwargs):
raise Exception("I am a failure of a function")
def test_init(worker):
assert worker.exc_handler in worker._exc_handlers
assert issubclass(worker.queue_class, Queue)
def test_exc_handler(mock, worker, queue):
job = queue.enqueue(fail_target_function)
enqueue = mock.patch.object(Queue, 'enqueue_job_in')
for count, delay in worker.retry_delays.items():
ret = worker.exc_handler(job, None, None, None)
assert ret is False
enqueue.assert_called_with(delay, job)
enqueue.reset_mock()
# Now test that it didn't retry
ret = worker.exc_handler(job, None, None, None)
assert ret is True
assert not enqueue.called
def test_cli_arguments(connection):
"""
The rq script populates the queue_class and instantiates the queues
with the RQ queue class.
Make sure that our worker changes the queue class and updates the
queues to be of the new queue class
"""
queue_names = ['unittest1', 'unittest2']
queues = [
rq.Queue(queue_name, connection=connection)
for queue_name in queue_names
]
w = Worker(queues, connection=connection, queue_class=rq.Queue)
assert issubclass(w.queue_class, Queue)
assert len(w.queues) == len(queues)
for queue in w.queues:
assert isinstance(queue, Queue), queue.name
def test_class_override_inherited(connection):
"""Test that passing a subclass of Queue isn't overwritten by the worker"""
class UnittestQueue(Queue):
pass
w = Worker(['unittest'], queue_class=UnittestQueue, connection=connection)
assert w.queue_class == UnittestQueue
assert len(w.queues) == 1
def test_queue_strings(connection):
"""Ensure that the worker can take an iterable of strings"""
queues = ['unittest']
w = Worker(queues, queue_class=rq.Queue, connection=connection)
assert issubclass(w.queue_class, Queue)
assert len(w.queues) == len(queues)
for queue in w.queues:
assert isinstance(queue, Queue), queue.name
def test_queue_string(connection):
"""Ensure that the worker can take a string"""
queues = 'unittest'
w = Worker(queues, queue_class=rq.Queue, connection=connection)
assert issubclass(w.queue_class, Queue)
assert len(w.queues) == 1
for queue in w.queues:
assert isinstance(queue, Queue), queue.name
|
mikemill/rq_retry_scheduler
|
tests/test_worker.py
|
Python
|
mit
| 2,523
|
# coding: utf-8
import flask
from apps.auth import helpers
from apps.user import models
from . import CONFIG
provider = helpers.make_provider(CONFIG)
bp = helpers.make_provider_bp(provider.name, __name__)
@bp.route('/authorized/')
def authorized():
resp = provider.authorized_response()
if resp is None:
return 'Access denied: error=%s error_description=%s' % (
flask.request.args['error'],
flask.request.args['error_description'],
)
access_token = resp['access_token']
flask.session['oauth_token'] = (access_token, '')
me = resp['user']
user_db = retrieve_user_from_instagram(me)
return helpers.signin_user_db(user_db)
@provider.tokengetter
def get_instagram_oauth_token():
return flask.session.get('oauth_token')
@bp.route('/signin/%s/' % provider.name)
def signin():
return helpers.signin(provider)
def retrieve_user_from_instagram(response):
auth_id = '%s_%s' % (provider.name, response['id'])
user_db = models.User.get_by('auth_ids', auth_id)
if user_db:
return user_db
return helpers.create_user_db(
auth_id,
response['full_name'] or response['username'],
response['username'],
)
|
gmist/3dhero2
|
main/apps/auth/providers/instagram/views.py
|
Python
|
mit
| 1,175
|
"""
Django settings for easter_egg project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'b_8tf*4%@zu^0@k_#ha7c%w+ub!6!%%%3jwcp73)ij^&x!7i&_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'easter_egg.urls'
WSGI_APPLICATION = 'easter_egg.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, "static"),
)
TEMPLATE_DIRS = (
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates').replace('\\', '/'),
)
|
tavern-consulting/easter-egg
|
easter_egg/settings.py
|
Python
|
mit
| 2,306
|
[Unrecoverable content: this row's file was anonymized to runs of 'X' in the source; per its path it was the DataTables AutoFill "simple" example HTML page.]
|
dnaextrim/django_adminlte_x
|
adminlte/static/plugins/datatables/extensions/AutoFill/examples/simple.html.py
|
Python
|
mit
| 16,348
|
# -*- coding: utf-8 -*-
from deliver.tests.test_base import BaseTest, load_msg, load_all_msg
class ConverterTest(BaseTest):
'''Tests for the UnicodeMessage class'''
def setUp(self):
super(ConverterTest,self).setUp()
self.msg = load_msg('sample3')
def get_text(self, decode=False):
return self.msg.get_payload(0).get_payload(decode=decode)
def get_clean_text(self, forbidden_words):
return self.msg.get_payload(0).get_clean_payload(forbidden_words)
def set_text(self, payload):
self.msg.get_payload(0).set_payload(payload)
def test_get(self):
self.assertEqual(self.msg['To'], u'sender@host.com')
def test_get_special_chars(self):
self.assertEqual(self.msg['Subject'], u'Re: [Test] atensión: los 10 curros mejor pagados!')
def test_get_nokey(self):
self.assertEqual(self.msg['Heathen'], None)
def test_replace_header_ascii(self):
s = u'Memorias de Adriano'
self.msg.replace_header('Subject', s)
self.assertEqual(self.msg['Subject'], s)
self.assertEqual(self.msg._msg['Subject'], s.encode('ascii'))
def test_replace_header_special_chars(self):
s = u'Un día de cólera'
self.msg.replace_header('Subject', s)
self.assertEqual(self.msg['Subject'], s)
self.assertEqual(self.msg._msg['Subject'], '=?utf-8?q?Un_d=C3=ADa_de_c=C3=B3lera?=')
def test_replace_header_no_header(self):
s = u'quoted-printable'
self.msg.replace_header('Content-Transfer-Encoding', s)
self.assertEqual(self.msg['Content-Transfer-Encoding'], s)
def _test_get(self, s, encoded):
self.assertEqual(self.get_text(decode=True), s)
self.assertEqual(self.get_text(), encoded)
def _test_set(self, s, encoded):
self.set_text(s)
self._test_get(s, encoded)
def test_set_payload(self):
s = u'El perro del hortelano'
self.msg = load_msg('sample')
self._test_set(s, s)
def test_set_payload_special_chars(self):
s = u'Con cien cañones por banda, viento en popa a toda vela'
self.msg = load_msg('sample')
self._test_set(s, u'Con cien ca=F1ones por banda, viento en popa a toda vela')
def test_set_payload_utf8(self):
s = u'Con cien cañones por banda, viento en popa a toda vela'
self.msg = load_msg('sample')
self.msg.get_payload(0).set_charset('utf-8')
self._test_set(s, u'Con cien ca=C3=B1ones por banda, viento en popa a toda vela')
def test_set_payload_base64(self):
s = u'Con cien cañones por banda, viento en popa a toda vela'
self.msg = load_msg('sample4')
self._test_set(s, u'Con cien ca=F1ones por banda, viento en popa a toda vela')
def test_set_payload_base64_utf8(self):
s = u'Con cien cañones por banda, viento en popa a toda vela'
self.msg = load_msg('sample5')
self._test_set(s, u'Con cien ca=C3=B1ones por banda, viento en popa a toda vela')
def test_set_payload_empty(self):
s = u'Con cien cañones por banda, viento en popa a toda vela'
self.msg = load_msg('sample6')
self._test_set(s, u'Con cien ca=F1ones por banda, viento en popa a toda vela')
def test_get_payload(self):
self.msg = load_msg('sample')
s = u'La direcci=F3n ha cambiado como pod=E9is comprobar en'
self.assertTrue(s in self.get_text())
def test_get_payload_decoded(self):
self.msg = load_msg('sample')
s = u'La dirección ha cambiado como podéis comprobar en el'
self.assertTrue(s in self.get_text(decode=True))
def test_get_payload_base64(self):
self.msg = load_msg('sample4')
self._test_get(u'á\n', u'4Qo=')
def test_get_payload_base64_utf8(self):
self.msg = load_msg('sample5')
self._test_get(u'á', u'w6E=')
def test_get_payload_empty(self):
self.msg = load_msg('sample6')
self._test_get(u'\n', u'\n')
def test_clean_word_no_replace(self):
self.assertEqual(self.msg._clean_word(u'panic', {}), u'panic')
def test_clean_word_replace(self):
self.assertEqual(self.msg._clean_word(u'panic', {u'panic' : u'don\'t'}), u'don\'t')
def test_clean_word_replace_case(self):
self.assertEqual(self.msg._clean_word(u'Panic', {u'panic' : u'don\'t'}), u'don\'t')
def test_clean_word_replace_special_chars(self):
self.assertEqual(self.msg._clean_word(u'Pánico', {u'pánico' : u'don\'t'}), u'don\'t')
def test_clean_word_surrounded(self):
self.assertEqual(self.msg._clean_word(u'*Pánico*?', {u'pánico' : u'don\'t'}), u'*don\'t*?')
def test_get_clean_payload(self):
words = self.config['forbidden_words']
payload = self.get_clean_text(words)
for word in words.keys():
self.assertFalse(word in payload, 'word %s was not removed' % word)
for replacement in words.values():
            self.assertTrue(replacement in payload, 'word %s was not inserted' % replacement)
def test_walk(self):
for mail in load_all_msg():
list(mail.walk())
|
sirech/deliver
|
deliver/tests/converter/test_converter.py
|
Python
|
mit
| 5,147
|
"""Create Common Workflow Language (CWL) runnable files and tools from a world object.
"""
import copy
import functools
import json
import math
import operator
import os
import tarfile
import toolz as tz
import yaml
from bcbio import utils
from bcbio.cwl import defs, workflow
from bcbio.distributed import objectstore, resources
INTEGRATION_MAP = {"keep:": "arvados", "s3:": "s3", "sbg:": "sbgenomics",
"dx:": "dnanexus"}
def from_world(world, run_info_file, integrations=None):
base = utils.splitext_plus(os.path.basename(run_info_file))[0]
out_dir = utils.safe_makedir("%s-workflow" % (base))
out_file = os.path.join(out_dir, "main-%s.cwl" % (base))
samples = [xs[0] for xs in world] # unpack world data objects
analyses = list(set([x["analysis"] for x in samples]))
assert len(analyses) == 1, "Only support writing CWL for a single analysis type: %s" % analyses
try:
workflow_fn = defs.workflows[analyses[0].lower()]
except KeyError:
raise NotImplementedError("Unsupported CWL analysis type: %s" % analyses[0])
prep_cwl(samples, workflow_fn, out_dir, out_file, integrations)
def _cwl_workflow_template(inputs, top_level=False):
"""Retrieve CWL inputs shared amongst different workflows.
"""
ready_inputs = []
for inp in inputs:
cur_inp = copy.deepcopy(inp)
for attr in ["source", "valueFrom", "wf_duplicate"]:
cur_inp.pop(attr, None)
if top_level:
cur_inp = workflow._flatten_nested_input(cur_inp)
cur_inp = _clean_record(cur_inp)
ready_inputs.append(cur_inp)
return {"class": "Workflow",
"cwlVersion": "v1.0",
"hints": [],
"requirements": [{"class": "EnvVarRequirement",
"envDef": [{"envName": "MPLCONFIGDIR", "envValue": "."}]},
{"class": "ScatterFeatureRequirement"},
{"class": "SubworkflowFeatureRequirement"}],
"inputs": ready_inputs,
"outputs": [],
"steps": []}
def _add_disk_estimates(cwl_res, inputs, file_estimates, disk):
"""Add disk usage estimates to CWL ResourceRequirement.
Based on inputs (which need to be staged) and disk
specifications (which estimate outputs).
"""
if not disk:
disk = {}
if file_estimates:
total_estimate = 0
for key, multiplier in disk.items():
if key in file_estimates:
total_estimate += int(multiplier * file_estimates[key])
for inp in inputs:
scale = 2.0 if inp.get("type") == "array" else 1.0
if workflow.is_cwl_record(inp):
for f in _get_record_fields(inp):
if f["name"] in file_estimates:
total_estimate += file_estimates[f["name"]] * scale
elif inp["id"] in file_estimates:
total_estimate += file_estimates[inp["id"]] * scale
if total_estimate:
# scale total estimate to allow extra room, round to integer
total_estimate = int(math.ceil(total_estimate * 1.5))
cwl_res["tmpdirMin"] = total_estimate
cwl_res["outdirMin"] += total_estimate
return cwl_res
def _write_tool(step_dir, name, inputs, outputs, parallel, image, programs,
file_estimates, disk, step_cores, samples):
out_file = os.path.join(step_dir, "%s.cwl" % name)
resource_cores, mem_gb_per_core = resources.cpu_and_memory((programs or []) + ["default"], samples)
cores = min([step_cores, resource_cores]) if step_cores else resource_cores
mem_mb_total = int(mem_gb_per_core * cores * 1024)
bcbio_docker_disk = 1 * 1024 # Minimum requirements for bcbio Docker image
cwl_res = {"class": "ResourceRequirement",
"coresMin": cores, "ramMin": mem_mb_total, "outdirMin": bcbio_docker_disk}
cwl_res = _add_disk_estimates(cwl_res, inputs, file_estimates, disk)
docker_image = "bcbio/bcbio" if image == "bcbio" else "quay.io/bcbio/%s" % image
docker = {"class": "DockerRequirement", "dockerPull": docker_image, "dockerImageId": docker_image}
out = {"class": "CommandLineTool",
"cwlVersion": "v1.0",
"baseCommand": ["bcbio_nextgen.py", "runfn", name, "cwl"],
"requirements": [],
"hints": [docker, cwl_res],
"arguments": [],
"inputs": [],
"outputs": []}
if programs:
def resolve_package(p):
out = {}
parts = p.split("=")
if len(parts) == 2:
out["package"] = parts[0]
out["version"] = [parts[1]]
else:
out["package"] = p
out["specs"] = ["https://anaconda.org/bioconda/%s" % out["package"]]
return out
out["hints"].append({"class": "SoftwareRequirement",
"packages": [resolve_package(p) for p in programs]})
# Use JSON for inputs, rather than command line arguments
# Correctly handles multiple values and batching across CWL runners
use_commandline_args = False
out["requirements"] += [{"class": "InlineJavascriptRequirement"},
{"class": "InitialWorkDirRequirement",
"listing": [{"entryname": "cwl.inputs.json",
"entry": "$(JSON.stringify(inputs))"}]}]
out["arguments"] += [{"position": 0, "valueFrom":
"sentinel_runtime=cores,$(runtime['cores']),ram,$(runtime['ram'])"},
"sentinel_parallel=%s" % parallel,
"sentinel_outputs=%s" % ",".join([_get_sentinel_val(v) for v in outputs]),
"sentinel_inputs=%s" % ",".join(["%s:%s" %
(workflow.get_base_id(v["id"]),
"record" if workflow.is_cwl_record(v) else "var")
for v in inputs])]
for i, inp in enumerate(inputs):
base_id = workflow.get_base_id(inp["id"])
inp_tool = copy.deepcopy(inp)
inp_tool["id"] = base_id
if inp.get("wf_duplicate"):
inp_tool["id"] += "_toolinput"
for attr in ["source", "valueFrom", "wf_duplicate"]:
inp_tool.pop(attr, None)
if _is_scatter_parallel(parallel) and _do_scatter_var(inp, parallel):
inp_tool = workflow._flatten_nested_input(inp_tool)
if use_commandline_args:
inp_binding = {"prefix": "%s=" % base_id,
"separate": False, "itemSeparator": ";;", "position": i}
inp_tool = _place_input_binding(inp_tool, inp_binding, parallel)
else:
inp_binding = None
inp_tool = _place_secondary_files(inp_tool, inp_binding)
inp_tool = _clean_record(inp_tool)
out["inputs"].append(inp_tool)
for outp in outputs:
outp_tool = copy.deepcopy(outp)
outp_tool = _clean_record(outp_tool)
outp_tool["id"] = workflow.get_base_id(outp["id"])
out["outputs"].append(outp_tool)
with open(out_file, "w") as out_handle:
def str_presenter(dumper, data):
if len(data.splitlines()) > 1: # check for multiline string
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
return dumper.represent_scalar('tag:yaml.org,2002:str', data)
yaml.add_representer(str, str_presenter)
yaml.dump(out, out_handle, default_flow_style=False, allow_unicode=False)
return os.path.join("steps", os.path.basename(out_file))
def _clean_record(rec):
"""Remove secondary files from record fields, which are currently not supported.
    To be removed later when secondaryFiles are added to records.
"""
if workflow.is_cwl_record(rec):
def _clean_fields(d):
if isinstance(d, dict):
if "fields" in d:
out = []
for f in d["fields"]:
f = utils.deepish_copy(f)
f.pop("secondaryFiles", None)
out.append(f)
d["fields"] = out
return d
else:
out = {}
for k, v in d.items():
out[k] = _clean_fields(v)
return out
else:
return d
return _clean_fields(rec)
else:
return rec
def _get_record_fields(d):
"""Get field names from a potentially nested record.
"""
if isinstance(d, dict):
if "fields" in d:
return d["fields"]
else:
for v in d.values():
fields = _get_record_fields(v)
if fields:
return fields
def _get_sentinel_val(v):
"""Retrieve expected sentinel value for an output, expanding records.
"""
out = workflow.get_base_id(v["id"])
if workflow.is_cwl_record(v):
out += ":%s" % ";".join([x["name"] for x in _get_record_fields(v)])
return out
def _place_input_binding(inp_tool, inp_binding, parallel):
"""Check nesting of variables to determine where to place the input binding.
We want to allow having multiple files together (like fasta_indices), combined
with the itemSeparator, but also support having multiple samples where we pass
things independently.
"""
if (parallel in ["multi-combined", "multi-batch", "batch-split", "batch-parallel",
"batch-merge", "batch-single"] and
tz.get_in(["type", "type"], inp_tool) == "array"):
inp_tool["type"]["inputBinding"] = inp_binding
else:
inp_tool["inputBinding"] = inp_binding
return inp_tool
def _place_secondary_files(inp_tool, inp_binding=None):
"""Put secondaryFiles at the level of the File item to ensure indexes get passed.
"""
def _is_file(val):
return (val == "File" or (isinstance(val, (list, tuple)) and "File" in val))
secondary_files = inp_tool.pop("secondaryFiles", None)
if secondary_files:
key = []
while (not _is_file(tz.get_in(key + ["type"], inp_tool))
and not _is_file(tz.get_in(key + ["items"], inp_tool))
and not _is_file(tz.get_in(key + ["items", "items"], inp_tool))):
key.append("type")
if tz.get_in(key, inp_tool):
inp_tool["secondaryFiles"] = secondary_files
elif inp_binding:
nested_inp_binding = copy.deepcopy(inp_binding)
nested_inp_binding["prefix"] = "ignore="
nested_inp_binding["secondaryFiles"] = secondary_files
inp_tool = tz.update_in(inp_tool, key, lambda x: nested_inp_binding)
return inp_tool
def _is_scatter_parallel(parallel):
return parallel.endswith("-parallel")
def _do_scatter_var(v, parallel):
"""Logic for scattering a variable.
"""
# For batches, scatter records only at the top level (double nested)
if parallel.startswith("batch") and workflow.is_cwl_record(v):
return (tz.get_in(["type", "type"], v) == "array" and
tz.get_in(["type", "type", "type"], v) == "array")
# Otherwise, scatter arrays
else:
return (tz.get_in(["type", "type"], v) == "array")
def _step_template(name, run_file, inputs, outputs, parallel, scatter=None):
"""Templating function for writing a step to avoid repeating namespaces.
"""
scatter_inputs = []
sinputs = []
for inp in inputs:
step_inp = {"id": workflow.get_base_id(inp["id"]), "source": inp["id"]}
if inp.get("wf_duplicate"):
step_inp["id"] += "_toolinput"
for attr in ["source", "valueFrom"]:
if attr in inp:
step_inp[attr] = inp[attr]
sinputs.append(step_inp)
# scatter on inputs from previous processes that have been arrayed
if (_is_scatter_parallel(parallel) and (_do_scatter_var(inp, parallel)
or (scatter and inp["id"] in scatter))):
scatter_inputs.append(step_inp["id"])
out = {"run": run_file,
"id": name,
"in": sinputs,
"out": [{"id": workflow.get_base_id(output["id"])} for output in outputs]}
if _is_scatter_parallel(parallel):
assert scatter_inputs, "Did not find items to scatter on: %s" % name
out.update({"scatterMethod": "dotproduct",
"scatter": scatter_inputs})
return out
def prep_cwl(samples, workflow_fn, out_dir, out_file, integrations=None):
"""Output a CWL description with sub-workflows and steps.
"""
step_dir = utils.safe_makedir(os.path.join(out_dir, "steps"))
variables, keyvals = _flatten_samples(samples, out_file, integrations)
file_estimates = _calc_input_estimates(keyvals, integrations)
out = _cwl_workflow_template(variables)
parent_wfs = []
steps, wfoutputs = workflow_fn(samples)
used_inputs = set([])
for cur in workflow.generate(variables, steps, wfoutputs):
if cur[0] == "step":
_, name, parallel, inputs, outputs, image, programs, disk, cores = cur
step_file = _write_tool(step_dir, name, inputs, outputs, parallel, image, programs,
file_estimates, disk, cores, samples)
out["steps"].append(_step_template(name, step_file, inputs, outputs, parallel))
used_inputs |= set(x["id"] for x in inputs)
elif cur[0] == "upload":
for output in cur[1]:
wf_output = copy.deepcopy(output)
if "outputSource" not in wf_output:
wf_output["outputSource"] = wf_output.pop("source")
wf_output = _clean_record(wf_output)
out["outputs"].append(wf_output)
elif cur[0] == "wf_start":
parent_wfs.append(out)
out = _cwl_workflow_template(cur[1])
elif cur[0] == "wf_finish":
_, name, parallel, inputs, outputs, scatter = cur
wf_out_file = "wf-%s.cwl" % name
with open(os.path.join(out_dir, wf_out_file), "w") as out_handle:
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
out = parent_wfs.pop(-1)
out["steps"].append(_step_template(name, wf_out_file, inputs, outputs, parallel, scatter))
used_inputs |= set(x["id"] for x in inputs)
else:
raise ValueError("Unexpected workflow value %s" % str(cur))
with open(out_file, "w") as out_handle:
out["inputs"] = [x for x in out["inputs"] if x["id"] in used_inputs]
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
sample_json = "%s-samples.json" % utils.splitext_plus(out_file)[0]
out_clean = _clean_final_outputs(copy.deepcopy({k: v for k, v in keyvals.items() if k in used_inputs}),
integrations)
with open(sample_json, "w") as out_handle:
json.dump(out_clean, out_handle, sort_keys=True, indent=4, separators=(',', ': '))
return out_file, sample_json
def _flatten_samples(samples, base_file, integrations=None):
"""Create a flattened JSON representation of data from the bcbio world map.
"""
flat_data = []
for data in samples:
data["reference"] = _indexes_to_secondary_files(data["reference"], data["genome_build"])
cur_flat = {}
for key_path in [["analysis"], ["description"], ["rgnames"], ["config", "algorithm"],
["metadata"], ["genome_build"], ["resources"],
["files"], ["reference"], ["genome_resources"], ["vrn_file"]]:
cur_key = "__".join(key_path)
for flat_key, flat_val in _to_cwldata(cur_key, tz.get_in(key_path, data)):
cur_flat[flat_key] = flat_val
flat_data.append(cur_flat)
out = {}
for key in sorted(list(set(reduce(operator.add, [d.keys() for d in flat_data])))):
# Periods in keys cause issues with WDL and some CWL implementations
clean_key = key.replace(".", "_")
out[clean_key] = []
for cur_flat in flat_data:
out[clean_key].append(cur_flat.get(key))
# special case for back-compatibility with fasta specifications -- yuck
if "reference__fasta__base" not in out and "reference__fasta" in out:
out["reference__fasta__base"] = out["reference__fasta"]
del out["reference__fasta"]
return _samplejson_to_inputs(out), out
def _indexes_to_secondary_files(gresources, genome_build):
"""Convert a list of genome indexes into a single file plus secondary files.
This ensures that all indices are staged together in a single directory.
"""
out = {}
for refname, val in gresources.items():
if isinstance(val, dict) and "indexes" in val:
# list of indexes -- aligners
if len(val.keys()) == 1:
indexes = val["indexes"]
if len(indexes) == 0:
raise ValueError("Did not find indexes for %s: %s" % (refname, val))
elif len(indexes) == 1:
val = {"indexes": indexes[0]}
else:
val = {"indexes": {"base": indexes[0], "indexes": indexes[1:]}}
# directory plus indexes -- snpEff
elif "base" in val and os.path.isdir(val["base"]) and len(val["indexes"]) > 0:
indexes = val["indexes"]
val = {"base": indexes[0], "indexes": indexes[1:]}
elif isinstance(val, dict) and genome_build in val:
val = _indexes_to_secondary_files(val, genome_build)
out[refname] = val
return out
def _add_suppl_info(inp, val):
"""Add supplementary information to inputs from file values.
"""
inp["type"] = _get_avro_type(val)
secondary = _get_secondary_files(val)
if secondary:
inp["secondaryFiles"] = secondary
return inp
def _get_secondary_files(val):
"""Retrieve associated secondary files.
Normalizes input values into definitions of available secondary files.
"""
out = []
if isinstance(val, (tuple, list)):
for x in val:
for s in _get_secondary_files(x):
if s and s not in out:
out.append(s)
elif isinstance(val, dict) and (val.get("class") == "File" or "File" in val.get("class")):
if "secondaryFiles" in val:
for sf in [x["path"] for x in val["secondaryFiles"]]:
rext = _get_relative_ext(val["path"], sf)
if rext and rext not in out:
out.append(rext)
return out
def _get_relative_ext(of, sf):
"""Retrieve relative extension given the original and secondary files.
"""
def half_finished_trim(orig, prefix):
return (os.path.basename(prefix).count(".") > 0 and
os.path.basename(orig).count(".") == os.path.basename(prefix).count("."))
prefix = os.path.commonprefix([sf, of])
while prefix.endswith(".") or (half_finished_trim(sf, prefix) and half_finished_trim(of, prefix)):
prefix = prefix[:-1]
exts_to_remove = of.replace(prefix, "")
ext_to_add = sf.replace(prefix, "")
# Return extensions relative to original
if not exts_to_remove or exts_to_remove.startswith("."):
return "^" * exts_to_remove.count(".") + ext_to_add
else:
raise ValueError("No cross platform way to reference complex extension: %s %s" % (sf, of))
def _get_avro_type(val):
"""Infer avro type for the current input.
"""
if isinstance(val, dict):
assert val.get("class") == "File" or "File" in val.get("class")
return "File"
elif isinstance(val, (tuple, list)):
types = []
for ctype in [_get_avro_type(v) for v in val]:
if isinstance(ctype, dict):
nested_types = [x["items"] for x in types if isinstance(x, dict)]
if ctype["items"] not in nested_types:
types.append(ctype)
elif isinstance(ctype, (list, tuple)):
for x in ctype:
if x not in types:
types.append(x)
elif ctype not in types:
types.append(ctype)
# handle empty types, allow null or a string "null" sentinel
if len(types) == 0:
types = ["null", "string"]
# collapse arrays for multiple types
if len(types) > 1 and all(isinstance(t, dict) and t["type"] == "array" for t in types):
types = [{"type": "array", "items": [t["items"] for t in types]}]
return {"type": "array", "items": (types[0] if len(types) == 1 else types)}
elif val is None:
return ["null", "string"]
# encode booleans as string True/False and unencode on other side
elif isinstance(val, bool) or isinstance(val, basestring) and val.lower() in ["true", "false", "none"]:
return ["string", "null", "boolean"]
elif isinstance(val, int):
return "long"
elif isinstance(val, float):
return "double"
else:
return "string"
def _samplejson_to_inputs(svals):
"""Convert sample output into inputs for CWL configuration files, with types.
"""
out = []
for key, val in svals.items():
out.append(_add_suppl_info({"id": "%s" % key}, val))
return out
def _to_cwldata(key, val):
"""Convert nested dictionary into CWL data, flatening and marking up files.
Moves file objects to the top level, enabling insertion in CWL inputs/outputs.
"""
out = []
if isinstance(val, dict):
if len(val) == 2 and "base" in val and "indexes" in val:
if len(val["indexes"]) > 0 and val["base"] == val["indexes"][0]:
out.append(("%s__indexes" % key, _item_to_cwldata(val["base"])))
else:
out.append((key, _to_cwlfile_with_indexes(val)))
# Dump shared nested keys like resources as a JSON string
elif key in workflow.ALWAYS_AVAILABLE:
out.append((key, _item_to_cwldata(json.dumps(val))))
else:
remain_val = {}
for nkey, nval in val.items():
cur_nkey = "%s__%s" % (key, nkey)
cwl_nval = _item_to_cwldata(nval)
if isinstance(cwl_nval, dict):
out.extend(_to_cwldata(cur_nkey, nval))
elif key in workflow.ALWAYS_AVAILABLE:
remain_val[nkey] = nval
else:
out.append((cur_nkey, cwl_nval))
if remain_val:
out.append((key, json.dumps(remain_val, sort_keys=True, separators=(',', ':'))))
else:
out.append((key, _item_to_cwldata(val)))
return out
def _to_cwlfile_with_indexes(val):
"""Convert reads with ready to go indexes into the right CWL object.
Identifies the top level directory and creates a tarball, avoiding
trying to handle complex secondary setups which are not cross platform.
Skips doing this for reference files, which take up too much time and
space to unpack multiple times.
"""
if val["base"].endswith(".fa") and any([x.endswith(".fa.fai") for x in val["indexes"]]):
return _item_to_cwldata(val["base"])
else:
dirname = os.path.dirname(val["base"])
assert all([x.startswith(dirname) for x in val["indexes"]])
return {"class": "File", "path": _directory_tarball(dirname)}
def _item_to_cwldata(x):
""""Markup an item with CWL specific metadata.
"""
if isinstance(x, (list, tuple)):
return [_item_to_cwldata(subx) for subx in x]
elif (x and isinstance(x, basestring) and
(((os.path.isfile(x) or os.path.isdir(x)) and os.path.exists(x)) or
objectstore.is_remote(x))):
if os.path.isfile(x) or objectstore.is_remote(x):
out = {"class": "File", "path": x}
if x.endswith(".bam"):
out["secondaryFiles"] = [{"class": "File", "path": x + ".bai"}]
elif x.endswith((".vcf.gz", ".bed.gz")):
out["secondaryFiles"] = [{"class": "File", "path": x + ".tbi"}]
elif x.endswith(".fa"):
secondary = [x + ".fai", os.path.splitext(x)[0] + ".dict"]
secondary = [y for y in secondary if os.path.exists(y) or objectstore.is_remote(x)]
if secondary:
out["secondaryFiles"] = [{"class": "File", "path": y} for y in secondary]
elif x.endswith(".fa.gz"):
secondary = [x + ".fai", x + ".gzi", x.replace(".fa.gz", "") + ".dict"]
secondary = [y for y in secondary if os.path.exists(y) or objectstore.is_remote(x)]
if secondary:
out["secondaryFiles"] = [{"class": "File", "path": y} for y in secondary]
elif x.endswith(".fq.gz") or x.endswith(".fastq.gz"):
secondary = [x + ".gbi"]
secondary = [y for y in secondary if os.path.exists(y) or objectstore.is_remote(x)]
if secondary:
out["secondaryFiles"] = [{"class": "File", "path": y} for y in secondary]
else:
out = {"class": "File", "path": _directory_tarball(x)}
return out
elif isinstance(x, bool):
return str(x)
else:
return x
def _directory_tarball(dirname):
"""Create a tarball of a complex directory, avoiding complex secondaryFiles.
Complex secondary files do not work on multiple platforms and are not portable
to WDL, so for now we create a tarball that workers will unpack.
"""
assert os.path.isdir(dirname)
base_dir, tarball_dir = os.path.split(dirname)
while base_dir and not os.path.exists(os.path.join(base_dir, "seq")):
base_dir, extra_tarball = os.path.split(base_dir)
tarball_dir = os.path.join(extra_tarball, tarball_dir)
tarball = os.path.join(base_dir, "%s-wf.tar.gz" % (tarball_dir.replace(os.path.sep, "--")))
if not utils.file_exists(tarball):
with utils.chdir(base_dir):
with tarfile.open(tarball, "w:gz") as tar:
tar.add(tarball_dir)
return tarball
def _clean_final_outputs(keyvals, integrations=None):
def clean_path(integrations, x):
retriever = _get_retriever(x, integrations)
if retriever:
return retriever.clean_file(x)
else:
return x
def null_to_string(x):
"""Convert None values into the string 'null'
Required for platforms like SevenBridges without null support from inputs.
"""
return "null" if x is None else x
keyvals = _adjust_items(keyvals, null_to_string)
keyvals = _adjust_files(keyvals, functools.partial(clean_path, integrations))
return keyvals
def _adjust_items(xs, adjust_fn):
if isinstance(xs, (list, tuple)):
return [_adjust_items(x, adjust_fn) for x in xs]
elif isinstance(xs, dict):
out = {}
for k, v in xs.items():
out[k] = _adjust_items(v, adjust_fn)
return out
else:
return adjust_fn(xs)
def _adjust_files(xs, adjust_fn):
"""Walk over key/value, tuples applying adjust_fn to files.
"""
if isinstance(xs, dict):
if "path" in xs:
out = {}
out["path"] = adjust_fn(xs["path"])
for k, vs in xs.items():
if k != "path":
out[k] = _adjust_files(vs, adjust_fn)
return out
else:
out = {}
for k, vs in xs.items():
out[k] = _adjust_files(vs, adjust_fn)
return out
elif isinstance(xs, (list, tuple)):
return [_adjust_files(x, adjust_fn) for x in xs]
else:
return xs
def _calc_input_estimates(keyvals, integrations=None):
"""Calculate estimations of input file sizes for disk usage approximation.
    These are currently dominated by fastq/BAM sizes, so estimate based on those.
"""
out = {}
for key, val in keyvals.items():
size = _calc_file_size(val, 0, integrations)
if size:
out[key] = size
return out
def _calc_file_size(val, depth, integrations):
if isinstance(val, (list, tuple)):
sizes = [_calc_file_size(x, depth + 1, integrations) for x in val]
sizes = [x for x in sizes if x]
if sizes:
# Top level, biggest item, otherwise all files together
return max(sizes) if depth == 0 else sum(sizes)
elif isinstance(val, dict) and "path" in val:
return _get_file_size(val["path"], integrations)
return None
def _get_retriever(path, integrations):
if path.startswith(tuple(INTEGRATION_MAP.keys())):
return integrations.get(INTEGRATION_MAP[path.split(":")[0] + ":"])
def _get_file_size(path, integrations):
"""Return file size in megabytes, including querying remote integrations
"""
retriever = _get_retriever(path, integrations)
if retriever:
return retriever.file_size(path)
elif os.path.exists(path):
return os.path.getsize(path) / (1024.0 * 1024.0)
|
biocyberman/bcbio-nextgen
|
bcbio/cwl/create.py
|
Python
|
mit
| 29,386
|
#!/usr/bin/env python
from random import randrange, choice
from string import ascii_lowercase as lc
# from sys import maxint
# 64 bit has a longer maxint
from time import ctime
tlds = ('com', 'edu', 'net', 'org', 'gov')
for i in range(randrange(5, 11)):
    dtint = randrange(2**32)            # pick date
    dtstr = ctime(dtint)                # date string
    llen = randrange(4, 8)              # login is shorter
    login = ''.join(choice(lc) for j in range(llen))
    dlen = randrange(llen, 13)          # domain is longer
    dom = ''.join(choice(lc) for j in range(dlen))
    print('%s::%s@%s.%s::%d-%d-%d' % (dtstr, login,
          dom, choice(tlds), dtint, llen, dlen))
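# A sample output line (values vary per run) looks roughly like:
# Mon Jun  1 08:12:45 2009::qwert@abcdefgh.net::1243843965-5-8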
|
MarsBighead/mustang
|
Python/gendata.py
|
Python
|
mit
| 658
|
import json
import pika
class Consumer(object):
"""This class connects to RabbitMQ, binds an 'exchange' then begins receiving \
messages. It does not respond to the sender of the message, it only sends an \
acknowledgement."""
def __init__(self, rabbit_url, exchange, exchange_type, queue, routing_key, action):
self._exchange = exchange
self._exchange_type = exchange_type
self._queue = queue
self._routing_key = routing_key
self._connection = None
self._channel = None
self._closing = False
self._consumer_tag = None
self._url = rabbit_url
self._action = action
def _connect(self):
return pika.SelectConnection(
pika.URLParameters(self._url),
self._on_connection_open,
stop_ioloop_on_close=False)
def _close_connection(self):
self._connection.close()
def _add_on_connection_close_callback(self):
self._connection.add_on_close_callback(self._on_connection_closed)
def _on_connection_closed(self, connection, reply_code, reply_text):
self._channel = None
if self._closing:
self._connection.ioloop.stop()
else:
self._connection.add_timeout(5, self._reconnect)
def _on_connection_open(self, unused_connection):
self._add_on_connection_close_callback()
self._open_channel()
def _reconnect(self):
self._connection.ioloop.stop()
if not self._closing:
self._connection = self._connect()
self._connection.ioloop.start()
def _add_on_channel_close_callback(self):
self._channel.add_on_close_callback(self._on_channel_closed)
def _on_channel_closed(self, channel, reply_code, reply_text):
self._connection.close()
def _on_channel_open(self, channel):
self._channel = channel
self._add_on_channel_close_callback()
self._setup_exchange(self._exchange)
def _setup_exchange(self, exchange_name):
self._channel.exchange_declare(
self._on_exchange_declareok,
exchange_name,
self._exchange_type)
def _on_exchange_declareok(self, unused_frame):
self._setup_queue(self._queue)
def _setup_queue(self, queue_name):
self._channel.queue_declare(self._on_queue_declareok, queue_name)
def _on_queue_declareok(self, method_frame):
self._channel.queue_bind(
self._on_bindok,
self._queue,
self._exchange,
self._routing_key)
def _add_on_cancel_callback(self):
self._channel.add_on_cancel_callback(self._on_consumer_cancelled)
def _on_consumer_cancelled(self, method_frame):
if self._channel:
self._channel.close()
def _acknowledge_message(self, delivery_tag):
self._channel.basic_ack(delivery_tag)
def _on_message(self, ch, basic_deliver, props, body):
self._action(json.loads(body))
self._acknowledge_message(basic_deliver.delivery_tag)
def _on_cancelok(self, unused_frame):
self._close_channel()
def _stop_consuming(self):
if self._channel:
self._channel.basic_cancel(self._on_cancelok, self._consumer_tag)
def _start_consuming(self):
self._add_on_cancel_callback()
self._consumer_tag = self._channel.basic_consume(self._on_message, self._queue)
def _on_bindok(self, unused_frame):
self._start_consuming()
def _close_channel(self):
self._channel.close()
def _open_channel(self):
self._connection.channel(on_open_callback=self._on_channel_open)
def run(self):
self._connection = self._connect()
self._connection.ioloop.start()
def stop(self):
self._closing = True
self._stop_consuming()
self._connection.ioloop.start()
class Receiver(Consumer):
"""This class receives messages from a 'direct' exchange, where only one consumer \
will receive the message."""
def __init__(self, rabbit_url, exchange, queue, routing_key, action):
super(Receiver, self).__init__(
rabbit_url=rabbit_url,
exchange=exchange,
exchange_type='direct',
queue=queue,
routing_key=routing_key,
action=action)
class Listener(Consumer):
"""This class receives messages from a 'fanout' exchange, where all consumers \
will receive the message"""
def __init__(self, rabbit_url, exchange, queue, routing_key, action):
super(Listener, self).__init__(
rabbit_url=rabbit_url,
exchange=exchange,
exchange_type='fanout',
queue=queue,
routing_key=routing_key,
action=action)
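# Minimal usage sketch (hypothetical URL, names and handler; run() blocks
# until stop() is called from elsewhere):
#
#   def handle(message):
#       print(message)
#
#   receiver = Receiver('amqp://guest:guest@localhost:5672/%2F',
#                       exchange='tasks', queue='tasks',
#                       routing_key='tasks', action=handle)
#   receiver.run()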
|
projectweekend/Pika-Pack
|
pika_pack/async.py
|
Python
|
mit
| 4,801
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='pysnap',
version='0.1.1',
description='Snapchat API client in Python',
long_description=open('README.md').read(),
author='Martin Polden',
author_email='martin.polden@gmail.com',
url='https://github.com/martinp/pysnap',
packages=['pysnap'],
scripts=['bin/get_snaps.py', 'bin/get_stories.py'],
install_requires=[
'docopt>=0.6.1',
'requests>=2.2.1',
'cryptography>=1.2.2',
],
license=open('LICENSE').read()
)
|
martinp/pysnap
|
setup.py
|
Python
|
mit
| 610
|
# -*- coding: utf-8 -*-
import scrapy
class NamedayItem(scrapy.Item):
day = scrapy.Field()
month = scrapy.Field()
official_names = scrapy.Field()
swedish_names = scrapy.Field()
same_names = scrapy.Field()
orthodox_names = scrapy.Field()
unofficial_names = scrapy.Field()
|
spedepekka/finnish-namedays
|
extractor/items.py
|
Python
|
mit
| 301
|
# -*- coding: utf-8 -*-
#
# statistical physics documentation build configuration file, created by
# sphinx-quickstart on Tue May 27 11:24:59 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '_themes/'))
# Only for the bootstrap theme. Comment it out when using other themes.
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath('exts'))
# Import alabaster theme
# import alabaster
# html_theme_path = [alabaster.get_path()]
# html_theme_path = ["_themes",]
# html_theme = 'alabaster'
html_sidebars = {
'**': [
# 'about.html',
# 'navigation.html',
# 'globaltoc.html',
# 'relations.html',
# 'searchbox.html',
# 'donate.html',
# 'chat.html',
# 'localtoc.html',
# 'sourcelink.html',
# 'searchbox.html'
]
}
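# NOTE: the alabaster-style options below are superseded by the bootstrap
# html_theme_options defined later in this file.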
html_theme_options = {
'github_user': 'NeuPhysics',
'github_repo': 'statisticalphysics',
'gratipay_user': 'emptymalei',
'analytics_id': 'UA-44466929-2',
'description': 'Statistical Physics',
}
# theme_canonical_url = 'http://statisticalphysics.leima.is/' # Not working, please find themes/theme.conf and change canonical url there.
#theme_canonical_url = 'http://statisticalphysics.leima.is/'
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
# 'numfig',
"math_dollar",
"sphinx_sitemap"
]
html_baseurl = 'https://statisticalphysics.leima.is/'
latex_preamble_data = r"""
\usepackage{hyperref}
\usepackage{mathrsfs}
\usepackage{color}
\usepackage{xcolor}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{slashed}
\usepackage{graphicx}
\usepackage{graphics}
% Fix figure too large problem
%http://tex.stackexchange.com/questions/83020/set-standard-default-scaling-of-includegraphics
\setkeys{Gin}{width=0.85\textwidth}
%\setkeys{Gin}{width=.65\csname Gin@nat@width\endcsname,keepaspectratio}
%\let\ORIincludegraphics\includegraphics
%\renewcommand{\includegraphics}[2][]{\ORIincludegraphics[width=0.85,#1]{#2}}
% Adding Math Equations to Section Titles Leads to Error
%https://www.topbug.net/blog/2015/12/10/a-collection-of-issues-about-the-latex-output-in-sphinx-and-the-solutions/
\usepackage{etoolbox}
\robustify\(
\robustify\)
% fix Footnotes Numbering Does Not Reset on Every Page
%https://www.topbug.net/blog/2015/12/10/a-collection-of-issues-about-the-latex-output-in-sphinx-and-the-solutions/
\usepackage{perpage}
\MakePerPage{footnote}
\newcommand{\overlr}[1]{\overset\leftrightarrow{#1}}
\newcommand{\overl}[1]{\overset\leftarrow{#1}}
\newcommand{\overr}[1]{\overset\rightarrow{#1}}
\usepackage{dsfont}
\usepackage{yfonts}
\def\degrees{^\circ}
\def\d{{\rm d}}
\setcounter{tocdepth}{2}
\def\sign{\mathop{\mathrm{sign}}}
\def\L{{\mathcal L}}
\def\H{{\mathcal H}}
\def\M{{\mathcal M}}
\def\matrix{}
\def\fslash#1{#1 \!\!\!/}
\def\F{{\bf F}}
\def\R{{\bf R}}
\def\J{{\bf J}}
\def\x{{\bf x}}
\def\y{{\bf y}}
\def\h{{\rm h}}
\def\a{{\rm a}}
\newcommand{\bfx}{\mbox{\boldmath $x$}}
\newcommand{\bfy}{\mbox{\boldmath $y$}}
\newcommand{\bfz}{\mbox{\boldmath $z$}}
\newcommand{\bfv}{\mbox{\boldmath $v$}}
\newcommand{\bfu}{\mbox{\boldmath $u$}}
\newcommand{\bfF}{\mbox{\boldmath $F$}}
\newcommand{\bfJ}{\mbox{\boldmath $J$}}
\newcommand{\bfU}{\mbox{\boldmath $U$}}
\newcommand{\bfY}{\mbox{\boldmath $Y$}}
\newcommand{\bfR}{\mbox{\boldmath $R$}}
\newcommand{\bfg}{\mbox{\boldmath $g$}}
\newcommand{\bfc}{\mbox{\boldmath $c$}}
\newcommand{\bfxi}{\mbox{\boldmath $\xi$}}
\newcommand{\bra}[1]{\left\langle #1\right|}
\newcommand{\ket}[1]{\left| #1\right\rangle}
\newcommand{\braket}[2]{\langle #1 \mid #2 \rangle}
\newcommand{\avg}[1]{\left< #1 \right>}
%\def\back{\!\!\!\!\!\!\!\!\!\!}
\def\back{}
\def\col#1#2{\left(\matrix{#1#2}\right)}
\def\row#1#2{\left(\matrix{#1#2}\right)}
\def\mat#1{\begin{pmatrix}#1\end{pmatrix}}
\def\matd#1#2{\left(\matrix{#1\back0\cr0\back#2}\right)}
\def\p#1#2{{\partial#1\over\partial#2}}
\def\cg#1#2#3#4#5#6{({#1},\,{#2},\,{#3},\,{#4}\,|\,{#5},\,{#6})}
\def\half{{\textstyle{1\over2}}}
\def\jsym#1#2#3#4#5#6{\left\{\matrix{
{#1}{#2}{#3}
{#4}{#5}{#6}
}\right\}}
\def\diag{\hbox{diag}}
\font\dsrom=dsrom10
\def\one{\hbox{\dsrom 1}}
\def\res{\mathop{\mathrm{Res}}}
\def\mathnot#1{\text{"$#1$"}}
%See Character Table for cmmib10:
%http://www.math.union.edu/~dpvc/jsmath/download/extra-fonts/cmmib10/cmmib10.html
\font\mib=cmmib10
\def\balpha{\hbox{\mib\char"0B}}
\def\bbeta{\hbox{\mib\char"0C}}
\def\bgamma{\hbox{\mib\char"0D}}
\def\bdelta{\hbox{\mib\char"0E}}
\def\bepsilon{\hbox{\mib\char"0F}}
\def\bzeta{\hbox{\mib\char"10}}
\def\boldeta{\hbox{\mib\char"11}}
\def\btheta{\hbox{\mib\char"12}}
\def\biota{\hbox{\mib\char"13}}
\def\bkappa{\hbox{\mib\char"14}}
\def\blambda{\hbox{\mib\char"15}}
\def\bmu{\hbox{\mib\char"16}}
\def\bnu{\hbox{\mib\char"17}}
\def\bxi{\hbox{\mib\char"18}}
\def\bpi{\hbox{\mib\char"19}}
\def\brho{\hbox{\mib\char"1A}}
\def\bsigma{\hbox{\mib\char"1B}}
\def\btau{\hbox{\mib\char"1C}}
\def\bupsilon{\hbox{\mib\char"1D}}
\def\bphi{\hbox{\mib\char"1E}}
\def\bchi{\hbox{\mib\char"1F}}
\def\bpsi{\hbox{\mib\char"20}}
\def\bomega{\hbox{\mib\char"21}}
\def\bvarepsilon{\hbox{\mib\char"22}}
\def\bvartheta{\hbox{\mib\char"23}}
\def\bvarpi{\hbox{\mib\char"24}}
\def\bvarrho{\hbox{\mib\char"25}}
\def\bvarphi{\hbox{\mib\char"27}}
%how to use:
%$$\alpha\balpha$$
%$$\beta\bbeta$$
%$$\gamma\bgamma$$
%$$\delta\bdelta$$
%$$\epsilon\bepsilon$$
%$$\zeta\bzeta$$
%$$\eta\boldeta$$
%$$\theta\btheta$$
%$$\iota\biota$$
%$$\kappa\bkappa$$
%$$\lambda\blambda$$
%$$\mu\bmu$$
%$$\nu\bnu$$
%$$\xi\bxi$$
%$$\pi\bpi$$
%$$\rho\brho$$
%$$\sigma\bsigma$$
%$$\tau\btau$$
%$$\upsilon\bupsilon$$
%$$\phi\bphi$$
%$$\chi\bchi$$
%$$\psi\bpsi$$
%$$\omega\bomega$$
%
%$$\varepsilon\bvarepsilon$$
%$$\vartheta\bvartheta$$
%$$\varpi\bvarpi$$
%$$\varrho\bvarrho$$
%$$\varphi\bvarphi$$
%small font
\font\mibsmall=cmmib7
\def\bsigmasmall{\hbox{\mibsmall\char"1B}}
\def\Tr{\hbox{Tr}\,}
\def\Arg{\hbox{Arg}\,}
\def\atan{\hbox{atan}\,}
\def\cosh{\hbox{cosh}\,}
"""
pngmath_latex_preamble = latex_preamble_data
latex_elements = {"preamble": latex_preamble_data}
rst_prolog = """
.. role:: strike
:class: strike
.. role:: highlight-text
:class: highlight-text
.. |nbsp| unicode:: 0xA0
:trim:
"""
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Statistical Physics'
copyright = u'2021, Lei Ma'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '3.14159'
# The full version, including alpha/beta/rc tags.
release = '3.14159'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
numfig = True
html_theme = 'bootstrap'
html_theme_options = {
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Chapters",
# Tab name for the current pages TOC. (Default: "Page")
'navbar_pagenav_name': "Sections",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
# 'source_link_position': "footer",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing with "" (default) or the name of a valid theme
# such as "amelia" or "cosmo". Previously "flatly".
'bootswatch_theme': "readable",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "false",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "footer",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
# 'navbar_class': "navbar navbar-inverse",
'navbar_class': "navbar navbar-inverse",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': True,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': True,
}
# For bootstrap theme:
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# bootswatch_theme = "cosmo"
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
# html_theme = "sphinx_rtd_theme"
# html_theme_path = ["_themes", ]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Statistical Physics Notes'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = ['robots.txt']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'StatisticalPhysicsdoc'
# -- Options for LaTeX output ---------------------------------------------
## I grabbed from https://github.com/jterrace/sphinxtr/blob/master/conf.py for this additional_preamble config
# ADDITIONAL_PREAMBLE = """
#\input{preamble._tex}
#\usepackage{sphinx}
#"""
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    'pointsize': '12pt',
    # Additional stuff for the LaTeX preamble. Setting it here keeps the
    # preamble defined earlier in this file from being silently discarded.
    'preamble': latex_preamble_data,
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto/manual]).
latex_documents = [
('index', 'statisticalphysics.tex', u'Statistical Physics Notes',
u'Lei Ma', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
latex_docclass = {
'manual': 'puthesis',
}
latex_additional_files = [
'_config/tex/puthesis.cls',
'_config/tex/preamble._tex',
# '_config/tex/footer._tex',
'_config/tex/sphinx.sty',
'_config/tex/Makefile',
# '_config/tex/refstyle.bst',
# '_refs.bib',
'_config/tex/ccicons.sty',
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'statisticalphysics', u'Statistical Physics Notes',
[u'Lei Ma'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'StatisticalPhysics', u'Statistical Physics Notes',
u'Lei Ma', 'StatisticalPhysics', 'Notes for Statistical Physics',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Statistical Physics'
epub_author = u'Lei Ma'
epub_publisher = u'Lei Ma'
epub_copyright = u'2021, Lei Ma'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
|
emptymalei/statisticalphysics
|
conf.py
|
Python
|
mit
| 18,471
|
"""Auto-generated file, do not edit by hand. BN metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_BN = PhoneMetadata(id='BN', country_code=673, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[2-578]\\d{6}', possible_number_pattern='\\d{7}'),
fixed_line=PhoneNumberDesc(national_number_pattern='[2-5]\\d{6}', possible_number_pattern='\\d{7}', example_number='2345678'),
mobile=PhoneNumberDesc(national_number_pattern='[78]\\d{6}', possible_number_pattern='\\d{7}', example_number='7123456'),
toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='99[135]', possible_number_pattern='\\d{3}', example_number='991'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
short_code=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
number_format=[NumberFormat(pattern='([2-578]\\d{2})(\\d{4})', format='\\1 \\2')])
|
ayushgoel/FixGoogleContacts
|
phonenumbers/data/region_BN.py
|
Python
|
mit
| 1,815
|
from django.conf.urls import re_path
from .views import (
proposal_submit,
proposal_submit_kind,
proposal_detail,
proposal_edit,
proposal_speaker_manage,
proposal_cancel,
proposal_pending_join,
proposal_pending_decline,
document_create,
document_delete,
document_download,
)
urlpatterns = [
re_path(r"^submit/$", proposal_submit, name="proposal_submit"),
re_path(
r"^submit/([\w\-]+)/$",
proposal_submit_kind,
name="proposal_submit_kind",
),
re_path(r"^(\d+)/$", proposal_detail, name="proposal_detail"),
re_path(r"^(\d+)/edit/$", proposal_edit, name="proposal_edit"),
re_path(
r"^(\d+)/speakers/$",
proposal_speaker_manage,
name="proposal_speaker_manage",
),
re_path(r"^(\d+)/cancel/$", proposal_cancel, name="proposal_cancel"),
re_path(
r"^(\d+)/join/$", proposal_pending_join, name="proposal_pending_join"
),
re_path(
r"^(\d+)/decline/$",
proposal_pending_decline,
name="proposal_pending_decline",
),
re_path(
r"^(\d+)/document/create/$",
document_create,
name="proposal_document_create",
),
re_path(
r"^document/(\d+)/delete/$",
document_delete,
name="proposal_document_delete",
),
re_path(
r"^document/(\d+)/([^/]+)$",
document_download,
name="proposal_document_download",
),
]
|
pydata/conf_site
|
symposion/proposals/urls.py
|
Python
|
mit
| 1,453
|
# coding: utf-8
from tapioca import (
TapiocaAdapter, generate_wrapper_from_adapter, JSONAdapterMixin)
from requests.auth import HTTPBasicAuth
from .resource_mapping import RESOURCE_MAPPING
class HarvestClientAdapter(JSONAdapterMixin, TapiocaAdapter):
resource_mapping = RESOURCE_MAPPING
api_root = 'https://api.harvestapp.com/v2/'
def get_request_kwargs(self, api_params, *args, **kwargs):
params = super(HarvestClientAdapter, self).get_request_kwargs(
api_params, *args, **kwargs)
params.setdefault('headers', {}).update({
'Authorization': 'Bearer %s' % api_params.get('token', ''),
'Harvest-Account-Id': api_params.get('account_id', ''),
'User-Agent': api_params.get('user_agent', '')
})
return params
def get_iterator_list(self, response_data):
return response_data
def get_iterator_next_request_kwargs(self, iterator_request_kwargs,
response_data, response):
pass
def response_to_native(self, response):
if response.content.strip():
return super(HarvestClientAdapter, self).response_to_native(response)
Harvest = generate_wrapper_from_adapter(HarvestClientAdapter)
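# Hypothetical usage sketch following the usual tapioca pattern: the kwargs
# passed when instantiating the wrapper become the api_params read in
# get_request_kwargs above.
#
#   client = Harvest(token='<access-token>', account_id='<account-id>',
#                    user_agent='you@example.com')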
|
vintasoftware/tapioca-harvest
|
tapioca_harvest/tapioca_harvest.py
|
Python
|
mit
| 1,269
|
# coding: utf-8
import datetime
import pytz
from django.test import TestCase
from ditto.core.utils import datetime_from_str
from ditto.pinboard.factories import AccountFactory, BookmarkFactory
from ditto.pinboard.templatetags import ditto_pinboard
class TemplatetagsRecentBookmarksTestCase(TestCase):
def setUp(self):
account_1 = AccountFactory(username="terry")
account_2 = AccountFactory(username="bob")
self.bookmarks_1 = BookmarkFactory.create_batch(6, account=account_1)
self.bookmarks_2 = BookmarkFactory.create_batch(6, account=account_2)
self.bookmarks_1[5].is_private = True
self.bookmarks_1[5].save()
def test_recent_bookmarks(self):
"Returns recent public bookmarks from all accounts."
bookmarks = ditto_pinboard.recent_bookmarks()
self.assertEqual(10, len(bookmarks))
# bookmarks[6] would be self.bookmarks_1[5] if [5] wasn't private.
self.assertEqual(bookmarks[6].pk, self.bookmarks_1[4].pk)
def test_recent_bookmarks_account(self):
"Only fetches recent public bookmarks from the named account."
bookmarks = ditto_pinboard.recent_bookmarks(account="terry")
self.assertEqual(5, len(bookmarks))
self.assertEqual(bookmarks[0].pk, self.bookmarks_1[4].pk)
def test_recent_bookmarks_limit(self):
bookmarks = ditto_pinboard.recent_bookmarks(limit=8)
self.assertEqual(8, len(bookmarks))
self.assertEqual(bookmarks[6].pk, self.bookmarks_1[4].pk)
class TemplatetagsDayBookmarksTestCase(TestCase):
def setUp(self):
account_1 = AccountFactory(username="terry")
account_2 = AccountFactory(username="bob")
self.bookmarks_1 = BookmarkFactory.create_batch(6, account=account_1)
self.bookmarks_2 = BookmarkFactory.create_batch(6, account=account_2)
post_time = datetime.datetime(2015, 3, 18, 12, 0, 0).replace(tzinfo=pytz.utc)
self.bookmarks_1[3].post_time = post_time
self.bookmarks_1[3].save()
self.bookmarks_1[5].is_private = True
self.bookmarks_1[5].post_time = post_time + datetime.timedelta(hours=1)
self.bookmarks_1[5].save()
self.bookmarks_2[4].post_time = post_time + datetime.timedelta(hours=2)
self.bookmarks_2[4].save()
def test_day_bookmarks(self):
"Returns public bookmarks from all accounts."
bookmarks = ditto_pinboard.day_bookmarks(datetime.date(2015, 3, 18))
self.assertEqual(2, len(bookmarks))
self.assertEqual(bookmarks[1].pk, self.bookmarks_1[3].pk)
def test_day_bookmarks_account(self):
"Only fetches public bookmarks from the named account."
bookmarks = ditto_pinboard.day_bookmarks(
datetime.date(2015, 3, 18), account="terry"
)
self.assertEqual(1, len(bookmarks))
self.assertEqual(bookmarks[0].pk, self.bookmarks_1[3].pk)
def test_day_bookmarks_none(self):
"Fetches no bookmarks when there aren't any on supplied date."
        bookmarks = ditto_pinboard.day_bookmarks(datetime.date(2015, 3, 19))
self.assertEqual(0, len(bookmarks))
class AnnualBookmarkCountsTestCase(TestCase):
def setUp(self):
account_1 = AccountFactory(username="terry")
account_2 = AccountFactory(username="bob")
# Bookmarks in 2015 and 2016 for account_1:
BookmarkFactory.create_batch(
3, post_time=datetime_from_str("2015-01-01 12:00:00"), account=account_1
)
BookmarkFactory.create_batch(
2, post_time=datetime_from_str("2016-01-01 12:00:00"), account=account_1
)
# And one for account_2 in 2015:
BookmarkFactory(
account=account_2, post_time=datetime_from_str("2015-01-01 12:00:00")
)
# And one private bookmark for account_1 in 2015:
BookmarkFactory(
account=account_1,
is_private=True,
post_time=datetime_from_str("2015-01-01 12:00:00"),
)
def test_response(self):
"Returns correct data for all users."
bookmarks = ditto_pinboard.annual_bookmark_counts()
self.assertEqual(len(bookmarks), 2)
self.assertEqual(bookmarks[0]["year"], 2015)
self.assertEqual(bookmarks[0]["count"], 4)
self.assertEqual(bookmarks[1]["year"], 2016)
self.assertEqual(bookmarks[1]["count"], 2)
def test_response_for_user(self):
"Returns correct data for one user."
bookmarks = ditto_pinboard.annual_bookmark_counts(account="terry")
self.assertEqual(len(bookmarks), 2)
self.assertEqual(bookmarks[0]["year"], 2015)
self.assertEqual(bookmarks[0]["count"], 3)
self.assertEqual(bookmarks[1]["year"], 2016)
self.assertEqual(bookmarks[1]["count"], 2)
def test_empty_years(self):
"It should include years for which there are no bookmarks."
        # Add a bookmark in 2018, leaving a gap for 2017:
BookmarkFactory(post_time=datetime_from_str("2018-01-01 12:00:00"))
bookmarks = ditto_pinboard.annual_bookmark_counts()
self.assertEqual(len(bookmarks), 4)
self.assertEqual(bookmarks[2]["year"], 2017)
self.assertEqual(bookmarks[2]["count"], 0)
class PopularBookmarkTagsTestCase(TestCase):
def test_tags(self):
"Contains the correct data"
bookmark_1 = BookmarkFactory()
bookmark_1.tags.set(["fish", "carp"])
bookmark_2 = BookmarkFactory()
bookmark_2.tags.set(["fish", "cod"])
tags = ditto_pinboard.popular_bookmark_tags()
self.assertEqual(len(tags), 3)
self.assertEqual(tags[0].name, "fish")
self.assertEqual(tags[0].num_times, 2)
self.assertEqual(tags[1].name, "carp")
self.assertEqual(tags[1].num_times, 1)
self.assertEqual(tags[2].name, "cod")
self.assertEqual(tags[2].num_times, 1)
def test_tags_privacy_bookmarks(self):
"Doesn't display tags from private bookmarks"
bookmark_1 = BookmarkFactory(is_private=True)
bookmark_1.tags.set(["fish", "carp"])
bookmark_2 = BookmarkFactory(is_private=False)
bookmark_2.tags.set(["fish", "cod"])
tags = ditto_pinboard.popular_bookmark_tags()
self.assertEqual(len(tags), 2)
self.assertEqual(tags[0].name, "fish")
self.assertEqual(tags[0].num_times, 1)
self.assertEqual(tags[1].name, "cod")
self.assertEqual(tags[1].num_times, 1)
def test_tags_privacy_tags(self):
"Doesn't display private .tags"
bookmark = BookmarkFactory()
bookmark.tags.set(["ispublic", ".notpublic", "alsopublic"])
tags = ditto_pinboard.popular_bookmark_tags()
self.assertEqual(len(tags), 2)
# Tags are ordered by popularity, so can't be sure
# which is 'alsopublic' and which is 'ispublic':
tag_names = [tag.name for tag in tags]
self.assertIn("alsopublic", tag_names)
self.assertIn("ispublic", tag_names)
def test_tags_limit_default(self):
"It should return 10 tags by default"
bookmark = BookmarkFactory()
bookmark.tags.set(["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"])
tags = ditto_pinboard.popular_bookmark_tags()
self.assertEqual(len(tags), 10)
def test_tags_limit_custom(self):
"It should return `limit` tags"
bookmark = BookmarkFactory()
bookmark.tags.set(["1", "2", "3", "4", "5"])
tags = ditto_pinboard.popular_bookmark_tags(limit=3)
self.assertEqual(len(tags), 3)
|
philgyford/django-ditto
|
tests/pinboard/test_templatetags.py
|
Python
|
mit
| 7,579
|
import os
import testinfra
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.mark.parametrize('name', [
"build-essential",
"bzip2",
"curl",
"libssl-dev",
"locales",
"ntp",
"openssl",
"software-properties-common",
"ssl-cert",
"wget",
])
def test_paquetes_esenciales(host, name):
p = host.package(name)
assert p.is_installed
@pytest.mark.parametrize('name', [
"bash",
"git",
"htop",
"silversearcher-ag",
"vim",
"tree",
"multitail",
"most",
"unzip",
])
def test_paquetes_desarrollo(host, name):
p = host.package(name)
assert p.is_installed
|
MSA-Argentina/ansible-roles
|
server-bootstrap/molecule/default/tests/test_default.py
|
Python
|
mit
| 768
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import Join, MapCompose, TakeFirst
import datetime
DATE_OUTPUT_FORMAT = '%m/%d/%Y %H:%M:%S %Z'
BOOKING_DATE_INPUT_FORMAT = '%m/%d/%Y %H:%M:%S'
class GwinnettInmate(scrapy.Item):
county_name = scrapy.Field()
timestamp = scrapy.Field()
url = scrapy.Field()
inmate_id = scrapy.Field()
inmate_lastname = scrapy.Field()
inmate_firstname = scrapy.Field()
inmate_middlename = scrapy.Field()
inmate_sex = scrapy.Field()
inmate_race = scrapy.Field()
inmate_age = scrapy.Field()
inmate_dob = scrapy.Field()
inmate_address = scrapy.Field()
booking_timestamp = scrapy.Field()
release_timestamp = scrapy.Field()
processing_numbers = scrapy.Field()
agency = scrapy.Field()
facility = scrapy.Field()
charges = scrapy.Field()
severity = scrapy.Field()
bond_amount = scrapy.Field()
current_status = scrapy.Field()
court_dates = scrapy.Field()
days_jailed = scrapy.Field()
other = scrapy.Field()
def parse_race(race_val):
if race_val == 'B':
return GwinnettInmateLoader.Race.Black
elif race_val == 'W':
return GwinnettInmateLoader.Race.White
else:
return GwinnettInmateLoader.Race.Unknown
def parse_sex(sex_val):
if sex_val == 'FEMALE':
return GwinnettInmateLoader.Sex.Female
elif sex_val == 'MALE':
return GwinnettInmateLoader.Sex.Male
else:
return GwinnettInmateLoader.Sex.Unknown
def parse_severity(severity_val):
if severity_val == 'F':
return GwinnettInmateLoader.ChargeSeverity.Felony
elif severity_val == 'M':
return GwinnettInmateLoader.ChargeSeverity.Misdemeanor
else:
return GwinnettInmateLoader.ChargeSeverity.Unknown
def parse_timestamp(timestamp):
return datetime.datetime.strptime(timestamp, BOOKING_DATE_INPUT_FORMAT)
def format_timestamp(timestamp):
return timestamp.isoformat(' ')
class GwinnettInmateLoader(ItemLoader):
class Race:
Asian = 'asian'
Black = 'black'
Hispanic = 'hispanic'
Middle_Eastern = 'middle-eastern'
Native_American = 'native-american'
Pacific_Islander = 'pacific-islander'
Unknown = 'unknown'
White = 'white'
class Sex:
Female = 'f'
Male = 'm'
Unknown = 'unknown'
class ChargeSeverity:
Misdemeanor = 'misdemeanor'
Felony = 'felony'
Unknown = 'unknown'
default_output_processor = TakeFirst()
timestamp_out = MapCompose(format_timestamp)
inmate_lastname_in = MapCompose(str.strip)
inmate_firstname_in = MapCompose(str.strip)
inmate_middlename_in = MapCompose(str.strip)
inmate_sex_in = MapCompose(str.strip, parse_sex)
inmate_race_in = MapCompose(str.strip, parse_race)
inmate_age_in = MapCompose(str.strip, int)
inmate_dob_in = MapCompose(str.strip)
inmate_address_in = MapCompose(str.strip)
booking_timestamp_in = MapCompose(str.strip, parse_timestamp)
booking_timestamp_out = MapCompose(format_timestamp)
release_timestamp_in = MapCompose(str.strip)
processing_numbers_in = MapCompose(str.strip)
agency_in = MapCompose(str.strip)
facility_in = MapCompose(str.strip)
charges_out = Join(' | ')
severity_in = MapCompose(str.strip, parse_severity)
severity_out = Join(' | ')
bond_amount_in = MapCompose(str.strip)
current_status_in = MapCompose(str.strip)
court_dates_in = MapCompose(str.strip)
days_jailed_in = MapCompose(str.strip)
other_in = MapCompose(str.strip)
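# Hypothetical loader sketch (standard scrapy ItemLoader usage): raw values
# are normalized by the *_in processors declared above.
#
#   loader = GwinnettInmateLoader(item=GwinnettInmate(), response=response)
#   loader.add_value('inmate_sex', ' MALE ')   # -> 'm' via strip + parse_sex
#   loader.add_value('severity', 'F')          # -> 'felony'
#   item = loader.load_item()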
|
lahoffm/aclu-bail-reform
|
src/webscraper/gwinnett/gwinnett/items.py
|
Python
|
mit
| 3,640
|
from django.shortcuts import redirect
from braces.views import LoginRequiredMixin
from vanilla import CreateView, DetailView
from arcade.games.forms import NewGameForm
from arcade.games.models import Game
class CreateNewGameView(LoginRequiredMixin, CreateView):
model = Game
form_class = NewGameForm
template_name = 'games/create.html'
def form_valid(self, form):
game = Game()
game.author = self.request.user
packaged_app_archive = form.cleaned_data['packaged_app_archive']
game.packaged_app.save(packaged_app_archive.name, packaged_app_archive, save=True)
return redirect(game)
class GameDetailView(DetailView):
model = Game
template_name = 'games/detail.html'
|
Osmose/arcade
|
arcade/games/views.py
|
Python
|
mit
| 735
|
import numpy as np
import pandas as pd
from collections import defaultdict
def predict(trees, examples):
n_rows, _ = examples.shape
    results = pd.DataFrame(index=range(n_rows), columns=list(trees.keys()))
    results["prediction"] = pd.Series(index=range(n_rows), dtype=object)
    for column in results.columns:
        results[column] = np.uint8(255)
    # The last two columns are excluded from the single-vote check below.
    non_ones_columns = results.columns[-2:]
    for i in range(n_rows):
        for k, tree in trees.items():
            # .at replaces the long-removed DataFrame.set_value
            results.at[i, k] = tree.predict(examples[i])
        # .loc replaces the removed .ix indexer (the index is a plain range)
        result = results.loc[i].drop(non_ones_columns)
        val = result[result == 1].index[0] if result.sum() == 1 else results.at[i, "ag"]
        results.at[i, "prediction"] = val
    # .to_numpy() replaces the removed DataFrame.as_matrix
    return results[["prediction"]].to_numpy()
def score_predictions(tree, examples, targets):
size, _ = examples.shape
score = 0
for i in range(size):
if tree.predict(examples[i]) == targets[i]:
score += 1
return score
|
MLNotWar/decision-trees-algorithm
|
src/predictor.py
|
Python
|
mit
| 972
|
"""
Django settings for wtf_proj project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
from os.path import dirname, join, abspath
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = dirname(dirname(abspath(__file__)))
TEMPLATES_DIR = join(BASE_DIR, "templates")  # named so the TEMPLATES setting below does not shadow it
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm#3%y9yvv%b@a_*xxa)qffso2^t*+a$=vx#ej*qw0vlf0el021'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["23.23.73.48", "127.0.0.1", "localhost", "wtf-dict.herokuapp.com"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'wtf_proj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            TEMPLATES_DIR
        ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wtf_proj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
# Via Whitenoise
STATIC_HOST = os.environ.get('DJANGO_STATIC_HOST', '')
STATIC_URL = STATIC_HOST + '/static/'
STATIC_ROOT = join(BASE_DIR, "staticfiles")
UNCOLLECTED_STATIC_ROOT = join(BASE_DIR, "uncollected_static")
STATICFILES_DIRS = (
UNCOLLECTED_STATIC_ROOT,
)
WHITENOISE_ROOT = join(BASE_DIR, "basic_files")
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
|
TimelyToga/wtf_is
|
wtf_proj/wtf_proj/settings.py
|
Python
|
mit
| 3,721
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class UserTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.chat.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.users(sid="USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://chat.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Users/USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"identity": "jing",
"attributes": null,
"is_online": true,
"is_notifiable": null,
"friendly_name": null,
"joined_channels_count": 0,
"date_created": "2016-03-24T21:05:19Z",
"date_updated": "2016-03-24T21:05:19Z",
"links": {
"user_channels": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users/USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels"
},
"url": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users/USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.chat.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.users(sid="USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.chat.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.users(sid="USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://chat.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Users/USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.chat.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.users(sid="USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.chat.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.users.create(identity="identity")
values = {'Identity': "identity", }
self.holodeck.assert_has_request(Request(
'post',
'https://chat.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Users',
data=values,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"sid": "USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"identity": "jing",
"attributes": null,
"is_online": true,
"is_notifiable": null,
"friendly_name": null,
"joined_channels_count": 0,
"date_created": "2016-03-24T21:05:19Z",
"date_updated": "2016-03-24T21:05:19Z",
"links": {
"user_channels": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users/USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels"
},
"url": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users/USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.chat.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.users.create(identity="identity")
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.chat.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.users.list()
self.holodeck.assert_has_request(Request(
'get',
'https://chat.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Users',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users?PageSize=50&Page=0",
"next_page_url": null,
"key": "users"
},
"users": [
{
"sid": "USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"identity": "jing",
"attributes": null,
"is_online": true,
"is_notifiable": null,
"friendly_name": null,
"date_created": "2016-03-24T21:05:19Z",
"date_updated": "2016-03-24T21:05:19Z",
"joined_channels_count": 0,
"links": {
"user_channels": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users/USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels"
},
"url": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users/USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
]
}
'''
))
actual = self.client.chat.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.users.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users?PageSize=50&Page=0",
"next_page_url": null,
"key": "users"
},
"users": []
}
'''
))
actual = self.client.chat.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.users.list()
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.chat.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.users(sid="USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.holodeck.assert_has_request(Request(
'post',
'https://chat.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Users/USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_update_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"identity": "jing",
"attributes": null,
"is_online": true,
"is_notifiable": null,
"friendly_name": null,
"joined_channels_count": 0,
"date_created": "2016-03-24T21:05:19Z",
"date_updated": "2016-03-24T21:05:19Z",
"links": {
"user_channels": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users/USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels"
},
"url": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users/USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.chat.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.users(sid="USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
|
tysonholub/twilio-python
|
tests/integration/chat/v1/service/test_user.py
|
Python
|
mit
| 10,034
|
def middle_way(a, b):
    return [a[1], b[1]]
print(middle_way([1, 2, 3], [4, 5, 6])) # [2, 5]
print(middle_way([7, 7, 7], [3, 8, 0])) # [7, 8]
print(middle_way([5, 2, 9], [1, 4, 5])) # [2, 4]
|
frainfreeze/studying
|
projects/practice/other/022.py
|
Python
|
mit
| 190
|
# coding=utf-8
import pickle
from petsc4py import PETSc
import numpy as np
from scipy.io import savemat
# filename = 'sphere'
# with open(filename + '_pick.bin', 'rb') as input:
# unpick = pickle.Unpickler(input)
#
# viewer = PETSc.Viewer().createBinary(filename + '_M.bin', 'r')
# M = PETSc.Mat().create(comm=PETSc.COMM_WORLD)
# M.setType('dense')
# M = M.load(viewer)
#
# viewer = PETSc.Viewer().createBinary(filename + '_F.bin', 'r')
# F = PETSc.Vec().create(comm=PETSc.COMM_WORLD)
# F = F.load(viewer)
# deltaLength = 0.05 ** np.arange(0.25, 1.05, 0.1)
# epsilon = np.arange(0.1, 2, 0.2)
# deltaLength, epsilon = np.meshgrid(deltaLength, epsilon)
# deltaLength = deltaLength.flatten()
# epsilon = epsilon.flatten()
# sphere_err = epsilon.copy()
# for i0 in range(sphere_err.size):
# d = deltaLength[i0]
# e = epsilon[i0]
# fileName = 'sphere_%d_%f_%f' % (i0, d, e)
# PETSc.Sys.Print(fileName)
# pass
# class a():
# def printme(self):
# PETSc.Sys.Print('a')
#
# class b(a):
# def printme(self):
# PETSc.Sys.Print('b')
#
# class c(a):
# def printme(self):
# PETSc.Sys.Print('c')
#
# class d(c, b):
# def notiong(self):
# pass
#
# if __name__ == '__main__':
# d1 = d()
# d1.printme()
|
pcmagic/stokes_flow
|
try_code/try_pickle.py
|
Python
|
mit
| 1,266
|
from typing import List
class Solution:
def findPeakElement(self, nums: List[int]) -> int:
low = 0
high = len(nums) - 1
        while high - low > 1:
            mid = (low + high) // 2
            left_ele = nums[mid-1]
            mid_ele = nums[mid]
            right_ele = nums[mid+1]
            # print(left_ele, mid_ele, right_ele)
            if mid_ele > left_ele:
                if mid_ele > right_ele:
                    return mid
                # left < mid < right: a peak must lie to the right
                low = mid + 1
            else:
                # mid < left: a peak must lie to the left, whichever way
                # the right neighbour compares
                high = mid - 1
        if nums[low] > nums[high]:
            return low
        return high
s = Solution()
data = [
[1,2,3,1],
[1,2,1,3,5,6,4]
]
for d in data:
print(s.findPeakElement(d))
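# The search assumes, as in LeetCode 162, that nums[i] != nums[i+1], so
# halving toward the larger neighbour always keeps a peak inside
# [low, high]; the loop is O(log n). Expected output for the two samples
# above: 2, then 5.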
|
daicang/Leetcode-solutions
|
162-find-peak-element.py
|
Python
|
mit
| 977
|
# -*- encoding: utf-8 -*-
from supriya.tools.ugentools.InfoUGenBase import InfoUGenBase
class BlockSize(InfoUGenBase):
r'''A block size info unit generator.
::
>>> ugentools.BlockSize.ir()
BlockSize.ir()
'''
### CLASS VARIABLES ###
__documentation_section__ = 'Info UGens'
__slots__ = ()
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
):
InfoUGenBase.__init__(
self,
calculation_rate=calculation_rate,
)
|
andrewyoung1991/supriya
|
supriya/tools/ugentools/BlockSize.py
|
Python
|
mit
| 545
|
from datahandle import Vault
if __name__ == '__main__':
print('This program cannot be run in DOS mode')
|
CleyFaye/FOS_View
|
fosfile/__init__.py
|
Python
|
mit
| 110
|
import numpy as np
import random
from matplotlib import pyplot as plt
from matplotlib import animation
from collections import deque
# First set up the figure, the axis, and the plot element we want to animate
fig = plt.figure()
ax = plt.axes(xlim=(0, 2), ylim=(-2, 2))
line, = ax.plot([], [], lw=2)
# initialization function: plot the background of each frame
def init():
line.set_data([], [])
return line,
# animation function. This is called sequentially
buffer_size = 300
buffer = deque([0]*buffer_size, maxlen=buffer_size)
class Walker:
def __init__(self,velocity,position,lower_bound,upper_bound):
self.velocity = velocity
self.position=position
self.lower_bound = lower_bound
self.upper_bound = upper_bound
def next_point(self):
x = self.position + random.random() * self.velocity
if x > self.upper_bound:
x = 2*self.upper_bound - x
self.velocity *=-1
elif x < self.lower_bound:
x = 2* self.lower_bound - x
self.velocity *=-1
self.position = x
return x
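# Reflection example (assumed bounds 0..1): from position 0.9 with a step of
# +0.2, the raw point 1.1 is folded back to 2*1.0 - 1.1 = 0.9 and the
# velocity sign flips, so the walk stays inside [lower_bound, upper_bound].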
def make_animate( buffer, walker):
def animate(i):
x = np.linspace(0, 2, buffer_size)
buffer.append(walker.next_point())
y = list(buffer)
line.set_data(x, y)
return line,
return animate
w = Walker(0.3,0,0,1)
animate = make_animate(buffer, w)
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init,
blit=True)
plt.show()
|
meclav/whistle
|
src/playground/plottingworks.py
|
Python
|
mit
| 1,444
|
def baseurl(url):
return url[:url.rfind('/')]
def is_installed(settings):
"""
    Check Django settings and verify that the tinymce application is
    included in INSTALLED_APPS, so that staticfiles can serve the
    required files.
"""
if not hasattr(settings, 'INSTALLED_APPS'):
raise RuntimeError('Django settings should contain INSTALLED_APPS.')
if 'tinymce' not in settings.INSTALLED_APPS:
raise RuntimeError('Add tinymce to INSTALLED_APPS in settings.')
return True
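# For example, a settings module with
#   INSTALLED_APPS = [..., 'django.contrib.staticfiles', 'tinymce']
# passes this check; omitting 'tinymce' raises RuntimeError.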
class ConfigurationError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
def parse_profile(profile):
D = profile
# directionality
i = D.get('directionality', None)
if i is None:
D['directionality'] = 'ltr'
elif i not in ('ltr', 'rtl'):
raise ConfigurationError('directionality must be ltr or rtl')
# browser_spellcheck
i = D.get('browser_spellcheck', None)
if i is None:
D['browser_spellcheck'] = False
elif i not in (False, True):
raise ConfigurationError('browser_spellcheck must be True or False')
# nowrap
i = D.get('nowrap', None)
if i is None:
D['nowrap'] = False
elif i not in (False, True):
raise ConfigurationError('nowrap must be True or False')
# skin
i = D.get('skin', None)
if i is None:
D['skin'] = 'lightgray'
# theme
i = D.get('theme', None)
if i is None:
D['theme'] = 'modern'
# inline
D['inline'] = D.get('inline', False)
# convert_fonts_to_spans
D['convert_fonts_to_spans'] = D.get('convert_fonts_to_spans', True)
# element_format
i = D.get('element_format', None)
if i is None:
D['element_format'] = 'xhtml'
elif i not in ('xhtml', 'html'):
raise ConfigurationError('element_format must be xhtml or html')
# fix_list_elements
i = D.get('fix_list_elements', None)
if i is None:
D['fix_list_elements'] = False
elif i not in (False, True):
raise ConfigurationError('fix_list_elements must be True or False')
# force_p_newlines
i = D.get('force_p_newlines', None)
if i is not None and i not in (False, True):
raise ConfigurationError('force_p_newlines must be True or False')
# force_hex_style_colors
i = D.get('force_hex_style_colors', None)
if i is not None and i not in (False, True):
raise ConfigurationError('force_hex_style_colors must be True or False')
# keep_styles
i = D.get('keep_styles', None)
if i is not None and i not in (False, True):
raise ConfigurationError('keep_styles must be True or False')
# protect
i = D.get('protect', None)
if i is not None and not (isinstance(i, tuple) or isinstance(i, list)):
raise ConfigurationError('protect must be tuple or list')
# schema
i = D.get('schema', None)
if i is not None and i not in ('html4', 'html5', 'html5-strict'):
raise ConfigurationError('schema must be html4, html5 or html5-strict')
# visual
i = D.get('visual', None)
if i is not None and i not in (False, True):
raise ConfigurationError('visual must be True or False')
# custom_undo_redo_levels
i = D.get('custom_undo_redo_levels', None)
if i is not None and not isinstance(i, int):
raise ConfigurationError('custom_undo_redo_levels must be int')
# menu
i = D.get('menu', None)
if i is not None and not isinstance(i, dict):
raise ConfigurationError('menu must be dict')
# statusbar
i = D.get('statusbar', None)
if i is not None and i not in (False, True):
raise ConfigurationError('statusbar must be True or False')
# resize
i = D.get('resize', None)
if i is not None and i not in (False, True, 'both'):
raise ConfigurationError('resize must be True, False or "both"')
# convert_urls
i = D.get('convert_urls', None)
if i is not None and i not in (False, True):
raise ConfigurationError('convert_urls must be True or False')
# relative_urls
i = D.get('relative_urls', None)
if i is not None and i not in (False, True):
raise ConfigurationError('relative_urls must be True or False')
# remove_script_host
i = D.get('remove_script_host', None)
if i is not None and i not in (False, True):
raise ConfigurationError('remove_script_host must be True or False')
# document_base_url
i = D.get('document_base_url', None)
if i is not None and (not isinstance(i, str) or not i.endswith('/')):
raise ConfigurationError('document_base_url must be str and end with "/"')
# file_browser_callback_types
i = D.get('file_browser_callback_types', None)
if i is not None:
if not isinstance(i, str):
raise ConfigurationError('file_browser_callback_types must be str and combination of file, image or media')
allowed_types = ('file', 'image', 'media')
types = set(i.split(' '))
if not all([(t in allowed_types) for t in types]):
raise ConfigurationError('file_browser_callback_types must be str and combination of file, image or media')
return D
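# A minimal usage sketch of parse_profile (hypothetical input values): omitted
# keys are filled with defaults, and invalid values raise ConfigurationError.
if __name__ == '__main__':
    profile = parse_profile({'theme': 'modern'})
    assert profile['skin'] == 'lightgray'
    assert profile['directionality'] == 'ltr'
    try:
        parse_profile({'schema': 'xml'})
    except ConfigurationError as exc:
        print('rejected: %s' % exc)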
|
dani0805/django-tinymce4
|
tinymce/utils.py
|
Python
|
mit
| 5,268
|
"""Get optimal contraction sequence using netcon algorithm
Reference:
R. N. C. Pfeifer, et al.: Phys. Rev. E 90, 033315 (2014)
"""
__author__ = "Satoshi MORITA <morita@issp.u-tokyo.ac.jp>"
__date__ = "24 March 2016"
import sys
import logging
import time
import config
import itertools
class TensorFrame:
"""Tensor class for netcon.
Attributes:
rpn: contraction sequence with reverse polish notation.
bits: bits representation of contracted tensors.
bonds: list of uncontracted bonds.
is_new: a flag.
"""
def __init__(self,rpn=[],bits=0,bonds=[],cost=0.0,is_new=True):
self.rpn = rpn[:]
self.bits = bits
self.bonds = bonds
self.cost = cost
self.is_new = is_new
def __repr__(self):
return "TensorFrame({0}, bonds={1}, cost={2:.6e}, bits={3}, is_new={4})".format(
self.rpn, self.bonds, self.cost, self.bits, self.is_new)
def __str__(self):
return "{0} : bonds={1} cost={2:.6e} bits={3} new={4}".format(
self.rpn, self.bonds, self.cost, self.bits, self.is_new)
class NetconOptimizer:
def __init__(self, prime_tensors, bond_dims):
self.prime_tensors = prime_tensors
self.BOND_DIMS = bond_dims[:]
def optimize(self):
"""Find optimal contraction sequence.
Args:
tn: TensorNetwork in tdt.py
bond_dims: List of bond dimensions.
Return:
rpn: Optimal contraction sequence with reverse polish notation.
cost: Total contraction cost.
"""
tensordict_of_size = self.init_tensordict_of_size()
n = len(self.prime_tensors)
xi_min = float(min(self.BOND_DIMS))
mu_cap = 1.0
prev_mu_cap = 0.0 #>=0
while len(tensordict_of_size[-1])<1:
logging.info("netcon: searching with mu_cap={0:.6e}".format(mu_cap))
next_mu_cap = sys.float_info.max
for c in range(2,n+1):
for d1 in range(1,c//2+1):
d2 = c-d1
                    if d1 == d2:
                        t1_t2_iterator = itertools.combinations(tensordict_of_size[d1].values(), 2)
                    else:
                        t1_t2_iterator = itertools.product(tensordict_of_size[d1].values(), tensordict_of_size[d2].values())
for t1, t2 in t1_t2_iterator:
if self.are_overlap(t1,t2): continue
if self.are_direct_product(t1,t2): continue
cost = self.get_contracting_cost(t1,t2)
bits = t1.bits ^ t2.bits
if next_mu_cap <= cost:
pass
elif mu_cap < cost:
next_mu_cap = cost
elif t1.is_new or t2.is_new or prev_mu_cap < cost:
t_old = tensordict_of_size[c].get(bits)
if t_old is None or cost < t_old.cost:
tensordict_of_size[c][bits] = self.contract(t1,t2)
prev_mu_cap = mu_cap
mu_cap = max(next_mu_cap, mu_cap*xi_min)
for s in tensordict_of_size:
for t in s.values(): t.is_new = False
logging.debug("netcon: tensor_num=" + str([ len(s) for s in tensordict_of_size]))
t_final = tensordict_of_size[-1][(1<<n)-1]
return t_final.rpn, t_final.cost
def init_tensordict_of_size(self):
"""tensordict_of_size[k][bits] == calculated lowest-cost tensor which is contraction of k+1 prime tensors and whose bits == bits"""
tensordict_of_size = [{} for size in range(len(self.prime_tensors)+1)]
for t in self.prime_tensors:
rpn = t.name
bits = 0
for i in rpn:
if i>=0: bits += (1<<i)
bonds = frozenset(t.bonds)
cost = 0.0
tensordict_of_size[1].update({bits:TensorFrame(rpn,bits,bonds,cost)})
return tensordict_of_size
def get_contracting_cost(self,t1,t2):
"""Get the cost of contraction of two tensors."""
cost = 1.0
for b in (t1.bonds | t2.bonds):
cost *= self.BOND_DIMS[b]
cost += t1.cost + t2.cost
return cost
def contract(self,t1,t2):
"""Return a contracted tensor"""
assert (not self.are_direct_product(t1,t2))
rpn = t1.rpn + t2.rpn + [-1]
bits = t1.bits ^ t2.bits # XOR
bonds = frozenset(t1.bonds ^ t2.bonds)
cost = self.get_contracting_cost(t1,t2)
return TensorFrame(rpn,bits,bonds,cost)
def are_direct_product(self,t1,t2):
"""Check if two tensors are disjoint."""
return (t1.bonds).isdisjoint(t2.bonds)
def are_overlap(self,t1,t2):
"""Check if two tensors have the same basic tensor."""
return (t1.bits & t2.bits)>0
def print_tset(self,tensors_of_size):
"""Print tensors_of_size. (for debug)"""
for level in range(len(tensors_of_size)):
for i,t in enumerate(tensors_of_size[level]):
print(level,i,t)
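if __name__ == '__main__':
    # A minimal sketch with hypothetical tensors (assumes the sibling config
    # module imported above is importable): netcon only needs objects with a
    # .name list (tensor ids in rpn form) and a .bonds list of bond indices.
    from collections import namedtuple
    Prime = namedtuple('Prime', ['name', 'bonds'])
    a = Prime(name=[0], bonds=[0, 1])
    b = Prime(name=[1], bonds=[0, 2])
    rpn, cost = NetconOptimizer([a, b], bond_dims=[4, 2, 3]).optimize()
    print(rpn, cost)  # expected: [0, 1, -1] and 24.0 (= 4 * 2 * 3)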
|
smorita/Tensordot
|
netcon.py
|
Python
|
mit
| 5,072
|
"""Tests for DecisionTree.py on data sets."""
import importlib.util
import DecisionTree
spec = importlib.util.spec_from_file_location("tester", "../common/Tester.py")
tester = importlib.util.module_from_spec(spec)
spec.loader.exec_module(tester)
def __train(training_set):
tree = DecisionTree.ClassificationTree()
tree.build(training_set)
return tree
def __evaluate(predictor, input):
return predictor.evaluate(input)
def test_iris():
print("Testing on Iris Data Set...")
print("Accuracy:", tester.test("../data sets/IrisDataSet.csv", 120, __train,
__evaluate))
def test_letter():
print("Testing on Letter Data Set...")
print("Accuracy:",
tester.test("../data sets/LetterDataSet.csv", 10000, __train,
__evaluate))
if __name__ == '__main__':
test_iris()
test_letter()
|
FelixOpolka/Statistical-Learning-Algorithms
|
decision tree/DecisionTreeTests.py
|
Python
|
mit
| 885
|
#encoding:utf-8
subreddit = 'Texans'
t_channel = '@r_texans'
def send_post(submission, r2t):
return r2t.send_simple(submission, min_upvotes_limit=100, check_dups=True)
|
Fillll/reddit2telegram
|
reddit2telegram/channels/r_texans/app.py
|
Python
|
mit
| 175
|
'''
Suppose a sorted array is rotated at some pivot unknown to you beforehand.
(i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
Find the minimum element.
You may assume no duplicate exists in the array.
'''
class Solution(object):
def findMin(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
left, right = 0, len(nums) - 1
mid = 0
        while left < right:
            mid = (left + right) // 2
            # a drop from nums[mid] to nums[mid + 1] marks the rotation point
            if nums[mid] > nums[mid + 1]:
                return nums[mid + 1]
            # mid is still on the rotated (larger) half: minimum is to the right
            elif nums[mid] > nums[right]:
                left = mid + 1
            # otherwise the minimum is at mid or to its left
            else:
                right = mid
return nums[mid]
if __name__ == "__main__":
assert Solution().findMin([1, 2, 3, 4, 5]) == 1
assert Solution().findMin([2, 3, 4, 5, 1]) == 1
assert Solution().findMin([5, 1, 2, 3, 4]) == 1
|
gavinfish/leetcode-share
|
python/153 Find Minimum in Rotated Sorted Array.py
|
Python
|
mit
| 899
|
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
def posts():
print "Reading posts..."
data = pd.read_csv("data/posts.csv", dtype={'cooked': np.str}, na_values=[],
keep_default_na=False, encoding="utf-8")
return data
|
rux-pizza/discourse-analysis
|
data.py
|
Python
|
mit
| 267
|
from collections import deque
class AhoCorasick(object):
def __init__(self, keywords):
self.adj_list = []
self.adj_list.append({
"value" : "",
"next_states" : [],
"fail_state" : 0,
"output" : []
})
self.add_keywords(keywords)
self.set_fail_transitions()
def add_keywords(self, keywords):
for keyword in keywords:
self.add_keyword(keyword)
def find_next_state(self, current_state, value):
for node in self.adj_list[current_state]["next_states"]:
if self.adj_list[node]["value"] == value:
return node
return None
def add_keyword(self, keyword):
current_state = 0
j = 0
keyword = keyword.lower()
child = self.find_next_state(current_state, keyword[j])
        while child is not None:
current_state = child
j += 1
if j < len(keyword):
child = self.find_next_state(current_state, keyword[j])
else:
break
for i in xrange(j, len(keyword)):
node = {
"value" : keyword[i],
"next_states" : [],
"fail_state" : 0,
"output" : []
}
self.adj_list.append(node)
self.adj_list[current_state]["next_states"].append(len(self.adj_list) - 1)
current_state = len(self.adj_list) - 1
self.adj_list[current_state]["output"].append(keyword)
def set_fail_transitions(self):
q = deque()
child = 0
for node in self.adj_list[0]["next_states"]:
q.append(node)
self.adj_list[node]["fail_state"] = 0
while q:
r = q.popleft()
for child in self.adj_list[r]["next_states"]:
q.append(child)
state = self.adj_list[r]["fail_state"]
while self.find_next_state(state, self.adj_list[child]["value"]) == None and state != 0:
state = self.adj_list[state]["fail_state"]
self.adj_list[child]["fail_state"] = self.find_next_state(state, self.adj_list[child]["value"])
if self.adj_list[child]["fail_state"] is None:
self.adj_list[child]["fail_state"] = 0
self.adj_list[child]["output"] = self.adj_list[child]["output"] + self.adj_list[self.adj_list[child]["fail_state"]]["output"]
def get_matches(self, line):
line = line.lower()
current_state = 0
found = []
for i in xrange(len(line)):
while self.find_next_state(current_state, line[i]) is None and current_state != 0:
current_state = self.adj_list[current_state]["fail_state"]
current_state = self.find_next_state(current_state, line[i])
if current_state is None:
current_state = 0
else:
for j in self.adj_list[current_state]["output"]:
found.append({"index": i - len(j) + 1, "word": j})
return found
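if __name__ == '__main__':
    # A small usage sketch with hypothetical keywords: build the automaton
    # once, then report every keyword occurrence in a line.
    ac = AhoCorasick(["he", "she", "hers"])
    print(ac.get_matches("ushers"))
    # expected: "she" at index 1, "he" at index 2 and "hers" at index 2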
|
shams-sam/logic-lab
|
AhoCorasick/aho_corasick.py
|
Python
|
mit
| 3,132
|
#!/usr/bin/env python
# coding=utf-8
import logging
import tornado.ioloop
import tornado.web
import tornado.gen
import tornado.httpclient
import tornado.escape
import tornado.locale
import tornado.websocket
import tornado.httpserver
import tornado.options
from tornado.options import define, options
options.logging = 'debug'
define("port", default=9999, help="run on the given port", type=int)
class MainHandler(tornado.web.RequestHandler):
def get(self):
# self.write(
# '<html><body><form action="/form" method="POST">'
# '<input type="text" name="msg">'
# '<input type="text" name="msg">'
# '<input type="submit" value="Submit">'
# '</form></body></html>'
# )
self.write('get')
def post(self):
self.write(
'The host is %s, method is %s, remote_ip is %s and UA is %s' % (
self.request.host, self.request.method, self.request.remote_ip,
self.request.headers.get('User-Agent')
)
)
class AsyncHandler(tornado.web.RequestHandler):
@tornado.gen.coroutine
def get(self):
http = tornado.httpclient.AsyncHTTPClient()
res = yield http.fetch('http://apis.baidu.com/heweather/pro/attractions')
json = tornado.escape.json_decode(res.body)
self.write(json)
@tornado.web.asynchronous
def post(self):
http = tornado.httpclient.AsyncHTTPClient()
http.fetch('http://apis.baidu.com/heweather/pro/attractions', callback=self.on_response)
def on_response(self, res):
if res.error:
raise tornado.web.HTTPError(500)
json = tornado.escape.json_decode(res.body)
self.write(json)
self.finish()
if __name__ == '__main__':
tornado.options.parse_command_line()
settings = {
'debug': True,
'compiled_template_cache': False
}
app = tornado.web.Application([
(r'/', MainHandler),
(r'/async', AsyncHandler),
], **settings)
# app.listen(9999)
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(options.port)
logging.warning('start ..')
tornado.ioloop.IOLoop.current().start()
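# Quick manual checks once the server is running (hypothetical host/port):
#   curl http://localhost:9999/                    -> "get"
#   curl -X POST -d msg=hi http://localhost:9999/  -> request summary
#   curl http://localhost:9999/async               -> proxied JSON response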
|
tao12345666333/Talk-Is-Cheap
|
python/tornado/web/base.py
|
Python
|
mit
| 2,228
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Parsing FARS vehicle CSV files and putting them into DB
"""
import csv
import sys
import os
from db_api import person
from db_api import accident
from db_api import vehicle
from fars_person_mapper import FARSPersonMapper
from fars_accident_mapper import FARSAccidentMapper
from fars_vehicle_mapper import FARSVehicleMapper
if len(sys.argv) != 3:
print('Usage: {0} <year> <fars_data_dir>'.format(sys.argv[0]))
exit(1)
year = int(sys.argv[1])
data_dir = sys.argv[2]
accident_path = os.path.join(data_dir, '{0}_accident.csv'.format(year))
person_path = os.path.join(data_dir, '{0}_person.csv'.format(year))
vehicle_path = os.path.join(data_dir, '{0}_vehicle.csv'.format(year))
accident_file = open(accident_path, 'rt')
person_file = open(person_path, 'rt')
vehicle_file = open(vehicle_path, 'rt')
try:
# Parse persons
person_reader = csv.reader(person_file)
first_row = next(person_reader)
mapper = FARSPersonMapper(first_row, year)
persons = []
driver_by_veh = {}
for row in person_reader:
if mapper.valid(row):
veh_id = mapper.veh_id(row)
if veh_id not in driver_by_veh:
driver_by_veh[veh_id] = None
new_person = person.new(
id=mapper.id(row),
acc_id=mapper.acc_id(row),
veh_id=veh_id,
sex=mapper.sex(row),
age=mapper.age(row),
injury_level=mapper.injury_level(row),
type=mapper.type(row),
seatbelt=mapper.seatbelt(row),
seated_pos=mapper.seated_pos(row)
)
persons.append(new_person)
if new_person['type'] == 'DRIVER':
driver_by_veh[veh_id] = new_person
# Parse vehicles
vehicle_reader = csv.reader(vehicle_file)
first_row = next(vehicle_reader)
mapper = FARSVehicleMapper(first_row, year)
vehicles = []
speed_limits_by_acc = {}
surface_conds_by_acc = {}
traffic_controls_by_acc = {}
vehicles_by_acc = {}
for row in vehicle_reader:
if mapper.valid(row):
veh_id = mapper.id(row)
acc_id = mapper.acc_id(row)
if acc_id not in vehicles_by_acc:
vehicles_by_acc[acc_id] = []
vehicles_by_acc[acc_id].append(veh_id)
new_vehicle = vehicle.new(
id=veh_id,
acc_id=acc_id,
driver_sex=mapper.driver_sex(row, driver_by_veh),
driver_age=mapper.driver_age(row, driver_by_veh),
passenger_count=mapper.passenger_count(row),
type=mapper.type(row),
make=mapper.make(row),
model=mapper.model(row),
fuel_type=mapper.fuel_type(row),
hit_and_run=mapper.hit_and_run(row),
skidded=mapper.skidded(row),
rollover=mapper.rollover(row),
jackknifing=mapper.jackknifing(row),
first_impact_area=mapper.first_impact_area(row),
maneuver=mapper.maneuver(row),
prior_location=mapper.prior_location(row),
driver_drinking=mapper.driver_drinking(row)
)
            if year > 2009:
                if acc_id not in speed_limits_by_acc:
                    speed_limits_by_acc[acc_id] = []
                speed_limits_by_acc[acc_id].append(mapper.speed_limit(row))
                if acc_id not in surface_conds_by_acc:
                    surface_conds_by_acc[acc_id] = []
                surface_conds_by_acc[acc_id].append(mapper.surface_cond(row))
                if acc_id not in traffic_controls_by_acc:
                    traffic_controls_by_acc[acc_id] = []
                traffic_controls_by_acc[acc_id].append(mapper.traffic_control(row))
vehicles.append(new_vehicle)
# Parse accidents
accident_reader = csv.reader(accident_file)
first_row = next(accident_reader)
mapper = FARSAccidentMapper(first_row, year)
accidents = []
for row in accident_reader:
if mapper.valid(row, vehicles_by_acc):
new_accident = accident.new(
id=mapper.id(row),
country='USA',
timestamp=mapper.timestamp(row),
day_of_week=mapper.day_of_week(row),
latitude=mapper.latitude(row),
longitude=mapper.longitude(row),
persons_count=mapper.persons_count(row),
fatalities_count=mapper.fatalities_count(row),
vehicles_count=mapper.vehicles_count(row, vehicles_by_acc),
speed_limit=mapper.speed_limit(row, speed_limits_by_acc),
snow=mapper.snow(row),
rain=mapper.rain(row),
wind=mapper.wind(row),
fog=mapper.fog(row),
relation_to_junction=mapper.relation_to_junction(row),
road_class=mapper.road_class(row),
surface_cond=mapper.surface_cond(row, surface_conds_by_acc),
lighting=mapper.lighting(row),
traffic_control=mapper.traffic_control(row, traffic_controls_by_acc),
other_conditions=mapper.other_conditions(row)
)
accidents.append(new_accident)
accident.insert(accidents)
vehicle.insert(vehicles)
person.insert(persons)
finally:
person_file.close()
accident_file.close()
vehicle_file.close()
|
lopiola/integracja_wypadki
|
scripts/fars_per_year_parser.py
|
Python
|
mit
| 5,825
|
"""ethdeveloper URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from .views import HomePageView
from blog import urls as BlogUrls
from tools import urls as ToolsUrls
urlpatterns = [
url(r'^configethdev/', admin.site.urls),
url(r'^tools/', include(ToolsUrls), name="tools_urls"),
url(r'^$', HomePageView.as_view(), name="home_url"),
url(r'^blog/', include(BlogUrls), name="blog_urls"),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
ethdeveloper/ethdeveloper
|
ethdeveloper/urls.py
|
Python
|
mit
| 1,296
|
import databench
import math
import random
class Angular(databench.Analysis):
@databench.on
def connected(self):
"""Run as soon as a browser connects to this."""
inside = 0
for draws in range(1, 10000):
# generate points and check whether they are inside the unit circle
r1 = random.random()
r2 = random.random()
if r1 ** 2 + r2 ** 2 < 1.0:
inside += 1
# every 100 iterations, update status
if draws % 100 != 0:
continue
# debug
yield self.emit('log', {'draws': draws, 'inside': inside})
            # calculate pi and its uncertainty given the current draws:
            # the fraction p of points inside the quarter circle estimates
            # pi/4, and the binomial std of 4*p is 4*sqrt(p*(1-p)/draws)
            p = inside / draws
            uncertainty = 4.0 * math.sqrt(draws * p * (1.0 - p)) / draws
# send status to frontend
yield self.set_state(pi={
'estimate': 4.0 * inside / draws,
'uncertainty': uncertainty,
})
yield self.emit('log', {'action': 'done'})
|
svenkreiss/databench_examples
|
analyses/angular/analysis.py
|
Python
|
mit
| 1,068
|
from ansiblelint import AnsibleLintRule
class SudoRule(AnsibleLintRule):
id = 'ANSIBLE0008'
shortdesc = 'Deprecated sudo'
description = 'Instead of sudo/sudo_user, use become/become_user.'
tags = ['deprecated']
def _check_value(self, play_frag):
results = []
if isinstance(play_frag, dict):
if 'sudo' in play_frag:
results.append(({'sudo': play_frag['sudo']},
'deprecated sudo feature'))
if 'sudo_user' in play_frag:
results.append(({'sudo_user': play_frag['sudo_user']},
'deprecated sudo_user feature'))
if isinstance(play_frag, list):
for item in play_frag:
output = self._check_value(item)
if output:
results += output
return results
def matchplay(self, file, play):
return self._check_value(play)
def matchtask(self, file, task):
return 'sudo' in task or 'sudo_user' in task
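if __name__ == '__main__':
    # A quick check against a hypothetical play fragment: both deprecated
    # keys are reported by _check_value.
    rule = SudoRule()
    fragment = [{'sudo': True, 'tasks': []}, {'sudo_user': 'root'}]
    print(rule._check_value(fragment))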
|
dataxu/ansible-lint
|
lib/ansiblelint/rules/SudoRule.py
|
Python
|
mit
| 1,047
|
import pygame
#pickle is necessary to load our pickled frame values
import pickle
#Player extends the pygame.sprite.Sprite class
class Player(pygame.sprite.Sprite):
#In the main program, we will pass a spritesheet and x-y position values to the constructor
def __init__(self, position, spritesheet):
pygame.sprite.Sprite.__init__(self)
#Load our pickled frame values and assign them to dicts
self.left_states = pickle.load(open("ls.dat", "rb"))
self.right_states = pickle.load(open("rs.dat", "rb"))
self.up_states = pickle.load(open("us.dat", "rb"))
self.down_states = pickle.load(open("ds.dat", "rb"))
#Assign the spritesheet to self.sheet
self.sheet = pygame.image.load(spritesheet)
#'Clip' the sheet so that only one frame is displayed (the first frame of down_states)
self.sheet.set_clip(pygame.Rect(self.down_states[0]))
#Create a rect to animate around the screen
self.image = self.sheet.subsurface(self.sheet.get_clip())
self.rect = self.image.get_rect()
#Assign the position parameter value to the topleft x-y values of the rect
self.rect.topleft = position
#We'll use this later to cycle through frames
self.frame = 0
#We'll use these values to move our character
self.change_x = 0
self.change_y = 0
'''The event handler handles keypresses for our class. If a key is pressed down
or released, the appropriate 'state' is passed to the update
method below.'''
def handle_event(self, event):
#Handles key presses
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
self.update('walk_left')
if event.key == pygame.K_RIGHT:
self.update('walk_right')
if event.key == pygame.K_UP:
self.update('walk_up')
if event.key == pygame.K_DOWN:
self.update('walk_down')
#Handles key releases
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT:
self.update('stand_left')
if event.key == pygame.K_RIGHT:
self.update('stand_right')
if event.key == pygame.K_UP:
self.update('stand_up')
if event.key == pygame.K_DOWN:
self.update('stand_down')
'''This method updates our character by passing the appropriate dict to the clip
method below and moves our rect object. If the direction is left, for example,
the character moves -5 pixels on the x-plane.'''
def update(self, direction):
if direction == 'walk_left':
self.clip(self.left_states)
self.rect.x -= 5
if direction == 'walk_right':
self.clip(self.right_states)
self.rect.x += 5
if direction == 'walk_up':
self.clip(self.up_states)
self.rect.y -= 5
if direction == 'walk_down':
self.clip(self.down_states)
self.rect.y +=5
'''These checks are necessary in order to return our character to a standing
position if no key is being pressed.'''
if direction == 'stand_left':
self.clip(self.left_states[0])
if direction == 'stand_right':
self.clip(self.right_states[0])
if direction == 'stand_up':
self.clip(self.up_states[0])
if direction == 'stand_down':
self.clip(self.down_states[0])
        #Update the image for each pass
self.image = self.sheet.subsurface(self.sheet.get_clip())
'''This method checks to see if it has been passed a dict or a single frame. If it is
a dict (animated), it clips the rect via the get_frame method. If it is a single frame
(standing), it directly clips the frame.'''
def clip(self, clipped_rect):
if type(clipped_rect) is dict:
self.sheet.set_clip(pygame.Rect(self.get_frame(clipped_rect)))
else:
self.sheet.set_clip(pygame.Rect(clipped_rect))
return clipped_rect
    '''This method is used to cycle through frames. Since the 0th element of each frame set
    is an image of the character standing (we don't want to use this), we will instead
    start at the 1st element.'''
def get_frame(self, frame_set):
self.frame += 1
if self.frame > (len(frame_set) - 1):
self.frame = 1
return frame_set[self.frame]
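'''A minimal driver sketch (hypothetical assets: a "sheet.png" spritesheet plus
the pickled frame files ls.dat/rs.dat/us.dat/ds.dat created elsewhere in this
project), illustrative only:
    pygame.init()
    screen = pygame.display.set_mode((640, 480))
    player = Player((100, 100), "sheet.png")
    while True:
        for event in pygame.event.get():
            player.handle_event(event)
        screen.fill((0, 0, 0))
        screen.blit(player.image, player.rect)
        pygame.display.flip()
'''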
|
xorobabel/pygame-2d-jrpg-demo
|
player.py
|
Python
|
mit
| 4,622
|
"""
This module holds all view functions for the authentication module.
These functions include the following:
"""
from flask import Blueprint, flash, redirect, render_template, request, session, url_for
from app import logger
from app.mod_auth.form import LoginForm, RegistrationForm
from app.mod_auth.helper import onAuthRedirect, requireAuth, generateHash
from app.mod_auth.model import AuthLevel, User
auth = Blueprint('auth', __name__, template_folder = 'templates')
@auth.route('/')
def default():
"""The default route for the authentication-module.
"""
return redirect(url_for('auth.info'))
@auth.route('/register', methods = ['GET', 'POST'])
@onAuthRedirect()
def register():
"""This function allows to register a new user to the system.
Upon a GET request a RegistrationForm will be shown to the user.
    Upon a POST request the form will be validated and, if valid, the user
    will be assigned an AuthLevel and his password will be hashed.
    He will then be added to the database and redirected to the default
    route of the authentication-module.
Should the form be invalid, the user will be shown the form again.
"""
form = RegistrationForm(request.form)
if request.method == 'POST' and form.validate():
user = User()
form.populate_obj(user)
user.password = generateHash(user.password)
user.authLevel = AuthLevel.USER
user.save()
logger.info('A user has been added.')
flash('Your user account has been created.')
return redirect(url_for('auth.login'))
return render_template('auth/registration.html', form = form)
@auth.route('/login', methods = ['GET', 'POST'])
@onAuthRedirect()
def login():
"""This function logs a user into the system.
Upon a GET request a LoginForm will be shown to the user.
Upon a POST request the form will be validated and if valid the users
specified password will be hashed and compared to the stored
password.
Should they be equal the user will be logged in (as such
his User object will be stored in the session) and redirected to
the default page of the authentication-module.
Is this not the case or if the form was invalid in the first
place, he will be shown the form again.
"""
form = LoginForm(request.form)
if request.method == 'POST' and form.validate():
user = User.objects(username = form.username.data).first()
        if user is not None:
            if user.password == generateHash(form.password.data):
                session['user'] = user
                session['currency'] = u"\u20AC"
                logger.info('User %s has logged in.' % user.username)
                return redirect(session.get('next', url_for('budget.showSummary')))
        flash('The specified username and/or password were incorrect.')
return render_template('auth/login.html', form = form)
@auth.route('/logout')
@requireAuth()
def logout():
"""This function logs a user out of the system.
    Should the user be logged in, his User object will be popped from the
session and he will be redirected to the default page for the
authentication-module.
Should he not be logged in, please see: app.mod_auth.helper.requireAuth
"""
logger.info('User %s has logged out.' % session.get('user')['username'])
session.pop('user')
return redirect(url_for('budget.showSummary'))
@auth.route('/info')
@requireAuth()
def info():
return "This is a test."
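# Mounting sketch: the blueprint is registered on the application object
# elsewhere in this project; the idiom is the standard Flask one, e.g.
#   app.register_blueprint(auth, url_prefix='/auth')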
|
Zillolo/mana-vault
|
app/mod_auth/controller.py
|
Python
|
mit
| 3,639
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'name': 'StegaTonic',
'description': 'StegaTonic',
'author': 'Michael Dubell',
'url': 'https://github.com/mjdubell/stegatonic',
'download_url': 'https://github.com/mjdubell/stegatonic',
'version': '1.0',
'install_requires': ['Pillow', 'nose', 'argparse', 'pycrypto'],
'packages': ['stegatonic'],
'scripts': [],
'license': 'LICENSE.txt'
}
setup(**config)
|
mjdubell/stegatonic
|
setup.py
|
Python
|
mit
| 524
|
# vim:ts=4:sts=4:sw=4:expandtab
"""The core of the system. Manages the database and operational logic. Functionality is
exposed over Thrift.
"""
import sys
import os
def manage():
from django.core.management import execute_manager
settings_module_name = os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'satori.core.settings')
__import__(settings_module_name)
settings_module = sys.modules[settings_module_name]
# HACK
import django.core.management
old_fmm = django.core.management.find_management_module
def find_management_module(app_name):
if app_name == 'satori.core':
return os.path.join(os.path.dirname(__file__), 'management')
else:
return old_fmm(app_name)
django.core.management.find_management_module = find_management_module
# END OF HACK
execute_manager(settings_module)
|
zielmicha/satori
|
satori.core/satori/core/__init__.py
|
Python
|
mit
| 876
|
from ..errors import ImproperResponseError
class CrowdResponse(object):
crowd_request = None
method = None
status = None
task = None
response = None
path = None
def __init__(self, response, crowd_request, task):
try:
self.task = task
self.crowd_request = crowd_request
self.method = crowd_request.get_method()
self.response = response
self.status = response.get("status")
self.path = response.get("path")
        except Exception:
            raise ImproperResponseError(value="Invalid Response.")
def __repr__(self):
return "<CrowdResponse: %s-%s-%s>" % (self.task.get_name(), self.crowd_request.get_method(), self.status)
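if __name__ == '__main__':
    # Smoke-test sketch with hypothetical stand-ins for the request and task.
    class _Stub(object):
        def get_method(self):
            return 'GET'
        def get_name(self):
            return 'demo-task'
    stub = _Stub()
    print(CrowdResponse({'status': 'OK', 'path': '/demo'}, stub, stub))
    # prints: <CrowdResponse: demo-task-GET-OK>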
|
Project-EPIC/crowdrouter
|
crowdrouter/context/crowdresponse.py
|
Python
|
mit
| 749
|
# Generated by Django 2.2 on 2020-03-10 19:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('huntserver', '0052_remove_puzzle_num_pages'),
]
operations = [
migrations.AlterField(
model_name='team',
name='playtest_end_date',
field=models.DateTimeField(blank=True, help_text='The date/time at which a hunt will no longer be available to playtesters', null=True),
),
migrations.AlterField(
model_name='team',
name='playtest_start_date',
            field=models.DateTimeField(blank=True, help_text='The date/time at which a hunt will become available to the playtesters', null=True),
),
]
|
dlareau/puzzlehunt_server
|
huntserver/migrations/0053_auto_20200310_1503.py
|
Python
|
mit
| 749
|
import AppKit
from vanilla import VanillaBaseObject
columnPlacements = dict(
leading=AppKit.NSGridCellPlacementLeading,
center=AppKit.NSGridCellPlacementCenter,
trailing=AppKit.NSGridCellPlacementTrailing,
fill=AppKit.NSGridCellPlacementFill
)
rowPlacements = dict(
top=AppKit.NSGridCellPlacementTop,
center=AppKit.NSGridCellPlacementCenter,
bottom=AppKit.NSGridCellPlacementBottom,
fill=AppKit.NSGridCellPlacementFill
)
rowAlignments = dict(
firstBaseline=AppKit.NSGridRowAlignmentFirstBaseline,
lastBaseline=AppKit.NSGridRowAlignmentLastBaseline,
none=AppKit.NSGridRowAlignmentNone
)
class GridView(VanillaBaseObject):
"""
A view that allows the placement of other views within a grid.
**posSize** Tuple of form *(left, top, width, height)* or *"auto"* representing
the position and size of the grid view.
**contents** The contents to display within the grid. See below for structure.
**columnWidth** The width for columns.
**columnSpacing** The amount of spacing between columns.
**columnPadding** The (left, right) padding for columns.
**columnPlacement** The horizontal placement of content within columns. Options:
* "leading"
* "center"
* "trailing"
* "fill"
**rowHeight** The height for rows.
**rowSpacing** The amount of spacing between rows.
**rowPadding** The (top, bottom) padding for rows.
**rowPlacement** The vertical placement of content within rows. Options:
* "top"
* "center"
* "bottom"
* "fill"
**rowAlignment** The alignment of the row. Options:
* "firstBaseline"
* "lastBaseline"
* "none"
**columnDescriptions** An optional list of dictionaries
defining specific attributes for the columns. Options:
* "width"
* "columnPadding"
* "columnPlacement"
**Contents Definition Structure**
Contents are defined as with a list of row definitions.
A row definition is a list of cell definitions or a
dictionary with this structure:
* **cells** A list of cell definitions.
* **rowHeight** (optional) A height for the row that overrides
the GridView level row height.
* **rowPadding** (optional) A (top, bottom) padding definition
for the row that overrides the GridView level row padding.
* **rowPlacement** (optional) A placement for the row that
overrides the GridView level row placement.
* **rowAlignment** (optional) An alignment for the row that
overrides the GridView level row placement.
Cells are defined with either a Vanilla object, a NSView
(or NSView subclass) object, None, or a dictionary with
this structure:
* **view** A Vanilla object or NSView (or NSView subclass) object.
* **width** (optional) A width to apply to the view.
* **height** (optional) A height to apply to the view.
* **columnPlacement** (optional) A horizontal placement for the
cell that overrides the row level or GridView level placement.
* **rowPlacement** (optional) A vertical placement for the cell
that overrides the row level or GridView level placement.
* **rowAlignment** (optional) A row alignment for the cell that
overrides the row level or GridView level alignment.
If a cell is defined as None, the cell will be merged with the
first cell directly above that has content.
"""
nsGridViewClass = AppKit.NSGridView
def __init__(self,
posSize,
contents,
columnWidth=None,
columnSpacing=0,
columnPadding=(0, 0),
columnPlacement="leading",
rowHeight=None,
rowSpacing=0,
rowPadding=(0, 0),
rowPlacement="top",
rowAlignment="firstBaseline",
columnDescriptions=None
):
if columnDescriptions is None:
columnDescriptions = [{} for i in range(len(contents[0]))]
self._setupView(self.nsGridViewClass, posSize)
gridView = self.getNSGridView()
gridView.setColumnSpacing_(columnSpacing)
gridView.setRowSpacing_(rowSpacing)
gridView.setXPlacement_(columnPlacements[columnPlacement])
gridView.setYPlacement_(rowPlacements[rowPlacement])
gridView.setRowAlignment_(rowAlignments[rowAlignment])
self._columnWidth = columnWidth
self._columnPadding = columnPadding
self._rowHeight = rowHeight
self._rowPadding = rowPadding
self._buildColumns(columnDescriptions)
self._buildRows(contents)
def getNSGridView(self):
return self._getContentView()
# Input Normalizing
def _normalizeRows(self, rows):
rows = [
self._normalizeRow(row)
for row in rows
]
return rows
    def _normalizeRow(self, row):
        if not isinstance(row, dict):
            row = dict(cells=row)
        # the public key is "rowHeight"; normalize it to the internal "height"
        if "height" not in row:
            row["height"] = row.get("rowHeight", self._rowHeight)
        if "rowPadding" not in row:
            row["rowPadding"] = self._rowPadding
        row["cells"] = self._normalizeCells(row["cells"])
        return row
def _normalizeCells(self, cells):
cells = [
self._normalizeCell(cell)
for cell in cells
]
return cells
def _normalizeCell(self, cell):
if cell is None:
return None
if not isinstance(cell, dict):
cell = dict(view=cell)
return cell
# Building
def _buildColumns(self, columnDescriptions):
gridView = self.getNSGridView()
for columnDescription in columnDescriptions:
column = gridView.addColumnWithViews_([])
self._setColumnAttributes(column, columnDescription)
def _setColumnAttributes(self, column, columnDescription):
width = columnDescription.get("width", self._columnWidth)
columnPadding = columnDescription.get("columnPadding", self._columnPadding)
columnPlacement = columnDescription.get("columnPlacement")
if width is not None:
column.setWidth_(width)
column.setLeadingPadding_(columnPadding[0])
column.setTrailingPadding_(columnPadding[1])
if columnPlacement is not None:
column.setXPlacement_(columnPlacements[columnPlacement])
def _populateColumns(self, columns):
gridView = self.getNSGridView()
for columnIndex, cells in enumerate(columns):
column = gridView.columnAtIndex_(columnIndex)
self._populateColumn(column, cells)
def _populateColumn(self, column, cells):
gridView = self.getNSGridView()
columnIndex = gridView.indexOfColumn_(column)
# merge cells
if None in cells:
mergers = [[]]
for rowIndex, cell in enumerate(cells):
if cell is None:
mergers[-1].append(rowIndex)
else:
if mergers[-1]:
mergers.append([])
for merger in mergers:
if not merger:
continue
start = merger[0] - 1
# can't merge first row with a nonexistent previous row
if start == -1:
continue
end = merger[-1]
length = end - start
gridView.mergeCellsInHorizontalRange_verticalRange_(
AppKit.NSMakeRange(columnIndex, 1),
AppKit.NSMakeRange(start, length)
)
# place the views
for rowIndex, cellData in enumerate(cells):
self._populateCell(columnIndex, rowIndex, cellData)
def _buildRows(self, rows):
gridView = self.getNSGridView()
rows = self._normalizeRows(rows)
for rowIndex in range(len(rows)):
gridView.addRowWithViews_([])
# set row attributes
for rowIndex, rowData in enumerate(rows):
row = gridView.rowAtIndex_(rowIndex)
self._setRowAttributes(row, rowData)
# populate columns
columns = {}
for rowData in rows:
cells = rowData["cells"]
for columnIndex, view in enumerate(cells):
if columnIndex not in columns:
columns[columnIndex] = []
columns[columnIndex].append(view)
columns = [cells for columnIndex, cells in sorted(columns.items())]
self._populateColumns(columns)
def _setRowAttributes(self, row, rowData):
height = rowData["height"]
rowPadding = rowData["rowPadding"]
rowPlacement = rowData.get("rowPlacement")
rowAlignment = rowData.get("rowAlignment")
if height is not None:
row.setHeight_(height)
row.setTopPadding_(rowPadding[0])
row.setBottomPadding_(rowPadding[1])
if rowPlacement is not None:
row.setYPlacement_(rowPlacements[rowPlacement])
if rowAlignment is not None:
row.setRowAlignment_(rowAlignments[rowAlignment])
def _populateRow(self, row, cells):
gridView = self.getNSGridView()
rowIndex = gridView.indexOfRow_(row)
for columnIndex, cellData in enumerate(cells):
self._populateCell(columnIndex, rowIndex, cellData)
def _populateCell(self, columnIndex, rowIndex, cellData):
if cellData is None:
return
gridView = self.getNSGridView()
view = cellData["view"]
if isinstance(view, VanillaBaseObject):
view = view._nsObject
cell = gridView.cellAtColumnIndex_rowIndex_(columnIndex, rowIndex)
cell.setContentView_(view)
columnPlacement = cellData.get("columnPlacement")
rowPlacement = cellData.get("rowPlacement")
rowAlignment = cellData.get("rowAlignment")
width = cellData.get("width")
height = cellData.get("height")
# special handling and defaults for
# views without an intrinsic size
if view.intrinsicContentSize() == (-1, -1):
if width is None:
width = gridView.columnAtIndex_(columnIndex).width()
if height is None:
height = gridView.rowAtIndex_(rowIndex).height()
if rowAlignment is None:
rowAlignment = "none"
if columnPlacement is None:
columnPlacement = "leading"
if rowPlacement is None:
rowPlacement = "top"
if columnPlacement is not None:
cell.setXPlacement_(columnPlacements[columnPlacement])
if rowPlacement is not None:
cell.setYPlacement_(rowPlacements[rowPlacement])
if rowAlignment is not None:
cell.setRowAlignment_(rowAlignments[rowAlignment])
constraints = []
if width is not None:
constraint = AppKit.NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_(
view,
AppKit.NSLayoutAttributeWidth,
AppKit.NSLayoutRelationEqual,
None,
AppKit.NSLayoutAttributeWidth,
1.0,
width
)
constraints.append(constraint)
if height is not None:
constraint = AppKit.NSLayoutConstraint.constraintWithItem_attribute_relatedBy_toItem_attribute_multiplier_constant_(
view,
AppKit.NSLayoutAttributeHeight,
AppKit.NSLayoutRelationEqual,
None,
AppKit.NSLayoutAttributeHeight,
1.0,
height
)
constraints.append(constraint)
if constraints:
cell.setCustomPlacementConstraints_(constraints)
# -------
# Columns
# -------
def getColumnCount(self):
"""
Get the number of columns.
"""
gridView = self.getNSGridView()
return gridView.numberOfColumns()
def columnIsVisible(self, index):
"""
Get the visibility of column at *index*.
"""
gridView = self.getNSGridView()
column = gridView.columnAtIndex_(index)
return not column.isHidden()
def showColumn(self, index, value):
"""
Set the visibility of column at *index*.
"""
gridView = self.getNSGridView()
column = gridView.columnAtIndex_(index)
column.setHidden_(not value)
def appendColumn(self, cells, columnWidth=None, columnPadding=None, columnPlacement=None):
"""
Append a column and populate it with a list of cells.
The cells must have the same structure as defined in *__init__*.
"""
gridView = self.getNSGridView()
column = gridView.addColumnWithViews_([])
columnDescription = {}
        if columnWidth is not None:
            columnDescription["width"] = columnWidth
if columnPadding is not None:
columnDescription["columnPadding"] = columnPadding
if columnPlacement is not None:
columnDescription["columnPlacement"] = columnPlacement
self._setColumnAttributes(column, columnDescription)
cells = self._normalizeCells(cells)
self._populateColumn(column, cells)
def insertColumn(self, index, cells, columnWidth=None, columnPadding=None, columnPlacement=None):
"""
Insert a column at *index* and populate it with a list of cells.
The cells must have the same structure as defined in *__init__*.
"""
gridView = self.getNSGridView()
column = gridView.insertColumnAtIndex_withViews_(index, [])
columnDescription = {}
        if columnWidth is not None:
            columnDescription["width"] = columnWidth
if columnPadding is not None:
columnDescription["columnPadding"] = columnPadding
if columnPlacement is not None:
columnDescription["columnPlacement"] = columnPlacement
self._setColumnAttributes(column, columnDescription)
cells = self._normalizeCells(cells)
self._populateColumn(column, cells)
def removeColumn(self, index):
"""
Remove column at *index*.
"""
gridView = self.getNSGridView()
gridView.removeColumnAtIndex_(index)
def moveColumn(self, fromIndex, toIndex):
"""
Move column at *fromIndex* to *toIndex*.
"""
gridView = self.getNSGridView()
gridView.moveColumnAtIndex_toIndex_(fromIndex, toIndex)
# ----
# Rows
# ----
def getRowCount(self):
"""
Get the number of rows.
"""
gridView = self.getNSGridView()
return gridView.numberOfRows()
def showRow(self, index, value):
"""
Set the visibility of row at *index*.
"""
gridView = self.getNSGridView()
row = gridView.rowAtIndex_(index)
row.setHidden_(not value)
def rowIsVisible(self, index):
"""
Get the visibility of row at *index*.
"""
gridView = self.getNSGridView()
row = gridView.rowAtIndex_(index)
return not row.isHidden()
def appendRow(self, cells, rowHeight=None, rowPadding=None, rowPlacement=None, rowAlignment=None):
"""
Append a row and populate it with a list of cells.
The cells must have the same structure as defined in *__init__*.
Merging is not possible with this method.
"""
gridView = self.getNSGridView()
rowDescription = dict(cells=cells)
if rowHeight is not None:
rowDescription["rowHeight"] = rowHeight
if rowPadding is not None:
rowDescription["rowPadding"] = rowPadding
if rowPlacement is not None:
rowDescription["rowPlacement"] = rowPlacement
if rowAlignment is not None:
rowDescription["rowAlignment"] = rowAlignment
rowDescription = self._normalizeRow(rowDescription)
row = gridView.addRowWithViews_([])
self._setRowAttributes(row, rowDescription)
self._populateRow(row, rowDescription["cells"])
def insertRow(self, index, cells, rowHeight=None, rowPadding=None, rowPlacement=None, rowAlignment=None):
"""
Insert a row at *index* and populate it with a list of cells.
The cells definition must have the same structure as defined in *__init__*.
Merging is not possible with this method.
"""
gridView = self.getNSGridView()
rowDescription = dict(cells=cells)
if rowHeight is not None:
rowDescription["rowHeight"] = rowHeight
if rowPadding is not None:
rowDescription["rowPadding"] = rowPadding
if rowPlacement is not None:
rowDescription["rowPlacement"] = rowPlacement
if rowAlignment is not None:
rowDescription["rowAlignment"] = rowAlignment
rowDescription = self._normalizeRow(rowDescription)
row = gridView.insertRowAtIndex_withViews_(index, [])
self._setRowAttributes(row, rowDescription)
self._populateRow(row, rowDescription["cells"])
def removeRow(self, index):
"""
Remove row at *index*.
"""
gridView = self.getNSGridView()
gridView.removeRowAtIndex_(index)
def moveRow(self, fromIndex, toIndex):
"""
Move row at *fromIndex* to *toIndex*.
"""
gridView = self.getNSGridView()
gridView.moveRowAtIndex_toIndex_(fromIndex, toIndex)
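# A construction sketch (hypothetical controls; a running Cocoa application is
# required, so this is illustrative only):
#   from vanilla import Window, TextBox, EditText
#   w = Window((300, 200))
#   w.grid = GridView(
#       "auto",
#       contents=[
#           [TextBox("auto", "Name:"), EditText("auto")],
#           dict(cells=[TextBox("auto", "Size:"), EditText("auto")], rowHeight=22),
#       ],
#       columnPlacement="trailing",
#   )
#   w.open()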
|
typesupply/vanilla
|
Lib/vanilla/vanillaGridView.py
|
Python
|
mit
| 17,625
|
import matplotlib
matplotlib.use('Agg')
import numpy as np
import scipy.stats
import matplotlib.pylab as plt
from .context import aep, ep
np.random.seed(42)
import pdb
def plot_model_no_control(model, plot_title='', name_suffix=''):
# plot function
mx, vx = model.get_posterior_x()
mins = np.min(mx, axis=0) - 0.5
maxs = np.max(mx, axis=0) + 0.5
nGrid = 50
xspaced = np.linspace(mins[0], maxs[0], nGrid)
yspaced = np.linspace(mins[1], maxs[1], nGrid)
xx, yy = np.meshgrid(xspaced, yspaced)
Xplot = np.vstack((xx.flatten(), yy.flatten())).T
mf, vf = model.predict_f(Xplot)
fig = plt.figure()
plt.imshow((mf[:, 0]).reshape(*xx.shape),
vmin=mf.min(), vmax=mf.max(), origin='lower',
extent=[mins[0], maxs[0], mins[1], maxs[1]], aspect='auto')
plt.colorbar()
plt.contour(
xx, yy, (mf[:, 0]).reshape(*xx.shape),
colors='k', linewidths=2, zorder=100)
zu = model.dyn_layer.zu
plt.plot(zu[:, 0], zu[:, 1], 'wo', mew=0, ms=4)
for i in range(mx.shape[0] - 1):
plt.plot(mx[i:i + 2, 0], mx[i:i + 2, 1],
'-bo', ms=3, linewidth=2, zorder=101)
plt.xlabel(r'$x_{t, 1}$')
plt.ylabel(r'$x_{t, 2}$')
plt.xlim([mins[0], maxs[0]])
plt.ylim([mins[1], maxs[1]])
plt.title(plot_title)
plt.savefig('/tmp/hh_gpssm_dim_0' + name_suffix + '.pdf')
fig = plt.figure()
plt.imshow((mf[:, 1]).reshape(*xx.shape),
vmin=mf.min(), vmax=mf.max(), origin='lower',
extent=[mins[0], maxs[0], mins[1], maxs[1]], aspect='auto')
plt.colorbar()
plt.contour(
xx, yy, (mf[:, 1]).reshape(*xx.shape),
colors='k', linewidths=2, zorder=100)
zu = model.dyn_layer.zu
plt.plot(zu[:, 0], zu[:, 1], 'wo', mew=0, ms=4)
for i in range(mx.shape[0] - 1):
plt.plot(mx[i:i + 2, 0], mx[i:i + 2, 1],
'-bo', ms=3, linewidth=2, zorder=101)
plt.xlabel(r'$x_{t, 1}$')
plt.ylabel(r'$x_{t, 2}$')
plt.xlim([mins[0], maxs[0]])
plt.ylim([mins[1], maxs[1]])
plt.title(plot_title)
plt.savefig('/tmp/hh_gpssm_dim_1' + name_suffix + '.pdf')
def find_interval(arr, val):
intervals = []
found = False
start_idx = 0
end_idx = 0
for i in range(arr.shape[0]):
if (not found) and arr[i] == val:
found = True
start_idx = i
elif found and arr[i] != val:
found = False
end_idx = i
intervals.append([start_idx, end_idx])
if found:
intervals.append([start_idx, arr.shape[0]])
return intervals
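# e.g. find_interval(np.array([0, 1, 1, 0, 1]), 1) -> [[1, 3], [4, 5]]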
def plot_model_with_control(model, plot_title='', name_suffix=''):
# plot function
mx, vx = model.get_posterior_x()
mins = np.min(mx, axis=0) - 0.5
maxs = np.max(mx, axis=0) + 0.5
nGrid = 50
xspaced = np.linspace(mins[0], maxs[0], nGrid)
yspaced = np.linspace(mins[1], maxs[1], nGrid)
xx, yy = np.meshgrid(xspaced, yspaced)
Xplot = np.vstack((xx.flatten(), yy.flatten())).T
x_control = model.x_control
c_unique = np.unique(x_control)
colors = ['g', 'b', 'r', 'm']
for c_ind, c_val in enumerate(c_unique):
idxs = np.where(x_control == c_val)[0]
X_c_pad = c_val * np.ones((Xplot.shape[0], 1))
Xplot_c = np.hstack((Xplot, X_c_pad))
intervals = find_interval(x_control, c_val)
# mins = np.min(mx[idxs, :], axis=0)-0.5
# maxs = np.max(mx[idxs, :], axis=0)+0.5
mf, vf = model.predict_f(Xplot_c)
fig = plt.figure()
plt.imshow((mf[:, 0]).reshape(*xx.shape),
vmin=mf.min(), vmax=mf.max(), origin='lower',
extent=[mins[0], maxs[0], mins[1], maxs[1]], aspect='auto', alpha=0.4, cmap=plt.cm.viridis)
plt.colorbar()
plt.contour(
xx, yy, (mf[:, 0]).reshape(*xx.shape),
colors='k', linewidths=2, zorder=100)
zu = model.dyn_layer.zu
plt.plot(zu[:, 0], zu[:, 1], 'ko', mew=0, ms=4)
# plt.plot(mx[idxs, 0], mx[idxs, 1], 'bo', ms=3, zorder=101)
for k, inter in enumerate(intervals):
idxs = np.arange(inter[0], inter[1])
for i in range(inter[0], inter[1] - 1):
plt.plot(mx[i:i + 2, 0], mx[i:i + 2, 1],
'-o', color=colors[k], ms=3, linewidth=1, zorder=101)
plt.xlabel(r'$x_{t, 1}$')
plt.ylabel(r'$x_{t, 2}$')
plt.xlim([mins[0], maxs[0]])
plt.ylim([mins[1], maxs[1]])
plt.title(plot_title)
plt.savefig('/tmp/hh_gpssm_dim_0' +
name_suffix + '_c_%.2f.pdf' % c_val)
fig = plt.figure()
plt.imshow((mf[:, 1]).reshape(*xx.shape),
vmin=mf.min(), vmax=mf.max(), origin='lower',
extent=[mins[0], maxs[0], mins[1], maxs[1]], aspect='auto', alpha=0.4, cmap=plt.cm.viridis)
plt.colorbar()
plt.contour(
xx, yy, (mf[:, 1]).reshape(*xx.shape),
colors='k', linewidths=2, zorder=100)
zu = model.dyn_layer.zu
plt.plot(zu[:, 0], zu[:, 1], 'ko', mew=0, ms=4)
# plt.plot(mx[idxs, 0], mx[idxs, 1], 'bo', ms=3, zorder=101)
for k, inter in enumerate(intervals):
idxs = np.arange(inter[0], inter[1])
for i in range(inter[0], inter[1] - 1):
plt.plot(mx[i:i + 2, 0], mx[i:i + 2, 1],
'-o', color=colors[k], ms=3, linewidth=1, zorder=101)
# plt.xlabel(r'$x_{t, 1}$')
# plt.ylabel(r'$x_{t, 2}$')
plt.xlim([mins[0], maxs[0]])
plt.ylim([mins[1], maxs[1]])
plt.title(plot_title)
plt.savefig('/tmp/hh_gpssm_dim_1' +
name_suffix + '_c_%.2f.pdf' % c_val)
def plot_posterior_linear(params_fname, fig_fname, control=False, M=20):
# load dataset
data = np.loadtxt('./sandbox/hh_data.txt')
    # use the voltage and potassium current
data = data / np.std(data, axis=0)
y = data[:, :4]
xc = data[:, [-1]]
# init hypers
Dlatent = 2
Dobs = y.shape[1]
T = y.shape[0]
if control:
x_control = xc
no_panes = 5
else:
x_control = None
no_panes = 4
model_aep = aep.SGPSSM_Linear(y, Dlatent, M,
lik='Gaussian', prior_mean=0, prior_var=1000, x_control=x_control)
model_aep.load_model(params_fname)
my, vy, vyn = model_aep.get_posterior_y()
vy_diag = np.diagonal(vy, axis1=1, axis2=2)
vyn_diag = np.diagonal(vyn, axis1=1, axis2=2)
cs = ['k', 'r', 'b', 'g']
labels = ['V', 'm', 'n', 'h']
plt.figure()
t = np.arange(T)
for i in range(4):
yi = y[:, i]
mi = my[:, i]
vi = vy_diag[:, i]
vin = vyn_diag[:, i]
plt.subplot(no_panes, 1, i + 1)
plt.fill_between(t, mi + 2 * np.sqrt(vi), mi - 2 *
np.sqrt(vi), color=cs[i], alpha=0.4)
plt.plot(t, mi, '-', color=cs[i])
plt.plot(t, yi, '--', color=cs[i])
plt.ylabel(labels[i])
plt.xticks([])
plt.yticks([])
if control:
plt.subplot(no_panes, 1, no_panes)
plt.plot(t, x_control, '-', color='m')
plt.ylabel('I')
plt.yticks([])
plt.xlabel('t')
plt.savefig(fig_fname)
if control:
plot_model_with_control(model_aep, '', '_linear_with_control')
else:
plot_model_no_control(model_aep, '', '_linear_no_control')
def plot_posterior_gp(params_fname, fig_fname, control=False, M=20):
# load dataset
data = np.loadtxt('./sandbox/hh_data.txt')
    # use the voltage and potassium current
data = data / np.std(data, axis=0)
y = data[:, :4]
xc = data[:, [-1]]
# init hypers
Dlatent = 2
Dobs = y.shape[1]
T = y.shape[0]
if control:
x_control = xc
no_panes = 5
else:
x_control = None
no_panes = 4
model_aep = aep.SGPSSM_GP(y, Dlatent, M,
lik='Gaussian', prior_mean=0, prior_var=1000, x_control=x_control)
model_aep.load_model(params_fname)
my, vy, vyn = model_aep.get_posterior_y()
cs = ['k', 'r', 'b', 'g']
labels = ['V', 'm', 'n', 'h']
plt.figure()
t = np.arange(T)
for i in range(4):
yi = y[:, i]
mi = my[:, i]
vi = vy[:, i]
vin = vyn[:, i]
plt.subplot(no_panes, 1, i + 1)
plt.fill_between(t, mi + 2 * np.sqrt(vi), mi - 2 *
np.sqrt(vi), color=cs[i], alpha=0.4)
plt.plot(t, mi, '-', color=cs[i])
plt.plot(t, yi, '--', color=cs[i])
plt.ylabel(labels[i])
plt.xticks([])
plt.yticks([])
if control:
plt.subplot(no_panes, 1, no_panes)
plt.plot(t, x_control, '-', color='m')
plt.ylabel('I')
plt.yticks([])
plt.xlabel('t')
plt.savefig(fig_fname)
# if control:
# plot_model_with_control(model_aep, '', '_gp_with_control')
# else:
# plot_model_no_control(model_aep, '', '_gp_no_control')
def plot_prediction_gp(params_fname, fig_fname, M=20):
# load dataset
data = np.loadtxt('./sandbox/hh_data.txt')
    # use the voltage and potassium current
data = data / np.std(data, axis=0)
y = data[:, :4]
xc = data[:, [-1]]
# init hypers
Dlatent = 2
Dobs = y.shape[1]
T = y.shape[0]
x_control = xc
# x_control_test = np.flipud(x_control)
x_control_test = x_control * 1.5
no_panes = 5
model_aep = aep.SGPSSM_GP(y, Dlatent, M,
lik='Gaussian', prior_mean=0, prior_var=1000, x_control=x_control)
model_aep.load_model(params_fname)
    print('ls %s' % np.exp(model_aep.dyn_layer.ls))
my, vy, vyn = model_aep.get_posterior_y()
mxp, vxp, myp, vyp, vynp = model_aep.predict_forward(T, x_control_test)
cs = ['k', 'r', 'b', 'g']
labels = ['V', 'm', 'n', 'h']
plt.figure()
t = np.arange(T)
for i in range(4):
yi = y[:, i]
mi = my[:, i]
vi = vy[:, i]
vin = vyn[:, i]
mip = myp[:, i]
vip = vyp[:, i]
vinp = vynp[:, i]
plt.subplot(5, 1, i + 1)
plt.fill_between(t, mi + 2 * np.sqrt(vi), mi - 2 *
np.sqrt(vi), color=cs[i], alpha=0.4)
plt.plot(t, mi, '-', color=cs[i])
plt.fill_between(np.arange(T, 2 * T), mip + 2 * np.sqrt(vip),
mip - 2 * np.sqrt(vip), color=cs[i], alpha=0.4)
plt.plot(np.arange(T, 2 * T), mip, '-', color=cs[i])
plt.plot(t, yi, '--', color=cs[i])
plt.axvline(x=T, color='k', linewidth=2)
plt.ylabel(labels[i])
plt.xticks([])
plt.yticks([])
plt.subplot(no_panes, 1, no_panes)
plt.plot(t, x_control, '-', color='m')
plt.plot(np.arange(T, 2 * T), x_control_test, '-', color='m')
plt.axvline(x=T, color='k', linewidth=2)
plt.ylabel('I')
plt.yticks([])
plt.xlabel('t')
plt.savefig(fig_fname)
def model_linear(params_fname, control=False, M=20, alpha=0.5):
# load dataset
data = np.loadtxt('./sandbox/hh_data.txt')
data = data / np.std(data, axis=0)
y = data[:, :4]
xc = data[:, [-1]]
# init hypers
Dlatent = 2
Dobs = y.shape[1]
T = y.shape[0]
R = np.ones(Dobs) * np.log(0.01) / 2
lsn = np.log(0.01) / 2
params = {'sn': lsn, 'R': R}
# create AEP model
if control:
x_control = xc
else:
x_control = None
model_aep = aep.SGPSSM_Linear(y, Dlatent, M,
lik='Gaussian', prior_mean=0, prior_var=1000, x_control=x_control)
hypers = model_aep.init_hypers(y)
for key in params.keys():
hypers[key] = params[key]
model_aep.update_hypers(hypers)
# optimise
model_aep.set_fixed_params(['R', 'sn', 'sf'])
# model_aep.set_fixed_params(['sf'])
opt_hypers = model_aep.optimise(
method='L-BFGS-B', alpha=alpha, maxiter=20000, reinit_hypers=False)
model_aep.save_model(params_fname)
def model_gp(params_fname, control=False, M=20, alpha=0.5):
# TODO: predict with control
# load dataset
data = np.loadtxt('./sandbox/hh_data.txt')
data = data / np.std(data, axis=0)
y = data[:, :4]
xc = data[:, [-1]]
# init hypers
Dlatent = 2
Dobs = y.shape[1]
T = y.shape[0]
R = np.log([0.02]) / 2
lsn = np.log([0.02]) / 2
params = {'sn': lsn, 'sn_emission': R}
# create AEP model
if control:
x_control = xc
else:
x_control = None
model_aep = aep.SGPSSM_GP(y, Dlatent, M,
lik='Gaussian', prior_mean=0, prior_var=1000, x_control=x_control)
hypers = model_aep.init_hypers(y)
for key in params.keys():
hypers[key] = params[key]
model_aep.update_hypers(hypers)
# optimise
# model_aep.set_fixed_params(['sf_emission', 'sf_dynamic'])
model_aep.set_fixed_params(
['sf_emission', 'sf_dynamic', 'sn', 'sn_emission'])
opt_hypers = model_aep.optimise(
method='L-BFGS-B', alpha=alpha, maxiter=2000, reinit_hypers=False)
model_aep.save_model(params_fname)
if __name__ == '__main__':
M = 30
alpha = 0.4
# model_linear('/tmp/hh_gpssm_linear_no_control.pickle', control=False, M=M, alpha=alpha)
# plot_posterior_linear(
# '/tmp/hh_gpssm_linear_no_control.pickle',
# '/tmp/hh_gpssm_linear_no_control_posterior.pdf',
# control=False, M=M)
# model_linear('/tmp/hh_gpssm_linear_with_control.pickle', control=True, M=M, alpha=alpha)
# plot_posterior_linear(
# '/tmp/hh_gpssm_linear_with_control.pickle',
# '/tmp/hh_gpssm_linear_with_control_posterior.pdf',
# control=True, M=M)
# model_gp('/tmp/hh_gpssm_gp_no_control.pickle', control=False, M=M, alpha=alpha)
# plot_posterior_gp(
# '/tmp/hh_gpssm_gp_no_control.pickle',
# '/tmp/hh_gpssm_gp_no_control_posterior.pdf',
# control=False, M=M)
model_gp('/tmp/hh_gpssm_gp_with_control.pickle',
control=True, M=M, alpha=alpha)
plot_posterior_gp(
'/tmp/hh_gpssm_gp_with_control.pickle',
'/tmp/hh_gpssm_gp_with_control_posterior.pdf',
control=True, M=M)
plot_prediction_gp('/tmp/hh_gpssm_gp_with_control.pickle',
'/tmp/hh_gpssm_gp_with_control_prediction.pdf',
M=M)
|
thangbui/geepee
|
examples/gpssm_hodgkin_huxley.py
|
Python
|
mit
| 14,316
|
##################################################
#
# test_dev_parse.py - development tests
#
##################################################
import sys, unittest, re
sys.path.append("/home/gwatson/Work/GP4/src")
try:
from GP4.GP4_CompilerHelp import compile_string
import GP4.GP4_Exceptions
except ImportError, err:
print "Unable to load GP4 libs. sys.path is:"
for p in sys.path: print "\t",p
print err
from GP4_Test import simple_test, parse_and_run_test, GP4_Test
class test_dev(GP4_Test):
""" Test header decl and header insts -----------------------------------------"""
def test1(self, debug=1):
program = """
layout vlan_tag {
fields {
pcp : 3 signed, saturating;
vid : 12;
ethertype : 16;
vid2 : 9;
}
length (2+1) ;
max_length 33;
}
layout hdr2 {fields { a : 8 ; } }
vlan_tag metadata vlan_instance;
vlan_tag vlan_instance_stack [ 5 ];
"""
p4 = simple_test(program, debug=debug)
""" Test syntax error handling ---------------------------------------------------"""
def test2(self, debug=1):
program = """ layout vlan_tag { }"""
try:
p4 = simple_test(program, debug=debug)
except GP4.GP4_Exceptions.SyntaxError,err:
print "Syntax Error was expected"
""" Test parser funcs ------------------------------------------------------------"""
def test3(self, debug=1):
program = """
parser we_are_done { return P4_PARSING_DONE ; }
parser nxt_is_done { return we_are_done ; }
parser prsr_switch { return switch ( L2.DA ) { 1 : nxt_state ; } }
parser prsr_switch2 { return switch ( L2.DA, L2.SA ) {
12 : nxt_is_done;
5, 9 : five_or_nine;
800 mask 22,99 : masked_state;
default : def_state;
} }
parser do_stuff { extract ( L2_hdr ) ;
extract ( vlan_id[3] );
extract ( ip_hdr[next] );
set_metadata ( hdr.f1, 666 );
return P4_PARSING_DONE ;
}
"""
p4 = simple_test(program, debug=debug)
""" Test parser runtime ------------------------------------------------------------"""
def test4(self, debug=1):
program = """
layout L2_def {
fields { DA : 48; SA : 48; }
}
layout L9_def {
fields { type : 5; three_bits : 3; }
}
L2_def L2_hdr;
L9_def L9_hdr;
parser start { extract ( L2_hdr ) ;
return DO_L9 ;
}
parser DO_L9 { extract ( L9_hdr ) ;
return P4_PARSING_DONE ;
}
"""
pkt = [ i for i in range(20) ]
(p4, err, num_bytes_used ) = parse_and_run_test(program, pkt, init_state='start', debug=debug)
self.assert_( err=='', 'Saw parse runtime err:' + str(err) )
self.assert_( num_bytes_used == 13, 'Expected 13 bytes consumed, Saw %d.' % num_bytes_used )
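        # pkt bytes are 0,1,2,...: DA spans bytes 0-5 (0x000102030405), SA bytes 6-11,
        # and L9_hdr packs byte 12 (0x0c = 0b00001100) as type=00001 (1), three_bits=100 (4)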
self.check_field( p4, 'L2_hdr.DA', 0x102030405 )
self.check_field( p4, 'L2_hdr.SA', 0x60708090a0b )
self.check_field( p4, 'L9_hdr.type', 1 )
self.check_field( p4, 'L9_hdr.three_bits', 4 )
""" Test parser runtime ------------------------------------------------------------"""
def test5(self, debug=1):
program = """
layout L2_def {
fields { DA : 48; SA : 48; }
}
L2_def L2_hdr[2];
parser start { extract ( L2_hdr[0] ) ;
return DO_L2 ;
}
parser DO_L2 { extract ( L2_hdr[next] ) ;
return P4_PARSING_DONE ;
}
"""
pkt = [ i for i in range(20) ]
try:
(p4, err, num_bytes_used ) = parse_and_run_test(program, pkt, init_state='start', debug=debug)
except GP4.GP4_Exceptions.RuntimeError as err:
print "Runtime Error",err.data,"was expected"
""" Test parser. stack err ------------------------------------------------------------"""
def test5a(self, debug=1):
program = """
layout L2_def {
fields { DA : 48; SA : 48; }
}
L2_def L2_hdr[2];
parser start { extract ( L2_hdr[2] ) ; /* out of range */
return P4_PARSING_DONE ;
}
"""
pkt = [ i for i in range(20) ]
try:
(p4, err, num_bytes_used ) = parse_and_run_test(program, pkt, init_state='start', debug=debug)
except GP4.GP4_Exceptions.RuntimeError as err:
print "Runtime Error was expected."
print err.data
""" Test parser. run time stack err ------------------------------------------------------------"""
def test5b(self, debug=1):
program = """
layout L2_def {
fields { DA : 48; SA : 48; }
}
L2_def L2_hdr[1];
parser start { extract ( L2_hdr[0] ) ;
return P4_ERR ;
}
parser P4_ERR { extract ( L2_hdr[next] ) ; /* out of range */
return P4_PARSING_DONE ;
}
"""
pkt = [ i for i in range(20) ]
try:
(p4, err, num_bytes_used ) = parse_and_run_test(program, pkt, init_state='start', debug=debug)
except GP4.GP4_Exceptions.RuntimeError as ex_err:
print "Runtime Error was expected:", ex_err.data
""" Test parser. bad return state err ------------------------------------------------------------"""
def test5c(self, debug=1):
program = """
layout L2_def { fields { DA : 48; SA : 48; } }
L2_def L2_hdr[1];
parser start { extract ( L2_hdr[0] ) ;
return P4_ERR ;
}
"""
pkt = [ i for i in range(20) ]
try:
(p4, err, num_bytes_used ) = parse_and_run_test(program, pkt, init_state='start', debug=debug)
except GP4.GP4_Exceptions.SyntaxError as ex_err:
print "test5c: SyntaxError was expected:", ex_err.args
""" Test parser runtime ------------------------------------------------------------"""
def test6(self, debug=1):
program = """
layout L2_def {
fields { DA : 48; SA : 48; }
}
L2_def L2_hdr[5];
parser start { extract ( L2_hdr[0] ) ;
return GET_L2_1 ;
}
parser GET_L2_1 { extract ( L2_hdr[next] ) ;
return GET_L2_2 ;
}
parser GET_L2_2 { extract ( L2_hdr[2] ) ;
return GET_L2_3 ;
}
parser GET_L2_3 { extract ( L2_hdr[next] ) ;
return GET_L2_4 ;
}
parser GET_L2_4 { extract ( L2_hdr[4] ) ;
return P4_PARSING_DONE ;
}
"""
exp_bytes_used = 5*12
pkt = [ i for i in range(60) ]
try:
(p4, err, num_bytes_used ) = parse_and_run_test(program, pkt, init_state='start',
debug=debug)
self.assert_( err=='', 'Saw parse runtime err:' + str(err) )
self.assert_( num_bytes_used == exp_bytes_used,
'Expected %d bytes consumed, Saw %d.' % (exp_bytes_used, num_bytes_used ))
self.check_field( p4, 'L2_hdr[0].DA', 0x102030405 )
self.check_field( p4, 'L2_hdr[0].SA', 0x60708090a0b )
self.check_field( p4, 'L2_hdr[1].DA', 0x0c0d0e0f1011 )
self.check_field( p4, 'L2_hdr[1].SA', 0x121314151617 )
self.check_field( p4, 'L2_hdr[2].DA', 0x18191a1b1c1d )
self.check_field( p4, 'L2_hdr[2].SA', 0x1e1f20212223 )
self.check_field( p4, 'L2_hdr[3].DA', 0x242526272829 )
self.check_field( p4, 'L2_hdr[3].SA', 0x2a2b2c2d2e2f )
self.check_field( p4, 'L2_hdr[4].DA', 0x303132333435 )
self.check_field( p4, 'L2_hdr[4].SA', 0x363738393a3b )
except GP4.GP4_Exceptions.RuntimeError as err:
print "Unexpected Runtime Error:",err.data
self.assert_(False)
""" Test parser set metadata ------------------------------------------------------------"""
def test7(self, debug=1):
program = """
layout L2_def {
fields { DA : 48; SA : 48; }
}
layout meta_def {
fields { number: 32 ; unused : 64;}
}
L2_def L2_hdr;
meta_def metadata meta_hdr;
parser start { extract ( L2_hdr ) ;
return GET_META ;
}
parser GET_META { set_metadata ( meta_hdr.number, 1234 ) ;
return P4_PARSING_DONE ;
}
"""
exp_bytes_used = 1*12
pkt = [ i for i in range(12) ]
try:
(p4, err, num_bytes_used ) = parse_and_run_test(program, pkt, init_state='start',
debug=debug)
self.assert_( err=='', 'Saw parse runtime err:' + str(err) )
self.assert_( num_bytes_used == exp_bytes_used,
'Expected %d bytes consumed, Saw %d.' % (exp_bytes_used, num_bytes_used ))
self.check_field( p4, 'L2_hdr.DA', 0x102030405 )
self.check_field( p4, 'L2_hdr.SA', 0x60708090a0b )
self.check_field( p4, 'meta_hdr.number', 1234 )
self.check_field( p4, 'meta_hdr.unused', None )
except GP4.GP4_Exceptions.RuntimeError as err:
print "Unexpected Runtime Error:",err.data
self.assert_(False)
""" Test parser switch return ------------------------------------------------------------"""
def test8(self, debug=1):
program = """
layout L2_def { fields { type0: 8; } }
layout L3_def { fields { jjj: 8; } }
layout Type_0 { fields { type1: 8; } }
layout Type_1 { fields { four: 32; } }
L2_def L2_hdr;
L3_def L3_hdr[3];
Type_0 Type_0_hdr;
Type_1 Type_1_hdr;
parser start {
extract ( L2_hdr ) ; /* 0 */
extract ( L3_hdr[0] ) ; /* 1 */
extract ( L3_hdr[1] ) ; /* 2 */
return switch ( current(4,12), latest.jjj, L2_hdr.type0, L3_hdr[1].jjj )
/* 304 02 00 02 = 12952141826 */
{ 0 : GET_TYPE0 ;
1, 3 mask 7 : P4_PARSING_DONE ;
0x304020002 : GET_TYPE1 ;
default : GET_TYPE0 ;
}
}
parser GET_TYPE0 { extract ( Type_0_hdr ) ;
return P4_PARSING_DONE ;
}
parser GET_TYPE1 { extract ( Type_1_hdr ) ;
return P4_PARSING_DONE ;
}
"""
exp_bytes_used = 7
pkt = [ i for i in range(8) ]
try:
(p4, err, num_bytes_used ) = parse_and_run_test(program, pkt, init_state='start',
debug=debug)
self.assert_( err=='', 'Saw parse runtime err:' + str(err) )
self.assert_( num_bytes_used == exp_bytes_used,
'Expected %d bytes consumed, Saw %d.' % (exp_bytes_used, num_bytes_used ))
self.check_field( p4, 'L2_hdr.type0', 0x0 )
self.check_field( p4, 'L3_hdr[0].jjj', 0x1 )
self.check_field( p4, 'L3_hdr[1].jjj', 0x2 )
self.check_field( p4, 'Type_1_hdr.four', 0x3040506 )
except GP4.GP4_Exceptions.RuntimeError as err:
print "Unexpected Runtime Error:",err.data
self.assert_(False)
""" Test parser switch return default ---------------------------------------------"""
def test8a(self, debug=1):
program = """
layout L2_def { fields { type0: 8; } }
layout bad_def { fields { jjj: 8; } }
layout Type_1 { fields { four: 32; } }
L2_def L2_hdr;
bad_def bad_hdr;
Type_1 Type_1_hdr;
parser start {
extract ( L2_hdr ) ; /* 5 */
return switch ( L2_hdr.type0 )
{ 0,1,2,3,4, 6,7,8,9,0xa : BAD ;
default : GET_NEXT4 ;
}
}
parser BAD { extract ( bad_hdr ) ;
return BAD ;
}
parser GET_NEXT4 { extract ( Type_1_hdr ) ;
return P4_PARSING_DONE ;
}
"""
exp_bytes_used = 5
pkt = [ 5+i for i in range(8) ]
try:
(p4, err, num_bytes_used ) = parse_and_run_test(program, pkt, init_state='start',
debug=debug)
self.assert_( err=='', 'Saw parse runtime err:' + str(err) )
self.assert_( num_bytes_used == exp_bytes_used,
'Expected %d bytes consumed, Saw %d.' % (exp_bytes_used, num_bytes_used ))
self.check_field( p4, 'L2_hdr.type0', 0x5 )
self.check_field( p4, 'Type_1_hdr.four', 0x6070809 )
except GP4.GP4_Exceptions.RuntimeError as err:
print "Unexpected Runtime Error:",err.data
self.assert_(False)
""" Test parser switch return default ---------------------------------------------"""
def test8b(self, debug=1):
program = """
layout L2_def { fields { type0: 8; } }
layout bad_def { fields { jjj: 8; } }
layout Type_1 { fields { four: 32; } }
L2_def L2_hdr;
bad_def bad_hdr;
Type_1 Type_1_hdr;
parser start {
extract ( L2_hdr ) ; /* 5 */
return switch ( L2_hdr.type0 )
{ 4 mask 6 : SECOND ;
default : BAD ;
}
}
parser BAD { extract ( bad_hdr ) ;
return BAD ;
}
parser SECOND { extract ( Type_1_hdr ) ;
return P4_PARSING_DONE ;
}
"""
exp_bytes_used = 5
pkt = [ 5+i for i in range(8) ]
try:
(p4, err, num_bytes_used ) = parse_and_run_test(program, pkt, init_state='start',
debug=debug)
self.assert_( err=='', 'Saw parse runtime err:' + str(err) )
self.assert_( num_bytes_used == exp_bytes_used,
'Expected %d bytes consumed, Saw %d.' % (exp_bytes_used, num_bytes_used ))
self.check_field( p4, 'L2_hdr.type0', 0x5 )
self.check_field( p4, 'Type_1_hdr.four', 0x6070809 )
except GP4.GP4_Exceptions.RuntimeError as err:
print "Unexpected Runtime Error:",err.data
self.assert_(False)
""" Test use of length "*" in header decl ---------------------------------------------"""
def test9(self, debug=1):
program = """
layout L2_def { fields { len: 8;
other: 16;
data: *;
}
length (len * 2)>>1 + 1 - 1 ;
max_length 10;
}
L2_def L2_hdr;
parser start {
extract ( L2_hdr ) ;
return P4_PARSING_DONE ;
}
"""
exp_bytes_used = 10
pkt = [ 10+i for i in range(10) ]
try:
(p4, err, num_bytes_used ) = parse_and_run_test(program, pkt, init_state='start',
debug=debug)
self.assert_( err=='', 'Saw parse runtime err:' + str(err) )
self.assert_( num_bytes_used == exp_bytes_used,
'Expected %d bytes consumed, Saw %d.' % (exp_bytes_used, num_bytes_used ))
self.check_field( p4, 'L2_hdr.len', 10 )
self.check_field( p4, 'L2_hdr.other', 0xb0c )
self.check_field( p4, 'L2_hdr.data', 0xd0e0f10111213 )
except GP4.GP4_Exceptions.RuntimeError as err:
print "Unexpected Runtime Error:",err.data
self.assert_(False)
except GP4.GP4_Exceptions.SyntaxError as err:
print "Unexpected Runtime Error:",err.data
self.assert_(False)
except GP4.GP4_Exceptions.RuntimeParseError as err:
print "Unexpected Runtime Parse Error:",err.data
self.assert_(False)
""" Test use of length "*" in header decl ---------------------------------------------"""
def test9a(self, debug=1):
program = """
layout L2_def { fields { len: 8;
other: 16;
data: *;
}
length (len * 2)>>1 + 1 - 1 ;
max_length 1;
}
L2_def L2_hdr;
parser start {
extract ( L2_hdr ) ;
return P4_PARSING_DONE ;
}
"""
exp_bytes_used = 10
pkt = [ 10+i for i in range(10) ]
try:
(p4, err, num_bytes_used ) = parse_and_run_test(program, pkt, init_state='start',
debug=debug)
self.assert_( False ) # should have exception raised
except GP4.GP4_Exceptions.RuntimeError as err:
print "Unexpected Runtime Error:",err.data
self.assert_(False)
except GP4.GP4_Exceptions.SyntaxError as err:
print "Unexpected Runtime Error:",err.data
self.assert_(False)
except GP4.GP4_Exceptions.RuntimeParseError as err:
print "Good: Expected Runtime Parse Error:",err.data
self.assert_(True)
""" Test use of hex values ---------------------------------------------"""
def test10(self, debug=1):
program = """
layout L2_def { fields { len: 0x8;
other: 0x10;
data: *;
}
length (len * 0x2)>>1 + 1 - 1 ;
max_length 0x10;
}
L2_def L2_hdr;
parser start {
extract ( L2_hdr ) ;
return P4_PARSING_DONE ;
}
"""
exp_bytes_used = 10
pkt = [ 10+i for i in range(10) ]
try:
(p4, err, num_bytes_used ) = parse_and_run_test(program, pkt, init_state='start',
debug=debug)
self.assert_( err=='', 'Saw parse runtime err:' + str(err) )
self.assert_( num_bytes_used == exp_bytes_used,
'Expected %d bytes consumed, Saw %d.' % (exp_bytes_used, num_bytes_used ))
self.check_field( p4, 'L2_hdr.len', 10 )
self.check_field( p4, 'L2_hdr.other', 0xb0c )
self.check_field( p4, 'L2_hdr.data', 0xd0e0f10111213 )
except GP4.GP4_Exceptions.RuntimeError as err:
print "Unexpected Runtime Error:",err.data
self.assert_(False)
except GP4.GP4_Exceptions.SyntaxError as err:
print "Unexpected Syntax Error:",err.data
self.assert_(False)
except GP4.GP4_Exceptions.RuntimeParseError as err:
print "Unexpected Runtime Parse Error:",err.data
self.assert_(False)
if __name__ == '__main__':
# unittest.main()
# can run all tests in dir via:
# python -m unittest discover
if (True):
single = unittest.TestSuite()
single.addTest( test_dev('test10' ))
unittest.TextTestRunner().run(single)
else:
program = """
layout L2_def { fields { DA : 48; SA : 48; } }
L2_def L2_hdr[1];
parser start { extract ( L2_hdr[0] ) ;
return P4_ERR ;
}
"""
pkt = [ i for i in range(20) ]
try:
(p4, err, num_bytes_used ) = parse_and_run_test(program, pkt, init_state='start', debug=0)
except GP4.GP4_Exceptions.SyntaxError as ex_err:
print "test5c: SyntaxError was expected:", ex_err.args
print "len args is",len(ex_err.args)
|
GregWatson/GP4
|
UnitTests/test_dev_parse.py
|
Python
|
mit
| 19,708
|
name = " "
while name != "nimesi":
print("Kirjoita nimesi.")
name =raw_input()
print ("Kiitos!")
|
GenericUser666/Hienot_skriptit
|
your_name.py
|
Python
|
mit
| 101
|
#!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 10/19/14
###Function: OR of incidence in adults to incidence in children vs. week number normalized by the first 'gp_normweeks' of the season. Incidence in children and adults is normalized by the size of the child and adult populations in the second calendar year of the flu season.
# 10/14/14 OR age flip.
# 10/15/14 ILI incidence ratio (obsolete)
# 10/19 incidence rate adjusted by any diagnosis visits (coverage adj = visits S9/visits S#) and ILI care-seeking behavior; change to relative risk
###Import data: SQL_export/OR_allweeks_outpatient.csv, SQL_export/totalpop.csv, My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv
###Command Line: python F3_zOR_time_v4.py
##############################################
### notes ###
# Incidence per 100,000 is normalized by total population by second calendar year of the flu season
### packages/modules ###
import csv
import matplotlib.pyplot as plt
## local modules ##
import functions_v4 as fxn
### data structures ###
### functions ###
### data files ###
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
### called/local plotting parameters ###
ps = fxn.pseasons
fw = fxn.gp_fluweeks
sl = fxn.gp_seasonlabels
colvec = fxn.gp_colors
wklab = fxn.gp_weeklabels
norm = fxn.gp_normweeks
fs = 24
fssml = 16
### program ###
# dict_wk[wk] = seasonnum
# dict_totIncid53ls[s] = [incid rate per 100000 wk40,... incid rate per 100000 wk 39] (unadjusted ILI incidence)
# dict_totIncidAdj53ls[s] = [adjusted incid rate per 100000 wk 40, ...adj incid wk 39] (total population adjusted for coverage and ILI care-seeking behavior)
# dict_RR53ls[s] = [RR wk 40,... RR wk 39] (children and adults adjusted for SDI data coverage and ILI care-seeking behavior)
# dict_zRR53ls[s] = [zRR wk 40,... zRR wk 39] (children and adults adjusted for SDI data coverage and ILI care-seeking behavior)
d_wk, d_totIncid53ls, d_totIncidAdj53ls, d_RR53ls, d_zRR53ls = fxn.week_OR_processing(incid, pop)
# dict_indices[(snum, classif period)] = [wk index 1, wk index 2, etc.]
d_indices = fxn.identify_retro_early_weeks(d_wk, d_totIncidAdj53ls)
# plot values
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
for s in ps:
ax.plot(xrange(len(wklab)), d_zRR53ls[s], marker = fxn.gp_marker, color = colvec[s-2], label = sl[s-2], linewidth = fxn.gp_linewidth)
for s in ps:
beg_retro, end_retro = d_indices[(s, 'r')]
beg_early, end_early = d_indices[(s, 'e')]
plt.plot(range(beg_retro, end_retro), d_zRR53ls[s][beg_retro:end_retro], marker = 'o', color = fxn.gp_retro_early_colors[0], linewidth = 4)
plt.plot(range(beg_early, end_early), d_zRR53ls[s][beg_early:end_early], marker = 'o', color = fxn.gp_retro_early_colors[1], linewidth = 4)
ax.legend(loc='upper left')
ax.set_xticks(range(len(wklab))[::5])
ax.set_xticklabels(wklab[::5])
ax.set_xlim([0, fw-1])
ax.set_xlabel('Week Number', fontsize=fs)
ax.set_ylabel(fxn.gp_sigmat, fontsize=fs)
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs_v4/zRR_time.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
# plt.savefig(fxn.filename_dummy1, transparent=False, bbox_inches='tight', pad_inches=0)
# plt.close()
|
eclee25/flu-SDI-exploratory-age
|
scripts/create_fluseverity_figs_v4/F_zRR_time_v4.py
|
Python
|
mit
| 3,552
|
"""
Udacity CS253 - Lesson 4 - Homework 1
"""
import webapp2, jinja2, os, handlers
app = webapp2.WSGIApplication([
('/signup', handlers.SignupPage),
('/welcome', handlers.WelcomePage),
('/login', handlers.LoginPage),
('/logout', handlers.LogoutPage)
], debug=True)
|
vcelis/cs253
|
lesson4/homework1-3/login.py
|
Python
|
mit
| 277
|
# Generated by Django 2.1.2 on 2019-01-28 07:07
from django.db import migrations, models
import phonenumber_field.modelfields
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Account",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=64, verbose_name="Name")),
("email", models.EmailField(max_length=254)),
(
"phone",
phonenumber_field.modelfields.PhoneNumberField(
max_length=128, null=True
),
),
(
"industry",
models.CharField(
blank=True,
choices=[
("ADVERTISING", "ADVERTISING"),
("AGRICULTURE", "AGRICULTURE"),
("APPAREL & ACCESSORIES", "APPAREL & ACCESSORIES"),
("AUTOMOTIVE", "AUTOMOTIVE"),
("BANKING", "BANKING"),
("BIOTECHNOLOGY", "BIOTECHNOLOGY"),
(
"BUILDING MATERIALS & EQUIPMENT",
"BUILDING MATERIALS & EQUIPMENT",
),
("CHEMICAL", "CHEMICAL"),
("COMPUTER", "COMPUTER"),
("EDUCATION", "EDUCATION"),
("ELECTRONICS", "ELECTRONICS"),
("ENERGY", "ENERGY"),
("ENTERTAINMENT & LEISURE", "ENTERTAINMENT & LEISURE"),
("FINANCE", "FINANCE"),
("FOOD & BEVERAGE", "FOOD & BEVERAGE"),
("GROCERY", "GROCERY"),
("HEALTHCARE", "HEALTHCARE"),
("INSURANCE", "INSURANCE"),
("LEGAL", "LEGAL"),
("MANUFACTURING", "MANUFACTURING"),
("PUBLISHING", "PUBLISHING"),
("REAL ESTATE", "REAL ESTATE"),
("SERVICE", "SERVICE"),
("SOFTWARE", "SOFTWARE"),
("SPORTS", "SPORTS"),
("TECHNOLOGY", "TECHNOLOGY"),
("TELECOMMUNICATIONS", "TELECOMMUNICATIONS"),
("TELEVISION", "TELEVISION"),
("TRANSPORTATION", "TRANSPORTATION"),
("VENTURE CAPITAL", "VENTURE CAPITAL"),
],
max_length=255,
null=True,
verbose_name="Industry Type",
),
),
(
"website",
models.URLField(blank=True, null=True, verbose_name="Website"),
),
("description", models.TextField(blank=True, null=True)),
(
"created_on",
models.DateTimeField(auto_now_add=True, verbose_name="Created on"),
),
("is_active", models.BooleanField(default=False)),
],
options={
"ordering": ["-created_on"],
},
),
]
|
MicroPyramid/Django-CRM
|
accounts/migrations/0001_initial.py
|
Python
|
mit
| 3,797
|
import _plotly_utils.basevalidators
class HoverinfoValidator(_plotly_utils.basevalidators.FlaglistValidator):
def __init__(self, plotly_name="hoverinfo", parent_name="scattercarpet", **kwargs):
super(HoverinfoValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
extras=kwargs.pop("extras", ["all", "none", "skip"]),
flags=kwargs.pop("flags", ["a", "b", "text", "name"]),
**kwargs
)
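# A flaglist value combines the flags above with '+', e.g. hoverinfo="a+b+text",
# or is one of the extras ("all", "none", "skip") on its own.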
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/scattercarpet/_hoverinfo.py
|
Python
|
mit
| 594
|
#!/usr/bin/env python3
from zipstream import ZipStream
files = [
{'stream': [b'this\n', b'is\n', b'stream\n', b'of\n',b'data\n'],
'name': 'a.txt',
'compression':'deflate'},
{'file': '/tmp/z/car.jpeg'},
{'file': '/tmp/z/aaa.mp3',
'name': 'music.mp3'},
]
zs = ZipStream(files)
with open("example.zip", "wb") as fout:
for data in zs.stream():
fout.write(data)
|
m2ozg/zipstream
|
examples/simple.py
|
Python
|
mit
| 399
|
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import unittest
import subprocess
import sys
import isodate
import tempfile
import io
from datetime import date, datetime, timedelta
import os
from os.path import dirname, pardir, join, realpath, sep
cwd = dirname(realpath(__file__))
root = realpath(join(cwd , pardir, pardir, pardir, pardir, pardir))
sys.path.append(join(root, "ClientRuntimes" , "Python", "msrest"))
log_level = int(os.environ.get('PythonLogLevel', 30))
tests = realpath(join(cwd, pardir, "Expected", "AcceptanceTests"))
sys.path.append(join(tests, "BodyFile"))
from msrest.exceptions import DeserializationError
from autorestswaggerbatfileservice import AutoRestSwaggerBATFileService, AutoRestSwaggerBATFileServiceConfiguration
from autorestswaggerbatfileservice.models import ErrorException
class FileTests(unittest.TestCase):
def test_files(self):
config = AutoRestSwaggerBATFileServiceConfiguration(base_url="http://localhost:3000")
config.log_level = log_level
config.connection.data_block_size = 1000
client = AutoRestSwaggerBATFileService(config)
def test_callback(data, response, progress = [0]):
self.assertFalse(response._content_consumed)
self.assertTrue(len(data) > 0)
progress[0] += len(data)
total = float(response.headers['Content-Length'])
print("Downloading... {}%".format(int(progress[0]*100/total)))
self.assertIsNotNone(response)
file_length = 0
with io.BytesIO() as file_handle:
stream = client.files.get_file(callback=test_callback)
for data in stream:
file_length += len(data)
file_handle.write(data)
self.assertNotEqual(file_length, 0)
sample_file = realpath(
join(cwd, pardir, pardir, pardir, "NodeJS",
"NodeJS.Tests", "AcceptanceTests", "sample.png"))
with open(sample_file, 'rb') as data:
sample_data = hash(data.read())
self.assertEqual(sample_data, hash(file_handle.getvalue()))
file_length = 0
with io.BytesIO() as file_handle:
stream = client.files.get_empty_file(callback=test_callback)
for data in stream:
file_length += len(data)
file_handle.write(data)
self.assertEqual(file_length, 0)
def test_files_raw(self):
def test_callback(data, response, progress = [0]):
self.assertFalse(response._content_consumed)
self.assertTrue(len(data) > 0)
progress[0] += len(data)
total = float(response.headers['Content-Length'])
print("Downloading... {}%".format(int(progress[0]*100/total)))
self.assertIsNotNone(response)
config = AutoRestSwaggerBATFileServiceConfiguration(base_url="http://localhost:3000")
config.log_level = log_level
client = AutoRestSwaggerBATFileService(config)
file_length = 0
with io.BytesIO() as file_handle:
response = client.files.get_file(raw=True, callback=test_callback)
stream = response.output
for data in stream:
file_length += len(data)
file_handle.write(data)
self.assertNotEqual(file_length, 0)
sample_file = realpath(
join(cwd, pardir, pardir, pardir, "NodeJS",
"NodeJS.Tests", "AcceptanceTests", "sample.png"))
with open(sample_file, 'rb') as data:
sample_data = hash(data.read())
self.assertEqual(sample_data, hash(file_handle.getvalue()))
file_length = 0
with io.BytesIO() as file_handle:
response = client.files.get_empty_file(raw=True, callback=test_callback)
stream = response.output
for data in stream:
file_length += len(data)
file_handle.write(data)
self.assertEqual(file_length, 0)
if __name__ == '__main__':
unittest.main()
|
stankovski/AutoRest
|
AutoRest/Generators/Python/Python.Tests/AcceptanceTests/file_tests.py
|
Python
|
mit
| 5,390
|
#!/usr/local/bin/python3.5
import time
import math
import os
import shutil
import tempfile
from six.moves import configparser
from yattag import Doc
import asyncio
import aiohttp
import async_timeout
import json
# Read config file in
mydir = os.path.dirname(os.path.realpath(__file__))
configReader = configparser.RawConfigParser()
configReader.read(mydir + "/config.txt")
config = {
'outputdir': "./",
'customtext': "Zone events running on and around Telara",
'name': "Simple RIFT Event Tracker",
}
for var in ["outputdir","name","customtext"]:
try:
config[var] = configReader.get("Tracker",var)
    except configparser.NoOptionError:
pass
allshards = {
'us': {
1704: 'Deepwood',
1707: 'Faeblight',
1702: 'Greybriar',
1721: 'Hailol',
1708: 'Laethys',
1701: 'Seastone',
1706: 'Wolfsbane',
},
'eu': {
2702: 'Bloodiron',
2714: 'Brisesol',
2711: 'Brutwacht',
2721: 'Gelidra',
2741: 'Typhiria',
2722: 'Zaviel',
}
}
os.environ['TZ'] = 'UTC'
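# fetch a single URL with a 10-second budget; main() gathers one of these per
# shard so all shard queries for a datacenter run concurrently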
async def fetch(session, url):
with async_timeout.timeout(10):
async with session.get(url) as response:
return await response.text()
async def main(loop):
for dc in allshards:
start_time = time.time()
# Construct a page at a time
doc, tag, text = Doc().tagtext()
with tag('html'):
with tag('head'):
doc.stag('meta', ('http-equiv', "Refresh"), ('content', 60))
doc.stag('meta', ('http-equiv', "Content-Type"), ('content', "text/html; charset=UTF-8"))
doc.stag('link', ('rel', "stylesheet"), ('type', "text/css"), ('href', "style.css"))
with tag('title'):
text(config['name'])
with tag('body'):
with tag('h2'):
text(config['name'], ' - ', dc.upper())
# Links to other DCs
with tag('p'):
for otherdc in allshards:
if (otherdc != dc):
with tag('a', href = otherdc + ".html"):
text(otherdc.upper())
with tag('p'):
text(config['customtext'])
# Event table
with tag('table'):
with tag('thead'):
with tag('tr'):
for title in ['Shard', 'Zone', 'Event Name', 'Elapsed Time']:
with tag('th'):
text(title)
with tag('tbody'):
# Get each shard's events
urls = []
for shardid in sorted(allshards[dc], key=allshards[dc].get):
urls.append("https://web-api-{0}.riftgame.com/chatservice/zoneevent/list?shardId={1}".format(dc, str(shardid)))
results = []
with aiohttp.ClientSession(loop=loop) as session:
results = await asyncio.gather(
*[fetch(session, url) for url in urls],
)
for idx, url in enumerate(urls):
shardid = int(url[-4:])
data = json.loads(results[idx])['data']
data.reverse()
# Print any events
displayshard = allshards[dc][shardid]
for zone in data:
# An event is running in a zone, so add a table row
if "name" in zone:
with tag('tr'):
with tag('td', klass = "bold"):
text(displayshard)
zoneclass = "secondary"
# Starfall zone IDs
if zone['zoneId'] in [788055204, 2007770238, 1208799201, 2066418614]:
zoneclass = "bold"
for display in [zone['zone'], zone['name'], str(int( math.floor((time.time() - zone['started']) / 60) )) + " min" ]:
with tag('td', klass = zoneclass):
text(display)
# already printed the shard name once, so clear it
displayshard = ""
with tag('p', klass = 'small tertiary'):
text("Generated at {0} in {1:.3f}s".format(time.strftime("%d-%b-%Y %H:%M:%S %Z"), (time.time() - start_time) ))
with tag('p', klass = 'small tertiary'):
text("Trion, Trion Worlds, RIFT, Storm Legion, Nightmare Tide, Starfall Prophecy, Telara, and their respective logos, are trademarks or registered trademarks of Trion Worlds, Inc. in the U.S. and other countries. This site is not affiliated with Trion Worlds or any of its affiliates.")
# Write page then move it over the old one
with tempfile.NamedTemporaryFile(delete=False) as outfile:
outfile.write(doc.getvalue().encode('utf8'))
os.chmod(outfile.name, 0o0644)
os.rename(outfile.name, config['outputdir'] + dc + ".html")
if not os.path.exists(config['outputdir'] + "index.html"):
os.symlink(config['outputdir'] + dc + ".html", config['outputdir'] + "index.html")
if not os.path.exists(config['outputdir'] + "style.css"):
shutil.copy2(mydir + "/style.css",config['outputdir'] + "style.css")
loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop))
|
AlucardZero/python-rift-event-tracker
|
events.py
|
Python
|
mit
| 4,983
|
#!/usr/bin/env python
# SMTP transmission with authentication - Chapter 13 - login.py
import sys, smtplib, socket
from getpass import getpass
if len(sys.argv) < 4:
print "Syntax: %s server fromaddr toaddr [toaddr...]" % sys.argv[0]
sys.exit(2)
server, fromaddr, toaddrs = sys.argv[1], sys.argv[2], sys.argv[3:]
message = """To: %s
From: %s
Subject: Test Message from simple.py

Hello,
This is a test message sent to you from the login.py program
in Foundations of Python Network Programming.
""" % (', '.join(toaddrs), fromaddr)
sys.stdout.write("Enter username: ")
username = sys.stdin.readline().strip()
password = getpass("Enter password: ")
try:
s = smtplib.SMTP(server)
try:
s.login(username, password)
except smtplib.SMTPException, e:
print "Authentication failed:", e
sys.exit(1)
s.sendmail(fromaddr, toaddrs, message)
except (socket.gaierror, socket.error, socket.herror,
smtplib.SMTPException), e:
print " *** Your message may not have been sent!"
print e
sys.exit(1)
else:
print "Message successfully sent to %d recipient(s)" % len(toaddrs)
|
jac2130/BayesGame
|
foundations-of-python-network-programming/python2/13/login.py
|
Python
|
mit
| 1,130
|
"""Model based on VGG16:
# Reference
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
Code based on the original Keras library implementation source code
"""
import warnings
import tensorflow as tf
from keras.models import Model
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Input
from keras.layers import Reshape
from keras.layers import Lambda
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.utils.data_utils import get_file
WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
def model(weights='imagenet',
input_shape=(240, 320, 3)):
"""Instantiates the VGG16-based architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format='channels_last'` in your Keras config
at ~/.keras/keras.json.
# Arguments
weights: one of `None` (random initialization)
or 'imagenet' (pre-training on ImageNet).
input_shape: optional shape tuple,
It should have exactly 3 input channels,
and width and height should be no smaller than 48.
E.g. `(200, 200, 3)` would be one valid value.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
img_input = Input(shape=input_shape)
# Block 1
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = MaxPooling2D((4, 4), strides=(4, 4), name='block2_pool')(x)
x = Conv2D(48, (3, 3), activation='relu', padding='same', name='last_conv')(x)
# Top Layers
x = Flatten()(x)
x = Dense(4096, activation='relu', name='fc1')(x)
x = Dense(80*60*3, activation='relu', name='fc2')(x)
x = Reshape((60,80,3))(x)
x = Lambda(lambda x: tf.image.resize_bilinear(x , [240,320]) )(x)
x = Lambda(lambda x: tf.nn.l2_normalize(x, 3) )(x)
# Create model.
inputs = img_input
model = Model(inputs, x, name='vgg16')
# load weights
if weights == 'imagenet':
weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models')
model.load_weights(weights_path, by_name=True)
return model
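# A minimal usage sketch (illustrative only; the zero batch below is a stand-in
# for real 240x320 RGB input and is not part of this module):
#
#   import numpy as np
#   m = model(weights=None)  # or 'imagenet' to load the pre-trained conv weights
#   preds = m.predict(np.zeros((1, 240, 320, 3)))
#   # preds has shape (1, 240, 320, 3), L2-normalized along the channel axis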
|
kaykanloo/msc-project
|
Code/Models/VGGLowPool4.py
|
Python
|
mit
| 3,160
|
'''
The MIT License (MIT)
Copyright (c) 2013-2017 Robert H Chase
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import datetime
import json
import re
import sys
import time
import traceback
import types
import urlparse
from httphandler import HTTPHandler
import logging
log = logging.getLogger(__name__)
class RESTRequest(object):
def __init__(self, handler):
self.handler = handler
self.context = handler.context.context # context from RESTMapper
self.http_message = handler.http_message
self.http_headers = handler.http_headers
self.http_content = handler.http_content
self.http_method = handler.http_method
self.http_multipart = handler.http_multipart
self.http_resource = handler.http_resource
self.http_query_string = handler.http_query_string
self.http_query = handler.http_query
self.timestamp = datetime.datetime.now()
self.is_delayed = False
def delay(self):
self.is_delayed = True
@property
def id(self):
return self.handler.id
def defer(self, deferred_fn, immediate_fn, error_fn=None, error_msg=None, error_200=False):
'''
defer the request until immediate_fn completes; then call deferred_fn
        if immediate_fn does not complete successfully, then deferred_fn is not called;
instead, the error is handled by responding on the request. the default error
response parameters are (400, result), which can be overridden in several ways
with the optional kwargs described below.
Parameters:
deferred_fn - called with result of immediate_fn on success
deferred_fn(request, result)
immediate_fn - function that takes a callback_fn
callback_fn is eventually called with (rc, result)
if rc != 0, immediate_fn failed
error_fn - called with (request, result) if immediate_fn fails
must respond on the request or risk hanging the connection
error_msg - used in lieu of result if immediate_fn fails
result is logged as a warning
error_200 - if True, respond with (200, {"error": result})
Notes:
1. deferred_fn is for the happy-path. it is not called with the (rc, result)
pattern, but is instead called with (request, result). the idea is that
               the handling of the request is what is deferred by this method, and that
               if everything is working, we keep going sequentially through the logic.
the deferred_fn is meant to mirror a rest handler's signature.
2. the immediate_fn is called with a callback as the only parameter and
is expected to invoke that callback with the (rc, result) pattern upon
completion. rc is 0 (zero) for successful completion; otherwise non-zero.
3. immediate_fn is expected to perform an async operation, although it
doesn't have to. if immediate_fn is not async, it makes more sense to
call it inline.
'''
def on_defer(rc, result):
if rc == 0:
return deferred_fn(self, result) # happy path
if error_fn:
return error_fn(self, result)
if error_msg:
log.warning('error cid=%s: %s', self.handler.id, result)
result = error_msg
if error_200:
return self.respond({'error': result})
self.respond(400, result)
self.delay()
immediate_fn(on_defer)
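    # A minimal usage sketch for defer (hypothetical names; db.lookup_user_async
    # is assumed to invoke its callback as callback(rc, user)):
    #
    #   def on_user(request, user):          # deferred_fn: the happy path
    #       request.respond(200, user)
    #
    #   def get_user(request):               # a rest_handler
    #       request.defer(on_user,
    #                     lambda callback: db.lookup_user_async(callback),
    #                     error_msg='user lookup failed')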
def respond(self, *args, **kwargs):
'''
the args/kwargs usually match the RESTResult __init__ method
in the case of a single argument, the RESTResult coerce method is called to deal with some
legacy ways of using this method.
'''
if len(kwargs) == 0 and len(args) == 1:
result = RESTResult.coerce(args[0])
else:
result = RESTResult(*args, **kwargs)
result.close = self.http_headers.get('Connection') == 'close' # grab Connection from cached headers in case they have been cleared on the HTTPHandler
self.is_delayed = True # treat as delayed to stop on_http_data from responding a second time in the non-delay case
self.handler.rest_response(result)
@property
def json(self):
if not hasattr(self, '_json'):
if self.http_content and self.http_content.lstrip()[0] in '[{':
try:
self._json = json.loads(self.http_content)
except Exception:
raise Exception('Unable to parse json content')
elif len(self.http_query) > 0:
self._json = self.http_query
else:
self._json = {n: v for n, v in urlparse.parse_qsl(self.http_content)}
return self._json
class RESTResult(object):
def __init__(self, code=200, content='', headers=None, message=None, content_type=None):
self.code = code
self.close = False
if isinstance(content, (types.DictType, types.ListType, types.FloatType, types.BooleanType, types.IntType)):
try:
content = json.dumps(content)
content_type = 'application/json; charset=utf-8'
except Exception:
content = str(content)
if content_type:
if not headers:
headers = {}
headers['Content-Type'] = content_type
if not message:
message = {
200: 'OK',
201: 'Created',
204: 'No Content',
302: 'Found',
400: 'Bad Request',
401: 'Unauthorized',
403: 'Forbidden',
404: 'Not Found',
500: 'Internal Server Error',
}.get(code, '')
self.message = message
self.content = content
self.headers = headers
@classmethod
def coerce(cls, result):
if isinstance(result, cls):
return result # already a RESTResult
if isinstance(result, int):
return cls(result) # integer: treat as status code
if isinstance(result, tuple):
return cls(*result) # tuple: treat as *args
return cls(content=result) # otherwise, assume status code 200 with result being the content
class RESTHandler(HTTPHandler):
'''
Identify and execute REST handler functions.
The RESTHandler context is a RESTMapper instance, with mappings defined
for each URI. When an http request URI matches a mapping regex and
method, the respective rest_handler is called with this object as the
first parameter, followed by any regex groups.
A rest_handler function returns a RESTResult object, or something which
is coerced to a RESTResult by the rest_response method, when an immediate
response is available. In order to delay a response (to prevent
blocking the server) a rest_handler can call the delay() function on the
request object; the socket will remain open and set the
is_delayed flag on the RESTRequest.
Callback methods:
on_rest_data(self, *groups)
on_rest_exception(self, exc_type, exc_value, exc_traceback)
on_rest_send(self, code, message, content, headers)
'''
def on_http_data(self):
handler, groups = self.context._match(self.http_resource, self.http_method)
if handler:
try:
request = RESTRequest(self)
self.on_rest_data(request, *groups)
result = handler(request, *groups)
if not request.is_delayed:
self.rest_response(RESTResult.coerce(result))
except Exception:
content = self.on_rest_exception(*sys.exc_info())
kwargs = dict(code=501, message='Internal Server Error')
if content:
kwargs['content'] = str(content)
self._rest_send(**kwargs)
else:
self.on_rest_no_match()
self._rest_send(code=404, message='Not Found')
def on_rest_data(self, request, *groups):
''' called on rest_handler match '''
pass
def on_rest_no_match(self):
pass
def rest_response(self, result):
result = RESTResult.coerce(result)
self._rest_send(result.content, result.code, result.message, result.headers, result.close)
def on_rest_exception(self, exception_type, exception_value, exception_traceback):
''' handle Exception raised during REST processing
If a REST handler raises an Exception, this method is called with the sys.exc_info
tuple to allow for logging or any other special handling.
If a value is returned, it will be sent as the content in the
"501 Internal Server Error" response.
To return a traceback string in the 501 message:
import traceback
return traceback.format_exc(exception_traceback)
'''
return None
def _rest_send(self, content=None, code=200, message='OK', headers=None, close=False):
args = dict(code=code, message=message, close=close)
if content:
args['content'] = content
if headers:
args['headers'] = headers
self.on_rest_send(code, message, content, headers)
self.send_server(**args)
def on_rest_send(self, code, message, content, headers):
pass
class LoggingRESTHandler(RESTHandler):
def on_open(self):
log.info('open: cid=%d, %s', self.id, self.name)
def on_close(self):
log.info('close: cid=%s, reason=%s, t=%.4f, rx=%d, tx=%d', getattr(self, 'id', '.'), self.close_reason, time.time() - self.start, self.rxByteCount, self.txByteCount)
def on_rest_data(self, request, *groups):
log.info('request cid=%d, method=%s, resource=%s, query=%s, groups=%s', self.id, request.http_method, request.http_resource, request.http_query_string, groups)
def on_rest_send(self, code, message, content, headers):
log.debug('response cid=%d, code=%d, message=%s, headers=%s', self.id, code, message, headers)
def on_rest_no_match(self):
log.warning('no match cid=%d, method=%s, resource=%s', self.id, self.http_method, self.http_resource)
def on_http_error(self):
log.warning('http error cid=%d: %s', self.id, self.error)
def on_rest_exception(self, exception_type, value, trace):
log.exception('exception encountered:')
return traceback.format_exc(trace)
class RESTMapper(object):
'''
A URI-to-executable mapper that is passed as the context for a
RESTHandler.
If a context is specified, it is included in each request as
request.context. If the requests are handled in separate threads
it is important to serialize access to this variable since it
is shared.
The on_http_data method of the RESTHandler calls the _match method
on this object to resolve a URI to a previously defined pattern.
Patterns are added with the add method.
'''
def __init__(self, context=None):
self.context = context
self.__mapping = []
self.map()
def map(self):
'''convenience function for initialization '''
pass
def add(self, pattern, get=None, post=None, put=None, delete=None):
'''
Add a mapping between a URI and a CRUD method.
The pattern is a regex string which can include groups. If
groups are included in the regex, they will be passed as
parameters to the matching method.
The _match method will evaluate each mapping in the order
that they are added. The first match wins.
For example:
add('/foo/(\d+)/bar', get=my_func)
will match:
GET /foo/123/bar HTTP/1.1
resulting in the call:
my_func(123)
in this case, my_func must be defined to take the
parameter.
'''
self.__mapping.append(RESTMapping(pattern, get, post, put, delete))
def _match(self, resource, method):
'''
Match a resource + method to a RESTMapping
The resource parameter is the resource string from the
http call. The method parameter is the method from
the http call. The user shouldn't call this method, it
is called by the on_http_data method of the
RESTHandler.
Step through the mappings in the order they were defined
and look for a match on the regex which also has a method
defined.
'''
for mapping in self.__mapping:
m = mapping.pattern.match(resource)
if m:
handler = mapping.method.get(method.lower())
if handler:
return handler, m.groups()
return None, None
def import_by_pathname(target):
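    '''resolve a dotted-path string such as "myapp.handlers.get_user" to the
    named attribute; non-string targets are returned unchanged'''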
if isinstance(target, str):
modnam, clsnam = target.rsplit('.', 1)
mod = __import__(modnam)
for part in modnam.split('.')[1:]:
mod = getattr(mod, part)
return getattr(mod, clsnam)
return target
class RESTMapping(object):
''' container for one mapping definition '''
def __init__(self, pattern, get, post, put, delete):
self.pattern = re.compile(pattern)
self.method = {
'get': import_by_pathname(get),
'post': import_by_pathname(post),
'put': import_by_pathname(put),
'delete': import_by_pathname(delete),
}
def content_to_json(*fields, **kwargs):
    '''rest_handler decorator that converts request.http_content to request.json
The content must be a valid json document or a valid URI query string (as
produced by a POSTed HTML form). If the content starts with a '[' or '{',
it is treated as json; else it is treated as a URI. The URI only expects
one value per key.
Arguments:
fields - a list of field names. the names will be used to look up
values in the json dictionary which are appended, in order,
to the rest_handler's argument list. The specified fields
must be present in the content.
if a field name is a tuple, then the first element is the name,
which is treated as stated above, and the second element is
a type conversion function which accepts the value and returns
a new value. for instance ('a', int) will look up the value
for 'a', and convert it to an int (or fail trying).
if field name is a tuple with three elements, then the third
element is a default value.
as_args - if true, append fields as described above, else add to decorated
call as kwargs.
Errors:
400 - json conversion fails or specified fields not present in json
Notes:
1. This is responsive to the is_delayed flag on the request.
'''
as_args = kwargs.setdefault('as_args', True)
def __content_to_json(rest_handler):
def inner(request, *args):
kwargs = dict()
try:
if fields:
args = list(args)
for field in fields:
if isinstance(field, tuple):
if len(field) == 3:
fname, ftype, fdflt = field
value = request.json.get(fname, fdflt)
else:
fname, ftype = field
value = request.json[fname]
if ftype:
value = ftype(value)
else:
fname = field
value = request.json[fname]
if as_args:
args.append(value)
else:
kwargs[fname] = value
except KeyError as e:
return request.respond(RESTResult(400, 'Missing required key: %s' % str(e)))
except Exception as e:
return request.respond(RESTResult(400, "Unable to read field '%s': %s" % (fname, e.message)))
return rest_handler(request, *args, **kwargs)
return inner
return __content_to_json
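# A minimal usage sketch for content_to_json (hypothetical handler; not part of
# this module):
#
#   @content_to_json('name', ('age', int), ('role', None, 'user'))
#   def create_user(request, name, age, role):
#       # 'name' is required, 'age' is converted with int(), and 'role'
#       # falls back to 'user' when absent from the content
#       return RESTResult(201, {'name': name, 'age': age, 'role': role})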
|
Shatnerz/rhc
|
rhc/resthandler.py
|
Python
|
mit
| 17,999
|
import sys
import yaml
import os
import imp
import logging
from src import annotation_dispatcher, annotation_worker
from src import generation_dispatcher, generation_worker
from src import classification_dispatcher, classification_worker
__name__ = "bayzee"
def __loadConfig(configFilePath):
config = None
if not os.path.exists(configFilePath):
print "Config file does not exist"
sys.exit(1)
try:
dataStream = open(configFilePath, "r")
config = yaml.load(dataStream)
except:
error = sys.exc_info()
print "Failed to load configuration from file", error
sys.exit(1)
else:
return config
def __loadProcessors(configFilePath, config):
processorInstances = []
for module in config["processor"]["modules"]:
modulePath = os.path.abspath(os.path.join(os.path.dirname(configFilePath), module["path"]))
processorInstances.append(imp.load_source(module["name"], modulePath))
config["processor_instances"] = processorInstances
def __initLogger(configFilePath, config):
logsDir = os.path.abspath(os.path.join(os.path.dirname(configFilePath), config["logger"]["logsDir"]))
if not os.path.exists(logsDir):
os.makedirs(logsDir)
logger = logging.getLogger("bayzee")
fh = logging.FileHandler(logsDir + "/bayzee.log")
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger.setLevel(logging.DEBUG)
fh.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
config["logger"] = logger
logger = logging.getLogger("elasticsearch")
fh = logging.FileHandler(logsDir + "/elasticsearch.log")
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger.setLevel(logging.DEBUG)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
logger = logging.getLogger("elasticsearch.trace")
fh = logging.FileHandler(logsDir + "/elasticsearch.trace")
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger.setLevel(logging.DEBUG)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
def dispatchToAnnotate(configFilePath, processingStartIndex, processingEndIndex):
config = __loadConfig(configFilePath)
__loadProcessors(configFilePath, config)
__initLogger(configFilePath, config)
ann = annotation_dispatcher.AnnotationDispatcher(config, processingStartIndex, processingEndIndex)
ann.dispatchToAnnotate()
def annotate(configFilePath):
config = __loadConfig(configFilePath)
__loadProcessors(configFilePath, config)
__initLogger(configFilePath, config)
ann = annotation_worker.AnnotationWorker(config)
ann.annotate()
def dispatchToGenerate(configFilePath, processingStartIndex, processingEndIndex):
config = __loadConfig(configFilePath)
__loadProcessors(configFilePath, config)
__initLogger(configFilePath, config)
trainingFilePath = os.path.abspath(os.path.join(os.path.dirname(configFilePath), config["generator"]["trainingPhrasesFilePath"]))
holdOutFilePath = os.path.abspath(os.path.join(os.path.dirname(configFilePath), config["generator"]["holdOutPhrasesFilePath"]))
trainingFile = open(trainingFilePath, "r")
holdOutFile = open(holdOutFilePath, "r")
trainingDataset = {}
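  # each line after the header row is assumed to be "<phrase>,<label>"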
for row in trainingFile.readlines()[1:]:
values = row.split(",")
trainingDataset[values[0]] = values[1]
holdOutDataset = {}
for row in holdOutFile.readlines()[1:]:
values = row.split(",")
holdOutDataset[values[0]] = values[1]
gen = generation_dispatcher.GenerationDispatcher(config, trainingDataset, holdOutDataset, processingStartIndex, processingEndIndex)
gen.dispatchToGenerate()
def generate(configFilePath):
config = __loadConfig(configFilePath)
__loadProcessors(configFilePath, config)
__initLogger(configFilePath, config)
trainingFilePath = os.path.abspath(os.path.join(os.path.dirname(configFilePath), config["generator"]["trainingPhrasesFilePath"]))
holdOutFilePath = os.path.abspath(os.path.join(os.path.dirname(configFilePath), config["generator"]["holdOutPhrasesFilePath"]))
trainingFile = open(trainingFilePath, "r")
holdOutFile = open(holdOutFilePath, "r")
trainingDataset = {}
for row in trainingFile.readlines()[1:]:
values = row.split(",")
trainingDataset[values[0]] = values[1]
holdOutDataset = {}
for row in holdOutFile.readlines()[1:]:
values = row.split(",")
holdOutDataset[values[0]] = values[1]
gen = generation_worker.GenerationWorker(config, trainingDataset, holdOutDataset)
gen.generate()
def dispatchToClassify(configFilePath, processingStartIndex, processingEndIndex):
config = __loadConfig(configFilePath)
__initLogger(configFilePath, config)
cls = classification_dispatcher.ClassificationDispatcher(config, processingStartIndex, processingEndIndex)
cls.dispatchToClassify()
def classify(configFilePath):
config = __loadConfig(configFilePath)
__initLogger(configFilePath, config)
cls = classification_worker.ClassificationWorker(config)
cls.classify()
|
pandastrike/bayzee
|
__init__.py
|
Python
|
mit
| 5,154
|
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2015 by Openname.org
:license: MIT, see LICENSE for more details.
"""
from functools import wraps
from flask import request, Response
from .config import API_USERNAME, API_PASSWORD
# -------------------------------------
def check_auth(username, password):
"""This function is called to check if a username /
password combination is valid.
"""
return username == API_USERNAME and password == API_PASSWORD
# -------------------------------------
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
# -------------------------------------
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
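# -------------------------------------
# A minimal usage sketch (hypothetical Flask route; not part of this module):
#
#   @app.route('/admin')
#   @requires_auth
#   def admin():
#       return 'only reachable with the configured API credentials'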
|
jetbox/resolver
|
server/helper.py
|
Python
|
mit
| 1,104
|
from __future__ import unicode_literals
from rest_framework import pagination
from rest_framework import serializers
class BongoPagination(pagination.PageNumberPagination):
page_size = 20
page_size_query_param = 'limit'
max_page_size = 100
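# A minimal usage sketch (hypothetical view; not part of this module):
#
#   class PostViewSet(viewsets.ModelViewSet):
#       pagination_class = BongoPagination
#       # clients page with e.g. GET /posts/?page=2&limit=50 (limit capped at 100)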
|
BowdoinOrient/bongo
|
bongo/apps/api/pagination.py
|
Python
|
mit
| 255
|
#!/usr/bin/env python3
import asyncio
from threading import Thread
from bottle import static_file, route, run
from app import Server
def serve_web():
    @route('/')
    def index():
        return static_file("index.html", root='./app')
    @route('/<filename>')
    def server_static(filename):
        return static_file(filename, root='./app')
    run(host='localhost', port=9000)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
start_server = Server().serve('localhost')
server = loop.run_until_complete(start_server)
t = Thread(target=serve_web)
t.daemon = True
t.start()
loop.run_forever()
|
mikoim/funstuff
|
codecheck/codecheck-2160/run.py
|
Python
|
mit
| 793
|
from django.conf.urls import url
from . import views, api
app_name = 'event'
urlpatterns = [
# event/
url(r'^$',
views.EventIndexView.as_view(),
name='index'),
# event/1/detail
url(r'^(?P<pk>[0-9]+)/$',
views.EventDetailView.as_view(),
name='detail'),
# event/1/delete
url(r'^(?P<pk>[0-9]+)/delete/$',
views.EventDeleteView.as_view(),
name='delete'),
# event/1/edit/
url(r'^(?P<pk>[0-9]+)/edit/$',
views.EventEditView.as_view(),
name='edit'),
# event/<id>/edit/message
url(r'^(?P<pk>[0-9]+)/edit/message$',
views.SendMessage.as_view(),
name='message'),
# event/1/participants/
url(r'^(?P<pk>[0-9]+)/participants/$',
views.EventParticipantsView.as_view(),
name='participants'),
# event/add
url(r'^add/$',
views.EventCreate.as_view(),
name='add'),
# event/search
url(r'^search/$',
views.EventSearchResultsView.as_view(),
name='search'),
    # event/filter/<event_kind>
url(r'^filter/future_participating_events$',
api.event_filter, {'event_kind': 'future_participating_events'},
name='future_participating_events'),
url(r'^filter/new_region_events$',
api.event_filter, {'event_kind': 'new_region_events'},
name='new_region_events'),
url(r'^filter/new_tag_events$',
api.event_filter, {'event_kind': 'new_tag_events'},
name='new_tag_events'),
url(r'^filter/new_events$',
api.event_filter, {'event_kind': 'new_events'},
name='new_events'),
    url(r'^filter/all_events$',
api.event_filter, {'event_kind': 'all_events'},
name='all_events'),
url(r'^filter/(?P<event_kind>[\w_]+)$',
api.event_filter,
name='filter'),
url(r'^range_search/$',
api.event_range_search,
name='range_search'),
# event/id/participate/frame_id
url(r'^(?P<event_id>[0-9]+)/participate/(?P<frame_id>[0-9]+)$',
views.EventJoinView.as_view(),
name='participate'),
# event/id/follow
    url(r'^(?P<event_id>[0-9]+)/follow$',
views.EventFollowView.as_view(),
name='follow'),
# event/id/support
    url(r'^(?P<event_id>[0-9]+)/support$',
views.EventSupportView.as_view(),
name='support'),
# event/1/cancel
url(r'^(?P<event_id>[0-9]+)/cancel/$',
views.ParticipationDeleteView.as_view(),
name='cancel'),
# event/1/comment
url(r'^(?P<event_id>[0-9]+)/comment/$',
views.CommentCreate.as_view(),
name='comment'),
# event/comment/1/delete
url(r'^(?P<event_id>[0-9]+)/comment/(?P<pk>[0-9]+)/delete$',
views.CommentDeleteView.as_view(),
name='comment_delete'),
]
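# Reversal sketch (assumes Django >= 1.10 and that this urlconf is included
# under an "event/" prefix):
#   from django.urls import reverse
#   reverse('event:detail', kwargs={'pk': 1})  # -> '/event/1/'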
|
internship2016/sovolo
|
app/event/urls.py
|
Python
|
mit
| 2,784
|
#!/usr/bin/env python
import argparse
import sys
from lxml import etree
sys.path.append('./lib')
from jnpr.junos import Device
from pypeer.ConfigDictionary import ConfigDictionary
from pypeer.BgpData import BgpData
from pypeer.Exchange import Exchange
from pypeer.PeeringDBClient import PeeringDBClient
from pypeer.PeeringDBParser import PeeringDBParser
import socket
def is_valid_ipv4_address(address):
try:
socket.inet_pton(socket.AF_INET, address)
except AttributeError: # no inet_pton here, sorry
try:
socket.inet_aton(address)
except socket.error:
return False
return address.count('.') == 3
except socket.error: # not a valid address
return False
return True
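# Expected behaviour, as a quick sanity check (comments only):
#   is_valid_ipv4_address('192.0.2.1')  -> True
#   is_valid_ipv4_address('192.0.2')    -> False
#   is_valid_ipv4_address('2001:db8::') -> False (IPv6 is rejected here)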
def main():
list_peeringdbid_of_connected_exchanges = []
config = ConfigDictionary("/home/andy/etc/pypeer.ini")
username = config.username()
password = config.password()
exchange = Exchange()
peeringdb = PeeringDBClient()
for router in config.get_list_of_router_names():
jdev = Device(user=username, host=config.get_router_ip(router),
password=password)
jdev.open(gather_facts=False)
jdev.timeout = 600
try:
resultxml = jdev.rpc.get_bgp_summary_information()
        except Exception as err:
            print "CMD:"
            etree.dump(err.cmd)
            print "RSP:"
            etree.dump(err.rsp)
            continue
bgpsum = BgpData(resultxml)
        for thispeeringip in bgpsum.get_list_peering_ips():
            # skip peers that are not valid IPv4 addresses
            if not is_valid_ipv4_address(thispeeringip):
                continue
            peeringdb_id = exchange.get_exchange_from_peerip(thispeeringip)['peeringdbid']
            # skip unknown exchanges and ones we have already reported
            if peeringdb_id == 0 or peeringdb_id in list_peeringdbid_of_connected_exchanges:
                continue
            list_peeringdbid_of_connected_exchanges.append(peeringdb_id)
            ixpjson = peeringdb.ixlan(peeringdb_id)
            ixpparser = PeeringDBParser(ixpjson)
            print "%s: %s # %s" % (peeringdb_id, router, ixpparser.get_name_of_ixorg_from_ixlan())
if __name__ == "__main__":
main()
|
andydavidson/pypeer
|
bin/get_myexchanges_connected.py
|
Python
|
mit
| 2,290
|
import re
from pyinfra import logger
from pyinfra.api import FactBase
from .util.packaging import parse_packages
BREW_REGEX = r'^([^\s]+)\s([0-9\._+a-z\-]+)'
def new_cask_cli(version):
'''
Returns true if brew is version 2.6.0 or later and thus has the new CLI for casks.
i.e. we need to use brew list --cask instead of brew cask list
See https://brew.sh/2020/12/01/homebrew-2.6.0/
The version string returned by BrewVersion is a list of major, minor, patch version numbers
'''
return (version[0] >= 3) or ((version[0] >= 2) and version[1] >= 6)
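# Quick sanity check of the cut-off (comments only):
#   new_cask_cli([2, 5, 8]) -> False  (old "brew cask list" CLI)
#   new_cask_cli([2, 6, 0]) -> True   (new "brew list --cask" CLI)
#   new_cask_cli([3, 0, 1]) -> True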
VERSION_MATCHER = re.compile(r'^Homebrew\s+(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+).*$')
def unknown_version():
return [0, 0, 0]
class BrewVersion(FactBase):
'''
Returns the version of brew installed as a semantic versioning tuple:
.. code:: python
[major, minor, patch]
'''
command = 'brew --version'
requires_command = 'brew'
@staticmethod
def default():
return [0, 0, 0]
def process(self, output):
m = VERSION_MATCHER.match(output[0])
if m is not None:
return [int(m.group(key)) for key in ['major', 'minor', 'patch']]
else:
logger.warning('could not parse version string from brew: %s', output[0])
return self.default()
class BrewPackages(FactBase):
'''
Returns a dict of installed brew packages:
.. code:: python
{
'package_name': ['version'],
}
'''
command = 'brew list --versions'
requires_command = 'brew'
default = dict
def process(self, output):
return parse_packages(BREW_REGEX, output)
class BrewCasks(BrewPackages):
'''
Returns a dict of installed brew casks:
.. code:: python
{
'package_name': ['version'],
}
'''
command = (r'if brew --version | grep -q -e "Homebrew\ +(1\.|2\.[0-5]).*" 1>/dev/null;'
r'then brew cask list --versions; else brew list --cask --versions; fi')
requires_command = 'brew'
class BrewTaps(FactBase):
'''
Returns a list of brew taps.
'''
command = 'brew tap'
requires_command = 'brew'
default = list
def process(self, output):
return output
|
Fizzadar/pyinfra
|
pyinfra/facts/brew.py
|
Python
|
mit
| 2,296
|
import argparse
import os
from fontTools.ttLib import TTFont
from fontTools import subset
def makeWeb(args):
""" Generate TTF/WOFF/WOFF2 fonts """
font = TTFont(args.file)
## TODO: We can remove specialized glyphs, stylistic sets,
## etc. that are not useful on the web in order to minimize the
## file size.
base, ext = os.path.splitext(args.file)
base = os.path.basename(base)
    for flavor in ("ttf", "woff", "woff2"):
        if flavor != "ttf":
            font.flavor = flavor
        font.save(os.path.join(args.dir, base + "." + flavor))
    font.close()
def main():
parser = argparse.ArgumentParser(description="Create web optimised version of Sahel fonts.")
parser.add_argument("file", help="input font to process")
parser.add_argument("dir", help="output directory to write fonts to")
args = parser.parse_args()
makeWeb(args)
if __name__ == "__main__":
main()
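# Example invocation (paths are illustrative):
#   python makeweb.py Sahel-Regular.ttf dist/
# which writes dist/Sahel-Regular.ttf, .woff and .woff2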
|
bateni/qalam-tarash
|
tools/makeweb.py
|
Python
|
mit
| 930
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
usage: chcount.py [-u] [file ...]
Lists each character with non-zero count along with count. Non-printable
characters have ASCII labels or meta-char escapes, as per python
curses.ascii.
The -u option reports only unused byte codes.
Usual read of files from command line args or stdin if none.
'''
import sys
import curses.ascii
label = [curses.ascii.controlnames[i] for i in range(33)]
label.extend([curses.ascii.unctrl(i) for i in range(33, 128)])
label.extend(['#%02X' % i for i in range(128, 256)])
def charcnt(f, char_counts):
    '''Accumulate character counts from file.'''
    while True:
        chunk = f.read(8192)
        if not chunk:
            break
        for byte in chunk:
            char_counts[ord(byte)] += 1
if __name__ == '__main__':
char_counts = [0 for i in range(256)]
args = sys.argv[1:]
unused_codes = False
try:
if args and args[0] == '-u':
unused_codes = True
args = args[1:]
for fn in args:
f = open(fn)
charcnt(f, char_counts)
if not args:
charcnt(sys.stdin, char_counts)
except Exception, e:
sys.stderr.write('%s\n' % e)
sys.stderr.write('%s\n' % __doc__)
sys.exit(1)
for (code, count) in enumerate(char_counts):
if unused_codes and count == 0:
print '%04o %-4s' % (code, label[code])
if ( not unused_codes ) and ( count > 0 ):
print '%7d %-4s' % (count, label[code])
|
evanvliet/vw
|
tools/chcount.py
|
Python
|
mit
| 1,512
|
import socket
import selectors
import types
import os
import sys
import imp
import json
import logging
from urllib.parse import splitnport as parse_addr
from threading import Thread
from thinrpc.message import RpcMessage
from thinrpc.client import RpcRemote
from thinrpc import logger, RECV_SIZE, ENC, OK
################################################################################
# TODO: dynamic namedtuple for 'result' vals
# TODO: better logging solution for "extra" param (wrapper)
def single_threaded_acceptor(srv):
def handle_new_conn(sock):
conn, addr = sock.accept()
conn.setblocking(False)
srv.sel.register(conn, selectors.EVENT_READ, srv._handle)
return handle_new_conn
def single_threaded_destructor(srv, conn):
srv.sel.unregister(conn)
conn.close()
def multi_threaded_destructor(srv, conn):
conn.close()
def multi_thread_handler(srv, conn):
while True:
shutdown = srv._handle(conn)
if shutdown:
break
def multi_threaded_acceptor(srv):
def handle_new_conn(sock):
conn, addr = sock.accept()
Thread(target=multi_thread_handler, args=[srv, conn]).start()
return handle_new_conn
class GolangStyleImplLoaderMeta(type):
'''
Function-loading metaclass inspired by
http://stackoverflow.com/questions/9865455/adding-functions-from-other-files-to-a-python-class
'''
def __new__(cls, name, bases, dct):
modules = [imp.load_source(fn, fn) for fn in os.listdir('.') if fn.startswith('rpc_') and fn.endswith('.py')]
for module in modules:
for nm in dir(module):
f = getattr(module, nm)
if isinstance(f, types.FunctionType):
dct[f.__name__] = f
return super(GolangStyleImplLoaderMeta, cls).__new__(cls, name, bases, dct)
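# Sketch of the convention this loader assumes (file and function names are
# hypothetical): top-level functions in any ./rpc_*.py module become methods
# of the class under construction, and each returns an (err, result) pair.
#
#   # rpc_ping.py
#   def Ping(self, sender):
#       return OK, 'pong'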
################################################################################
class _RpcServer(object):
running = False
sel = selectors.DefaultSelector()
funs = {}
# error check
def error(self, msg):
return RpcMessage(err=msg)
def _checkReady(m):
def proxy(self, *args):
if self.running:
return m(self, *args)
else:
raise Exception("RpcModule is not initialized!")
return proxy
@_checkReady
def _send(self, conn, reply):
conn.sendall(reply.Encode(ENC))
@_checkReady
def _handle(self, conn):
sender = RpcRemote(conn.getpeername())
data = conn.recv(RECV_SIZE)
logger.debug("Received data '%s' from client %s", data, sender, extra={"mode":"server"})
if data:
try:
msg = RpcMessage.Decode(data)
method = msg["method"]
logger.debug("[Client %s][Method %s][Msg %s]", sender, method, msg, extra={"mode":"server"})
if method in self.funs:
fun = self.funs[method]
err, val = self._dispatch(sender, msg, fun)
logger.debug("[Client %s][Method %s][Result %s]", sender, method, val, extra={"mode":"server"})
reply = RpcMessage(err=err, result=val)
self._send(conn, reply)
else:
logger.debug("[Client %s][Method %s][NoSuchMethod]", sender, method, extra={"mode":"server"})
self._send(conn, self.error("no such method"))
            except ValueError as e:
                self._send(conn, self.error("malformed message: %s" % str(e)))
                # msg/method may be unbound if decoding failed, so log the raw data
                logger.debug("[Client %s][BadData %s]", sender, data, extra={"mode":"server"})
else:
self.conn_destructor(self, conn)
return True
@_checkReady
def _dispatch(self, sender, msg, fun):
# skip 'self' arg
argnames = fun.__code__.co_varnames[1:fun.__code__.co_argcount]
msg[argnames[0]] = sender
args = [msg[arg] for arg in argnames]
return fun(self.app, *args)
def Init(self, app, single_threaded=True):
logger.info("Starting server on %s, single_threaded=%s", app.addr, single_threaded, extra={"mode":"server"})
self.app = app
self.iface, self.port = app.addr
self.running = True
self.killsock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sel.register(self.killsock, selectors.EVENT_READ, lambda f:3)
if single_threaded:
self.acceptor = single_threaded_acceptor
self.conn_destructor = single_threaded_destructor
else:
self.acceptor = multi_threaded_acceptor
self.conn_destructor = multi_threaded_destructor
self.sock = socket.socket()
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((self.iface, self.port))
self.sock.listen(10)
self.sock.setblocking(False)
self.sel.register(self.sock, selectors.EVENT_READ, self.acceptor(self))
def run():
logger.info("Server started", extra={"mode":"server"})
while self.running:
events = self.sel.select()
for key, _ in events:
cb = key.data
cb(key.fileobj)
self.t = Thread(target=run, name="RpcDispatcher %s:%s" % (self.iface, self.port))
self.t.start()
def Method(self, f):
self.funs[f.__name__] = f
return f
class RpcApplication(object, metaclass=GolangStyleImplLoaderMeta):
def Start(self, **kwargs):
RpcModule.Init(self, **kwargs)
#TODO: channel-based stopping mechanism
def Stop(self):
logger.info("Stopping RPC server....", extra={"mode":"server"})
RpcModule.running = False
RpcModule.t.join()
logger.info("Server stopped", extra={"mode":"server"})
def Kill(self):
self.Stop()
logger.info("Exiting...", extra={"mode":"server"})
sys.exit(1)
RpcModule = _RpcServer()
|
anrosent/thinrpc
|
thinrpc/server.py
|
Python
|
mit
| 6,073
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-01-22"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request_initial(
subscription_id: str,
resource_group_name: str,
resource_name: str,
*,
json: JSONType = None,
content: Any = None,
if_match: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2018-01-22"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = _SERIALIZER.header("if_match", if_match, 'str')
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_request_initial(
subscription_id: str,
resource_group_name: str,
resource_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2018-01-22"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-01-22"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_subscription_request(
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-01-22"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Devices/IotHubs')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_resource_group_request(
subscription_id: str,
resource_group_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-01-22"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_stats_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-01-22"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/IotHubStats')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_valid_skus_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-01-22"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/skus')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_event_hub_consumer_groups_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
event_hub_endpoint_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-01-22"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
"eventHubEndpointName": _SERIALIZER.url("event_hub_endpoint_name", event_hub_endpoint_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_event_hub_consumer_group_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
event_hub_endpoint_name: str,
name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-01-22"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
"eventHubEndpointName": _SERIALIZER.url("event_hub_endpoint_name", event_hub_endpoint_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_event_hub_consumer_group_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
event_hub_endpoint_name: str,
name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-01-22"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
"eventHubEndpointName": _SERIALIZER.url("event_hub_endpoint_name", event_hub_endpoint_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_event_hub_consumer_group_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
event_hub_endpoint_name: str,
name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-01-22"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
"eventHubEndpointName": _SERIALIZER.url("event_hub_endpoint_name", event_hub_endpoint_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_jobs_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-01-22"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/jobs')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_job_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
job_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-01-22"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/jobs/{jobId}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
"jobId": _SERIALIZER.url("job_id", job_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_quota_metrics_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-01-22"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/quotaMetrics')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_check_name_availability_request(
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2018-01-22"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Devices/checkNameAvailability')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_list_keys_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-01-22"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/listkeys')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_keys_for_key_name_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
key_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-01-22"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/IotHubKeys/{keyName}/listkeys')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
"keyName": _SERIALIZER.url("key_name", key_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_export_devices_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2018-01-22"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/exportDevices')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_import_devices_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2018-01-22"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/importDevices')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
class IotHubResourceOperations(object):
"""IotHubResourceOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.iothub.v2018_01_22.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.IotHubDescription":
"""Get the non-security related metadata of an IoT hub.
Get the non-security related metadata of an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IotHubDescription, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2018_01_22.models.IotHubDescription
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
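    # Usage sketch (resource names are illustrative; assumes azure-identity
    # and a management client that attaches this operation group as
    # `iot_hub_resource`):
    #
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.iothub import IotHubClient
    #   client = IotHubClient(DefaultAzureCredential(), "<subscription-id>")
    #   hub = client.iot_hub_resource.get("my-rg", "my-hub")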
def _create_or_update_initial(
self,
resource_group_name: str,
resource_name: str,
iot_hub_description: "_models.IotHubDescription",
if_match: Optional[str] = None,
**kwargs: Any
) -> "_models.IotHubDescription":
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(iot_hub_description, 'IotHubDescription')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
if_match=if_match,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
resource_name: str,
iot_hub_description: "_models.IotHubDescription",
if_match: Optional[str] = None,
**kwargs: Any
) -> LROPoller["_models.IotHubDescription"]:
"""Create or update the metadata of an IoT hub.
        Create or update the metadata of an IoT hub. The usual pattern to modify a property is to
        retrieve the IoT hub metadata and security metadata, and then combine them with the modified
        values in a new body to update the IoT hub. If certain properties are missing from the JSON,
        updating the IoT hub may cause those values to fall back to their defaults, which may lead to
        unexpected behavior.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param iot_hub_description: The IoT hub metadata and security metadata.
:type iot_hub_description: ~azure.mgmt.iothub.v2018_01_22.models.IotHubDescription
:param if_match: ETag of the IoT Hub. Do not specify for creating a brand new IoT Hub. Required
to update an existing IoT Hub.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either IotHubDescription or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.iothub.v2018_01_22.models.IotHubDescription]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescription"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
iot_hub_description=iot_hub_description,
if_match=if_match,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
def _update_initial(
self,
resource_group_name: str,
resource_name: str,
iot_hub_tags: "_models.TagsResource",
**kwargs: Any
) -> "_models.IotHubDescription":
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(iot_hub_tags, 'TagsResource')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
@distributed_trace
def begin_update(
self,
resource_group_name: str,
resource_name: str,
iot_hub_tags: "_models.TagsResource",
**kwargs: Any
) -> LROPoller["_models.IotHubDescription"]:
"""Update an existing IoT Hubs tags.
Update an existing IoT Hub tags. to update other fields use the CreateOrUpdate method.
:param resource_group_name: Resource group identifier.
:type resource_group_name: str
:param resource_name: Name of iot hub to update.
:type resource_name: str
:param iot_hub_tags: Updated tag information to set into the iot hub instance.
:type iot_hub_tags: ~azure.mgmt.iothub.v2018_01_22.models.TagsResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either IotHubDescription or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.iothub.v2018_01_22.models.IotHubDescription]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescription"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
iot_hub_tags=iot_hub_tags,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
def _delete_initial(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> Optional[Union["_models.IotHubDescription", "_models.ErrorDetails"]]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional[Union["_models.IotHubDescription", "_models.ErrorDetails"]]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if response.status_code == 404:
deserialized = self._deserialize('ErrorDetails', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> LROPoller[Union["_models.IotHubDescription", "_models.ErrorDetails"]]:
"""Delete an IoT hub.
Delete an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either IotHubDescription or ErrorDetails or the
result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.iothub.v2018_01_22.models.IotHubDescription
or ~azure.mgmt.iothub.v2018_01_22.models.ErrorDetails]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[Union["_models.IotHubDescription", "_models.ErrorDetails"]]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
@distributed_trace
def list_by_subscription(
self,
**kwargs: Any
) -> Iterable["_models.IotHubDescriptionListResult"]:
"""Get all the IoT hubs in a subscription.
Get all the IoT hubs in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IotHubDescriptionListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2018_01_22.models.IotHubDescriptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescriptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
template_url=self.list_by_subscription.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("IotHubDescriptionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Devices/IotHubs'} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> Iterable["_models.IotHubDescriptionListResult"]:
"""Get all the IoT hubs in a resource group.
Get all the IoT hubs in a resource group.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IotHubDescriptionListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2018_01_22.models.IotHubDescriptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescriptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("IotHubDescriptionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs'} # type: ignore
@distributed_trace
def get_stats(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.RegistryStatistics":
"""Get the statistics from an IoT hub.
Get the statistics from an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RegistryStatistics, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2018_01_22.models.RegistryStatistics
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegistryStatistics"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_stats_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get_stats.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('RegistryStatistics', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/IotHubStats'} # type: ignore
@distributed_trace
def get_valid_skus(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> Iterable["_models.IotHubSkuDescriptionListResult"]:
"""Get the list of valid SKUs for an IoT hub.
Get the list of valid SKUs for an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IotHubSkuDescriptionListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2018_01_22.models.IotHubSkuDescriptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubSkuDescriptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_get_valid_skus_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get_valid_skus.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_get_valid_skus_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("IotHubSkuDescriptionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
get_valid_skus.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/skus'} # type: ignore
@distributed_trace
def list_event_hub_consumer_groups(
self,
resource_group_name: str,
resource_name: str,
event_hub_endpoint_name: str,
**kwargs: Any
) -> Iterable["_models.EventHubConsumerGroupsListResult"]:
"""Get a list of the consumer groups in the Event Hub-compatible device-to-cloud endpoint in an
IoT hub.
Get a list of the consumer groups in the Event Hub-compatible device-to-cloud endpoint in an
IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint.
:type event_hub_endpoint_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EventHubConsumerGroupsListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2018_01_22.models.EventHubConsumerGroupsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EventHubConsumerGroupsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_event_hub_consumer_groups_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
event_hub_endpoint_name=event_hub_endpoint_name,
template_url=self.list_event_hub_consumer_groups.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_event_hub_consumer_groups_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
event_hub_endpoint_name=event_hub_endpoint_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("EventHubConsumerGroupsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_event_hub_consumer_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups'} # type: ignore
@distributed_trace
def get_event_hub_consumer_group(
self,
resource_group_name: str,
resource_name: str,
event_hub_endpoint_name: str,
name: str,
**kwargs: Any
) -> "_models.EventHubConsumerGroupInfo":
"""Get a consumer group from the Event Hub-compatible device-to-cloud endpoint for an IoT hub.
Get a consumer group from the Event Hub-compatible device-to-cloud endpoint for an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint in the IoT hub.
:type event_hub_endpoint_name: str
:param name: The name of the consumer group to retrieve.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: EventHubConsumerGroupInfo, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2018_01_22.models.EventHubConsumerGroupInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EventHubConsumerGroupInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_event_hub_consumer_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
event_hub_endpoint_name=event_hub_endpoint_name,
name=name,
template_url=self.get_event_hub_consumer_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('EventHubConsumerGroupInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_event_hub_consumer_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}'} # type: ignore
@distributed_trace
def create_event_hub_consumer_group(
self,
resource_group_name: str,
resource_name: str,
event_hub_endpoint_name: str,
name: str,
**kwargs: Any
) -> "_models.EventHubConsumerGroupInfo":
"""Add a consumer group to an Event Hub-compatible endpoint in an IoT hub.
Add a consumer group to an Event Hub-compatible endpoint in an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint in the IoT hub.
:type event_hub_endpoint_name: str
:param name: The name of the consumer group to add.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: EventHubConsumerGroupInfo, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2018_01_22.models.EventHubConsumerGroupInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EventHubConsumerGroupInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_create_event_hub_consumer_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
event_hub_endpoint_name=event_hub_endpoint_name,
name=name,
template_url=self.create_event_hub_consumer_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('EventHubConsumerGroupInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_event_hub_consumer_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}'} # type: ignore
@distributed_trace
def delete_event_hub_consumer_group(
self,
resource_group_name: str,
resource_name: str,
event_hub_endpoint_name: str,
name: str,
**kwargs: Any
) -> None:
"""Delete a consumer group from an Event Hub-compatible endpoint in an IoT hub.
Delete a consumer group from an Event Hub-compatible endpoint in an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint in the IoT hub.
:type event_hub_endpoint_name: str
:param name: The name of the consumer group to delete.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_event_hub_consumer_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
event_hub_endpoint_name=event_hub_endpoint_name,
name=name,
template_url=self.delete_event_hub_consumer_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_event_hub_consumer_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}'} # type: ignore
@distributed_trace
def list_jobs(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> Iterable["_models.JobResponseListResult"]:
"""Get a list of all the jobs in an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry.
Get a list of all the jobs in an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either JobResponseListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2018_01_22.models.JobResponseListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResponseListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_jobs_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list_jobs.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_jobs_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("JobResponseListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_jobs.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/jobs'} # type: ignore
@distributed_trace
def get_job(
self,
resource_group_name: str,
resource_name: str,
job_id: str,
**kwargs: Any
) -> "_models.JobResponse":
"""Get the details of a job from an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry.
Get the details of a job from an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param job_id: The job identifier.
:type job_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: JobResponse, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2018_01_22.models.JobResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_job_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
job_id=job_id,
template_url=self.get_job.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('JobResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_job.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/jobs/{jobId}'} # type: ignore
@distributed_trace
def get_quota_metrics(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> Iterable["_models.IotHubQuotaMetricInfoListResult"]:
"""Get the quota metrics for an IoT hub.
Get the quota metrics for an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IotHubQuotaMetricInfoListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2018_01_22.models.IotHubQuotaMetricInfoListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubQuotaMetricInfoListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_get_quota_metrics_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get_quota_metrics.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_get_quota_metrics_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("IotHubQuotaMetricInfoListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
get_quota_metrics.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/quotaMetrics'} # type: ignore
@distributed_trace
def check_name_availability(
self,
operation_inputs: "_models.OperationInputs",
**kwargs: Any
) -> "_models.IotHubNameAvailabilityInfo":
"""Check if an IoT hub name is available.
Check if an IoT hub name is available.
:param operation_inputs: Set the name parameter in the OperationInputs structure to the name of
the IoT hub to check.
:type operation_inputs: ~azure.mgmt.iothub.v2018_01_22.models.OperationInputs
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IotHubNameAvailabilityInfo, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2018_01_22.models.IotHubNameAvailabilityInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubNameAvailabilityInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
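        # The request body is built by serializing the OperationInputs model to
        # JSON with the client's serializer before handing it to the request builder.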
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(operation_inputs, 'OperationInputs')
request = build_check_name_availability_request(
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.check_name_availability.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('IotHubNameAvailabilityInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Devices/checkNameAvailability'} # type: ignore
@distributed_trace
def list_keys(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> Iterable["_models.SharedAccessSignatureAuthorizationRuleListResult"]:
"""Get the security metadata for an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security.
Get the security metadata for an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SharedAccessSignatureAuthorizationRuleListResult
or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2018_01_22.models.SharedAccessSignatureAuthorizationRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedAccessSignatureAuthorizationRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_keys_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list_keys.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_keys_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("SharedAccessSignatureAuthorizationRuleListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/listkeys'} # type: ignore
@distributed_trace
def get_keys_for_key_name(
self,
resource_group_name: str,
resource_name: str,
key_name: str,
**kwargs: Any
) -> "_models.SharedAccessSignatureAuthorizationRule":
"""Get a shared access policy by name from an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security.
Get a shared access policy by name from an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param key_name: The name of the shared access policy.
:type key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SharedAccessSignatureAuthorizationRule, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2018_01_22.models.SharedAccessSignatureAuthorizationRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedAccessSignatureAuthorizationRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_keys_for_key_name_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
key_name=key_name,
template_url=self.get_keys_for_key_name.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SharedAccessSignatureAuthorizationRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_keys_for_key_name.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/IotHubKeys/{keyName}/listkeys'} # type: ignore
@distributed_trace
def export_devices(
self,
resource_group_name: str,
resource_name: str,
export_devices_parameters: "_models.ExportDevicesRequest",
**kwargs: Any
) -> "_models.JobResponse":
"""Exports all the device identities in the IoT hub identity registry to an Azure Storage blob
container. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities.
Exports all the device identities in the IoT hub identity registry to an Azure Storage blob
container. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param export_devices_parameters: The parameters that specify the export devices operation.
:type export_devices_parameters: ~azure.mgmt.iothub.v2018_01_22.models.ExportDevicesRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: JobResponse, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2018_01_22.models.JobResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(export_devices_parameters, 'ExportDevicesRequest')
request = build_export_devices_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self.export_devices.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('JobResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/exportDevices'} # type: ignore
@distributed_trace
def import_devices(
self,
resource_group_name: str,
resource_name: str,
import_devices_parameters: "_models.ImportDevicesRequest",
**kwargs: Any
) -> "_models.JobResponse":
"""Import, update, or delete device identities in the IoT hub identity registry from a blob. For
more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities.
Import, update, or delete device identities in the IoT hub identity registry from a blob. For
more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param import_devices_parameters: The parameters that specify the import devices operation.
:type import_devices_parameters: ~azure.mgmt.iothub.v2018_01_22.models.ImportDevicesRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: JobResponse, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2018_01_22.models.JobResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(import_devices_parameters, 'ImportDevicesRequest')
request = build_import_devices_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self.import_devices.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('JobResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
import_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/importDevices'} # type: ignore
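
# A minimal usage sketch (not part of the generated client): these operations
# are normally reached through IotHubClient rather than instantiated directly.
# The subscription id, resource group, and hub name below are hypothetical
# placeholders.
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.iothub import IotHubClient

    client = IotHubClient(DefaultAzureCredential(), "00000000-0000-0000-0000-000000000000")
    # Enumerate hubs in the subscription (ItemPaged handles the paging shown above).
    for hub in client.iot_hub_resource.list_by_subscription():
        print(hub.name)
    # Fetch identity-registry statistics for one hub.
    stats = client.iot_hub_resource.get_stats("my-resource-group", "my-iot-hub")
    print(stats.total_device_count)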
|
Azure/azure-sdk-for-python
|
sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2018_01_22/operations/_iot_hub_resource_operations.py
|
Python
|
mit
| 99,409
|
'''This module implements a factory pattern for licenses.
'''
import pkgutil
import importlib
import os
class LicenseFactory:
@staticmethod
    def get(name='gpl'):
        # Default to the base license when no concrete module matches `name`.
        license = GeneralLicense
        pack, factory_file = os.path.split(__file__)
        # Discover every module in this package except the factory module itself.
        modnames = [m for _, m, _ in pkgutil.iter_modules([pack])]
        for modname in modnames:
            if modname in factory_file:
                continue
            module = importlib.import_module(
                package='licenses',
                name='.{}'.format(modname))
            # Each license module is expected to expose a `License` class.
            l = getattr(module, 'License')
            if l.is_type(name):
                license = l
            else:
                del l
        return license()
class classproperty:
    """Descriptor that makes a method readable as a class-level property."""
    def __init__(self, f):
        self.f = f
    def __get__(self, obj, owner):
        return self.f(owner)
class GeneralLicense:
_labels = []
name = ''
description = ''
text = ''
@classproperty
def attr(cls):
return {k:v for k,v in cls.__dict__.items()
if type(v)==str and not k.startswith('_')}
@classmethod
def is_type(cls, name):
return name.lower() in [i.lower() for i in cls._labels]
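
# A minimal usage sketch, assuming a concrete license module (e.g. licenses/mit.py
# exposing a `License` class whose `_labels` include 'mit') exists in the package;
# the base GeneralLicense is returned when no module matches.
if __name__ == "__main__":
    lic = LicenseFactory.get('mit')
    # `attr` collects the public string attributes (name, description, text).
    print(lic.attr)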
|
nullhack/python-template
|
licenses/util.py
|
Python
|
mit
| 1,153
|
def permute(sequence):
    "Given an input sequence, generate all permutations of that sequence."
    if not sequence:
        return []
    # Build permutations incrementally: start with the empty permutation and
    # insert each element of the input at every possible position.
    perms = [tuple()]
    for elem in sequence:
        next_perms = []
        for perm in perms:
            for next_perm in _add_element(perm, elem):
                next_perms.append(next_perm)
        perms = next_perms
    return perms
def _add_element(sequence, elem):
for i in range(len(sequence) + 1): # insert elem in every position
copy = list(sequence)
copy.insert(i, elem)
yield copy
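
# Example: permutations are built as lists, inserting each new element at every
# position, so permute([1, 2]) yields [[2, 1], [1, 2]].
if __name__ == "__main__":
    result = permute([1, 2, 3])
    assert len(result) == 6
    print(result)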
|
calebperkins/algorithms
|
algorithms/permutations.py
|
Python
|
mit
| 565
|
import datetime
from bson import ObjectId
from pymongo import MongoClient, DESCENDING, ASCENDING
__author__ = 'Peipei YI'
# connect on the default host and port
client = MongoClient()
# connect on a specific host and port
# client = MongoClient('localhost', 27017)
# connect by the MongoDB URI format
# client = MongoClient('mongodb://localhost:27017/')
# get a database by attribute style
db = client.test
# get a database by dictionary style
# db = client['test']
# get a collection
collection = db.test1
# collection = db['test1']
# Databases and collections in MongoDB are created lazily:
# they are created when the first document is inserted into them.
# documents are represented and stored in JSON style
# use dictionary to represent document
post = {'author': 'Mike',
'text': 'My first post!',
'tags': ['mongodb', 'python', 'pymongo'],
'date': datetime.datetime.now()}
print post
# native python types (like datetime.datetime instances)
# will be automatically converted to and from BSON types.
# insert a document
posts = db.posts
# post_id = posts.insert(post)
# print post_id
# insert() returns the value of "_id"
# when a document is inserted a special key, "_id", is automatically added
# if the document doesn't already contain an "_id" key.
# "_id" must be unique across the collection.
# list all collections
print db.collection_names()
# [u'system.indexes', u'posts']
# get a single document
print posts.find_one()
# {u'date': datetime.datetime(2014, 2, 20, 21, 18, 31, 651000),
# u'text': u'My first post!',
# u'_id': ObjectId('530600a77c7a1e3524b24dca'),
# u'author': u'Mike',
# u'tags': [u'mongodb', u'python', u'pymongo']}
# MongoDB stores data in BSON format.
# BSON strings are UTF-8 encoded
# so PyMongo must ensure that any strings it stores
# contain only valid UTF-8 data.
# Regular strings (<type 'str'>) are validated and stored unaltered.
# Unicode strings (<type 'unicode'>) are encoded as UTF-8 first.
# The reason why we get u'Mike' back is that
# PyMongo decodes each BSON string into a Python unicode string, not a regular str.
print posts.find_one({'author': 'mike'})
# None
# query by ObjectId, not just its string representation
post_id = ObjectId('53060f1b7c7a1e4d98b785e7')
print posts.find_one({'_id': post_id})
# bulk insert, by passing it an iterable
new_posts = [{'author': 'Mike',
'text': 'Another post!',
'tags': ['bulk', 'insert'],
'date': datetime.datetime.now()},
{'author': 'Eliot',
'title': 'MongoDB is fun', # MongoDB is schema-free
'text': 'and pretty easy too!',
'date': datetime.datetime(2012, 12, 20)}]
# post_ids = posts.insert(new_posts)
# print post_ids
# [ObjectId('53060f677c7a1e3ab8e1a1d2'), ObjectId('53060f677c7a1e3ab8e1a1d3')]
# query for more than one document: find() returns a Cursor instance, which allows iteration
print 'query for more than one document'
# for post in posts.find():
for post in posts.find({'author': 'Mike'}):
print post
# counting: when we just want to know how many documents match a query
print posts.count() # count of all documents in a collection
print posts.find({'author': 'Mike'}).count() # count of matched document
# range queries, using {'$gt': d}
d = datetime.datetime(2012, 12, 30)
for post in posts.find({'date': {'$gt': d}}).sort('author'):
print post
# indexing, to make queries fast
# use the explain() method to see how the query is performed without the index
# print posts.find({'date': {'$lt': d}}).sort('author').explain()['cursor']
# u'BasicCursor'
# print posts.find({'date': {'$lt': d}}).sort('author').explain()['nscanned']
# 4
# we can see that the query is using a BasicCursor and scanning all documents in the collection.
# now, let's add a compound index and look at the same information
# print posts.create_index([('date', DESCENDING), ('author', ASCENDING)])
# u'date_-1_author_1'
# try explain() again
# print posts.find({'date': {'$gt': d}}).sort('author').explain()['cursor']
# CAUTION: $gt might cause "OverflowError: Python int too large to convert to C long"
print posts.find({'date': {'$lt': d}}).sort('author').explain()['cursor']
# u'BtreeCursor date_-1_author_1'
print posts.find({'date': {'$lt': d}}).sort('author').explain()['nscanned']
# 1
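# NOTE: this script targets Python 2 and a legacy PyMongo/MongoDB; in PyMongo 3+
# the calls above have modern replacements (shown commented out as a sketch):
# post_id = posts.insert_one(post).inserted_id
# post_ids = posts.insert_many(new_posts).inserted_ids
# n = posts.count_documents({'author': 'Mike'})
# posts.create_index([('date', DESCENDING), ('author', ASCENDING)])
# the explain() output format also changed on newer servers
# (queryPlanner/executionStats instead of 'cursor'/'nscanned')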
|
yipeipei/peppy
|
_tryout/try_mongo.py
|
Python
|
mit
| 4,344
|
import telebot
import logging
from apscheduler.schedulers.background import BackgroundScheduler
from plugins.bot import BotBot
from plugins.bug import BugBot
from plugins.zao import ZaoBot
from plugins.help import HelpBot
from plugins.event import EventBot
telebot.logger.setLevel(logging.DEBUG)
def readfile(filename):
with open(filename, 'r') as f:
return f.read().strip()
sched = BackgroundScheduler()
bot = telebot.TeleBot(readfile('token.txt'))
#BugBot(bot, sched).bind()
ZaoBot(bot, sched).bind()
BotBot(bot, sched).bind()
HelpBot(bot, sched).bind()
EventBot(bot, sched).bind()
sched.start()
bot.polling()
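
# A hypothetical plugin sketch (not from this repo) showing the shape the
# Bot/Zao/Help/Event plugins above follow: take the bot and scheduler in the
# constructor, then register handlers in bind().
#
# class PingBot:
#     def __init__(self, bot, sched):
#         self.bot = bot
#         self.sched = sched
#     def bind(self):
#         @self.bot.message_handler(commands=['ping'])
#         def ping(message):
#             self.bot.reply_to(message, 'pong')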
|
huiyiqun/zaobot
|
start.py
|
Python
|
mit
| 630
|