| repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6–947k) | score (float64 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
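Each row stores one Python file sample split into `prefix`, `middle`, and `suffix` columns, which suggests a fill-in-the-middle layout; the full snippet is presumably recovered by concatenating the three fields. A minimal sketch of that reconstruction (the `row` mapping is hypothetical; only the column names come from the schema above):

```python
def reconstruct_source(row):
    """Rebuild a sample's text from its fill-in-the-middle fields."""
    # `row` is assumed to be a mapping with the schema's string columns.
    return row["prefix"] + row["middle"] + row["suffix"]
```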
| ibadami/pytorch-semseg | ptsemseg/models/pspnet.py | Python | mit | 3,018 | 0.004639 |
import torch.nn as nn
from utils import *
class pspnet(nn.Module):
def __init__(self, feature_scale=4, n_classes=21, is_deconv=True, in_channels=3, is_batchnorm=True):
super(pspnet, self).__init__()
self.is_deconv = is_deconv
self.in_channels = in_channels
self.is_batchnorm = is_batchnorm
self.feature_scale = feature_scale
self.layers = [2, 2, 2, 2] # Currently hardcoded for ResNet-18
filters = [64, 128, 256, 512]
# filters = [x / self.feature_scale for x in filters]
self.inplanes = filters[0]
# Encoder
        self.convbnrelu1 = conv2DBatchNormRelu(in_channels=3, k_size=7, n_filters=64,
                                               padding=3, stride=2, bias=False)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
block = residualBlock
self.encoder1 = self._make_layer(block, filters[0], self.layers[0])
self.encoder2 = self._make_layer(block, filters[1], self.layers[1], stride=2)
self.encoder3 = self._make_layer(block, filters[2], self.layers[2], stride=2)
self.encoder4 = self._make_layer(block, filters[3], self.layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7)
# Decoder
self.decoder4 = linknetUp(filters[3], filters[2])
self.decoder3 = linknetUp(filters[2], filters[1])
self.decoder2 = linknetUp(filters[1], filters[0])
self.decoder1 = linknetUp(filters[0], filters[0])
# Final Classifier
        self.finaldeconvbnrelu1 = deconv2DBatchNormRelu(filters[0], int(32/feature_scale), 2, 2, 0)
        self.finalconvbnrelu2 = conv2DBatchNormRelu(in_channels=int(32/feature_scale), k_size=3, n_filters=int(32/feature_scale), padding=1, stride=1)
self.finalconv3 = nn.Conv2d(int(32/feature_scale), int(n_classes), 3, 1, 1)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = conv2DBatchNorm(self.inplanes, planes*block.expansion, k_size=1, stride=stride, padding=0, bias=False)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
# Encoder
x = self.convbnrelu1(x)
x = self.maxpool(x)
e1 = self.encoder1(x)
e2 = self.encoder2(e1)
e3 = self.encoder3(e2)
e4 = self.encoder4(e3)
# Decoder with Skip Connections
d4 = self.decoder4(e4)
d4 = d4 + e3
d3 = self.decoder3(d4)
d3 = d3 + e2
d2 = self.decoder2(d3)
d2 = d2 + e1
d1 = self.decoder1(d2)
# Final Classification
f1 = self.finaldeconvbnrelu1(d1)
f2 = self.finalconvbnrelu2(f1)
f3 = self.finalconv3(f2)
return f3
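Despite the `pspnet` name, the decoder wiring above is LinkNet-style: each `linknetUp` stage upsamples its input and is summed with the matching encoder output. A quick smoke test, assuming the `conv2DBatchNormRelu`, `residualBlock`, `linknetUp`, and `deconv2DBatchNormRelu` helpers resolve from `utils`:

```python
import torch

# Hypothetical smoke test; the input size must divide cleanly through the
# strides above so the skip-connection shapes line up (224x224 works).
model = pspnet(n_classes=21)
x = torch.randn(1, 3, 224, 224)
out = model(x)
print(out.shape)  # expected: torch.Size([1, 21, 224, 224])
```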
| TheWylieStCoyote/gnuradio | gr-fec/python/fec/extended_encoder.py | Python | gpl-3.0 | 2,704 | 0.003328 |
#!/usr/bin/env python
#
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from __future__ import absolute_import
from __future__ import unicode_literals
from gnuradio import gr, blocks
from . import fec_swig as fec
from .threaded_encoder import threaded_encoder
from .capillary_threaded_encoder import capillary_threaded_encoder
from .bitflip import read_bitlist
class extended_encoder(gr.hier_block2):
def __init__(self, encoder_obj_list, threading, puncpat=None):
gr.hier_block2.__init__(self, "extended_encoder",
gr.io_signature(1, 1, gr.sizeof_char),
gr.io_signature(1, 1, gr.sizeof_char))
self.blocks=[]
self.puncpat=puncpat
if(type(encoder_obj_list) == list):
if(type(encoder_obj_list[0]) == list):
gr.log.info("fec.extended_encoder: Parallelism must be 1.")
raise AttributeError
else:
# If it has parallelism of 0, force it into a list of 1
encoder_obj_list = [encoder_obj_list,]
if fec.get_encoder_input_conversion(encoder_obj_list[0]) == "pack":
self.blocks.append(blocks.pack_k_bits_bb(8))
if threading == 'capillary':
self.blocks.append(capillary_threaded_encoder(encoder_obj_list,
gr.sizeof_char,
gr.sizeof_char))
elif threading == 'ordinary':
self.blocks.append(threaded_encoder(encoder_obj_list,
gr.sizeof_char,
gr.sizeof_char))
else:
self.blocks.append(fec.encoder(encoder_obj_list[0],
gr.sizeof_char,
gr.sizeof_char))
if fec.get_encoder_output_conversion(encoder_obj_list[0]) == "packed_bits":
self.blocks.append(blocks.packed_to_unpacked_bb(1, gr.GR_MSB_FIRST))
if self.puncpat != '11':
self.blocks.append(fec.puncture_bb(len(puncpat), read_bitlist(puncpat), 0))
# Connect the input to the encoder and the output to the
# puncture if used or the encoder if not.
        self.connect((self, 0), (self.blocks[0], 0))
        self.connect((self.blocks[-1], 0), (self, 0))
# If using the puncture block, add it into the flowgraph after
# the encoder.
for i in range(len(self.blocks) - 1):
            self.connect((self.blocks[i], 0), (self.blocks[i+1], 0))
| CVBDL/ccollab2eeplatform-python | ccollab2eeplatform/utils.py | Python | mit | 2,434 | 0.000822 |
"""Custom utils."""
from datetime import date
from itertools import groupby as groupby_
def to_isoformat(date_str):
"""Convert an ISO 8601 like date string to standard ISO 8601 format.
Args:
date_str (str): An ISO 8601 like date string.
Returns:
str: A standard ISO 8601 date string.
Examples:
>>> to_isoformat('2017-1-1')
        '2017-01-01'
"""
return from_isoformat(date_str).isoformat()
def from_isoformat(date_str):
"""Create date from iso string."""
message = 'Date should be in ISO 8601 format: "YYYY-MM-DD"'
if not isinstance(date_str, str):
raise Exception(message)
try:
parts = [int(part) for part in date_str.split('-')]
return date(parts[0], parts[1], parts[2])
except:
raise Exception(message)
def month_range(start, stop):
"""Return a year month range.
Args:
start (str): Start year month in format '2016-01'
stop (str): Stop year month in format '2017-01'
Returns:
A list of year month string.
Examples:
>>> month_range('2016-11', '2017-01')
['2016-11', '2016-12', '2017-01']
>>> month_range('2017-01', '2016-11')
['2017-01', '2016-12', '2016-11']
"""
start_date = from_isoformat('{0}-01'.format(start))
stop_date = from_isoformat('{0}-01'.format(stop))
if start_date > stop_date:
        start_date, stop_date = stop_date, start_date
reverse = True
else:
reverse = False
result = []
while start_date <= stop_date:
result.append(start_date.isoformat()[0:7])
year = start_date.year
        month = start_date.month
if month == 12:
year += 1
month = 1
else:
month += 1
start_date = date(year, month, 1)
return reverse and sorted(result, reverse=reverse) or result
def groupby(iterable, key=None, reverse=False):
"""Wrapper of itertools.groupby function.
It make use of built-in itertools.groupby function.
In addition to sort the iterable with the same key as groupby.
Ref: <https://docs.python.org/3/library/itertools.html#itertools.groupby>
"""
if key is None:
key = lambda x: x
return groupby_(sorted(iterable, key=key, reverse=reverse), key)
def lower(func):
def _lower(*args, **kwargs):
return str.lower(str.strip(func(*args, **kwargs)))
return _lower
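Because the wrapper sorts with the same key before delegating to `itertools.groupby`, callers get correct groups from unsorted input. A small usage sketch:

```python
>>> [(k, list(g)) for k, g in groupby(['bb', 'a', 'cc', 'd'], key=len)]
[(1, ['a', 'd']), (2, ['bb', 'cc'])]
```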
| flodolo/bedrock | tests/pages/firefox/developer.py | Python | mpl-2.0 | 870 | 0 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from selenium.webdriver.common.by import By
from pages.base import BasePage
from pages.regions.download_button import DownloadButton
class DeveloperPage(BasePage):
    _URL_TEMPLATE = "/{locale}/firefox/developer/"
_primary_download_locator = (By.ID, "intro-download")
_secondary_download_locator = (By.ID, "footer-download")
@property
def primary_download_button(self):
el = self.find_element(*self._primary_download_locator)
return DownloadButton(self, root=el)
@property
def secondary_download_button(self):
        el = self.find_element(*self._secondary_download_locator)
return DownloadButton(self, root=el)
| aplanas/rally | tests/unit/plugins/openstack/scenarios/ceilometer/test_utils.py | Python | apache-2.0 | 13,257 | 0 |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from rally.plugins.openstack.scenarios.ceilometer import utils
from tests.unit import test
CEILOMETER_UTILS = "rally.plugins.openstack.scenarios.ceilometer.utils"
class CeilometerScenarioTestCase(test.ScenarioTestCase):
def setUp(self):
super(CeilometerScenarioTestCase, self).setUp()
self.scenario = utils.CeilometerScenario(self.context)
def test__list_alarms_by_id(self):
self.assertEqual(self.clients("ceilometer").alarms.get.return_value,
self.scenario._list_alarms("alarm-id"))
self.clients("ceilometer").alarms.get.assert_called_once_with(
"alarm-id")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_alarms")
def test__list_alarms(self):
self.assertEqual(self.clients("ceilometer").alarms.list.return_value,
self.scenario._list_alarms())
self.clients("ceilometer").alarms.list.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_alarms")
def test__create_alarm(self):
alarm_dict = {"alarm_id": "fake-alarm-id"}
orig_alarm_dict = copy.copy(alarm_dict)
self.scenario._generate_random_name = mock.Mock()
self.assertEqual(self.scenario._create_alarm("fake-meter-name", 100,
alarm_dict),
self.clients("ceilometer").alarms.create.return_value)
self.clients("ceilometer").alarms.create.assert_called_once_with(
meter_name="fake-meter-name",
threshold=100,
description="Test Alarm",
alarm_id="fake-alarm-id",
name=self.scenario._generate_random_name.return_value)
# ensure that _create_alarm() doesn't modify the alarm dict as
# a side-effect
self.assertDictEqual(alarm_dict, orig_alarm_dict)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.create_alarm")
def test__delete_alarms(self):
self.scenario._delete_alarm("alarm-id")
self.clients("ceilometer").alarms.delete.assert_called_once_with(
"alarm-id")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.delete_alarm")
def test__update_alarm(self):
alarm_diff = {"description": "Changed Test Description"}
orig_alarm_diff = copy.copy(alarm_diff)
self.scenario._update_alarm("alarm-id", alarm_diff)
self.clients("ceilometer").alarms.update.assert_called_once_with(
"alarm-id", **alarm_diff)
# ensure that _create_alarm() doesn't modify the alarm dict as
# a side-effect
self.assertDictEqual(alarm_diff, orig_alarm_diff)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.update_alarm")
def test__get_alarm_history(self):
self.assertEqual(
self.scenario._get_alarm_history("alarm-id"),
self.clients("ceilometer").alarms.get_history.return_value)
self.clients("ceilometer").alarms.get_history.assert_called_once_with(
"alarm-id")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.get_alarm_history")
def test__get_alarm_state(self):
self.assertEqual(
self.scenario._get_alarm_state("alarm-id"),
self.clients("ceilometer").alarms.get_state.return_value)
self.clients("ceilometer").alarms.get_state.assert_called_once_with(
"alarm-id")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.get_alarm_state")
def test__set_alarm_state(self):
alarm = mock.Mock()
self.clients("ceilometer").alarms.create.return_value = alarm
return_alarm = self.scenario._set_alarm_state(alarm, "ok", 100)
self.mock_wait_for.mock.assert_called_once_with(
alarm,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=100, check_interval=1)
self.mock_resource_is.mock.assert_called_once_with("ok")
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_alarm)
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "ceilometer.set_alarm_state")
def test__list_events(self):
self.assertEqual(
self.scenario._list_events(),
            self.admin_clients("ceilometer").events.list.return_value
)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_events")
def test__get_events(self):
self.assertEqual(
self.scenario._get_event(event_id="fake_id"),
self.admin_clients("ceilometer").events.get.return_value
)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.get_event")
def test__list_event_types(self):
self.assertEqual(
self.scenario._list_event_types(),
self.admin_clients("ceilometer").event_types.list.return_value
)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_event_types")
def test__list_event_traits(self):
self.assertEqual(
self.scenario._list_event_traits(
event_type="fake_event_type", trait_name="fake_trait_name"),
self.admin_clients("ceilometer").traits.list.return_value
)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_event_traits")
def test__list_event_trait_descriptions(self):
self.assertEqual(
self.scenario._list_event_trait_descriptions(
event_type="fake_event_type"
),
self.admin_clients("ceilometer").trait_descriptions.list.
return_value
)
self._test_atomic_action_timer(
self.scenario.atomic_actions(),
"ceilometer.list_event_trait_descriptions")
def test__list_meters(self):
self.assertEqual(self.scenario._list_meters(),
self.clients("ceilometer").meters.list.return_value)
self.clients("ceilometer").meters.list.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_meters")
def test__list_resources(self):
self.assertEqual(
self.scenario._list_resources(),
self.clients("ceilometer").resources.list.return_value)
self.clients("ceilometer").resources.list.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_resources")
def test__list_samples(self):
self.assertEqual(
self.scenario._list_samples(),
self.clients("ceilometer").samples.list.return_value)
self.clients("ceil
| SteffenDE/monitornjus-classic | admin/setn.py | Python | mit | 6,805 | 0.031599 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2015 Steffen Deusch
# Licensed under the MIT license
# Beilage zu MonitorNjus, 14.09.2015 (Version 0.9.3)
import os
workingdir = os.path.dirname(os.path.realpath(__file__))
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import imp
modulesdir = workingdir+"/../modules"
common = imp.load_source("common", modulesdir+"/common.py")
def updateurl_refresh(Name, GETNAME, Seite, Nummer, widgname):
if "index" in referer:
gval = form.getfirst(Name, None)
if gval is not None:
val = gval
else:
val = None
if val is not None:
if val == common.getinfo(GETNAME, Seite, Nummer):
pass
else:
common.writeinfo(Seite, Nummer, GETNAME, unicode(val))
elif "widgets" in referer:
gval = form.getfirst(Name, None)
if gval is not None:
val = gval
else:
val = None
if val is not None:
if val == common.getwidgetinfo(widgname, Nummer, GETNAME):
pass
else:
common.writewidgetinfo(widgname, Nummer, GETNAME, unicode(val))
else:
raise Warning("Function updateurl_refresh: This referer does not exist.")
def updateaktiv(Name, GETNAME, Seite, Nummer, widgname, hidden):
if hidden is None:
val_flag = 1
else:
val_flag = 0
if "index" in referer:
if val_flag == common.getinfo(GETNAME, Seite, Nummer):
pass
else:
common.writeinfo(Seite, Nummer, GETNAME, unicode(val_flag))
elif "widgets" in referer:
        if val_flag == common.getwidgetinfo(widgname, Nummer, GETNAME):
pass
else:
common.writewidgetinfo(widgname, Nummer, GETNAME, unicode(val_flag))
else:
raise Warning("Function updateaktiv: This referer does not exist.")
def update_align(Name, GETNAME, widgname, ID):
if "widgets" in referer:
if form.getfirst(Name, None):
val = form.getfirst(Name, None)
else:
val = None
if val is not None:
if unicode(val) == common.getwidgetinfo(widgname, ID, GETNAME):
pass
else:
common.writewidgetinfo(widgname, ID, GETNAME, unicode(val))
else:
raise Warning("Function update_align: This referer is not allowed.")
def updatetime(Seite, Nummer):
if "index" in referer:
uhrzeit = form.getfirst("uhrzeit-"+Seite+"-"+unicode(Nummer), None)
wochentag = form.getfirst("wochentag-"+Seite+"-"+unicode(Nummer), None)
tag = form.getfirst("tag-"+Seite+"-"+unicode(Nummer), None)
monat = form.getfirst("monat-"+Seite+"-"+unicode(Nummer), None)
if uhrzeit is None and wochentag is None and tag is None and monat is None:
pass
else:
if uhrzeit is None:
uhrzeit = "*"
if wochentag is None:
wochentag = "*"
if tag is None:
tag = "*"
if monat is None:
monat = "*"
common.writeinfo(Seite, Nummer, "VONBIS", uhrzeit+"|"+wochentag+"|"+tag+"|"+monat)
else:
raise Warning("Function updatetime: This referer is not allowed.")
def updateteilung():
if "index" in referer:
teilung = form.getfirst("teilung", None)
if teilung is not None:
if teilung == common.readsettings("TEILUNG"):
pass
else:
common.updatesettings("TEILUNG", teilung)
else:
raise Warning("Function updateteilung: This referer is not allowed.")
try:
import cgi, cgitb
#import cgitb; cgitb.enable()
if common.authentication:
auth = imp.load_source("auth", modulesdir+"/auth.py")
auth.me()
form = cgi.FieldStorage()
referer = form.getfirst('referer', None)
if "index" in referer:
refresh = "<meta http-equiv=\"refresh\" content=\"0; URL=../admin/index.py\">"
for item in form:
if not "teilung" in item and not "referer" in item:
splitteditem = item.split("-")
name = splitteditem[0]
seite = splitteditem[1]
nummer = splitteditem[2]
if not "uhrzeit" in item and not "wochentag" in item and not "tag" in item and not "monat" in item:
if not "aktiv" in name.lower():
updateurl_refresh(item, name, seite, nummer, "")
else:
if "hidden." in item.lower() and not item[7:] in form:
hidden = 0
updateaktiv(item[7:], name[7:], seite, nummer, "", hidden)
elif "hidden." in item.lower() and item[7:] in form:
pass
else:
hidden = None
updateaktiv(item, name, seite, nummer, "", hidden)
else:
updatetime(seite, nummer)
else:
updateteilung()
elif "widgets" in referer:
refresh = "<meta http-equiv=\"refresh\" content=\"0; URL=../admin/widgets.py\">"
for item in form:
if not "referer" in item:
splitteditem = item.split("-")
art = splitteditem[0]
typ = splitteditem[1]
ID = splitteditem[2]
if not "aktiv" in art.lower():
if not "url" in art.lower():
update_align(item, art, typ, ID)
else:
updateurl_refresh(item, art, "", ID, typ)
else:
if "hidden." in item.lower() and not item[7:] in form:
hidden = 0
updateaktiv(item[7:], art[7:], "", ID, typ, hidden)
elif "hidden." in item.lower() and item[7:] in form:
pass
else:
hidden = None
updateaktiv(item, art, "", ID, typ, hidden)
elif "row" in referer:
refresh = "<meta http-equiv=\"refresh\" content=\"0; U
|
RL=../admin/index.py\">"
cnum = form.getfirst("createnum", None)
dnum = form.getfirst("delnum"
|
, None)
if cnum is not None and cnum.isdigit():
num = int(cnum)
if num == int(common.getrows())+1:
common.createrow(num)
else:
raise Warning("Neues Displayset - falsche Zahl: "+str(num))
elif dnum is not None and dnum.isdigit():
num = int(dnum)
if num <= int(common.getrows()):
common.delrow(num)
else:
raise Warning("Displayset löschen - falsche Zahl: "+str(num))
elif "newwidget" in referer:
refresh = "<meta http-equiv=\"refresh\" content=\"0; URL=../admin/widgets.py\">"
if form.getfirst("art", None):
val = form.getfirst("art", None)
else:
val = None
if val is not None:
if val == "Logo" or val == "Freies_Widget":
count = list(common.getwidgets())
ID = int(common.maxid())+1
common.newwidget(ID, val, val, 0, "placeholder", "bottom", "0px", "center", "0px", "100%", "100%")
else:
raise Warning("Falsches Widget: "+val)
elif "delwidget" in referer:
refresh = "<meta http-equiv=\"refresh\" content=\"0; URL=../admin/widgets.py\">"
num = form.getfirst("delnum", None)
if num is not None:
common.removewidget(unicode(num))
elif "triggerrefresh" in referer:
refresh = "<meta http-equiv=\"refresh\" content=\"0; URL=../admin/index.py\">"
else:
refresh = ""
out = "Content-Type: text/html;charset=utf-8\n"
out += u"""
<!DOCTYPE html>
<html lang="de">
<head>
<meta charset="UTF-8">"""
#for item in form:
#out += item+": "+form[item].value
out += unicode(refresh)
out += u"""\
</head>
</html>"""
print(unicode(out))
if common.triggerrefresh:
datei = open(workingdir+"/../bin/refresh", "w")
datei.write("1")
datei.close()
except Exception as e:
common.debug(e)
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/shapely/tests/test_collection.py | Python | agpl-3.0 | 870 | 0.006897 |
import unittest
from shapely.geometry import LineString
from shapely.geometry.collection import GeometryCollection
class CollectionTestCase(unittest.TestCase):
def test_array_interface(self):
m = GeometryCollection()
self.failUnlessEqual(len(m), 0)
self.failUnlessEqual(m.geoms, [])
def test_child_with_deleted_parent(self):
# test that we can remove a collection while having
# childs around
a = LineString([(0, 0), (1, 1), (1,2), (2,2)])
        b = LineString([(0, 0), (1, 1), (2,1), (2,2)])
collection = a.intersection(b)
child = collection.geoms[0]
# delete parent of child
del collection
# access geometry, this should not seg fault as 1.2.15 did
child.to_wkt()
def test_suite():
return unittest.TestLoader().loadTestsFromTestCase(CollectionTestCase)
| elbeanio/contented | test/app.py | Python | mit | 312 | 0.003205 |
import unittest
from contented.app import Application
class AppTests(unittest.TestCase):
def test_load_app(self):
app = Application({})
self.assertTrue(hasattr(app, "sett
|
ings"))
self.assertTrue(hasattr(app, "content_map"))
self.assertTrue(hasattr(app, "request_processors"))
| hiphoox/experior | timetracking/models.py | Python | bsd-3-clause | 14,572 | 0.023813 |
from django.db import models
from django_extensions.db.models import TimeStampedModel
from django.contrib.auth.models import User, UserManager
import datetime
####################################################################################################
####################################### Catalogs ################################################
####################################################################################################
class Region(TimeStampedModel):
"""Regions where we have operations"""
name = models.CharField(max_length=80)
description = models.CharField(max_length=100)
enabled = models.BooleanField(default=True)
def __unicode__(self):
return u"%s, %s" % (self.name , self.description)
####################################################################################################
class Currency(TimeStampedModel):
"""Currencies used in contracts"""
name = models.CharField(max_length=80)
description = models.CharField(max_length=100)
enabled = models.BooleanField(default=True)
class Meta:
verbose_name_plural = "Currencies"
    def __unicode__(self):
return u"%s, %s" % (self.name , self.description)
####################################################################################################
class Sector(TimeStampedModel):
"""Business sectors"""
name = models.CharField(max_length=80)
description = models.CharField(max_length=100)
enabled = models.BooleanField(default=True)
def __unicode__(self):
return u"%s, %s" % (self.name , self.description)
####################################################################################################
class Activity(TimeStampedModel):
"""Activities that are predefined by the customer. As in Banorte."""
name = models.CharField(max_length=80)
description = models.CharField(max_length=100)
enabled = models.BooleanField(default=True)
billable = models.BooleanField(default=True)
customer = models.ForeignKey('Company')
class Meta:
verbose_name_plural = "Activities"
def __unicode__(self):
return u"%s, %s" % (self.name , self.description)
####################################################################################################
class Category(TimeStampedModel):
"""(Category description)"""
name = models.CharField(max_length=80)
description = models.CharField(max_length=100)
enabled = models.BooleanField(default=True)
customer = models.ForeignKey('Company')
class Meta:
verbose_name_plural = "Categories"
def __unicode__(self):
return u"%s, %s" % (self.name , self.description)
####################################################################################################
class WorkItem(TimeStampedModel):
"""This could represent a project an artefact or whatever is produced as a result of a worksession"""
name = models.CharField(max_length=80)
description = models.CharField(max_length=100)
enabled = models.BooleanField(default=True)
is_deliverable = models.BooleanField(default=True)
customer = models.ForeignKey('Company')
def __unicode__(self):
return u"%s, %s" % (self.name , self.description)
####################################################################################################
class ProjectType(TimeStampedModel):
"""ProjectType """
name = models.CharField(max_length=80)
description = models.CharField(max_length=100)
enabled = models.BooleanField(default=True)
customer = models.ForeignKey('Company')
def __unicode__(self):
return u"%s, %s" % (self.name , self.description)
####################################################################################################
class ProjectStatus(TimeStampedModel):
"""The current project status. It doesn't have an historic record."""
name = models.CharField(max_length=80)
description = models.CharField(max_length=100)
enabled = models.BooleanField(default=True)
customer = models.ForeignKey('Company')
class Meta:
verbose_name_plural = "Project Statuses"
def __unicode__(self):
return u"%s, %s" % (self.name , self.description)
####################################################################################################
class Application(TimeStampedModel):
"""Customer's applications for a project."""
name = models.CharField(max_length=80)
description = models.CharField(max_length=100)
enabled = models.BooleanField(default=True)
customer = models.ForeignKey('Company')
class Meta:
verbose_name_plural = "Applications"
def __unicode__(self):
return u"%s, %s" % (self.name , self.description)
####################################################################################################
####################################### Domain ##################################################
####################################################################################################
class Employee(User):
"""
We use the django authorization model to represent our employess.
We only define the extra fields required for our timetracking system.
"""
MARITAL_STATUSES = (
(u'M', u'Married'),
(u'S', u'Single'),
)
ENGLISH_LEVELS = (
(u'iBT TOEFL 107-120', u'iBT TOEFL 107-120'),
(u'iBT TOEFL 90-106', u'iBT TOEFL 90-106'),
(u'iBT TOEFL 61-89', u'iBT TOEFL 61-89'),
(u'iBT TOEFL 57-60', u'iBT TOEFL 57-60'),
(u'CPE', u'Cambridge-Certificate of Proficiency in English'),
(u'CAE', u'Cambridge-Certificate in Advance English'),
(u'FCE', u'Cambridge-First Certificate in English'),
(u'PET', u'Cambridge-Preliminary English Test'),
(u'KET', u'Cambridge-Key English Test'),
(u'IELTS 7.5-9.0', u'International English Language Testing System 7.5-9.0'),
(u'IELTS 6.5-7.0', u'International English Language Testing System 6.5-7.0'),
(u'IELTS 5.0-6.0', u'International English Language Testing System 5.0-6.0'),
(u'IELTS 3.5-4.5', u'International English Language Testing System 3.5-4.5'),
(u'IELTS 3.0', u'International English Language Testing System 3.0'),
)
salary = models.DecimalField(max_digits=15, decimal_places=4, help_text="Salary before taxes (Raw)")
is_Manager = models.BooleanField(default=False, help_text="Designates whether this user has a leadership or managerial rol")
telephone = models.CharField(blank=True, null=True, max_length=15)
birth_date = models.DateField(blank=True, null=True)
contract_date = models.DateField(default=datetime.datetime.now)
comments = models.TextField(blank=True, null=True)
has_passport = models.BooleanField(default=True)
is_technical = models.BooleanField(default=False, help_text="Designates whether this user has a technical leadership rol")
can_travel = models.BooleanField(default=False)
english_level = models.CharField(blank=True, null=True, max_length=50, choices=ENGLISH_LEVELS)
marital_status = models.CharField(blank=True, null=True, max_length=15, choices=MARITAL_STATUSES)
# Relationships
region = models.ForeignKey(Region)
def __unicode__(self):
return u"%s, %s" % (self.first_name , self.last_name)
####################################################################################################
class WorkSession(TimeStampedModel):
"""
This class represent a chunk of working time associated to one activity.
We get more flexibility and by the way is easier to register than forcing to use the activity as the unit of work.
In order to support diferent contexts the activity field is optional. In such case we will use the description field instead.
They are mutual exclusive (free or fixed style).
"""
work_date = models.DateField(default=datetime.datetime.today)
time = models.PositiveIntegerField(null=False)
description = models.CharField(blank=True, default='', max_length=100)
comments = models.TextField(blank=True, default='')
billable = models.BooleanField(default=True)
    # Relationships
| josephsl/stationPlaylist | addon/appModules/tracktool.py | Python | gpl-2.0 | 3,301 | 0.019388 |
# StationPlaylist Track Tool
# An app module for NVDA
# Copyright 2014-2021 Joseph Lee, released under GPL.
# Functionality is based on JFW scripts for SPL Track Tool by Brian Hartgen.
# Track Tool allows a broadcaster to manage track intros, cues and so forth.
# Each track is a list item with descriptions such as title, file name, intro time and so forth.
# One can press TAB to move along the controls for Track Tool.
# #155 (21.03): remove __future__ import when NVDA runs under Python 3.10.
from __future__ import annotations
from typing import Optional
import appModuleHandler
import addonHandler
import tones
from NVDAObjects.IAccessible import sysListView32
from .splstudio import splconfig, SPLTrackItem
addonHandler.initTranslation()
# Return a tuple of column headers.
# This is just a thinly disguised indexOf function from Studio's track item class.
def indexOf(ttVersion: str) -> tuple[str, ...]:
# Nine columns per line for each tuple.
if ttVersion < "5.31":
return (
"Artist", "Title", "Duration", "Cue", "Overlap", "Intro", "Outro", "Segue", "Hook Start",
"Hook Len", "Year", "Album", "CD Code", "URL 1", "URL 2", "Genre", "Mood", "Energy",
"Tempo", "BPM", "Gender", "Rating", "Filename", "Client", "Other", "Intro Link", "Outro Link",
"ReplayGain", "Record Label", "ISRC"
)
elif "5.31" <= ttVersion < "6.0":
return (
"Artist", "Title", "Duration", "Cue", "Overlap", "Intro", "Outro", "Segue", "Hook Start",
"Hook Len", "Year", "Album", "CD Code", "URL 1", "URL 2", "Genre", "Mood", "Energy",
"Tempo", "BPM", "Gender", "Rating", "Filename", "Client", "Other", "Intro Link", "Outro Link",
"ReplayGain", "Record Label", "ISRC", "Language"
)
else:
return (
"Artist", "Title", "Durat
|
ion", "Cue", "Overlap", "Intro", "Outro", "Segue", "Hook Start",
"Hook Len", "Year", "Album", "CD Code", "URL 1", "URL 2", "Genre", "Mood", "Energy",
"Tempo", "BPM", "Gender", "Rating", "Filename", "Client", "Other", "Intro Link", "Outro Link",
"ReplayGain", "Reco
|
rd Label", "ISRC", "Language", "Restrictions", "Exclude from Requests"
)
class TrackToolItem(SPLTrackItem):
"""An entry in Track Tool, used to implement some exciting features.
"""
def reportFocus(self):
# Play a beep when intro exists.
if self._getColumnContentRaw(self.indexOf("Intro")) is not None:
tones.beep(550, 100)
super(TrackToolItem, self).reportFocus()
def indexOf(self, header: str) -> Optional[int]:
try:
return indexOf(self.appModule.productVersion).index(header)
except ValueError:
return None
@property
def exploreColumns(self) -> list[str]:
return splconfig.SPLConfig["General"]["ExploreColumnsTT"]
class AppModule(appModuleHandler.AppModule):
def __init__(self, *args, **kwargs):
super(AppModule, self).__init__(*args, **kwargs)
# #64 (18.07): load config database if not done already.
splconfig.openConfig("tracktool")
def terminate(self):
super(AppModule, self).terminate()
splconfig.closeConfig("tracktool")
def chooseNVDAObjectOverlayClasses(self, obj, clsList):
import controlTypes
if obj.windowClassName == "TTntListView.UnicodeClass":
if obj.role == controlTypes.Role.LISTITEM:
clsList.insert(0, TrackToolItem)
elif obj.role == controlTypes.Role.LIST:
clsList.insert(0, sysListView32.List)
| sciyoshi/yamlmod | yamlmod.py | Python | mit | 1,179 | 0.028838 |
import os
import imp
import sys
import yaml
class YamlImportHook:
def find_module(self, fullname, path=None):
name = fullname.split('.')[-1]
for folder in path or sys.path:
if os.path.exists(os.path.join(folder, '%s.yml' % name)):
return self
return None
def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
sys.modules[fullname] = mod = imp.new_module(fullname)
if '.' in fullname:
pkg, name = fullname.rsplit('.', 1)
path = sys.modules[pkg].__path__
else:
pkg, name = '', fullname
            path = sys.path
for folder in path:
if os.path.exists(os.path.join(folder, '%s.yml' % name)):
mod.__file__ = os.path.join(folder, '%s.yml' % name)
mod.__package__ = pkg
mod.__loader__ = self
mod.__dict__.update(yaml.load(open(mod.__file__)) or {})
return mod
# somehow not found, delete from sys.modules
del sys.modules[fullname]
# support reload()ing this module
try:
hook
except NameError:
pass
else:
try:
sys.meta_path.remove(hook)
except ValueError:
# not found, skip removing
pass
# automatically install hook
hook = YamlImportHook()
sys.meta_path.insert(0, hook)
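Once imported, the module registers its hook on `sys.meta_path`, so any `<name>.yml` file reachable from `sys.path` can be imported like a regular module, with its top-level mapping keys exposed as attributes. A hypothetical session (assumes a `config.yml` next to the script):

```python
# config.yml contains:
#   host: example.org
#   port: 8080
import yamlmod  # installing the hook is a side effect of the import
import config   # resolved by YamlImportHook from config.yml

print(config.host, config.port)  # -> example.org 8080
```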
| patrickglass/Resty | resty/__init__.py | Python | apache-2.0 | 667 | 0 |
"""
Module Resty
Date: November 25, 2013
Company: SwissTech Consulting.
Author: Patrick Glass <patrickglass@gmail.com>
Copyright: Copyright 2013 SwissTech Consulting.
This class implements a simple rest api framework for interfacing with the
Server via its REST API.
"""
__title__ = 'Resty'
__version__ = '0.1'
__author__ = 'Patrick Glass'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2013 Patrick Glass'
from resty.api import RestyAPI
from resty.exceptions import (
RestApiException,
RestApiUrlException,
RestApiAuthError,
RestApiBadRequest,
RestApiServersDown
)
from resty.auth import RestAuthToken
from resty.request import request
| pypa/twine | tests/test_main.py | Python | apache-2.0 | 2,736 | 0.002193 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pretend
import requests
from twine import __main__ as dunder_main
from twine.commands import upload
def test_exception_handling(monkeypatch, capsys):
monkeypatch.setattr(sys, "argv", ["twine", "upload", "missing.whl"])
error = dunder_main.main()
assert error
captured = capsys.readouterr()
    # Hard-coding control characters for red text; couldn't find a succinct alternative.
# Removing trailing whitespace on wrapped lines; trying to test it was ugly.
level = "\x1b[31mERROR \x1b[0m"
assert [line.rstrip() for line in captured.out.splitlines()] == [
f"{level} InvalidDistribution: Cannot find file (or expand pattern):",
" 'missing.whl'",
]
def test_http_exception_handling(monkeypatch, capsys):
monkeypatch.setattr(sys, "argv", ["twine", "upload", "test.whl"])
monkeypatch.setattr(
upload,
"upload",
pretend.raiser(
requests.HTTPError(
response=pretend.stub(
url="https://example.org",
status_code=400,
reason="Error reason",
)
)
),
)
error = dunder_main.main()
assert error
captured = capsys.readouterr()
    # Hard-coding control characters for red text; couldn't find a succinct alternative.
# Removing trailing whitespace on wrapped lines; trying to test it was ugly.
level = "\x1b[31mERROR \x1b[0m"
assert [line.rstrip() for line in captured.out.splitlines()] == [
f"{level} HTTPError: 400 Bad Request from https://example.org",
" Error reason",
]
def test_no_color_exception(monkeypatch, capsys):
monkeypatch.setattr(sys, "argv", ["twine", "--no-color", "upload", "missing.whl"])
error = dunder_main.main()
assert error
captured = capsys.readouterr()
# Removing trailing whitespace on wrapped lines; trying to test it was ugly.
assert [line.rstrip() for line in captured.out.splitlines()] == [
"ERROR InvalidDistribution: Cannot find file (or expand pattern):",
" 'missing.whl'",
]
# TODO: Test verbose output formatting
| waveform80/gpio-zero | docs/examples/led_bargraph_1.py | Python | bsd-3-clause | 400 | 0 |
from __future__ import division  # required for python 2; must precede other imports
from gpiozero import LEDBarGraph
from time import sleep
graph = LEDBarGraph(5, 6, 13, 19, 26, 20)
graph.value = 1 # (1, 1, 1, 1, 1, 1)
sleep(1)
graph.value = 1/2 # (1, 1, 1, 0, 0, 0)
sleep(1)
graph.value = -1/2 # (0, 0, 0, 1, 1, 1)
sleep(1)
graph.value = 1/4 # (1, 0, 0, 0, 0, 0)
sleep(1)
graph.value = -1 # (1, 1, 1, 1, 1, 1)
sleep(1)
| cgalleguillosm/accasim | extra/examples/workload_generator-example.py | Python | mit | 1,782 | 0.018519 |
from accasim.experimentation.workload_generator import workload_generator
if __name__ == '__main__':
#===========================================================================
# Workload filepath
#===========================================================================
workload = 'workload.swf'
#==========================================================================
# System config filepath
#==========================================================================
sys_config = 'config.config'
#===========================================================================
# Performance of the computing units
#===========================================================================
performance = { 'core': 3.334 / 2 }
#===========================================================================
# Request limits for each resource type
#===========================================================================
request_limits = {'min':{'core': 1, 'mem': 1000000 // 4}, 'max': {'core': 4, 'mem': 1000000}}
#===========================================================================
# Create the workload generator instance with the basic inputs
    #===========================================================================
generator = workload_generator(workload, sys_config, performance, request_limits)
#===========================================================================
# Generate n jobs and save them to the nw filepath
    #===========================================================================
n = 100
nw_filepath = 'new_workload.swf'
jobs = generator.generate_jobs(n, nw_filepath)
| napsternxg/haiku_rnn | haiku_gen.py | Python | gpl-2.0 | 3,766 | 0.004514 |
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.preprocessing.sequence import pad_sequences
import numpy as np
import random, sys
'''
Example script to generate haiku Text.
It is recommended to run this script on GPU, as recurrent
networks are quite computationally intensive.
If you try this script on new data, make sure your corpus
has at least ~100k characters. ~1M is better.
'''
path = "haiku_all.txt"
text = open(path).read().lower()
print('corpus length:', len(text))
chars = set(text)
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 100
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i : i + maxlen])
next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))
print('Vectorization...')
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
print "X.shape: %s, Y.shape: %s" % (X.shape, y.shape)
# build the model: 2 stacked LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(len(chars), 512, return_sequences=False))
model.add(Dropout(0.2))
## Remove above 2 lines and replace by below 2 lines to make 2 layers LSTM.
#model.add(LSTM(len(chars), 512, return_sequences=True))
#model.add(Dropout(0.2))
#model.add(LSTM(512, 512, return_sequences=False))
#model.add(Dropout(0.2))
model.add(Dense(512, len(chars)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# helper function to sample an index from a probability array
def sample(a, temperature=1.0):
a = np.log(a)/temperature
a = np.exp(a)/np.sum(np.exp(a))
    return np.argmax(np.random.multinomial(1,a,1))
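`sample` applies temperature scaling before drawing: dividing log-probabilities by `temperature` sharpens the distribution when `temperature < 1` and flattens it toward uniform when `temperature > 1`. A tiny illustration with made-up probabilities:

```python
import numpy as np

p = np.array([0.5, 0.3, 0.2])
for temperature in (0.2, 1.0, 2.0):
    scaled = np.exp(np.log(p) / temperature)
    scaled /= scaled.sum()
    print(temperature, np.round(scaled, 3))
# 0.2 -> ~[0.92, 0.07, 0.01] (near-greedy); 2.0 -> ~[0.42, 0.32, 0.26] (flatter)
```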
# train the model, output generated text after each iteration
def generate_from_model(model, begin_sent=None, diversity_l=[0.2, 0.5, 1.0, 1.2]):
if begin_sent is None:
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in diversity_l:
print
print '----- diversity:', diversity
generated = ''
if begin_sent is None:
sentence = text[start_index : start_index + maxlen]
else:
sentence = begin_sent
generated += sentence
print '----- Generating with seed: "' + sentence + '"'
sys.stdout.write(generated)
tot_lines = 0
tot_chars = 0
while True:
if tot_lines > 3 or tot_chars > 120:
break
x = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x[0, t, char_indices[char]] = 1.
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
tot_chars += 1
generated += next_char
if next_char == '\t':
tot_lines += 1
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print ""
if __name__ == "__main__":
history = model.fit(X, y, batch_size=200, nb_epoch=20)
generate_from_model(model)
"""
for i in xrange(1,4):
history = model.fit(X, y, batch_size=100*i, nb_epoch=20)
generate_from_model(model)
"""
| jumpserver/jumpserver | apps/authentication/views/feishu.py | Python | gpl-3.0 | 0.000538 | 7,469 |
from django.http.response import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from urllib.parse import urlencode
from django.views import View
from django.conf import settings
from django.http.request import HttpRequest
from django.db.utils import IntegrityError
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.exceptions import APIException
from users.utils import is_auth_password_time_valid
from users.views import UserVerifyPasswordView
from users.models import User
from common.utils import get_logger, FlashMessageUtil
from common.utils.random import random_string
from common.utils.django import reverse, get_object_or_none
from common.mixins.views import PermissionsMixin
from common.sdk.im.feishu import FeiShu, URL
from common.utils.common import get_request_ip
from authentication import errors
from authentication.mixins import AuthMixin
from authentication.notifications import OAuthBindMessage
logger = get_logger(__file__)
FEISHU_STATE_SESSION_KEY = '_feishu_state'
class FeiShuQRMixin(PermissionsMixin, View):
def dispatch(self, request, *args, **kwargs):
try:
return super().dispatch(request, *args, **kwargs)
except APIException as e:
msg = str(e.detail)
return self.get_failed_response(
'/',
_('FeiShu Error'),
msg
)
def verify_state(self):
state = self.request.GET.get('state')
session_state = self.request.session.get(FEISHU_STATE_SESSION_KEY)
if state != session_state:
return False
return True
def get_verify_state_failed_response(self, redirect_uri):
msg = _("The system configuration is incorrect. Please contact your administrator")
return self.get_failed_response(redirect_uri, msg, msg)
def get_qr_url(self, redirect_uri):
state = random_string(16)
self.request.session[FEISHU_STATE_SESSION_KEY] = state
params = {
'app_id': settings.FEISHU_APP_ID,
'state': state,
'redirect_uri': redirect_uri,
}
url = URL.AUTHEN + '?' + urlencode(params)
return url
@staticmethod
def get_success_response(redirect_url, title, msg):
message_data = {
'title': title,
'message': msg,
'interval': 5,
'redirect_url': redirect_url,
}
        return FlashMessageUtil.gen_and_redirect_to(message_data)
@staticmethod
def get_failed_response(redirect_url, title, msg):
        message_data = {
            'title': title,
'error': msg,
'interval': 5,
'redirect_url': redirect_url,
}
return FlashMessageUtil.gen_and_redirect_to(message_data)
def get_already_bound_response(self, redirect_url):
msg = _('FeiShu is already bound')
response = self.get_failed_response(redirect_url, msg, msg)
return response
class FeiShuQRBindView(FeiShuQRMixin, View):
permission_classes = (IsAuthenticated,)
def get(self, request: HttpRequest):
user = request.user
redirect_url = request.GET.get('redirect_url')
if not is_auth_password_time_valid(request.session):
msg = _('Please verify your password first')
response = self.get_failed_response(redirect_url, msg, msg)
return response
redirect_uri = reverse('authentication:feishu-qr-bind-callback', external=True)
redirect_uri += '?' + urlencode({'redirect_url': redirect_url})
url = self.get_qr_url(redirect_uri)
return HttpResponseRedirect(url)
class FeiShuQRBindCallbackView(FeiShuQRMixin, View):
permission_classes = (IsAuthenticated,)
def get(self, request: HttpRequest):
code = request.GET.get('code')
redirect_url = request.GET.get('redirect_url')
if not self.verify_state():
return self.get_verify_state_failed_response(redirect_url)
user = request.user
if user.feishu_id:
response = self.get_already_bound_response(redirect_url)
return response
feishu = FeiShu(
app_id=settings.FEISHU_APP_ID,
app_secret=settings.FEISHU_APP_SECRET
)
user_id = feishu.get_user_id_by_code(code)
if not user_id:
msg = _('FeiShu query user failed')
response = self.get_failed_response(redirect_url, msg, msg)
return response
try:
user.feishu_id = user_id
user.save()
except IntegrityError as e:
if e.args[0] == 1062:
msg = _('The FeiShu is already bound to another user')
response = self.get_failed_response(redirect_url, msg, msg)
return response
raise e
ip = get_request_ip(request)
OAuthBindMessage(user, ip, _('FeiShu'), user_id).publish_async()
msg = _('Binding FeiShu successfully')
response = self.get_success_response(redirect_url, msg, msg)
return response
class FeiShuEnableStartView(UserVerifyPasswordView):
def get_success_url(self):
referer = self.request.META.get('HTTP_REFERER')
redirect_url = self.request.GET.get("redirect_url")
success_url = reverse('authentication:feishu-qr-bind')
success_url += '?' + urlencode({
'redirect_url': redirect_url or referer
})
return success_url
class FeiShuQRLoginView(FeiShuQRMixin, View):
permission_classes = (AllowAny,)
def get(self, request: HttpRequest):
redirect_url = request.GET.get('redirect_url')
redirect_uri = reverse('authentication:feishu-qr-login-callback', external=True)
redirect_uri += '?' + urlencode({'redirect_url': redirect_url})
url = self.get_qr_url(redirect_uri)
return HttpResponseRedirect(url)
class FeiShuQRLoginCallbackView(AuthMixin, FeiShuQRMixin, View):
permission_classes = (AllowAny,)
def get(self, request: HttpRequest):
code = request.GET.get('code')
redirect_url = request.GET.get('redirect_url')
login_url = reverse('authentication:login')
if not self.verify_state():
return self.get_verify_state_failed_response(redirect_url)
feishu = FeiShu(
app_id=settings.FEISHU_APP_ID,
app_secret=settings.FEISHU_APP_SECRET
)
user_id = feishu.get_user_id_by_code(code)
if not user_id:
            # This error should not occur in the normal flow; it indicates tampering (hack behavior).
msg = _('Failed to get user from FeiShu')
response = self.get_failed_response(login_url, title=msg, msg=msg)
return response
user = get_object_or_none(User, feishu_id=user_id)
if user is None:
title = _('FeiShu is not bound')
msg = _('Please login with a password and then bind the FeiShu')
response = self.get_failed_response(login_url, title=title, msg=msg)
return response
try:
self.check_oauth2_auth(user, settings.AUTH_BACKEND_FEISHU)
except errors.AuthFailedError as e:
self.set_login_failed_mark()
msg = e.msg
response = self.get_failed_response(login_url, title=msg, msg=msg)
return response
return self.redirect_to_guard_view()
| kaaveland/anybot | im/__init__.py | Python | gpl-2.0 | 449 | 0.002227 |
"""
This package supplies tools for working with automated services
connected to a server. It was written with IRC in mind, so it's not
very generic, in that it pretty much assumes a single client connected
to a central server, and it's not easy for a client to add further connections
at runtime (but possible, though you might have to avoid selector.Reactor.loop).
"""
__all__ = [
"irc",
"selector",
|
"connection",
"irc2num"
]
| sunlightlabs/billy | billy/scrape/utils.py | Python | bsd-3-clause | 5,127 | 0.000585 |
import re
import itertools
import subprocess
import collections
def convert_pdf(filename, type='xml'):
commands = {'text': ['pdftotext', '-layout', filename, '-'],
'text-nolayout': ['pdftotext', filename, '-'],
'xml': ['pdftohtml', '-xml', '-stdout', filename],
'html': ['pdftohtml', '-stdout', filename]}
try:
        pipe = subprocess.Popen(commands[type], stdout=subprocess.PIPE,
                                close_fds=True).stdout
    except OSError as e:
        raise EnvironmentError("error running %s, missing executable? [%s]" %
                               (' '.join(commands[type]), e))
data = pipe.read()
pipe.close()
return data
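`convert_pdf` shells out to the poppler command-line tools, so `pdftotext`/`pdftohtml` must be installed and on `PATH`. A hedged usage sketch with a hypothetical filename:

```python
# Requires poppler-utils; 'bill.pdf' is a placeholder path.
xml_bytes = convert_pdf('bill.pdf', type='xml')
plain_text = convert_pdf('bill.pdf', type='text-nolayout')
```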
def clean_spaces(s):
return re.sub('\s+', ' ', s, flags=re.U).strip()
class PlaintextColumns(object):
'''
Parse plain text columns like this into a table:
cols = """
Austin Errington Lawson, L Pryor
Bartlett Forestal Macer Riecken
Battles GiaQuinta Moed Shackleford
Bauer Goodin Moseley Smith, V
Brown,C Hale Niezgodsk Stemler
Candelaria Reardon Harris Pelath Summers
DeLaney Kersey Pierce VanDenburgh
Dvorak Klinker Porter
"""
Usage:
>>> table = PlaintextColumns(cols)
>>> next(table.rows())
('Austin', 'Errington', 'Lawson, L', 'Pryor')
>>> next(table.cols())
('Austin',
'Bartlett',
'Battles',
'Bauer',
'Brown,C',
'Candelaria Reardon',
'DeLaney',
'Dvorak')
>>> list(table.cells())
['Austin', 'Errington', 'Lawson, L', ...]
'''
def __init__(self, text, threshold=3):
'''Threshold is how many times a column boundary (an integer offset
from the beginning of the line) must be found in order to qualify
as a boundary and not an outlier.
'''
self.text = text.strip()
self.threshold = threshold
def _get_column_ends(self):
'''Guess where the ends of the columns lie.
'''
ends = collections.Counter()
for line in self.text.splitlines():
for matchobj in re.finditer('\s{2,}', line.lstrip()):
ends[matchobj.end()] += 1
return ends
def _get_column_boundaries(self):
'''Use the guessed ends to guess the boundaries of the plain
text columns.
'''
# Try to figure out the most common column boundaries.
ends = self._get_column_ends()
if not ends:
# If there aren't even any nontrivial sequences of whitespace
# dividing text, there may be just one column. In which case,
# Return a single span, effectively the whole line.
return [slice(None, None)]
most_common = []
threshold = self.threshold
for k, v in collections.Counter(ends.values()).most_common():
if k >= threshold:
most_common.append(k)
if most_common:
boundaries = []
for k, v in ends.items():
if v in most_common:
boundaries.append(k)
else:
# Here there weren't enough boundaries to guess the most common
# ones, so just use the apparent boundaries. In other words, we
# have only 1 row. Potentially a source of inaccuracy.
boundaries = ends.keys()
# Convert the boundaries into a list of span slices.
boundaries.sort()
last_boundary = boundaries[-1]
boundaries = zip([0] + boundaries, boundaries)
boundaries = list(itertools.starmap(slice, boundaries))
# And get from the last boundary to the line ending.
boundaries.append(slice(last_boundary, None))
return boundaries
@property
def boundaries(self):
_boundaries = getattr(self, '_boundaries', None)
if _boundaries is not None:
return _boundaries
self._boundaries = _boundaries = self._get_column_boundaries()
return _boundaries
def getcells(self, line):
'''Using self.boundaries, extract cells from the given line.
'''
for boundary in self.boundaries:
cell = line.lstrip()[boundary].strip()
if cell:
for cell in re.split('\s{3,}', cell):
yield cell
else:
yield None
def rows(self):
'''Returns an iterator of row tuples.
'''
for line in self.text.splitlines():
yield tuple(self.getcells(line))
def cells(self):
'''Returns an interator of all cells in the table.
'''
for line in self.text.splitlines():
for cell in self.getcells(line):
yield cell
def cols(self):
'''Returns an interator of column tuples.
'''
return itertools.izip(*list(self.rows()))
__iter__ = cells
| sara-02/fabric8-analytics-stack-analysis | util/error/error_codes.py | Python | gpl-3.0 | 322 | 0.003106 |
"""Error codes used during the analysis."""
ERR_INPUT_INVALID = {
"name": "ERR_INPUT_INVALID ",
"msg": "Input is invalid."
}
ERR_MODEL_NOT_AVAILABLE = {
"name": "ERR_MODEL_NOT_AVAILABLE",
"ms
|
g": "Model does not seem to be available! It should be either trained or loaded "
"before scoring."
}
| tlake/data-structures-mk2 | tests/test_insertion_sort.py | Python | mit | 1,008 | 0.003968 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from structures.insertion_sort import insertion_sort
@pytest.fixture
def sorted_list():
return [i for i in xrange(10)]
@pytest.fixture
def reverse_list():
    return [i for i in xrange(9, -1, -1)]
@pytest.fixture
def average_list():
return [5, 9, 2, 4, 1, 6, 8, 7, 0, 3]
def test_sorted(sorted_list):
insertion_sort(sorted_list)
assert sorted_list == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
def test_worst(reverse_list):
insertion_sort(reverse_list)
assert reverse_list == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
def test_average(average_list):
insertion_sort(average_list)
assert average_list == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
def test_repeats():
l = [3, 6, 7, 3, 9, 5, 2, 7]
insertion_sort(l)
assert l == [2, 3, 3, 5, 6, 7, 7, 9]
def test_multiple_types():
l = [3, 'foo', 2.8, True, []]
# python 2 sorting is crazy
insertion_sort(l)
assert l == [True, 2.8, 3, [], 'foo']
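The `structures.insertion_sort` module under test is not shown here; a minimal in-place implementation consistent with these tests (mutates its argument, returns `None`) might look like:

```python
def insertion_sort(seq):
    """Sort seq in place by inserting each item into the sorted prefix."""
    for i in range(1, len(seq)):
        item = seq[i]
        j = i - 1
        while j >= 0 and seq[j] > item:
            seq[j + 1] = seq[j]  # shift larger items one slot right
            j -= 1
        seq[j + 1] = item
```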
| dmartinezgarcia/Python-Programming | Chapter 8 - Software Objects/exercise_1.py | Python | gpl-2.0 | 2,396 | 0.00793 |
# Exercise 1
#
# Improve the Critter caretaker program by allowing the user to specify how much food he or she feeds the critter and
# how long he or she plays with the critter. Have these values affect how quickly the critter's hunger and boredom
# levels drop.
#
class Critter(object):
"""A virtual pet"""
    def __init__(self, name, hunger = 0, boredom = 0):
self.name = name
self.hunger = hunger
self.boredom = boredom
def __pass_time(self, hunger_val = 1, boredom_val = 1):
self.hunger += hunger_val
self.boredom += boredom_val
@property
def mood(self):
unhappiness = self.hunger + self.boredom
if unhappiness < 5:
m = "happy"
elif 5 <= unhappiness <= 10:
m = "okay"
elif 11 <= unhappiness <= 15:
m = "frustrated"
else:
m = "mad"
return m
def talk(self):
print("I'm", self.name, "and I feel", self.mood, "now.\n")
self.__pass_time()
def eat(self, food = 4):
print("Brruppp. Thank you.")
self.hunger -= food
if self.hunger < 0:
self.hunger = 0
self.__pass_time(boredom_val=food/2)
def play(self, fun = 4):
print("Wheee!")
self.boredom -= fun
if self.boredom < 0:
self.boredom = 0
self.__pass_time(hunger_val=fun/2)
def main():
crit_name = input("What do you want to name your critter?: ")
crit = Critter(crit_name)
choice = None
while choice != "0":
print \
("""
Critter Caretaker
0 - Quit
1 - Listen to your critter
2 - Feed your critter
3 - Play with your critter
""")
choice = input("Choice: ")
print()
# exit
if choice == "0":
print("Good-bye.")
# listen to your critter
elif choice == "1":
crit.talk()
# feed your critter
elif choice == "2":
crit.eat(int(input("How much food do you wish to feed the critter? ")))
# play with your critter
elif choice == "3":
crit.play(int(input("How much time do you wish to play with the critter? ")))
# some unknown choice
else:
print("\nSorry, but", choice, "isn't a valid choice.")
main()
("\n\nPress the enter key to exit.")
|
scanny/python-pptx
|
pptx/chart/plot.py
|
Python
|
mit
| 13,213
| 0.000076
|
# encoding: utf-8
"""
Plot-related objects. A plot is known as a chart group in the MS API. A chart
can have more than one plot overlayed on each other, such as a line plot
layered over a bar plot.
"""
from __future__ import absolute_import, print_function, unicode_literals
from .category import Categories
from .datalabel import DataLabels
from ..enum.chart import XL_CHART_TYPE as XL
from ..oxml.ns import qn
from ..oxml.simpletypes import ST_BarDir, ST_Grouping
from .series import SeriesCollection
from ..util import lazyproperty
class _BasePlot(object):
"""
A distinct plot that appears in the plot area of a chart. A chart may
have more than one plot, in which case they appear as superimposed
layers, such as a line plot appearing on top of a bar chart.
"""
def __init__(self, xChart, chart):
super(_BasePlot, self).__init__()
self._element = xChart
self._chart = chart
@lazyproperty
def categories(self):
"""
Returns a |category.Categories| sequence object containing
a |category.Category| object for each of the category labels
associated with this plot. The |category.Category| class derives from
``str``, so the returned value can be treated as a simple sequence of
strings for the common case where all you need is the labels in the
order they appear on the chart. |category.Categories| provides
additional properties for dealing with hierarchical categories when
required.
"""
return Categories(self._element)
@property
def chart(self):
"""
The |Chart| object containing this plot.
"""
return self._chart
@property
def data_labels(self):
"""
|DataLabels| instance providing properties and methods on the
collection of data labels associated with this plot.
"""
dLbls = self._element.dLbls
if dLbls is None:
raise ValueError(
"plot has no data labels, set has_data_labels = True first"
)
return DataLabels(dLbls)
@property
def has_data_labels(self):
"""
Read/write boolean, |True| if the series has data labels. Assigning
|True| causes data labels to be added to the plot. Assigning False
removes any existing data labels.
"""
return self._element.dLbls is not None
@has_data_labels.setter
def has_data_labels(self, value):
"""
Add, remove, or leave alone the ``<c:dLbls>`` child element depending
on current state and assigned *value*. If *value* is |True| and no
``<c:dLbls>`` element is present, a new default element is added with
default child elements and settings. When |False|, any existing dLbls
element is removed.
"""
if bool(value) is False:
self._element._remove_dLbls()
else:
if self._element.dLbls is None:
dLbls = self._element._add_dLbls()
dLbls.showVal.val = True
@lazyproperty
def series(self):
"""
A sequence of |Series| objects representing the series in this plot,
in the order they appear in the plot.
"""
return SeriesCollection(self._element)
@property
def vary_by_categories(self):
"""
Read/write boolean value specifying whether to use a different color
for each of the points in this plot. Only effective when there is
a single series; PowerPoint automatically varies color by series when
more than one series is present.
"""
varyColors = self._element.varyColors
if varyColors is None:
return True
return varyColors.val
@vary_by_categories.setter
def vary_by_categories(self, value):
self._element.get_or_add_varyColors().val = bool(value)
class AreaPlot(_BasePlot):
"""
An area plot.
"""
class Area3DPlot(_BasePlot):
"""
A 3-dimensional area plot.
"""
class BarPlot(_BasePlot):
"""
A bar chart-style plot.
"""
@property
def gap_width(self):
"""
Width of gap between bar(s) of each category, as an integer
percentage of the bar width. The default value for a new bar chart is
150, representing 150% or 1.5 times the width of a single bar.
"""
gapWidth = self._element.gapWidth
if gapWidth is None:
return 150
return gapWidth.val
@gap_width.setter
def gap_width(self, value):
gapWidth = self._element.get_or_add_gapWidth()
gapWidth.val = value
@property
def overlap(self):
"""
Read/write int value in range -100..100 specifying a percentage of
the bar width by which to overlap adjacent bars in a multi-series bar
chart. Default is 0. A setting of -100 creates a gap of a full bar
width and a setting of 100 causes all the bars in a category to be
        superimposed. A stacked bar plot has overlap of 100 by default.
"""
overlap = self._element.overlap
if overlap is None:
return 0
return overlap.val
@overlap.setter
def overlap(self, value):
"""
Set the value of the ``<c:overlap>`` child element to *int_value*,
or remove the overlap element if *int_value* is 0.
"""
if value == 0:
self._element._remove_overlap()
return
self._element.get_or_add_overlap().val = value
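A hedged usage sketch for the two bar-plot properties above; it assumes python-pptx is installed, that `bars.pptx` (a hypothetical file) exists, and that the first shape on the first slide is a bar chart:
from pptx import Presentation

prs = Presentation("bars.pptx")                    # hypothetical input file
plot = prs.slides[0].shapes[0].chart.plots[0]      # assumes shape 0 is the chart
plot.gap_width = 50    # narrow the gap between category groups
plot.overlap = -25     # quarter-bar gap between bars within a group
prs.save("bars-tuned.pptx")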
class BubblePlot(_BasePlot):
"""
A bubble chart plot.
"""
@property
def bubble_scale(self):
"""
An integer between 0 and 300 inclusive indicating the percentage of
the default size at which bubbles should be displayed. Assigning
|None| produces the same behavior as assigning `100`.
"""
bubbleScale = self._element.bubbleScale
if bubbleScale is None:
return 100
return bubbleScale.val
@bubble_scale.setter
def bubble_scale(self, value):
bubbleChart = self._element
bubbleChart._remove_bubbleScale()
if value is None:
return
bubbleScale = bubbleChart._add_bubbleScale()
bubbleScale.val = value
class DoughnutPlot(_BasePlot):
"""
    A doughnut plot.
"""
class LinePlot(_BasePlot):
"""
A line chart-style plot.
"""
class PiePlot(_BasePlot):
"""
A pie chart-style plot.
"""
class RadarPlot(_BasePlot):
"""
A radar-style plot.
"""
class XyPlot(_BasePlot):
"""
An XY (scatter) plot.
"""
def PlotFactory(xChart, chart):
"""
Return an instance of the appropriate subclass of _BasePlot based on the
tagname of *xChart*.
"""
try:
PlotCls = {
qn("c:areaChart"): AreaPlot,
qn("c:area3DChart"): Area3DPlot,
qn("c:barChart"): BarPlot,
qn("c:bubbleChart"): BubblePlot,
qn("c:doughnutChart"): DoughnutPlot,
qn("c:lineChart"): LinePlot,
qn("c:pieChart"): PiePlot,
qn("c:radarChart"): RadarPlot,
qn("c:scatterChart"): XyPlot,
}[xChart.tag]
except KeyError:
raise ValueError("unsupported plot type %s" % xChart.tag)
return PlotCls(xChart, chart)
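The same tag-to-class dispatch pattern in a self-contained form, with plain strings standing in for the qualified lxml tag names produced by `qn` (names here are illustrative only):
class AreaPlotDemo(object): pass
class BarPlotDemo(object): pass

_PLOT_CLS = {"c:areaChart": AreaPlotDemo, "c:barChart": BarPlotDemo}

def plot_factory(tag):
    # Look up the plot class for the tag, mirroring PlotFactory above.
    try:
        cls = _PLOT_CLS[tag]
    except KeyError:
        raise ValueError("unsupported plot type %s" % tag)
    return cls()

print(type(plot_factory("c:barChart")).__name__)  # BarPlotDemo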
class PlotTypeInspector(object):
"""
"One-shot" service object that knows how to identify the type of a plot
as a member of the XL_CHART_TYPE enumeration.
"""
@classmethod
def chart_type(cls, plot):
"""
Return the member of :ref:`XlChartType` that corresponds to the chart
type of *plot*.
"""
try:
chart_type_method = {
"AreaPlot": cls._differentiate_area_chart_type,
"Area3DPlot": cls._differentiate_area_3d_chart_type,
"BarPlot": cls._differentiate_bar_chart_type,
"BubblePlot": cls._differentiate_bubble_chart_type,
"DoughnutPlot": cls._differentiate_doughnut_chart_type,
"LinePlot": cls._diffe
|
pklfz/fold
|
tensorflow_fold/util/proto_test.py
|
Python
|
apache-2.0
| 5,810
| 0.001721
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_fold.util.proto."""
import os
# import google3
import tensorflow as tf
from tensorflow_fold.util import proto_tools
from tensorflow_fold.util import test3_pb2
from tensorflow_fold.util import test_pb2
from google.protobuf import text_format
# Make sure SerializedMessageToTree can see our proto files.
proto_tools.map_proto_source_tree_path("", os.getcwd())
# Note: Tests run in the bazel root directory, which we will use as the root for
# our source protos.
proto_tools.import_proto_file("tensorflow_fold/util/test.proto")
proto_tools.import_proto_file("tensorflow_fold/util/test3.proto")
def MakeCyclicProto(message_str):
return text_format.Parse(message_str, test_pb2.CyclicType())
def MakeCyclicProto3(message_str):
return text_format.Parse(message_str, test3_pb2.CyclicType3())
def MakeOneAtomProto(message_str):
return text_format.Parse(message_str, test_pb2.OneAtom())
class ProtoTest(tf.test.TestCase):
def testSerializedMessageToTree(self):
example = MakeCyclicProto(
"some_same<"
" many_int32: 1"
" many_int32: 2"
" some_same<"
" many_int32: 3"
" many_int32: 4"
" some_bool: false"
" >"
">"
"some_enum: THAT")
result = proto_tools.serialized_message_to_tree(
"tensorflow.fold.CyclicType", example.SerializeToString())
self.assertEqual(result["some_same"]["many_int32"], [1, 2])
self.assertEqual(result["some_same"]["some_same"]["many_int32"], [3, 4])
self.assertEqual(result["some_same"]["some_same"]["some_bool"], False)
self.assertEqual(result["many_bool"], [])
self.assertEqual(result["some_bool"], None)
self.assertEqual(result["some_same"]["many_bool"], [])
self.assertEqual(result["some_same"]["some_bool"], None)
self.assertEqual(result["some_enum"]["name"], "THAT")
self.assertEqual(result["some_enum"]["index"], 1)
self.assertEqual(result["some_enum"]["number"], 1)
def testSerializedMessageToTreeProto3(self):
example = MakeCyclicProto3(
"some_same<"
" many_int32: 1"
" many_int32: 2"
" some_same<"
" many_int32: 3"
" many_int32: 4"
" some_bool: false"
" >"
">"
"some_enum: THAT")
result = proto_tools.serialized_message_to_tree(
"tensorflow.fold.CyclicType3", example.SerializeToString())
self.assertEqual(result["some_same"]["many_int32"], [1, 2])
self.assertEqual(result["some_same"]["some_same"]["many_int32"], [3, 4])
self.assertEqual(result["some_same"]["some_same"]["some_bool"], False)
self.assertEqual(result["many_bool"], [])
self.assertEqual(result["some_bool"], False)
self.assertEqual(result["some_same"]["many_bool"], [])
self.assertEqual(result["some_same"]["some_bool"], False)
self.assertEqual(result["some_enum"]["name"], "THAT")
self.assertEqual(result["some_enum"]["index"], 1)
self.assertEqual(result["some_enum"]["number"], 1)
def testSerializedMessageToTreeOneofEmpty(self):
empty_proto = MakeOneAtomProto("").SerializeToString()
empty_result = proto_tools.serialized_message_to_tree(
"tensorflow.fold.OneAtom", empty_proto)
self.assertEqual(empty_result["atom_type"], None)
self.assertEqual(empty_result["some_int32"], None)
self.assertEqual(empty_result["some_int64"], None)
self.assertEqual(empty_result["some_uint32"], None)
self.assertEqual(empty_result["some_uint64"], None)
self.assertEqual(empty_result["some_double"], None)
self.assertEqual(empty_result["some_float"], None)
self.assertEqual(empty_result["some_bool"], None)
self.assertEqual(empty_result["some_enum"], None)
self.assertEqual(empty_result["some_string"], None)
def testSerializedMessageToTreeOneof(self):
empty_proto = MakeOneAtomProto("some_string: \"x\"").SerializeToString()
empty_result = proto_tools.serialized_message_to_tree(
"tensorflow.fold.OneAtom", empty_proto)
self.assertEqual(empty_result["atom_type"], "some_string")
self.assertEqual(empty_result["some_int32"], None)
self.assertEqual(empty_result["some_int64"], None)
self.assertEqual(empty_result["some_uint32"], None)
self.assertEqual(empty_result["some_uint64"], None)
self.assertEqual(empty_result["some_double"], None)
self.assertEqual(empty_result["some_float"], None)
self.assertEqual(empty_result["some_bool"], None)
self.assertEqual(empty_result["some_enum"], None)
self.assertEqual(empty_result["some_string"], "x")
def testNonConsecutiveEnum(self):
name = "tensorflow.fold.NonConsecutiveEnumMessage"
msg = test_pb2.NonConsecutiveEnumMessage(
the_enum=test_pb2.NonConsecutiveEnumMessage.THREE)
self.assertEqual(
{"the_enum": {"name": "THREE", "index": 1, "number": 3}},
proto_tools.serialized_message_to_tree(name, msg.SerializeToString()))
msg.the_enum = test_pb2.NonConsecutiveEnumMessage.SEVEN
self.assertEqual(
{"the_enum": {"name": "SEVEN", "index": 0, "number": 7}},
proto_tools.serialized_message_to_tree(name, msg.SerializeToString()))
if __name__ == "__main__":
tf.test.main()
|
2013Commons/HUE-SHARK
|
apps/shell/setup.py
|
Python
|
apache-2.0
| 1,754
| 0.017104
|
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from hueversion import VERSION
import os
def expand_package_data(src_dirs, strip=""):
ret = []
for src_dir in src_dirs:
for path, dnames, fnames in os.walk(src_dir):
for fname in fnames:
        ret.append(os.path.join(path, fname).replace(strip, ""))
return ret
os.chdir(os.path.dirname(os.path.abspath(__file__)))
setup(
name = "shell",
version = VERSION,
url = 'http://github.com/cloudera/hue',
description = 'Shell interface in Hue',
author = 'Hue',
packages = find_packages('src'),
    package_dir = {'': 'src'},
install_requires = ['setuptools', 'desktop'],
entry_points = { 'desktop.sdk.application': 'shell=shell' },
zip_safe = False,
package_data = {
# Include static resources. Package_data doesn't
# deal well with directory globs, so we enumerate
# the files manually.
'shell': expand_package_data(
["src/shell/templates", "src/shell/static"],
"src/shell/")
}
)
|
danxhuber/k2epic
|
checkk2fov/__init__.py
|
Python
|
mit
| 135
| 0.014815
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division,absolute_import)
from .K2onSilicon import K2onSilicon
|
shawncaojob/LC
|
QUESTIONS/96_unique_binary_search_trees.py
|
Python
|
gpl-3.0
| 2,402
| 0.005412
|
# 96. Unique Binary Search Trees My Submissions QuestionEditorial Solution
# Total Accepted: 84526 Total Submissions: 224165 Difficulty: Medium
# Given n, how many structurally unique BST's (binary search trees) that store values 1...n?
#
# For example,
# Given n = 3, there are a total of 5 unique BST's.
#
# 1 3 3 2 1
# \ / / / \ \
# 3 2 1 1 3 2
# / / \ \
# 2 1 2 3
#
class Solution(object):
def numTrees(self, n):
"""
:type n: int
:rtype: int
"""
A = [0] * (n + 1)
A[0] = 1
A[1] = 1
for i in xrange(2, n+1):
for k in xrange(0, i):
A[i] += A[k]*A[i-1-k]
return A[n]
# 4 4 4 4 4
# / / / / /
# 1 3 3 2 1
# \ / / / \ \
# 3 2 1 1 3 2
# / / \ \
# 2 1 2 3
#
# 1 3 3 2 1 2
# \ / \ / \ / \ \ / \
# 3 2 4 1 4 1 3 2 1 4
# / \ / \ \ \ /
# 2 4 1 2 4 3 3
# \
# 4
#
# Subscribe to see which companies asked this question
# Analysis:
# n = 0, 1
# n = 1, 1
# n = 2, 2 = (0,1) + (1,0)
# n = 3, 5 = 2(0,2) + 2(2,0) + 1(1,1)
# n = 4, 14 = (0,3) + (1,2) + (2,1) + (3,0) = 5 + 2 + 2 + 5
# n = 5,
class Solution(object):
def numTrees(self, n):
"""
:type n: int
:rtype: int
"""
if n == 0: return 0
res = [0 for x in xrange(0,n+1)]
res[0], res[1] = 1, 1
for n in xrange(2, n+1):
i, tmp = 0, 0
while i < n:
tmp += res[i] * res[n-1-i]
i += 1
res[n] = tmp
return res[n]
import unittest
class TestSolution(unittest.TestCase):
def test_0(self):
self.assertEqual(Solution().numTrees(3), 5)
def test_1(self):
self.assertEqual(Solution().numTrees(2), 2)
def test_2(self):
        self.assertEqual(Solution().numTrees(4), 14)
if __name__ == "__main__":
unittest.main()
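For cross-checking both DP solutions: the counts are the Catalan numbers, which also admit a closed form. A sketch using `math.comb` (Python 3.8+; not part of the original submissions):
import math

def num_trees_closed_form(n):
    # Catalan number C_n = binom(2n, n) / (n + 1)
    return math.comb(2 * n, n) // (n + 1)

assert [num_trees_closed_form(n) for n in (2, 3, 4)] == [2, 5, 14]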
|
hsnlab/nffg
|
nffg_elements.py
|
Python
|
apache-2.0
| 90,210
| 0.008081
|
# Copyright 2017 Janos Czentye, Balazs Nemeth, Balazs Sonkoly
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Classes for handling the elements of the NF-FG data structure.
"""
import json
import uuid
from collections import Iterable, OrderedDict
from itertools import chain
################################################################################
# ---------- BASE classes of NFFG elements -------------------
################################################################################
class Persistable(object):
"""
Define general persist function for the whole NFFG structure.
"""
__slots__ = ()
def persist (self):
"""
Common function to persist the actual element into a plain text format.
:return: generated empty object fit to JSON
:rtype: dict
"""
return OrderedDict()
def load (self, data, *args, **kwargs):
"""
Common function to fill self with data from JSON data.
:raise: :any:`exceptions.NotImplementedError`
:param data: object structure in JSON
:return: self
"""
pass
@classmethod
def parse (cls, data, *args, **kwargs):
"""
Common function to parse the given JSON object structure as the actual NF-FG
entity type and return a newly created object.
:param data: raw JSON object structure
:type data: object
:return: parsed data as the entity type
:rtype: :any:`Persistable`
"""
return cls().load(data, *args, **kwargs)
def copy (self):
"""
    Return the copy of the object. This copy function is meant to be used when
    a new ``NFFG`` object structure is created. It can handle references
    pointing to internal NFFG elements in order to avoid unnecessary deep
    copies. These references are always None in the copied object and are
    overwritten by adder functions in every case.
:return: copied object
:rtype: :any:`Element`
"""
from copy import deepcopy
return deepcopy(self)
class Element(Persistable):
"""
Main base class for NF-FG elements with unique id.
Contains the common functionality.
"""
# Operation constants
OP_CREATE = "create"
OP_REPLACE = "replace"
OP_MERGE = "merge"
OP_REMOVE = "remove"
OP_DELETE = "delete"
# Status constants
STATUS_INIT = "INITIALIZED"
STATUS_PENDING = "PENDING"
STATUS_DEPLOY = "DEPLOYED"
STATUS_RUN = "RUNNING"
STATUS_STOP = "STOPPED"
STATUS_FAIL = "FAILED"
__slots__ = ('id', 'type', 'operation', 'status')
def __init__ (self, id=None, type="ELEMENT", operation=None, status=None):
"""
Init.
:param id: optional identification (generated by default)
:type id: str or int
:param type: explicit object type both for nodes and edges
:type type: str
:return: None
"""
super(Element, self).__init__()
self.id = id if id is not None else self.generate_unique_id()
self.type = type
self.operation = operation
self.status = status
@staticmethod
def generate_unique_id ():
"""
Generate a unique id for the object based on uuid module: :rfc:`4122`.
:return: unique id
:rtype: str
"""
return str(uuid.uuid1())
def regenerate_id (self):
"""
Regenerate and set id. Useful for object copy.
:return: new id
:rtype: str
"""
self.id = self.generate_unique_id()
return self.id
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
# Need to override
element = super(Element, self).persist()
element['id'] = self.id
if self.operation is not None:
element["operation"] = self.operation
if self.status is not None:
element["status"] = self.status
return element
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
self.id = data['id']
super(Element, self).load(data=data)
self.operation = data.get("operation") # optional
self.status = data.get("status") # optional
return self
def dump (self):
"""
Dump the Element in a pretty format for debugging.
:return: Element in JSON format
:rtype: str
"""
return json.dumps(self.persist(), indent=2, sort_keys=False)
##############################################################################
# dict specific functions
##############################################################################
def __getitem__ (self, item):
"""
Return the attribute of the element given by ``item``.
:param item: attribute name
:type item: str or int
:return: attribute
:rtype: object
"""
if hasattr(self, item):
return getattr(self, item)
else:
raise KeyError(
"%s object has no key: %s" % (self.__class__.__name__, item))
def __setitem__ (self, key, value):
"""
Set the attribute given by ``key`` with ``value``:
:param key: attribute name
:type key: str or int
:param value: new value
:type value: object
:return: new value
:rtype: object
"""
    if hasattr(self, key):
return setattr(self, key, value)
else:
raise KeyError(
"%s object has no key: %s" % (self.__class__.__name__, key))
def __contains__ (self, item):
"""
    Return True if the given ``item`` exists.
:param item: searched attribute name
:type item: str or int
    :return: whether the given item exists
    :rtype: bool
"""
return hasattr(self, item)
def get (self, item, default=None):
"""
Return with the attribute given by ``item``, else ``default``.
:param item: searched attribute name
:type item: str
:param default: default value
:type default: object
:return: found attribute or default
:rtype: object
"""
try:
return self[item]
except KeyError:
return default
def setdefault (self, key, default=None):
"""
    Set the attribute given by ``key``. Use the ``default`` value if it is
not given.
:param key: attribute name
:type key: str or int
:param default: default value
:type default: object
:return: None
"""
if key not in self:
self[key] = default
def clear (self):
"""
    Overridden for safety reasons.
:raise: :any:`exceptions.RuntimeError`
"""
raise RuntimeError("This standard dict functions is not supported by NFFG!")
def update (self, dict2):
"""
    Overridden for safety reasons.
:raise: :any:`exceptions.RuntimeError`
"""
    raise RuntimeError(
      "This standard dict function is not supported by NFFG! self: %s dict2: "
      "%s" % (self, dict2))
class L3Address(Element):
"""
Wrapper class for storing L3 address values.
"""
__slots__ = ('name', 'configure', 'client', 'requested', 'provided')
def __init__ (self, id, name=None, configure=None, client=None,
requested=None, provided=None):
"""
Init.
:param id: optional id
:type id: str or int
:param name: optional name
:type name: str
:param configure: request address
:type configure: bool
:param client: client of the address request
:type client: str
:param requested: requested IP
:type requested: str
:param provided: provided IP
:type provided: str
:return: None
"""
super(L3Address, self).__init__(id=id, type="L3ADDRESS")
self.name = name
self.configure = configure
self.client = client
self.requested = requested
self.provided = provided
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data:
|
uranusjr/django
|
django/db/models/sql/query.py
|
Python
|
bsd-3-clause
| 96,523
| 0.000912
|
"""
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
from collections import Counter, Iterator, Mapping, OrderedDict, namedtuple
from itertools import chain, count, product
from string import ascii_uppercase
from django.core.exceptions import (
EmptyResultSet, FieldDoesNotExist, FieldError,
)
from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections
from django.db.models.aggregates import Count
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import Col, Ref
from django.db.models.fields.related_lookups import MultiColSource
from django.db.models.lookups import Lookup
from django.db.models.query_utils import (
Q, check_rel_lookup_compatibility, refs_expression,
)
from django.db.models.sql.constants import (
INNER, LOUTER, ORDER_DIR, ORDER_PATTERN, SINGLE,
)
from django.db.models.sql.datastructures import (
BaseTable, Empty, Join, MultiJoin,
)
from django.db.models.sql.where import (
AND, OR, ExtraWhere, NothingNode, WhereNode,
)
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.tree import Node
__all__ = ['Query', 'RawQuery']
def get_field_names_from_opts(opts):
return set(chain.from_iterable(
(f.name, f.attname) if f.concrete else (f.name,)
for f in opts.get_fields()
))
def get_children_from_q(q):
for child in q.children:
if isinstance(child, Node):
yield from get_children_from_q(child)
else:
yield child
JoinInfo = namedtuple(
'JoinInfo',
('final_field', 'targets', 'opts', 'joins', 'path')
)
class RawQuery:
"""A single raw SQL query."""
def __init__(self, sql, using, params=None):
self.params = params or ()
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.annotation_select = {}
def chain(self, using):
return self.clone(using)
def clone(self, using):
return RawQuery(self.sql, using, params=self.params)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.column_name_converter
return [converter(column_meta[0])
for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
@property
def params_type(self):
return dict if isinstance(self.params, Mapping) else tuple
def __str__(self):
return self.sql % self.params_type(self.params)
def _execute_query(self):
connection = connections[self.using]
# Adapt parameters to the database, as much as possible considering
# that the target type isn't known. See #17755.
params_type = self.params_type
adapter = connection.ops.adapt_unknown_value
if params_type is tuple:
params = tuple(adapter(val) for val in self.params)
elif params_type is dict:
params = {key: adapter(val) for key, val in self.params.items()}
else:
raise RuntimeError("Unexpected params type: %s
|
" % params_type)
self.cursor = connection.cursor()
self.cursor.execute(self.sql, params)
class Query:
"""A single SQL query.""
|
"
alias_prefix = 'T'
subq_aliases = frozenset([alias_prefix])
compiler = 'SQLCompiler'
def __init__(self, model, where=WhereNode):
self.model = model
self.alias_refcount = {}
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# types they are. The key is the alias of the joined table (possibly
# the table name) and the value is a Join-like object (see
# sql.datastructures.Join for more information).
self.alias_map = OrderedDict()
# Sometimes the query contains references to aliases in outer queries (as
# a result of split_exclude). Correct alias quoting needs to know these
# aliases too.
self.external_aliases = set()
self.table_map = {} # Maps table names to list of aliases.
self.default_cols = True
self.default_ordering = True
self.standard_ordering = True
self.used_aliases = set()
self.filter_is_sticky = False
self.subquery = False
# SQL-related attributes
# Select and related select clauses are expressions to use in the
# SELECT clause of the query.
# The select is used for cases where we want to set up the select
# clause to contain other than default fields (values(), subqueries...)
# Note that annotations go to annotations dictionary.
self.select = ()
self.where = where()
self.where_class = where
# The group_by attribute can have one of the following forms:
# - None: no group by at all in the query
# - A tuple of expressions: group by (at least) those expressions.
# String refs are also allowed for now.
# - True: group by all select fields of the model
# See compiler.get_group_by() for details.
self.group_by = None
self.order_by = ()
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.distinct = False
self.distinct_fields = ()
self.select_for_update = False
self.select_for_update_nowait = False
self.select_for_update_skip_locked = False
self.select_for_update_of = ()
self.select_related = False
        # Arbitrary limit for select_related to prevent infinite recursion.
self.max_depth = 5
# Holds the selects defined by a call to values() or values_list()
# excluding annotation_select and extra_select.
self.values_select = ()
# SQL annotation-related attributes
# The _annotations will be an OrderedDict when used. Due to the cost
# of creating OrderedDict this attribute is created lazily (in
# self.annotations property).
self._annotations = None # Maps alias -> Annotation Expression
self.annotation_select_mask = None
self._annotation_select_cache = None
# Set combination attributes
self.combinator = None
self.combinator_all = False
self.combined_queries = ()
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
# The _extra attribute is an OrderedDict, lazily created similarly to
# .annotations
self._extra = None # Maps col_alias -> (col_sql, params).
self.extra_select_mask = None
self._extra_select_cache = None
self.extra_tables = ()
self.extra_order_by = ()
# A tuple that is a set of model field names and either True, if these
# are the fields to defer, or False if these are the only fields to
# load.
self.deferred_loading = (frozenset(), True)
self._filtered_relations = {}
@property
def extra(self):
        if self._extra is None:
            self._extra = OrderedDict()
        return self._extra
|
igordejanovic/textX
|
setup.py
|
Python
|
mit
| 1,005
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup
this_dir = os.path.abspath(os.path.dirname(__file__))
VERSIONFILE = os.path.join(this_dir, "textx", "__init__.py")
VERSION = None
for line in open(VERSIONFILE, "r").readlines():
if line.startswith('__version__'):
VERSION = line.split('"')[1]
if not VERSION:
raise RuntimeError('No version defined in textx.__init__.py')
if sys.argv[-1].startswith('publish'):
if os.system("pip list | grep twine"):
print("twine not installed.\nUse `pip install twine`.\nExiting.")
sys.exit()
os.system("python setup.py sdist bdist_wheel")
if sys.argv[-1] == 'publishtest':
os.system("twine upload -r test dist/*")
else:
os.system("twine upload dist/*")
print("You probably want to also tag the version now:")
print(" git tag -a {0} -m 'version {0}'".format(VERSION))
print(" git push --tags")
sys.exit()
setup(version=VERSION)
|
NeCTAR-RC/horizon
|
openstack_dashboard/dashboards/admin/rbac_policies/views.py
|
Python
|
apache-2.0
| 5,711
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.rbac_policies \
import forms as rbac_policy_forms
from openstack_dashboard.dashboards.admin.rbac_policies \
import tables as rbac_policy_tables
from openstack_dashboard.dashboards.admin.rbac_policies \
import tabs as rbac_policy_tabs
class IndexView(tables.DataTableView):
table_class = rbac_policy_tables.RBACPoliciesTable
page_title = _("RBAC Policies")
@memoized.memoized_method
def _get_tenants(self):
try:
tenants, has_more = api.keystone.tenant_list(self.request)
except Exception:
tenants = []
msg = _("Unable to retrieve information about the "
"policies' projects.")
exceptions.handle(self.request, msg)
tenant_dict = OrderedDict([(t.id, t.name) for t in tenants])
return tenant_dict
def _get_networks(self):
try:
networks = api.neutron.network_list(self.request)
except Exception:
networks = []
msg = _("Unable to retrieve information about the "
"policies' networks.")
exceptions.handle(self.request, msg)
return dict((n.id, n.name) for n in networks)
def _get_qos_policies(self):
qos_policies = []
try:
if api.neutron.is_extension_supported(self.request,
extension_alias='qos'):
qos_policies = api.neutron.policy_list(self.request)
except Exception:
msg = _("Unable to retrieve information about the "
"policies' qos policies.")
exceptions.handle(self.request, msg)
return dict((q.id, q.name) for q in qos_policies)
def get_data(self):
try:
rbac_policies = api.neutron.rbac_policy_list(self.request)
except Exception:
rbac_policies = []
messages.error(self.request,
_("Unable to retrieve RBAC policies."))
if rbac_policies:
tenant_dict = self._get_tenants()
network_dict = self._get_networks()
qos_policy_dict = self._get_qos_policies()
for p in rbac_policies:
# Set tenant name and object name
p.tenant_name = tenant_dict.get(p.tenant_id, p.tenant_id)
p.target_tenant_name = tenant_dict.get(p.target_tenant,
p.target_tenant)
if p.object_type == "network":
p.object_name = network_dict.get(p.object_id, p.object_id)
                elif p.object_type == "qos_policy":
p.object_name = qos_policy_dict.get(p.object_id,
p.object_id)
return rbac_policies
class CreateView(forms.ModalFormView):
template_name = 'admin/rbac_policies/create.html'
form_id = "create_rbac_policy_form"
form_class = rbac_policy_forms.CreatePolicyForm
submit_label = _("Create RBAC Policy")
submit_url = reverse_lazy("horizon:admin:rbac_policies:create")
success_url = reverse_lazy("horizon:admin:rbac_policies:index")
page_title = _("Create A RBAC Policy")
class UpdateView(forms.ModalFormView):
context_object_name = 'rbac_policies'
template_name = 'admin/rbac_policies/update.html'
form_class = rbac_policy_forms.UpdatePolicyForm
form_id = "update_rbac_policy_form"
submit_label = _("Save Changes")
submit_url = 'horizon:admin:rbac_policies:update'
success_url = reverse_lazy('horizon:admin:rbac_policies:index')
page_title = _("Update RBAC Policy")
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
args = (self.kwargs['rbac_policy_id'],)
context["rbac_policy_id"] = self.kwargs['rbac_policy_id']
context["submit_url"] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
rbac_policy_id = self.kwargs['rbac_policy_id']
try:
return api.neutron.rbac_policy_get(self.request, rbac_policy_id)
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve rbac policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
rbac_policy = self._get_object()
return {'rbac_policy_id': rbac_policy['id'],
'target_tenant': rbac_policy['target_tenant']}
class DetailView(tabs.TabView):
tab_group_class = rbac_policy_tabs.RBACDetailsTabs
template_name = 'horizon/common/_detail.html'
page_title = "{{ rbac_policy.id }}"
|
vorwerkc/pymatgen
|
pymatgen/analysis/structure_prediction/dopant_predictor.py
|
Python
|
mit
| 7,420
| 0.001887
|
"""
Predicting potential dopants
"""
import warnings
import numpy as np
from pymatgen.analysis.structure_prediction.substitution_probability import (
SubstitutionPredictor,
)
from pymatgen.core.periodic_table import Element, Species
def get_dopants_from_substitution_probabilities(structure, num_dopants=5, threshold=0.001, match_oxi_sign=False):
"""
Get dopant suggestions based on substitution probabilities.
Args:
structure (Structure): A pymatgen structure decorated with
oxidation states.
num_dopants (int): The number of suggestions to return for
n- and p-type dopants.
threshold (float): Probability threshold for substitutions.
match_oxi_sign (bool): Whether to force the dopant and original species
to have the same sign of oxidation state. E.g. If the original site
is in a negative charge state, then only negative dopants will be
returned.
Returns:
(dict): Dopant suggestions, given as a dictionary with keys "n_type" and
"p_type". The suggestions for each doping type are given as a list of
        dictionaries, each with the keys:
- "probability": The probability of substitution.
- "dopant_species": The dopant species.
- "original_spec
|
ies": The substituted species.
"""
els_have_oxi_states = [hasattr(s, "oxi_state") for s in structure.species]
if not all(els_have_oxi_states):
raise ValueError("All sites in structure must have oxidation states to predict dopants.")
    sp = SubstitutionPredictor(threshold=threshold)
subs = [sp.list_prediction([s]) for s in set(structure.species)]
subs = [
{
"probability": pred["probability"],
"dopant_species": list(pred["substitutions"].keys())[0],
"original_species": list(pred["substitutions"].values())[0],
}
for species_preds in subs
for pred in species_preds
]
subs.sort(key=lambda x: x["probability"], reverse=True)
return _get_dopants(subs, num_dopants, match_oxi_sign)
def get_dopants_from_shannon_radii(bonded_structure, num_dopants=5, match_oxi_sign=False):
"""
Get dopant suggestions based on Shannon radii differences.
Args:
bonded_structure (StructureGraph): A pymatgen structure graph
decorated with oxidation states. For example, generated using the
CrystalNN.get_bonded_structure() method.
        num_dopants (int): The number of suggestions to return for
n- and p-type dopants.
match_oxi_sign (bool): Whether to force the dopant and original species
to have the same sign of oxidation state. E.g. If the original site
is in a negative charge state, then only negative dopants will be
returned.
Returns:
(dict): Dopant suggestions, given as a dictionary with keys "n_type" and
"p_type". The suggestions for each doping type are given as a list of
        dictionaries, each with the keys:
- "radii_diff": The difference between the Shannon radii of the species.
- "dopant_spcies": The dopant species.
- "original_species": The substituted species.
"""
    # get a list of all Species for all elements in all their common oxidation states
all_species = [Species(el, oxi) for el in Element for oxi in el.common_oxidation_states]
# get a series of tuples with (coordination number, specie)
cn_and_species = {
(
bonded_structure.get_coordination_of_site(i),
bonded_structure.structure[i].specie,
)
for i in range(bonded_structure.structure.num_sites)
}
cn_to_radii_map = {}
possible_dopants = []
for cn, species in cn_and_species:
cn_roman = _int_to_roman(cn)
try:
species_radius = species.get_shannon_radius(cn_roman)
except KeyError:
warnings.warn(f"Shannon radius not found for {species} with coordination number {cn}.\nSkipping...")
continue
if cn not in cn_to_radii_map:
cn_to_radii_map[cn] = _shannon_radii_from_cn(all_species, cn_roman, radius_to_compare=species_radius)
shannon_radii = cn_to_radii_map[cn]
possible_dopants += [
{
"radii_diff": p["radii_diff"],
"dopant_species": p["species"],
"original_species": species,
}
for p in shannon_radii
]
possible_dopants.sort(key=lambda x: abs(x["radii_diff"]))
return _get_dopants(possible_dopants, num_dopants, match_oxi_sign)
def _get_dopants(substitutions, num_dopants, match_oxi_sign):
"""
Utility method to get n- and p-type dopants from a list of substitutions.
"""
n_type = [
pred
for pred in substitutions
if pred["dopant_species"].oxi_state > pred["original_species"].oxi_state
and (
not match_oxi_sign
or np.sign(pred["dopant_species"].oxi_state) == np.sign(pred["original_species"].oxi_state)
)
]
p_type = [
pred
for pred in substitutions
if pred["dopant_species"].oxi_state < pred["original_species"].oxi_state
and (
not match_oxi_sign
or np.sign(pred["dopant_species"].oxi_state) == np.sign(pred["original_species"].oxi_state)
)
]
return {"n_type": n_type[:num_dopants], "p_type": p_type[:num_dopants]}
def _shannon_radii_from_cn(species_list, cn_roman, radius_to_compare=0):
"""
Utility func to get Shannon radii for a particular coordination number.
As the Shannon radii depends on charge state and coordination number,
species without an entry for a particular coordination number will
be skipped.
Args:
species_list (list): A list of Species to get the Shannon radii for.
cn_roman (str): The coordination number as a roman numeral. See
Species.get_shannon_radius for more details.
radius_to_compare (float, optional): If set, the data will be returned
with a "radii_diff" key, containing the difference between the
shannon radii and this radius.
Returns:
(list of dict): The Shannon radii for all Species in species. Formatted
as a list of dictionaries, with the keys:
- "species": The species with charge state.
- "radius": The Shannon radius for the species.
- "radius_diff": The difference between the Shannon radius and the
radius_to_compare optional argument.
"""
shannon_radii = []
for s in species_list:
try:
radius = s.get_shannon_radius(cn_roman)
shannon_radii.append(
{
"species": s,
"radius": radius,
"radii_diff": radius - radius_to_compare,
}
)
except KeyError:
pass
return shannon_radii
def _int_to_roman(number):
"""Utility method to convert an int (less than 20) to a roman numeral."""
roman_conv = [(10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I")]
result = []
for (arabic, roman) in roman_conv:
(factor, number) = divmod(number, arabic)
result.append(roman * factor)
if number == 0:
break
return "".join(result)
|
benoitc/pywebmachine
|
tests/decisions/b08_test.py
|
Python
|
mit
| 664
| 0.00753
|
import t
class b08(t.Test):
class TestResource(t.Resource):
def is_authorized(self, req, rsp):
            if req.headers.get('authorization') == 'yay':
return True
return 'oauth'
def to_html(self, req, rsp):
return "nom nom"
def test_ok(self):
self.req.headers['authorization'] = 'yay'
self.go()
t.eq(self.rsp.status, '200 OK')
t.eq(self.rsp.body, 'nom nom')
def test_not_ok(self):
self.go()
t.eq(self.rsp.status, '401 Unauthorized')
        t.eq(self.rsp.headers['www-authenticate'], 'oauth')
t.eq(self.rsp.body, '')
|
mozilla/firefox-flicks
|
flicks/users/views.py
|
Python
|
bsd-3-clause
| 2,642
| 0.000379
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.utils.translation import get_language
import django_browserid.views
import waffle
from flicks.base import regions
from flicks.base.util import redirect
from flicks.users.forms import UserProfileForm
from flicks.users.tasks import newsletter_subscribe
from flicks.videos.models import Video, Vote
@login_required
def profile(request):
"""Display and process the profile creation form."""
form = UserProfileForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
profile = form.save(commit=False)
profile.user = request.user
profile.locale = get_language()
profile.save()
if form.cleaned_data['mailing_list_signup']:
format = form.cleaned_data['mailing_list_format']
newsletter_subscribe.delay(request.user.email,
source_url=request.build_absolute_uri(),
format=format)
return redirect('flicks.videos.upload')
return render(request, 'users/profile.html', {
'form': form,
'regions': regions,
})
class Verify(django_browserid.views.Verify):
def login_success(self, *args, **kwargs):
"""
Extend successful login to check if the user was attempting to vote for
a video, and create the vote if they were.
"""
response = super(Verify, self).login_success(*args, **kwargs)
if not waffle.flag_is_active(self.request, 'voting'):
return response
try:
video_id = self.request.session['vote_video']
video = Video.objects.get(id=video_id)
Vote.objects.get_or_create(user=self.request.user, video=video)
del self.request.session['vote_video']
# Set cookie so the JavaScript knows they successfully voted.
response.set_cookie('just_voted', '1', max_age=3600, httponly=False)
except (Video.DoesNotExist, ValueError):
# Avoid retrying on an invalid video.
del self.request.session['vote_video']
except KeyError:
pass # Do nothing if the key never existed.
return response
    def login_failure(self, *args, **kwargs):
"""
Extend login failure so that if login fails, the user's attempts to
vote for a video are cancelled.
"""
try:
del self.request.session['vote_video']
except KeyError:
pass
return super(Verify, self).login_failure(*args, **kwargs)
|
devicehive/devicehive-python
|
devicehive/notification.py
|
Python
|
apache-2.0
| 1,546
| 0
|
# Copyright (C) 2018 DataArt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
class Notification(object):
"""Notification class."""
DEVICE_ID_KEY = 'deviceId'
ID_KEY = 'id'
NOTIFICATION_KEY = 'notification'
PARAMETERS_KEY = 'parameters'
TIMESTAMP_KEY = 'timestamp'
def __init__(self, notification):
self._device_id = notification[self.DEVICE_ID_KEY]
self._id = notification[self.ID_KEY]
self._notification = notification[self.NOTIFICATION_KEY]
self._parameters = notification[self.PARAMETERS_KEY]
        self._timestamp = notification[self.TIMESTAMP_KEY]
@property
def device_id(self):
return self._device_id
@property
def id(self):
return self._id
@property
def notification(self):
        return self._notification
@property
def parameters(self):
return self._parameters
@property
def timestamp(self):
return self._timestamp
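A minimal construction sketch grounded in the key constants above; the payload values are made up:
payload = {
    'deviceId': 'device-1',
    'id': 42,
    'notification': 'temperature',
    'parameters': {'value': 21.5},
    'timestamp': '2018-01-01T00:00:00.000',
}
n = Notification(payload)
print(n.device_id, n.notification, n.parameters)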
|
rfoxfa/python-utils
|
utils/__init__.py
|
Python
|
gpl-2.0
| 199
| 0.020101
|
#!/usr/bin/env python
"""
This is the main module driver for Ross Flieger-Allison's
python-utils module.
"""
__author__ = "Ross Flieger-All
|
ison"
__date__ = "23-10-2015"
__version__ = "1.0.0"
|
rwl/muntjac
|
muntjac/demo/sampler/features/selects/OptionGroupsExample.py
|
Python
|
apache-2.0
| 2,051
| 0.000975
|
from muntjac.api import VerticalLayout, OptionGroup, Label
from muntjac.data.property import IValueChangeListener
class OptionGroupsExample(VerticalLayout, IValueChangeListener):
_cities = ['Berlin', 'Brussels', 'Helsinki', 'Madrid', 'Oslo',
'Paris', 'Stockholm']
def __init__(self):
super(OptionGroupsExample, self).__init__()
self.setSpacing(True)
# 'Shorthand' constructor - also supports data binding using Containers
citySelect = OptionGroup('Please select a city', self._cities)
# user can not 'unselect'
citySelect.setNullSelectionAllowed(False)
# select this by default
citySelect.select('Berlin')
# send the change to the server at once
citySelect.setImmediate(True)
# react when the user selects something
citySelect.addListener(self, IValueChangeListener)
self.addComponent(citySelect)
self.addComponent(Label('<h3>Multi-selection</h3>',
Label.CONTENT_XHTML))
# Create the multiselect option group
# 'Shorthand' constructor - also supports data binding using Containers
citySelect = OptionGroup('Please select cities', self._cities)
citySelect.setMultiSelect(True) # FIXME: multi-select
# user can not 'unselect'
citySelect.setNullSelectionAllowed(False)
# select this by default
citySelect.select('Berlin')
# send the change to the server at once
citySelect.setImmediate(True)
# react when the user selects something
citySelect.addListener(self, IValueChangeListener)
self.addComponent(citySelect)
# Shows a notification when a selection is made. The listener will be
# called whenever the value of the component changes, i.e when the user
# makes a new selection.
def valueChange(self, event):
v = event.getProperty().getValue()
if isinstance(v, set):
v = list(v)
        self.getWindow().showNotification('Selected city: %s' % v)
|
sjm-ec/cbt-python
|
Units/06-Loops/GoodExample3.py
|
Python
|
gpl-2.0
| 257
| 0.042802
|
from __future__ import print_function
num = 17
test = 2
while test < num:
if num % test == 0 and num != test:
print(num,'equals',test, '*', num/test)
print(num,'is not a prime number')
break
    test = test + 1
else:
print(num,'is a prime number!')
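The loop above tries every candidate divisor below `num`; a common refinement, sketched here as a variation rather than part of the lesson, stops at the integer square root:
num = 17
test = 2
while test * test <= num:
    if num % test == 0:
        print(num, 'equals', test, '*', num // test)
        print(num, 'is not a prime number')
        break
    test = test + 1
else:
    print(num, 'is a prime number!')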
|
stbd/stoolbox
|
tests/obj-to-sm-test/conversion-test.py
|
Python
|
mit
| 3,598
| 0.000556
|
import unittest
converter = __import__("obj-to-sm-conversion")
model = """
# Blender v2.71 (sub 0) OBJ File:
# www.blender.org
mtllib object.mtl
o Cube
v 1.000000 -1.000000 -1.000000
v 1.000000 -1.000000 1.000000
v -1.000000 -1.000000 1.000000
v -1.000000 -1.000000 -1.000000
v 1.000000 1.000000 -0.999999
v 0.999999 1.000000 1.000001
v -1.000000 1.000000 1.000000
v -1.000000 1.000000 -1.000000
v 0.493105 -0.493106 2.246419
v -0.493106 -0.493106 2.246419
v 0.493105 0.493105 2.246419
v -0.493106 0.493105 2.246419
v 0.493105 -0.493106 3.738037
v -0.493106 -0.493106 3.738037
v 0.493104 0.493105 3.738037
v -0.493107 0.493105 3.738037
v 0.493105 -0.493106 4.284467
v -0.493107 -0.493106 4.284467
v 0.493104 0.493105 4.284468
v -0.493107 0.493105 4.284467
v 0.493104 1.012896 3.738037
v -0.493107 1.012896 3.738037
v 0.493104 1.343554 4.284468
v -0.493107 1.343554 4.284467
v 0.493105 1.845343 3.234304
v -0.493106 1.845343 3.234304
v 0.493105 2.176001 3.780735
v -0.493106 2.176001 3.780734
v 0.570207 -1.571936 -0.570207
v 0.570207 -1.571936 0.570207
v -0.570207 -1.571936 0.570207
v -0.570207 -1.571936 -0.570208
v 0.570207 -3.115134 -0.570207
v 0.570207 -3.115134 0.570207
v -0.570207 -3.115134 0.570207
v -0.570207 -3.115134 -0.570208
vn -0.799400 -0.600800 -0.000000
vn 0.000000 1.000000 0.000000
vn 1.000000 -0.000000 0.000000
vn -0.000000 0.926300 0.376700
vn -1.000000 -0.000000 -0.000000
vn 0.000000 0.000000 -1.000000
vn -0.926300 -0.000000 0.376700
vn 0.926300 0.000000 0.376700
vn 0.000000 -0.926300 0.376700
vn 0.000000 -1.000000 0.000000
vn -0.000000 -0.000000 1.000000
vn 0.000000 0.855600 -0.517700
vn -0.000000 0.517700 0.855600
vn 0.000000 -0.517700 -0.855600
vn -0.000000 -0.600800 0.799400
vn 0.000000 -0.600800 -0.799400
vn 0.799400 -0.600800 0.000000
usemtl Material
s off
f 4//1 32//1 31//1
f 8//2 7//2 6//2
f 1//3 5//3 6//3
f 7//4 12//4 11//4
f 7//5 8//5 4//5
f 1//6 4//6 8//6
f 12//2 16//2 15//2
f 7//7 3//7 10//7
f 2//8 6//8 11//8
f 2//9 9//9 10//9
f 16//5 20//5 24//5
f 12//5 10//5 14//5
f 9//3 11//3 15//3
f 9//10 13//10 14//10
f 17//11 19//11 20//11
f 16//5 14//5 18//5
f 15//3 19//3 17//3
f 13//10 17//10 18//10
f 22//5 24//5 28//5
f 15//3 21//3 23//3
f 19//11 23//11 24//11
f 16//6 22//6 21//6
f 26//12 28//12 27//12
f 23//3 21//3 25//3
f 23//13 27//13 28//13
f 22//14 26//14 25//14
f 32//5 36//5 35//5
f 3//15 31//15 30//15
f 1//16 29//16 32//16
f 2//17 30//17 29//17
f 34//10 35//10 36//10
f 31//11 35//11 34//11
f 29//6 33//6 36//6
f 29//3 30//3 34//3
f 3//1 4//1 31//1
f 5//2 8//2 6//2
f 2//3 1//3 6//3
f 6//4 7//4 11//4
f 3//5 7//5 4//5
f 5//6 1//6 8//6
f 11//2 12//2 15//2
f 12//7 7//7 10//7
f 9//8 2//8 11//8
f 3//9 2//9 10//9
f 22//5 16//5 24//5
f 16//5 12//5 14//5
f 13//3 9//3 15//3
f 10//10 9//10 14//10
f 18//11 17//11 20//11
f 20//5 16//5 18//5
f 13//3 15//3 17//3
f 14//10 13//10 18//10
f 26//5 22//5 28//5
f 19//3 15//3 23//3
f 20//11 19//11 24//11
f 15//6 16//6 21//6
f 25//12 26//12 27//12
f 27//3 23//3 25//3
f 24//13 23//13 28//13
f 21//14 22//14 25//14
f 31//5 32//5 35//5
f 2//15 3//15 30//15
f 4//16 1//16 32//16
f 1//17 2//17 29//17
f 33//10 34//10 36//10
f 30//11 31//11 34//11
f 32//6 29//6 36//6
f 33//3 29//3 34//3
"""
class TestConvertFunctions(unittest.TestCase):
def test_conversion(self):
global model
(format, faces, vertexes, normals, texture) = converter.convert_to_objects(model)
self.assertEqual(len(faces), 68)
self.assertEqual(len(vertexes), 36)
self.assertEqual(len(normals), 17)
self.assertEqual(len(texture), 0)
self.assertEqual(format, 'vn')
return 0
|
rwl/PyCIM
|
CIM15/IEC61968/Common/Agreement.py
|
Python
|
mit
| 2,456
| 0.003257
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61968.Common.Document import Document
class Agreement(Document):
"""Formal agreement between two parties defining the terms and conditions for a set of services. The specifics of the services are, in turn, defined via one or more service agreements.Formal agreement between two parties defining the terms and conditions for a set of services. The specifics of the services are, in turn, defined via one or more service agreements.
"""
def __init__(self, signDate='', validityInterval=None, *args, **kw_args):
"""In
|
itialises a new 'Agreement' instance.
@param signDate: Date this agreement was c
|
onsummated among associated persons and/or organisations.
@param validityInterval: Date and time interval this agreement is valid (from going into effect to termination).
"""
#: Date this agreement was consummated among associated persons and/or organisations.
self.signDate = signDate
self.validityInterval = validityInterval
super(Agreement, self).__init__(*args, **kw_args)
_attrs = ["signDate"]
_attr_types = {"signDate": str}
_defaults = {"signDate": ''}
_enums = {}
_refs = ["validityInterval"]
_many_refs = []
# Date and time interval this agreement is valid (from going into effect to termination).
validityInterval = None
|
maxwelld90/personal_web
|
django_project/personal_web/dmax_website/migrations/0010_auto_20160103_1917.py
|
Python
|
gpl-2.0
| 457
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-03 19:17
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dmax_website', '0009_auto_20160103_1911'),
]
operations = [
migrations.RenameField(
model_name='projectitem',
old_name='project_abbreviation',
new_name='abbreviation',
),
]
|
Organice/django-organice
|
tests/units/__init__.py
|
Python
|
apache-2.0
| 920
| 0.007609
|
"""
Unit tests and test utilities for django Organice.
NOTE: Having an __init__ file in test directories is bad practice according to
py.test recommendations:
http://pytest.org/latest/goodpractises.html#choosing-a-test-layout-import-rules
However, this makes relative imports work in test modules (e.g. helper from ``utils.py``).
"""
# NOTE 1: This file makes the 'test' folder importable! (i.e. `import tests`) Not good.
# Though, the test folder is pruned by MANIFEST.in, hence it's not installed anywhere.
# TODO: Consider inlining the tests into the package, or find a solution without relative imports.
# NOTE 2: The import of `DjangoSettingsManager` for probe_values_in_list() makes the
# test.utils dependent on an installed version of Organice. Also tests are run with
# helpers from the unit under test! No, not good.
# TODO: Make tests and test helpers independent from the implementation being tested.
|
DrSLDR/mgmag-proj
|
gravitas/controller/symbolic.py
|
Python
|
mit
| 7,657
| 0.007575
|
"""A symbolic AI that forms decisions by using a decision tree."""
import random
from .interface import IPlayerController
from model.card import Card
class SymbolicAI_PC(IPlayerController):
"""player controller that returns resolutes its choices using a decision tree.
It is called symbolic because it is not actually an AI"""
    def __init__(self, player, args, container):
super().__init__(player,args)
def pollDraft(self, state):
"""Function that returns the choice of a stack from the draw field"""
"""Tries to pick a varied hand"""
        # Bind in the perceived fields
percievedField = state.deck.percieveCardField()
# just return if there are no fields left
if len(percievedField) == 0:
return None
# if only one stack left, return that one
if len(percievedField) == 1:
return percievedField[0].index
###### if more than one choice left:
# get cards that you already have (your hand)
hand = self.player.getHand()
handTrac = [c for c in hand if c.getType() == Card.Type.tractor]
handNormLow = [c for c in hand if c.getType() == Card.Type.normal and c.getValue() <= 8]
handNormHigh = [c for c in hand if c.getType() == Card.Type.normal and c.getValue() >= 3]
handRep = [c for c in hand if c.getType() == Card.Type.repulsor]
# order field options on card type
tractors = [f for f in percievedField if f.card.getType() == Card.Type.tractor]
normalHighs = [f for f in percievedField if f.card.getType() == Card.Type.normal and f.card.getValue() < 7]
normalLows = [f for f in percievedField if f.card.getType() == Card.Type.normal and f.card.getValue() >= 3]
repulsors = [f for f in percievedField if f.card.getType() == Card.Type.repulsor]
# if there are tractors available, and you don't have one in your hand
if len(tractors) > 0 and len(handTrac) == 0:
return tractors[0].index
# if there are repulsors, but you dont have them in your hand
if len(repulsors) > 0 and len(handRep) == 0:
return repulsors[0].index
# get lowest normal that plays first
if len(normalLows) > 0 and len(handNormLow) == 0:
lowFirstSorted = sorted(normalLows, key = lambda x:x.card.getName()[0])# sort on first letter
return lowFirstSorted[0].index
# get highest normal that plays first
if len(normalHighs) > 0 and len(handNormHigh) == 0:
highFirstSorted = sorted(normalHighs, key = lambda x:x.card.getName()[0])# sort on first letter
return highFirstSorted[0].index
        # if nothing else works, just take a random field
randomField = random.choice(percievedField)
return randomField.index
def pollPlay(self, state):
"""Function that returns which card the PC want to play"""
"""chooses mainly on player-direction and cardtype. It does not take into account
the cardNames (and thus playing-timing) or more complex ship-configurations
or one-step-ahead-strategies"""
# get player hand
hand = self.player.getHand()
# on empty hand
if len(hand) == 0:
return None
# on 1 card in hand, play only card left
if len(hand) == 1:
return hand[0]
##### on more cards, make a choice between them
# order options on card type
tractors = [c for c in hand if c.getType() == Card.Type.tractor]
normals = [c for c in hand if c.getType() == Card.Type.normal]
repulsors = [c for c in hand if c.getType() == Card.Type.repulsor]
# find closest ship
targetName = state.getTarget(self.player.getName())
# if no closest ship, the player is Stuck
if targetName is None:
# if available, try to play a tractor
if len(tractors) > 0:
return tractors[0]
# otherwise, just play some card
else:
return random.choice(hand)
# there is a closest ship: find moving direction
target = state.getShip(targetName).ship
distance = target.getPos() - self.player.getPos()
# moving forward...
if distance > 0:
# so choose highest-value normal card
if len(normals) > 0:
orderedNormals = sorted(normals, key = lambda x: x.getValue() )
return orderedNormals[0]
# if there are no normal cards, use tractor or lowest repulsor
else:
if len(tractors) > 0:
                    # use a tractor (does not matter which one since they are similar)
return tractors[0]
# since then hand is not empty, there are only repulsors left
else:
                    # choose lowest repulsor
                    orderedRepulsors = sorted(repulsors, key = lambda x: -x.getValue() )
return orderedRepulsors[0]
# moving backward...
else: # if distance <= 0:
# so choose highest-value repulsor card
if len(repulsors) > 0:
orderedRepulsors = sorted(repulsors, key = lambda x: x.getValue() )
return orderedRepulsors[0]
# if there are no repulsor cards, use tractor or lowest normal
else:
if len(tractors) > 0:
                    # use a tractor (does not matter which one since they are similar)
return tractors[0]
# since then hand is not empty, there are only normals left
else:
                    # choose lowest normal
orderedNormals = sorted(normals, key = lambda x: -x.getValue() )
return orderedNormals[0]
def pollEmergencyStop(self, state):
"""Function that returns the choice of using the emergency stop as a boolean.
Right now the choice is rather egocentric; no other-player-bullying is done."""
# get closest ship
targetName = state.getTarget(self.player.getName())
if targetName is None:
# player is stuck, don't waste ES!
return False
if self._playedCard.getType() == Card.Type.tractor:
            # choice of using ES with tractor cardType is complex...so don't
return False
# distance to closest ship (sign equals direction)
target = state.getShip(targetName).ship
distance = target.getPos() - self.player.getPos()
if distance < 0 and self._playedCard.getType() == Card.Type.normal:
# going in normal direction with closest ship just behind you: use ES
return True
if distance > 0 and self._playedCard.getType() == Card.Type.repulsor:
# getting repulsed with closest ship just behind you: use ES
return True
# return default
return False
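        # Illustrative summary of the decision above (a sketch, not from the
        # original module), with distance = target position - own position:
        #   normal card,   distance < 0  -> True  (use the emergency stop)
        #   repulsor card, distance > 0  -> True  (use the emergency stop)
        #   tractor card, or no target   -> False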
def announceWinner(self, state):
"""Function that updates the PC after the last turn"""
return None
def informReveal(self, cards):
"""The definitive set of played cards in a round are shown to the player"""
self.log.info("Random ai informed about %s" % cards)
self._reveal = cards
self._playedCard = [c for c in cards if cards[c] == self.player.getName()][0] # find unique card owned by player
def isHuman(self):
"""The board need to be able to find the human player, which this function eill help with"""
return False
|
mhahn/stacker
|
stacker/actions/diff.py
|
Python
|
bsd-2-clause
| 6,533
| 0
|
import logging
from .. import exceptions
from ..plan import COMPLETE, Plan
from ..status import NotSubmittedStatus, NotUpdatedStatus
from . import build
import difflib
import json
logger = logging.getLogger(__name__)
def diff_dictionaries(old_dict, new_dict):
"""Diffs two single dimension dictionaries
Returns the number of changes and an unordered list
expressing the common entries and changes.
Args:
old_dict(dict): old dictionary
new_dict(dict): new dictionary
    Returns:
        list: [changes, output] where changes (int) is the number of
            changed records and output is a list of
            [str(<change type>), <key>, <value>] entries, with
            <change type> one of +, - or <space>.
"""
old_set = set(old_dict)
new_set = set(new_dict)
added_set = new_set - old_set
removed_set = old_set - new_set
common_set = old_set & new_set
changes = 0
output = []
for key in added_set:
changes += 1
output.append(["+", key, new_dict[key]])
for key in removed_set:
changes += 1
output.append(["-", key, old_dict[key]])
for key in common_set:
if str(old_dict[key]) != str(new_dict[key]):
changes += 1
output.append(["-", key, old_dict[key]])
output.append(["+", key, new_dict[key]])
else:
output.append([" ", key, new_dict[key]])
return [changes, output]
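# Illustrative example (a sketch, not part of the original module):
#   diff_dictionaries({"b": 2}, {"b": 3})
#   -> [1, [["-", "b", 2], ["+", "b", 3]]]
# i.e. one changed record, rendered as a removed/added pair.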
def print_diff_parameters(parameter_diff):
"""Handles the printing of differences in parameters.
Args:
parameter_diff (list): A list dictionaries detailing the differences
between two parameters returned by
:func:`stacker.actions.diff.diff_dictionaries`
"""
print """--- Old Parameters
+++ New Parameters
******************"""
for line in parameter_diff:
print "%s%s = %s" % (line[0], line[1], line[2])
def diff_parameters(old_params, new_params):
"""Compares the old vs. new parameters and prints a "diff"
If there are no changes, we print nothing.
Args:
        old_params(dict): old parameters
new_params(dict): new parameters
Returns:
list: A list of differences
"""
[changes, diff] = diff_dictionaries(old_params, new_params)
if changes == 0:
return []
return diff
def print_stack_changes(stack_name, new_stack, old_stack, new_params,
old_params):
"""Prints out the paramters (if changed) and stack diff"""
from_file = "old_%s" % (stack_name,)
to_file = "new_%s" % (stack_name,)
lines = difflib.context_diff(
old_stack, new_stack,
fromfile=from_file, tofile=to_file)
template_changes = list(lines)
if not template_changes:
print "*** No changes to template ***"
else:
param_diffs = diff_parameters(old_params, new_params)
print_diff_parameters(param_diffs)
print "".join(template_changes)
class Action(build.Action):
""" Responsible for diff'ing CF stacks in AWS and on disk
Generates the build plan based on stack dependencies (these dependencies
are determined automatically based on references to output values from
other stacks).
The plan is then used to pull the current CloudFormation template from
AWS and compare it to the generated templated based on the current
config.
"""
def _normalize_json(self, template):
"""Normalizes our template for diffing
Args:
template(str): json string representing the template
Returns:
list: json representation of the parameters
"""
obj = json.loads(template)
json_str = json.dumps(obj, sort_keys=True, indent=4)
result = []
lines = json_str.split("\n")
for line in lines:
result.append(line + "\n")
return result
def _print_new_stack(self, stack, parameters):
"""Prints out the parameters & stack contents of a new stack"""
print "New template parameters:"
for param in sorted(parameters,
key=lambda param: param['ParameterKey']):
print "%s = %s" % (param['ParameterKey'], param['ParameterValue'])
print "\nNew template contents:"
print "".join(stack)
def _diff_stack(self, stack, **kwargs):
"""Handles the diffing a stack in CloudFormation vs our config"""
if not build.should_submit(stack):
return NotSubmittedStatus()
if not build.should_update(stack):
return NotUpdatedStatus()
# get the current stack template & params from AWS
try:
[old_template, old_params] = self.provider.get_stack_info(
stack.fqn)
except exceptions.StackDoesNotExist:
old_template = None
old_params = {}
stack.resolve_variables(self.context, self.provider)
# generate our own template & params
new_template = stack.blueprint.rendered
parameters = self.build_parameters(stack)
new_params = dict()
for p in parameters:
new_params[p['ParameterKey']] = p['ParameterValue']
new_stack = self._normalize_json(new_template)
print "============== Stack: %s ==============" % (stack.name,)
# If this is a completely new template dump our params & stack
if not old_template:
self._print_new_stack(new_stack, parameters)
else:
# Diff our old & new stack/parameters
old_stack = self._normalize_json(old_template)
print_stack_changes(stack.name, new_stack, old_stack, new_params,
old_params)
return COMPLETE
def _generate_plan(self):
plan = Plan(description="Diff stacks")
stacks = self.context.get_stacks_dict()
dependencies = self._get_dependencies()
        for stack_name in self.get_stack_execution_order(dependencies):
plan.add(
stacks[stack_name],
run_func=self._diff_stack,
requires=dependencies.get(stack_name),
)
return plan
def run(self, *args, **kwargs):
plan = self._generate_plan()
debug_plan = self._generate_plan()
debug_plan.outline(logging.DEBUG)
logger.info("Diffing stacks: %s", ", ".join(plan.keys()))
        plan.execute()
"""Don't ever do anything for pre_run or post_run"""
def pre_run(self, *args, **kwargs):
pass
def post_run(self, *args, **kwargs):
pass
|
crobinso/pkgdb2
|
pkgdb2/api/extras.py
|
Python
|
gpl-2.0
| 20,250
| 0.000198
|
# -*- coding: utf-8 -*-
#
# Copyright © 2013-2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#
'''
Extras API endpoints for the Flask application.
'''
import flask
import requests
import pkgdb2.lib as pkgdblib
from pkgdb2 import SESSION, APP
from pkgdb2.api import API
def request_wants_json():
""" Return weather a json output was requested. """
best = flask.request.accept_mimetypes \
.best_match(['application/json', 'text/html'])
return best == 'application/json' and \
flask.request.accept_mimetypes[best] > \
flask.request.accept_mimetypes['text/html']
#@pkgdb.CACHE.cache_on_arguments(expiration_time=3600)
def _bz_acls_cached(name=None, out_format='text'):
'''Return the package attributes used by bugzilla.
:kwarg collection: Name of the bugzilla collection to gather data on.
    :kwarg out_format: Specify if the output is text or json.
Note: The data returned by this function is for the way the current
Fedora bugzilla is setup as of (2007/6/25). In the future, bugzilla
may change to have separate products for each collection-version.
When that happens we'll have to change what this function returns.
The returned data looks like this:
bugzillaAcls[collection][package].attribute
attribute is one of:
:owner: FAS username for the owner
:qacontact: if the package has a special qacontact, their userid
is listed here
:summary: Short description of the package
:cclist: list of FAS userids that are watching the package
'''
packages = pkgdblib.bugzilla(
session=SESSION,
name=name)
output = []
if out_format == 'json':
output = {'bugzillaAcls': {},
'title': 'Fedora Package Database -- Bugzilla ACLs'}
for clt in sorted(packages):
for pkg in sorted(packages[clt]):
if out_format == 'json':
user = []
group = []
for ppl in packages[clt][pkg]['cc'].split(','):
if ppl.startswith('group::'):
group.append(ppl.replace('group::', '@').encode('UTF-8'))
elif ppl:
user.append(ppl.encode('UTF-8'))
poc = packages[clt][pkg]['poc']
if poc.startswith('group::'):
poc = poc.replace('group::', '@')
if clt not in output['bugzillaAcls']:
output['bugzillaAcls'][clt.encode('UTF-8')] = {}
output['bugzillaAcls'][clt][pkg.encode('UTF-8')] = {
'owner': poc.encode('UTF-8'),
'cclist': {
'groups': group,
'people': user,
},
'qacontact': None,
'summary': packages[clt][pkg]['summary'].encode('UTF-8')
}
else:
output.append(
'%(collection)s|%(name)s|%(summary)s|%(poc)s|%(qa)s'
'|%(cc)s' % (packages[clt][pkg])
)
return output
#@pkgdb.CACHE.cache_on_arguments(expiration_time=3600)
def _bz_notify_cache(
name=None, version=None, eol=False, out_format='text', acls=None):
'''List of usernames that should be notified of changes to a package.
For the collections specified we want to retrieve all of the owners,
watchbugzilla, and watchcommits accounts.
    :kwarg name: Set to a collection name to filter the results for that
        collection
    :kwarg version: Set to a collection version to further filter results
for a single version
:kwarg eol: Set to True if you want to include end of life
distributions
    :kwarg out_format: Specify if the output is text or json.
'''
packages = pkgdblib.notify(
session=SESSION,
eol=eol,
name=name,
version=version,
acls=acls)
output = []
if out_format == 'json':
        output = {'packages': {},
'eol': eol,
'name': name,
'version': version,
'title': 'Fedora Package Database -- Notification List'}
for package in sorted(packages):
if out_format == 'json':
output['packages'][package] = packages[package].split(',')
else:
output.append('%s|%s\n' % (package, packages[package]))
return output
#@pkgdb.CACHE.cache_on_arguments(expiration_time=3600)
def _vcs_acls_cache(out_format='text', eol=False):
'''Return ACLs for the version control system.
    :kwarg out_format: Specify if the output is text or json.
:kwarg eol: A boolean specifying whether to include information about
End Of Life collections or not. Defaults to ``False``.
'''
packages = pkgdblib.vcs_acls(
session=SESSION, eol=eol, oformat=out_format,
skip_pp=APP.config.get('PKGS_NOT_PROVENPACKAGER', None))
output = []
if out_format == 'json':
output = {'packageAcls': packages,
'title': 'Fedora Package Database -- VCS ACLs'}
else:
for package in sorted(packages):
for branch in sorted(packages[package]):
if packages[package][branch]['group']:
packages[package][branch]['group'] += ','
output.append(
'avail | %(group)s%(user)s | rpms/%(name)s/%(branch)s'
% (packages[package][branch]))
return output
@API.route('/bugzilla/')
@API.route('/bugzilla')
def api_bugzilla():
'''
Bugzilla information
--------------------
Return the package attributes used by bugzilla.
::
/api/bugzilla
    :kwarg collection: Name of the bugzilla collection to gather data on.
    :kwarg format: Specify if the output is text or json.
Note: The data returned by this function is for the way the current
Fedora bugzilla is setup as of (2007/6/25). In the future, bugzilla
may change to have separate products for each collection-version.
When that happens we'll have to change what this function returns.
The returned data looks like this::
bugzillaAcls[collection][package].attribute
attribute is one of:
:owner: FAS username for the owner
:qacontact: if the package has a special qacontact, their userid
is listed here
:summary: Short description of the package
:cclist: list of FAS userids that are watching the package
'''
name = flask.request.args.get('collection', None)
out_format = flask.request.args.get('format', 'text')
if out_format not in ('text', 'json'):
out_format = 'text'
if request_wants_json():
out_format = 'json'
intro = r"""# Package Database VCS Acls
# Text Format
# Collection|Package|Description|Owner|Initial QA|Initial CCList
# Backslashes (\) are escaped as \u005c Pipes (|) are escaped as \u007c
"""
acls = _bz_acls_cached(name, out_format)
if out_format == 'json':
return flask.jsonify(acls)
else:
return flask.Response(
intro + "\n".join(acls),
content_type="text/plain;charset=UTF-8"
)
@API.route('/notify/')
@API.route('/notify')
def api_notify():
'''
Notification
|
davidmogar/quizzer-python
|
quizzer/deserializers/assessment_deserializer.py
|
Python
|
mit
| 4,007
| 0.002496
|
import json
from quizzer.domain.answer import Answer
from quizzer.domain.grade import Grade
from quizzer.domain.questions import *
__author__ = 'David Moreno García'
def deserialize_answers(json_string):
"""
Deserializes the JSON representation received as arguments to a map of student ids to Answer objects.
:param json_string: JSON representation of the answers objects
:return: a map of student ids to Answer objects
"""
answers = dict()
if json_string:
data = json.loads(json_string)
if 'items' in data:
for item in data['items']:
try:
answers[item['studentId']] = [Answer(answer['question'], answer['value']) for answer in
item['answers']]
except KeyError:
pass
return answers
def deserialize_grades(json_string):
"""
Deserializes the JSON representation received as arguments to a map of student ids to Grade objects.
:param json_string: JSON representation of the grades objects
:return: a map of student ids to Grade objects
"""
grades = dict()
if json_string:
data = json.loads(json_string)
        if 'scores' in data:
for grade in data['scores']:
if 'studentId' in grade and 'value' in grade:
grades[grade['studentId']] = Grade(grade['studentId'], grade['value'])
return grades
def deserialize_multichoice(hash):
"""
Deserialize a Multichoice question
:param hash: HashMap containing the question data
:return: question created
"""
question = MultichoiceQuestion(hash['id'], hash['questionText'])
if 'alternatives' in hash:
for alternative in hash['alternatives']:
if 'code' in alternative and 'text' in alternative and 'value' in alternative:
question.add_alternative(alternative['code'], alternative['text'], alternative['value'])
return question
def deserialize_numerical(hash):
"""
Deserialize a Numerical question
:param hash: HashMap containing the question data
:return: question created
"""
question = NumericalQuestion(hash['id'], hash['questionText'])
if 'correct' in hash:
question.correct = hash['correct']
if 'valueOk' in hash:
question.value_correct = hash['valueOk']
if 'valueFailed' in hash:
question.value_incorrect = hash['valueFailed']
return question
def deserialize_true_false(hash):
"""
Deserialize a True/False question
:param hash: HashMap containing the question data
:return: question created
"""
question = TrueFalseQuestion(hash['id'], hash['questionText'])
if 'correct' in hash:
question.correct = hash['correct']
if 'valueOK' in hash:
question.value_correct = hash['valueOK']
if 'valueFailed' in hash:
question.value_incorrect = hash['valueFailed']
if 'feedback' in hash:
question.feedback = hash['feedback']
return question
# Hash used to decide what method to call based on the question type
question_type = {
'multichoice': deserialize_multichoice,
'numerical': deserialize_numerical,
'truefalse': deserialize_true_false
}
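# Illustrative dispatch (a sketch, not part of the original module):
#   sample = {'id': 'q1', 'type': 'truefalse', 'questionText': 'Is water wet?',
#             'correct': True, 'valueOK': 1.0, 'valueFailed': -0.25}
#   question_type[sample['type']](sample)  # -> TrueFalseQuestion instance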
def deserialize_questions(json_string):
"""
Deserializes the JSON representation received as arguments to a map of questions ids to Question objects.
:param json_string: JSON representation of the questions objects
:return: a map of questions ids to Question objects
"""
questions = dict()
if json_string:
data = json.loads(json_string)
if 'questions' in data:
for question in data['questions']:
try:
if 'id' in question and 'questionText' in question:
questions[question['id']] = question_type[question['type']](question)
except KeyError:
pass
return questions
|
karasinski/NACAFoil-OpenFOAM
|
plot.py
|
Python
|
gpl-3.0
| 1,969
| 0.004571
|
#!/usr/bin/env python
"""
This script plots various quantities.
"""
from __future__ import division, print_function
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import argparse
import os
ylabels = {"cl": r"$C_l$", "cd": r"$C_d$", "cl/cd": r"$C_l/C_d$", "k": "$k$",
"omega": r"$\omega$", "epsilon": r"$\epsilon$"}
def plot_foil_perf(quantity="cl/cd", foil="0012", Re=2e5):
df = pd.read_csv("processed/NACA{}_{:.1e}.csv".format(foil, Re))
plt.figure()
if quantity == "cl/cd":
q = df.cl/df.cd
else:
q = df[quantity]
plt.plot(df.alpha_deg, q, "-o")
plt.xlabel(r"$\alpha$ (deg)")
plt.ylabel(ylabels[quantity])
plt.grid(True)
plt.tight_layout()
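# Example invocation (illustrative; assumes a file such as
# processed/NACA0012_2.0e+05.csv was produced by an earlier processing step):
#     python plot.py cl/cd --foil 0012 -R 2e5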
if __name__ == "__main__":
try:
import seaborn
seaborn.set(style="white", context="not
|
ebook", font_scale=1.5)
except ImportError:
print("Could not import seaborn for plot styling. Try")
print("\n conda install seaborn\n\nor")
print("\n pip install seaborn\n")
parser = argparse.ArgumentParser(description="Plotting results")
parser.add_argument("quantity", nargs="?", default="cl/cd",
help="Which quantity to plot",
choices=["cl", "cd", "cl/cd", "k", "omega", "epsilon"])
parser.add_argument("--foil", "-f", help="Foil", default="0012")
parser.add_argument("--Reynolds", "-R", help="Reynolds number", default=2e5)
parser.add_argument("--save", "-s", action="store_true", help="Save plots")
parser.add_argument("--noshow", action="store_true", default=False,
help="Do not show")
args = parser.parse_args()
plot_foil_perf(args.quantity, args.foil, float(args.Reynolds))
if args.save:
if not os.path.isdir("figures"):
os.mkdir("figures")
plt.savefig("figures/{}.pdf".format(args.quantity))
if not args.noshow:
plt.show()
|
richardjmarini/JsonSchema
|
manage.py
|
Python
|
gpl-2.0
| 254
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "json_schema.settings")
    from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
qeedquan/misc_utilities
|
math/matrix-sqrt.py
|
Python
|
mit
| 1,781
| 0.005053
|
"""
https://en.wikipedia.org/wiki/Square_root_of_a_matrix
B is the sqrt of a matrix A if B*B = A
"""
import numpy as np
from scipy.linalg import sqrtm
from scipy.stats import special_ortho_group
def denman_beaver(A, n=50):
Y = A
Z = np.eye(len(A))
for i in range(n):
Yn = 0.5*(Y + np.linalg.inv(Z))
Zn = 0.5*(Z + np.linalg.inv(Y))
Y = Yn
Z = Zn
return (Y, Z)
def babylonian(A, n=50):
X = np.eye(len(A))
for i in range(n):
X = 0.5*(X + np.dot(A, np.linalg.inv(X)))
return X
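# Illustrative self-check (a sketch, not part of the original script): on a
# diagonal matrix the Babylonian iteration reduces to the scalar method, so
# the square root of [[4, 0], [0, 9]] should converge to [[2, 0], [0, 3]].
def _check_babylonian_diagonal():
    A = np.array([[4.0, 0.0], [0.0, 9.0]])
    X = babylonian(A)
    assert np.allclose(X, [[2.0, 0.0], [0.0, 3.0]])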
def gen_random_matrix(n):
return np.random.rand(n, n)
def gen_rotation_matrix(n):
return special_ortho_group.rvs(n)*np.random.randint(-100, 101)
def gen_symmetric_matrix(n):
A = np.random.randint(-10, 11, size=(n, n))
A = 0.5*(A + A.T)
return A
def test(title, gen_matrix, size, iters):
print("Testing {} matrix".format(title))
for i in range(1, size):
        for j in range(iters):
try:
A = gen_matrix(i)
d = np.linalg.det(A)
Y, _ = denman_beaver(A)
X = babylonian(A)
Z = sqrtm(A)
print("{}x{} matrix (det {})".format(i, i, d))
print(A)
print("Denman Beaver")
print(np.dot(Y, Y))
print("Babylonian")
print(np.dot(X, X))
print("Scipy")
print(np.dot(Z, Z))
print()
except:
pass
# iteration methods above tend to fail on random and symmetric matrices
test("random", gen_random_matrix, 5, 10)
test("symmetric", gen_symmetric_matrix, 5, 10)
# for rotation matrices, the iteration methods work
test("rotation", gen_rotation_matrix, 5, 10)
|
tedor/home-blog
|
blog/models.py
|
Python
|
bsd-3-clause
| 1,381
| 0.005793
|
from django.db import models
from django.db.models import permalink
from django.utils.translation import ugettext_lazy as _
from tagging.fields import TagField
from django.utils import timezone
class Post(models.Model):
STATUS_DRAFT = 1
STATUS_PUBLIC = 2
TEXT_CUT = "===cut==="
STATUS_CHOICES = (
(STATUS_DRAFT, _('Draft')),
        (STATUS_PUBLIC, _('Public')),
    )
title = models.CharField(_('title'), max_length=255)
slug = models.SlugField(_('slug'), unique=True)
text = models.TextField(_('text'), help_text="<a href='http://daringfireball.net/projects/markdown/syntax'>Markdown</a>")
    status = models.IntegerField(_('status'), choices=STATUS_CHOICES, default=1)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True, auto_now_add=True)
tag = TagField()
def save(self):
if not self.created_at:
self.created_at = timezone.now()
super(Post, self).save()
def __unicode__(self):
return self.title
@property
def get_text_cut(self):
return u'%s' % self.text.split(Post.TEXT_CUT)[0]
@property
def get_text(self):
return u'%s' % self.text.replace(Post.TEXT_CUT, "")
@permalink
def get_absolute_url(self):
return ('blog_post_detail', None, {'slug': self.slug})
|
bitcraft/pyglet
|
pyglet/graphics/allocation.py
|
Python
|
bsd-3-clause
| 14,197
| 0
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
#    derived from this software without specific prior written
#    permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id:$
"""Memory allocation algorithm for vertex arrays and buffers.
The region allocator is used to allocate vertex indices within a vertex
domain's multiple buffers. ("Buffer" refers to any abstract buffer presented
by `pyglet.graphics.vertexbuffer`.)
The allocator will at times request more space from the buffers. The current
policy is to double the buffer size when there is not enough room to fulfil an
allocation. The buffer is never resized smaller.
The allocator maintains references to free space only; it is the caller's
responsibility to maintain the allocated regions.
"""
# Common cases:
# -regions will be the same size (instances of same object, e.g. sprites)
# -regions will not usually be resized (only exception is text)
# -alignment of 4 vertices (glyphs, sprites, images, ...)
#
# Optimise for:
# -keeping regions adjacent, reduce the number of entries in glMultiDrawArrays
# -finding large blocks of allocated regions quickly (for drawing)
# -finding block of unallocated space is the _uncommon_ case!
#
# Decisions:
# -don't over-allocate regions to any alignment -- this would require more
# work in finding the allocated spaces (for drawing) and would result in
# more entries in glMultiDrawArrays
# -don't move blocks when they truncate themselves. try not to allocate the
# space they freed too soon (they will likely need grow back into it later,
# and growing will usually require a reallocation).
# -allocator does not track individual allocated regions. Trusts caller
# to provide accurate (start, size) tuple, which completely describes
# a region from the allocator's point of view.
# -this means that compacting is probably not feasible, or would be hideously
# expensive
class AllocatorMemoryException(Exception):
"""The buffer is not large enough to fulfil an allocation.
Raised by `Allocator` methods when the operation failed due to lack of
buffer space. The buffer should be increased to at least
requested_capacity and then the operation retried (guaranteed to
pass second time).
"""
def __init__(self, requested_capacity):
self.requested_capacity = requested_capacity
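# Illustrative caller pattern (a sketch, not part of pyglet): grow the
# backing buffer when an allocation fails, then retry.  `buf` is a
# hypothetical object exposing resize() for the underlying storage.
def _alloc_with_resize(allocator, buf, size):
    try:
        return allocator.alloc(size)
    except AllocatorMemoryException as e:
        buf.resize(e.requested_capacity)
        allocator.set_capacity(e.requested_capacity)
        return allocator.alloc(size)  # guaranteed to pass the second time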
class Allocator:
"""Buffer space allocation implementation."""
def __init__(self, capacity):
"""Create an allocator for a buffer of the specified capacity.
:Parameters:
`capacity` : int
Maximum size of the buffer.
"""
self.capacity = capacity
# Allocated blocks. Start index and size in parallel lists.
#
# # = allocated, - = free
#
# 0 3 5 15 20 24 40
# |###--##########-----####----------------------|
#
# starts = [0, 5, 20]
# sizes = [3, 10, 4]
#
# To calculate free blocks:
# for i in range(0, len(starts)):
# free_start[i] = starts[i] + sizes[i]
# free_size[i] = starts[i+1] - free_start[i]
# free_size[i+1] = self.capacity - free_start[-1]
self.starts = list()
self.sizes = list()
def set_capacity(self, size):
"""Resize the maximum buffer size.
        The capacity cannot be reduced.
:Parameters:
`size` : int
New maximum size of the buffer.
"""
assert size > self.capacity
self.capacity = size
def alloc(self, size):
"""Allocate memory in the buffer.
Raises `AllocatorMemoryException` if the allocation cannot be
fulfilled.
:Parameters:
`size` : int
Size of region to allocate.
:rtype: int
:return: Starting index of the allocated region.
"""
assert size >= 0
if size == 0:
return 0
# return start
# or raise AllocatorMemoryException
if not self.starts:
if size <= self.capacity:
self.starts.append(0)
self.sizes.append(size)
return 0
else:
raise AllocatorMemoryException(size)
# Allocate in a free space
free_start = self.starts[0] + self.sizes[0]
for i, (alloc_start, alloc_size) in \
enumerate(zip(self.starts[1:], self.sizes[1:])):
# Danger!
# i is actually index - 1 because of slicing above...
# starts[i] points to the block before this free space
# starts[i+1] points to the block after this free space, and is
# always valid.
free_size = alloc_start - free_start
if free_size == size:
# Merge previous block with this one (removing this free space)
self.sizes[i] += free_size + alloc_size
del self.starts[i + 1]
del self.sizes[i + 1]
return free_start
elif free_size > size:
# Increase size of previous block to intrude into this free
# space.
self.sizes[i] += size
return free_start
free_start = alloc_start + alloc_size
# Allocate at end of capacity
free_size = self.capacity - free_start
if free_size >= size:
self.sizes[-1] += size
return free_start
raise AllocatorMemoryException(self.capacity + size - free_size)
def realloc(self, start, size, new_size):
"""Reallocate a region of the buffer.
This is more efficient than separate `dealloc` and `alloc` calls, as
the region can often be resized in-place.
Raises `AllocatorMemoryException` if the allocation cannot be
fulfilled.
:Parameters:
`start` : int
Current starting index of the region.
`size` : int
Current size of the region.
`new_size` : int
New size of the region.
"""
assert size >= 0 and new_size >= 0
if new_size == 0:
if size != 0:
self.dealloc(start, size)
return 0
elif size == 0:
return self.alloc(new_size)
# return start
# or raise AllocatorMemoryException
# Truncation is the same as deallocating the tail cruft
if new_size < size:
self.dealloc(start + new_size, size - new_size)
return start
|
uclouvain/OSIS-Louvain
|
base/forms/entity.py
|
Python
|
agpl-3.0
| 1,833
| 0.001092
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import django_filters
from django.forms import TextInput
from django.utils.translation import gettext_lazy as _
from base.models.entity_version import EntityVersion
class EntityVersionFilter(django_filters.FilterSet):
acronym = django_filters.CharFilter(
lookup_expr='icontains', label=_("Acronym"),
widget=TextInput(attrs={'style': "text-transform:uppercase"})
)
title = django_filters.CharFilter(lookup_expr='icontains', label=_("Title"), )
class Meta:
model = EntityVersion
fields = ["entity_type"]
|
TomasDuro/paparazzi
|
sw/tools/rtp_viewer/rtp_viewer.py
|
Python
|
gpl-2.0
| 2,844
| 0.001758
|
#! /usr/bin/python
import cv2
import sys
from os import path, getenv
PPRZ_SRC = getenv("PAPARAZZI_SRC", path.normpath(path.join(path.dirname(path.abspath(__file__)), '../../../')))
sys.path.append(PPRZ_SRC + "/sw/ext/pprzlink/lib/v1.0/python")
from pprzlink.ivy import IvyMessagesInterface
from pprzlink.message import PprzMessage
class RtpViewer:
frame = None
mouse = dict()
def __init__(self, src):
# Create the video capture device
self.cap = cv2.VideoCapture(src)
# Start the ivy interface
self.ivy = IvyMessagesInterface("RTPviewer", start_ivy=False)
self.ivy.start()
# Create a named window and add a mouse callback
cv2.namedWindow('rtp')
cv2.setMouseCallback('rtp', self.on_mouse)
def run(self):
# Start an 'infinite' loop
while True:
# Read a frame from the video capture
ret, self.frame = self.cap.read()
# Quit if frame could not be retrieved or 'q' is pressed
if not ret or cv2.waitKey(1) & 0xFF == ord('q'):
break
# Run the computer vision function
self.cv()
def cv(self):
# If a selection is happening
if self.mouse.get('start'):
# Draw a rectangle indicating the region of interest
cv2.rectangle(self.frame, self.mouse['start'], self.mouse['now'], (0, 255, 0), 2)
# Show the image in a window
cv2.imshow('rtp', self.frame)
def on_mouse(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
self.mouse['start'] = (x, y)
        if event == cv2.EVENT_RBUTTONDOWN:
self.mouse['start'] = None
if event == cv2.EVENT_MOUSEMOVE:
self.mouse['now'] = (x, y)
if event == cv2.EVENT_LBUTTONUP:
# If mouse start is defined, a region has been selected
if not self.mouse.get('start'):
return
# Obtain mouse start coordinates
sx, sy = self.mouse['start']
# Create a new message
msg = PprzMessage("datalink", "VIDEO_ROI")
msg['ac_id'] = None
msg['startx'] = sx
msg['starty'] = sy
msg['width'] = abs(x - sx)
msg['height'] = abs(y - sy)
msg['downsized_width'] = self.frame.shape[1]
# Send message via the ivy interface
self.ivy.send_raw_datalink(msg)
# Reset mouse start
self.mouse['start'] = None
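        # Illustrative result (a sketch, not part of the original script):
        # dragging from (10, 20) to (110, 80) sends VIDEO_ROI with startx=10,
        # starty=20, width=100, height=60 and downsized_width equal to the
        # current frame width.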
def cleanup(self):
# Shutdown ivy interface
self.ivy.shutdown()
if __name__ == '__main__':
viewer = RtpViewer("rtp_viewer.sdp")
if not viewer.cap.isOpened():
viewer.cleanup()
sys.exit("Can't open video stream")
viewer.run()
viewer.cleanup()
|
beol/reviewboard
|
reviewboard/webapi/resources/review_screenshot_comment.py
|
Python
|
mit
| 6,387
| 0
|
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import (webapi_login_required,
webapi_response_errors,
webapi_request_fields)
from djblets.webapi.errors import (DOES_NOT_EXIST, INVALID_FORM_DATA,
NOT_LOGGED_IN, PERMISSION_DENIED)
from reviewboard.reviews.models import Screenshot
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.resources import resources
from reviewboard.webapi.resources.base_screenshot_comment import \
BaseScreenshotCommentResource
class ReviewScreenshotCommentResource(BaseScreenshotCommentResource):
"""Provides information on screenshots comments made on a review.
If the review is a draft, then comments can be added, deleted, or
changed on this list. However, if the review is already published,
then no changes can be made.
"""
allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
policy_id = 'review_screenshot_comment'
model_parent_key = 'review'
def get_queryset(self, request, review_id, *args, **kwargs):
q = super(ReviewScreenshotCommentResource, self).get_queryset(
request, *args, **kwargs)
return q.filter(review=review_id)
@webapi_check_local_site
@webapi_login_required
@webapi_request_fields(
required=dict({
'screenshot_id': {
'type': int,
'description': 'The ID of the screenshot being commented on.',
},
'x': {
'type': int,
'description': 'The X location for the comment.',
},
'y': {
'type': int,
'description': 'The Y location for the comment.',
},
'w': {
'type': int,
'description': 'The width of the comment region.',
},
'h': {
'type': int,
'description': 'The height of the comment region.',
},
}, **BaseScreenshotCommentResource.REQUIRED_CREATE_FIELDS),
optional=BaseScreenshotCommentResource.OPTIONAL_CREATE_FIELDS,
allow_unknown=True,
)
def create(self, request, screenshot_id, *args, **kwargs):
"""Creates a screenshot comment on a review.
This will create a new comment on a screenshot as part of a review.
The comment contains text and dimensions for the area being commented
on.
"""
try:
review_request = \
resources.review_request.get_object(request, *args, **kwargs)
review = resources.review.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
if not resources.review.has_modify_permissions(request, review):
return self.get_no_access_error(request)
try:
screenshot = Screenshot.objects.get(pk=screenshot_id,
review_request=review_request)
except ObjectDoesNotExist:
return INVALID_FORM_DATA, {
'fields': {
'screenshot_id': ['This is not a valid screenshot ID'],
}
}
new_comment = self.create_comment(
review=review,
screenshot=screenshot,
fields=('screenshot', 'x', 'y', 'w', 'h'),
**kwargs)
review.screenshot_comments.add(new_comment)
return 201, {
self.item_result_key: new_comment,
}
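    # Illustrative request body (a sketch, not part of the original module;
    # 'text' is assumed to be among REQUIRED_CREATE_FIELDS):
    #   POST .../reviews/<review_id>/screenshot-comments/
    #   {"screenshot_id": 42, "x": 10, "y": 20, "w": 100, "h": 50,
    #    "text": "Button overlaps the logo."}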
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
@webapi_request_fields(
optional=dict({
'x': {
'type': int,
'description': 'The X location for the comment.',
},
'y': {
'type': int,
'description': 'The Y location for the comment.',
},
'w': {
'type': int,
'description': 'The width of the comment region.',
},
'h': {
'type': int,
'description': 'The height of the comment region.',
},
}, **BaseScreenshotCommentResource.OPTIONAL_UPDATE_FIELDS),
allow_unknown=True
)
def update(self, request, *args, **kwargs):
"""Updates a screenshot comment.
This can update the text or region of an existing comment. It
can only be done for comments that are part of a draft review.
"""
try:
resources.review_request.get_object(request, *args, **kwargs)
review = resources.review.get_object(request, *args, **kwargs)
screenshot_comment = self.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
# Determine whether or not we're updating the issue status.
if self.should_update_issue_status(screenshot_comment, **kwargs):
return self.update_issue_status(request, self, *args, **kwargs)
if not resources.review.has_modify_permissions(request, review):
return self.get_no_access_error(request)
self.update_comment(screenshot_comment, ('x', 'y', 'w', 'h'), **kwargs)
return 200, {
self.item_result_key: screenshot_comment,
}
@webapi_check_local_site
@augment_method_from(BaseScreenshotCommentResource)
def delete(self, *args, **kwargs):
"""Deletes the comment.
This will remove the comment from the review. This cannot be undone.
Only comments on draft reviews can be deleted. Attempting to delete
a published comment will return a Permission Denied error.
Instead of a payload response on success, this will return :http:`204`.
"""
pass
@webapi_check_local_site
@augment_method_from(BaseScreenshotCommentResource)
def get_list(self, *args, **kwargs):
"""Returns the list of screenshot comments made on a review."""
pass
review_screenshot_comment_resource = ReviewScreenshotCommentResource()
|
vojtatranta/django-is-core
|
example/manage.py
|
Python
|
lgpl-3.0
| 323
| 0
|
#!/usr/bin/env python
import os
import sys
PROJECT_DIR = os.path.abspath(
    os.path.join(os.path.dirname(__file__))
)
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dj.settings")
from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
moreati/u2fval
|
u2fval/core/api.py
|
Python
|
bsd-2-clause
| 9,048
| 0
|
# Copyright (c) 2014 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from u2fval.model import Device
from u2fval.core.controller import U2FController
from u2fval.core.jsobjects import (
RegisterRequestData, RegisterResponseData, AuthenticateRequestData,
AuthenticateResponseData)
from u2fval.core.exc import U2fException, BadInputException
from M2Crypto import X509
from webob.dec import wsgify
from webob import exc, Response
from cachetools import lru_cache, LRUCache
import json
import logging
log = logging.getLogger(__name__)
__all__ = ['create_application']
def u2f_error(e):
server_e = exc.HTTPBadRequest()
server_e.body = e.json
server_e.content_type = 'application/json'
return server_e
class U2FServerApplication(object):
    def __init__(self, session, memstore, metadata, allow_untrusted=False):
self._session = session
self._memstore = memstore
        self._metadata = metadata
self._require_trusted = not allow_untrusted
@wsgify
def __call__(self, request):
client_name = request.environ.get('REMOTE_USER')
if not client_name:
raise u2f_error(BadInputException('Client not specified'))
try:
resp = self.client(request, client_name)
if not isinstance(resp, Response):
resp = Response(json.dumps(resp),
content_type='application/json')
return resp
except Exception as e:
self._session.rollback()
if isinstance(e, U2fException):
e = u2f_error(e)
elif isinstance(e, exc.HTTPException):
pass
else:
log.exception('Server error')
e = exc.HTTPServerError(e.message)
raise e
finally:
self._session.commit()
@lru_cache(maxsize=16)
def _get_controller(self, client_name):
return U2FController(self._session, self._memstore, client_name,
self._metadata, self._require_trusted)
def client(self, request, client_name):
user_id = request.path_info_pop()
controller = self._get_controller(client_name)
if not user_id:
if request.method == 'GET':
return controller.get_trusted_facets()
else:
raise exc.HTTPMethodNotAllowed
return self.user(request, controller, user_id.encode('utf-8'))
def user(self, request, controller, user_id):
if request.path_info_peek():
page = request.path_info_pop()
if page == 'register':
return self.register(request, controller, user_id)
elif page == 'authenticate':
return self.authenticate(request, controller, user_id)
else:
return self.device(request, controller, user_id, page)
if request.method == 'GET':
return controller.get_descriptors(user_id)
elif request.method == 'DELETE':
controller.delete_user(user_id)
return exc.HTTPNoContent()
else:
raise exc.HTTPMethodNotAllowed
def register(self, request, controller, user_id):
if request.method == 'GET':
register_requests, sign_requests = controller.register_start(
user_id)
return RegisterRequestData(
registerRequests=register_requests,
authenticateRequests=sign_requests
)
elif request.method == 'POST':
data = RegisterResponseData(request.body)
try:
handle = controller.register_complete(user_id,
data.registerResponse)
except KeyError:
raise exc.HTTPBadRequest
controller.set_props(handle, data.properties)
return controller.get_descriptor(user_id, handle)
else:
raise exc.HTTPMethodNotAllowed
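    # Illustrative client flow (a sketch, not part of the original module):
    #   GET  /<user>/register  -> RegisterRequestData with challenges
    #   POST /<user>/register  -> descriptor of the newly registered device
    # authenticate() below follows the same GET (challenge) / POST (response)
    # pairing.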
def authenticate(self, request, controller, user_id):
if request.method == 'GET':
sign_requests = controller.authenticate_start(user_id)
return AuthenticateRequestData(
authenticateRequests=sign_requests
)
elif request.method == 'POST':
data = AuthenticateResponseData(request.body)
try:
handle = controller.authenticate_complete(
user_id, data.authenticateResponse)
except KeyError:
raise BadInputException('Malformed request')
except ValueError as e:
log.exception('Error in authenticate')
raise BadInputException(e.message)
controller.set_props(handle, data.properties)
return controller.get_descriptor(user_id, handle)
else:
raise exc.HTTPMethodNotAllowed
def device(self, request, controller, user_id, handle):
try:
if request.method == 'GET':
return controller.get_descriptor(user_id, handle)
elif request.method == 'POST':
props = json.loads(request.body)
controller.set_props(handle, props)
return controller.get_descriptor(user_id, handle)
elif request.method == 'DELETE':
controller.unregister(handle)
return exc.HTTPNoContent()
else:
raise exc.HTTPMethodNotAllowed
except ValueError as e:
raise exc.HTTPNotFound(e.message)
class MetadataCache(object):
def __init__(self, provider, maxsize=64):
self._provider = provider
self._cache = LRUCache(maxsize=maxsize)
def get_attestation(self, device_or_cert):
if isinstance(device_or_cert, Device):
device = device_or_cert
if device.certificate_id not in self._cache:
cert = X509.load_cert_der_string(device.certificate.der)
attestation = self._provider.get_attestation(cert)
self._cache[device.certificate_id] = attestation
return self._cache[device.certificate_id]
else:
return self._provider.get_attestation(device_or_cert)
def get_metadata(self, device):
attestation = self.get_attestation(device)
if attestation:
metadata = {}
if attestation.vendor_info:
metadata['vendor'] = attestation.vendor_info
if attestation.device_info:
metadata['device'] = attestation.device_info
return metadata
return None
def create_application(settings):
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
engine = create_engine(settings['db'], echo=False)
Session = sessionmaker(bind=engin
|
magfest/ubersystem
|
alembic/versions/a1a5bd54b2aa_add_autograph_interview_and_travel_plan_.py
|
Python
|
agpl-3.0
| 3,669
| 0.010902
|
"""Add autograph, interview, and travel plan checklist items
Revision ID: a1a5bd54b2aa
Revises: f619fbd56912
Create Date: 2017-09-21 07:17:46.817443
"""
# revision identifiers, used by Alembic.
revision = 'a1a5bd54b2aa'
down_revision = 'f619fbd56912'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
import residue
try:
is_sqlite = op.get_context().dialect.name == 'sqlite'
except:
is_sqlite = False
if is_sqlite:
op.get_context().connection.execute('PRAGMA foreign_keys=ON;')
utcnow_server_default = "(datetime('now', 'utc'))"
else:
utcnow_server_default = "timezone('utc', current_timestamp)"
def sqlite_column_reflect_listener(inspector, table, column_info):
"""Adds parenthesis around SQLite datetime defaults for utcnow."""
if column_info['default'] == "datetime('now', 'utc')":
column_info['default'] = utcnow_server_default
sqlite_reflect_kwargs = {
'listeners': [('column_reflect', sqlite_column_reflect_listener)]
}
# ===========================================================================
# HOWTO: Handle alter statements in SQLite
#
# def upgrade():
# if is_sqlite:
# with op.batch_alter_table('table_name', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:
# batch_op.alter_column('column_name', type_=sa.Unicode(), server_default='', nullable=False)
# else:
# op.alter_column('table_name', 'column_name', type_=sa.Unicode(), server_default='', nullable=False)
#
# ===========================================================================
def upgrade():
op.create_table('guest_autograph',
sa.Column('id', residue.UUID(), nullable=False),
sa.Column('guest_id', residue.UUID(), nullable=False),
    sa.Column('num', sa.Integer(), server_default='0', nullable=False),
    sa.Column('length', sa.Integer(), server_default='60', nullable=False),
sa.ForeignKeyConstraint(['guest_id'], ['guest_group.id'], name=op.f('fk_guest_autograph_guest_id_guest_group')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_guest_autograph')),
sa.UniqueConstraint('guest_id', name=op.f('uq_guest_autograph_guest_id'))
)
op.create_table('guest_interview',
sa.Column('id', residue.UUID(), nullable=False),
sa.Column('guest_id', residue.UUID(), nullable=False),
sa.Column('will_interview', sa.Boolean(), server_default='False', nullable=False),
sa.Column('email', sa.Unicode(), server_default='', nullable=False),
sa.Column('direct_contact', sa.Boolean(), server_default='False', nullable=False),
sa.ForeignKeyConstraint(['guest_id'], ['guest_group.id'], name=op.f('fk_guest_interview_guest_id_guest_group')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_guest_interview')),
sa.UniqueConstraint('guest_id', name=op.f('uq_guest_interview_guest_id'))
)
op.create_table('guest_travel_plans',
sa.Column('id', residue.UUID(), nullable=False),
sa.Column('guest_id', residue.UUID(), nullable=False),
sa.Column('modes', sa.Unicode(), server_default='', nullable=False),
sa.Column('modes_text', sa.Unicode(), server_default='', nullable=False),
sa.Column('details', sa.Unicode(), server_default='', nullable=False),
sa.ForeignKeyConstraint(['guest_id'], ['guest_group.id'], name=op.f('fk_guest_travel_plans_guest_id_guest_group')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_guest_travel_plans')),
sa.UniqueConstraint('guest_id', name=op.f('uq_guest_travel_plans_guest_id'))
)
def downgrade():
op.drop_table('guest_travel_plans')
op.drop_table('guest_interview')
op.drop_table('guest_autograph')
|
CroissanceCommune/autonomie
|
autonomie/models/options.py
|
Python
|
gpl-3.0
| 5,322
| 0.000188
|
# -*- coding: utf-8 -*-
# * Copyright (C) 2012-2014 Croissance Commune
# * Authors:
# * Arezki Feth <f.a@majerti.fr>;
# * Miotte Julien <j.m@majerti.fr>;
# * TJEBBES Gaston <g.t@majerti.fr>
#
# This file is part of Autonomie : Progiciel de gestion de CAE.
#
# Autonomie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Autonomie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Autonomie. If not, see <http://www.gnu.org/licenses/>.
"""
Base tools for administrable options
"""
from sqlalchemy import (
Column,
Integer,
String,
Boolean,
ForeignKey,
)
from sqlalchemy.util import classproperty
from sqlalchemy.sql.expression import func
from autonomie_base.utils.ascii import camel_case_to_name
from autonomie_base.models.base import (
DBBASE,
default_table_args,
DBSESSION,
)
from autonomie.forms import (
get_hidden_field_conf,
EXCLUDED,
)
class ConfigurableOption(DBBASE):
"""
Base class for options
"""
__table_args__ = default_table_args
id = Column(
Integer,
primary_key=True,
info={'colanderalchemy': get_hidden_field_conf()}
)
label = Column(
String(100),
info={'colanderalchemy': {'title': u'Libellé'}},
nullable=False,
)
active = Column(
Boolean(),
default=True,
info={'colanderalchemy': EXCLUDED}
)
order = Column(
Integer,
default=0,
info={'colanderalchemy': get_hidden_field_conf()}
)
type_ = Column(
'type_',
String(30),
nullable=False,
info={'colanderalchemy': EXCLUDED}
)
@classproperty
def __mapper_args__(cls):
name = cls.__name__
if name == 'ConfigurableOption':
return {
'polymorphic_on': 'type_',
'polymorphic_identity': 'configurable_option'
}
else:
return {'polymorphic_identity': camel_case_to_name(name)}
@classmethod
def query(cls, *args):
query = super(ConfigurableOption, cls).query(*args)
query = query.filter(ConfigurableOption.active == True)
query = query.order_by(ConfigurableOption.order)
return query
def __json__(self, request):
return dict(
id=self.id,
label=self.label,
active=self.active,
)
def move_up(self):
"""
Move the current instance up in the category's order
"""
order = self.order
if order > 0:
new_order = order - 1
self.__class__.insert(self, new_order)
def move_down(self):
"""
Move the current instance down in the category's order
"""
order = self.order
new_order = order + 1
self.__class__.insert(self, new_order)
@classmethod
def get_next_order(cls):
"""
:returns: The next available order
:rtype: int
"""
query = DBSESSION().query(func.max(cls.order)).filter_by(active=True)
query = query.filter_by(
type_=cls.__mapper_args__['polymorphic_identity']
)
query = query.first()
if query is not None and query[0] is not None:
result = query[0] + 1
else:
result = 0
return result
@classmethod
def _query_active_items(cls):
"""
Build a query to collect active items of the current class
:rtype: :class:`sqlalchemy.Query`
"""
return DBSESSION().query(cls).filter_by(
type_=cls.__mapper_args__['polymorphic_identity']
).filter_by(active=True)
@classmethod
def insert(cls, item, new_order):
"""
Place the item at the given index
:param obj item: The item to move
:param int new_order: The new index of the item
"""
query = cls._query_active_items()
items = query.filter(cls.id != item.id).order_by(cls.order).all()
items.insert(new_order, item)
for index, item in enumerate(items):
item.order = index
DBSESSION().merge(item)
@classmethod
def reorder(cls):
"""
Regenerate order attributes
"""
items = cls._query_active_items().order_by(cls.order).all()
for index, item in enumerate(items):
item.order = index
DBSESSION().merge(item)
def get_id_foreignkey_col(foreignkey_str):
"""
Return an id column as a foreignkey with correct colander configuration
foreignkey_str
The foreignkey our id is pointing to
"""
column = Column(
"id",
Integer,
ForeignKey(foreignkey_str),
primary_key=True,
info={'colanderalchemy': get_hidden_field_conf()},
)
return column
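# A minimal usage sketch of the machinery above, assuming a concrete subclass
# and an open session; `TaskTypeOption` is a hypothetical name, not part of
# this module:
#
# class TaskTypeOption(ConfigurableOption):
#     __tablename__ = 'task_type_option'
#     id = get_id_foreignkey_col('configurable_option.id')
#
# option = TaskTypeOption(label=u'Devis', order=TaskTypeOption.get_next_order())
# DBSESSION().add(option)
# option.move_up()          # swap with the previous active option
# TaskTypeOption.reorder()  # rebuild a dense 0..n-1 ordering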
|
jonnybazookatone/ads
|
examples/journal-publications-over-time/journals.py
|
Python
|
mit
| 3,651
| 0.004656
|
# coding: utf-8
""" Compile publication data for astronomy journals over the last 10 years. """
from __future__ import division, print_function
__author__ = "Andy Casey <acasey@mso.anu.edu.au>"
# Standard library
import json
# Module specific
import ads
if __name__ == "__main__":
# Let's select the years and journals we want to compare
years = (1993, 2013)
journals = [ # (Scraped from Wikipedia)
# "AIAA Journal",
"Astrobiology",
"Astronomical Journal",
"Astronomical Review",
# "Astronomische Nachrichten",
"Astronomy and Astrophysics",
# "Astronomy and Computing",
# "Astronomy & Geophysics",
"Astronomy Letters",
"Astronomy Now",
"Astronomy Reports",
"Astroparticle Physics",
"The Astrophysical Journal",
# "The Astrophysical Journal Letters",
"The Astrophysical Journal Supplement Series",
"Astrophysics and Space Science",
"Celestial Mechanics and Dynamical Astronomy",
"Classical and Quantum Gravity",
# "Connaissance des Temps",
"Cosmic Research",
# "Earth, Moon, and Planets",
"Earth and Planetary Science Letters",
"General Relativity and Gravitation",
"Geophysical Research Letters",
"Icarus",
"International Astronomical Union Circular",
"International Journal of Astrobiology",
"Journal of the British Interplanetary Society",
"Journal of Cosmology",
"Journal of Cosmology and Astroparticle Physics",
"Journal of Geophysical Research",
# "Journal for the History of Astronomy",
# "Journal of the Korean Astronomical Society",
# "Journal of the Royal Astronomical Society of Canada",
# "Meteoritics & Planetary Science",
"Monthly Notices of the Royal Astronomical Society",
# "Nature Geoscience",
"New Astronomy",
"The Observatory",
"Planetary and Space Science",
# "Publications of the Astronomical Society of Japan",
"Publications of the Astronomical Society of the Pacific",
"Solar Physics",
"Space Science Reviews",
]
publication_data = []
for journal in journals:
# Initiate the dictionary for this journal
journal_data = {
"name": journal,
"articles": [],
"total": 0
}
for year in range(years[0], years[1] + 1):
# Perform the query
# We actually don't want all the results, we just want the metadata
# which tells us how many publications there were
q = ads.SearchQuery(q="pub:\"{journal}\" year:{year}".format(journal=journal, year=year), fl=['id'], rows=1)
q.execute()
num = int(q.response.numFound)
print("{journal
|
} had {num} publications in {year}"
.format(journal=journal, num=num, year=year))
# Save this data
journal_data["articles"].append([year, num])
journal_data["total"] += num
# Let's only save it if there were actually any publications
if journal_data["total"] > 0:
publication_data.append(journal_data)
sorted_publication_data = []
totals = [journal["total"] for journal in publication_data]
indices = sorted(range(len(totals)),key=totals.__getitem__)
for index in indices:
sorted_publication_data.append(publication_data[index])
# Save the data
with open('journal-publications.json', 'w') as fp:
json.dump(sorted_publication_data, fp, indent=2)
|
mmccollow/TSV-Convert
|
tsv-convert.py
|
Python
|
gpl-2.0
| 3,059
| 0.024191
|
#!bin/python
# TSV to Dublin Core/McMaster Repository conversion tool
# Matt McCollow <mccollo@mcmaster.ca>, 2011
# Nick Ruest <ruestn@mcmaster.ca>, 2011
from DublinCore import DublinCore
import csv
from sys import argv
from xml.dom.minidom import Document
from os.path import basename
DC_NS = 'http://purl.org/dc/elements/1.1/'
XSI_NS = 'http://www.w3.org/2001/XMLSchema-instance'
MACREPO_NS = 'http://repository.mcmaster.ca/schema/macrepo/elements/1.0/'
class TabFile(object):
""" A dialect for the csv.DictReader constructor """
delimiter = '\t'
def parse(fn):
""" Parse a TSV file """
try:
fp = open(fn)
fields = fp.readline().rstrip('\n').split('\t')
tsv = csv.DictReader(fp, fieldnames=fields, dialect=TabFile)
for row in tsv:
dc = makedc(row)
writefile(row['dc:identifier'], dc)
xml = makexml(row)
writefile(row['dc:identifier'], xml)
except IOError as (errno, strerror):
print "Error ({0}): {1}".format(errno, str
|
error)
raise SystemExit
fp.close()
def makedc(row):
""" Generate a Dublin Core XML file from a TSV """
metadata = DublinCore()
metadata.Contributor = row.get('dc:contributor', '')
metadata.Coverage = row.get('dc:coverage', '')
metadata.Creator = row.get('dc:creator', '')
metadata.Date = row.get('dc:date', '')
metadata.Description = row.get('dc:description', '')
metadata.Format = row.get('dc:format', '')
metadata.Identifier = row.get('dc:identifier', '')
metadata.Language = row.get('dc:language', '')
metadata.Publisher = row.get('dc:publisher', '')
metadata.Relation = row.get('dc:relation', '').split('|')
metadata.Rights = row.get('dc:rights', '')
metadata.Source = row.get('dc:source', '')
metadata.Subject = row.get('dc:subject', '')
metadata.Title = row.get('dc:title', '')
return metadata
def makexml(row):
""" Generate an XML file conforming to the macrepo schema from a TSV """
doc = Document()
root = doc.createElement('metadata')
root.setAttribute('xmlns:xsi', XSI_NS)
root.setAttribute('xmlns:macrepo', MACREPO_NS)
doc.appendChild(root)
oldnid = doc.createElement('macrepo:oldNid')
oldnid.appendChild(doc.createTextNode(row.get('macrepo:oldNid', '')))
root.appendChild(oldnid)
notes = doc.createElement('macrepo:notes')
notes.appendChild(doc.createTextNode(row.get('macrepo:notes', '')))
root.appendChild(notes)
scale = doc.createElement('macrepo:scale')
scale.appendChild(doc.createTextNode(row.get('macrepo:scale', '')))
root.appendChild(scale)
return doc
def writefile(name, obj):
""" Writes Dublin Core or Macrepo XML object to a file """
if isinstance(obj, DublinCore):
fp = open(name + '-DC.xml', 'w')
fp.write(obj.makeXML(DC_NS))
elif isinstance(obj, Document):
fp = open(name + '-macrepo.xml', 'w')
fp.write(obj.toprettyxml())
fp.close()
def chkarg(arg):
""" Was a TSV file specified? """
return False if len(arg) < 2 else True
def usage():
""" Print a nice usage message """
print "Usage: bin/python " + basename(__file__) + " <filename>.tsv"
if __name__ == "__main__":
if chkarg(argv):
parse(argv[1])
else:
usage()
|
anentropic/django-ebaysync
|
ebaysync/notifications.py
|
Python
|
lgpl-3.0
| 4,183
| 0.004542
|
import base64
import hashlib
import logging
import math
import time
from django.conf import settings
from ebaysuds import TradingAPI
from suds.plugin import PluginContainer
from suds.sax.parser import Parser
logging.basicConfig()
log = logging.getLogger(__name__)
class UnrecognisedPayloadTypeError(Exception):
pass
class NotificationValidationError(Exception):
pass
class TimestampOutOfBounds(NotificationValidationError):
pass
class InvalidSignature(NotificationValidationError):
pass
def ebay_timestamp_string(datetime_obj):
# convert python datetime obj to string representation used by eBay
# appears to be a bug in suds - eBay's milliseconds are loaded into python datetime
# as microseconds so the datetime_obj we get from suds is not accurate to the data
return '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s.%(millisecond)sZ' % {
'year': '%04d' % datetime_obj.year,
'month': '%02d' % datetime_obj.month,
'day': '%02d' % datetime_obj.day,
'hour': '%02d' % datetime_obj.hour,
'minute': '%02d' % datetime_obj.minute,
'second': '%02d' % datetime_obj.second,
'millisecond': '%03d' % datetime_obj.microsecond  # don't need to x1000 as we're omitting three digits of zero-padding
}
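# Worked example of the template above (illustrative values): a suds datetime
# of 2014-01-05 09:03:07 with microsecond=123 formats as
# '2014-01-05T09:03:07.123Z' -- the mis-loaded microsecond field is emitted
# directly as eBay's three zero-padded millisecond digits.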
class NotificationHandler(object):
def __init__(self, wsdl_url=None, token=None, sandbox=False, _validate=True):
es_kwargs = {
'sandbox': sandbox,
}
if wsdl_url is not None:
es_kwargs['wsdl_url'] = wsdl_url
if token is not None:
es_kwargs['token'] = token
self.client = TradingAPI(**es_kwargs)
self.saxparser = Parser()
self._validate = _validate
def decode(self, payload_type, message):
try:
payload_method = getattr(self.client.sudsclient.service, payload_type)
except AttributeError:
raise UnrecognisedPayloadTypeError('Unrecognised payload type: %s' % payload_type)
# don balaclava, hijack a suds SoapClient instance to decode our payload for us
sc_class = payload_method.clientclass({})
soapclient = sc_class(self.client.sudsclient, payload_method.method)
# copy+pasted from SoapClient.send :(
plugins = PluginContainer(soapclient.options.plugins)
ctx = plugins.message.received(reply=message)
result = soapclient.succeeded(soapclient.method.binding.input, ctx.reply)
# `result` only contains the soap:Body of the response (parsed into objects)
# but the signature we need is in the soap:Header element
signature = self._parse_signature(message)
if not self._validate or self.validate(result, signature):
return result
def _parse_signature(self, message):
xml = self.saxparser.parse(string=message)
return xml.getChild("Envelope").getChild("Header").getChild('RequesterCredentials').getChild('NotificationSignature').text
def validate(self, message, signature):
"""
As per:
http://developer.ebay.com/DevZone/XML/docs/WebHelp/wwhelp/wwhimpl/common/html/wwhelp.htm?context=eBay_XML_API&file=WorkingWithNotifications-Receiving_Platform_Notifications.html
"""
timestamp_str = ebay_timestamp_string(message.Timestamp)
floattime = time.mktime(message.Timestamp.timetuple())
if not settings.DEBUG:
# check timestamp is within 10 minutes of current time
diff_seconds = math.fabs(time.time() - floattime)
if diff_seconds > 600:
raise TimestampOutOfBounds("Payload timestamp was %s seconds away from current time." % diff_seconds)
# make hash
m = hashlib.md5()
m.update(timestamp_str)
m.update(self.client.config.get('keys', 'dev_id'))
m.update(self.client.config.get('keys', 'app_id'))
m.update(self.client.config.get('keys', 'cert_id'))
computed_hash = base64.standard_b64encode(m.digest())
if computed_hash != signature:
raise InvalidSignature("%s != %s" % (computed_hash, signature))
return True
|
bianchimro/django-search-views
|
tests/settings.py
|
Python
|
mit
| 289
| 0.00346
|
SECRET_KEY = 'fake-key'
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
}
}
INSTALLED_APPS = [
"django_nose",
"tests",
]
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
'--with-coverage',
'--cover-package=search_views',
]
|
ets-labs/python-dependency-injector
|
tests/unit/providers/async/test_provided_instance_py36.py
|
Python
|
bsd-3-clause
| 5,113
| 0.001565
|
"""ProvidedInstance provider async mode tests."""
import asyncio
from dependency_injector import containers, providers
from pytest import mark, raises
from .common import RESOURCE1, init_resource
@mark.asyncio
async def test_provided_attribute():
class TestClient:
def __init__(self, resource):
self.resource = resource
class TestService:
def __init__(self, resource):
self.resource = resource
class TestContainer(containers.DeclarativeContainer):
resource = providers.Resource(init_resource, providers.Object(RESOURCE1))
client = providers.Factory(TestClient, resource=resource)
service = providers.Factory(TestService, resource=client.provided.resource)
container = TestContainer()
instance1, instance2 = await asyncio.gather(
container.service(),
container.service(),
)
assert instance1.resource is RESOURCE1
assert instance2.resource is RESOURCE1
assert instance1.resource is instance2.resource
@mark.asyncio
async def test_provided_attribute_error():
async def raise_exception():
raise RuntimeError()
class TestContainer(containers.DeclarativeContainer):
client = providers.Factory(raise_exception)
container = TestContainer()
with raises(RuntimeError):
await container.client.provided.attr()
@mark.asyncio
async def test_provided_attribute_undefined_attribute():
class TestClient:
def __init__(self, resource):
self.resource = resource
class TestContainer(containers.DeclarativeContainer):
resource = providers.Resource(init_resource, providers.Object(RESOURCE1))
client = providers.Factory(TestClient, resource=resource)
container = TestContainer()
with raises(AttributeError):
await container.client.provided.attr()
@mark.asyncio
async def test_provided_item():
class TestClient:
def __init__(self, resource):
self.resource = resource
def __getitem__(self, item):
return getattr(self, item)
class TestService:
def __init__(self, resource):
self.resource = resource
class TestContainer(containers.DeclarativeContainer):
resource = providers.Resource(init_resource, providers.Object(RESOURCE1))
client = providers.Factory(TestClient, resource=resource)
service = providers.Factory(TestService, resource=client.provided["resource"])
container = TestContainer()
instance1, instance2 = await asyncio.gather(
container.service(),
container.service(),
)
assert instance1.resource is RESOURCE1
assert instance2.resource is RESOURCE1
assert instance1.resource is instance2.resource
@mark.asyncio
async def test_provided_item_error():
async def raise_exception():
raise RuntimeError()
class TestContainer(containers.DeclarativeContainer):
client = providers.Factory(raise_exception)
container = TestContainer()
with raises(RuntimeError):
await container.client.provided["item"]()
@mark.asyncio
async def test_provided_item_undefined_item():
class TestContainer(containers.DeclarativeContainer):
resource = providers.Resource(init_resource, providers.Object(RESOURCE1))
client = providers.Factory(dict, resource=resource)
container = TestContainer()
with raises(KeyError):
await container.client.provided["item"]()
@mark.asyncio
async def test_provided_method_call():
class TestClient:
def __init__(self, resource):
self.resource = resource
def get_resource(self):
return self.resource
class TestService:
def __init__(self, resource):
self.resource = resource
class TestContainer(containers.DeclarativeContainer):
resource = providers.Resource(init_resource, providers.Object(RESOURCE1))
client = providers.Factory(TestClient, resource=resource)
service = providers.Factory(TestService, resource=client.provided.get_resource.call())
container = TestContainer()
instance1, instance2 = await asyncio.gather(
container.service(),
container.service(),
)
assert instance1.resource is RESOURCE1
assert instance2.resource is RESOURCE1
assert instance1.resource is instance2.resource
@mark.asyncio
async def test_provided_method_call_parent_error():
async def raise_exception():
raise RuntimeError()
class TestContainer(containers.DeclarativeContainer):
client = providers.Factory(raise_exception)
container = TestContainer()
with raises(RuntimeError):
await container.client.provided.method.call()()
@mark.asyncio
async def test_provided_method_call_error():
class TestClient:
def method(self):
raise RuntimeError()
class TestContainer(containers.DeclarativeContainer):
client = providers.Factory(TestClient)
container = TestContainer()
with raises(RuntimeError):
await container.client.provided.method.call()()
|
arpruss/plucker
|
plucker_desktop/installer/osx/application_bundle_files/Resources/parser/python/vm/PIL/CurImagePlugin.py
|
Python
|
gpl-2.0
| 2,171
| 0.00783
|
#
# The Python Imaging Library.
# $Id: CurImagePlugin.py,v 1.2 2007/06/17 14:12:14 robertoconnor Exp $
#
# Windows Cursor support for PIL
#
# notes:
# uses BmpImagePlugin.py to read the bitmap data.
#
# history:
# 96-05-27 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.1"
import string
import Image, BmpImagePlugin
#
# --------------------------------------------------------------------
def i16(c):
return ord(c[0]) + (ord(c[1])<<8)
def i32(c):
return ord(c[0]) + (ord(c[1])<<8) + (ord(c[2])<<16)
|
+ (ord(c[3])<<24)
def _accept(prefix):
return prefix[:4] == "\0\0\2\0"
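# Quick sanity check of the little-endian helpers above (illustrative values):
# i16("\x02\x00") ==> 2
# i32("\x10\x00\x00\x00") ==> 16
# _accept("\x00\x00\x02\x00") ==> True (CUR header: reserved=0, type=2)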
##
# Image plugin for Windows Cursor files.
class CurImageFile(BmpImagePlugin.BmpImageFile):
format = "CUR"
format_description = "Windows Cursor"
def _open(self):
offset = self.fp.tell()
# check magic
s = self.fp.read(6)
if not _accept(s):
raise SyntaxError, "not an CUR file"
# pick the largest cursor in the file
m = ""
for i in range(i16(s[4:])):
s = self.fp.read(16)
if not m:
m = s
elif ord(s[0]) > ord(m[0]) and ord(s[1]) > ord(m[1]):
m = s
#print "width", ord(s[0])
#print "height", ord(s[1])
#print "colors", ord(s[2])
#print "reserved", ord(s[3])
#print "hotspot x", i16(s[4:])
#print "hotspot y", i16(s[6:])
#print "bytes", i32(s[8:])
#print "offset", i32(s[12:])
# load as bitmap
self._bitmap(i32(m[12:]) + offset)
# patch up the bitmap height
self.size = self.size[0], self.size[1]/2
d, e, o, a = self.tile[0]
self.tile[0] = d, (0,0)+self.size, o, a
return
#
# --------------------------------------------------------------------
Image.register_open("CUR", CurImageFile, _accept)
Image.register_extension("CUR", ".cur")
|
googleinterns/wss
|
third_party/deeplab/core/dense_prediction_cell.py
|
Python
|
apache-2.0
| 12,180
| 0.003859
|
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dense Prediction Cell class that can be evolved in semantic segmentation.
DensePredictionCell is used as a `layer` in semantic segmentation whose
architecture is determined by the `config`, a dictionary specifying
the architecture.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
from third_party.deeplab.core import utils
slim = contrib_slim
# Local constants.
_META_ARCHITECTURE_SCOPE = 'meta_architecture'
_CONCAT_PROJECTION_SCOPE = 'concat_projection'
_OP = 'op'
_CONV = 'conv'
_PYRAMID_POOLING = 'pyramid_pooling'
_KERNEL = 'kernel'
_RATE = 'rate'
_GRID_SIZE = 'grid_size'
_TARGET_SIZE = 'target_size'
_INPUT = 'input'
def dense_prediction_cell_hparams():
"""DensePredictionCell HParams.
Returns:
A dictionary of hyper-parameters used for dense prediction cell with keys:
- reduction_size: Integer, the number of output filters for each operation
inside the cell.
- dropout_on_concat_features: Boolean, apply dropout on the concatenated
features or not.
- dropout_on_projection_features: Boolean, apply dropout on the projection
features or not.
- dropout_keep_prob: Float, when `dropout_on_concat_features' or
`dropout_on_projection_features' is True, the `keep_prob` value used
in the dropout operation.
- concat_channels: Integer, the concatenated features will be
channel-reduced to `concat_channels` channels.
- conv_rate_multiplier: Integer, used to multiply the convolution rates.
This is useful in the case when the output_stride is changed from 16
to 8, we need to double the convolution rates correspondingly.
"""
return {
'reduction_size': 256,
'dropout_on_concat_features': True,
'dropout_on_projection_features': False,
'dropout_keep_prob': 0.9,
'concat_channels': 256,
'conv_rate_multiplier': 1,
}
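# A minimal sketch of overriding these defaults; the config value here is
# hypothetical and only illustrates the expected shapes:
#
# cell = DensePredictionCell(
#     config=[{_OP: _CONV, _KERNEL: 3, _RATE: [1, 6], _INPUT: -1}],
#     hparams={'conv_rate_multiplier': 2},  # e.g. when output_stride drops 16 -> 8
# )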
class DensePredictionCell(object):
"""DensePredictionCell class used as a 'layer' in semantic segmentation."""
def __init__(self, config, hparams=None):
"""Initializes the dense prediction cell.
Args:
config: A dictionary storing the architecture of a dense prediction cell.
hparams: A dictionary of hyper-parameters, provided by users. This
dictionary will be used to update the default dictionary returned by
dense_prediction_cell_hparams().
Raises:
ValueError: If `conv_rate_multiplier` has value < 1.
"""
self.hparams = dense_prediction_cell_hparams()
if hparams is not None:
self.hparams.update(hparams)
self.config = config
# Check values in hparams are valid or not.
if self.hparams['conv_rate_multiplier'] < 1:
raise ValueError('conv_rate_multiplier cannot have value < 1.')
def _get_pyramid_pooling_arguments(
self, crop_size, output_stride, image_grid, image_pooling_crop_size=None):
"""Gets arguments for pyramid pooling.
Args:
crop_size: A list of two integers, [crop_height, crop_width] specifying
whole patch crop size.
output_stride: Integer, output stride value for extracted features.
image_grid: A list of two integers, [image_grid_height, image_grid_width],
specifying the grid size of how the pyramid pooling will be performed.
image_pooling_crop_size: A list of two integers, [crop_height, crop_width]
specifying the crop size for image pooling operations. Note that we
decouple whole patch crop_size and image_pooling_crop_size as one could
perform the image_pooling with different crop sizes.
Returns:
A list of (resize_value, pooled_kernel)
"""
resize_height = utils.scale_dimension(crop_size[0], 1. / output_stride)
resize_width = utils.scale_dimension(crop_size[1], 1. / output_stride)
# If image_pooling_crop_size is not specified, use crop_size.
if image_pooling_crop_size is None:
image_pooling_crop_size = crop_size
pooled_height = utils.scale_dimension(
image_pooling_crop_size[0], 1. / (output_stride * image_grid[0]))
pooled_width = utils.scale_dimension(
image_pooling_crop_size[1], 1. / (output_stride * image_grid[1]))
return ([resize_height, resize_width], [pooled_height, pooled_width])
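# Worked numbers, assuming utils.scale_dimension follows the usual DeepLab
# convention dim -> int((dim - 1) * scale + 1): crop_size=[513, 513] with
# output_stride=16 gives a 33x33 resize target ((513 - 1) / 16 + 1), and
# image_grid=[1, 1] then yields a 33x33 pooling kernel as well.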
def _parse_operation(self, config, crop_size, output_stride,
image_pooling_crop_size=None):
"""Parses one operation.
When 'operation' is 'pyramid_pooling', we compute the required
hyper-parameters and save in config.
Args:
config: A dictionary storing required hyper-parameters for one
operation.
crop_size: A list of two integers, [crop_height, crop_width] specifying
whole patch crop size.
output_stride: Integer, output stride value for extracted features.
image_pooling_crop_size: A list of two integers, [crop_height, crop_width]
specifying the crop size for image pooling operations. Note that we
decouple whole patch crop_size and image_pooling_crop_size as one could
perform the image_pooling with different crop sizes.
Returns:
A dictionary stores the related information for the operation.
"""
if config[_OP] == _PYRAMID_POOLING:
(config[_TARGET_SIZE],
config[_KERNEL]) = self._get_pyramid_pooling_arguments(
crop_size=crop_size,
output_stride=output_stride,
image_grid=config[_GRID_SIZE],
image_pooling_crop_size=image_pooling_crop_size)
return config
def build_cell(self,
features,
output_stride=16,
crop_size=None,
image_pooling_crop_size=None,
weight_decay=0.00004,
reuse=None,
is_training=False,
fine_tune_batch_norm=False,
scope=None):
"""Builds the dense prediction cell based on the config.
Args:
features: Input feature map of size [batch, height, width, channels].
output_stride: Int, output stride at which the features were extracted.
crop_size: A list [crop_height, crop_width], determining the input
features resolution.
image_pooling_crop_size: A list of two integers, [crop_height, crop_width]
specifying the crop size for image pooling operations. Note that we
decouple whole patch crop_size and image_pooling_crop_size as one could
perform the image_pooling with different crop sizes.
weight_decay: Float, the weight decay for model variables.
reuse: Reuse the model variables or not.
is_training: Boolean, is training or not.
fine_tune_batch_norm: Boolean, fine-tuning batch norm parameters or not.
scope: Optional string, specifying the variable scope.
Returns:
Features after passing through the constructed dense prediction cell with
shape = [batch, height, width, channels] where channels are determined
by `reduction_size` returned by dense_prediction_cell_hparams().
Raises:
ValueError: Use Convolution with kernel size not equal to 1x1 or 3x3 or
the operation is not recognized.
"""
batch_norm_params = {
'is_training': is_training and fine_tune_batch_norm,
'decay': 0.9997,
'epsilon': 1e-5,
'scale':
|
Mariaanisimova/pythonintask
|
BIZa/2015/Anisimova_M_L/task_2_1.py
|
Python
|
apache-2.0
| 574
| 0.008523
|
# Task 2. Variant 1.
# Write a program that prints to the screen the saying you like most whose author is Euripides. Do not forget that the author must be mentioned on a separate line.
# Anisimova M.L.
# 02.09.2016
print("Жизнь наша есть борьба.")
print("\t\t Еврипид")
input("Нажмите Enter для выхода.")
|
thonkify/thonkify
|
src/lib/sparkpost/tornado/base.py
|
Python
|
mit
| 1,048
| 0.000954
|
import json
from tornado import gen
from tornado.httpclient import AsyncHTTPClient, HTTPError
from .exceptions import SparkPostAPIException
class TornadoTransport(object):
@gen.coroutine
def request(self, method, uri, headers, **kwargs):
if "data" in kwargs:
kwargs["body"] = kwargs.pop("data")
client = AsyncHTTPClient()
try:
response = yield client.fetch(uri, method=method, headers=headers,
**kwargs)
except HTTPError as ex:
raise SparkPostAPIException(ex.response)
if response.code == 204:
raise gen.Return(True)
if response.code == 200:
result = None
try:
result = json.loads(response.body.decode("utf-8"))
except:
pass
if result:
if 'results' in result:
raise gen.Return(result['results'])
raise gen.Return(result)
raise SparkPostAPIException(response)
|
ismailsunni/f3-factor-finder
|
core/run_it.py
|
Python
|
gpl-2.0
| 3,452
| 0.032445
|
#!/F3/core/run_it.py
# This file is used for creating a script
# Author : Ismail Sunni/@ismailsunni
# Created : 2012-04-06
import MySQLdb # accesing mysql database
from xlwt import Workbook # for writing in excel
import xlrd # for reading excel
from tempfile import TemporaryFile
import util as util
import tweet_model as tm
import preprocess as pp
from db_control import db_conn
def main_sql_to_excel():
"""Read from database then write in excel"""
# To do
# read database
# database variable
db_host = 'localhost'
db_user = 'root'
db_password = ''
db_name = 'rimus'
conn = MySQLdb.connect(db_host, db_user, db_password, db_name)
cursor = conn.cursor()
query = "SELECT * FROM `tweets`"
try:
cursor.execute(query)
result = cursor.fetchall()
# return result
except Exception, e:
util.debug('db_control.read error' + str(e))
conn.rollback()
result = None
# write to excel
book = Workbook()
activeSheet = book.add_sheet('tweets')
i = 1
activeSheet.write(i, 0, 'No')
activeSheet.write(i, 1, 'Tweet Id')
activeSheet.write(i, 2, 'Username')
activeSheet.write(i, 3, 'Created')
activeSheet.write(i, 4, 'Text')
from random import sample
result = sample(result, 3000)
i += 1
try:
for row in result:
activeSheet.write(i, 0, str(i - 1))
activeSheet.write(i, 1, str(row[0]))
activeSheet.write(i, 2, str(row[7]))
activeSheet.write(i, 3, row[3].__str__())
activeSheet.write(i, 4, pp.normalize_character(row[1]))
i += 1
# print i
if i >= 50002:
break
book.save('test_data_training2.xls')
book.save(TemporaryFile())
except Exception, e:
util.debug(str(e))
def main_excel_to_sql():
book = xlrd.open_workbook('test_data_training2.xls')
sheet = book.sheet_by_name('tweets')
tweets = []
for row in range(sheet.nrows):
if sheet.row_values(row)[5] == 1:
new_data = {}
new_data['id'] = int(sheet.row_values(row)[1])
new_data['sentiment'] = int(sheet.row_values(row)[4])
tweets.append(new_data)
# new_db = new db_conn()
print tweets
def move_data():
book = xlrd.open_workbook('data_training_TA_Ismail Sunni.xls')
sheet = book.sheet_by_name('tweets')
tweets = []
k = 0
for row in range(sheet.nrows):
if sheet.row_values(row)[6] == 3:
tweets.append(sheet.row_values(row))
conn = db_conn()
i = 0
for tweet in tweets:
query = "INSERT INTO " + conn.dev_table + "( `tweet_id`, `tweet_text`, `created_at`, `sentiment`) VALUES (" + str(tweet[1]) + ", '" + tweet[4] + "', '" + tweet[3] + "', '" + str(int(tweet[5])) +"')"
# print query
if conn.insert(query) == True:
i += 1
print i
def ultimate_function():
book = xlrd.open_workbook('data_training_TA_Ismail Sunni.xls')
sheet = book.sheet_by_name('tweets')
tweets = []
for row in range(sheet.nrows):
if sheet.row_values(row)[6] == 3:
tweets.append(sheet.row_values(row))
conn = db_conn()
i = 0
j = 0
for tweet in tweets:
query = "UPDATE " + conn.test_table + " SET `sentiment`=" + str(int(tweet[5])) + ", `dev_tweet`= 1 WHERE `tweet_id`="+str(tweet[1])
if conn.update(query) == True:
i += 1
else:
j += 1
print i
print j
def reset_data():
conn = db_conn()
query = "UPDATE " + conn.test_table + " SET `dev_tweet` = 0"
return conn.update(query)
if __name__ == '__main__':
print reset_data()
ultimate_function()
|
dholl/python-sounddevice
|
doc/fake_cffi.py
|
Python
|
mit
| 636
| 0
|
"""Mock module for Sphinx autodoc."""
class FFI(object):
NULL = NotImplemented
I_AM_FAKE = True  # This is used for the documentation of "default"
def cdef(self, _):
pass
def dlopen(self, _):
return FakeLibrary()
class FakeLibrary(object):
# from portaudio.h:
paFloat32 = paInt32 = paInt24 = paInt16 = paInt8 = paUInt8 = NotImplemented
paFramesPerBufferUnspecified = 0
def Pa_Initialize(self):
return 0
def Pa_Terminate(self):
return 0
# from stdio.h:
def fopen(*args, **kwargs):
return NotImplemented
def fclose(*args):
pass
|
dinnozap/MinecraftServerMaker
|
launch.py
|
Python
|
apache-2.0
| 2,078
| 0.039461
|
import subprocess, os, zipfile, requests
## Function Download
def download(url, fichier):
fileName = fichier
req = requests.get(url)
file = open(fileName, 'wb')
for chunk in req.iter_content(100000):
file.write(chunk)
file.close()
print("The download is finish !")
## Function Unzip
def unzip(source , destination):
with zipfile.ZipFile(source) as zf:
zf.extractall(destination)
nameinfo = open("name.info", "r")
ServerName = nameinfo.readline().rstrip()
Version = nameinfo.readline().rstrip()
VersionServer = nameinfo.readline().rstrip()
nameinfo.close()
subprocess.call(['java', '-jar', ServerName +'.jar'])
fichier = open("eula.txt", "w")
fichier.write("eula = true")
fichier.close()
if not os.path.exists("world"):
print("Whitch type of Minecraft server you want to create ?")
a=input("[1] Pre-Build (Map and Plugin) Spigot Server [2] Blanc Spigot Server [3] Semi-Build (Plugin pre installed, blanc map) : ")
if a == '1':
print(VersionServer)
if VersionServer == '1.9' or VersionServer == '1.8' or VersionServer == '1.7.10':
download('https://raw.githubusercontent.com/dinnozap/MinecraftServerMaker/master/world.zip', 'world.zip')
unzip('world.zip', '')
if not os.path.exists("plugins"):
os.mkdir("plugins")
download('https://hub.spigotmc.org/jenkins/job/Spigot-Essentials/lastSuccessfulBuild/artifact/Essentials/target/Essentials-2.x-SNAPSHOT.jar', 'plugins/essentials.jar')
download('https://www.spigotmc.org/resources/sexymotd.2474/download?version=73466', 'plugins/motd.jar')
subprocess.call(['java', '-jar', ServerName +'.jar'])
elif a=='2':
subprocess.call(['java', '-jar', ServerName +'.jar'])
elif a=='3':
if not os.path.exists("plugins"):
os.mkdir("plugins")
download('https://hub.spigotmc.org/jenkins/job/Spigot-Essentials/lastSuccessfulBuild/artifact/Essentials/target/Essentials-2.x-SNAPSHOT.jar', 'plugins/essentials.jar')
download('https://www.spigotmc.org/resources/sexymotd.2474/download?version=73466', 'plugins/motd.jar')
subprocess.call(['java', '-jar', ServerName +'.jar'])
|
gmittal/prisma
|
server/src/utils.py
|
Python
|
mit
| 975
| 0.017436
|
import scipy.misc, numpy as np, os, sys
def save_img(out_path, img):
img = np.clip(img, 0, 255).astype(np.uint8)
scipy.misc.imsave(out_path, img)
def scale_img(style_path, style_scale):
scale = float(style_scale)
o0, o1, o2 = scipy.misc.imread(style_path, mode='RGB').shape
scale = float(style_scale)
new_shape = (int(o0 * scale), int(o1 * scale), o2)
style_target = get_img(style_path, img_size=new_shape)
return style_target
def get_img(src, img_size=False):
img = scipy.misc.imread(src, mode='RGB') # misc.imresize(, (256, 256, 3))
if not (len(img.shape) == 3 and img.shape[2] == 3):
img = np.dstack((img,img,img))
if img_size != False:
img = scipy.misc.imresize(img, img_size)
return img
def exists(p, msg):
assert os.path.exists(p), msg
def list_files(in_path):
files = []
for (dirpath, dirnames, filenames) in os.walk(in_path):
files.extend(filenames)
break
return files
|
Verbalist/electrum-server
|
src/storage.py
|
Python
|
mit
| 21,577
| 0.000556
|
import plyvel
import ast
import hashlib
import os
import sys
import threading
from processor import print_log, logger
from utils import (
bc_address_to_hash_160,
Hash,
bytes8_to_int,
bytes4_to_int,
int_to_bytes8,
int_to_hex8,
int_to_bytes4,
int_to_hex4
)
"""
Patricia tree for hashing unspents
"""
# increase this when database needs to be updated
GENESIS_HASH = '00000c492bf73490420868bc577680bfc4c60116e7e85343bc624787c21efa4c'
DB_VERSION = 3
KEYLENGTH = 56 # 20 + 32 + 4
class Node(object):
def __init__(self, s):
self.k = int(s[0:32].encode('hex'), 16)
self.s = s[32:]
if self.k == 0 and self.s:
print "init error", len(self.s), "0x%0.64X" % self.k
raise BaseException("z")
def serialized(self):
k = "0x%0.64X" % self.k
k = k[2:].decode('hex')
assert len(k) == 32
return k + self.s
def has(self, c):
return (self.k & (1 << (ord(c)))) != 0
def is_singleton(self, key):
assert self.s != ''
return len(self.s) == 40
def get_singleton(self):
for i in xrange(256):
if self.k == (1 << i):
return chr(i)
raise BaseException("get_singleton")
def indexof(self, c):
assert self.k != 0 or self.s == ''
x = 0
for i in xrange(ord(c)):
if (self.k & (1 << i)) != 0:
x += 40
return x
def get(self, c):
x = self.indexof(c)
ss = self.s[x:x + 40]
_hash = ss[0:32]
value = bytes8_to_int(ss[32:40])
return _hash, value
def set(self, c, h, value):
if h is None:
h = chr(0) * 32
vv = int_to_bytes8(value)
item = h + vv
assert len(item) == 40
if self.has(c):
self.remove(c)
x = self.indexof(c)
self.s = self.s[0:x] + item + self.s[x:]
self.k |= (1 << ord(c))
assert self.k != 0
def remove(self, c):
x = self.indexof(c)
self.k &= ~(1 << ord(c))
self.s = self.s[0:x] + self.s[x + 40:]
def get_hash(self, x, parent):
if x:
assert self.k != 0
skip_string = x[len(parent) + 1:] if x != '' else ''
x = 0
v = 0
hh = ''
for i in xrange(256):
if (self.k & (1 << i)) != 0:
ss = self.s[x:x + 40]
hh += ss[0:32]
v += bytes8_to_int(ss[32:40])
x += 40
try:
_hash = Hash(skip_string + hh)
except:
_hash = None
if x:
assert self.k != 0
return _hash, v
@classmethod
def from_dict(cls, d):
k = 0
s = ''
for i in xrange(256):
if chr(i) in d:
k += 1 << i
h, value = d[chr(i)]
if h is None: h = chr(0) * 32
vv = int_to_bytes8(value)
item = h + vv
assert len(item) == 40
s += item
k = "0x%0.64X" % k # 32 bytes
k = k[2:].decode('hex')
assert len(k) == 32
out = k + s
return Node(out)
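# Tiny illustration of the packed layout above: `k` is a 256-bit mask of which
# children exist and `s` concatenates one 40-byte (hash, value) slot per child.
# n = Node.from_dict({'a': (chr(0) * 32, 5)})
# n.has('a') ==> True
# n.get('a') ==> (chr(0) * 32, 5)
# n.get_singleton() ==> 'a'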
class DB(object):
def __init__(self, path, name, cache_size):
self.db = plyvel.DB(os.path.join(path, name), create_if_missing=True,
compression=None, lru_cache_size=cache_size)
self.batch = self.db.write_batch()
self.cache = {}
self.lock = threading.Lock()
def put(self, key, s):
self.batch.put(key, s)
self.cache[key] = s
def get(self, key):
s = self.cache.get(key)
if s == 'deleted':
return None
if s is None:
with self.lock:
s = self.db.get(key)
return s
def delete(self, key):
self.batch.delete(key)
self.cache[key] = 'deleted'
def close(self):
self.db.close()
def write(self):
with self.lock:
self.batch.write()
self.batch.clear()
self.cache.clear()
def get_next(self, key):
with self.lock:
i = self.db.iterator(start=key)
k, _ = i.next()
return k
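# Minimal usage sketch of the batched wrapper above (path and size are
# illustrative):
# db = DB('/tmp', 'utxo', 16 * 1024 * 1024)
# db.put('key', 'value')           # staged in the write batch and the cache
# db.get('key') ==> 'value'        # served from the cache before write()
# db.write()                       # flush the batch to leveldb and clear both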
class Storage(object):
def __init__(self, config, shared, test_reorgs):
self.shared = shared
self.hash_list = {}
self.parents = {}
self.skip_batch = {}
self.test_reorgs = test_reorgs
# init path
self.dbpath = config.get('leveldb', 'path')
if not os.path.exists(self.dbpath):
os.mkdir(self.dbpath)
try:
self.db_utxo = DB(self.dbpath, 'utxo',
config.getint('leveldb', 'utxo_cache'))
self.db_hist = DB(self.dbpath, 'hist',
config.getint('leveldb', 'hist_cache'))
self.db_addr = DB(self.dbpath, 'addr',
config.getint('leveldb', 'addr_cache'))
self.db_undo = DB(self.dbpath, 'undo', None)
except:
logger.error('db init', exc_info=True)
self.shared.stop()
try:
self.last_hash, self.height, db_version = ast.literal_eval(
self.db_undo.get('height'))
except:
print_log('Initializing database')
self.height = 0
self.last_hash = GENESIS_HASH
self.pruning_limit = config.getint('leveldb', 'pruning_limit')
db_version = DB_VERSION
self.put_node('', Node.from_dict({}))
# check version
if db_version != DB_VERSION:
print_log(
"Your database '%s' is deprecated. "
"Please create a new database" % self.dbpath)
self.shared.stop()
return
# pruning limit
try:
self.pruning_limit = ast.literal_eval(self.db_undo.get('limit'))
except:
self.pruning_limit = config.getint('leveldb', 'pruning_limit')
self.db_undo.put('version', repr(self.pruning_limit))
# reorg limit
try:
self.reorg_limit = ast.literal_eval(
self.db_undo.get('reorg_limit'))
except:
self.reorg_limit = config.getint('leveldb', 'reorg_limit')
self.db_undo.put('reorg_limit', repr(self.reorg_limit))
# compute root hash
root_node = self.get_node('')
self.root_hash, coins = root_node.get_hash('', None)
# print stuff
print_log("Database version %d." % db_version)
print_log(
"Pruning limit for spent outputs is %d." % self.pruning_limit)
print_log("Reorg limit is %d blocks." % self.reorg_limit)
print_log("Blockchain height", self.height)
print_log("UTXO tree root hash:", self.root_hash.encode('hex'))
print_log("Coins in database:", coins)
# convert between bitcoin addresses and 20 bytes keys used for storage.
@staticmethod
def address_to_key(addr):
return bc_address_to_hash_160(addr)
def get_skip(self, key):
o = self.skip_batch.get(key)
if o is not None:
return o
k = self.db_utxo.get_next(key)
assert k.startswith(key)
return k[len(key):]
def set_skip(self, key, skip):
self.skip_batch[key] = skip
def get_proof(self, addr):
key = self.address_to_key(addr)
k = self.db_utxo.get_next(key)
p = self.get_path(k)
p.append(k)
out = []
for item in p:
v = self.db_utxo.get(item)
out.append((item.encode('hex'), v.encode('hex')))
return out
def get_balance(self, addr):
key = self.address_to_key(addr)
k = self.db_utxo.get_next(key)
if not k.startswith(key):
return 0
p = self.get_parent(k)
d = self.get_node(p)
letter = k[len(p)]
return d.get(letter)[1]
def listunspent(self, addr):
key = self.address_to_key(addr)
if key is None:
raise BaseException('Invalid Bitcoin address', addr)
out = []
with se
|
jgirardet/unolog
|
unolog/patients/models.py
|
Python
|
gpl-3.0
| 2,704
| 0.001113
|
from string import capwords
from django.db import models
CAPWORDS_ATTRS = ('name', 'firstname')
class PatientManager(models.Manager):
"""
Custom patient manager to modify create and update.
"""
attrs = CAPWORDS_ATTRS
# parameters to apply capwords to
# def create_patient(self, name=None, firstname=None, birthdate=None):
# """
# every patient creatient must use this
# """
# if not name:
# raise ValueError('Must Include a name when adding a Patient')
# if not firstname:
# raise ValueError('Must Include a firstname when adding a Patient')
# if not birthdate:
# raise ValueError('Must Include a birthdate when adding a Patient')
# patient = self.model(
# name = name,
# firstname= firstname,
# birthdate = birthdate
# )
# print('hello')
# patient.save(using=self.db)
# return patient
def create(self, **kwargs):
"""
enhancement
"""
# capwors certain fields
for i in self.attrs:
kwargs[i] = capwords(kwargs[i])
# recall base create
return super(PatientManager, self).create(**kwargs)
class Patient(models.Model):
"""
ase class of patient.&
Require on ly 3 fields : name, firstname, birthdate
"""
attrs = CAPWORDS_ATTRS
# required Field
name = models.CharField(max_length=50)
firstname = models.CharField(max_length=50)
birthdate = models.DateField()
sexe = models.BooleanField(default=True)  # True if woman, else False
# non required fields
street = models.CharField(blank=True, max_length=200, default="")
postalcode = models.CharField(blank=True, max_length=5, default="")
city = models.CharField(max_length=200, blank=True, default="")
phonenumber = models.CharField(blank=True, max_length=20, default="")
email = models.EmailField(blank=True, max_length=100, default="")
alive = models.BooleanField(default=True)
objects = PatientManager()
def __str__(self):
"""
nice printing Firstname Name
"""
return self.firstname + ' ' + self.name
def save(self, *args, **kwargs):
"""
Customizing save method; adds:
- force capwords for name and firstname
"""
for i in self.attrs:
setattr(self, i, capwords(getattr(self, i)))
super(Patient, self).save(*args, **kwargs)
"""
Fields to add:
date of death
deceased
declared primary care physician
miscellaneous notes
"""
|
Eksmo/itunes-iap
|
itunesiap/utils.py
|
Python
|
bsd-2-clause
| 466
| 0
|
import six
def force_unicode(value):
if not isinstance(value, six.string_types):
return six.text_type(value)
try:
return value.decode('utf-8')
except (AttributeError, UnicodeEncodeError):
return value
def force_bytes(value):
if not isinstance(value, six.string_types):
value = force_unicode(value)
try:
return value.encode('utf-8')
except (AttributeError, UnicodeDecodeError):
return value
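# Round-trip sketch (Python 2 semantics, where byte strings are string types):
# force_unicode(b'caf\xc3\xa9') ==> u'caf\xe9'
# force_bytes(u'caf\xe9') ==> b'caf\xc3\xa9'
# force_bytes(42) ==> b'42' (non-strings go through text_type first)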
|
ngageoint/scale
|
scale/job/execution/configuration/workspace.py
|
Python
|
apache-2.0
| 653
| 0
|
"""Defines a workspace that is needed by a task"""
from __future__ import unicode_literals
class TaskWorkspace(object):
"""Represents a workspace needed by a task
"""
def __init__(self, name, mode, volume_name=None):
"""Creates a task workspace
:param name: The name of the workspace
:type name: string
:param mode: The mode to use for the workspace, either 'ro' or 'rw'
:type mode: string
:param volume_name: The name to use for the workspace's volume
:type volume_name: string
"""
self.name = name
self.mode = mode
self.volume_name = volume_name
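# Example (values are illustrative):
# workspace = TaskWorkspace('raw-data', 'ro', volume_name='wksp-raw-data')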
|
Duke-GCB/DukeDSHandoverService
|
d4s2_api/migrations/0017_auto_20180323_1833.py
|
Python
|
mit
| 944
| 0.001059
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2018-03-23 18:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('d4s2_api', '0016_email_group_to_set'),
]
operations = [
migrations.AlterUniqueTogether(
name='emailtemplate',
unique_together=set([('template_set', 'template_type')]),
),
migrations.RemoveField(
model_name='historicalemailtemplate',
name='group',
),
migrations.AlterField(
model_name='emailtemplate',
name='template_set',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='d4s2_api.EmailTemplateSet'),
),
migrations.RemoveField(
model_name='emailtemplate',
name='group',
),
]
|
nisavid/home
|
.config/ipython/profile_simple/ipython_notebook_config.py
|
Python
|
unlicense
| 17,850
| 0.003361
|
# Configuration file for ipython-notebook.
c = get_config()
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# NotebookApp will inherit config from: BaseIPythonApplication, Application
# The IPython profile to use.
# c.NotebookApp.profile = u'default'
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# The IP address the notebook server will listen on.
# c.NotebookApp.ip = '127.0.0.1'
# The base URL for the notebook server
# c.NotebookApp.base_project_url = '/'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.NotebookApp.verbose_crash = False
# The number of additional ports to try if the specified port is not available.
# c.NotebookApp.port_retries = 50
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.NotebookApp.copy_config_files = False
# The base URL for the kernel server
# c.NotebookApp.base_kernel_url = '/'
# The port the notebook server will listen on.
# c.NotebookApp.port = 8888
# Whether to overwrite existing config files when copying
# c.NotebookApp.overwrite = False
# Whether to prevent editing/execution of notebooks.
# c.NotebookApp.read_only = False
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library IPython uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
# c.NotebookApp.open_browser = True
# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = u''
# The hostname for the websocket server.
# c.NotebookApp.websocket_host = ''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.NotebookApp.ipython_dir = u'/home/ivan/.config/ipython'
# Set the log level by value or name.
# c.NotebookApp.log_level = 20
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from IPython.lib import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = u''
# The Logging format template
# c.NotebookApp.log_format = '[%(name)s] %(message)s'
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = u''
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
# c.NotebookApp.webapp_settings = {}
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = u''
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: KernelApp, BaseIPythonApplication,
# Application, InteractiveShellApp
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.zmq.displayhook.ZMQDisplayHook'
# Set the IP or interface on which the kernel will listen.
# c.IPKernelApp.ip = '127.0.0.1'
#
# c.IPKernelApp.parent_appname = u''
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# set the stdin (DEALER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.zmq.iostream.OutStream'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# A file to be run
# c.IPKernelApp.file_to_run = ''
# The IPython profile to use.
# c.IPKernelApp.profile = u'default'
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.IPKernelApp.pylab = None
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security-
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# If true, an 'import *' is done from numpy and pylab, when using pylab
# c.IPKernelApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = u'/home/ivan/.config/ipython'
# ONLY USED ON WINDOWS Interrupt this process when the parent is signalled.
# c.IPKernelApp.interrupt = 0
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# Enable GUI event loop integration ('qt', 'wx', 'gtk', 'glut', 'pyglet',
# 'osx').
# c.IPKernelApp.gui = None
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s] %(message)s'
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
#
# c.ZMQInteractiveShell.history_length = 10000
# Don't
|
sasha-gitg/python-aiplatform
|
schema/predict/prediction/scripts/fixup_prediction_v1beta1_keywords.py
|
Python
|
apache-2.0
| 5,910
| 0.001184
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
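# e.g. partition(lambda x: x % 2 == 0, [1, 2, 3, 4]) ==> ([2, 4], [1, 3])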
class predictionCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: not a.keyword.value in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=predictionCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the prediction client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}'
|
does
|
not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
|
cpausmit/Kraken
|
pandaf/014/mc.py
|
Python
|
mit
| 129
| 0.007752
|
import PandaProd.Producer.opts
PandaProd.Producer.opts.options.config = 'Autumn18'
from PandaProd.Producer.prod import process
|
googleinterns/wss
|
core/train_utils_core.py
|
Python
|
apache-2.0
| 13,925
| 0.007038
|
# Lint as: python2, python3
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for training."""
import math
import tensorflow as tf
from tensorflow.contrib import slim
# Custom import
from third_party.deeplab.core import preprocess_utils
def _div_maybe_zero(total_loss, num_present):
"""Normalizes the total loss with the number of present pixels."""
return tf.cast(num_present > 0, tf.float32) * tf.math.divide(
total_loss, tf.maximum(1e-5, num_present))
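# Worked example (sketch): with total_loss = 10.0 and num_present = 4.0 this
# returns 10.0 / 4.0 = 2.5; with num_present = 0.0 it returns 0.0 instead of
# dividing by zero, because the leading cast zeroes the result while
# tf.maximum clamps the denominator at 1e-5.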
net_to_stride_to_endpoints_name = {
'xception_65': {
4: 'xception_65/entry_flow/block1',
8: 'xception_65/entry_flow/block2',
## All stride=16 below
13: 'xception_65/entry_flow/block3',
14: 'xception_65/middle_flow/block1',
15: 'xception_65/exit_flow/block1',
16: 'xception_65/exit_flow/block2',
},
'resnet_v1_50': {
8: 'resnet_v1_50/block1',
## All stride=16 below
14: 'resnet_v1_50/block2',
15: 'resnet_v1_50/block3',
16: 'resnet_v1_50/block4',
},
'resnet_v1_101': {
8: 'resnet_v1_101/block1',
## All stride=16 below
14: 'resnet_v1_101/block2',
15: 'resnet_v1_101/block3',
16: 'resnet_v1_101/block4',
},
}
def compute_cam_v2(
end_points,
logits,
cls_label,
num_class=21,
use_attention=True,
attention_dim=128,
strides=(15, 16),
is_training=True,
valid_mask=None,
net='xception_65',
):
"""Compute Grad-CAM.
Args:
end_points: Network end_points (dict).
logits: Cls logits with shape [N, #classes-1] (multi-label, no bg)
cls_label: Ground truth image-level label
num_class: Number of classes including background
use_attention: Using self-attention to refine or not. If not, then no
learnable parameters
attention_dim: Embedding space dimension for key and query used in the
self-attention module
strides: Use feature maps from which stride to compute pixel similarity for
Grad-CAM refinement
is_training: Indicate training or inference mode
    valid_mask: Identifies the valid region of the input. It is used to avoid
attending to padding regions
net: Specify which network is used
  Returns:
    A tuple (out_cam, out_att_cam): the computed Grad-CAMs (with a background
    channel prepended) and their attention-refined version, which is None when
    use_attention is False.
  """
# Sanity check: Make sure strides are sorted
strides = sorted(list(strides))[::-1]
# Always use the last stride layer to compute Grad-CAM
conv_layer = end_points[net_to_stride_to_endpoints_name[net][strides[0]]]
cams = []
# Can we speed up this part?
for c in range(num_class-1):
grads = tf.gradients(logits[:, c], conv_layer)
weights = tf.reduce_mean(grads, axis=(1, 2))
weighted_grads = weights * conv_layer
curr_cams = tf.nn.relu(tf.reduce_sum(weighted_grads, axis=3))
cams.append(curr_cams)
cams = tf.stack(cams, axis=-1)
cls_label = tf.reshape(cls_label, [-1, 1, 1, num_class - 1])
cams = cls_label * cams
# Normalize to [0, 1]
cams = _div_maybe_zero(
cams, tf.reduce_max(cams, axis=(1, 2), keepdims=True))
out_cam = tf.stop_gradient(cams, name='stride_{}/cam'.format(strides[0]))
if not use_attention:
out_att_cam = None
else:
valid_mask = tf.compat.v1.image.resize_nearest_neighbor(
valid_mask, preprocess_utils.resolve_shape(out_cam, 4)[1:3])
out_att_cam = compute_self_att_v2(
end_points,
out_cam,
num_class,
attention_dim,
strides,
is_training,
linformer=False,
valid_mask=valid_mask,
net=net)
# Add bg score
bg = 1 - tf.reduce_max(out_cam, axis=3, keepdims=True)
out_cam = tf.concat([bg, out_cam], axis=-1)
return out_cam, out_att_cam
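# Usage sketch (shapes are illustrative): given end_points from a forward pass
# of 'xception_65', classification logits of shape [N, num_class - 1], and
# image-level labels cls_label of the same shape,
#     cam, att_cam = compute_cam_v2(end_points, logits, cls_label,
#                                   valid_mask=valid_mask)
# yields cam of shape [N, H, W, num_class] (background prepended) and, when
# use_attention=True, an attention-refined att_cam from compute_self_att_v2.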
def compute_self_att_v2(
end_points,
logits,
num_class=21,
attention_dim=128,
strides=(15, 16),
is_training=True,
linformer=True,
valid_mask=None,
factor=8,
downsample_type='nearest',
net='xception_65'):
"""Compute self-attention for segmentation head.
Args:
end_points: Network end_points (dict).
logits: The input seed for refinement. Used as ``value'' in self-attention.
Can be either logits, probability, or score map.
num_class: Number of classes including background
attention_dim: Embedding space dimension for key and query used in the
self-attention module
strides: Use feature maps from which stride to compute pixel similarity
is_training: Indicate training or inference mode
linformer: Adopt the idea from https://arxiv.org/abs/2006.04768 to reduce
memory usage in self-attention computation. But instead of learning the
downsample function, we use deterministic image downsample functions
    valid_mask: Identifies the valid region of the input. It is used to avoid
attending to padding regions
factor: Downsample factor used in linformer mode
downsample_type: Use which downsample method to reduce the memory usage. Can
be either 'nearest' or 'bilinear'. Default: 'nearest'
net: Specify which network is used
Returns:
A list of computed Grad-CAMs or refined ones.
"""
# Sanity check: Make sure strides are sorted
strides = sorted(list(strides))[::-1]
conv_layer_list = []
for stride in strides:
conv_layer = end_points[net_to_stride_to_endpoints_name[net][stride]]
conv_layer_list.append(conv_layer)
# Resize to seed resolution first
h, w = preprocess_utils.resolve_shape(logits, 4)[1:3]
conv_layer_list = [
tf.compat.v1.image.resize_bilinear(
conv, (h, w), align_corners=True)
for conv in conv_layer_list
]
conv_layer_merged = tf.concat(conv_layer_list, axis=-1)
conv_layer_merged = tf.stop_gradient(conv_layer_merged)
score = tf.stop_gradient(logits)
# This tells us what input it is (decoder logits or Grad-CAM)
  value_dim = tf.shape(score)[-1]
# Only valid when we use Linformer style to reduce size for key and value
if downsample_type == 'bilinear':
resize_fn = tf.compat.v1.image.resize_bilinear
else:
resize_fn = tf.compat.v1.image.resize_nearest_neighbor
scope = 'hyper_column'
with tf.variable_scope(scope):
    with slim.arg_scope([slim.conv2d],
                        activation_fn=None,
normalizer_fn=None,
biases_initializer=None,
reuse=tf.AUTO_REUSE):
k = slim.conv2d(
conv_layer_merged, attention_dim, [1, 1], scope='key')
q = slim.conv2d(
conv_layer_merged, attention_dim, [1, 1], scope='query')
q = tf.reshape(q, [-1, h * w, attention_dim])
if valid_mask is not None:
valid_mask_q = tf.reshape(valid_mask, [-1, h * w, 1])
# Adopt idea from Linformer (https://arxiv.org/abs/2006.04768) to reduce the
# memory usage. Instead of learning a downsample function, we use deterministic
# image downsample methods (nearest neighbor or bilinear) to reduce the size
# of key and value.
if linformer:
k = resize_fn(
k, ((h // factor + 1), (w // factor + 1)), align_corners=True)
k = tf.reshape(k,
[-1, (h // factor + 1) * (w // factor + 1), attention_dim])
if valid_mask is not None:
valid_mask_k = tf.compat.v1.image.resize_nearest_neighbor(
valid_mask, ((h // factor + 1), (w // factor + 1)))
valid_mask_k = tf.reshape(
tf.cast(valid_mask_k, tf.float32),
[-1, (h // factor + 1) * (w // factor + 1), 1])
else:
k = tf.reshape(k, [-1, h * w, attention_dim])
valid_mask_k = tf.reshape(valid_mask, [-1, h * w, 1])
matmul_qk = tf.matmul(q, k,
|
zutshi/S3CAMX
|
src/graph.py
|
Python
|
bsd-2-clause
| 32,399
| 0.00213
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import err
import utils as U
# needed for testing
import random as rand
import time
# FIX heap duplicates used by different ksp!
import heapq
from heapq import heappush, heappop
from itertools import count
from collections import defaultdict
from blessings import Terminal
term = Terminal()
def graph_factory(graph_type):
if graph_type == 'nx':
global nx
import networkx as nx
return GraphNX()
elif graph_type == 'gt':
global gt
import graph_tool.all as gt
return GraphGT()
else:
raise err.Fatal('unknown graph library requested: {}'.format(graph_type))
class GraphGT(object):
@staticmethod
def compare(G1, G2):
raise NotImplementedError
@staticmethod
def compare_nodes(n1, n2):
raise NotImplementedError
@staticmethod
def compare_edges():
raise NotImplementedError
def __init__(self, G=None, Type=None):
# unused maxVert
self.maxVertices = 0
# create a Di-graph if not created already
if G is None:
self.G = gt.Graph()
self.Type = 'test_no'
else:
self.G = G
self.Type = Type
self.node_vertex_dict = {}
self.edge_attr_dict = self.G.new_edge_property('object')
def check_n_add_n_get(self, n):
v = self.node_vertex_dict.get(n)
# if node does not exist in the graph
if v is None:
# allocate a new vertex
v = self.G.add_vertex()
# add it to the dictionary for future
self.node_vertex_dict[n] = v
return v
def nodes(self):
# why is this needed?
raise NotImplementedError
def add_edge(
self,
n1,
n2,
attr_val=None,
):
v1 = self.check_n_add_n_get(n1)
v2 = self.check_n_add_n_get(n2)
e = self.G.add_edge(v1, v2)
self.edge_attr_dict[e] = attr_val
def add_edge_wt(
self,
v1,
v2,
weight,
):
raise NotImplementedError
self.G.add_edge(v1, v2, w=weight)
def add_node(self, n):
raise NotImplementedError # Actually, its just not tested...
self.check_n_add_n_get(n)
return
############################## UNFINISHED FROM HERE
def get_path_attr_list(self, path):
raise NotImplementedError
attr_list = []
for (v1, v2) in U.pairwise(path):
attr_list.append(self.G[v1][v2]['attr'])
return attr_list
# Actually draws the graph!! Need to rewrite get_path_generator() from
# scratch for gt. Also, destroys the passed in graph (oops) :D
# Hence, use this function only for debugging!!
# # TODO: Fix it, of course?
def get_path_generator(
self,
source_list,
sink_list,
max_depth=None,
):
print 'WARNING: This is actually a plotting function!!!'
num_source_nodes = len(source_list)
num_sink_nodes = len(sink_list)
# super_source_vertex = g.add_vertex()
# super_sink_vertex = g.add_vertex()
super_source_vertex = 'super_source_vertex'
super_sink_vertex = 'super_sink_vertex'
edge_list = zip([super_source_vertex] * num_source_nodes, source_list)
for e in edge_list:
self.add_edge(*e)
edge_list = zip(sink_list, [super_sink_vertex] * num_sink_nodes)
for e in edge_list:
self.add_edge(*e)
g = self.G
pos = gt.arf_layout(g, max_iter=0)
gt.graph_draw(g, pos=pos, vertex_text=self.G.vertex_index)
time.sleep(1000)
print 'exiting'
exit()
gt.graph_draw(self.G, vertex_text=self.G.vertex_index)
time.sleep(1000)
# print edge_list
# Add edges:
# \forall sink \in sink_list. sink -> super sink node
edge_list = zip(sink_list, [dummy_super_sink_node] * num_sink_nodes)
H.add_edges_from(edge_list)
# print edge_list
# print '='*80
# TODO: WHY?
# Switching this on with def path_gen(), results in empty path and no further results!!
    # Explanation required!
# for path in nx.all_simple_paths(H, dummy_super_source_node, dummy_super_sink_node):
# print path
# print '='*80
# TODO: how to do this with lambda?
# Also, is this indeed correct?
def path_gen():
for i in nx.all_simple_paths(H, dummy_super_source_node,
dummy_super_sink_node):
# Remove the first (super source)
# and the last element (super sink)
yield i[1:-1]
# return lambda: [yield i[1:-1] for i in nx.all_simple_paths(H,
# dummy_super_source_node, dummy_super_sink_node)]
return path_gen()
def neighbors(self, node):
raise NotImplementedError
return self.G.neighbors(node)
def draw(self, pos_dict=None):
raise NotImplementedError
nx.draw_networkx(self.G, pos=pos_dict, labels=pos_dict,
with_labels=True)
def __contains__(self, key):
raise NotImplementedError
return key in self.G
def __repr__(self):
raise NotImplementedError
s = ''
s += '''==== Nodes ==== {} '''.format(self.G.nodes())
s += '''==== Edges ==== {} '''.format(self.G.edges())
return s
class GraphNX(object):
@staticmethod
def compare(G1, G2):
G1 = G1.G
G2 = G2.G
G1_nodes_set = set(G1.nodes())
G2_nodes_set = set(G2.nodes())
G1_edges_set = set(G1.edges())
G2_edges_set = set(G2.edges())
G1_in_G2_nodes = G1_nodes_set.issubset(G2_nodes_set)
G2_in_G1_nodes = G2_nodes_set.issubset(G1_nodes_set)
G1_in_G2_edges = G1_edges_set.issubset(G2_edges_set)
G2_in_G1_edges = G2_edges_set.issubset(G1_edges_set)
G1_in_G2 = G1_in_G2_nodes and G1_in_G2_edges
G2_in_G1 = G2_in_G1_nodes and G2_in_G1_edges
print 'G1_in_G2_nodes: {}, G1_in_G2_edges: {}'.format(G1_in_G2_nodes,
G1_in_G2_edges)
print 'G2_in_G1_nodes: {}, G2_in_G1_edges: {}'.format(G2_in_G1_nodes,
G2_in_G1_edges)
print '''G1_nodes_set - G2_nodes_set
{}
'''.format(G1_nodes_set
- G2_nodes_set)
G1_and_G2_are_equal = G1_in_G2 and G2_in_G1
print 'G1_in_G2: {}, G2_in_G1: {}\n'.format(G1_in_G2, G2_in_G1)
return G1_and_G2_are_equal
@staticmethod
def compare_nodes(n1, n2):
raise NotImplementedError
@staticmethod
def compare_edges():
raise NotImplementedError
def __init__(self, G=None, Type=None):
# unused maxVert
self.maxVertices = 0
self.ctr = 0
# create a Di-graph if not created already
if G is None:
self.G = nx.DiGraph()
self.Type = 'test_no'
else:
self.G = G
self.Type = Type
def nodes(self):
return self.G.nodes()
def add_edge(self, v1, v2, ci=None, pi=None, weight=1):
self.G.add_edge(v1, v2, weight=1, ci=ci, pi=pi)
self.ctr += 1
if self.ctr % 1000 == 0:
with term.location(x=100, y=term.height-10):
print(term.green('nodes={}, edges={}'
.format(
self.G.number_of_nodes(),
self.G.number_of_edges())))
def add_edges_from(self, edge_list, ci=None, pi=None, weight=1):
self.G.add_edges_from(edge_list, weight=1, ci=ci, pi=pi)
def add_node(self, v):
self.G.add_node(v)
def get_path_attr_list(self, path, attrs):
attr_map = defaultdict(list)
for (v1, v2) in U.pairwise(path):
for attr in attrs:
attr_map[attr].append(self.G[v1][v2][attr])
return attr_map
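    # Usage sketch (hypothetical nodes and attribute values):
    #     g = graph_factory('nx')
    #     g.add_edge('a', 'b', ci=0, pi=1)
    #     g.add_edge('b', 'c', ci=1, pi=2)
    #     g.get_path_attr_list(['a', 'b', 'c'], ['ci', 'pi'])
    # returns {'ci': [0, 1], 'pi': [1, 2]} (as a defaultdict).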
    # ###################### KSP 1 ##################################################
    # https://gist.github.com/guilhermemm/d
|
3YOURMIND/django-migration-linter
|
tests/test_project/app_make_not_null_with_django_default/migrations/0001_initial.py
|
Python
|
apache-2.0
| 661
| 0
|
# Generated by Django 2.2 on 2020-06-20 15:17
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="A",
fields=[
(
"id",
|
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("col", m
|
odels.CharField(max_length=10, null=True)),
],
),
]
|
gingi99/research_dr
|
python/FPgrowth/orange_fpgrowth.py
|
Python
|
mit
| 10,802
| 0.015115
|
# coding: utf-8
# python 3.5
import Orange
from orangecontrib.associate.fpgrowth import *
import pandas as pd
import numpy as np
import sys
import os
from collections import defaultdict
from itertools import chain
from itertools import combinations
from itertools import compress
from itertools import product
from sklearn.metrics import accuracy_score
from multiprocessing import Pool
from multiprocessing import freeze_support
# Global Setting
DIR_UCI = '/mnt/data/uci'
# ------------------------------------------------------
# Rule Class
# ------------------------------------------------------
class Rule :
def __init__(self):
self.value = list()
self.consequent = list()
self.support = float()
self.conf = float()
def setValue(self, values) :
self.value = values
def setConsequent(self, consequents) :
self.consequent = consequents
def setSupport(self, supports) :
self.support = supports
def setConf(self, confidence) :
self.conf = confidence
def getValue(self) :
return(self.value)
def getConsequent(self) :
return(self.consequent)
def getSupport(self) :
return(self.support)
def getSupportD(self) :
return(self.support * len(self.value))
def getConf(self) :
return(self.conf)
def output(self) :
print("value:" + str(self.value))
print("consequent:" + str(self.consequent))
print("support:" + str(self.support))
print("conf:" + str(self.conf))
# ======================================================
# Can the class be identified from the rules once P attribute values are known?
# ======================================================
def getPerIdentifiedClass(rules, p) :
attribute_values = [rule.getValue() for rule in rules]
attribute_values = list(chain.from_iterable(attribute_values))
attribute_values = list(set(attribute_values))
combi_attribute_values = combinations(attribute_values,p)
count = 0
bunbo = 0
for combi in combi_attribute_values :
bunbo += 1
rules_target = []
for rule in rules :
matching_count = len(list(set(combi) & set(rule.getValue())))
if matching_count == len(list(combi)) :
rules_target.append(rule)
        # If rules_target is empty, exclude this combination from the evaluation
if len(rules_target) == 0:
bunbo -= 1
#
else :
consequents = [rule.getConsequent() for rule in rules_target]
if len(list(set(consequents))) == 1:
count += 1
if bunbo == 0:
ans = 0
else:
ans = (float(count) / float(bunbo))
return(ans)
# ======================================================
# Whether the rule explains the target object
# ======================================================
def isExplainRule(obj, rule) :
matching_count = len(list(set(obj) & set(rule.getValue())))
if matching_count == len(rule.getValue()) : return(True)
else : return(False)
# ======================================================
# Compute the matching factor between an object and a rule
# ======================================================
def getMatchingFactor(obj, rule) :
matching_factor = len(list(set(obj) & set(rule.getValue())))
matching_factor = matching_factor / len(rule.getValue())
return(matching_factor)
# ======================================================
# Return the support P of a rule
# ======================================================
def getSupportP(obj, rule) :
matching_factor = getMatchingFactor(obj, rule)
return(rule.getSupportD() * matching_factor)
# ======================================================
# Predict the class of the target object from the rules
# ======================================================
def estimateClass(obj, rules) :
list_judge = [isExplainRule(obj, r) for r in rules]
    # If at least one rule matches
if any(list_judge) :
consequents = [rules[i].getConsequent() for i, judge in enumerate(list_judge) if judge]
        # If the matched rules all infer exactly one class
if len(set(consequents)) == 1 :
return(consequents[0])
else :
rules_match = list(compress(rules,list_judge))
supportD = [r.getSupportD() for r in rules_match]
return(rules_match[supportD.index(max(supportD))].getConsequent())
    # If no rule fully matches obj, estimate from partially matching rules
else :
supportP = [getSupportP(obj, rule) for rule in rules]
return(rules[supportP.index(max(supportP))].getConsequent())
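# Tie-breaking recap (hypothetical obj): if obj == ['a1', 'b2'] is fully
# explained by several rules that disagree on the class, the rule with the
# largest getSupportD() wins; if no rule fully matches, the rule with the
# largest partial-match score getSupportP(obj, rule) decides.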
# ======================================================
# LERS による精度評価
# ======================================================
def predictByLERS(FILENAME, iter1, iter2, rules) :
# read test data
filepath = DIR_UCI+'/'+FILENAME+'/alpha/'+FILENAME+'-test'+str(iter1)+'-'+str(iter2)+'.txt'
decision_table_test = pd.read_csv(filepath, delimiter=' ', header=None)
decision_table_test = decision_table_test.dropna()
decision_class = decision_table_test[decision_table_test.columns[-1]].values.tolist()
decision_table_test = decision_table_test.drop(decision_table_test.columns[len(decision_table_test.columns)-1], axis=1)
decision_table_test = decision_table_test.values.tolist()
    # Predict with LERS
predictions = []
for obj in decision_table_test:
estimated_class = estimateClass(obj, rules)
predictions.append(estimated_class)
    # Compute the accuracy
accuracy = accuracy_score(decision_class, predictions)
print(accuracy)
return(accuracy)
# =====================================
# Main function
# =====================================
def getRulesByFPGrowth(FILENAME, classes, iter1, iter2, minsup, minconf) :
# read data
filepath = DIR_UCI+'/'+FILENAME+'/alpha/'+FILENAME+'-train'+str(iter1)+'-'+str(iter2)+'.txt'
data_pd = pd.read_csv(filepath, delimiter=' ')
pd.DataFrame.to_csv(data_pd, DIR_UCI+'/'+FILENAME+'/alpha/'+FILENAME+'-train'+str(iter1)+'-'+str(iter2)+'.basket', index=False, sep=',')
filepath = DIR_UCI+'/'+FILENAME+'/alpha/'+FILENAME+'-train'+str(iter1)+'-'+str(iter2)+'.basket'
data_table = Orange.data.Table(filepath)
#print len(data_table)
# set parameter
num_lines = sum(1 for line in open(filepath))
minsup = float(minsup) / float(num_lines)
#
#itemsets = frequent_itemsets(data_table, minsup)
#print(itemsets)
#print(list(itemsets))
X, mapping = OneHot.encode(data_table, include_class=True)
#print(X)
itemsets = dict(frequent_itemsets(X, minsup))
#print(itemsets)
#print(len(itemsets))
rules = [(P, Q, supp, conf) for P, Q, supp, conf in association_rules(itemsets, minconf) if len(Q) == 1]
#print(rules)
names = {item: '{}={}'.format(var.name, val) for item, var, val in OneHot.decode(mapping, data_table, mapping)}
for ante, cons, supp, conf in rules:
print(', '.join(names[i] for i in ante), '-->', names[next(iter(cons))], '(supp: {}, conf: {})'.format(supp, conf))
# induce rules
#rules_orange = Orange.associate.AssociationRulesSparseInducer(data_table, support=minsup, confidence=minconf)
#rules_orange = Orange.associate.AssociationRulesSparseInducer(data_table, support = minsup, max_item_sets = 2000)
# convert Rule Class
#rules = []
#for rule_orange in rules_orange :
# consequent = rule_orange.right.get_metas(str).keys()
# if len(consequent) == 1 and consequent[0] in classes and rule_orange.confidence >= minconf :
# rule = Rule()
# rule.setValue(rule_orange.left.get_metas(str).keys())
# rule.setConsequent(consequent[0])
# rule.setSupport(rule_orange.support)
# rule.setConf(rule_orange.confidence)
# rules.append(rule)
# END
#return(rules)
# ======================================================
# Apriori_LERS
# ======================================================
def Apriori_LERS(FILENAME, classes, iter1, iter2, min_sup, min_conf):
    # Extract rules
rules = getRulesByApriori(FILENAME, classes, iter1, iter2, min_sup, min_conf)
# predict by LERS
accuracy = predictByLERS(FILENAME, iter1, iter2, rules)
# save
savepath = DIR_UCI+'/'+FILENAME+'/Apriori_LERS.csv'
with open(savepath, "a") as f :
f.writelines('Apriori_LERS,{min_sup},{FIL
|
zoofIO/flexx
|
flexx/ui/widgets/_lineedit.py
|
Python
|
bsd-2-clause
| 7,452
| 0.001476
|
"""
The ``LineEdit`` and ``MultiLineEdit`` widgets provide a way for the user
to input text.
.. UIExample:: 100
from flexx import app, event, ui
class Example(ui.Widget):
def init(self):
with ui.VBox():
self.line = ui.LineEdit(placeholder_text='type here')
self.l1 = ui.Label(html='<i>when user changes text</i>')
self.l2 = ui.Label(html='<i>when unfocusing or hitting enter </i>')
self.l3 = ui.Label(html='<i>when submitting (hitting enter)</i>')
ui.Widget(flex=1)
@event.reaction('line.user_text')
def when_user_changes_text(self, *events):
self.l1.set_text('user_text: ' + self.line.text)
@event.reaction('line.user_done')
def when_user_is_done_changing_text(self, *events):
self.l2.set_text('user_done: ' + self.line.text)
@event.reaction('line.submit')
def when_user_submits_text(self, *events):
self.l3.set_text('submit: ' + self.line.text)
"""
from ... import event
from . import Widget
class LineEdit(Widget):
""" An input widget to edit a line of text.
The ``node`` of this widget is a text
`<input> <https://developer.mozilla.org/docs/Web/HTML/Element/input>`_.
"""
DEFAULT_MIN_SIZE = 100, 28
CSS = """
.flx-LineEdit {
color: #333;
padding: 0.2em 0.4em;
border-radius: 3px;
border: 1px solid #aaa;
margin: 2px;
}
.flx-LineEdit:focus {
outline: none;
box-shadow: 0px 0px 3px 1px rgba(0, 100, 200, 0.7);
}
"""
## Properties
text = event.StringProp(settable=True, doc="""
The current text of the line edit. Settable. If this is an empty
string, the placeholder_text is displayed instead.
""")
password_mode = event.BoolProp(False, settable=True, doc="""
        Whether the inserted text should be hidden.
""")
placeholder_text = event.StringProp(settable=True, doc="""
The placeholder text (shown when the text is an empty string).
""")
autocomp = event.TupleProp(settable=True, doc="""
A tuple/list of strings for autocompletion. Might not work in all browsers.
""")
disabled = event.BoolProp(False, settable=True, doc="""
Whether the line edit is disabled.
""")
## Methods, actions, emitters
def _create_dom(self):
global window
# Create node element
node = window.document.createElement('input')
node.setAttribute('type', 'input')
node.setAttribute('list', self.id)
self._autocomp = window.document.createElement('datalist')
self._autocomp.id = self.id
node.appendChild(self._autocomp)
f1 = lambda: self.user_text(self.node.value)
self._addEventListener(node, 'input', f1, False)
self._addEventListener(node, 'blur', self.user_done, False)
#if IE10:
# self._addEventListener(self.node, 'change', f1, False)
return node
@event.emitter
def user_text(self, text):
""" Event emitted when the user edits the text. Has ``old_value``
and ``new_value`` attributes.
"""
d = {'old_value': self.text, 'new_value': text}
self.set_text(text)
return d
@event.emitter
def user_done(self):
""" Event emitted when the user is done editing the text, either by
moving the focus elsewhere, or by hitting enter.
Has ``old_value`` and ``new_value`` attributes (which are the same).
"""
d = {'old_value': self.text, 'new_value': self.text}
return d
@event.emitter
def submit(self):
""" Event emitted when the user strikes the enter or return key
(but not when losing focus). Has ``old_value`` and ``new_value``
attributes (which are the same).
"""
self.user_done()
d = {'old_value': self.text, 'new_value': self.text}
return d
@event.emitter
def key_down(self, e):
        # Prevent propagating the key
ev = super().key_down(e)
pkeys = 'Escape', # keys to propagate
if (ev.modifiers and ev.modifiers != ('Shift', )) or ev.key in pkeys:
pass
else:
e.stopPropagation()
if ev.key in ('Enter', 'Return'):
self.submit()
# Nice to blur on mobile, since it hides keyboard, but less nice on desktop
# self.node.blur()
elif ev.key == 'Escape':
self.node.blur()
return ev
## Reactions
@event.reaction
def __text_changed(self):
self.node.value = self.text
@event.reaction
def __password_mode_changed(self):
self.node.type = ['text', 'password'][int(bool(self.password_mode))]
@event.reaction
def __placeholder_text_changed(self):
self.node.placeholder = self.placeholder_text
# note: this works in the browser but not in e.g. firefox-app
@event.reaction
def __autocomp_changed(self):
global window
autocomp = self.autocomp
# Clear
for op in self._autocomp:
self._autocomp.removeChild(op)
# Add new options
for option in autocomp:
op = window.document.createElement('option')
op.value = option
self._autocomp.appendChild(op)
@event.reaction
def __disabled_changed(self):
if self.disabled:
self.node.setAttribute("disabled", "disabled")
else:
self.node.removeAttribute("disabled")
class MultiLineEdit(Widget):
""" An input widget to edit multiple lines of text.
The ``node`` of this widget is a
`<textarea> <https://developer.mozilla.org/docs/Web/HTML/Element/textarea>`_.
"""
DEFAULT_MIN_SIZE = 100, 50
CSS = """
.flx-MultiLineEdit {
resize: none;
overflow-y: scroll;
color: #333;
padding: 0.2em 0.4em;
border-radius: 3px;
border: 1px solid #aaa;
margin: 2px;
}
.flx-MultiLineEdit:focus {
outline: none;
box-shadow: 0px 0px 3px 1px rgba(0, 100, 200, 0.7);
}
"""
text = event.StringProp(settable=True, doc="""
The current text of the multi-line edit. Settable. If this is an empty
string, the placeholder_text is displayed instead.
""")
    def _create_dom(self):
        global window
        node = window.document.createElement('textarea')
f1 = lambda: self.user_text(self.node.value)
self._addEventListener(node, 'input', f1, False)
self._addEventListener(node, 'blur', self.user_done, False)
return node
@event.reaction
    def __text_changed(self):
        self.node.value = self.text
@event.emitter
def user_text(self, text):
""" Event emitted when the user edits the text. Has ``old_value``
and ``new_value`` attributes.
"""
d = {'old_value': self.text, 'new_value': text}
self.set_text(text)
return d
@event.emitter
def user_done(self):
""" Event emitted when the user is done editing the text by
moving the focus elsewhere. Has ``old_value`` and ``new_value``
attributes (which are the same).
"""
d = {'old_value': self.text, 'new_value': self.text}
return d
|
SlicerRt/SlicerDebuggingTools
|
PyDevRemoteDebug/ptvsd-4.1.3/ptvsd/_vendored/pydevd/pydevd_plugins/django_debug.py
|
Python
|
bsd-3-clause
| 16,161
| 0.003589
|
from _pydevd_bundle.pydevd_comm import CMD_SET_BREAK, CMD_ADD_EXCEPTION_BREAK
import inspect
from _pydevd_bundle.pydevd_constants import STATE_SUSPEND, get_thread_id, dict_iter_items, DJANGO_SUSPEND, IS_PY2
from pydevd_file_utils import get_abs_path_real_path_and_base_from_file, normcase
from _pydevd_bundle.pydevd_breakpoints import LineBreakpoint, get_exception_name
from _pydevd_bundle import pydevd_vars
import traceback
from _pydev_bundle import pydev_log
from _pydevd_bundle.pydevd_frame_utils import add_exception_to_frame, FCode, just_raised, ignore_exception_trace
IS_DJANGO18 = False
IS_DJANGO19 = False
IS_DJANGO19_OR_HIGHER = False
try:
import django
version = django.VERSION
IS_DJANGO18 = version[0] == 1 and version[1] == 8
IS_DJANGO19 = version[0] == 1 and version[1] == 9
IS_DJANGO19_OR_HIGHER = ((version[0] == 1 and version[1] >= 9) or version[0] > 1)
except:
pass
class DjangoLineBreakpoint(LineBreakpoint):
def __init__(self, file, line, condition, func_name, expression, hit_condition=None, is_logpoint=False):
self.file = file
LineBreakpoint.__init__(self, line, condition, func_name, expression, hit_condition=hit_condition, is_logpoint=is_logpoint)
def is_triggered(self, template_frame_file, template_frame_line):
return self.file == template_frame_file and self.line == template_frame_line
def __str__(self):
return "DjangoLineBreakpoint: %s-%d" %(self.file, self.line)
def add_line_breakpoint(plugin, pydb, type, file, line, condition, expression, func_name, hit_condition=None, is_logpoint=False):
if type == 'django-line':
breakpoint = DjangoLineBreakpoint(file, line, condition, func_name, expression, hit_condition=hit_condition, is_logpoint=is_logpoint)
if not hasattr(pydb, 'django_breakpoints'):
_init_plugin_breaks(pydb)
return breakpoint, pydb.django_breakpoints
return None
def add_exception_breakpoint(plugin, pydb, type, exception):
if type == 'django':
if not hasattr(pydb, 'django_exception_break'):
_init_plugin_breaks(pydb)
        pydb.django_exception_break[exception] = True
        pydb.set_tracing_for_untraced_contexts_if_not_frame_eval()
        return True
return False
def _init_plugin_breaks(pydb):
pydb.django_exception_break = {}
pydb.django_breakpoints = {}
def remove_exception_breakpoint(plugin, pydb, type, exception):
if type == 'django':
try:
del pydb.django_exception_break[exception]
return True
except:
pass
return False
def get_breakpoints(plugin, pydb, type):
if type == 'django-line':
return pydb.django_breakpoints
return None
def _inherits(cls, *names):
if cls.__name__ in names:
return True
inherits_node = False
for base in inspect.getmro(cls):
if base.__name__ in names:
inherits_node = True
break
return inherits_node
def _is_django_render_call(frame):
try:
name = frame.f_code.co_name
if name != 'render':
return False
if 'self' not in frame.f_locals:
return False
cls = frame.f_locals['self'].__class__
inherits_node = _inherits(cls, 'Node')
if not inherits_node:
return False
clsname = cls.__name__
if IS_DJANGO19:
            # in Django 1.9 we need to set a flag recording that there is an included template
if clsname == 'IncludeNode':
if 'context' in frame.f_locals:
context = frame.f_locals['context']
context._has_included_template = True
return clsname != 'TextNode' and clsname != 'NodeList'
except:
traceback.print_exc()
return False
def _is_django_context_get_call(frame):
try:
if 'self' not in frame.f_locals:
return False
cls = frame.f_locals['self'].__class__
return _inherits(cls, 'BaseContext')
except:
traceback.print_exc()
return False
def _is_django_resolve_call(frame):
try:
name = frame.f_code.co_name
if name != '_resolve_lookup':
return False
if 'self' not in frame.f_locals:
return False
cls = frame.f_locals['self'].__class__
clsname = cls.__name__
return clsname == 'Variable'
except:
traceback.print_exc()
return False
def _is_django_suspended(thread):
return thread.additional_info.suspend_type == DJANGO_SUSPEND
def suspend_django(main_debugger, thread, frame, cmd=CMD_SET_BREAK):
frame = DjangoTemplateFrame(frame)
if frame.f_lineno is None:
return None
pydevd_vars.add_additional_frame_by_id(get_thread_id(thread), {id(frame): frame})
main_debugger.set_suspend(thread, cmd)
thread.additional_info.suspend_type = DJANGO_SUSPEND
return frame
def _find_django_render_frame(frame):
while frame is not None and not _is_django_render_call(frame):
frame = frame.f_back
return frame
#=======================================================================================================================
# Django Frame
#=======================================================================================================================
def _read_file(filename):
# type: (str) -> str
if IS_PY2:
f = open(filename, 'r')
else:
f = open(filename, 'r', encoding='utf-8', errors='replace')
s = f.read()
f.close()
return s
def _offset_to_line_number(text, offset):
curLine = 1
curOffset = 0
while curOffset < offset:
if curOffset == len(text):
return -1
c = text[curOffset]
if c == '\n':
curLine += 1
elif c == '\r':
curLine += 1
if curOffset < len(text) and text[curOffset + 1] == '\n':
curOffset += 1
curOffset += 1
return curLine
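# Worked example (sketch): _offset_to_line_number("ab\ncd", 3) consumes 'a',
# 'b', and the newline (which bumps curLine to 2), so the character at
# offset 3 ('c') is reported as being on line 2.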
def _get_source_django_18_or_lower(frame):
# This method is usable only for the Django <= 1.8
try:
node = frame.f_locals['self']
if hasattr(node, 'source'):
return node.source
else:
if IS_DJANGO18:
# The debug setting was changed since Django 1.8
pydev_log.error_once("WARNING: Template path is not available. Set the 'debug' option in the OPTIONS of a DjangoTemplates "
"backend.")
else:
# The debug setting for Django < 1.8
pydev_log.error_once("WARNING: Template path is not available. Please set TEMPLATE_DEBUG=True in your settings.py to make "
"django template breakpoints working")
return None
except:
pydev_log.debug(traceback.format_exc())
return None
def _get_template_file_name(frame):
try:
if IS_DJANGO19:
# The Node source was removed since Django 1.9
if 'context' in frame.f_locals:
context = frame.f_locals['context']
if hasattr(context, '_has_included_template'):
                # if there was an included template, we need to inspect the previous frames and find its name
back = frame.f_back
while back is not None and frame.f_code.co_name in ('render', '_render'):
locals = back.f_locals
if 'self' in locals:
self = locals['self']
if self.__class__.__name__ == 'Template' and hasattr(self, 'origin') and \
hasattr(self.origin, 'name'):
return normcase(self.origin.name)
back = back.f_back
else:
if hasattr(context, 'template
|
JasonKessler/scattertext
|
demo_tokenizer_roberta.py
|
Python
|
apache-2.0
| 1,670
| 0.003593
|
from transformers import RobertaTokenizerFast
import scattertext as st
tokenizer_fast = RobertaTokenizerFast.from_pretrained(
"roberta-base", add_prefix_space=True)
tokenizer = st.RobertaTokenizerWrapper(tokenizer_fast)
df = st.SampleCorpora.ConventionData2012.get_data().assign(
parse = lambda df: df.text.apply(tokenizer.tokenize)
)
corpus = st.OffsetCorpusFactory(
df,
    category_col='party',
parsed_col='parse',
feat_and_offset_getter=st.TokenFeatAndOffsetGetter()
).build()
# Remove words that occur fewer than 5 times
corpus = corpus.remove_infrequent_words(5, non_text=True)
plot_df = corpus.get_metadata_freq_df('').assign(
Y=lambda df: df.democrat,
X=lambda df: df.republican,
Ypos=lambda df: st.Scalers.dense_rank(df.Y),
Xpos=lambda df: st.Scalers.dense_rank(df.X),
SuppressDisplay=False,
ColorScore=lambda df: st.Scalers.scale_center_zero(df.Ypos - df.Xpos),
)
html = st.dataframe_scattertext(
corpus,
plot_df=plot_df,
category='democrat',
category_name='Democratic',
not_category_name='Republican',
width_in_pixels=1000,
suppress_text_column='Display',
metadata=corpus.get_df()['speaker'],
use_non_text_features=True,
ignore_categories=False,
use_offsets=True,
unified_context=False,
color_score_column='ColorScore',
left_list_column='ColorScore',
    y_label='Democrats',
x_label='Republicans',
header_names={'upper': 'Top Democratic', 'lower': 'Top Republican', 'right': 'Most Frequent'},
subword_encoding='RoBERTa'
)
fn = 'roberta_sentence_piece.html'
with open(fn, 'w') as of:
of.write(html)
print("Open ./" + fn + ' in Chrome.')
|
justyns/home-assistant
|
tests/components/mqtt/test_init.py
|
Python
|
mit
| 11,668
| 0
|
"""The tests for the MQTT component."""
from collections import namedtuple
import unittest
from unittest import mock
import socket
import homeassistant.components.mqtt as mqtt
from homeassistant.const import (
EVENT_CALL_SERVICE, ATTR_DOMAIN, ATTR_SERVICE, EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP)
from tests.common import (
get_test_home_assistant, mock_mqtt_component, fire_mqtt_message)
class TestMQTT(unittest.TestCase):
"""Test the MQTT component."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant(1)
mock_mqtt_component(self.hass)
self.calls = []
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def record_calls(self, *args):
"""Helper for recording calls."""
self.calls.append(args)
def test_client_starts_on_home_assistant_start(self):
""""Test if client start on HA launch."""
self.hass.bus.fire(EVENT_HOMEASSISTANT_START)
self.hass.pool.block_till_done()
self.assertTrue(mqtt.MQTT_CLIENT.start.called)
    def test_client_stops_on_home_assistant_stop(self):
        """Test if the client stops on HA shutdown."""
        self.hass.bus.fire(EVENT_HOMEASSISTANT_START)
self.hass.pool.block_till_done()
self.hass.bus.fire(EVENT_HOMEASSISTANT_STOP)
self.hass.pool.block_till_done()
self.assertTrue(mqtt.MQTT_CLIENT.stop.called)
    def test_setup_fails_if_no_connect_broker(self):
"""Test for setup failure if connection to broker is missing."""
with mock.patch('homeassistant.components.mqtt.MQTT',
side_effect=socket.error()):
self.assertFalse(mqtt.setup(self.hass, {mqtt.DOMAIN: {
mqtt.CONF_BROKER: 'test-broker',
}}))
def test_publish_calls_service(self):
"""Test the publishing of call to services."""
self.hass.bus.listen_once(EVENT_CALL_SERVICE, self.record_calls)
mqtt.publish(self.hass, 'test-topic', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual(
'test-topic',
self.calls[0][0].data['service_data'][mqtt.ATTR_TOPIC])
self.assertEqual(
'test-payload',
self.calls[0][0].data['service_data'][mqtt.ATTR_PAYLOAD])
def test_service_call_without_topic_does_not_publish(self):
"""Test the service call if topic is missing."""
self.hass.bus.fire(EVENT_CALL_SERVICE, {
ATTR_DOMAIN: mqtt.DOMAIN,
ATTR_SERVICE: mqtt.SERVICE_PUBLISH
})
self.hass.pool.block_till_done()
self.assertTrue(not mqtt.MQTT_CLIENT.publish.called)
def test_service_call_with_template_payload_renders_template(self):
"""Test the service call with rendered template.
If 'payload_template' is provided and 'payload' is not, then render it.
"""
mqtt.publish_template(self.hass, "test/topic", "{{ 1+1 }}")
self.hass.pool.block_till_done()
self.assertTrue(mqtt.MQTT_CLIENT.publish.called)
self.assertEqual(mqtt.MQTT_CLIENT.publish.call_args[0][1], "2")
def test_service_call_with_payload_doesnt_render_template(self):
"""Test the service call with unrendered template.
If a 'payload' is provided then use that instead of 'payload_template'.
"""
payload = "not a template"
payload_template = "a template"
# Call the service directly because the helper functions don't allow
# you to provide payload AND payload_template.
self.hass.services.call(mqtt.DOMAIN, mqtt.SERVICE_PUBLISH, {
mqtt.ATTR_TOPIC: "test/topic",
mqtt.ATTR_PAYLOAD: payload,
mqtt.ATTR_PAYLOAD_TEMPLATE: payload_template
}, blocking=True)
self.assertTrue(mqtt.MQTT_CLIENT.publish.called)
self.assertEqual(mqtt.MQTT_CLIENT.publish.call_args[0][1], payload)
def test_service_call_without_payload_or_payload_template(self):
"""Test the service call without payload or payload template.
If neither 'payload' or 'payload_template' is provided then fail.
"""
# Call the service directly because the helper functions require you to
# provide a payload.
self.hass.services.call(mqtt.DOMAIN, mqtt.SERVICE_PUBLISH, {
mqtt.ATTR_TOPIC: "test/topic"
}, blocking=True)
self.assertFalse(mqtt.MQTT_CLIENT.publish.called)
def test_subscribe_topic(self):
"""Test the subscription of a topic."""
mqtt.subscribe(self.hass, 'test-topic', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual('test-topic', self.calls[0][0])
self.assertEqual('test-payload', self.calls[0][1])
def test_subscribe_topic_not_match(self):
"""Test if subscribed topic is not a match."""
mqtt.subscribe(self.hass, 'test-topic', self.record_calls)
fire_mqtt_message(self.hass, 'another-test-topic', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(0, len(self.calls))
def test_subscribe_topic_level_wildcard(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, 'test-topic/+/on', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic/bier/on', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual('test-topic/bier/on', self.calls[0][0])
self.assertEqual('test-payload', self.calls[0][1])
def test_subscribe_topic_level_wildcard_no_subtree_match(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, 'test-topic/+/on', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic/bier', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(0, len(self.calls))
def test_subscribe_topic_subtree_wildcard_subtree_topic(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, 'test-topic/#', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic/bier/on', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual('test-topic/bier/on', self.calls[0][0])
self.assertEqual('test-payload', self.calls[0][1])
def test_subscribe_topic_subtree_wildcard_root_topic(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, 'test-topic/#', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual('test-topic', self.calls[0][0])
self.assertEqual('test-payload', self.calls[0][1])
def test_subscribe_topic_subtree_wildcard_no_match(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, 'test-topic/#', self.record_calls)
fire_mqtt_message(self.hass, 'another-test-topic', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(0, len(self.calls))
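# Wildcard recap, as exercised above: '+' matches exactly one topic level
# ('test-topic/+/on' matches 'test-topic/bier/on' but not 'test-topic/bier'),
# while '#' matches the whole remaining subtree, including the parent topic
# 'test-topic' itself.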
class TestMQTTCallbacks(unittest.TestCase):
"""Test the MQTT callbacks."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant(1)
# mock_mqtt_component(self.hass)
with mock.patch('paho.mqtt.client.Client'):
mqtt.setup(self.hass, {
mqtt.DOMAIN: {
mqtt.CONF_BROKER: 'mock-broker',
}
})
self.hass.config.components.append(mqtt.DOMAIN)
def tearDown(self): # pylint: disable=invalid-name
"""Stop everythin
|
laenderoliveira/exerclivropy
|
cap09/exercicio-09-31.py
|
Python
|
mit
| 192
| 0.005291
|
import os.path
dir = "temp"
if os.path.isdir(dir):
print("Diretório tem
|
p existe")
elif os.path.isfile(dir):
print(
|
"Arquivo temp existe")
else:
print("Diretório temp não existe")
|
RafiKueng/SteMM
|
model.py
|
Python
|
mit
| 12,007
| 0.015658
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
PHOTOMETRYDEMO - model.py
the data model
Created on Tue Sep 23 12:09:45 2014
@author: rafik
"""
import os
import math
import numpy as np
import scipy as sp
import scipy.ndimage.interpolation
import pyfits
class Model(object):
def __init__(self):
self.name = None
self.masks = []
self.roi = None
self.ellipse = None
self.shape = None
self.scale = -1
self.psf = None
def getRegionCoords(self):
return self.roi.getRegionCoords()
def getMaskFilename(self, pfx=''):
filename = pfx+'mask.fits'
sx, sy = self.shape
renderedmask = np.ones((sx, sy), dtype=np.int)
for mask in self.masks:
if mask.type == 'mask':
pixels = mask.getCoveredPixels()
for px, py in pixels:
renderedmask[px, py] = 0
try:
os.remove(filename)
except OSError:
pass
hdu = pyfits.PrimaryHDU(np.rot90(renderedmask))
hdu.writeto(filename)
return filename
def createPSF(self, pfx=''):
#TODO
self.psf = PSF(pfx)
def getPhotometricZeropoint(self):
#TODO
return 0.0
def getPlateScale(self):
#TODO
return (0.001, 0.001)
class Selection(object):
def __init__(self, canv, nr, color):
self.canv = canv
s = self.canv.scale
print 'scale', s
self.x1, self.y1, self.x2, self.y2 = np.array([30,60,100,120])
self.nr = nr
self.color = color
self.type = None
tg = 'rect_%i' % nr
tg2 = 'rc_%i_' % nr
self.rect = canv.create_rectangle(self.x1*s, self.y1*s, self.x2*s, self.y2*s, tags=tg, outline=self.color)
self.p_a = canv.create_circle(self.x1*s,self.y1*s,5, fill=self.color, tags=tg2+'ul')
self.p_b = canv.create_circle(self.x2*s,self.y2*s,5, fill=self.color, tags=tg2+'lr')
def transform(self, pnt, x, y):
s = self.canv.scale
x /= s
y /= s
if pnt=="cp":
xo = (self.x1+self.x2) // 2
yo = (self.y1+self.y2) // 2
dx = self.x2 - xo
dy = self.y2 - yo
self.x1 = x-dx
self.y1 = y-dy
self.x2 = x+dx
self.y2 = y+dy
elif pnt=="ul":
self.x1 = x
self.y1 = y
elif pnt=='lr':
self.x2 = x
self.y2 = y
self.update()
def _update(self):
r=5
s = self.canv.scale
self.canv.coords(self.p_a, self.x1*s-r, self.y1*s-r, self.x1*s+r, self.y1*s+r)
self.canv.coords(self.p_b, self.x2*s-r, self.y2*s-r, self.x2*s+r, self.y2*s+r)
self.canv.coords(self.rect, self.x1*s, self.y1*s, self.x2*s, self.y2*s)
# inherit and overwrite
def update(self):
self._update()
#
class Mask(Selection):
def __init__(self, canv, nr):
Selection.__init__(self, canv, nr, color='yellow')
self.type = "mask"
s = self.canv.scale
self.lines = []
dx = -self.x1+self.x2
dx *= s
nlines = 10
for i in range(1,nlines+1):
x = i*dx/(nlines+1)
l = self.canv.create_line(self.x1*s+x, self.y1*s, self.x1*s+x, self.y2*s, tags='rect_%i' % self.nr, fill=self.color)
#print self.x1+x, self.y1, self.x1+x, self.y2
self.lines.append(l)
def update(self):
self._update()
s = self.canv.scale
dx = -self.x1+self.x2
for i in range(len(self.lines)):
x = (i+1)*dx/(len(self.lines)+1)*s
self.canv.coords(self.lines[i], self.x1*s+x, self.y1*s, self.x1*s+x, self.y2*s)
def getCoveredPixels(self):
px = []
minx = int(np.floor(np.min([self.x1, self.x2])))
maxx = int(np.floor(np.max([self.x1, self.x2])))
miny = int(np.floor(np.min([self.y1, self.y2])))
maxy = int(np.floor(np.max([self.y1, self.y2])))
for x in range(minx, maxx+1):
for y in range(miny, maxy+1):
px.append((x,y))
return px
#
# used to select a region of interest (the inside is selected)
class ROI(Selection):
def __init__(self, canv, nr):
print "init roi"
Selection.__init__(self, canv, nr, color='green')
self.type = "roi"
s = self.canv.scale
self.lines = []
dd = 5
sx = int(self.canv.cget('width'))
sy = int(self.canv.cget('height'))
#print sx, sy
for xx in range(0,sx):
#print xx, xx%dd, xx%dd==True
if xx%dd == 0:
minx = np.min([self.x1, self.x2])*s
maxx = np.max([self.x1, self.x2])*s
miny = np.min([self.y1, self.y2])*s
maxy = np.max([self.y1, self.y2])*s
if xx<minx or xx>maxx:
l1 = self.canv.create_line(xx, 0, xx, sy//2, tags='roi_%i' % self.nr, fill=self.color)
                    l2 = self.canv.create_line(xx, sy//2, xx, sy, tags='roi_%i' % self.nr, fill=self.color)
                else:
l1 = self.canv.create_line(xx, 0, xx, miny, tags='roi_%i' % self.nr, fill=self.color)
l2 = self.canv.create_line(xx, maxy, xx, sy, tags='roi_%i' % self.nr, fill=self.color)
self.lines.append((l1,l2))
def update(self):
self._update()
s = self.canv.scale
sx = int(self.canv.cget('width'))
sy = int(self.canv.cget('height'))
minx = np.min([self.x1, self.x2])*s
maxx = np.max([self.x1, self.x2])*s
miny = np.min([self.y1, self.y2])*s
maxy = np.max([self.y1, self.y2])*s
for l1, l2 in self.lines:
# x = (i+1)*dx/(len(self.lines)+1)
# self.canv.coords(self.lines[i], self.x1+x, self.y1, self.x1+x, self.y2)
ax1,ay1,ax2,ay2 = self.canv.coords(l1)
bx1,by1,bx2,by2 = self.canv.coords(l2)
xx = ax1
if xx<minx or xx>maxx:
self.canv.coords(l1, xx, 0, xx, sy//2)
self.canv.coords(l2, xx, sy//2, xx, sy)
else:
self.canv.coords(l1, xx, 0, xx, miny)
self.canv.coords(l2, xx, maxy, xx, sy)
def getRegionCoords(self):
minx = np.min([self.x1, self.x2])
maxx = np.max([self.x1, self.x2])
miny = np.min([self.y1, self.y2])
maxy = np.max([self.y1, self.y2])
return (minx, miny, maxx, maxy)
class Ellipse(object):
def __init__(self, canv):
self.canv = canv
self.xc = 300
self.yc = 300
self.a = 100
self.b = 50
self.r = np.pi / 4
s = self.canv.scale
pnts = self._poly_oval()
xa, ya, xb, yb = self._getHandlePoints()
self.poly = canv.create_polygon(pnts, fill='', outline='#fff', width=2, smooth=1, tags='poly')
self.p_a = canv.create_circle(xa,ya,5, fill='red', tags='pa')
self.p_b = canv.create_circle(xb,yb,5, fill='red', tags='pb')
self.p_c = canv.create_circle(self.xc*s,self.yc*s,5, fill='red', tags='pc')
def getCoords(self):
return (self.xc, self.yc)
    # R_e (half-light radius)
def getRe(self):
return np.sqrt(self.a * self.b)
def getAxisRatio(self):
return 1.0 * self.b / self.a
def getPositionAngle(self):
# should be in deg
        # measured from the upward y axis; stored internally from the horizontal x axis (+90)
return self.r / np.pi * 180 + 90
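        # Example (sketch): with self.r == pi/4 the internal angle is 45 deg
        # from the horizontal x axis, so this reports 45 + 90 == 135 deg
        # measured from the upward y axis.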
def _getHandlePoints(self):
s = self.canv.scale
xa = self.xc + self.a*np.cos(self.r)
ya = self.yc - self.a*np.sin(self.r)
xb = self.xc + self.b*np.cos(
|
eggplantbren/Lensing2
|
src/distances.py
|
Python
|
gpl-3.0
| 4,646
| 0.020017
|
"""
A module to compute cosmological distances, including:
comoving_distance (Dc)
angular_diameter_distance (Da)
luminosity_distance (Dl)
comoving_volume (volume)
"""
c = 299792458.
G = 4.3e-6
from math import pi
import warnings
warnings.warn("Default cosmology is Om=0.3,Ol=0.7,h=0.7,w=-1 and distance units are Mpc!",ImportWarning)
class Distance:
def __init__(self,cosmo=[0.3,0.7,0.7]):
self.OMEGA_M = cosmo[0]
self.OMEGA_L = cosmo[1]
self.h = cosmo[2]
self.w = -1.
self.wpars = None
self.w_analytic = False
self.Dc = self.comoving_distance
self.Dt = self.comoving_transverse_distance
self.Dm = self.comoving_transverse_distance
self.Da = self.angular_diameter_distance
self.Dl = self.luminosity_distance
self.dm = self.distance_modulus
self.volume = self.comoving_volume
def set(self,cosmo):
self.OMEGA_M = cosmo[0]
self.OMEGA_L = cosmo[1]
self.h = cosmo[2]
def reset(self):
self.OMEGA_M = 0.3
self.OMEGA_L = 0.7
self.h = 0.7
self.w = -1.
def age(self,z):
from scipy import integrate
f = lambda zp,m,l,k : (m/zp+k+l*zp**2)**-0.5
om = self.OMEGA_M
ol = self.OMEGA_L
ok = 1.-om-ol
return (9.778/self.h)*integrate.romberg(f,1e-300,1/(1.+z),(om,ol,ok))
def comoving_distance(self,z1,z2=0.):
from scipy import integrate
if z2<z1:
z1,z2 = z2,z1
def fa(z):
if self.w_analytic==True:
return self.w(z,self.wpars)
from math import exp
wa = lambda z : (1.+self.w(z,self.wpars))/(1.+z)
#return exp(3.*integrate.romberg(wa,0,z))
return exp(3.*integrate.quad(wa,0,z)[0])
if type(self.w)==type(self.comoving_distance) or type(self.w)==type(fa):
f = lambda z,m,l,k : (m*(1.+z)**3+k*(1.+z)**2+l*fa(z))**-0.5
elif self.w!=-1.:
f = lambda z,m,l,k : (m*(1.+z)**3+k*(1.+z)**2+l*(1.+z)**(3.*(1.+self.w)))**-0.5
else:
f = lambda z,m,l,k : (m*(1.+z)**3+k*(1.+z)**2+l)**-0.5
om = self.OMEGA_M
ol = self.OMEGA_L
ok = 1.-om-ol
# return (c/self.h)*integrate.romberg(f,z1,z2,(om,ol,ok))/1e5
return (c/self.h)*integrate.quad(f,z1,z2,(om,ol,ok))[0]/1e5
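        # In formula form this evaluates the line-of-sight comoving distance
        #     D_C = (c / H_0) * \int_{z_1}^{z_2} dz / E(z),
        #     E(z) = sqrt(Om*(1+z)^3 + Ok*(1+z)^2 + Ol*fa(z)),
        # where fa(z) reduces to 1 for a cosmological constant (w = -1).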
def comoving_transverse_distance(self,z1,z2=0.):
dc = 1e5*self.comoving_distance(z1,z2)/(c/self.h)
ok = 1.-self.OMEGA_M-self.OMEGA_L
if ok>0:
from math import sinh,sqrt
dtc = sinh(sqrt(ok)*dc)/sqrt(ok)
elif ok<0:
from math import sin,sqrt
ok *= -1.
dtc = sin(sqrt(ok)*dc)/sqrt(ok)
else:
dtc = dc
return (c/self.h)*dtc/1e5
def angular_diameter_distance(self,z1,z2=0.):
if z2<z1:
z1,z2 = z2,z1
return self.comoving_transverse_distance(z1,z2)/(1.+z2)
def luminosity_distance(self,z):
return (1.+z)*self.comoving_transverse_distance(z)
def comoving_volume(self,z1,z2=0.):
from scipy import integrate
if z2<z1:
z1,z2 = z2,z1
f = lambda z,m,l,k: (self.comoving_distance(0.,z)**2)/((m*(1.+z)**3+k*(1.+z)**2+l)**0.5)
om = self.OMEGA_M
ol = self.OMEGA_L
ok = 1.-om-ol
return 4*pi*(c/self.h)*integrate.romberg(f,z1,z2,(om,ol,ok))/1e5
def rho_crit(self,z):
H2 = (self.OMEGA_M*(1+z)**3 + self.OMEGA_L)*(self.h/10.)**2
return 3*H2/(8.*pi*G)
def distance_modulus(self,z):
from math import log10
return 5*log10(self.luminosity_distance(z)*1e5)
if __name__ == '__main__':
import numpy as np
Mpc = 3.08567758E22
cosmo = Distance()
zl = 0.4457
zs = 2.379
Dol = cosmo.angular_diameter_distance(0., zl)*Mpc
Dos = cosmo.angular_diameter_distance(0., zs)*Mpc
Dls = cosmo.angular_diameter_distance(zl, zs)*Mpc
# In SI units
Gnewton = 6.67384E-11
Msun = 1.9891E30
# Arcseconds to radians (example of a 5 arcsec Einstein ring)
b = 5.0 / 3600.0 * np.pi/180.0
# Dimensionless mass within the ring (assuming SIE/SIS/point mass)
# When you get the masses of blobs from the code, they replace this m
m = b**2*np.pi
    print(m*c**2*Dos*Dol/(4*np.pi*Gnewton*Dls)/Msun)
    # Another way of getting the mass for a 5 arcsec Einstein ring
# Angular einstein radius (arcseconds) of a solar mass
theta_0 = np.sqrt(4.*Gnewton*Msun/c**2*Dls/Dol/Dos)*(180./np.pi)*3600.
print((5.0/theta_0)**2)
# print(1./theta_0**2./np.pi)
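    # Sanity check (sketch): both prints estimate the mass inside a 5 arcsec
    # Einstein ring, M = theta_E^2 * c^2 * Dos * Dol / (4 * G * Dls). The
    # second route expresses theta_E in units of the 1-Msun Einstein radius
    # theta_0, so (5.0 / theta_0)**2 is the same mass in solar masses.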
|
vmalloc/pydeploy
|
tests/test__sources.py
|
Python
|
bsd-3-clause
| 10,265
| 0.005261
|
import os
import tempfile
from pkg_resources import Requirement
from infi.unittest import parameters
from .test_cases import ForgeTest
from pydeploy.environment import Environment
from pydeploy.environment_utils import EnvironmentUtils
from pydeploy.checkout_cache import CheckoutCache
from pydeploy.installer import Installer
from pydeploy import sources
from pydeploy.scm import git
from pydeploy import command
from pydeploy import exceptions
class SourceTest(ForgeTest):
def setUp(self):
super(SourceTest, self).setUp()
self.env = self.forge.create_mock(Environment)
self.env.installer = self.forge.create_mock(Installer)
self.env.utils = self.forge.create_mock(EnvironmentUtils)
class SourceFromStringTest(ForgeTest):
def setUp(self):
super(SourceFromStringTest, self).setUp()
self.S = sources.Source.from_anything
def test__git(self):
self.assertIsInstance(self.S("git://bla"), sources.Git)
def test__path(self):
filename = tempfile.mkdtemp()
self.assertIsInstance(self.S(filename), sources.Path)
def test__easy_install(self):
self.assertIsInstance(self.S("blablabla"), sources.EasyInstall)
def test__invalid_source(self):
for invalid_value in [2, 2.5, True]:
with self.assertRaises(ValueError):
self.S(invalid_value)
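# Behavioral recap of Source.from_anything, per the tests above: a 'git://...'
# URL yields sources.Git, an existing filesystem path yields sources.Path, any
# other string falls back to sources.EasyInstall, and non-string values raise
# ValueError.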
class PathSourceTest(SourceTest):
def setUp(self):
super(PathSourceTest, self).setUp()
self.path = tempfile.mkdtemp()
self.source = sources.Path(self.path)
def test__get_name(self):
self.assertEquals(self.source.get_name(), self.path)
def test__uses_expanduser(self):
source = sources.Path("~/a/b/c")
self.assertEquals(source._param, os.path.expanduser("~/a/b/c"))
def test__get_signature(self):
self.assertEquals(self.source.get_signature(), "Path({0})".format(self.path))
def test__checkout(self):
self.assertEquals(self.source.checkout(self.env), self.path)
with self.assertRaises(NotImplementedError):
self.source.checkout(self.env, '/another/path')
@parameters.toggle('reinstall')
def test__install(self, reinstall):
self.env.installer.install_unpacked_package(self.path, self.path, reinstall=reinstall)
self.forge.replay()
self.source.install(self.env, reinstall=reinstall)
class DelegateToPathInstallTest(SourceTest):
def setUp(self):
super(DelegateToPathInstallTest, self).setUp()
self.path_class = self.forge.create_class_mock(sources.Path)
self.orig_path_class = sources.Path
self.forge.replace_with(sources, "Path", self.path_class)
def expect_delegation_to_path_install(self, path, name, reinstall):
|
path_mock = self.forge.create_mock(self.orig_path_class)
self.path_class(path, name=name).and_return(path_mock)
return path_mock.install(self.env, reinstall=reinstall)
class GitSourceTest(DelegateToPathInstallTest):
def setUp(self):
        super(GitSourceTest, self).setUp()
self.repo_url = "some/repo/url"
self.branch = 'some_branch'
self.source = sources.Git(self.repo_url, self.branch)
self.forge.replace_many(git, "clone_to_or_update", "reset_submodules")
def test__master_is_default_branch(self):
self.assertEquals(sources.Git('bla')._branch, 'master')
def test__get_name(self):
self.assertEquals(self.source.get_name(), self.repo_url + "@" + self.branch)
def test__repr(self):
self.assertEquals(repr(self.source), 'Git({})'.format(self.source.get_name()))
def test__get_signature(self):
self.assertEquals(self.source.get_signature(), repr(self.source))
@parameters.toggle('reinstall')
def test__git_source_install(self, reinstall):
self.forge.replace(self.source, "checkout")
checkout_path = "some/checkout/path"
self.source.checkout(self.env).and_return(checkout_path)
self.expect_delegation_to_path_install(checkout_path, name=self.repo_url, reinstall=reinstall)
with self.forge.verified_replay_context():
self.source.install(self.env, reinstall=reinstall)
def test__git_source_checkout_with_path_argument(self):
checkout_path = "/some/path/to/checkout"
git.clone_to_or_update(url=self.repo_url, path=checkout_path, branch=self.branch)
git.reset_submodules(checkout_path)
with self.forge.verified_replay_context():
result = self.source.checkout(self.env, checkout_path)
self.assertIs(result, checkout_path)
def test__git_source_checkout_no_path_argument(self):
checkout_path = "/some/path/to/checkout"
checkout_cache = self.forge.create_mock(CheckoutCache)
self.env.get_checkout_cache().and_return(checkout_cache)
checkout_cache.get_checkout_path(self.repo_url).and_return(checkout_path)
git.clone_to_or_update(url=self.repo_url, branch=self.branch, path=checkout_path)
git.reset_submodules(checkout_path)
with self.forge.verified_replay_context():
result = self.source.checkout(self.env)
self.assertIs(result, checkout_path)
def test__git_identifies_git_prefix(self):
url = "git://bla"
source = sources.Source.from_anything(url)
self.assertIsInstance(source, sources.Git)
class GitContraintsTest(ForgeTest):
def setUp(self):
super(GitContraintsTest, self).setUp()
self.forge.replace(git, "get_remote_references_dict")
self.url = "some_url"
self.source = sources.Git(self.url)
def test__more_than_one_constraint_not_supported(self):
with self.assertRaises(NotImplementedError):
self.source.resolve_constraints([('>=', '2.0.0'), ('<=', '3.0.0')])
@parameters.iterate('tag', ['v2.0.0', '2.0.0'])
def test__exact_version_matches_tag(self, tag):
self._assert_chooses("x==2.0.0", {
git.Tag(tag) : "some_hash"
}, 'tags/{}'.format(tag))
def test__exact_version_with_no_match_raises_exception(self):
self._assert_no_match('x==2.0.0', {
git.Tag('bla') : 'h1',
git.Branch('bloop') : 'h2'
})
@parameters.iterate('branch_name', ['v2.0.0', '2.0.0'])
def test__minimum_version_inclusive_selects_exact(self, branch_name):
self._assert_chooses("x>=2.0.0", {
git.Branch(branch_name) : "h1"
}, branch_name)
@parameters.toggle('inclusive')
@parameters.iterate('branch_name', ['3.0.0', 'v3.0.0', '2.3.2', 'v2.3'])
def test__minimum_version_with_matches(self, inclusive, branch_name):
self._assert_chooses("x{0}2.0.0".format(">=" if inclusive else ">"), {
git.Branch(branch_name)
}, branch_name)
@parameters.toggle('inclusive')
@parameters.iterate('branch_name', ['2.0.0-a1', 'v2.0.0-b1', 'v1.9'])
def test__minimum_version_without_matches(self, inclusive, branch_name):
self._assert_no_match("x{0}2.0.0".format(">=" if inclusive else ">"), {
git.Branch(branch_name)
})
@parameters.toggle('inclusive')
def test__unbound_version_takes_from_master(self, inclusive):
self._assert_chooses("x{0}2.0.0".format(">=" if inclusive else ">"), {
git.Branch('master')
}, 'master')
def _assert_chooses(self, requirement, options, chosen):
requirement = Requirement.parse(requirement)
git.get_remote_references_dict(self.url).and_return(options)
self.forge.replay()
new_source = self.source.resolve_constraints(requirement.specs)
self.assertIsInstance(new_source, sources.Git)
self.assertEquals(new_source._url, self.url)
self.assertEquals(new_source._branch, chosen)
def _assert_no_match(self, requirement, options):
specs = Requirement.parse(requirement).specs
git.get_remote_references_dict(self.url).and_return(options)
self.forge.replay()
with self.assertRaises(exceptions.RequiredVersionNotFound):
self.source.resolve_constraints(specs)
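# A summary of the resolution behavior exercised above (inferred from the
# tests, not part of the original source): a single "==X" spec resolves to a
# remote tag named X or vX; ">=X" / ">X" specs resolve to a branch whose
# version-like name satisfies the bound, with a plain 'master' branch
# accepted when no version-named branch matches; combining constraints raises
# NotImplementedError, and no match raises exceptions.RequiredVersionNotFound.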
class ExternalToolSourceTest(SourceTest):
d
|
werbk/task-2.1
|
tests_contract/contract_lib.py
|
Python
|
apache-2.0
| 5,896
| 0.002035
|
from TestBase import BaseClass
class ContractBase(BaseClass):
def add_contract(self):
wd = self.wd
wd.find_element_by_link_text("add new").click()
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys()
def add_full_name(self, first_name=None, middle_name=None, last_name=None, nickname=None):
wd = self.wd
if first_name:
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys("%s" % first_name)
if middle_name:
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("middlename").clear()
wd.find_element_by_name("middlename").send_keys("%s" % middle_name)
if last_name:
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys("%s" % last_name)
if nickname:
            wd.find_element_by_name("nickname").click()
wd.find_element_by_name("nickname").clear()
wd.find_element_by_name("nickname").send_keys("%s" % nickname)
def add_title(self, title):
wd = self.wd
wd.find_element_by_name("title").click()
wd.find_element_by_name("title").clear()
wd.find_element_by_name("title").send_keys("%s" % title)
def add_company(self, company_name):
wd = self.wd
wd.find_element_by_name("company").cli
|
ck()
wd.find_element_by_name("company").clear()
wd.find_element_by_name("company").send_keys("%s" % company_name)
def add_address(self, address_name):
wd = self.wd
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").clear()
wd.find_element_by_name("address").send_keys("%s" % address_name)
def add_phone_number(self, home=None, mobile=None, work=None, fax=None):
wd = self.wd
if home:
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").clear()
wd.find_element_by_name("home").send_keys("%s" % home)
if mobile:
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").clear()
wd.find_element_by_name("mobile").send_keys("%s" % mobile)
if work:
wd.find_element_by_name("work").click()
wd.find_element_by_name("work").clear()
wd.find_element_by_name("work").send_keys("%s" % work)
if fax:
wd.find_element_by_name("fax").click()
wd.find_element_by_name("fax").clear()
wd.find_element_by_name("fax").send_keys("%s" % fax)
def add_email(self, email1=None, email2=None, email3=None):
wd = self.wd
if email1:
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys("%s" % email1)
if email2:
wd.find_element_by_name("email2").click()
wd.find_element_by_name("email2").clear()
wd.find_element_by_name("email2").send_keys("%s" % email2)
if email3:
wd.find_element_by_name("email3").click()
wd.find_element_by_name("email3").clear()
wd.find_element_by_name("email3").send_keys("%s" % email3)
def add_homepage(self, homepage=None):
wd = self.wd
wd.find_element_by_name("homepage").click()
wd.find_element_by_name("homepage").clear()
wd.find_element_by_name("homepage").send_keys("%s" % homepage)
def add_year(self):
wd = self.wd
        # In the future we could add a function that accepts a date and selects it, reusing the same pattern as the helpers above.
if not wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[3]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[3]").click()
if not wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[2]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[2]").click()
wd.find_element_by_name("byear").click()
wd.find_element_by_name("byear").clear()
wd.find_element_by_name("byear").send_keys("1999")
if not wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[3]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[3]").click()
if not wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[2]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[2]").click()
wd.find_element_by_name("ayear").click()
wd.find_element_by_name("ayear").clear()
wd.find_element_by_name("ayear").send_keys("1999")
def add_secondary_adress(self, address):
wd = self.wd
wd.find_element_by_name("address2").click()
wd.find_element_by_name("address2").clear()
wd.find_element_by_name("address2").send_keys("%s" % address)
def add_secondary_home(self, phone):
wd = self.wd
wd.find_element_by_name("phone2").click()
wd.find_element_by_name("phone2").clear()
wd.find_element_by_name("phone2").send_keys("%s" % phone)
def add_secondary_notes(self, notes):
wd = self.wd
wd.find_element_by_name("notes").click()
wd.find_element_by_name("notes").clear()
wd.find_element_by_name("notes").send_keys("%s" % notes)
def restore_contract(self):
self.wd.find_element_by_link_text("Logout").click()
self.wd.quit()
|
rbarlow/ari-backup
|
ari_backup/zfs.py
|
Python
|
bsd-3-clause
| 9,110
| 0.003952
|
"""ZFS based backup workflows."""
import datetime
import shlex
import gflags
import lvm
import workflow
FLAGS = gflags.FLAGS
gflags.DEFINE_string('rsync_options',
'--archive --acls --numeric-ids --delete --inplace',
'rsync command options')
gflags.DEFINE_string('rsync_path', '/usr/bin/rsync', 'path to rsync binary')
gflags.DEFINE_string('zfs_snapshot_prefix', 'ari-backup-',
'prefix for historical ZFS snapshots')
gflags.DEFINE_string('zfs_snapshot_timestamp_format', '%Y-%m-%d--%H%M',
'strftime() formatted timestamp used when naming new ZFS snapshots')
class ZFSLVMBackup(lvm.LVMSourceMixIn, workflow.BaseWorkflow):
"""Workflow for backing up a logical volume to a ZFS dataset.
  Data is copied from an LVM snapshot to a ZFS dataset using rsync and then
ZFS commands are issued to create historical snapshots. The ZFS snapshot
lifecycle is also managed by this class. When a backup completes, snapshots
older than snapshot_expiration_days are destroyed.
This approach has some benefits over rdiff-backup in that all backup
datapoints are easily browseable and replication of the backup data using
ZFS streams is generally less resource intensive than using something like
rsync to mirror the files created by rdiff-backup.
One downside is that it's easier to store all file metadata using
rdiff-backup. Rsync can only store metadata for files that the destination
file system can also store. For example, if extended file system
attributes are used on the source file system, but aren't available on the
destination, rdiff-backup will still record those attributes in its own
files. If faced with that same scenario, rsync would lose those attributes.
Furthermore, rsync must have root privilege to write arbitrary file
metadata.
New post-job hooks are added for creating ZFS snapshots and trimming old
ones.
"""
def __init__(self, label,
|
source_hostname, rsync_dst, zfs_hostname,
dataset_name, snapshot_expiration_days, **kwa
|
rgs):
"""Configure a ZFSLVMBackup object.
Args:
label: str, label for the backup job (e.g. database-server1).
source_hostname: str, the name of the host with the source data to
backup.
rsync_dst: str, the destination argument for the rsync command line
(e.g. backupbox:/backup-store/database-server1).
zfs_hostname: str, the name of the backup destination host where we will
be managing the ZFS snapshots.
dataset_name: str, the full ZFS path (not file system path) to the
dataset holding the backups for this job
(e.g. tank/backup-store/database-server1).
      snapshot_expiration_days: int, the maximum age of a ZFS snapshot in days.
Pro tip: It's a good practice to reuse the label argument as the last
path component in the rsync_dst and dataset_name arguments.
"""
# Call our super class's constructor to enable LVM snapshot management
super(ZFSLVMBackup, self).__init__(label, **kwargs)
# Assign instance vars specific to this class.
self.source_hostname = source_hostname
self.rsync_dst = rsync_dst
self.zfs_hostname = zfs_hostname
self.dataset_name = dataset_name
# Assign flags to instance vars so they might be easily overridden in
# workflow configs.
self.rsync_options = FLAGS.rsync_options
self.rsync_path = FLAGS.rsync_path
self.zfs_snapshot_prefix = FLAGS.zfs_snapshot_prefix
self.zfs_snapshot_timestamp_format = FLAGS.zfs_snapshot_timestamp_format
self.add_post_hook(self._create_zfs_snapshot)
self.add_post_hook(self._destroy_expired_zfs_snapshots,
{'days': snapshot_expiration_days})
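  # A minimal usage sketch, reusing the example values from the docstring
  # above (snapshot_expiration_days=30 is an arbitrary illustration):
  #   job = ZFSLVMBackup(label='database-server1',
  #                      source_hostname='database-server1',
  #                      rsync_dst='backupbox:/backup-store/database-server1',
  #                      zfs_hostname='backupbox',
  #                      dataset_name='tank/backup-store/database-server1',
  #                      snapshot_expiration_days=30)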
def _get_current_datetime(self):
"""Returns datetime object with the current date and time.
This method is mostly useful for testing purposes.
"""
return datetime.datetime.now()
def _run_custom_workflow(self):
"""Run rsync backup of LVM snapshot to ZFS dataset."""
# TODO(jpwoodbu) Consider throwing an exception if we see things in the
# include or exclude lists since we don't use them in this class.
self.logger.debug('ZFSLVMBackup._run_custom_workflow started.')
# Since we're dealing with ZFS datasets, let's always exclude the .zfs
# directory in our rsync options.
rsync_options = shlex.split(self.rsync_options) + ['--exclude', '/.zfs']
# We add a trailing slash to the src path otherwise rsync will make a
# subdirectory at the destination, even if the destination is already a
# directory.
rsync_src = self._snapshot_mount_point_base_path + '/'
command = [self.rsync_path] + rsync_options + [rsync_src, self.rsync_dst]
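    # With the default flags, the assembled command looks roughly like
    # (illustrative; src and dst come from instance configuration):
    #   /usr/bin/rsync --archive --acls --numeric-ids --delete --inplace
    #     --exclude /.zfs <snapshot-mount-base>/ <rsync_dst>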
self.run_command(command, self.source_hostname)
self.logger.debug('ZFSLVMBackup._run_custom_workflow completed.')
def _create_zfs_snapshot(self, error_case):
"""Creates a new ZFS snapshot of our destination dataset.
The name of the snapshot will include the zfs_snapshot_prefix provided by
FLAGS and a timestamp. The zfs_snapshot_prefix is used by
    _find_snapshots_older_than() when deciding which snapshots to
destroy. The timestamp encoded in a snapshot name is only for end-user
convenience. The creation metadata on the ZFS snapshot is what is used to
determine a snapshot's age.
This method does nothing if error_case is True.
Args:
error_case: bool, whether an error has occurred during the backup.
"""
if not error_case:
self.logger.info('Creating ZFS snapshot...')
timestamp = self._get_current_datetime().strftime(
self.zfs_snapshot_timestamp_format)
snapshot_name = self.zfs_snapshot_prefix + timestamp
snapshot_path = '{dataset_name}@{snapshot_name}'.format(
dataset_name=self.dataset_name, snapshot_name=snapshot_name)
command = ['zfs', 'snapshot', snapshot_path]
self.run_command(command, self.zfs_hostname)
def _find_snapshots_older_than(self, days):
"""Returns snapshots older than the given number of days.
Only snapshots that meet the following criteria are returned:
1. They were created at least "days" ago.
2. Their name is prefixed with FLAGS.zfs_snapshot_prefix.
Args:
days: int, the minimum age of the snapshots in days.
Returns:
A list of filtered snapshots.
"""
expiration = self._get_current_datetime() - datetime.timedelta(days=days)
# Let's find all the snapshots for this dataset.
command = ['zfs', 'get', '-rH', '-o', 'name,value', 'type',
self.dataset_name]
stdout, unused_stderr = self.run_command(command, self.zfs_hostname)
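    # 'zfs get -rH -o name,value type' emits tab-separated "name<TAB>type"
    # lines, e.g. (illustrative):
    #   tank/backup-store/db1\tfilesystem
    #   tank/backup-store/db1@ari-backup-2016-01-01--0300\tsnapshot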
snapshots = list()
# Sometimes we get extra lines which are empty, so we'll strip the lines.
for line in stdout.strip().splitlines():
name, dataset_type = line.split('\t')
if dataset_type == 'snapshot':
# Let's try to only consider destroying snapshots made by us ;)
if name.split('@')[1].startswith(self.zfs_snapshot_prefix):
snapshots.append(name)
expired_snapshots = list()
for snapshot in snapshots:
creation_time = self._get_snapshot_creation_time(snapshot)
if creation_time <= expiration:
expired_snapshots.append(snapshot)
return expired_snapshots
def _get_snapshot_creation_time(self, snapshot):
"""Gets the creation time of a snapshot as a Python datetime object
Args:
snapshot: str, the fule ZFS path to the snapshot.
Returns:
A datetime object representing the creation time of the snapshot.
"""
command = ['zfs', 'get', '-H', '-o', 'value', 'creation', snapshot]
stdout, unused_stderr = self.run_command(command, self.zfs_hostname)
return datetime.datetime.strptime(stdout.strip(), '%a %b %d %H:%M %Y')
def _destroy_expired_zfs_snapshots(self, days, error_case):
"""Destroy snapshots older than the given numnber of days.
Any snapshots in the target dataset with a name that starts with
FLAGS.zfs_snapshot_prefix and a creation date older than days will be
destroyed. Depe
|
lilsweetcaligula/Online-Judges
|
hackerrank/algorithms/implementation/easy/kangaroo/py/solution.py
|
Python
|
mit
| 215
| 0.009302
|
#!/bin/python3
import sys
x1, v1, x2, v2 = map(int, input().strip().split(' '))
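# The kangaroos meet when x1 + t*v1 == x2 + t*v2 for some jump count t >= 0,
# i.e. t = (x1 - x2) / (v2 - v1). Landing on the same spot therefore requires
# t to be a non-negative integer, which is exactly what the divisibility and
# sign checks below verify (after ruling out equal velocities).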
willLand = (
v1 != v2
and (x1 - x2) % (v2 - v1) == 0
and (x1 - x2) // (v2 - v1) >= 0)
print(('NO', 'YES')[willLand])
|
sergeneren/anima
|
anima/ui/ui_compiled/version_updater_UI_pyside2.py
|
Python
|
bsd-2-clause
| 3,651
| 0.003835
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_files\version_updater.ui'
#
# Created: Thu Nov 10 15:32:30 2016
# by: pyside2-uic running on PySide2 2.0.0~alpha0
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.setWindowModality(QtCore.Qt.ApplicationModal)
Dialog.resize(1304, 753)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtWidgets.QLabel(Dialog)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.versions_treeView = QtWidgets.QTreeView(Dialog)
self.versions_treeView.setObjectName("versions_treeView")
self.verticalLayout.addWidget(self.versions_treeView)
self.horizontalWidget = QtWidgets.QWidget(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.horizontalWidget.sizePolicy().hasHeightForWidth())
self.horizontalWidget.setSizePolicy(sizePolicy)
self.horizontalWidget.setObjectName("horizontalWidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalWidget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.selectNone_pushButton = QtWidgets.QPushButton(self.horizontalWidget)
self.selectNone_pushButton.setObjectName("selectNone_pushButton")
self.horizontalLayout.addWidget(self.selectNone_pushButton)
self.selectAll_pushButton = QtWidgets.QPushButton(self.horizontalWidget)
self.selectAll_pushButton.setObjectName("selectAll_pushButton")
self.horizontalLayout.addWidget(self.selectAll_pushButton)
self.update_pushButton = QtWidgets.QPushButton(self.horizontalWidget)
self.update_pushButton.setObjectName("update_pushButton")
        self.horizontalLayout.addWidget(self.update_pushButton)
self.cancel_pushButton = QtWidgets.QPushButton(self.horizontalWidget)
self.cancel_pushButton.setObjectName("cancel_pushButton")
self.horizontalLayout.addWidget(self.cancel_pushButton)
self.verticalLayout.addWidget(self.horizontalWidget)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtWidgets.QApplication.translate("Dialog", "Version Updater", None, -1))
self.label.setText(QtWidgets.QApplication.translate("Dialog", "<html><head/><body><p><span style=\" color:#c00000;\">Red Versions need update,</span><span style=\" color:#00c000;\">Greens are OK</span>, check the Versions that you want to trigger an update.</p></body></html>", None, -1))
self.selectNone_pushButton.setText(QtWidgets.QApplication.translate("Dialog", "Select None", None, -1))
self.selectAll_pushButton.setText(QtWidgets.QApplication.translate("Dialog", "Select All", None, -1))
self.update_pushButton.setText(QtWidgets.QApplication.translate("Dialog", "Update", None, -1))
self.cancel_pushButton.setText(QtWidgets.QApplication.translate("Dialog", "Cancel", None, -1))
|
biocore/american-gut-rest
|
agr/schema.py
|
Python
|
bsd-3-clause
| 5,875
| 0.00034
|
# ----------------------------------------------------------------------------
# Copyright (c) 2011-2015, The American Gut Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from psycopg2 import connect
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
import agr
# table definitions, these are of the form: [(table_name, table_definition)].
# the motivation for this structure is to allow for checking if tables exist
# easily (see schema_is_sane)
tables = [
('biom',
"""create table biom (
sample varchar,
biom json,
biomtxt text,
constraint pk_biom primary key(sample)
)"""),
('metadata',
"""create table metadata (
sample varchar,
category varchar,
value varchar,
constraint pk_metadata primary key (sample, category),
constraint fk_metadata foreign key (sample) references biom(sample)
)"""),
('fastq',
"""create table fastq (
sample varchar,
url varchar,
constraint pk_fastq primary key (sample),
constraint fk_fastq foreign key (sample) references biom(sample),
constraint uc_fastq unique (url)
)"""),
('state',
"""create table state (
biom_sha varchar)""")
]
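# schema_is_sane() below checks only the table names from this list against
# information_schema.tables, while create_tables() executes each definition
# in order; biom must come first because metadata and fastq declare foreign
# keys against it.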
def database_connectivity(user=agr.db_user, password=agr.db_password,
host=agr.db_host, dbname=agr.db_name):
"""Determine if we can connect to the database
    Parameters
    ----------
    user : str
        The database username
password : str
The password for the user
host : str
The database host
Returns
-------
bool
True if a connection was made, False otherwise
"""
try:
c = connect(user=user, password=password, host=host, dbname=dbname)
except:
return False
else:
c.close()
return True
def database_exists(user=agr.db_user, password=agr.db_password,
host=agr.db_host, dbname=agr.db_name):
"""Determine if the database exists
    Parameters
    ----------
    user : str
        The database username
password : str
The password for the user
host : str
The database host
dbname : str
The name of the database to connect to
Returns
-------
bool
True if the database exists, False otherwise
"""
try:
c = connect(user=user, password=password, host=host, dbname=dbname)
except:
return False
else:
c.close()
return True
def schema_is_sane():
"""Check to see if the expected tables exist
Notes
-----
Assumes we have connectivity and the database exists.
    The structure of the tables is _not_ verified; only the existence of the
    expected table names is checked.
    Database credentials are sourced from the agr module (e.g., the environment
    configuration).
Returns
-------
bool
The expected tables appear to exist
"""
c = connect(user=agr.db_user, password=agr.db_password,
host=agr.db_host, dbname=agr.db_name)
cur = c.cursor()
for table_name, _ in tables:
cur.execute("""select exists(select *
from information_schema.tables
where table_name=%s)""", [table_name])
if not cur.fetchone()[0]:
return False
return True
def schema_has_data():
"""Check to see if the schema appears to have data
Notes
-----
Assumes we have connectivity and the database exists.
    The structure of the tables is _not_ verified; this only checks that the
    tables appear to contain rows.
    Database credentials are sourced from the agr module (e.g., the environment
    configuration).
Returns
-------
bool
If all of the tables appear to have data.
"""
if not schema_is_sane():
return False
c = connect(user=agr.db_user, password=agr.db_password,
host=agr.db_host, dbname=agr.db_name)
cur = c.cursor()
for table_name, _ in tables:
cur.execute("select count(1) from %s" % table_name)
if cur.fetchone()[0] == 0:
return False
return True
def create_database():
"""Create the database and the schema
Notes
-----
Assumes we have connectivity.
    Database credentials are sourced from the agr module (e.g., the environment
    configuration).
"""
c = connect(user=agr.admin_db_user, password=agr.admin_db_password,
host=agr.db_host)
c.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
cur = c.cursor()
cur.execute('drop database if exists ag_rest')
cur.execute('create database %s' % agr.db_name)
cur.close()
c.close()
create_tables()
def create_tables():
"""Create the tables"""
c = connect(user=agr.admin_db_user, password=agr.admin_db_password,
host=agr.db_host, dbname=agr.db_name)
c.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
cur = c.cursor()
for _, table in tables:
cur.execute(table)
if __name__ == '__main__':
import sys
if not database_connectivity():
sys.stderr.write("Cannot connect to the database\n")
sys.exit(1)
if not agr.test_environment:
if sys.argv[1] == 'FORCE_CREATE_TABLES':
create_tables()
sys.exit(0)
else:
sys.stderr.write("This does not appear to be a test environment\n")
sys.exit(1)
if database_exists() and schema_is_sane() and schema_has_data():
sys.exit(0)
else:
create_database()
|
raphui/barebox
|
scripts/remote/messages.py
|
Python
|
gpl-2.0
| 4,045
| 0.000247
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import struct
class BBType(object):
command = 1
command_return = 2
consolemsg = 3
ping = 4
pong = 5
getenv = 6
getenv_return = 7
fs = 8
fs_return = 9
class BBPacket(object):
def __init__(self, p_type=0, p_flags=0, payload="", raw=None):
self.p_type = p_type
self.p_flags = p_flags
if raw is not None:
self.unpack(raw)
else:
self.payload = payload
def __repr__(self):
return "BBPacket(%i, %i)" % (self.p_type, self.p_flags)
def _unpack_payload(self, data):
self.payload = data
def _pack_payload(self):
return self.payload
def unpack(self, data):
self.p_type, self.p_flags = struct.unpack("!HH", data[:4])
self._unpack_payload(data[4:])
def pack(self):
return struct.pack("!HH", self.p_type, self.p_flags) + \
self._pack_payload()
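# Wire format implied by pack()/unpack(): a 4-byte big-endian header holding
# uint16 p_type and uint16 p_flags, followed by the raw payload bytes. For
# example, BBPacketPing().pack() yields '\x00\x04\x00\x00', since
# BBType.ping == 4, the flags are 0, and a ping carries no payload.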
class BBPacketCommand(BBPacket):
def __init__(self, raw=None, cmd=None):
self.cmd = cmd
super(BBPacketCommand, self).__init__(BBType.command, raw=raw)
def __repr__(self):
return "BBPacketCommand(cmd=%r)" % self.cmd
def _unpack_payload(self, payload):
self.cmd = payload
def _pack_payload(self):
return self.cmd
class BBPacketCommandReturn(BBPacket):
def __init__(self, raw=None, exit_code=None):
self.exit_code = exit_code
super(BBPacketCommandReturn, self).__init__(BBType.command_return,
raw=raw)
def __repr__(self):
return "BBPacketCommandReturn(exit_code=%i)" % self.exit_code
def _unpack_payload(self, data):
        self.exit_code, = struct.unpack("!L", data[:4])
def _pack_payload(self):
return struct.pack("!L", self.exit_code)
class BBPacketConsoleMsg(BBPacket):
def __init__(self, raw=None, text=None):
self.text = text
super(BBPacketConsoleMsg, self).__init__(BBType.consolemsg, raw=raw)
def __repr__(self):
return "BBPacketConsoleMsg(text=%r)" % self.text
def _unpack_payload(self, payload):
self.text = payload
def _pack_pa
|
yload(self):
return self.text
class BBPacketPing(BBPacket):
def __init__(self, raw=None):
super(BBPacketPing, self).__init__(BBType.ping, raw=raw)
def __repr__(self):
return "BBPacketPing()"
class BBPacketPong(BBPacket):
def __init__(self, raw=None):
super(BBPacketPong, self).__init__(BBType.pong, raw=raw)
def __repr__(self):
return "BBPacketPong()"
class BBPacketGetenv(BBPacket):
def __init__(self, raw=None, varname=None):
self.varname = varname
super(BBPacketGetenv, self).__init__(BBType.getenv, raw=raw)
def __repr__(self):
return "BBPacketGetenv(varname=%r)" % self.varname
def _unpack_payload(self, payload):
self.varname = payload
def _pack_payload(self):
return self.varname
class BBPacketGetenvReturn(BBPacket):
def __init__(self, raw=None, text=None):
self.text = text
super(BBPacketGetenvReturn, self).__init__(BBType.getenv_return,
raw=raw)
def __repr__(self):
return "BBPacketGetenvReturn(varvalue=%s)" % self.text
def _unpack_payload(self, payload):
self.text = payload
def _pack_payload(self):
return self.text
class BBPacketFS(BBPacket):
def __init__(self, raw=None, payload=None):
super(BBPacketFS, self).__init__(BBType.fs, payload=payload, raw=raw)
def __repr__(self):
return "BBPacketFS(payload=%r)" % self.payload
class BBPacketFSReturn(BBPacket):
def __init__(self, raw=None, payload=None):
super(BBPacketFSReturn, self).__init__(BBType.fs_return, payload=payload, raw=raw)
def __repr__(self):
return "BBPacketFSReturn(payload=%r)" % self.payload
|
icchy/tracecorn
|
unitracer/lib/windows/amd64/gdi32.py
|
Python
|
mit
| 12,573
| 0.021236
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Wrapper for gdi32.dll in ctypes.
"""
__revision__ = "$Id: gdi32.py 1299 2013-12-20 09:30:55Z qvasimodo $"
from defines import *
from kernel32 import GetLastError, SetLastError
#==============================================================================
# This is used later on to calculate the list of exported symbols.
_all = None
_all = set(vars().keys())
#==============================================================================
#--- Helpers ------------------------------------------------------------------
#--- Types --------------------------------------------------------------------
#--- Constants ----------------------------------------------------------------
# GDI object types
OBJ_PEN = 1
OBJ_BRUSH = 2
OBJ_DC = 3
OBJ_METADC = 4
OBJ_PAL = 5
OBJ_FONT = 6
OBJ_BITMAP = 7
OBJ_REGION = 8
OBJ_METAFILE = 9
OBJ_MEMDC = 10
OBJ_EXTPEN = 11
OBJ_ENHMETADC = 12
OBJ_ENHMETAFILE = 13
OBJ_COLORSPACE = 14
GDI_OBJ_LAST = OBJ_COLORSPACE
# Ternary raster operations
SRCCOPY = 0x00CC0020 # dest = source
SRCPAINT = 0x00EE0086 # dest = source OR dest
SRCAND = 0x008800C6 # dest = source AND dest
SRCINVERT = 0x00660046 # dest = source XOR dest
SRCERASE = 0x00440328 # dest = source AND (NOT dest)
NOTSRCCOPY = 0x00330008 # dest = (NOT source)
NOTSRCERASE = 0x001100A6 # dest = (NOT src) AND (NOT dest)
MERGECOPY = 0x00C000CA # dest = (source AND pattern)
MERGEPAINT = 0x00BB0226 # dest = (NOT source) OR dest
PATCOPY = 0x00F00021 # dest = pattern
PATPAINT = 0x00FB0A09 # dest = DPSnoo
PATINVERT = 0x005A0049 # dest = pattern XOR dest
DSTINVERT = 0x00550009 # dest = (NOT dest)
BLACKNESS = 0x00000042 # dest = BLACK
WHITENESS = 0x00FF0062 # dest = WHITE
NOMIRRORBITMAP = 0x80000000 # Do not Mirror the bitmap in this call
CAPTUREBLT = 0x40000000 # Include layered windows
# Region flags
ERROR = 0
NULLREGION = 1
SIMPLEREGION = 2
COMPLEXREGION = 3
RGN_ERROR = ERROR
# CombineRgn() styles
RGN_AND = 1
RGN_OR = 2
RGN_XOR = 3
RGN_DIFF = 4
RGN_COPY = 5
RGN_MIN = RGN_AND
RGN_MAX = RGN_COPY
# StretchBlt() modes
BLACKONWHITE = 1
WHITEONBLACK = 2
COLORONCOLOR = 3
HALFTONE = 4
MAXSTRETCHBLTMODE = 4
STRETCH_ANDSCANS = BLACKONWHITE
STRETCH_ORSCANS = WHITEONBLACK
STRETCH_DELETESCANS = COLORONCOLOR
STRETCH_HALFTONE = HALFTONE
# PolyFill() modes
ALTERNATE = 1
WINDING = 2
POLYFILL_LAST = 2
# Layout orientation options
LAYOUT_RTL = 0x00000001 # Right to left
LAYOUT_BTT = 0x00000002 # Bottom to top
LAYOUT_VBH = 0x00000004 # Vertical before horizontal
LAYOUT_ORIENTATIONMASK = LAYOUT_RTL + LAYOUT_BTT + LAYOUT_VBH
LAYOUT_BITMAPORIENTATIONPRESERVED = 0x00000008
# Stock objects
WHITE_BRUSH = 0
LTGRAY_BRUSH = 1
GRAY_BRUSH = 2
DKGRAY_BRUSH = 3
BLACK_BRUSH = 4
NULL_BRUSH = 5
HOLLOW_BRUSH = NULL_BRUSH
WHITE_PEN = 6
BLACK_PEN = 7
NULL_PEN = 8
OEM_FIXED_FONT = 10
ANSI_FIXED_FONT = 11
ANSI_VAR_FONT = 12
SYSTEM_FONT = 13
DEVICE_DEFAULT_FONT = 14
DEFAULT_PALETTE = 15
SYSTEM_FIXED_FONT = 16
# Metafile functions
META_SETBKCOLOR = 0x0201
META_SETBKMODE = 0x0102
META_SETMAPMODE = 0x0103
META_SETROP2 = 0x0104
META_SETRELABS = 0x0105
META_SETPOLYFILLMODE = 0x0106
META_SETSTRETCHBLTMODE = 0x0107
META_SETTEXTCHAREXTRA = 0x0108
META_SETTEXTCOLOR = 0x0209
META_SETTEXTJUSTIFICATION = 0x020A
META_SETWINDOWORG = 0x020B
META_SETWINDOWEXT = 0x020C
META_SETVIEWPORTORG = 0x020D
META_SETVIEWPORTEXT = 0x020E
META_OFFSETWINDOWORG = 0x020F
META_SCALEWINDOWEXT = 0x0410
META_OFFSETVIEWPORTORG = 0x0211
META_SCALEVIEWPORTEXT = 0x0412
META_LINETO = 0x0213
META_MOVETO = 0x0214
META_EXCLUDECLIPRECT = 0x0415
META_INTERSECTCLIPRECT = 0x0416
META_ARC = 0x0817
META_ELLIPSE = 0x0418
META_FLOODFILL = 0x0419
META_PIE = 0x081A
META_RECTANGLE = 0x041B
META_ROUNDRECT = 0x061C
META_PATBLT = 0x061D
META_SAVEDC = 0x001E
META_SETPIXEL = 0x041F
META_OFFSETCLIPRGN = 0x0220
META_TEXTOUT = 0x0521
META_BITBLT = 0x0922
META_STRETCHBLT = 0x0B23
META_POLYGON = 0x0324
META_POLYLINE = 0x0325
META_ESCAPE = 0x0626
META_RESTOREDC = 0x0127
META_FILLREGION = 0x0228
META_FRAMEREGION = 0x0429
META_INVERTREGION = 0x012A
META_PAINTREGION = 0x012B
META_SELECTCLIPREGION = 0x012C
META_SELECTOBJECT            = 0x012D
META_SETTEXTALIGN            = 0x012E
META_CHORD = 0x0830
META_SETMAPPERFLAGS = 0x0231
META_EXTTEXTOUT = 0x0a32
META_SETDIBTODEV = 0x0d33
META_SELECTPALETTE = 0x0234
META_REALIZEPALETTE = 0x0035
META_ANIMATEPALETTE = 0x0436
META_SETPALENTRIES = 0x0037
META_POLYPOLYGON = 0x0538
META_RESIZEPALETTE = 0x0139
META_DIBBITBLT = 0x0940
META_DIBSTRETCHBLT = 0x0b41
META_DIBCREATEPATTERNBRUSH = 0x0142
META_STRETCHDIB = 0x0f43
META_EXTFLOODFILL = 0x0548
META_SETLAYOUT = 0x0149
META_DELETEOBJECT = 0x01f0
META_CREATEPALETTE = 0x00f7
META_CREATEPATTERNBRUSH = 0x01F9
META_CREATEPENINDIRECT = 0x02FA
META_CREATEFONTINDIRECT = 0x02FB
META_CREATEBRUSHINDIRECT = 0x02FC
META_CREATEREGION = 0x06FF
# Metafile escape codes
NEWFRAME = 1
ABORTDOC = 2
NEXTBAND = 3
SETCOLORTABLE = 4
GETCOLORTABLE
|
agusmakmun/Some-Examples-of-Simple-Python-Script
|
grabbing/notes.py
|
Python
|
agpl-3.0
| 5,474
| 0.011874
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib2
import time, os
import sys, fileinput
from bs4 import BeautifulSoup
class Grabber(object):
def use(self):
print ""
print "* This just Fucking whatever for grabbing."
print "* For license just fucking to change this. ^Summon Agus Created."
print "-------------------------------------------------------------------------------------"
print "[1] Add Note : ./notes.py addnote <file_name> <title> <content> <tag1, tag2>"
print "[2] List Note : ./note
|
s.py listnote <file_name>"
print "[3] Delete Note : ./notes.py delnote <file_name> <numb_line>"
print "[4] Add Url to Grab : ./notes.py addurl <file_name> <url>"
print "-------------------------------------------------------------------------------------"
print ""
def addnote(self, args):
self.help = "./notes.py addnote <file_name> <title> <content> <tag1, tag2>"
if len(sys.argv) < 5:
sys.exit("[-] Fucking Damn!!\n[?] Use similiar this: " + self.help)
f_note_out = sys.argv[2]
title = sys.argv[3]
content = sys.argv[4]
tags = sys.argv[5]
print "[+] Your args is: ./notes.py", args, f_note_out, title, content, tags
time.sleep(1)
print "[>] Waiting for save your note ..."
my_note = '"'+title+'": "'+content+'"'+ ' tag: '+ tags
""" [?] Trying if file was exists, so note will add in new line.
[?] But, if file is doesn't exists, this program will automatically write file with your first argument.
"""
try:
f_note = open(f_note_out, 'a')
my_note = my_note + '\n'
except IOError:
f_note = open(f_note_out, 'w')
my_note = '\n' + my_note
f_note.write(my_note)
f_note.close()
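        # Note: mode 'a' creates the file when it is missing, so the IOError
        # fallback above acts only as a safety net.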
time.sleep(1)
print "[>] Your note was saved in <"+ f_note_out +">"
def listnote(self, args):
self.help = "./notes.py listnote <file_name>"
if len(sys.argv) < 2:
sys.exit("[-] Fucking Damn!!\n[?] Use similiar this: " + self.help)
print "[+] Your args is: ./notes.py", args, sys.argv[2]
try:
with open(sys.argv[2], "r") as f:
print " -------------------------------------- "
for line in f:
print line.replace("\n", "")
time.sleep(0.3)
print " -------------------------------------- "
except IOError:
sys.exit("[-] File Doesn't exists!!"+\
"\n[?] This your path now: " +str(os.getcwd())+\
"\n[?] This files and folders in your path now: " + str(os.listdir('.')) )
def delnote(self, args):
self.help = "./notes.py delnote <file_name> <numb_line>"
if len(sys.argv) < 3:
sys.exit("[-] Fucking Damn!!\n[?] Use similiar this: " + self.help)
f_note_out = str(sys.argv[2])
try:
for numb, line in enumerate(fileinput.input(f_note_out, inplace=True)): #start index from 0
if numb == int(sys.argv[3]):
continue
else:
sys.stdout.write(line)
sys.exit("[+] Success delete line <"+sys.argv[3]+"> in file of <"+ f_note_out +">")
except OSError:
sys.exit("[-] File Doesn't exists!!"+\
"\n[?] This your path now: " +str(os.getcwd())+\
"\n[?] This files and folders in your path now: " + str(os.listdir('.')) )
def addurl(self, args):
self.help = "./notes.py addurl <file_name> <url>"
if len(sys.argv) < 3:
sys.exit("[-] Fucking Damn!!\n[?] Use similiar this: " + self.help)
url = str(sys.argv[3])
f_note_out = str(sys.argv[2])
print "[+] Your args is: ./notes.py", args, f_note_out, url
agent = {'User-Agent':'Mozilla/5.0'}
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
soup = BeautifulSoup(page)
title = soup.title.string.encode('utf-8')
descriptions = soup.findAll(attrs={"name":"description"})[0]['content'].encode('utf-8')
keywords = soup.findAll(attrs={"name":"keywords"})[0]['content'].encode('utf-8')
print "[>] Waiting for save your note ..."
time.sleep(1)
my_note = '"'+title+'": "'+descriptions+'"'+ ' tag: '+ keywords
try:
f_note = open(f_note_out, 'a')
my_note = my_note + '\n'
except IOError:
f_note = open(f_note_out, 'w')
my_note = '\n' + my_note
f_note.write(my_note)
f_note.close()
time.sleep(1)
print "[>] Your url note was saved in <"+ f_note_out +">"
if __name__ == "__main__":
mome = Grabber()
try:
args = str(sys.argv[1])
if args == 'addnote':
mome.addnote(args)
elif args == 'listnote':
mome.listnote(args)
elif args == 'delnote':
mome.delnote(args)
elif args == 'addurl':
mome.addurl(args)
else:
print "Funcking damn!, please checkout your input"
except IndexError:
mome.use()
|
altair-viz/altair
|
altair/vegalite/v3/schema/channels.py
|
Python
|
bsd-3-clause
| 263,403
| 0.006124
|
# The contents of this file are automatically written by
# tools/generate_schema_wrapper.py. Do not modify directly.
from . import core
import pandas as pd
from altair.utils.schemapi import Undefined
from altair.utils import parse_shorthand
class FieldChannelMixin(object):
def to_dict(self, validate=True, ignore=(), context=None):
context = context or {}
shorthand = self._get('shorthand')
field = self._get('field')
if shorthand is not Undefined and field is not Undefined:
raise ValueError("{} specifies both shorthand={} and field={}. "
"".format(self.__class__.__name__, shorthand, field))
if isinstance(shorthand, (tuple, list)):
# If given a list of shorthands, then transform it to a list of classes
kwds = self._kwds.copy()
kwds.pop('shorthand')
return [self.__class__(sh, **kwds).to_dict(validate=validate, ignore=ignore, context=context)
for sh in shorthand]
if shorthand is Undefined:
parsed = {}
elif isinstance(shorthand, str):
parsed = parse_shorthand(shorthand, data=context.get('data', None))
type_required = 'type' in self._kwds
type_in_shorthand = 'type' in parsed
type_defined_explicitly = self._get('type') is not Undefined
if not type_required:
# Secondary field names don't require a type argument in VegaLite 3+.
# We still parse it out of the shorthand, but drop it here.
parsed.pop('type', None)
elif not (type_in_shorthand or type_defined_explicitly):
if isinstance(context.get('data', None), pd.DataFrame):
raise ValueError("{} encoding field is specified without a type; "
"the type cannot be inferred because it does not "
"match any column in the data.".format(shorthand))
else:
raise ValueError("{} encoding field is specified without a type; "
"the type cannot be automatically inferred because "
"the data is not specified as a pandas.DataFrame."
"".format(shorthand))
else:
# Shorthand is not a string; we pass the definition to field,
# and do not do any parsing.
parsed = {'field': shorthand}
# Set shorthand to Undefined, because it's not part of the base schema.
self.shorthand = Undefined
self._kwds.update({k: v for k, v in parsed.items()
if self._get(k) is Undefined})
return super(FieldChannelMixin, self).to_dict(
validate=validate,
ignore=ignore,
context=context
)
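# For reference (behavior comes from altair.utils.parse_shorthand; shown here
# as an illustration): a shorthand such as "mean(yield):Q" parses to
# {'aggregate': 'mean', 'field': 'yield', 'type': 'quantitative'}, which is
# then merged into self._kwds above.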
class ValueChannelMixin(object):
def to_dict(self, validate=True, ignore=(), context=None):
context = context or {}
condition = getattr(self, 'condition', Undefined)
copy = self # don't copy unless we need to
if condition is not Undefined:
if isinstance(condition, core.SchemaBase):
pass
elif 'field' in condition and 'type' not in condition:
kwds = parse_shorthand(condition['field'], context.get('data', None))
copy = self.copy(deep=['condition'])
copy.condition.update(kwds)
return super(ValueChannelMixin, copy).to_dict(validate=validate,
ignore=ignore,
context=context)
class DatumChannelMixin(object):
def to_dict(self, validate=True, ignore=(), context=None):
context = context or {}
datum = getattr(self, 'datum', Undefined)
copy = self # don't copy unless we need to
if datum is not Undefined:
if isinstance(datum, core.SchemaBase):
pass
return super(DatumChannelMixin, copy).to_dict(validate=validate,
ignore=ignore,
context=context)
class Color(FieldChannelMixin, core.StringFieldDefWithCondition):
"""Color schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
    shorthand : string
        shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : anyOf(boolean, :class:`BinParams`, None)
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similar to
binning in Vega-lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
condition : anyOf(:class:`ConditionalStringValueDef`,
List(:class:`ConditionalStringValueDef`))
One or more value definition(s) with `a selection or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
legend : anyOf(:class:`Legend`, None)
An object defining properties of the legend.
If ``null``, the legend for the encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
**See also:** `legend <https://vega.github.io/vega-lite/docs/legend.html>`__
documentation.
scale : anyOf(:class:`Scale`, None)
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `
|