Columns: repo_name | path | language | license | size | score | prefix | middle | suffix

- repo_name: string, 5 to 100 characters
- path: string, 4 to 231 characters
- language: string, 1 class
- license: string, 15 classes
- size: int64, 6 to 947k
- score: float64, 0 to 0.34
- prefix: string, 0 to 8.16k characters
- middle: string, 3 to 512 characters
- suffix: string, 0 to 8.17k characters
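
The prefix, middle, and suffix columns appear to hold one source file split at two points, the usual fill-in-the-middle layout. As a minimal sketch (assuming a row is available as a plain Python dict keyed by the column names above; the example values are hypothetical, not taken from the table), the original file text is just the concatenation of the three segments in order:

# Minimal sketch: reassembling one row's source file from its three segments.
# The row dict and its values below are illustrative only.
def reassemble(row):
    """Concatenate prefix + middle + suffix back into the full file text."""
    return row["prefix"] + row["middle"] + row["suffix"]

example_row = {
    "repo_name": "example/repo",
    "path": "pkg/module.py",
    "language": "Python",
    "prefix": "def add(a, b):\n    ",
    "middle": "return a + b",
    "suffix": "\n",
}
print(reassemble(example_row))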

OutOfOrder/sshproxy | lib/console_extra/__init__.py | Python | gpl-2.0 | 1,214 | 0.001647
#!/usr/bin/env python
# -*- coding: ISO-8859-15 -*-
#
# Copyright (C) 2005-2007 David Guerizec <david@guerizec.net>
#
# Last modified: 2006 Sep 02, 01:40:01 by david
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
__plugin_name__ = "Console Extra Commands"
__description__ = """
This plugin offers several new commands for the console session:
- open user@site
Open a shell session on user@site.
- run user@site cmd args...
Run a command remotely on user@site.
"""
def __init_plugin__():
import commands

paulrouget/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/doc/en/example/py2py3/conftest.py | Python | mpl-2.0 | 324 | 0
import sys
import pytest
py3 = sys.version_info[0] >= 3
class DummyCollector(pytest.collect.File):
def collect(self):
return []
def pytest_pycollect_makemodule(path, parent):
bn = path.basename
if "py3" in bn and not py3 or ("py2" in bn and py3):
return DummyCollector(path, parent=parent)

pandas-dev/pandas | pandas/tests/io/parser/test_skiprows.py | Python | bsd-3-clause | 7,845 | 0.001147
"""
Tests that skipped rows are properly handled during
parsing for all of the parsers defined in parsers.py
"""
from datetime import datetime
from io import StringIO
import numpy as np
import pytest
from pandas.errors import EmptyDataError
from pandas import (
DataFrame,
Index,
)
import pandas._testing as tm
# XFAIL ME PLS once hanging tests issues identified
pytestmark = pytest.mark.usefixtures("pyarrow_skip")
@pytest.mark.parametrize("skiprows", [list(range(6)), 6])
def test_skip_rows_bug(all_parsers, skiprows):
# see gh-505
parser = all_parsers
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
result = parser.read_csv(
StringIO(text), skiprows=skiprows, header=None, index_col=0, parse_dates=True
)
index = Index(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], name=0
)
expected = DataFrame(
np.arange(1.0, 10.0).reshape((3, 3)), columns=[1, 2, 3], index=index
)
tm.assert_frame_equal(result, expected)
def test_deep_skip_rows(all_parsers):
# see gh-4382
parser = all_parsers
data = "a,b,c\n" + "\n".join(
[",".join([str(i), str(i + 1), str(i + 2)]) for i in range(10)]
)
condensed_data = "a,b,c\n" + "\n".join(
[",".join([str(i), str(i + 1), str(i + 2)]) for i in [0, 1, 2, 3, 4, 6, 8, 9]]
)
result = parser.read_csv(StringIO(data), skiprows=[6, 8])
condensed_result = parser.read_csv(StringIO(condensed_data))
tm.assert_frame_equal(result, condensed_result)
def test_skip_rows_blank(all_parsers):
# see gh-9832
parser = all_parsers
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = parser.read_csv(
StringIO(text), skiprows=6, header=None, index_col=0, parse_dates=True
)
index = Index(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], name=0
)
expected = DataFrame(
np.arange(1.0, 10.0).reshape((3, 3)), columns=[1, 2, 3], index=index
)
tm.assert_frame_equal(data, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"""id,text,num_lines
1,"line 11
line 12",2
2,"line 21
line 22",2
3,"line 31",1""",
{"skiprows": [1]},
DataFrame(
[[2, "line 21\nline 22", 2], [3, "line 31", 1]],
columns=["id", "text", "num_lines"],
),
),
(
"a,b,c\n~a\n b~,~e\n d~,~f\n f~\n1,2,~12\n 13\n 14~",
{"quotechar": "~", "skiprows": [2]},
DataFrame([["a\n b", "e\n d", "f\n f"]], columns=["a", "b", "c"]),
),
(
(
"Text,url\n~example\n "
"sentence\n one~,url1\n~"
"example\n sentence\n two~,url2\n~"
"example\n sentence\n three~,url3"
),
{"quotechar": "~", "skiprows": [1, 3]},
DataFrame([["example\n sentence\n two", "url2"]], columns=["Text", "url"]),
),
],
)
def test_skip_row_with_newline(all_parsers, data, kwargs, expected):
# see gh-12775 and gh-10911
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_skip_row_with_quote(all_parsers):
# see gh-12775 and gh-10911
parser = all_parsers
data = """id,text,num_lines
1,"line '11' line 12",2
2,"line '21' line 22",2
3,"line '31' line 32",1"""
exp_data = [[2, "line '21' line 22", 2], [3, "line '31' line 32", 1]]
expected = DataFrame(exp_data, columns=["id", "text", "num_lines"])
result = parser.read_csv(StringIO(data), skiprows=[1])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,exp_data",
[
(
"""id,text,num_lines
1,"line \n'11' line 12",2
2,"line \n'21' line 22",2
3,"line \n'31' line 32",1""",
[[2, "line \n'21' line 22", 2], [3, "line \n'31' line 32", 1]],
),
(
"""id,text,num_lines
1,"line '11\n' line 12",2
2,"line '21\n' line 22",2
3,"line '31\n' line 32",1""",
[[2, "line '21\n' line 22", 2], [3, "line '31\n' line 32", 1]],
),
(
"""id,text,num_lines
1,"line '11\n' \r\tline 12",2
2,"line '21\n' \r\tline 22",2
3,"line '31\n' \r\tline 32",1""",
[[2, "line \n'21' line 22", 2], [3, "line '31\n' \r\tline 32", 1]],
),
],
)
def test_skip_row_with_newline_and_quote(all_parsers, data, exp_data):
# see gh-12775 and gh-10911
parser = all_parsers
result = parser.read_csv(StringIO(data), skiprows=[1])
expected = DataFrame(exp_data, columns=["id", "text", "num_lines"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"lineterminator", ["\n", "\r\n", "\r"] # "LF" # "CRLF" # "CR"
)
def test_skiprows_lineterminator(all_parsers, lineterminator, request):
# see gh-9079
parser = all_parsers
data = "\n".join(
[
"SMOSMANIA ThetaProbe-ML2X ",
"2007/01/01 01:00 0.2140 U M ",
"2007/01/01 02:00 0.2141 M O ",
"2007/01/01 04:00 0.2142 D M ",
]
)
expected = DataFrame(
[
["2007/01/01", "01:00", 0.2140, "U", "M"],
["2007/01/01", "02:00", 0.2141, "M", "O"],
["2007/01/01", "04:00", 0.2142, "D", "M"],
],
columns=["date", "time", "var", "flag", "oflag"],
)
if parser.engine == "python" and lineterminator == "\r":
mark = pytest.mark.xfail(reason="'CR' not respected with the Python parser yet")
request.node.add_marker(mark)
data = data.replace("\n", lineterminator)
result = parser.read_csv(
StringIO(data),
skiprows=1,
delim_whitespace=True,
names=["date", "time", "var", "flag", "oflag"],
)
tm.assert_frame_equal(result, expected)
def test_skiprows_infield_quote(all_parsers):
# see gh-14459
parser = all_parsers
data = 'a"\nb"\na\n1'
expected = DataFrame({"a": [1]})
result = parser.read_csv(StringIO(data), skiprows=2)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,expected",
[
({}, DataFrame({"1": [3, 5]})),
({"header": 0, "names": ["foo"]}, DataFrame({"foo": [3, 5]})),
],
)
def test_skip_rows_callable(all_parsers, kwargs, expected):
parser = all_parsers
data = "a\n1\n2\n3\n4\n5"
result = parser.read_csv(StringIO(data), skiprows=lambda x: x % 2 == 0, **kwargs)
tm.assert_frame_equal(result, expected)
def test_skip_rows_callable_not_in(all_parsers):
parser = all_parsers
data = "0,a\n1,b\n2,c\n3,d\n4,e"
expected = DataFrame([[1, "b"], [3, "d"]])
result = parser.read_csv(
StringIO(data), header=None, skiprows=lambda x: x not in [1, 3]
)
tm.assert_frame_equal(result, expected)
def test_skip_rows_skip_all(all_parsers):
parser = all_parsers
data = "a\n1\n2\n3\n4\n5"
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data), skiprows=lambda x: True)
def test_skip_rows_bad_callable(all_parsers):
msg = "by zero"
parser = all_parsers
data = "a\n1\n2\n3\n4\n5"
with pytest.raises(ZeroDivisionError, match=msg):
parser.read_csv(StringIO(data), skiprows=lambda x: 1 / 0)
def test_skip_rows_and_n_rows(all_parsers):
# GH#44021
data = """a,b
1,a
2,b
3,c
4,d
5,e
6,f
7,g
8,h
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), nrows=5, skiprows=[2, 4, 6])
expected = DataFrame({"a": [1, 3, 5, 7, 8], "b": ["a", "c", "e", "g", "h"]})
tm.assert_frame_equal(result, expected)

OpenXT/sync-database | sync_db/run_script.py | Python | gpl-2.0 | 2,130 | 0.002347
#
# Copyright (c) 2012 Citrix Systems, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import os
import configuration
import sqlplus
def add_subparser(subparsers):
description = """Run a single sqlplus script."""
epilog = """SCRIPT is assumed to be relative to schema directory (default
{0}) unless it begins with '{1}'.""".format(
sqlplus.get_default_schema_dir(), os.path.sep)
parser = subparsers.add_parser("run",
help="run a single sqlplus script",
description=description,
epilog=epilog)
parser.add_argument("user",
metavar="USER",
choices=["sys"] + configuration.USERS,
help="connect to database as: " +
", ".join(["sys"] + configuration.USERS))
parser.add_argument("script",
metavar="SCRIPT",
help="sqlplus script to run")
parser.set_defaults(func=_run)
parser.set_defaults(need_config=True)
parser.set_defaults(need_metadata=True)
def _run(args, metadata, config):
sqlplus.run_steps([(args.user, args.script)],
metadata,
config,
args.schema_dir,
False,
None,
None,
False)

dl1ksv/gnuradio | gnuradio-runtime/python/gnuradio/gr/packet_utils.py | Python | gpl-3.0 | 4,116 | 0.000243
#!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr
import pmt
def make_lengthtags(lengths, offsets, tagname='length', vlen=1):
tags = []
assert(len(offsets) == len(lengths))
for offset, length in zip(offsets, lengths):
tag = gr.tag_t()
tag.offset = offset // vlen
tag.key = pmt.string_to_symbol(tagname)
tag.value = pmt.from_long(length // vlen)
tags.append(tag)
return tags
def string_to_vector(string):
v = []
for s in string:
v.append(ord(s))
return v
def strings_to_vectors(strings, tsb_tag_key):
vs = [string_to_vector(string) for string in strings]
return packets_to_vectors(vs, tsb_tag_key)
def vector_to_string(v):
s = []
for d in v:
s.append(chr(d))
return ''.join(s)
def vectors_to_strings(data, tags, tsb_tag_key):
packets = vectors_to_packets(data, tags, tsb_tag_key)
return [vector_to_string(packet) for packet in packets]
def count_bursts(data, tags, tsb_tag_key, vlen=1):
lengthtags = [t for t in tags
if pmt.symbol_to_string(t.key) == tsb_tag_key]
lengths = {}
for tag in lengthtags:
if tag.offset in lengths:
raise ValueError(
"More than one tag with key {0} at the same offset={1}."
.format(tsb_tag_key, tag.offset))
lengths[tag.offset] = pmt.to_long(tag.value) * vlen
in_burst = False
in_packet = False
packet_length = None
packet_pos = None
burst_count = 0
for pos in range(len(data)):
if pos in lengths:
if in_packet:
print("Got tag at pos {0} current packet_pos is {1}".format(
pos, packet_pos))
raise Exception("Received packet tag while in packet.")
packet_pos = -1
packet_length = lengths[pos]
in_packet = True
if not in_burst:
burst_count += 1
in_burst = True
elif not in_packet:
in_burst = False
if in_packet:
packet_pos += 1
if packet_pos == packet_length - 1:
in_packet = False
packet_pos = None
return burst_count
def vectors_to_packets(data, tags, tsb_tag_key, vlen=1):
lengthtags = [t for t in tags
if pmt.symbol_to_string(t.key) == tsb_tag_key]
lengths = {}
for tag in lengthtags:
if tag.offset in lengths:
raise ValueError(
"More than one tag with key {0} at the same offset={1}."
.format(tsb_tag_key, tag.offset))
lengths[tag.offset] = pmt.to_long(tag.value) * vlen
if 0 not in lengths:
raise ValueError("There is no tag with key {0} and an offset of 0"
.format(tsb_tag_key))
pos = 0
packets = []
while pos < len(data):
if pos not in lengths:
raise ValueError("There is no tag with key {0} and an offset of {1}. "
"We were expecting one."
.format(tsb_tag_key, pos))
length = lengths[pos]
if length == 0:
raise ValueError(
"Packets cannot have zero length.")
if pos + length > len(data):
raise ValueError("The final packet is incomplete.")
packets.append(data[pos: pos + length])
pos += length
return packets
def packets_to_vectors(packets, tsb_tag_key, vlen=1):
""" Returns a single data vector and a set of tags.
If used with blocks.vector_source_X, this set of data
and tags will produce a correct tagged stream. """
tags = []
data = []
offset = 0
for packet in packets:
data.extend(packet)
tag = gr.tag_t()
tag.offset = offset // vlen
tag.key = pmt.string_to_symbol(tsb_tag_key)
tag.value = pmt.from_long(len(packet) // vlen)
tags.append(tag)
offset = offset + len(packet)
return data, tags

knagra/farnsworth | threads/models.py | Python | bsd-2-clause | 3,639 | 0.002473
'''
Project: Farnsworth
Author: Karandeep Singh Nagra
'''
from django.contrib.auth.models import User, Group, Permission
from django.core.urlresolvers import reverse
from django.db import models
from base.models import UserProfile
class Thread(models.Model):
'''
The Thread model. Used to group messages.
'''
owner = models.ForeignKey(
UserProfile,
help_text="The user who started this thread.",
)
subject = models.CharField(
blank=False,
null=False,
max_length=254,
help_text="Subject of this thread.",
)
start_date = models.DateTimeField(
auto_now_add=True,
help_text="The date this thread was started.",
)
change_date = models.DateTimeField(
auto_now_add=True,
help_text="The last time this thread was modified.",
)
number_of_messages = models.PositiveSmallIntegerField(
default=1,
help_text="The number of messages in this thread.",
)
active = models.BooleanField(
default=True,
help_text="Whether this thread is still active.",
)
views = models.PositiveIntegerField(
default=0,
help_text="The number of times this thread has been viewed.",
)
followers = models.ManyToManyField(
User,
blank=True,
null=True,
related_name="following",
help_text="Users following this thread",
)
def __unicode__(self):
return self.subject
class Meta:
ordering = ['-change_date']
def is_thread(self):
return True
def get_view_url(self):
return reverse("threads:view_thread", kwargs={"pk": self.pk})
class Message(models.Model):
'''
The Message model. Contains a body, owner, and post_date, referenced by thread.
'''
body = models.TextField(
blank=False,
null=False,
help_text="Body of this message.",
)
owner = models.ForeignKey(
UserProfile,
help_text="The user who posted this message.",
)
post_date = models.DateTimeField(
auto_now_add=True,
help_text="The date this message was posted.",
)
thread = models.ForeignKey(
Thread,
help_text="The thread to which this message belongs.",
)
edited = models.BooleanField(
default=False,
)
def __str__(self):
return self.__unicode__()
def __unicode__(self):
return self.body
class Meta:
ordering = ['post_date']
def is_message(self):
return True
def pre_save_thread(sender, instance, **kwargs):
thread = instance
thread.number_of_messages = thread.message_set.count()
def post_save_thread(sender, instance, created, **kwargs):
thread = instance
if not created and thread.number_of_messages == 0:
thread.delete()
def post_save_message(sender, instance, created, **kwargs):
message = instance
thread = message.thread
if created:
thread.change_date = message.post_date
thread.save()
def post_delete_message(sender, instance, **kwargs):
message = instance
message.thread.save()
# Connect signals with their respective functions from above.
# When a message is created, update that message's thread's change_date to the post_date of that message.
models.signals.post_save.connect(post_save_message, sender=Message)
models.signals.post_delete.connect(post_delete_message, sender=Message)
models.signals.pre_save.connect(pre_save_thread, sender=Thread)
models.signals.post_save.connect(post_save_thread, sender=Thread)

mryab/askme | labs/L1 - Gradient descent and linear models.py | Python | mit | 32,558 | 0.00386
# coding: utf-8
# # L1 - Gradient descent and linear models
# In[1]:
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import minimize
import math
get_ipython().magic('matplotlib notebook')
matplotlib.rcParams['figure.figsize'] = '12,8'
matplotlib.rcParams['figure.max_open_warning'] = False
# In[2]:
def setup_plot_figure(xlabel='x', ylabel='y', hline=False, vline=False, equal_axes=False):
f = plt.figure()
if equal_axes:
plt.axis('equal')
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.grid(True, which='both')
if hline:
plt.axhline(color='k', alpha=0.7)
if vline:
plt.axvline(color='k', alpha=0.7)
return f
# In[5]:
SAMPLE_NUM = 10000
positive_class_features = np.random.normal(-1, 1, (SAMPLE_NUM, 2))
negative_class_features = np.random.normal(1.7, 1, (SAMPLE_NUM, 2))
X = np.c_[np.ones(SAMPLE_NUM * 2), np.concatenate((positive_class_features, negative_class_features))]
Y = np.r_[np.ones(SAMPLE_NUM), -np.ones(SAMPLE_NUM)].reshape(SAMPLE_NUM * 2)
w_exact = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(Y)
x_ax = np.linspace(-5, 5, 2)
y_ax_exact = np.array(-(w_exact[0] + w_exact[1] * x_ax) / w_exact[2])
setup_plot_figure(hline=True, vline=True, equal_axes=True)
plt.scatter(positive_class_features[:, 0], positive_class_features[:, 1], c='green', alpha=0.2)
plt.scatter(negative_class_features[:, 0], negative_class_features[:, 1], c='red', alpha=0.2)
plt.plot(x_ax, y_ax_exact, color='b')
plt.show()
# Two matrices of sizes $a\times b$ and $b \times c$ are multiplied in $O(abc)$ operations ($a\cdot c$ cells in the new table, each requiring $b$ operations), an $a \times a$ matrix is inverted in $O(a^3)$ operations, and an $a \times b$ matrix is transposed in $O(ab)$. Knowing this, and taking the input sample to have $n$ objects and $m$ parameters, we get the following asymptotics from performing five operations in sequence: transposing the matrix $X$, multiplying $X_{m\times n}^T$ by $X_{n\times m}$, inverting the resulting $m\times m$ matrix, multiplying the inverted $m\times m$ matrix by $X_{m\times n}^T$, and multiplying the resulting $m\times n$ matrix by $Y_{n \times 1}$. Then
# $$O(m\cdot n)+O(m\cdot n \cdot m)+O(m^3)+O(m\cdot m \cdot n)+O(m\cdot n)=O(m^2n)+O(m^3).$$ If we assume that $m<n$, the expression can be simplified to $O(m^2n)$.
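# As an illustrative sketch (not part of the original notebook), the five operations counted above can be written out step by step for the X and Y defined earlier; this computes the same vector as the one-line w_exact expression above.
Xt = X.T                      # transpose: O(m*n)
XtX = Xt.dot(X)               # (m x n) times (n x m): O(m^2 * n)
XtX_inv = np.linalg.inv(XtX)  # inversion of an (m x m) matrix: O(m^3)
XtX_inv_Xt = XtX_inv.dot(Xt)  # (m x m) times (m x n): O(m^2 * n)
w_steps = XtX_inv_Xt.dot(Y)   # (m x n) times (n,): O(m*n)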
# **Task**
#
# 1. Let $\mathbb{P}\{y=1|x\} = \sigma(wx+b)$, where $\sigma(z) = \frac{1}{1 + \exp(-z)}$. Show that the problem
# $$ \arg\min_{w, b} \sum_{x, y} \ln(1 + \exp(-y(wx + b)))$$
# is nothing other than likelihood maximization.
# 2. Plot all of the loss functionals in the $M \times Q$ axes for a single element.
#
# 1. The likelihood function is $\mathbb{P}\{y|x\}=\sigma(y(wx+b))$. If we treat the agreement of the predicted class with the actual class as jointly independent events across the sample, then $\mathbb{P}\{y|x\}$ can be rewritten as $\prod_{i=0}^n\mathbb{P}\{y_i|x_i\}$. Maximizing the likelihood is then the problem $$\underset{w}{\mathrm{argmax}}\prod_{i=0}^n\mathbb{P}\{y_i|x_i\}=\underset{w}{\mathrm{argmax}}\ln\prod_{i=0}^n\mathbb{P}\{y_i|x_i\}=$$ (by monotonicity of the logarithm: where a function attains its maximum, so does its logarithm) $$=\underset{w}{\mathrm{argmax}}\sum_{i=0}^n\ln\mathbb{P}\{y_i|x_i\}=\underset{w}{\mathrm{argmax}}\sum_{i=0}^n\ln\sigma(y_i(wx_i+b))=\underset{w}{\mathrm{argmax}}\sum_{i=0}^n\ln(1+e^{-y_i(wx_i+b)})^{-1}=$$ (by the properties of the logarithm) $$=\underset{w}{\mathrm{argmax}}\left(-\sum_{i=0}^n\ln(1+e^{-y_i(wx_i+b)})\right)=\underset{w}{\mathrm{argmin}}\sum_{i=0}^n\ln(1+e^{-y_i(wx_i+b)})$$
#
# 2. Since $M(x, y) = y\cdot(wx)$ (we can prepend a component equal to $1$ to $x$ and a component equal to $b$ to $w$ to recover the original expression), we get the following dependencies of $Q$ on $M$:
#
# 1) $Q = [y_{pred} \neq y_{true}]=[y\cdot(wx)<0]=[M < 0]$
#
# 2) $Q = ((wx) - y)^{2}=\frac{1}{y^2}((wx\cdot y) - y^2)^2=\frac{1}{y^2}(M - y^2)^2$. Since $y=\pm 1$, this gives $Q=(M - 1)^2$.
#
# 3) $Q = \max(0, 1 - y\cdot(wx))=\max(0,1-M)$
#
# 4) $Q = \ln(1 + e^{-y\cdot(wx)})=\ln(1+e^{-M})$
# In[7]:
margin = np.linspace(-7, 7, 1000)
@np.vectorize
def exact_loss_m(x):
return 1 if x < 0 else 0
@np.vectorize
def mse_loss_m(x):
return (x - 1) ** 2
@np.vectorize
def hinge_loss_m(x):
return max(0, 1 - x)
@np.vectorize
def logistic_loss_m(x):
return np.log(1 + np.exp(-x))
setup_plot_figure('M', 'Q', True, True, True)
plt.plot(margin, exact_loss_m(margin), label='$[M<0$]')
plt.plot(margin, mse_loss_m(margin), label='$(M - 1)^2$')
plt.plot(margin, hinge_loss_m(margin), label='$max(0,1-M)$')
plt.plot(margin, logistic_loss_m(margin), label='$\ln(1+e^{-M})$')
plt.legend()
plt.xlim(-7, 7)
plt.ylim(-3, 3)
plt.show()
# **Gradient descent**
# In[6]:
def grad_descent(x0, func, grad, learn_rate, iter_num):
steps = np.empty((iter_num, x0.shape[0]))
costs = np.empty(iter_num)
for i in range(iter_num):
costs[i] = func(x0)
steps[i] = x0
x0 -= learn_rate * grad(x0)
return x0, costs, steps
# In[7]:
def simple_func(x): # z=x^2+y^2
return x[0] ** 2 + x[1] ** 2
def simple_grad(x):
return 2 * x
xx = np.arange(-10, 10, 0.01)
yy = np.arange(-10, 10, 0.01)
xgrid, ygrid = np.meshgrid(xx, yy)
zgrid = simple_func((xgrid, ygrid))
setup_plot_figure(hline=True, vline=True, equal_axes=True)
cont = plt.contour(xgrid, ygrid, zgrid)
plt.xlim(-10, 10)
plt.ylim(-7, 7)
cont.clabel(fmt="%.0f")
start_simple = np.random.randn(2) * 10
bestval_simple = simple_func(start_simple)
bestres = start_simple
bestlr = 0
for lr in np.arange(0, 1.5, 1e-4):
res = grad_descent(start_simple, simple_func, simple_grad, lr, 50)
if simple_func(res[0]) < bestval_simple:
bestval_simple = simple_func(res[0])
bestres = res[2]
bestlr = lr
print('Optimal learning rate: ', bestlr)
plt.plot(bestres.T[0], bestres.T[1], 'bo')
plt.show()
# 1. The function is $z=x^2+y^2$, with a global minimum of $0$ at the point $(0;0)$.
# 2. See the cell above.
# 3. See the cell above.
# 4. See the cell above.
# 5. All values of $\lambda$ from 0 to 1.5 were tried (for larger values the gradient in some cases starts to grow significantly) with a step of $10^{-4}$, optimizing the value of the function at the point found. It makes sense that the optimal learning rate is close to 0.5, since then a single iteration reaches $(x;y)-0.5\cdot(2x;2y)=(0;0)$, the global minimum.
# There are functions that gradient descent handles poorly. For example, the Rosenbrock function
#
# <center>$f(x, y) = (1-x)^2 + 100(y-x^2)^2$.</center>
# In[11]:
def rosenbrock(x):
return (1 - x[0]) ** 2 + 100 * (x[1] - x[0] ** 2) ** 2
def rosenbrock_grad(x):
return np.array([-2 * (1 - x[0]) - 400 * x[0] * (-x[0] ** 2 + x[1]), 200 * (-x[0] ** 2 + x[1])])
xx = np.arange(-20, 20, 0.1)
yy = np.arange(-20, 20, 0.1)
xgrid, ygrid = np.meshgrid(xx, yy)
zgrid = rosenbrock((xgrid, ygrid))
fig = setup_plot_figure()
ax = fig.gca(projection='3d')
cont = ax.plot_surface(xgrid, ygrid, zgrid, norm=matplotlib.colors.LogNorm(), cmap=plt.cm.jet, linewidth=0, shade=False)
fig.colorbar(cont, shrink=0.5, aspect=5)
start_ros = np.random.randn(2) * 20
res_rosenbrock = grad_descent(start_ros, rosenbrock, rosenbrock_grad, 1e-5, 5000)[2]
z_ros = rosenbrock(res_rosenbrock.T)
ax.plot(xs=res_rosenbrock.T[0], ys=res_rosenbrock.T[1], zs=z_ros)
fig.show()
# 1. See the cell above.
# 2. Notice that the function has a region with an almost zero gradient, a "valley", but outside this valley the function grows very quickly, so with a small fixed $\lambda$ it will converge very slowly, while with a large one it will diverge and the gradient will again grow quickly. Also, in the neighborhood of the global minimum (the "valley") the algorithm will move toward the global minimum very slowly because the gradient is close to zero.
#
# 3. The learning rate can be varied dynamically in one of many ways, for example those described below. One can also restart the algorithm a sufficiently large number of times with random parameters and then pick the best result. There is also the option of normalizing the gradient (generally speaking, the length of the
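# The notebook text is truncated above; as an illustrative sketch only (not the notebook's own continuation), one simple way to vary the learning rate dynamically is to shrink it by a constant factor each iteration, mirroring the grad_descent() defined earlier.
def grad_descent_decay(x0, func, grad, learn_rate, iter_num, decay=0.99):
    # Same loop as grad_descent() above, but the step size shrinks every iteration.
    # The decay factor 0.99 is an arbitrary example value.
    steps = np.empty((iter_num, x0.shape[0]))
    costs = np.empty(iter_num)
    for i in range(iter_num):
        costs[i] = func(x0)
        steps[i] = x0
        x0 -= learn_rate * grad(x0)
        learn_rate *= decay
    return x0, costs, steps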

mikefeneley/topcoder | src/SRM-697/triangle_making.py | Python | mit | 423 | 0.002364
class TriangleMaking:
def maxPerimeter(self, a, b, c):
first = a
second = b
third = c
sides = [first, second, third]
for idx, side in enumerate(sides):
one = (idx + 1) % 3
two = (idx + 2) % 3
total = sides[one] + sides[two]
while sides[idx] >= total:
sides[idx] -= 1
return sum(sides)

sean-abbott/chamberlain | tests/factories.py | Python | bsd-3-clause | 769 | 0
# -*- coding: utf-8 -*-
"""Factories to help in tests."""
from factory import PostGenerationMethodCall, Sequence
from factory.alchemy import SQLAlchemyModelFactory
from chamberlain.database import db
from chamberlain.user.models import User
class BaseFactory(SQLAlchemyModelFactory):
"""Base factory."""
class Meta:
"""Factory configuration."""
abstract = True
sqlalchemy_session = db.session
class UserFactory(BaseFactory):
"""User factory."""
username = Sequence(lambda n: 'user{0}'.format(n))
email = Sequence(lambda n: 'user{0}@example.com'.format(n))
password = PostGenerationMethodCall('set_password', 'example')
active = True
class Meta:
"""Factory configuration."""
model = User

snirp/juis | manage.py | Python | mit | 250 | 0
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "juisapp.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)

stackforge/cloudkitty | cloudkitty/api/v2/scope/state.py | Python | apache-2.0 | 4,899 | 0
# Copyright 2019 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import flask
import voluptuous
from werkzeug import exceptions as http_exceptions
from cloudkitty.api.v2 import base
from cloudkitty.api.v2 import utils as api_utils
from cloudkitty.common import policy
from cloudkitty import messaging
from cloudkitty import storage_state
from cloudkitty import tzutils
from cloudkitty import validation_utils as vutils
class ScopeState(base.BaseResource):
@classmethod
def reload(cls):
super(ScopeState, cls).reload()
cls._client = messaging.get_client()
cls._storage_state = storage_state.StateManager()
@api_utils.paginated
@api_utils.add_input_schema('query', {
voluptuous.Optional('scope_id', default=[]):
api_utils.MultiQueryParam(str),
voluptuous.Optional('scope_key', default=[]):
api_utils.MultiQueryParam(str),
voluptuous.Optional('fetcher', default=[]):
api_utils.MultiQueryParam(str),
voluptuous.Optional('collector', default=[]):
api_utils.MultiQueryParam(str),
})
@api_utils.add_output_schema({'results': [{
voluptuous.Required('scope_id'): vutils.get_string_type(),
voluptuous.Required('scope_key'): vutils.get_string_type(),
voluptuous.Required('fetcher'): vutils.get_string_type(),
voluptuous.Required('collector'): vutils.get_string_type(),
voluptuous.Required('state'): vutils.get_string_type(),
}]})
def get(self,
offset=0,
limit=100,
scope_id=None,
scope_key=None,
fetcher=None,
collector=None):
policy.authorize(
flask.request.context,
'scope:get_state',
{'tenant_id': scope_id or flask.request.context.project_id}
)
results = self._storage_state.get_all(
identifier=scope_id,
scope_key=scope_key,
fetcher=fetcher,
collector=collector,
offset=offset,
limit=limit,
)
if len(results) < 1:
raise http_exceptions.NotFound(
"No resource found for provided filters.")
return {
'results': [{
'scope_id': r.identifier,
'scope_key': r.scope_key,
'fetcher': r.fetcher,
'collector': r.collector,
'state': r.state.isoformat(),
} for r in results]
}
@api_utils.add_input_schema('body', {
voluptuous.Exclusive('all_scopes', 'scope_selector'):
voluptuous.Boolean(),
voluptuous.Exclusive('scope_id', 'scope_selector'):
api_utils.MultiQueryParam(str),
voluptuous.Optional('scope_key', default=[]):
api_utils.MultiQueryParam(str),
voluptuous.Optional('fetcher', default=[]):
api_utils.MultiQueryParam(str),
voluptuous.Optional('collector', default=[]):
api_utils.MultiQueryParam(str),
voluptuous.Required('state'):
voluptuous.Coerce(tzutils.dt_from_iso),
})
def put(self,
all_scopes=False,
scope_id=None,
scope_key=None,
fetcher=None,
collector=None,
state=None):
policy.authorize(
flask.request.context,
'scope:reset_state',
{'tenant_id': scope_id or flask.request.context.project_id}
)
if not all_scopes and scope_id is None:
raise http_exceptions.BadRequest(
"Either all_scopes or a scope_id should be specified.")
results = self._storage_state.get_all(
identifier=scope_id,
scope_key=scope_key,
fetcher=fetcher,
collector=collector,
)
if len(results) < 1:
raise http_exceptions.NotFound(
"No resource found for provided filters.")
serialized_results = [{
'scope_id': r.identifier,
'scope_key': r.scope_key,
'fetcher': r.fetcher,
'collector': r.collector,
} for r in results]
self._client.cast({}, 'reset_state', res_data={
'scopes': serialized_results, 'state': state.isoformat(),
})
return {}, 202

freerangerouting/frr | tests/lib/test_frrlua.py | Python | gpl-2.0 | 358 | 0
import frrtest
import pytest
if 'S["SCRIPTING_TRUE"]=""\n' not in open("../config.status").readlines():
class TestFrrlua:
@pytest.mark.skipif(True, reason="Test unsupported")
def test_exit_cleanly(self):
pass
else:
class TestFrrlua(frrtest.TestMultiOut):
program = "./test_frrlua"
TestFrrlua.exit_cleanly()

smartsheet-platform/smartsheet-python-sdk | smartsheet/models/alternate_email.py | Python | apache-2.0 | 2,281 | 0
# pylint: disable=C0111,R0902,R0904,R0912,R0913,R0915,E1101
# Smartsheet Python SDK.
#
# Copyright 2018 Smartsheet.com, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from ..types import *
from ..util import serialize
from ..util import deserialize
import json  # used by to_json() below
class AlternateEmail(object):
"""Smartsheet AlternateEmail data model."""
def __init__(self, props=None, base_obj=None):
"""Initialize the AlternateEmail model."""
self._base = None
if base_obj is not None:
self._base = base_obj
self._confirmed = Boolean()
self._email = String()
self._id_ = Number()
if props:
deserialize(self, props)
# requests package Response object
self.request_response = None
self.__initialized = True
def __getattr__(self, key):
if key == 'id':
return self.id_
else:
raise AttributeError(key)
def __setattr__(self, key, value):
if key == 'id':
self.id_ = value
else:
super(AlternateEmail, self).__setattr__(key, value)
@property
def confirmed(self):
return self._confirmed.value
@confirmed.setter
def confirmed(self, value):
self._confirmed.value = value
@property
def email(self):
return self._email.value
@email.setter
def email(self, value):
self._email.value = value
@property
def id_(self):
return self._id_.value
@id_.setter
def id_(self, value):
self._id_.value = value
def to_dict(self):
return serialize(self)
def to_json(self):
return json.dumps(self.to_dict())
def __str__(self):
return self.to_json()

ESSICS/org.csstudio.display.builder | org.csstudio.display.builder.model/examples/script_util/write_any_pv.py | Python | epl-1.0 | 855 | 0.003509
# Example for script that connects to PV,
# writes a value, then disconnects from the PV.
#
# This is usually a bad idea.
# It's better to have widgets connect to PVs,
# 1) More efficient. Widget connects once on start, then remains connected.
# Widget subscribes to PV updates instead of polling its value.
# 2) Widget will reflect the connection and alarm state of the PV
# 3) Widget will properly disconnect
#
# pvs[0]: PV with name of PV to which to connect
# pvs[1]: PV with value that will be written to the PV
from org.csstudio.display.builder.runtime.script import PVUtil, ScriptUtil
pv_name = PVUtil.getString(pvs[0])
value = PVUtil.getDouble(pvs[1])
print("Should write %g to %s" % (value, pv_name))
try:
PVUtil.writePV(pv_name, value, 5000)
except:
ScriptUtil.showErrorDialog(widget, "Error writing %g to %s" % (value, pv_name))

cpaulik/pyscaffold | tests/extensions/test_travis.py | Python | mit | 1,578 | 0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from os.path import exists as path_exists
from pyscaffold.api import create_project
from pyscaffold.cli import run
from pyscaffold.extensions import travis
def test_create_project_with_travis(tmpfolder):
# Given options with the travis extension,
opts = dict(project="proj",
extensions=[travis.Travis('travis')])
# when the project is created,
create_project(opts)
# then travis files should exist
assert path_exists("proj/.travis.yml")
assert path_exists("proj/tests/travis_install.sh")
def test_create_project_without_travis(tmpfolder):
# Given options without the travis extension,
opts = dict(project="proj")
# when the project is created,
create_project(opts)
# then travis files should not exist
assert not path_exists("proj/.travis.yml")
assert not path_exists("proj/tests/travis_install.sh")
def test_cli_with_travis(tmpfolder):
# Given the command line with the travis option,
sys.argv = ["pyscaffold", "--travis", "proj"]
# when pyscaffold runs,
run()
# then travis files should exist
assert path_exists("proj/.travis.yml")
assert path_exists("proj/tests/travis_install.sh")
def test_cli_without_travis(tmpfolder):
# Given the command line without the travis option,
sys.argv = ["pyscaffold", "proj"]
# when pyscaffold runs,
run()
# then travis files should not exist
assert not path_exists("proj/.travis.yml")
assert not path_exists("proj/tests/travis_install.sh")

MarkTheF4rth/youtube-dl | devscripts/make_supportedsites.py | Python | unlicense | 1,152 | 0.001736
#!/usr/bin/env python
from __future__ import unicode_literals
import io
import optparse
import os
import sys
# Import youtube_dl
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
sys.path.insert(0, ROOT_DIR)
import youtube_dl
def main():
parser = optparse.OptionParser(usage='%prog OUTFILE.md')
options, args = parser.parse_args()
if len(args) != 1:
parser.error('Expected an output filename')
outfile, = args
def gen_ies_md(ies):
for ie in ies:
ie_md = '**{0}**'.format(ie.IE_NAME)
ie_desc = getattr(ie, 'IE_DESC', None)
if ie_desc is False:
continue
if ie_desc is not None:
ie_md += ': {0}'.format(ie.IE_DESC)
if not ie.working():
ie_md += ' (Currently broken)'
yield ie_md
ies = sorted(youtube_dl.gen_extractors(), key=lambda i: i.IE_NAME.lower())
out = '# Supported sites\n' + ''.join(
' - ' + md + '\n'
for md in gen_ies_md(ies))
with io.open(outfile, 'w', encoding='utf-8') as outf:
outf.write(out)
if __name__ == '__main__':
main()

perryjohnson/biplaneblade | biplane_blade_lib/prep_stn18_mesh.py | Python | gpl-3.0 | 30,755 | 0.006763
"""Write initial TrueGrid files for one biplane blade station.
Usage
-----
start an IPython (qt)console with the pylab flag:
$ ipython qtconsole --pylab
or
$ ipython --pylab
Then, from the prompt, run this script:
|> %run biplane_blade_lib/prep_stnXX_mesh.py
or
|> import biplane_blade_lib/prep_stnXX_mesh
Author: Perry Roth-Johnson
Last updated: April 30, 2014
"""
import matplotlib.pyplot as plt
import lib.blade as bl
reload(bl)
import lib.poly_utils as pu
reload(pu)
from shapely.geometry import Polygon
from shapely.affinity import translate
# SET THESE PARAMETERS -----------------
station_num = 18
# --------------------------------------
plt.close('all')
# load the biplane blade
b1 = bl.BiplaneBlade(
'biplane blade, flapwise symmetric, no stagger, rj/R=0.452, g/c=1.25',
'biplane_blade')
# pre-process the station dimensions
station = b1.list_of_stations[station_num-1]
station.airfoil.create_polygon()
station.structure.create_all_layers()
station.structure.save_all_layer_edges()
station.structure.write_all_part_polygons()
# plot the parts
station.plot_parts()
# access the structure and airfoil for this station
st = station.structure
af = station.airfoil
x3_off = af.lower_chord * af.gap_to_chord_ratio * af.gap_fraction
# upper spar cap -----------------------------------------------------------
label = 'upper spar cap'
# create the bounding polygon
usc = st.lower_spar_cap.layer['upper']
is2 = st.lower_internal_surface_2.layer['resin']
points_usc = [
(-0.75, usc.left[0][1]), # lower_SparCap_upper.txt
is2.polygon.interiors[0].coords[-2], # lower_InternalSurface2_resin.txt
is2.polygon.interiors[0].coords[44-32], # lower_InternalSurface2_resin.txt
( 0.75, usc.right[1][1]), # lower_SparCap_upper.txt
( 0.75, 0.0),
(-0.75, 0.0)
]
bounding_polygon = Polygon(points_usc)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_2, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_2, 'triax', label,
bounding_polygon, airfoil='lower')
# lower spar cap -----------------------------------------------------------
label = 'lower spar cap'
# create the bounding polygon
lsc = st.lower_spar_cap.layer['lower']
points_lsc = [
(-0.75,-6.5),
( 0.75,-6.5),
( 0.75000000, lsc.right[0][1]), # lower_SparCap_lower.txt
is2.polygon.interiors[0].coords[43-32], # lower_InternalSurface2_resin.txt
is2.polygon.interiors[0].coords[-1], # lower_InternalSurface2_resin.txt
(-0.75000000, lsc.left[1][1]) # lower_SparCap_lower.txt
]
bounding_polygon = Polygon(points_lsc)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_2, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_2, 'triax', label,
bounding_polygon, airfoil='lower')
# TE reinforcement, upper 1 ------------------------------------------------
label = 'TE reinforcement, upper 1'
# create the bounding polygon
ter = st.lower_TE_reinforcement.layer['foam']
is4 = st.lower_internal_surface_4.layer['resin']
points_teu1 = [
(ter.top[0][0], -3.5), # TE_Reinforcement_foam.txt
(ter.top[0][0], -4.6), # TE_Reinforcement_foam.txt
is4.polygon.interiors[0].coords[376-150], # InternalSurface4_resin.txt
(is4.polygon.interiors[0].coords[376-150][0], -3.5) # InternalSurface4_resin.txt
]
bounding_polygon = Polygon(points_teu1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'foam', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'uniax', label,
bounding_polygon, airfoil='lower')
# TE reinforcement, lower 1 ------------------------------------------------
label = 'TE reinforcement, lower 1'
# create the bounding polygon
points_tel1 = [
(ter.bottom[0][0], -5.0), # TE_Reinforcement_foam.txt
(ter.bottom[0][0], -4.6), # TE_Reinforcement_foam.txt
is4.polygon.interiors[0].coords[376-150], # InternalSurface4_resin.txt
(is4.polygon.interiors[0].coords[376-150][0], -5.0) # InternalSurface4_resin.txt
]
bounding_polygon = Polygon(points_tel1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'foam', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'uniax', label,
bounding_polygon, airfoil='lower')
# TE reinforcement, upper 2 ------------------------------------------------
label = 'TE reinforcement, upper 2'
# create the bounding polygon
points_teu2 = [
points_teu1[-1],
points_teu1[-2],
ter.polygon.exterior.coords[50-3], # lower_TE_reinforcement_foam.txt
(ter.polygon.exterior.coords[50-3][0], -3.5) # lower_TE_reinforcement_foam.txt
]
bounding_polygon = Polygon(points_teu2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'foam', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'uniax', label,
bounding_polygon, airfoil='lower')
# TE reinforcement, lower 2 ------------------------------------------------
label = 'TE reinforcement, lower 2'
# create the bounding polygon
points_tel2 = [
(points_teu2[0][0], -5.0),
points_teu2[1],
points_teu2[2],
(points_teu2[2][0], -5.0)
]
bounding_polygon = Polygon(points_tel2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
boun

ngcurrier/ProteusCFD | tools/extractCM.py | Python | gpl-3.0 | 990 | 0.009091
#!/usr/bin/env python
import sys
import math
def main():
if len(sys.argv) != 3:
print('USAGE: ' + sys.argv[0] + ' <filename> ' + ' <boundary id of interest>')
return
targetString = 'Moment coefficient for body[' + sys.argv[2]
targetTimestep = ': \n'
filename = sys.argv[1]
try:
f = open(filename, 'r')
except:
print('File does not exist: ' + filename)
return
for line in f:
if targetTimestep in line:
# replace the colon and newline with a comma so we get a CSV file
line = line.replace (targetTimestep, ', ')
print(line),
continue
if targetString in line:
# find the last colon and get the string after that, which is the numeric value
# of the lift coefficient
pos = line.rfind(':')
size = len(line)
print(line[pos+1:size]),
continue
if __name__ == '__main__':
main()

mixman/djangodev | tests/regressiontests/localflavor/it/tests.py | Python | bsd-3-clause | 2,453 | 0.000815
from django.contrib.localflavor.it.forms import (ITZipCodeField, ITRegionSelect,
ITSocialSecurityNumberField, ITVatNumberField)
from django.test import SimpleTestCase
class ITLocalFlavorTests(SimpleTestCase):
def test_ITRegionSelect(self):
f = ITRegionSelect()
out = u'''<select name="regions">
<option value="ABR">Abruzzo</option>
<option value="BAS">Basilicata</option>
<option value="CAL">Calabria</option>
<option value="CAM">Campania</option>
<option value="EMR">Emilia-Romagna</option>
<option value="FVG">Friuli-Venezia Giulia</option>
<option value="LAZ">Lazio</option>
<option value="LIG">Liguria</option>
<option value="LOM">Lombardia</option>
<option value="MAR">Marche</option>
<option value="MOL">Molise</option>
<option value="PMN" selected="selected">Piemonte</option>
<option value="PUG">Puglia</option>
<option value="SAR">Sardegna</option>
<option value="SIC">Sicilia</option>
<option value="TOS">Toscana</option>
<option value="TAA">Trentino-Alto Adige</option>
<option value="UMB">Umbria</option>
<option value="VAO">Valle d\u2019Aosta</option>
<option value="VEN">Veneto</option>
</select>'''
self.assertEqual(f.render('regions', 'PMN'), out)
def test_ITZipCodeField(self):
error_invalid = [u'Enter a valid zip code.']
valid = {
'00100': '00100',
}
invalid = {
' 00100': error_invalid,
}
self.assertFieldOutput(ITZipCodeField, valid, invalid)
def test_ITSocialSecurityNumberField(self):
error_invalid = [u'Enter a valid Social Security number.']
valid = {
'LVSGDU99T71H501L': 'LVSGDU99T71H501L',
'LBRRME11A01L736W': 'LBRRME11A01L736W',
'lbrrme11a01l736w': 'LBRRME11A01L736W',
'LBR RME 11A01 L736W': 'LBRRME11A01L736W',
}
invalid = {
'LBRRME11A01L736A': error_invalid,
'%BRRME11A01L736W': error_invalid,
}
self.assertFieldOutput(ITSocialSecurityNumberField, valid, invalid)
def test_ITVatNumberField(self):
error_invalid = [u'Enter a valid VAT number.']
valid = {
'07973780013': '07973780013',
'7973780013': '07973780013',
7973780013: '07973780013',
}
invalid = {
'07973780014': error_invalid,
'A7973780013': error_invalid,
}
self.assertFieldOutput(ITVatNumberField, valid, invalid)

mholtrop/Phys605 | Python/DevLib/MCP320x.py | Python | gpl-3.0 | 11,971 | 0.002423
#!/usr/bin/env python
#
# MCP320x
#
# Author: Maurik Holtrop
#
# This module interfaces with the MCP300x or MCP320x family of chips. These
# are 10-bit and 12-bit ADCs respectively. The x number indicates the number
# of multiplexed analog inputs: 2 (MCP3202), 4 (MCP3204) or 8 (MCP3208)
# Communications with this chip are over the SPI protocol.
# See: https://en.wikipedia.org/wiki/Serial_Peripheral_Interface_Bus
#
# The version of the code has two SPI interfaces: the builtin hardware
# SPI interface on the RPI, or a "bit-banged" GPIO version.
#
# Bit-Bang GPIO:
# We emulate a SPI port in software using the GPIO lines.
# This is a bit slower than the hardware interface, but it is far more
# clear what is going on, plus the RPi has only one SPI device.
# Connections: RPi GPIO to MCP320x
# CS_bar_pin = CS/SHDN
# CLK_pin = CLK
# MOSI_pin = D_in
# MISO_pin = D_out
#
# Hardware SPI:
# This uses the builtin hardware on the RPi. You need to enable this with the
# raspi-config program first. The data rate can be up to 1MHz.
# Connections: RPi pins to MCP320x
# CE0 or CE1 = CS/SHDN (chip select) set CS_bar = 0 or 1
# SCK = CLK set CLK_pin = 1000000 (transfer speed)
# MOSI = D_in set MOSI_pin = 0
# MISO = D_out set MISO_pin = 0
# The SPI protocol simulated here is MODE=0, CPHA=0, which has a positive polarity clock,
# (the clock is 0 at rest, active at 1) and a positive phase (0 to 1 transition) for reading
# or writing the data. Thus corresponds to the specifications of the MCP320x chips.
#
# From MCP3208 datasheet:
# Outgoing data : MCU latches data to A/D converter on rising edges of SCLK
# Incoming data: Data is clocked out of A/D converter on falling edges, so should be read on rising edge.
try:
import RPi.GPIO as GPIO
except ImportError as error:
pass
try:
import Adafruit_BBIO as GPIO
except ImportError as error:
pass
try:
import spidev
except ImportError as error:
pass
from DevLib.MyValues import MyValues
class MCP320x:
"""This is an class that implements an interface to the MCP320x ADC chips.
Standard is the MCP3208, but it will also work with the MCP3202, MCP3204, MCP3002, MCP3004 and MCP3008."""
def __init__(self, cs_bar_pin, clk_pin=1000000, mosi_pin=0, miso_pin=0, chip='MCP3208',
channel_max=None, bit_length=None, single_ended=True):
"""Initialize the code and set the GPIO pins.
The last argument, channel_max, is 2 for the MCP3202, 4 for the
MCP3204 or 8 for the MCP3208."""
self._CLK = clk_pin
self._MOSI = mosi_pin
self._MISO = miso_pin
self._CS_bar = cs_bar_pin
chip_dictionary = {
"MCP3202": (2, 12),
"MCP3204": (4, 12),
"MCP3208": (8, 12),
"MCP3002": (2, 10),
"MCP3004": (4, 10),
"MCP3008": (8, 10)
}
if chip in chip_dictionary:
self._ChannelMax = chip_dictionary[chip][0]
self._BitLength = chip_dictionary[chip][1]
elif chip is None and (channel_max is not None) and (bit_length is not None):
self._ChannelMax = channel_max
self._BitLength = bit_length
else:
print("Unknown chip: {} - Please re-initialize.".format(chip))
self._ChannelMax = 0
self._BitLength = 0
return
self._SingleEnded = single_ended
self._Vref = 3.3
self._values = MyValues(self.read_adc, self._ChannelMax)
self._volts = MyValues(self.read_volts, self._ChannelMax)
# This is used to speed up the SPIDEV communication. Send out MSB first.
# control[0] - bit7-3: upper 5 bits 0, because we can only send 8 bit sequences.
# - bit2 : Start bit - starts conversion in ADCs
# - bit1 : Select single_ended=1 or differential=0
# - bit0 : D2 high bit of channel select.
# control[1] - bit7 : D1 middle bit of channel select.
# - bit6 : D0 low bit of channel select.
# - bit5-0 : Don't care.
if self._SingleEnded:
self._control0 = [0b00000110, 0b00100000, 0] # Pre-compute part of the control word.
else:
self._control0 = [0b00000100, 0b00100000, 0] # Pre-compute part of the control word.
if self._MOSI > 0: # Bit-Bang mode
assert self._MISO != 0 and self._CLK < 32
if GPIO.getmode() != 11:
GPIO.setmode(GPIO.BCM) # Use the BCM numbering scheme
GPIO.setup(self._CLK, GPIO.OUT) # Setup the ports for in and output
GPIO.setup(self._MOSI, GPIO.OUT)
GPIO.setup(self._MISO, GPIO.IN)
GPIO.setup(self._CS_bar, GPIO.OUT)
GPIO.output(self._CLK, 0) # Set the clock low.
GPIO.output(self._MOSI, 0) # Set the Master Out low
GPIO.output(self._CS_bar, 1) # Set the CS_bar high
else:
self._dev = spidev.SpiDev(0, self._CS_bar) # Start a SpiDev device
self._dev.mode = 0 # Set SPI mode (phase)
self._dev.max_speed_hz = self._CLK # Set the data rate
self._dev.bits_per_word = 8 # Number of bit per word. ALWAYS 8
def __del__(self):
""" Cleanup the GPIO before being destroyed """
if self._MOSI > 0:
GPIO.cleanup(self._CS_bar)
GPIO.cleanup(self._CLK)
GPIO.cleanup(self._MOSI)
GPIO.cleanup(self._MISO)
def get_channel_max(self):
"""Return the maximum number of channels"""
return self._ChannelMax
def get_bit_length(self):
"""Return the number of bits that will be read"""
return self._BitLength
def get_value_max(self):
"""Return the maximum value possible for an ADC read"""
return 2 ** self._BitLength - 1
def send_bit(self, bit):
""" Send out a single bit, and pulse clock."""
if self._MOSI == 0:
return
#
# The input is read on the rising edge of the clock.
#
GPIO.output(self._MOSI, bit) # Set the bit.
GPIO.output(self._CLK, 1) # Rising edge sends data
GPIO.output(self._CLK, 0) # Return clock to zero.
def read_bit(self):
""" Read a single bit from the ADC and pulse clock."""
if self._MOSI == 0:
return 0
#
# The output is going out on the falling edge of the clock,
# and is to be read on the rising edge of the clock.
# Clock should be already low, and data should already be set.
GPIO.output(self._CLK, 1) # Set the clock high. Ready to read.
bit = GPIO.input(self._MISO) # Read the bit.
GPIO.output(self._CLK, 0) # Return clock low, next bit will be set.
return bit
def read_adc(self, channel):
"""This reads the actual ADC value, after connecting the analog multiplexer to
the desired channel.
ADC value is returned at a n-bit integer value, with n=10 or 12 depending on the chip.
The value can be converted to a voltage with:
volts = data*Vref/(2**n-1)"""
if channel < 0 or channel >= self._ChannelMax:
print("Error - chip does not have channel = {}".format(channel))
if self._MOSI == 0:
# SPIdev Code
# This builds up the control word, which selects the channel
# and sets single/differential more.
control = [self._control0[0] + ((channel & 0b100) >> 2), self._control0[1]+((channel & 0b011) << 6), 0]
dat = self._dev.xfer(control)
value = (dat[1] << 8)+dat[2] # Unpack the two 8-bit words to a single integer.
return value
else:
# Bit Bang code.
# To read out this chip you need to send:
# 1 - start bit
# 2 - Single ended (1) or differential

nathangeffen/tbonline-old | tbonlineproject/external/filebrowser/models.py | Python | mit | 51 | 0.019608
# This file is only necessary for the tests to work

abutcher/Taboot | taboot-func/__init__.py | Python | gpl-3.0 | 851 | 0
# -*- coding: utf-8 -*-
# Taboot - Client utility for performing deployments with Func.
# Copyright © 2009, Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from func.minion.modules import func_module
class Test(func_module.FuncModule):
pass

foolcage/fooltrader | fooltrader/datasource/tdx.py | Python | mit | 854 | 0.004684
# -*- coding: utf-8 -*-
from pytdx.hq import TdxHq_API
from fooltrader.api import technical
from fooltrader.contract.data_contract import KDATA_COLUMN_SINA
from fooltrader.utils.utils import get_exchange
def get_tdx_kdata(security_item, start, end):
api = TdxHq_API()
with api.connect():
# open close high low vol amount date code
# KDATA_COLUMN = ['timestamp', 'code', 'low', 'open', 'close', 'high', 'volume', 'turnover', 'securityId']
df = api.get_k_data(security_item['code'], start, end)
df = df[['date', 'code', 'low', 'open', 'close', 'high', 'vol', 'amount']]
df['securityId'] = df['code'].apply(lambda x: 'stock_{}_{}'.format(get_exchange(x), x))
df['vol'] = df['vol'].apply(lambda x: x * 100)
df.columns = KDATA_COLUMN_SINA
return df
if __name__ == '__main__':
pass

moniquehw/quoterizer | quotes/migrations/0001_initial.py | Python | gpl-3.0 | 1,134 | 0.002646
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-24 20:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='LineItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=255)),
('amount', models.FloatField(default=0)),
],
),
migrations.CreateModel(
name='Quote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
],
),
migrations.AddField(
model_name='lineitem',
name='quote',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quotes.Quote'),
),
]
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractMkkbunkotoikemenWordpressCom.py
|
Python
|
bsd-3-clause
| 565
| 0.033628
|
def extractMkkbunkotoikemenWordpressCom(item):
'''
Parser for 'mkkbunkotoikemen.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
            return buildReleaseMessage(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
cosee-concourse/slack-upload-resource
|
opt/resource/slack_post.py
|
Python
|
mit
| 3,302
| 0.004543
|
import os
from concourse_common import jsonutil
def post_successful_tests(filepath, payload, sc, total_string):
sc.api_call("chat.postMessage", as_user=True,
channel=jsonutil.get_params_value(payload, "channel"),
attachments=[{"fallback": "Test Results",
"pretext": "Test results of " + os.environ["BUILD_JOB_NAME"] + " in version " + open(
os.path.join(filepath, jsonutil.get_params_value(payload, "version"))).read(),
"color": "good",
"title": "Test Results: ",
"fields": [{"value": total_strin
|
g,
"short": False}]}])
def post_failed_tests(failed_string, filepath, payload, sc, total_string):
sc.api_call(
"chat.postMessage", as_user=True,
channel=jsonutil.get_params_value(payload, "channel"),
attachments=[{"fallback": "Test Results",
"pretext": "Test results of " + os.environ["BUILD_JOB_NAME"] + " in version " + open(
os.path.join(filepath, jsonutil.get_params_value(payload, "version"))).read(),
"color": "danger",
"text": total_string,
"title": "Test Results",
"fields": [{"title": "Failures: ",
"value": failed_string,
"short": False}]}])
def post_success_message(filepath, payload, sc):
sc.api_call("chat.postMessage", as_user=True,
channel=jsonutil.get_params_value(payload, "channel"),
attachments=[{"fallback": "Pipeline Success of version " + open(
os.path.join(filepath, jsonutil.get_params_value(payload, "version"))).read(),
"pretext": "Pipeline Success",
"color": "good",
"title": "Success:",
"fields": [
{"value": "Version " + open(os.path.join(filepath,
jsonutil.get_params_value(payload, "version"))).read() +
" successfully finished the Pipeline with Job: " +
os.environ["BUILD_JOB_NAME"],
"short": False}]}])
def post_failure_message(filepath, payload, sc):
sc.api_call("chat.postMessage", as_user=True,
channel=jsonutil.get_params_value(payload, "channel"),
attachments=[{"fallback": "Pipeline Failure in " + jsonutil.get_params_value(payload, "pipeline_step"),
"pretext": "Pipeline Failure",
"color": "danger",
"title": "Failure:",
"fields": [{"value": os.environ["BUILD_JOB_NAME"] + " in version " + open(
os.path.join(filepath, jsonutil.get_params_value(payload, "version"))).read() + "failed",
"short": False}]}])
|
HEPData/hepdata
|
hepdata/modules/search/webpack.py
|
Python
|
gpl-2.0
| 1,105
| 0
|
#
# This file is part of HEPData.
# Copyright (C) 2016 CERN.
#
# HEPData is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# HEPData is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HEPData; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
from flask_webpackext import WebpackBundle
search_js = WebpackBundle(
__name__,
'assets',
entry={
'hepdata-search-js': './js/hepdata_search.js',
'hepdata-search-facets-js': './js/hepdata_search_facets.js',
},
dependencies={
"d3": "~3.5.12",
"d3-tip": "~0.6.7",
"typeahead.js": "0.11.1"
}
)
|
thandal/passe-partout
|
pp/pp_parse.py
|
Python
|
mit
| 1,809
| 0.028192
|
# The following parse_* methods are from bitcoin-abe
import base58
def parse_TxIn(vds):
d = {}
d['prevout_hash'] = vds.read_bytes(32)
d['prevout_n'] = vds.read_uint32()
d['scriptSig'] = vds.read_bytes(vds.read_compact_size())
d['sequence'] = vds.read_uint32()
return d
def parse_TxOut(vds):
d = {}
d['value'] = vds.read_int64()
raw = vds.read_bytes(vds.read_compact_size())
d['scriptPubKey'] = raw
if len(raw) == 25 and raw[0] == '\x76' and raw[1] == '\xa9' and raw[2] == '\x14':
d['address'] = base58.hash_160_to_bc_address(raw[3:-2])
return d
def parse_Transaction(vds):
d = {}
start = vds.read_cursor
d['version'] = vds.read_int32()
n_vin = vds.read_compact_size()
d['txIn'] = []
for i in xrange(n_vin):
d['txIn'].append(parse_TxIn(vds))
n_vout = vds.read_compact_size()
d['txOut'] = []
for i in xrange(n_vout):
d['txOut'].append(parse_TxOut(vds))
d['lockTime'] = vds.read_uint32()
d['tx'] = vds.input[start:vds.read_cursor]
return d
def parse_BlockHeader(vds):
d = {}
blk_magic = vds.read_bytes(4)
    #if blk_magic != '\xf9\xbe\xb4\xd9':
# if blk_magic != '\xbf\xfa\xda\xb5':
# raise Exception('Bad magic' + str(blk_magic))
# return d
blk_length = vds.read_int32()
header_start = vds.read_cursor
d['version'] = vds.read_int32()
d['hashPrev'] = vds.read_bytes(32)
d['hashMerkleRoot'] = vds.read_bytes(32)
d['nTime'] = vds.read_uint32()
d['nBits'] = vds.read_uint32()
d['nNonce'] = vds.read_uint32()
header_end = vds.read_cursor
d['__header__'] = vds.input[header_start:header_end]
return d
def parse_Block(vds):
d = parse_BlockHeader(vds)
d['transactions'] = []
nTransactions = vds.read_compact_size()
for i in xrange(nTransactions):
d['transactions'].append(parse_Transaction(vds))
return d
|
QuantCrimAtLeeds/PredictCode
|
tests/network_test.py
|
Python
|
artistic-2.0
| 25,390
| 0.037968
|
import pytest
import unittest.mock as mock
import open_cp.network as network
import open_cp.data
import numpy as np
import datetime
def test_PlanarGraphBuilder():
b = network.PlanarGraphBuilder()
assert b.add_vertex(0.2, 0.5) == 0
b.set_vertex(5, 1, 2)
b.add_edge(0, 5)
g = b.build()
assert g.vertices == {0:(0.2,0.5), 5:(1,2)}
assert g.edges == [(0,5)]
b1 = network.PlanarGraphBuilder(g)
assert b1.add_vertex(5,6) == 6
b1.add_edge(5,6)
g1 = b1.build()
assert g1.vertices == {0:(0.2,0.5), 5:(1,2), 6:(5,6)}
assert g1.edges == [(0,5), (5,6)]
# Check haven't mutated g
assert g.vertices == {0:(0.2,0.5), 5:(1,2)}
assert g.edges == [(0,5)]
def test_PlanarGraphBuilder_remove_unused_vertices():
b = network.PlanarGraphBuilder()
b.add_vertex(0.2, 0.5)
b.add_vertex(0.6, 0.4)
b.set_vertex(5, 1, 2)
b.add_edge(0, 5)
assert len(b.vertices) == 3
b.remove_unused_vertices()
assert len(b.vertices) == 2
@pytest.fixture
def planar_graph_geo_builder():
b = network.PlanarGraphGeoBuilder()
b.add_path([(0,0),(1,1),(5.1,1.2)])
b.add_path([(2,0),(1,1),(0,5),(5.1,1.2)])
b.add_path([(0,0),(0,5)])
return b
def test_PlanarGraphGeoBuilder(planar_graph_geo_builder):
b = planar_graph_geo_builder
assert b.coord_nodes == {(0,0):[0], (1,1):[1,4], (5.1,1.2):[2],
(2,0):[3], (0,5):[5]}
assert b.edges == [(0,1), (1,2), (3,4), (4,5), (5,2), (0,5)]
def test_PlanarGraphGeoBuilder_builds(planar_graph_geo_builder):
g = planar_graph_geo_builder.build()
assert g.vertices == {0:(0,0), 1:(1,1), 2:(5.1,1.2), 3:(2,0), 4:(1,1), 5:(0,5)}
assert g.edges == [(0,1), (1,2), (3,4), (4,5), (5,2), (0,5)]
assert g.number_edges == 6
assert g.bounds == (0, 0, 5.1, 5)
@pytest.fixture
def planar_graph_node_builder():
b = network.PlanarGraphNodeBuilder()
b.add_path([(0,0),(1,1),(5.1,1.2)])
b.add_edge(0,0,2,2)
b.add_path([(1,0),(1,1),(2,2)])
return b
def test_PlanarGraphNodeBuilder(planar_graph_node_builder):
b = planar_graph_node_builder
assert b.coord_nodes == [(0,0), (1,1), (5.1,1.2), (2,2), (1,0)]
assert b.edges == [(0,1), (1,2), (0,3), (4,1), (1,3)]
def test_PlanarGraphNodeBuilder_builds(planar_graph_node_builder):
g = planar_graph_node_builder.build()
assert g.vertices == {0:(0,0), 1:(1,1), 2:(5.1,1.2), 3:(2,2), 4:(1,0)}
assert g.edges == [(0,1), (1,2), (0,3), (4,1), (1,3)]
def test_PlanarGraphNodeBuilder_tolerance():
b = network.PlanarGraphNodeBuilder()
b.tolerance = 0.2
assert b.tolerance == pytest.approx(0.2)
b.add_path([(0,0),(1,1),(5.1,1.2)])
b.add_edge(0.1,0.01,2,2)
assert b.coord_nodes == [(0,0), (1,1), (5.1,1.2), (2,2)]
assert b.edges == [(0,1), (1,2), (0,3)]
def test_PlanarGraphNodeOneShot():
nodes = [(0,0), (1,1), (5.1,1.2), (0.1,0.01), (2,2)]
b = network.PlanarGraphNodeOneShot(nodes, 0.2)
r = b.add_path([(0,0),(1,1),(5.1,1.2)])
assert r == [(0,1), (1,2)]
r = b.add_edge(0.1,0.01,2,2)
assert r == (0,3)
g = b.build()
assert set(g.vertices.values()) == {(0,0), (1,1), (5.1,1.2), (2,2)}
assert len(g.edges) == 3
assert [g.vertices[x] for x in g.edges[0]] == [(0,0), (1,1)]
assert [g.vertices[x] for x in g.edges[1]] == [(1,1), (5.1,1.2)]
assert [g.vertices[x] for x in g.edges[2]] == [(0,0), (2,2)]
def test_PlanarGraphNodeOneShot_remove_duplicates():
nodes = [(0,0), (1,1), (5.1,1.2), (0.1,0.01), (2,2)]
b = network.PlanarGraphNodeOneShot(nodes, 0.2)
b.add_path([(0,0),(1,1),(5.1,1.2)])
b.add_edge(0.1,0.01,2,2)
b.add_edge(1,1,1,1)
b.add_edge(0.1,0.01,2,2)
with pytest.raises(ValueError):
b.build()
b.remove_duplicate_edges()
g = b.build()
assert set(g.vertices.values()) == {(0,0), (1,1), (5.1,1.2), (2,2)}
assert len(g.edges) == 3
assert [g.vertices[x] for x in g.edges[0]] == [(0,0), (1,1)]
assert [g.vertices[x] for x in g.edges[1]] == [(1,1), (5.1,1.2)]
assert [g.vertices[x] for x in g.edges[2]] == [(0,0), (2,2)]
def test_PlanarGraph_constructs():
with pytest.raises(ValueError):
network.PlanarGraph([(0,1,2), (0,2,3)], [])
with pytest.raises(ValueError):
network.PlanarGraph([(0,1,2), (1,2,3)], [(0,0)])
g = network.PlanarGraph([(0,1,2), (1,2,3)], [(0,1)])
assert g.vertices == {0:(1,2), 1:(2,3)}
assert g.edges == [(0,1)]
def test_PlanarGraph_projects():
g = network.PlanarGraph([(0,1,2), (1,2,3)], [(0,1)])
def proj(x, y):
return x-1, y-2
gg = g.project(proj)
assert gg.vertices[0] == pytest.approx((0,0))
assert gg.vertices[1] == pytest.approx((1,1))
assert set(gg.vertices.keys()) == {0,1}
assert gg.edges == [(0,1)]
@pytest.fixture
def graph1():
b = network.PlanarGraphGeoBuilder()
b.add_path([(0,0), (10,0)])
b.add_path([(0,1), (5,5), (9,1)])
return b.build()
def test_derived_graph1(graph1):
g = network.to_derived_graph(graph1)
assert g.vertices == { (0,1), (2,3), (3,4) }
assert g.edges == [((2,3), (3,4))]
    assert g.lengths == [pytest.approx((np.sqrt(25+16)+np.sqrt(32))/2)]
def test_shortest_edge_paths(graph1):
dists, prevs = network.shortest_edge_paths(graph1, 0)
assert dists == {0:5, 1:5}
assert prevs == {0:0, 1:1}
dists, prevs = network.shortest_edge_paths(graph1, 0, 0.1)
assert dists == {0:1, 1:9}
assert prevs == {0:0, 1:1}
def test_shortest_paths(graph1):
dists, prevs = network.shortest_paths(graph1, 0)
assert dists == {0:0, 1:10, 2:-1, 3:-1, 4:-1}
assert prevs == {0:0, 1:0}
dists, prevs = network.shortest_paths(graph1, 1)
assert prevs == {1:1, 0:1}
assert dists == {0:10, 1:0, 2:-1, 3:-1, 4:-1}
dists, prevs = network.shortest_paths(graph1, 2)
assert dists == {0:-1, 1:-1, 2:0,
3:pytest.approx(np.sqrt(25+16)),
4:pytest.approx(np.sqrt(25+16)+np.sqrt(32))}
assert prevs == {2:2, 3:2, 4:3}
def test_PlanarGraph_lengths(graph1):
assert graph1.length(0) == pytest.approx(10)
assert graph1.length(1) == pytest.approx(np.sqrt(25+16))
assert graph1.length(2) == pytest.approx(np.sqrt(32))
def test_PlanarGraph_as_quads(graph1):
exp = [ (0,0,10,0), (0,1,5,5), (5,5,9,1) ]
x = graph1.as_quads()
np.testing.assert_allclose(x, exp)
def test_PlanarGraph_as_lines(graph1):
exp = [ ((0,0),(10,0)), ((0,1),(5,5)), ((5,5),(9,1)) ]
x = graph1.as_lines()
np.testing.assert_allclose(x, exp)
def test_PlanarGraph_project(graph1):
edge, t = graph1.project_point_to_graph(5,1)
assert edge == (0, 1)
assert t == pytest.approx(0.5)
edge, t = graph1.project_point_to_graph(-0.5, -0.5)
assert edge == (0, 1)
assert t == 0
edge, t = graph1.project_point_to_graph(-0.1, 1)
assert edge == (2, 3)
assert t == 0
edge, t = graph1.project_point_to_graph(5, 5.2)
assert (edge, t) == ((2,3), 1) or (edge, t) == ((3,4), 0)
edge, t = graph1.project_point_to_graph(9, .4)
assert edge == (0, 1)
assert t == pytest.approx(0.9)
edge, t = graph1.project_point_to_graph(9, .6)
assert edge == (3, 4)
assert t == 1
edge, t = graph1.project_point_to_graph(2.5, 2)
assert edge == (2, 3)
assert t == pytest.approx(0.402439024)
def test_io(graph1):
js = graph1.dump_json()
import json
out = json.loads(js)
assert set(out.keys()) == {"keys", "xcoords", "ycoords", "edges"}
g = network.PlanarGraph.from_json(js)
assert network.approximately_equal(graph1, g)
b = graph1.dump_bytes()
g = network.PlanarGraph.from_bytes(b)
assert network.approximately_equal(graph1, g)
@pytest.fixture
def graph2():
b = network.PlanarGraphGeoBuilder()
b.add_path([(0,10), (1,10)])
b.add_path([(1,10), (2,11), (3, 11), (4,10)])
b.add_path([(1,10), (2,9), (3, 9), (4,10)])
b.add_path([(2,9), (2,11)])
b.add_path([(4,10), (5,10)])
return b.build()
def test_graph2(graph2):
assert graph2.vertices == {0:(0,10), 1:(1,10), 2:(2,11), 3:(3,11), 4:(4,10),
|
oleiade/Elevator
|
elevator/__init__.py
|
Python
|
mit
| 177
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
version = (0, "5d")
__title__ = "Elevator"
__author__ = "Oleiade"
__license__ = "MIT"
__version__ = '.'.join(map(str, version))
|
coala-analyzer/coala-quickstart
|
tests/generation/UtilitiesTest.py
|
Python
|
agpl-3.0
| 11,382
| 0.000088
|
import inspect
import itertools
import types
import unittest
from tempfile import NamedTemporaryFile
from tests.test_bears.AllKindsOfSettingsDependentBear import (
AllKindsOfSettingsDependentBear)
from coala_quickstart.generation.Utilities import (
contained_in,
get_hashbang,
get_default_args, get_all_args,
search_for_orig, concatenate, peek,
split_by_language,
get_language_from_hashbang)
from coalib.results.SourcePosition import SourcePosition
from coalib.results.SourceRange import SourceRange
def foo():
pass
def foo_bar(n):
def bar():
return n+1
return bar
class TestAdditionalFunctions(unittest.TestCase):
def second(func):
def wrapper():
return func()
return wrapper
def first():
pass
third = second(first)
fourth = second(second(first))
def test_search_for_orig(self):
self.assertEqual(types.MethodType(search_for_orig(self.third, 'first'),
self), self.first)
self.assertEqual(types.MethodType(search_for_orig(self.fourth,
'first'),
self), self.first)
self.assertEqual(search_for_orig(self.first, 'first'), None)
self.assertEqual(search_for_orig(self.first, "bar"), None)
self.assertEqual(search_for_orig(self.first, "first"), None)
# function without closure
self.assertEqual(search_for_orig(foo, "bar"), None)
self.assertEqual(search_for_orig(foo, "foo"), None)
# function with closure
func = foo_bar(3)
self.assertEqual(search_for_orig(func, "bar"), None)
def test_get_default_args(self):
self.assertEqual(get_default_args(AllKindsOfSettingsDependentBear.run),
{'chars': False,
'dependency_results': {},
'max_line_lengths': 1000,
'no_chars': 79,
'use_spaces': None,
'use_tabs': False})
def test_get_all_args(self):
empty = inspect._empty
self.assertEqual(get_all_args(AllKindsOfSettingsDependentBear.run),
{'self': empty, 'file': empty, 'filename': empty,
'configs': empty,
'use_bears': empty, 'no_lines': empty,
'use_spaces': None,
'use_tabs': False, 'max_line_lengths': 1000,
'no_chars': 79,
'chars': False, 'dependency_results': {}})
class TestHashBang(unittest.TestCase):
def test_missing_file(self):
self.assertIsNone(get_hashbang('does_not_exist'))
def test_with_bash(self):
with NamedTemporaryFile(mode='w+t', delete=False) as temp_file:
temp_file.write('#!bin/bash\n')
temp_file.close()
self.assertEqual(get_hashbang(temp_file.name), '#!bin/bash')
def test_no_eol(self):
with NamedTemporaryFile(mode='w+t', delete=False) as temp_file:
temp_file.write('#!bin/bash')
temp_file.close()
self.assertEqual(get_hashbang(temp_file.name), '#!bin/bash')
def test_with_slash(self):
with NamedTemporaryFile(mode='w+t', delete=False) as temp_file:
temp_file.write('#!/bin/bash\n')
temp_file.close()
self.assertEqual(get_hashbang(temp_file.name), '#!/bin/bash')
def test_with_space(self):
with NamedTemporaryFile(mode='w+t', delete=False) as temp_file:
temp_file.write('#!/bin/bash \n')
temp_file.close()
self.assertEqual(get_hashbang(temp_file.name), '#!/bin/bash')
def test_env(self):
with NamedTemporaryFile(mode='w+t', delete=False) as temp_file:
temp_file.write('#!/bin/env bash\n')
temp_file.close()
self.assertEqual(get_hashbang(temp_file.name), '#!/bin/env bash')
def test_non_unicode_file(self):
with NamedTemporaryFile(mode='w+b', delete=False) as temp_file:
temp_file.write(b'\2000x80')
temp_file.close()
self.assertIsNone(get_hashbang(temp_file.name))
def test_empty_file(self):
with NamedTemporaryFile(mode='w+t', delete=False) as temp_file:
temp_file.write('\n')
temp_file.close()
self.assertIsNone(get_hashbang(temp_file.name))
def test_no_bang(self):
with NamedTemporaryFile(mode='w+t', delete=False) as temp_file:
temp_file.write('#bin/bash')
temp_file.close()
self.assertIsNone(get_hashbang(temp_file.name))
def test_no_hash(self):
with NamedTemporaryFile(mode='w+t', delete=False) as temp_file:
temp_file.write('!bin/bash')
temp_file.close()
self.assertIsNone(get_hashbang(temp_file.name))
def test_get_language_from_hashbang(self):
self.assertEqual(get_language_from_hashbang('#!/usr/bin/env python'),
'python')
self.assertEqual(get_language_from_hashbang('#!bin/bash'),
'bash')
self.assertEqual(get_language_from_hashbang('#!/bin/bash'),
'bash')
def test_split_by_language(self):
with NamedTemporaryFile(delete=False, suffix='.py') as temp_file1, \
NamedTemporaryFile(delete=False, suffix='.txt') as temp_file2, \
NamedTemporaryFile(delete=False, suffix='.txt') as temp_file3:
temp_file3.write(b'#!bin/python')
temp_file3.close()
langs = split_by_language(
[temp_file1.name, temp_file2.name, temp_file3.name])
self.assertCountEqual(
langs,
{
'all': [temp_file1.name, temp_file3.name],
'python': [temp_file1.name, temp_file3.name],
}
)
class TestDataStructuresOperationsFunctions(unittest.TestCase):
def test_concatenate(self):
dict1 = {'1': {'a', 'b', 'c'},
'2': {'d', 'e', 'f'},
'3': {'g', 'h', 'i'}}
dict2 = {'4': {'j', 'k', 'l'},
'2': {'m', 'n', 'o'},
'5': {'p', 'q', 'r'}}
result_dict = {'1': {'a', 'b', 'c'},
'2': {'d', 'e', 'f', 'm', 'n', 'o'},
'3': {'g', 'h', 'i'},
'4': {'j', 'k', 'l'},
'5': {'p', 'q', 'r'}}
ret_val = concatenate(dict1, dict2)
self.assertEqual(ret_val, result_dict)
def test_peek(self):
def give_gen():
for i in range(1, 5):
yield i
def give_empty_gen():
for i in range(1, 1):
yield i
obj = give_gen()
for i in range(1, 5):
num, new_obj = peek(obj)
obj, new_obj = itertools.tee(obj)
self.assertEqual(i, num)
ret_val = peek(obj)
obj = give_empty_gen()
ret_val_1 = peek(obj)
self.assertEqual(ret_val, None)
self.assertEqual(ret_val_1, None)
class TestContainedIn(unittest.TestCase):
def test_contained_in_1(self):
start = SourcePosition('a.py', line=1, column=5)
end = SourcePosition('a.py', line=5, column=1)
smaller = SourceRange(start, end)
start = SourcePosition('a.py', line=1, column=5)
end = SourcePosition('a.py', line=5, column=2)
bigger = SourceRange(start, end)
self.assertTrue(contained_in(smaller, bigger))
start = SourcePosition('a.py', line=1, column=4)
end = SourcePosition('a.py', line=5, column=1)
bigger = SourceRange(start, end)
self.assertTrue(contained_in(smaller, bigger))
start = SourcePosition('a.py', line=1, column=5)
end = SourcePosition('a.py', line=5, column=1)
bigger = SourceRange(start, end)
self.assertTrue(contained_in(smaller, bigger))
def test_contained_in_2(self):
start = Source
|
karanisverma/feature_langpop
|
librarian/utils/timer.py
|
Python
|
gpl-3.0
| 1,356
| 0
|
"""
timer.py: Request timer statistical tool
Code adapted from Bottle documentation
Copyright 2014-2015, Outernet Inc.
Some rights reserved.
This software is free software licensed under the terms of GPLv3. See COPYING
file that comes with the source code, or http://www.gnu.org/licenses/gpl.txt.
"""
from __future__ import division
import time
from functools import wraps
try:
import resource
except ImportError:
    # Platform does not support the ``resource`` module.
resource = None
from bottle import response
def get_mem():
if not resource:
return 0
rss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss # in KB
return round(rss / 1024, 3)
def request_timer(label):
t_header = str('X-%s-Time' % label)
m_header = str('X-%s-Mem' % label)
def _timer(callback):
@wraps(callback)
def wrapper(*args, **kwargs):
start = time.time()
res = callback(*args, **kwargs)
delta = time.time() - start
response.headers[t_header] = str(
round(delta * 1000, 4)) + 'ms'
response.headers[m_header] = str(get_mem())
return res
return wrapper
return _timer
def total_timer_plugin(app):
app.install(request_timer('Total'))
def handler_timer_plugin(app):
app.install(request_timer('Handler'))
|
erigones/esdc-ce
|
gui/docs/views.py
|
Python
|
apache-2.0
| 1,344
| 0.000744
|
from django.shortcuts import render, resolve_url
from django.contrib.auth.decorators import login_required
from gui.decorators import profile_required
from gui.utils import collect_view_data
from gui.signals import view_faq
from api.decorators import setting_required
@login_required
@profile_required
def api(request):
"""
API Documentation view (via iframe).
"""
context = collect_view_data(request, 'api_docs')
return render(request, 'gui/docs/api.html', context)
@login_required
@profile_required
def user_guide(request):
"""
User Guide view (via iframe).
"""
context = collect_view_data(request, 'user_guide')
return render(request, 'gui/docs/user_guide.html', context)
@login_required
@profile_required
@setting_required('FAQ_ENABLED', check_settings=False) # FAQ must be enabled only in DC
def faq(request):
"""
Frequently Asked Questions view.
"""
dc_settings = request.dc.settings
context = collect_view_data(request, 'faq')
context['support_email'] = dc_settings.SUPPORT_EMAIL
if dc_settings.SUPPORT_ENABLED:
context['support_section_url'] = resolve_url('add_ticket')
else:
context['support_section_url'] = '#'
view_faq.send(sender='faq', request=request, context=context)
return render(request, 'gui/docs/faq.html', context)
|
Ecotrust/F2S-MOI
|
moi/recommendations/migrations/0002_auto_20151230_0007.py
|
Python
|
apache-2.0
| 1,297
| 0.002313
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-30 00:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0010_change_on_delete_behaviour'),
('recommendations', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='recommendation',
name='displayTitle',
field=wagtail.wagtailcore.fields.RichTextField(blank=True),
),
migrations.AddField(
model_name='recommendation',
name='image',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'),
),
migrations.AddField(
model_name='recommendation',
name='main_content',
field=wagtail.wagtailcore.fields.RichTextField(blank=True, default=None, null=True),
),
migrations.AddField(
model_name='recommendation',
name='sub_title',
field=wagtail.wagtailcore.fields.RichTextField(blank=True, default=None, null=True),
),
]
|
sqor/3rdeye
|
fetch_repos.py
|
Python
|
mit
| 789
| 0.032953
|
"""
To use this, create a settings.py file and define these variables:
TOKEN=<oauth token for github>
ORG=<your org in github>
DEST=<Path to download to>
"""
from github import Github
from subprocess import call
import os
from settings import TOKEN, ORG, DEST
def download():
"""Quick and Dirty Download all repos function"""
os.chdir(DEST)
print "Downloading to destination: ", os.getcwd()
g = Github(TOKEN)
repos = []
for repo in g.get_organization(ORG).get_repos():
print "Fetching Repo Name: %s" % repo.name
repos.append("git@github.com:%s/%s.git" %
(ORG, repo.name))
total = len(repos)
print "Found %s repos" % total
count = 0
for repo in repos:
count +=1
print "Cloning Repo [%s]/[%s]: %s" % (count, total, repo)
call([u'git', u'clone', repo])
download()
|
higee/project_euler
|
11-20/12.py
|
Python
|
mit
| 439
| 0.009112
|
def count_factor(n, factor=0):
for i in range(1, int(n**0.5)+1):
if n % i == 0:
factor += 2
return factor
def nth_triangular_number(n):
    return int(n+(n*(n-1))/2)
def find_triangular_number_over(k, n=0):
while count_factor(nth_triangular_number(n)) <= k:
n += 1
return nth_triangular_number(n)
def main():
print(find_triangular_number_over(500))
if __name__ == "__main__":
    main()
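# Quick illustrative check, not part of the original solution: the 7th
# triangular number is 7 + 7*6/2 = 28, whose six divisors are
# 1, 2, 4, 7, 14, 28, so count_factor(nth_triangular_number(7)) == 6 and
# find_triangular_number_over(5) == 28 -- the worked example from the
# Project Euler problem statement.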
|
jmluy/xpython
|
exercises/practice/robot-simulator/robot_simulator.py
|
Python
|
mit
| 201
| 0
|
# Globals for the directions
# Change the values as you see fit
EAST = None
NORTH = None
WEST = None
SOUTH = None
class Robot:
def __init__(self, direction=NORTH, x_pos=0, y_pos=0):
pass
|
plotly/python-api
|
packages/python/plotly/plotly/validators/scatter/unselected/textfont/_color.py
|
Python
|
mit
| 470
| 0.002128
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
        self, plotly_name="color", parent_name="scatter.unselected.textfont", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
piller-imre/grimoire-tk
|
grimoire/document.py
|
Python
|
gpl-3.0
| 806
| 0.003722
|
"""
Document class definition
"""
class Document(object):
"""Represents a document"""
def __init__(self, id, name, type, path):
if '\n' in name:
            raise ValueError('The document name cannot contain newline character!')
        if '\n' in type:
raise ValueError('The document type cannot contain newline character!')
if '\n' in path:
raise ValueError('The document path cannot contain newline character!')
self._id = id
self._name = name
self._type = type
self._path = path
@property
def id(self):
return self._id
@property
def name(self):
return self._name
@property
def type(self):
return self._type
@property
def path(self):
return self._path
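# Illustrative usage sketch, not part of the original module:
#
#     doc = Document(1, "notes", "text", "/home/user/notes.txt")
#     doc.name                                   # -> "notes"
#     Document(2, "bad\nname", "text", "x.txt")  # raises ValueError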
|
xenserver/xs-cbt-samples
|
cbt_import_whole_vdi.py
|
Python
|
bsd-3-clause
| 2,718
| 0.000368
|
#!/usr/bin/env python
"""
For a given VDI and import file this script will import a VDI onto an XS host.
This script needs to be run whenever you want to restore a VDI to a previous
version.
example: python cbt_import_whole_vdi.py -ip <host address> -u <host username>
-p <host password> -v <vdi uuid> -f <import VDI filename>
"""
import urllib3
import requests
import XenAPI
import argparse
def create_new_vdi(session, sr, size):
vdi_record = {
"SR": sr,
"virtual_size": size,
"type": "user",
"sharable": False,
"read_only": False,
"other_config": {},
"name_label": "CBT backup"
}
vdi_ref = session.xenapi.VDI.create(vdi_record)
vdi_uuid = session.xenapi.VDI.get_uuid(vdi_ref)
return vdi_uuid
def import_vdi(host, session_id, vdi_uuid, file_format, import_path):
url = ('https://%s/import_raw_vdi?session_id=%s&vdi=%s&format=%s'
% (host, session_id, vdi_uuid, file_format))
with open(import_path, 'r') as filehandle:
        # ToDo: Security - We need to verify the SSL certificate here.
# Depends on CP-23051.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
with requests.Session() as session:
request = session.put(url, filehandle, verify=False)
request.raise_for_status()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-ip', '--host-ip', dest='host')
parser.add_argument('-u', '--username', dest='username')
parser.add_argument('-p', '--password', dest='password')
parser.add_argument('-v', '--vdi-uuid', dest='vdi_uuid')
parser.add_argument('-f', '--filename', dest='path')
parser.add_argument('--as-new-vdi', dest='new_vdi', action='store_const',
const=True, default=False,
help='Create a new VDI for the import')
args = parser.parse_args()
session = XenAPI.Session("https://" + args.host, ignore_ssl=True)
session.login_with_password(args.username, args.password, "0.1",
"CBT example")
try:
vdi_uuid = args.vdi_uuid
if args.new_vdi:
vdi_ref = session.xenapi.VDI.get_by_uuid(args.vdi_uuid)
size = session.xenapi.VDI.get_virtual_size(vdi_ref)
sr_ref = session.xenapi.VDI.get_SR(vdi_ref)
vdi_uuid = create_new_vdi(session, sr_ref, size)
import_vdi(args.host, session._session, vdi_uuid, 'raw',
args.path)
print vdi_uuid
finally:
session.xenapi.session.logout(session)
if __name__ == "__main__":
main()
|
forslund/mycroft-core
|
mycroft/util/log.py
|
Python
|
apache-2.0
| 4,287
| 0
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Mycroft Logging module.
This module provides the LOG pseudo function for quickly creating a logger instance
for use.
The default log level of the logger created here can ONLY be set in
/etc/mycroft/mycroft.conf or ~/.config/mycroft/mycroft.conf
The default log level can also be changed programmatically by setting the
LOG.level parameter.
"""
import inspect
import logging
import sys
import mycroft
def getLogger(name="MYCROFT"):
"""Depreciated. Use LOG instead"""
return logging.getLogger(name)
def _make_log_method(fn):
@classmethod
def method(cls, *args, **kwargs):
cls._log(fn, *args, **kwargs)
method.__func__.__doc__ = fn.__doc__
return method
class LOG:
"""
Custom logger class that acts like logging.Logger
The logger name is automatically generated by the module of the caller
Usage:
>>> LOG.debug('My message: %s', debug_str)
13:12:43.673 - :<module>:1 - DEBUG - My message: hi
>>> LOG('custom_name').debug('Another message')
13:13:10.462 - custom_name - DEBUG - Another message
"""
_custom_name = None
handler = None
level = logging.getLevelName('INFO')
# Copy actual logging methods from logging.Logger
# Usage: LOG.debug(message)
debug = _make_log_method(logging.Logger.debug)
info = _make_log_method(logging.Logger.info)
warning = _make_log_method(logging.Logger.warning)
error = _make_log_method(logging.Logger.error)
exception = _make_log_method(logging.Logger.exception)
@classmethod
def init(cls):
""" Initializes the class, sets the default log level and creates
the required handlers.
"""
log_message_format = (
'{asctime} | {levelname:8} | {process:5} | {name} | {message}'
)
formatter = logging.Formatter(log_message_format, style='{')
formatter.default_msec_format = '%s.%03d'
cls.handler = logging.StreamHandler(sys.stdout)
cls.handler.setFormatter(formatter)
        config = mycroft.configuration.Configuration.get(cache=False,
remote=False)
if config.get('log_format'):
formatter = logging.Formatter(config.get('log_format'), style='{')
cls.handler.setFormatter(formatter)
cls.level = logging.getLevelName(config.get('log_level', 'INFO'))
# Enable logging in external modules
cls.create_logger('').setLevel(cls.level)
@classmethod
def create_logger(cls, name):
logger = logging.getLogger(name)
logger.propagate = False
logger.addHandler(cls.handler)
return logger
def __init__(self, name):
LOG._custom_name = name
@classmethod
def _log(cls, func, *args, **kwargs):
if cls._custom_name is not None:
name = cls._custom_name
cls._custom_name = None
else:
# Stack:
# [0] - _log()
# [1] - debug(), info(), warning(), or error()
# [2] - caller
try:
stack = inspect.stack()
# Record:
# [0] - frame object
# [1] - filename
# [2] - line number
# [3] - function
# ...
record = stack[2]
mod = inspect.getmodule(record[0])
module_name = mod.__name__ if mod else ''
name = module_name + ':' + record[3] + ':' + str(record[2])
except Exception:
# The location couldn't be determined
name = 'Mycroft'
func(cls.create_logger(name), *args, **kwargs)
|
Azure/azure-sdk-for-python
|
sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_09_01/aio/_configuration.py
|
Python
|
mit
| 3,315
| 0.004223
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class WebSiteManagementClientConfiguration(Configuration):
"""Configuration for WebSiteManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Your Azure subscription ID. This is a GUID-formatted string (e.g. 00000000-0000-0000-0000-000000000000).
:type subscription_id: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
super(WebSiteManagementClientConfiguration, self).__init__(**kwargs)
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2020-09-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-web/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
|
30loops/libthirty
|
libthirty/documents.py
|
Python
|
bsd-3-clause
| 6,039
| 0.001325
|
from docar import Document, Collection
from docar import fields
from docar.backends.http import HttpBackendManager
from libthirty.state import uri, app_uri, service_uri, resource_collection_uri
from libthirty.validators import naming, max_25_chars, naming_with_dashes
import os
HttpBackendManager.SSL_CERT = os.path.join(
os.path.dirname(__file__), "ssl", "StartSSL_CA.pem")
class User(Document):
username = fields.StringField(validators=[naming, max_25_chars])
email = fields.StringField()
    is_active = fields.BooleanField()
class Account(Document):
name = fields.StringField(validators=[naming, max_25_chars])
#users = fields.CollectionField(User)
class Meta:
backend_type = 'http'
identifier = 'name'
class CnameRecord(Document):
record = fields.StringField()
class Meta:
backend_type = 'http'
identifier = 'record'
class CnameRecords(Collection):
document = CnameRecord
class EnvironmentVariable(Document):
id = fields.NumberField(render=False, optional=True)
name = fields.StringField()
value = fields.StringField()
class Meta:
backend_type = 'http'
class EnvironmentVariables(Collection):
document = EnvironmentVariable
class Postgres(Document):
name = fields.StringField(validators=[naming_with_dashes, max_25_chars],
read_only=True, optional=True)
label = fields.StaticField(value="postgres")
variant = fields.ChoicesField(choices=['postgres_micro'],
default="postgres_micro")
username = fields.StringField(optional=True, read_only=True)
password = fields.StringField(optional=True, read_only=True)
host = fields.StringField(optional=True, read_only=True)
port = fields.NumberField(optional=True, read_only=True)
published = fields.BooleanField(default=False, read_only=True)
class Meta:
backend_type = 'http'
identifier = 'name'
context = ['account']
def post_uri(self):
return '%s/services' % app_uri()
def uri(self):
return service_uri(service='postgres')
class PostgresCollection(Collection):
document = Postgres
def uri(self):
return resource_collection_uri(label='postgres')
class Mongodb(Document):
name = fields.StringField(validators=[naming_with_dashes, max_25_chars],
read_only=True, optional=True)
label = fields.StaticField(value="mongodb")
variant = fields.ChoicesField(choices=['mongodb_micro'],
default='mongodb_micro')
username = fields.StringField(optional=True, read_only=True)
password = fields.StringField(optional=True, read_only=True)
host = fields.StringField(optional=True, read_only=True)
port = fields.NumberField(optional=True, read_only=True)
published = fields.BooleanField(default=False, read_only=True)
class Meta:
backend_type = 'http'
identifier = 'name'
context = ['account']
def post_uri(self):
return '%s/services' % app_uri()
def uri(self):
return service_uri(service='mongodb')
class MongodbCollection(Collection):
document = Mongodb
def uri(self):
return resource_collection_uri(label='mongodb')
class Repository(Document):
name = fields.StringField(validators=[naming_with_dashes, max_25_chars],
read_only=True, optional=True, render=False)
label = fields.StaticField(value="repository")
variant = fields.ChoicesField(choices=['git'], default='git')
location = fields.StringField()
ssh_key = fields.StringField(optional=True)
class Meta:
backend_type = 'http'
identifier = 'name'
context = ['account']
def post_uri(self):
return '%s/services' % app_uri()
def uri(self):
return service_uri(service='repository')
class RepositoryCollection(Collection):
document = Repository
def uri(self):
return resource_collection_uri(label='repository')
class Worker(Document):
name = fields.StringField(validators=[naming_with_dashes, max_25_chars],
read_only=True, render=False, optional=True)
label = fields.StaticField(value="worker")
variant = fields.ChoicesField(choices=['python'], default='python')
instances = fields.NumberField(default=1)
published = fields.BooleanField(default=False, read_only=True)
envvars = fields.CollectionField(EnvironmentVariables, inline=True)
class Meta:
backend_type = 'http'
identifier = 'name'
context = ['account']
def post_uri(self):
return '%s/services' % app_uri()
def uri(self):
return service_uri(service='worker')
class WorkerCollection(Collection):
document = Worker
def uri(self):
return resource_collection_uri(label='worker')
class App(Document):
name = fields.StringField(validators=[naming, max_25_chars])
label = fields.StaticField(value="app")
variant = fields.ChoicesField(default='python',
choices=['static', 'python'])
repository = fields.ForeignDocument(Repository)
postgres = fields.ForeignDocument(Postgres, optional=True)
mongodb = fields.ForeignDocument(Mongodb, optional=True)
worker = fields.ForeignDocument(Worker, optional=True)
repo_commit = fields.StringField(default='HEAD')
region = fields.ChoicesField(default="eu-nl", choices=['eu-nl', 'ams1'])
instances = fields.NumberField(default=1)
dns_record = fields.StringField(optional=True)
cnames = fields.CollectionField(CnameRecords, inline=True)
published = fields.BooleanField(default=False, read_only=True)
envvars = fields.CollectionField(EnvironmentVariables, inline=True)
class Meta:
backend_type = 'http'
identifier = 'name'
context = ['account']
def post_uri(self):
return '%s/apps' % uri()
def uri(self):
return app_uri(appname=self.name)
class AppCollection(Collection):
document = App
def uri(self):
return '%s/apps' % uri()
|
LegNeato/buck
|
scripts/migrations/include_def.py
|
Python
|
apache-2.0
| 1,059
| 0.000944
|
import ast
import label
import repository
import os
class IncludeDef:
"""
Represents build file include definition like
include_defs("//inclu
|
de/path").
"""
def __init__(self, ast_call: ast.Call) -> None:
self.ast_call = ast_call
def get_location(self) -> str:
"""
Returns an include definition location.
For include_defs("//include/path") it is "//include/path".
"""
return self.ast_call.args[0].s
def get_label(self) -> label.Label:
"""Returns a label identifying a build extension file."""
return label.from_string(self.get_location())
def get_include_path(self, repo: repository.Repository):
"""Returns a path to a file from which symbols should be imported."""
l = self.get_label()
return os.path.join(repo.get_cell_path(l.cell), l.package)
def from_ast_call(ast_call: ast.Call) -> IncludeDef:
"""
IncludeDef factory method that creates instances from ast Call description.
"""
return IncludeDef(ast_call)
|
mapossum/SeymourSolo
|
tester.py
|
Python
|
gpl-3.0
| 495
| 0.006061
|
import relayManager
import dronekit
class ShotManager():
def __init__(self):
# see the shotlist in app/shots/shots.p
print "init"
def Start(self, vehicle):
self.vehicle = vehicle
# Initialize relayManager
self.relayManager = relayManager.RelayManager(self)
target = 'udp:127.0.0.1:14551' #'tcp:127.0.0.1:5760'
print 'Connecting to ' + target + '...'
vehicle = dronekit.connect(target, wait_ready=True)
sm = ShotManager()
sm.Start(vehicle)
|
Freso/listenbrainz-server
|
listenbrainz/listenstore/tests/test_redislistenstore.py
|
Python
|
gpl-2.0
| 4,058
| 0.002957
|
# coding=utf-8
import datetime
import logging
import time
import uuid
from dateutil.relativedelta import relativedelta
from redis.connection import Connection
import listenbrainz.db.user as db_user
from listenbrainz.db.testing import DatabaseTestCase
from listenbrainz import config
from listenbrainz.listen import Listen
from listenbrainz.webserver.redis_connection import init_redis_connection
from listenbrainz.listenstore.redis_listenstore import RedisListenStore
class RedisListenStoreTestCase(DatabaseTestCase):
def setUp(self):
super(RedisListenStoreTestCase, self).setUp()
self.log = logging.getLogger()
# TODO: Ideally this would use a config from a flask app, but this test case doesn't create an app
self._redis = init_redis_connection(self.log, config.REDIS_HOST, config.REDIS_PORT, config.REDIS_NAMESPACE)
self.testuser = db_user.get_or_create(1, "test")
def tearDown(self):
self._redis.redis.flushdb()
Connection(self._redis.redis).disconnect()
super(RedisListenStoreTestCase, self).tearDown()
def test_get_and_put_playing_now(self):
listen = {
            'user_id': self.testuser['id'],
'user_name': self.testuser['musicbrainz_id'],
'listened_at': int(time.time()),
'track_metadata': {
'artist_name': 'The Strokes',
'track_name': 'Call It Fate, Call It Karma',
'additional_info': {},
},
}
self._redis.put_playing_now(listen['user_id'], listen, config.PLAYING_NOW_MAX_DURATION)
playing_now = self._redis.get_playing_now(listen['user_id'])
self.assertIsNotNone(playing_now)
self.assertIsInstance(playing_now, Listen)
self.assertEqual(playing_now.data['artist_name'], 'The Strokes')
self.assertEqual(playing_now.data['track_name'], 'Call It Fate, Call It Karma')
def test_update_and_get_recent_listens(self):
recent = self._redis.get_recent_listens()
self.assertEqual(recent, [])
listens = []
t = int(time.time())
for i in range(RedisListenStore.RECENT_LISTENS_MAX * 3):
listen = Listen(user_id=self.testuser['id'],
user_name = self.testuser['musicbrainz_id'],
timestamp = t - i,
data = {
'artist_name': str(uuid.uuid4()),
'track_name': str(uuid.uuid4()),
'additional_info': {},
}
)
listens.append(listen)
self._redis.update_recent_listens(listens)
recent = self._redis.get_recent_listens()
self.assertEqual(len(recent), RedisListenStore.RECENT_LISTENS_MAX)
self.assertIsInstance(recent[0], Listen)
for i, r in enumerate(recent):
self.assertEqual(r.timestamp, listens[i].timestamp)
recent = self._redis.get_recent_listens(5)
self.assertEqual(len(recent), 5)
for i, r in enumerate(recent):
self.assertEqual(r.timestamp, listens[i].timestamp)
def test_incr_listen_count_for_day(self):
today = datetime.datetime.utcnow()
# get without setting any value, should return None
self.assertIsNone(self._redis.get_listen_count_for_day(today))
# set a value to a key that doesn't exists
self._redis.increment_listen_count_for_day(today, 2)
self.assertEqual(2, self._redis.get_listen_count_for_day(today))
# increment again
self._redis.increment_listen_count_for_day(today, 3)
self.assertEqual(5, self._redis.get_listen_count_for_day(today))
# check for a different day
yesterday = today - relativedelta(days=1)
self.assertIsNone(self._redis.get_listen_count_for_day(yesterday))
self._redis.increment_listen_count_for_day(yesterday, 2)
self.assertEqual(2, self._redis.get_listen_count_for_day(yesterday))
|
loosecannon93/chittyrc
|
tests/chirc/tests/common.py
|
Python
|
apache-2.0
| 28,644
| 0.031804
|
import subprocess
import tempfile
import random
import os
import shutil
import re
import chirc.replies as replies
from chirc.client import ChircClient
from chirc.types import ReplyTimeoutException
import pytest
import time
class IRCSession():
def __init__(self, chirc_exe = None, msg_timeout = 0.1, randomize_ports = False,
default_port = None, loglevel = -1, debug = False):
if chirc_exe is None:
self.chirc_exe = "../chirc"
else:
self.chirc_exe = chirc_exe
if not (os.path.exists(self.chirc_exe) and os.path.isfile(self.chirc_exe) and os.access(self.chirc_exe, os.X_OK)):
raise RuntimeError("{} does not exist or it is not executable".format(self.chirc_exe))
if default_port is None:
self.default_port = 7776
else:
self.default_port = default_port
self.msg_timeout = msg_timeout
self.randomize_ports = randomize_ports
self.loglevel = loglevel
self.debug = debug
self.oper_password = "foobar"
# Testing functions
def _assert_equals(self, a, b, explanation, irc_msg = None):
if irc_msg is not None:
explanation = explanation + "\n\nMESSAGE: {}".format(irc_msg.raw(bookends=True))
assert a == b, explanation
def _assert_is_none(self, a, explanation, irc_msg = None):
if irc_msg is not None:
explanation = explanation + "\n\nMESSAGE: {}".format(irc_msg.raw(bookends=True))
assert a is None, explanation
def _assert_is_not_none(self, a, explanation, irc_msg = None):
if irc_msg is not None:
explanation = explanation + "\n\nMESSAGE: {}".format(irc_msg.raw(bookends=True))
assert a is not None, explanation
def _assert_in(self, x, l, explanation, irc_msg = None):
if irc_msg is not None:
explanation = explanation + "\n\nMESSAGE: {}".format(irc_msg.raw(bookends=True))
assert x in l, explanation
# Start/end IRC session
def start_session(self):
self.tmpdir = tempfile.mkdtemp()
if self.randomize_ports:
self.port = random.randint(10000,60000)
else:
self.port = self.default_port
if self.randomize_ports:
tries = 10
else:
tries = 1
while tries > 0:
chirc_cmd = [os.path.abspath(self.chirc_exe), "-p", str(self.port), "-o", self.oper_password]
if self.loglevel == -1:
chirc_cmd.append("-q")
elif self.loglevel == 1:
chirc_cmd.append("-v")
elif self.loglevel == 2:
chirc_cmd.append("-vv")
self.chirc_proc = subprocess.Popen(chirc_cmd, cwd = self.tmpdir)
time.sleep(0.01)
rc = self.chirc_proc.poll()
if rc != None:
tries -=1
if tries == 0:
pytest.fail("chirc process failed to start. rc = %i" % rc)
else:
if self.randomize_ports:
self.port = random.randint(10000,60000)
else:
break
self.clients = []
def end_session(self):
for c in self.clients:
self.disconnect_client(c)
rc = self.chirc_proc.poll()
if rc is not None:
if rc != 0:
shutil.rmtree(self.tmpdir)
pytest.fail("chirc process failed during test. rc = %i" % rc)
else:
self.chirc_proc.kill()
self.chirc_proc.wait()
shutil.rmtree(self.tmpdir)
# Client connect/disconnect
def get_client(self, nodelay = False):
c = ChircClient(msg_timeout = self.msg_timeout, port=self.port, nodelay = nodelay)
self.clients.append(c)
return c
def disconnect_client(self, c):
c.disconnect()
self.clients.remove(c)
def connect_user(self, nick, username):
client = self.get_client()
client.send_cmd("NICK %s" % nick)
client.send_cmd("USER %s * * :%s" % (nick, username))
self.verify_welcome_messages(client, nick)
self.verify_lusers(client, nick)
self.verify_motd(client, nick)
return client
def connect_clients(self, numclients, join_channel = None):
clients = []
for i in range(numclients):
nick = "user%i" % (i+1)
username = "User %s" % nick
client = self.connect_user(nick, username)
clients.append( (nick, client) )
if join_channel != None:
self.join_channel(clients, join_channel)
return clients
def connect_and_join_channels(self, channels, aways = [], ircops = [], test_names = False):
users = {}
if None in channels:
for user in channels[None]:
if user not in users:
client = self.connect_user(user, user)
users[user] = client
channelsl = sorted([k for k in channels.keys() if k is not None])
for channel in channelsl:
channelusers = channels[channel]
joined = []
joinedp = []
op = channelusers[0][1:]
if op not in users:
client = self.connect_user(op, op)
users[op] = client
if test_names:
expect_names = [channelusers[0]]
else:
expect_names = None
users[op].send_cmd("JOIN %s" % channel)
self.verify_join(users[op], op, channel, expect_names = expect_names)
joined.append(op)
joinedp.append(channelusers[0])
for user in channelusers[1:]:
if user[0] in ("@", "+"):
nick = user[1:]
else:
nick = user
if nick not in users:
client = self.connect_user(nick, nick)
users[nick] = client
if test_names:
expect_names = joinedp + [nick]
else:
expect_names = None
users[nick].send_cmd("JOIN %s" % channel)
self.verify_join(users[nick], nick, channel, expect_names = expect_names)
for user2 in joined:
self.verify_relayed_join(users[user2], from_nick = None, channel=channel)
joined.append(nick)
joinedp.append(user)
if user[0] in ("@","+"):
if user[0] == "@":
mode = "+o"
                    elif user[0] == "+":
mode = "+v"
self.set_channel_mode(users[op], op, channel, mode, nick)
for user2 in joined:
self.verify_relayed_mode(users[user2], from_nick=op, channel=channel, mode=mode, mode_nick=nick)
for user in aways:
users[user].send_cmd("AWAY :I'm away")
self.get_reply(users[user], expect_code = replies.RPL_NOWAWAY, expect_nick = user,
expect_nparams = 1, long_param_re = "You have been marked as being away")
for user in ircops:
users[user].send_cmd("OPER %s %s" % (user, self.oper_password))
self.get_reply(users[user], expect_code = replies.RPL_YOUREOPER, expect_nick = user,
expect_nparams = 1, long_param_re = "You are now an IRC operator")
return users
# IRC actions
def join_channel(self, clients, channel)
|
alexbruy/QGIS
|
python/plugins/processing/gui/FileSelectionPanel.py
|
Python
|
gpl-2.0
| 3,127
| 0.00064
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
FileSelectionPanel.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
*                                                                         *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by   *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt import uic
from qgis.PyQt.QtWidgets import QFileDialog
from qgis.PyQt.QtCore import QSettings
from processing.tools.system import isWindows
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'widgetBaseSelector.ui'))
class FileSelectionPanel(BASE, WIDGET):
def __init__(self, isFolder, ext=None):
super(FileSelectionPanel, self).__init__(None)
self.setupUi(self)
self.ext = ext or '*'
self.isFolder = isFolder
self.btnSelect.clicked.connect(self.showSelectionDialog)
def showSelectionDialog(self):
# Find the file dialog's working directory
settings = QSettings()
text = self.leText.text()
if os.path.isdir(text):
path = text
elif os.path.isdir(os.path.dirname(text)):
path = os.path.dirname(text)
elif settings.contains('/Processing/LastInputPath'):
path = settings.value('/Processing/LastInputPath')
else:
path = ''
if self.isFolder:
folder = QFileDialog.getExistingDirectory(self,
self.tr('Select folder'), path)
if folder:
self.leText.setText(folder)
settings.setValue('/Processing/LastInputPath',
os.path.dirname(folder))
else:
filenames = QFileDialog.getOpenFileNames(self,
self.tr('Select file'), path, '*.' + self.ext)
if filenames:
self.leText.setText(u';'.join(filenames))
settings.setValue('/Processing/LastInputPath',
os.path.dirname(filenames[0]))
def getValue(self):
s = self.leText.text()
if isWindows():
s = s.replace('\\', '/')
return s
def setText(self, text):
self.leText.setText(text)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/scatter3d/marker/colorbar/title/font/_family.py
|
Python
|
mit
| 558
| 0
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="family",
        parent_name="scatter3d.marker.colorbar.title.font",
**kwargs
):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
            parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
no_blank=kwargs.pop("no_blank", True),
strict=kwargs.pop("strict", True),
**kwargs
)
|
thinkle/gourmet
|
gourmet/plugins/import_export/gxml_plugin/gxml_exporter_plugin.py
|
Python
|
gpl-2.0
| 2,889
| 0.014192
|
import re
from gourmet.plugin import ExporterPlugin
from gourmet.convert import seconds_to_timestring, float_to_frac
from . import gxml2_exporter
from gettext import gettext as _
GXML = _('Gourmet XML File')
class GourmetExportChecker:
def check_rec (self, rec, file):
self.txt = file.read()
self.rec = rec
self.check_attrs()
def check_attrs (self):
for attr in ['title','cuisine',
'source','link']:
if getattr(self.rec,attr):
assert re.search(r'<%(attr)s>\s*%(val)s\s*</%(attr)s>'%{
'attr':attr,
'val':getattr(self.rec,attr)
},
self.txt), \
'Did not find %s value %s'%(attr,getattr(self.rec,attr))
        if self.rec.yields:
assert re.search(r'<yields>\s*%s\s*%s\s*</yields>'%(
                self.rec.yields,
self.rec.yield_unit),
self.txt) or \
re.search(r'<yields>\s*%s\s*%s\s*</yields>'%(
float_to_frac(self.rec.yields),
self.rec.yield_unit),
self.txt), \
'Did not find yields value %s %s'%(self.rec.yields,
self.rec.yield_unit)
for att in ['preptime','cooktime']:
if getattr(self.rec,att):
tstr = seconds_to_timestring(getattr(self.rec,att))
assert re.search(r'<%(att)s>\s*%(tstr)s\s*</%(att)s>'%locals(),self.txt),\
'Did not find %s value %s'%(att,tstr)
class GourmetExporterPlugin (ExporterPlugin):
label = _('Gourmet XML Export')
sublabel = _('Exporting recipes to Gourmet XML file %(file)s.')
single_completed_string = _('Recipe saved in Gourmet XML file %(file)s.'),
filetype_desc = GXML
saveas_filters = [GXML,['text/xml'],['*.grmt','*.xml','*.XML']]
saveas_single_filters = saveas_filters
def get_multiple_exporter (self, args):
return gxml2_exporter.recipe_table_to_xml(
args['rd'],
args['rv'],
args['file'],
)
def do_single_export (self, args) :
gxml2_exporter.recipe_table_to_xml(args['rd'],
[args['rec']],
args['out'],
change_units=args['change_units'],
mult=args['mult']
).run()
def run_extra_prefs_dialog (self):
pass
def check_export (self, rec, file):
gec = GourmetExportChecker()
gec.check_rec(rec,file)
|
fajoy/horizon-example
|
openstack_dashboard/dashboards/project/instances/urls.py
|
Python
|
apache-2.0
| 1,941
| 0.000515
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls.defaults import patterns
from django.conf.urls.defaults import url
from openstack_dashboard.dashboards.project.instances.views import DetailView
from openstack_dashboard.dashboards.project.instances.views import IndexView
from openstack_dashboard.dashboards.project.instances.views import \
LaunchInstanceView
from openstack_dashboard.dashboards.project.instances.views import ResizeView
from openstack_dashboard.dashboards.project.instances.views import UpdateView
INSTANCES = r'^(?P<instance_id>[^/]+)/%s$'
VIEW_MOD = 'open
|
stack_dashboard.dashboards.project.instances.views'
urlpatterns = patterns(VIEW_MOD,
url(r'^$', IndexVi
|
ew.as_view(), name='index'),
url(r'^launch$', LaunchInstanceView.as_view(), name='launch'),
url(r'^(?P<instance_id>[^/]+)/$', DetailView.as_view(), name='detail'),
url(INSTANCES % 'update', UpdateView.as_view(), name='update'),
url(INSTANCES % 'console', 'console', name='console'),
url(INSTANCES % 'vnc', 'vnc', name='vnc'),
url(INSTANCES % 'spice', 'spice', name='spice'),
url(INSTANCES % 'resize', ResizeView.as_view(), name='resize'),
)
|
jazinga/psutil
|
examples/process_detail.py
|
Python
|
bsd-3-clause
| 3,753
| 0.00373
|
#!/usr/bin/env python
# $Id$
"""
Print detailed information about a process.
"""
import os
import datetime
import socket
import sys
import psutil
from psutil._compat import namedtuple
def convert_bytes(n):
if n == 0:
return '0B'
symbols = ('k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols):
prefix[s] = 1 << (i+1)*10
for s in reversed(symbols):
if n >= prefix[s]:
value = float(n) / prefix[s]
return '%.1f%s' % (value, s)
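# Illustrative examples (not part of the original script):
#   convert_bytes(0)       -> '0B'
#   convert_bytes(2048)    -> '2.0k'
#   convert_bytes(5 << 20) -> '5.0M'
# Note: non-zero values below 1024 fall through the loop and return None.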
def print_(a, b):
if sys.stdout.isatty():
fmt = '\x1b[1;32m%-17s\x1b[0m %s' %(a, b)
else:
fmt = '%-15s %s' %(a, b)
print fmt
def run(pid):
p = psutil.Process(pid)
if p.parent:
parent = '(%s)' % p.parent.name
else:
parent = ''
    started = datetime.datetime.fromtimestamp(p.create_time).strftime('%Y-%m-%d %H:%M')
io = p.get_io_counters()
mem = p.get_memory_info()
mem = '%s%% (resident=%s, virtual=%s) ' %(round(p.get_memory_percent(), 1),
convert_bytes(mem.rss),
convert_bytes(mem.vms))
cpu_times = p.get_cpu_times()
cpu_percent = p.get_cpu_percent(0)
children = p.get_children()
files = p.get_open_files()
threads = p.get_threads()
connections = p.get_connections()
print_('pid', p.pid)
print_('name', p.name)
print_('exe', p.exe)
print_('parent', '%s %s' % (p.ppid, parent))
print_('cmdline', ' '.join(p.cmdline))
print_('started', started)
print_('user', p.username)
if os.name == 'posix':
print_('uids', 'real=%s, effective=%s, saved=%s' % p.uids)
print_('gids', 'real=%s, effective=%s, saved=%s' % p.gids)
print_('terminal', p.terminal or '')
if hasattr(p, 'getcwd'):
print_('cwd', p.getcwd())
print_('memory', mem)
print_('cpu', '%s%% (user=%s, system=%s)' % (cpu_percent,
cpu_times.user,
cpu_times.system))
print_('status', p.status)
print_('niceness', p.nice)
print_('num threads', p.get_num_threads())
if hasattr(p, 'get_io_counters'):
print_('I/O', 'bytes-read=%s, bytes-written=%s' % \
(convert_bytes(io.read_bytes),
convert_bytes(io.write_bytes)))
if children:
print_('children', '')
for child in children:
print_('', 'pid=%s name=%s' % (child.pid, child.name))
if files:
print_('open files', '')
for file in files:
print_('', 'fd=%s %s ' % (file.fd, file.path))
if threads:
print_('running threads', '')
for thread in threads:
print_('', 'id=%s, user-time=%s, sys-time=%s' \
% (thread.id, thread.user_time, thread.system_time))
if connections:
print_('open connections', '')
for conn in connections:
type = 'TCP' if conn.type == socket.SOCK_STREAM else 'UDP'
|
lip, lport = conn.local_address
if not conn.remote_address:
rip, rport = '*', '*'
else:
rip, rport = conn.remote_address
print_('', '%s:%s -> %s:%s type=%s status=%s' \
% (lip, lport, rip, rport, type, conn.status))
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv
|
) == 1:
sys.exit(run(os.getpid()))
elif len(argv) == 2:
sys.exit(run(int(argv[1])))
else:
sys.exit('usage: %s [pid]' % __file__)
if __name__ == '__main__':
sys.exit(main())
|
mekkablue/Glyphs-Scripts
|
Guides/Guides through All Selected Nodes.py
|
Python
|
apache-2.0
| 2,786
| 0.037688
|
#MenuTitle: Guides through All Selected Nodes
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Creates guides through all selected nodes.
"""
from Foundation import NSPoint
import math
thisFont = Glyphs.font # frontmost font
selectedLayers = thisFont.selectedLayers # active layers of selected glyphs
def angle( firstPoint, secondPoint ):
"""
Returns the angle (in deg
|
rees) of the straight line between firstPoint and secondPoint,
0 degrees being the second point to the right of first point.
firstPoint, secondPoint: must be NSPoint or GSNode
"""
|
xDiff = secondPoint.x - firstPoint.x
yDiff = secondPoint.y - firstPoint.y
return math.degrees(math.atan2(yDiff,xDiff))
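# Illustrative examples (assumed points, not from the original script):
#   angle(NSPoint(0, 0), NSPoint(10, 0)) -> 0.0   (second point directly to the right)
#   angle(NSPoint(0, 0), NSPoint(0, 10)) -> 90.0  (second point directly above)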
def newGuide( position, angle=0 ):
try:
# GLYPHS 3
newGuide = GSGuide()
except:
# GLYPHS 2
newGuide = GSGuideLine()
newGuide.position = position
newGuide.angle = angle
return newGuide
def isThereAlreadyAGuideWithTheseProperties(thisLayer,guideposition,guideangle):
if guideangle < 0:
guideangle += 180
if guideangle > 180:
guideangle -= 180
for thisGuide in thisLayer.guides:
thisAngle = thisGuide.angle
if thisAngle < 0:
thisAngle += 180
if thisAngle > 180:
thisAngle -= 180
if abs(thisAngle - guideangle) < 0.01 and abs(thisGuide.position.x - guideposition.x) < 0.01 and abs(thisGuide.position.y - guideposition.y) < 0.01:
return True
return False
if len(selectedLayers) == 1:
thisLayer = selectedLayers[0]
thisGlyph = thisLayer.parent
currentPointSelection = [point.position for point in thisLayer.selection if type(point) in (GSNode,GSAnchor)]
# thisGlyph.beginUndo() # undo grouping causes crashes
try:
if len(currentPointSelection) > 1:
# clear selection:
thisLayer.clearSelection()
currentPointSelection.append(currentPointSelection[0])
for i,j in enumerate(range(1,len(currentPointSelection))):
point1 = currentPointSelection[i]
point2 = currentPointSelection[j]
angleBetweenPoints = angle(point1,point2)
middlePoint = addPoints(point1,point2)
middlePoint.x *= 0.5
middlePoint.y *= 0.5
# create guide and add it to layer:
if not isThereAlreadyAGuideWithTheseProperties(thisLayer, middlePoint, angleBetweenPoints):
guideBetweenPoints = newGuide(middlePoint, angleBetweenPoints)
thisLayer.guides.append( guideBetweenPoints )
# select it:
thisLayer.selection.append(guideBetweenPoints)
elif len(currentPointSelection) == 1:
point = currentPointSelection[0]
guide = newGuide(point)
thisLayer.guides.append(guide)
# select only guide:
thisLayer.clearSelection()
thisLayer.selection.append(guide)
except Exception as e:
raise e
# finally:
# thisGlyph.endUndo() # undo grouping causes crashes
|
rs2/pandas
|
pandas/core/groupby/base.py
|
Python
|
bsd-3-clause
| 3,488
| 0.000573
|
"""
Provide basic components for groupby. These definitions
hold the allowlist of methods that
|
are exposed on the
SeriesGroupBy and the DataFrameGroupBy objects.
"""
from __future__ import annotations
impor
|
t dataclasses
from typing import Hashable
@dataclasses.dataclass(order=True, frozen=True)
class OutputKey:
label: Hashable
position: int
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
plotting_methods = frozenset(["plot", "hist"])
common_apply_allowlist = (
frozenset(
[
"quantile",
"fillna",
"mad",
"take",
"idxmax",
"idxmin",
"tshift",
"skew",
"corr",
"cov",
"diff",
]
)
| plotting_methods
)
series_apply_allowlist: frozenset[str] = (
common_apply_allowlist
| frozenset(
{"nlargest", "nsmallest", "is_monotonic_increasing", "is_monotonic_decreasing"}
)
) | frozenset(["dtype", "unique"])
dataframe_apply_allowlist: frozenset[str] = common_apply_allowlist | frozenset(
["dtypes", "corrwith"]
)
# cythonized transformations or canned "agg+broadcast", which do not
# require postprocessing of the result by transform.
cythonized_kernels = frozenset(["cumprod", "cumsum", "shift", "cummin", "cummax"])
# List of aggregation/reduction functions.
# These map each group to a single numeric value
reduction_kernels = frozenset(
[
"all",
"any",
"corrwith",
"count",
"first",
"idxmax",
"idxmin",
"last",
"mad",
"max",
"mean",
"median",
"min",
"ngroup",
"nth",
"nunique",
"prod",
# as long as `quantile`'s signature accepts only
# a single quantile value, it's a reduction.
# GH#27526 might change that.
"quantile",
"sem",
"size",
"skew",
"std",
"sum",
"var",
]
)
# List of transformation functions.
# a transformation is a function that, for each group,
# produces a result that has the same shape as the group.
transformation_kernels = frozenset(
[
"backfill",
"bfill",
"cumcount",
"cummax",
"cummin",
"cumprod",
"cumsum",
"diff",
"ffill",
"fillna",
"pad",
"pct_change",
"rank",
"shift",
"tshift",
]
)
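# Illustrative example (not part of this module): a transformation preserves the
# group's shape, e.g. df.groupby("key")["val"].cumsum() returns one value per
# input row, whereas a reduction such as .sum() collapses each group to one value.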
# these are all the public methods on Grouper which don't belong
# in either of the above lists
groupby_other_methods = frozenset(
[
"agg",
"aggregate",
"apply",
"boxplot",
# corr and cov return ngroups*ncolumns rows, so they
# are neither a transformation nor a reduction
"corr",
"cov",
"describe",
"dtypes",
"expanding",
"ewm",
"filter",
"get_group",
"groups",
"head",
"hist",
"indices",
"ndim",
"ngroups",
"ohlc",
"pipe",
"plot",
"resample",
"rolling",
"tail",
"take",
"transform",
"sample",
]
)
# Valid values of `name` for `groupby.transform(name)`
# NOTE: do NOT edit this directly. New additions should be inserted
# into the appropriate list above.
transform_kernel_allowlist = reduction_kernels | transformation_kernels
|
qedsoftware/commcare-hq
|
corehq/apps/style/forms/widgets.py
|
Python
|
bsd-3-clause
| 10,116
| 0.00257
|
import collections
from django import forms
from django.forms.fields import MultiValueField, CharField
from django.forms.utils import flatatt
from django.forms.widgets import (
CheckboxInput,
Input,
RadioChoiceInput,
RadioSelect,
RadioFieldRenderer,
TextInput,
MultiWidget,
Widget,
)
from django.template.loader import render_to_string
from django.utils.encoding import force_unicode
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
import json
from django.utils.translation import ugettext_noop
from dimagi.utils.dates import DateSpan
class BootstrapCheckboxInput(CheckboxInput):
def __init__(self, attrs=None, check_test=bool, inline_label=""):
super(BootstrapCheckboxInput, self).__init__(attrs, check_test)
self.inline_label = inline_label
def render(self, name, value, attrs=None):
final_attrs = self.build_attrs(attrs, type='checkbox', name=name)
try:
result = self.check_test(value)
except: # Silently catch exceptions
result = False
if result:
final_attrs['checked'] = 'checked'
if value not in ('', True, False, None):
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_unicode(value)
return mark_safe(u'<label class="checkbox"><input%s /> %s</label>' %
(flatatt(final_attrs), self.inline_label))
class BootstrapRadioInput(RadioChoiceInput):
def __unicode__(self):
if 'id' in self.attrs:
label_for = ' for="%s_%s"' % (self.attrs['id'], self.index)
else:
label_for = ''
choice_label = conditional_escape(force_unicode(self.choice_label))
return mark_safe(u'<label class="radio"%s>%s %s</label>' % (label_for, self.tag(), choice_label))
class BootstrapRadioFieldRenderer(RadioFieldRenderer):
def render(self):
return mark_safe(u'\n'.join([u'%s'
% force_unicode(w) for w in self]))
def __iter__(self):
for i, choice in enumerate(self.choices):
yield BootstrapRadioInput(self.name, self.value, self.attrs.copy(), choice, i)
class BootstrapRadioSelect(RadioSelect):
renderer = BootstrapRadioFieldRenderer
class BootstrapAddressField(MultiValueField):
"""
The original for this was found here:
http://stackoverflow.com/questions/7437108/saving-a-form-model-with-using-multiwidget-and-a-multivaluefield
"""
def __init__(self,num_lines=3,*args,**kwargs):
fields = tuple([CharField(widget=TextInput(attrs={'class':'input-xxlarge'})) for _ in range(0, num_lines)])
self.widget = BootstrapAddressFieldWidget(widgets=[field.widget for field in fields])
super(BootstrapAddressField,self).__init__(fields=fields,*args,**kwargs)
def compress(self, data_list):
return data_list
class BootstrapAddressFieldWidget(MultiWidget):
def decompress(self, value):
return ['']*len(self.widgets)
def format_output(self, rendered_widgets):
lines = list()
for field in rendered_widgets:
lines.append("<p>%s</p>" % field)
return u'\n'.join(lines)
# def value_from_datadict(self, data, files, name):
# line_list = [widget.value_from_datadict(data,files,name+'_%s' %i) for i,widget in enumerate(self.widgets)]
# try:
# return line_list[0] + ' ' + line_list[1] + ' ' + line_list[2]
# except Exception:
# return ''
class BootstrapDisabledInput(Input):
input_type = 'hidden'
def render(self, name, value, attrs=None):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
if value != '':
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_unicode(self._format_value(value))
return mark_safe(u'<span class="uneditable-input %s">%s</span><input%s />' %
(attrs.get('class', ''), value, flatatt(final_attrs)))
class BootstrapPhoneNumberInput(Input):
input_type = 'text'
def render(self, name, value, attrs=None):
return mark_safe(u"""<div class="input-prepend">
<span class="add-on">+</span>%s
</div>""" % super(BootstrapPhoneNumberInput, self).render(name, value, attrs))
class AutocompleteTextarea(forms.Textarea):
"""
Textarea with auto-complete. Uses a custom extension on top of Twitter
Bootstrap's typeahead plugin.
"""
def render(self, name, value, attrs=None):
if hasattr(self, 'choices') and self.choices:
output = mark_safe("""
<script>
$(function() {
$("#%s").select2({
multiple: true,
tags: %s
});
});
</script>\n""" % (attrs['id'], json.dumps(map(lambda c: {'text': c, 'id': c}, self.choices))))
else:
output = mark_safe("")
output += super(AutocompleteTextarea, self).render(name, value,
attrs=attrs)
return output
class Select2MultipleChoiceWidget(forms.SelectMultiple):
class Media:
css = {
'all': ('select2-3.4.5-legacy/select2.css',)
}
js = ('select2-3.4.5-legacy/select2.js',)
def render(self, name, value, attrs=None, choices=()):
final_attrs = self.build_attrs(attrs)
output = super(Select2MultipleChoiceWidget, self).render(name, value, attrs, choices)
output += """
<script>
$(function() {
$('#%s').select2({ width: 'resolve' });
});
</script>
""" % final_attrs.get('id')
return mark_safe(output)
class Select2Ajax(forms.TextInput):
"""
A Select2 widget that loads its options asynchronously.
You must use `set_url()` to set the url. This will usually be done in the form's __init__() method.
The url is not specified in the form class definition because in most cases the url will be dependent on the
domain of the request.
"""
class Media:
css = {
'all': ('select2-3.5.2-legacy/select2.css', 'select2-3.5.2-legacy/select2
|
-bootstrap.css')
}
js = ('select2-3.5.2-legacy/select2.js',)
def __init__(self, attrs=None, page_size=20):
self.page_si
|
ze = page_size
super(Select2Ajax, self).__init__(attrs)
def set_url(self, url):
self.url = url
def _clean_initial(self, val):
if isinstance(val, collections.Sequence) and not isinstance(val, (str, unicode)):
return {"id": val[0], "text": val[1]}
else:
return {"id": val, "text": val}
def render(self, name, value, attrs=None):
final_attrs = self.build_attrs(attrs)
output = super(Select2Ajax, self).render(name, value, attrs)
output += render_to_string(
'hqstyle/forms/select_2_ajax_widget.html',
{
'id': final_attrs.get('id'),
'initial': self._clean_initial(value),
'endpoint': self.url,
'page_size': self.page_size,
}
)
return mark_safe(output)
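# Illustrative sketch (hypothetical form and view name, not part of this module)
# of the usage described in the docstring: the url is set in __init__ because it
# usually depends on the request's domain.
#
#   class UserLookupForm(forms.Form):
#       user = forms.CharField(widget=Select2Ajax())
#
#       def __init__(self, domain, *args, **kwargs):
#           super(UserLookupForm, self).__init__(*args, **kwargs)
#           self.fields['user'].widget.set_url(
#               reverse("paginate_users", args=[domain]))  # hypothetical view name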
class DateRangePickerWidget(Input):
"""SUPPORTS BOOTSTRAP 3 ONLY
Extends the standard input widget to render a Date Range Picker Widget.
Documentation and Demo here: http://www.daterangepicker.com/
usage:
apply the following decorator to your view's dispatch method
@use_daterangepicker
def dispatch(self, request, *args, **kwargs):
super(self, MyView).dispatch(request, *args, **kwargs)
"""
class Range(object):
LAST_7 = 'last_7_days'
LAST_MONTH = 'last_month'
LAST_30_DAYS = 'last_30_days'
range_labels = {
Range.LAST_7: ugettext_noop('Last 7 Days'),
Range.LAST_MONTH: ugettext_noop('Last Month'),
Range.LAST_30_DAYS: ugettext_noop('Last 30 Days'),
}
separator = ugettext_noop(' to ')
def __init__(self, attrs=None, range_labels=None, separato
|
michaelhelmick/lassie
|
tests/test_open_graph.py
|
Python
|
mit
| 2,426
| 0.001237
|
import lassie
from .base import LassieBaseTestCase
class LassieOpenGraphTestCase(LassieBaseTestCase):
def test_open_graph_all_properties(self):
url = 'http://lassie.it/open_graph/all_properties.html'
data = lassie.fetch(url)
self.assertEqual(data['url'], url)
self.assertEqual(data['title'], 'Lassie Open Graph All Properies Test')
self.assertEqual(data['description'], 'Just a test template with OG data!')
self.assertEqual(data['locale'], 'en_US')
self.assertEqual(data['site_name'], 'Lassie')
self.assertEqual(len(data['images']), 1)
image = data['images'][0]
self.assertEqual(image['src'], 'http://i.imgur.com/cvoR7zv.jpg')
self.assertEqual(image['width'], 550)
self.assertEqual(image['height'], 365)
self.assertEqual(image['type'], 'og:image')
self.assertEqual(len(data['videos']), 1)
video = data['videos'][0]
self.assertEqual(video['src'], 'http://www.youtube.com/v/dQw4w9WgXcQ?version=3&autohide=1')
self.assertEqual(video['width'], 640)
self.assertEqual(video['height'], 480)
self.assertEqual(video['type'], 'applica
|
tion/x-shockwave-flash')
def test_open_graph_no_og_title_no_og_url(self):
url = 'http://lassie.it/open_graph/no_og_title_no_og_url.html'
data = lassie.fetch(url)
self.assertEqual(data['url'], url)
self.assertEqual(data['title'], 'Lassie Open Graph Test | No og:title, No og:url')
def test_open_graph_og_image_plus_two_body_images(self):
url = 'http://lass
|
ie.it/open_graph/og_image_plus_two_body_images.html'
data = lassie.fetch(url)
# Try without passing "all_images", then pass it
self.assertEqual(len(data['images']), 1)
data = lassie.fetch(url, all_images=True)
self.assertEqual(len(data['images']), 3)
image_0 = data['images'][0]
image_1 = data['images'][1]
image_2 = data['images'][2]
self.assertEqual(image_0['type'], 'og:image')
self.assertEqual(image_1['type'], 'body_image')
self.assertEqual(image_2['type'], 'body_image')
def test_open_graph_og_image_relative_url(self):
url = 'http://lassie.it/open_graph/og_image_relative_url.html'
data = lassie.fetch(url)
self.assertEqual(
data['images'][0]['src'], 'http://lassie.it/open_graph/name.jpg')
|
mkfifo/open-source-stats
|
scripts/gen_colour_palette.py
|
Python
|
gpl-3.0
| 6,213
| 0.002253
|
#!/usr/bin/python3
import sys
import os
# this script allows the loading of palettes from files
# when invoked you must specify a palette
# google-blue.hex
# google-light-blue.hex
# old-blue.rgb
#
# each palette must contain 10 colours for the graduations between 0 and 100 %
# and an 11th colour for the 'incomplete' (current) month
#
# a hex file must be made up with one rgb hex file per line, like so:
#
# chris@Ox1b open-source-stats(master)-> cat palettes/google-light-blue.hex
# E1F5FE
# B3E5FC
# 81D4FA
# 4FC3F7
# 29B6F6
# 03A9F4
# 039BE5
# 0288D1
# 0277BD
# 01579B
# 7C019F
#
# a rgb file takes 3 values per line (r, g and then b)
# these values must be whitespace or comma separated
# lea
|
ding and trailing whitespace are ignored:
#
# chris@Ox1b open-source-stats(master)-> cat palettes/old-blue.rgb
# 227 242 253
# 187 222 251
# 144 202 249
# 100 181 246
# 66 165 245
# 33 150 243
# 30 136 229
# 25 118 210
# 21 101 192
# 13 71 161
# 124 1 159
#
def hex_to_rgb_list(hex):
r = hex[0:2]
g = hex[2:4]
|
b = hex[4:6]
return [
"%4d" %(int(r,16)),
"%4d" %(int(g,16)),
"%4d" %(int(b,16))
]
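# Illustrative example (not part of the original script):
#   hex_to_rgb_list("01579B") -> ['   1', '  87', ' 155']  (right-aligned to width 4 by "%4d")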
def rgb_str_to_list(rgbstr):
parts = rgbstr.split()
out = []
for part in parts:
part = "%3d" %(int(part))
out.append(part)
print(part)
return out
def parse_palette(path):
colours = []
if not os.path.exists(path):
print("Error: failed to find palette at '" + path + "'")
exit(1)
mode = path[-3:]
if mode != 'hex' and mode != 'rgb':
print("Error: unsupported palette format '" + mode + "'")
exit(1)
lines = []
with open(path, "r") as f:
lines = f.readlines()
if len(lines) != 11:
print("Error: palette did not specify exactly 11 colours")
exit(1)
colours = []
for line in lines:
line = line.lstrip(" ")
line = line.rstrip("\n")
line = line.rstrip(" ")
line = line.replace(",", " ")
colour = None
if mode == "rgb":
colour = rgb_str_to_list(line)
elif mode == "hex":
colour = hex_to_rgb_list(line)
else:
print("Error: unsupported format")
exit(1)
colours.append(colour)
if len(colours) != 11:
print("Error: failed to parse 11 colour lines")
exit(1)
return colours
# example output:
#
# var colour_range = [
# /* 0 = 0 .. 9 % */
# [225, 245, 254]
#
# /* 1 = 10 .. 19 % */
# [179, 229, 252]
#
# /* 2 = 20 .. 29 % */
# [129, 212, 250]
#
# /* 3 = 30 .. 39 % */
# [79, 195, 247]
#
# /* 4 = 40 .. 49 % */
# [41, 182, 246]
#
# /* 5 = 50 .. 59 % */
# [3, 169, 244]
#
# /* 6 = 60 .. 69 % */
# [3, 155, 229]
#
# /* 7 = 70 .. 79 % */
# [2, 136, 209]
#
# /* 8 = 80 .. 89 % */
# [2, 119, 189]
#
# /* 9 = 90 .. 100 % */
# [1, 87, 155]
#
# ];
header = '''
/* FIXME we need to decide on these colour graduations
* the idea is:
* colour_range[0] is for 0 .. 9 %
* colour_range[1] is for 10 .. 19 %
* colour_range[2] is for 20 .. 29 %
* ...
* colour_range[8] is for 80 .. 89 %
* colour_range[9] is for 90 ..100 %
*
* currently using the light blue palette from
* http://www.google.com/design/spec/style/color.html#color-color-palette
*
* FIXME I do really like this blue...
* [ 0, 0, 255],
*/
'''
comments = [
"/* 0 = 0 .. 9 % */",
"/* 1 = 10 .. 19 % */",
"/* 2 = 20 .. 29 % */",
"/* 3 = 30 .. 39 % */",
"/* 4 = 40 .. 49 % */",
"/* 5 = 50 .. 59 % */",
"/* 6 = 60 .. 69 % */",
"/* 7 = 70 .. 79 % */",
"/* 8 = 80 .. 89 % */",
"/* 9 = 90 .. 100 % */",
"// ongoing colour is for the current month, to show that the number is not yet final",
]
footer = '''
function get_colour(percent){
var index = Math.floor(colour_range.length * percent);
index = Math.max(index, 0);
index = Math.min(index, (colour_range.length-1));
if( index in colour_range ){
return colour_range[index];
}
console.log("Error: failed to find colour")
return [0,0,0];
}
'''
def pretty_print(f, colours):
if len(colours) != 11:
print("Error: pretty_print output must be 10 elems")
exit(1)
incomplete_colour = colours[10]
lines = []
# print header
lines.append(header)
lines.append("\n")
# print open
lines.append("var colour_range = [")
for i in range(0, 10):
lines.append(" ")
lines.append(comments[i])
lines.append("\n")
lines.append(" ")
lines.append("[")
for o in colours[i]:
o += ","
lines.append(o)
lines.append("],\n")
# print close
lines.append("];\n")
lines.append("\n")
lines.append(comments[10])
lines.append("\n")
lines.append('''var colour_ongoing = [''')
for o in colours[10]:
o += ","
lines.append(o)
lines.append("];\n")
# print footer
lines.append(footer)
lines.append("\n")
f.writelines(lines)
if __name__ == "__main__":
args = sys.argv
if len(args) < 2:
print("Error: must specify palette file")
exit(1)
if len(args) > 2:
print("Error: too many args, only take palette file")
exit(1)
palette = args[1]
if not os.path.exists(palette):
expanded_palette = os.path.join("palettes", palette)
if not os.path.exists(expanded_palette):
print("Error: failed to find palette, checked at '" + palette + "' and '" + expanded_palette + "'")
print("Please see palettes/ for a list of available palettes")
exit(1)
palette = expanded_palette
colours = parse_palette(palette)
if len(colours) != 11:
print("Error: colour list must have 10 elems")
exit(1)
target = "site/colours.js"
with open(target, "w") as f:
pretty_print(f, colours)
|
stanvit/pyomapic
|
__init__.py
|
Python
|
mit
| 31
| 0.032258
|
from
|
PyOMAPIc import PyOMA
|
PIc
|
pitomba/libra
|
libra/settings.py
|
Python
|
mit
| 259
| 0
|
from settings_base import *
PORT =
|
80
SERVER_NAME = 'ht
|
tp://libra.pitomba.org:%s' % PORT
MONGODB_DATABASE_URL = "localhost"
MONGODB_DATABASE_PORT = 27017
MONGODB_DATABASE_USER = "usr_libra"
MONGODB_DATABASE_PWD = "usr_libra"
MONGODB_DATABASE_POOL_SIZE = 50
|
MyRobotLab/pyrobotlab
|
home/kwatters/harry/gestures/italianhello.py
|
Python
|
apache-2.0
| 2,293
| 0.066289
|
def italianhello():
i01.setHandSpeed("left", 0.60, 0.60, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.75)
i01.moveHead(105,78)
i01.moveArm("left",78,48,37,11)
i01.moveArm("right",90,144,60,75)
i01.moveHand("left",112,111,105,102,81,10)
i01.moveHand("right",0,0,0,50,82,180)
ear.pauseListening()
sleep(1)
for w in range(0,3):
i01.setHandSpeed("left", 0.60, 0.60, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 0.60)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.60, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.75)
i01.moveHead(83,98)
i01.moveArm("left",78,48,37,11)
i01.moveArm("right",90,157,47,75)
i01.moveHand("left",112,111,105,102,81,10)
i01.moveHand("right",3,0,62,41,117,94)
if w==1:
i01.setHandSpeed("left", 0.60, 0.60, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 0.60)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.65, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.75)
i01.moveHead(83,70)
i01.mouth.speakBlocking("ciao , il mio nome e inmoov one")
i01.moveArm("left",78,48,37,11)
i01.moveArm("right",57,145,50,68)
i01.moveHand("left",100,90,85,80,71,15)
i01.moveHand("right",3,0,31,12,26,45)
sleep(1)
i01.moveHead(83,98)
i01.moveArm("left",78,48,37,11)
i01.moveArm("right",90,157,47,75)
i01.moveHand("left",112,111,105,102,81,10)
i01.moveHand("right",3,0,62,41,117,94)
sleep(1)
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("right", 0.75, 0.85, 0.95, 0.85)
i01.setArmSpeed("left", 0.95, 0.65, 0.75, 0.75)
i01.setHeadSpeed(0.75, 0.75)
i01.moveHead(79,100)
i01.m
|
oveArm("left",5,94,28,15)
i01.moveArm("right",5,82,28,15)
i01.moveHand("left",42,58,42,55,71,35)
i01.moveHand("right",81,50,82,60,105,113)
ear.res
|
umeListening()
|
jimi-c/ansible
|
lib/ansible/plugins/cliconf/junos.py
|
Python
|
gpl-3.0
| 7,917
| 0.001768
|
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import json
import re
from itertools import chain
from functools import wraps
from ansible.module_utils._text import to_text
from ansible.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase
def configure(func):
@wraps(func)
def wrapped(self, *args, **kwargs):
prompt = self._connection.get_prompt()
if not to_text(prompt, errors='surrogate_or_strict').strip().endswith('#'):
self.send_command('configure')
return func(self, *args, **kwargs)
return wrapped
class Cliconf(CliconfBase):
def get_text(self, ele, tag):
try:
return to_text(ele.find(tag).text, errors='surrogate_then_replace').strip()
except AttributeError:
pass
def get_device_info(self):
device_info = dict()
device_info['network_os'] = 'junos'
reply = self.get(command='show version')
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'Junos: (\S+)', data)
if match:
device_info['network_os_version'] = match.group(1)
match = re.search(r'Model: (\S+)', data, re.M)
if match:
device_info['network_os_model'] = match.group(1)
match = re.search(r'Hostname: (\S+)', data, re.M)
if match:
device_info['network_os_hostname'] = match.group(1)
return device_info
def get_config(self, source='running', format='text', flags=None):
if source != 'running':
raise ValueError("fetching configuration from %s is not supported" % source)
options_values = self.get_option_values()
if format not in options_values['format']:
raise ValueError("'format' value %s is invalid. Valid values are %s" % (format, ','.join(options_values['format'])))
if format == 'text':
cmd = 'show configuration'
else:
cmd = 'show configuration | display %s' % format
cmd += ' '.join(to_list(flags))
cmd = cmd.strip()
return self.send_command(cmd)
@configure
def edit_config(self, candidate=None, commit=True, replace=None, comment=None):
operations = self.get_device_operations()
self.check_edit_config_capability(operations, candidate, commit, replace, comment)
resp = {}
results = []
requests = []
if replace:
candidate = 'load replace {0}'.format(replace)
for line in to_list(candidate):
if not isinstance(line, collections.Mapping):
line = {'command': line}
cmd = line['command']
results.append(self.send_command(**line))
requests.append(cmd)
diff = self.compare_configuration()
if diff:
resp['diff'] = diff
if commit:
self.commit(comment=comment)
else:
self.discard_changes()
else:
for cmd in ['top', 'exit']:
self.send_command(cmd)
resp['request'] = requests
resp['response'] = results
return resp
def get(self, command, prompt=None, answer=None, sendonly=False, output=None):
if output:
command = self._get_command_with_output(command, output)
return self.send_command(command, prompt=prompt, answer=answer, sendonly=sendonly)
@configure
def commit(self, comment=None, confirmed=False, at_time=None, synchronize=False):
"""
Execute commit command on remote device.
:param comment: Comment to be associated with commit
:param confirmed: Boolean flag to indicate if the previous commit should confirmed
:param at_time: Time at which to activate configuration changes
:param synchronize: Boolean flag to indicate if commit should synchronize on remote peers
:return: Command response received from device
"""
command = 'commit'
if comment:
command += ' comment {0}'.format(comment)
if confirmed:
command += ' confirmed'
if at_time:
command += ' {0}'.format(at_time)
if synchronize:
command += ' peers-synchronize'
command += ' and-quit'
return self.send_command(command)
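    # Illustrative example (assumed arguments, not from the Junos documentation):
    # commit(comment='ansible edit', confirmed=True) sends the single command
    #   commit comment ansible edit confirmed and-quit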
|
@configure
def discard_changes(self):
command = 'rollback 0'
|
for cmd in chain(to_list(command), 'exit'):
self.send_command(cmd)
@configure
def validate(self):
return self.send_command('commit check')
@configure
def compare_configuration(self, rollback_id=None):
command = 'show | compare'
if rollback_id is not None:
command += ' rollback %s' % int(rollback_id)
resp = self.send_command(command)
r = resp.splitlines()
if len(r) == 1 and r[0] == '[edit]':
resp = ''
return resp
def get_diff(self, rollback_id=None):
diff = {'config_diff': None}
response = self.compare_configuration(rollback_id=rollback_id)
if response:
diff['config_diff'] = response
return diff
def get_device_operations(self):
return {
'supports_diff_replace': False,
'supports_commit': True,
'supports_rollback': True,
'supports_defaults': False,
'supports_onbox_diff': True,
'supports_commit_comment': True,
'supports_multiline_delimiter': False,
'supports_diff_match': False,
'supports_diff_ignore_lines': False,
'supports_generate_diff': False,
'supports_replace': True
}
def get_option_values(self):
return {
'format': ['text', 'set', 'xml', 'json'],
'diff_match': [],
'diff_replace': [],
'output': ['text', 'set', 'xml', 'json']
}
def get_capabilities(self):
result = dict()
result['rpc'] = self.get_base_rpc() + ['commit', 'discard_changes', 'run_commands', 'compare_configuration', 'validate', 'get_diff']
result['network_api'] = 'cliconf'
result['device_info'] = self.get_device_info()
result['device_operations'] = self.get_device_operations()
result.update(self.get_option_values())
return json.dumps(result)
def _get_command_with_output(self, command, output):
options_values = self.get_option_values()
if output not in options_values['output']:
raise ValueError("'output' value %s is invalid. Valid values are %s" % (output, ','.join(options_values['output'])))
if output == 'json' and not command.endswith('| display json'):
cmd = '%s | display json' % command
elif output == 'xml' and not command.endswith('| display xml'):
cmd = '%s | display xml' % command
elif output == 'text' and (command.endswith('| display json') or command.endswith('| display xml')):
cmd = command.rsplit('|', 1)[0]
else:
cmd = command
return cmd
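    # Illustrative examples (assumed commands, not from the Junos documentation):
    #   _get_command_with_output('show interfaces terse', 'json')
    #       -> 'show interfaces terse | display json'
    #   _get_command_with_output('show route | display xml', 'text')
    #       -> 'show route '  (rsplit leaves the trailing space)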
|
why168/PythonProjects
|
MxOnlie/extra_apps/xadmin/apps.py
|
Python
|
artistic-2.0
| 396
| 0
|
from django.apps import AppConfig
from django.core import checks
from django.utils.translation import ugettext_lazy as _
import xadmin
class XAdminConfig(AppConfig):
"""Simple AppConfig which does not do automatic discovery."""
name = '
|
xadmin'
verbose_name = _("Administration")
def ready(self):
self.module.autodiscover()
|
setattr(xadmin, 'site', xadmin.site)
|
gmjosack/modlunky
|
setup.py
|
Python
|
mit
| 1,002
| 0.001996
|
#!/usr/bin/env python
from distutils.core import setup
execfile('modlunky/version.py')
with open('requirements.txt') as requirements:
required = requirements.read().splitlines()
kwargs = {
"name": "modlunky",
"version": str(__version__),
"packages": ["modlunky"],
"scripts": ["bin/modlunky"],
"description": "Library and Command Line Tool for Spelunky.",
"au
|
thor": "Gary M. Josack",
"maintainer": "Gary M. Josack",
"author_email": "gary@byoteki.com",
"maintainer_email": "gary@byoteki.com",
"license": "MIT",
"url": "https://github.com/gmjosack/modlunky",
"download_url": "https://github.com/gmjosack/modlunky/archive/master.tar.gz",
"classifiers": [
"Programming Language :: Python",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development ::
|
Libraries :: Python Modules",
]
}
if required:
kwargs["install_requires"] = required
setup(**kwargs)
|
bladealslayer/nettraf-scripts
|
tcp2tcp.py
|
Python
|
gpl-2.0
| 2,816
| 0.003196
|
#!/usr/bin/python
############################################################################
# tcp2tcp.py #
# v0.1 #
# #
# Copyright (C) 2011 by Boyan Tabakov #
# blade@alslayer.net #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the #
# Free Software Foundation, Inc., #
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. #
############################################################################
import sys,re
inp = sys.stdin
debug = 0
if len(sys.argv) < 2:
dbg ( "Usage: ", sys.argv[0], " <frame file>")
exit(1)
def dbg(*args):
global debug
if not debug:
return
for a in args:
print >> sys.stderr, a,
print >> sys.stderr
frames = []
f_index = 0
fr = open(sys.argv[1], 'r')
first_line = True
for line in inp:
if line == '\n' or line[0] == '#':
continue
if first_line:
first_line = False
print line[:-1] + '\tcurrent_throughput'
continue
l
|
= re.split('\t', line[:-1])
cstart = float(l[8])
cend = float(l[9])
if cend <= cstart:
continue
raise Exception('bad connection times')
cbytes = 0
fr.seek(f_index)
seeking = True
for f in fr:
ll = len(f)
f = re.split('\t', f[:-1])
ts = float(f[0])
if ts < cstart:
f_index += ll
seeking = False
continue
if ts > cend:
break
cbytes += in
|
t(f[1])
thr = float(cbytes) / (cend - cstart)
print line[:-1] + '\t' + str(thr)
|
obi-two/Rebelion
|
data/scripts/templates/object/ship/player/shared_player_z95.py
|
Python
|
mit
| 434
| 0.048387
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Ship()
result.template = "ob
|
ject/ship/player/shared_player_z95.iff"
result.a
|
ttribute_template_id = -1
result.stfName("space_ship","player_z95")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
bennylope/django-lokoj
|
locations/utils.py
|
Python
|
mit
| 5,917
| 0.00169
|
import csv
from django.db.models import Max
from django.utils.translation import ugettext_lazy as _
from urllib2 import URLError
from googlemaps import GoogleMaps, GoogleMapsError
from locations.models import Location
from locations.exceptions import LocationEncodingError
class CsvParseError(csv.Error):
pass
def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
"""From Python csv documentation, used to read non-ASCII data"""
# csv.py doesn't do Unicode; encode temporarily as UTF-8:
csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),
dialect=dialect, **kwargs)
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
yield [unicode(cell, 'utf-8') for cell in row]
def utf_8_encoder(unicode_csv_data):
for line in unicode_csv_data:
yield unicode(line.decode('cp1252')).encode('utf8')
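# Illustrative usage (hypothetical file name, not part of this module):
#   with open('locations.csv', 'rb') as csv_file:
#       for row in unicode_csv_reader(csv_file):
#           print row  # each cell is a unicode string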
def get_data_list(csv_reader, has_header=False):
"""
Reads from the csv reader object and creates a list of dictionaries with
    all of the values, separating csv parsing from all other data management.
"""
locations_list = []
# TODO: re-implement this, perhaps after fixing problem of newlines
# inside strings... the problems look like this (next two rows):
# "My string starts
# ", 5, 3
##row_start = 1 if csv.Sniffer().has_header(sample) else 0
# This had to be commented out because the csv reader object is not
# subscriptable!
##for (counter, row) in enumerate(csv_reader[row_start:]):
for (counter, row) in enumerate(csv_reader):
# Postal code is optional
try:
postal_code = row[4].strip()
except IndexError:
postal_code = ''
try:
locations_list.append({
'name': row[0].strip(),
'address': row[1].strip(),
'city': row[2].strip(),
'state': row[3].strip(),
'postal_code': postal_code,
})
except IndexError:
raise CsvParseError("Missing a column in row %s" % (counter + row))
except Exception:
raise CsvParseError(
"%s exception in row %s, %s" % (Exception, (counter + row), row))
return locations_list
def locations_from_csv(csv_file, category, has_header=False,
duplicates_field=None):
"""
    Parse locations from an uploaded CSV file and create Location objects in the
    given category, reporting which rows were created and which were skipped.
"""
sample = csv_file.read(1024)
dialect = csv.Sniffer().sniff(sample)
csv_file.seek(0)
csv_reader = unicode_csv_reader(csv_file, dialect)
messages = {
'errors': True,
'warnings': [],
'created': [],
'skipped': [],
'created_count': 0,
'skipped_count': 0,
'upload_count': 0,
}
try:
location_list = get_data_list(csv_reader)
except CsvParseError, e:
messages['warnings'].append(e)
return messages
counter_query = Location.objects.aggregate(Max('upload_count')).get('upload_count__max', 0)
upload_counter = 0 if counter_query is None else counter_query + 1
for location_row in location_list:
try:
location, created = Location.objects.get_or_create(
original_name=location_row['name'], defaults={
'name': " ".join([word[0].upper() + word[1:].lower() for word in location_row['name'].split()]),
'street_address': " ".join([word[0].upper() + word[1:].lower() for word in location_row['address'].split()]),
'city': " ".join([word[0].upper() + word[1:].lower() for word in location_row['city'].split()]),
'state': location_row['state'].upper(),
'postal_code': location_row['postal_code'],
'upload_count': upload_counter,
})
except Location.MultipleObjectsReturned:
messages['warnings'].append(
"%s is already duplicated in the database" % location_row['name'])
created = False
if created:
location.category.add(category)
location.save()
messages['created'].append(location_row['name'])
else:
# Duplicate, but enforce that it is now active
location.is_active = True
location.save()
messages['skipped'].append(location_row['name'])
messages['errors'] = False
messages['created_count'] = len(messages['created'])
messages['skipped_count'] = len(messages['skipped'])
messages['upload_count'] = upload_counter
return messages
def geopoint_average(points):
"""Takes a list of lat-lng tuples and returns an average"""
count = len(points)
if not count:
return None
lat = 0
lng = 0
|
for point in points:
lat += point[0]
lng += point[1]
return (lat/count, lng/count)
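# Illustrative examples (not part of this module):
#   geopoint_average([(10.0, 20.0), (20.0, 40.0)]) -> (15.0, 30.0)
#   geopoint_average([]) -> None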
def get_address_latlng(location):
"""
Requests the latitude and longitude for the given location's address.
Uses the Google Maps API, but
|
could be extended to use a different API.
"""
address = u"%s, %s, %s %s" % (location.street_address, location.city,
location.state, location.postal_code)
try:
return GoogleMaps().address_to_latlng(address)
except GoogleMapsError:
raise LocationEncodingError(_("Google reported an error!"))
except URLError:
raise LocationEncodingError(_("Hmm, network error. Please try again."))
except Exception, e:
raise LocationEncodingError(_("Unknown error: %s, %s" % (
Exception, e)))
def geocode_location(location):
"""
Basically the same as geocode_address but acts on the location.
"""
location.point = get_address_latlng(location)
location.save()
return location
|
fraunhoferfokus/fixmycity
|
dummy/templatetags/value_from_settings.py
|
Python
|
lgpl-3.0
| 2,196
| 0.013206
|
'''
/*******************************************************************************
*
* Copyright (c) 2015 Fraunhofer FOKUS, All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3.0 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library. If not, see <http://www.gnu.org/licenses/>.
*
* AUTHORS: Louay Bassbouss (louay.bassbouss@fokus.fraunhofer.de)
*
******************************************************************************/
'''
from django.template import TemplateSyntaxError, Node, Variable, Library
from django.conf import settings
register = Library()
# I found some tricks in URLNode and url from defaulttags.py:
# https://code.djangoproject.com/browser/django/trunk/django/template/defaulttags.py
@register.tag
def value_from_settings(parser, token):
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one " \
"argument (settings constant to retrieve)" % bits[0])
settingsvar = bits[1]
settingsvar
|
= settingsvar[1:-1] if settingsvar[0] == '"' else settingsvar
asvar = None
bits = bits[2:]
if len(bits) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len(bits):
raise TemplateSyntaxError("'value_from_settings' didn't recognise " \
"the arguments '%s'" % ", ".join(bits))
return ValueFromSettings(settingsvar, asvar)
class ValueFromSettings(Node):
|
def __init__(self, settingsvar, asvar):
self.arg = Variable(settingsvar)
self.asvar = asvar
def render(self, context):
ret_val = getattr(settings,str(self.arg))
if self.asvar:
context[self.asvar] = ret_val
return ''
else:
return ret_val
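# Illustrative template usage (assumed settings name, not from this project):
#   {% load value_from_settings %}
#   {% value_from_settings "MEDIA_URL" %}
#   {% value_from_settings "MEDIA_URL" as media_url %}{{ media_url }}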
|
mlperf/training_results_v0.5
|
v0.5.0/google/cloud_v3.8/ssd-tpuv3-8/code/ssd/model/tpu/models/official/retinanet/retinanet_architecture.py
|
Python
|
apache-2.0
| 25,570
| 0.004263
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www
|
.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distribute
|
d under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RetinaNet (via ResNet) model definition.
Defines the RetinaNet model and loss functions from this paper:
https://arxiv.org/pdf/1708.02002
Uses the ResNet model as a basis.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
_WEIGHT_DECAY = 1e-4
_BATCH_NORM_DECAY = 0.997
_BATCH_NORM_EPSILON = 1e-4
_RESNET_MAX_LEVEL = 5
def batch_norm_relu(inputs,
is_training_bn,
relu=True,
init_zero=False,
data_format='channels_last',
name=None):
"""Performs a batch normalization followed by a ReLU.
Args:
inputs: `Tensor` of shape `[batch, channels, ...]`.
is_training_bn: `bool` for whether the model is training.
relu: `bool` if False, omits the ReLU operation.
init_zero: `bool` if True, initializes scale parameter of batch
normalization with 0 instead of 1 (default).
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
name: the name of the batch normalization layer
Returns:
A normalized `Tensor` with the same `data_format`.
"""
if init_zero:
gamma_initializer = tf.zeros_initializer()
else:
gamma_initializer = tf.ones_initializer()
if data_format == 'channels_first':
axis = 1
else:
axis = 3
inputs = tf.layers.batch_normalization(
inputs=inputs,
axis=axis,
momentum=_BATCH_NORM_DECAY,
epsilon=_BATCH_NORM_EPSILON,
center=True,
scale=True,
training=is_training_bn,
fused=True,
gamma_initializer=gamma_initializer,
name=name)
if relu:
inputs = tf.nn.relu(inputs)
return inputs
def fixed_padding(inputs, kernel_size, data_format='channels_last'):
"""Pads the input along the spatial dimensions independently of input size.
Args:
inputs: `Tensor` of size `[batch, channels, height, width]` or
`[batch, height, width, channels]` depending on `data_format`.
kernel_size: `int` kernel size to be used for `conv2d` or max_pool2d`
operations. Should be a positive integer.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
Returns:
A padded `Tensor` of the same `data_format` with size either intact
(if `kernel_size == 1`) or padded (if `kernel_size > 1`).
"""
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if data_format == 'channels_first':
padded_inputs = tf.pad(
inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])
else:
padded_inputs = tf.pad(
inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return padded_inputs
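# Illustrative example (not part of the original module): for kernel_size=3,
# pad_total = 2, so one row/column of zeros is added on each side (pad_beg=1,
# pad_end=1); for kernel_size=1 the pad amounts are all zero and no padding is added.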
def conv2d_fixed_padding(inputs,
filters,
kernel_size,
strides,
data_format='channels_last'):
"""Strided 2-D convolution with explicit padding.
The padding is consistent and is based only on `kernel_size`, not on the
dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
Args:
inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.
filters: `int` number of filters in the convolution.
kernel_size: `int` size of the kernel to be used in the convolution.
strides: `int` strides of the convolution.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
Returns:
A `Tensor` of shape `[batch, filters, height_out, width_out]`.
"""
if strides > 1:
inputs = fixed_padding(inputs, kernel_size, data_format=data_format)
return tf.layers.conv2d(
inputs=inputs,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=('SAME' if strides == 1 else 'VALID'),
use_bias=False,
kernel_initializer=tf.variance_scaling_initializer(),
data_format=data_format)
def residual_block(inputs,
filters,
is_training_bn,
strides,
use_projection=False,
data_format='channels_last'):
"""Standard building block for residual networks with BN after convolutions.
Args:
inputs: `Tensor` of size `[batch, channels, height, width]`.
filters: `int` number of filters for the first two convolutions. Note that
the third and final convolution will use 4 times as many filters.
is_training_bn: `bool` for whether the model is in training.
strides: `int` block stride. If greater than 1, this block will ultimately
downsample the input.
use_projection: `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
Returns:
The output `Tensor` of the block.
"""
shortcut = inputs
if use_projection:
# Projection shortcut in first layer to match filters and strides
shortcut = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=1,
strides=strides,
data_format=data_format)
shortcut = batch_norm_relu(
shortcut, is_training_bn, relu=False, data_format=data_format)
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=3,
strides=strides,
data_format=data_format)
inputs = batch_norm_relu(inputs, is_training_bn, data_format=data_format)
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=3,
strides=1,
data_format=data_format)
inputs = batch_norm_relu(
inputs,
is_training_bn,
relu=False,
init_zero=True,
data_format=data_format)
return tf.nn.relu(inputs + shortcut)
def bottleneck_block(inputs,
filters,
is_training_bn,
strides,
use_projection=False,
data_format='channels_last'):
"""Bottleneck block variant for residual networks with BN after convolutions.
Args:
inputs: `Tensor` of size `[batch, channels, height, width]`.
filters: `int` number of filters for the first two convolutions. Note that
the third and final convolution will use 4 times as many filters.
is_training_bn: `bool` for whether the model is in training.
strides: `int` block stride. If greater than 1, this block will ultimately
downsample the input.
use_projection: `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
Returns:
The output `Tensor` of the block.
"""
shortcut = inputs
if use
|
PlanTool/plantool
|
code/Uncertainty/T0/translator/generators/sortnum.py
|
Python
|
gpl-2.0
| 3,602
| 0.009162
|
#! /usr/bin/env python
# -*- coding: latin-1 -*-
# Copyright (C) 2006 Universitat Pompeu Fabra
#
# Permission is hereby granted to distribute this software for
# non-commercial research purposes, provided that this copyright
# notice is included with any such distribution.
#
# THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
# SOFTWARE IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU
# ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
#
# Elaborated by Hector Palacios, hlp@ldc.usb.ve, hectorpal@gmail.com
import sys
import os
name = "s"
if len(sys.argv) < 2 or len(sys.argv) > 3:
print """
usage: %s <n> {cff}
generates files d.pddl and p.pddl at directory %s-<n>
for the problem of sorting <n> numbers
    if {cff} is passed, then PDDL compatible with ConformantFF is generated.
""" % (sys.argv[0], name)
sys.exit(1)
n = int(sys.argv[1])
cff=False
if len(sys.argv) == 3:
if sys.argv[2] == 'cff':
cff = True
else:
print 'Param should be empty or cff:', sys.argv[2]
sys.exit(1)
path=name+"-"+str(n)
name_p="%s-%d" % (name, n)
if cff:
path += "-cff"
name_p += "-cff"
os.system("/bin/rm -rf ./"+path)
os.makedirs(path)
domain = file(path + "/d.pddl", "w")
cs = ''
for i in range(1,n+1):
cs += ' n%d' % i
if cff:
t = ''
else:
t = ' - num'
print >> domain, """
(define (domain %s) """ % name + """
(:requirements :typing :equality)
(:types num)
(:constants %s%s)
(:predicates (foo) (less ?n1 ?n2%s))
""" % (cs,t,t)
def print_act(i,j):
print >> domain, """
(:action cmpswap-%d-%d
:effect (and (less n%d n%d) (not (less n%d n%d))""" % (i, j, i, j, j, i),
for k in range(1,n+1):
if k != i and k != j:
print >> domain, """
(when (less n%d n%d)
(and (less n%d n%d) (not (less n%d n%d))))""" % (
k, i, k, j, j, k),
print >> domain, """
(when (and (less n%d n%d) (not (less n%d n%d)))
(not (less n%d n%d)))""" % (
k, i, k, j, k, i),
print >> domain, """
(when (less n%d n%d)
(and (less n%d n%d) (not (less n%d n%d))))""" % (
j, k, i, k, k, i),
print >> domain, """
(when (and (less n%d n%d) (not (less n%d n%d)))
(not (less n%d n%d)))""" % (
j, k, i, k, j, k),
print
|
>> domain, """
))"""
for i in range(1,n+1):
for j in range(i+1,n+1):
if i != j:
print_act(i
|
,j)
print >> domain, """
)
"""
problem = file(path + "/p.pddl", "w")
if cff:
t = ''
else:
t = '(and '
print >> problem, """
(define (problem s%d)
(:domain %s)
(:init %s""" % (n,name,t),
for i in range(1,n+1):
for j in range(1,n+1):
if i != j:
print >> problem, """
(or (less n%d n%d) (not (less n%d n%d)))""" % (i,j,i,j),
if cff:
print >> problem, """
(unknown (less n%d n%d))""" % (i,j),
if cff:
t = ''
else:
t = ')'
print >> problem, """
%s)
(:goal (and""" % t,
for i in range(1,n):
print >> problem, """
(less n%d n%d)""" % (i,i+1),
print >> problem, """
))
)
"""
|
mjasher/gac
|
original_libraries/flopy-master/flopy/modflow/mfupw.py
|
Python
|
gpl-2.0
| 13,021
| 0.010598
|
import sys
import numpy as np
from flopy.mbase import Package
from flopy.utils import util_2d,util_3d
from flopy.modflow.mfpar import ModflowPar as mfpar
class ModflowUpw(Package):
'Upstream weighting package class\n'
def __init__(self, model, laytyp=0, layavg=0, chani=1.0, layvka=0, laywet=0, iupwcb = 53, hdry=-1E+30, iphdry = 0,\
hk=1.0, hani=1.0, vka=1.0, ss=1e-5, sy=0.15, vkcb=0.0, noparcheck=False, \
extension='upw', unitnumber = 31):
Package.__init__(self, model, extension, 'UPW', unitnumber) # Call ancestor's init to set self.parent, extension, name and unit number
self.heading = '# UPW for MODFLOW-NWT, generated by Flopy.'
self.url = 'upw_upstream_weighting_package.htm'
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
# item 1
self.iupwcb = iupwcb # Unit number for file with cell-by-cell flow terms
self.hdry = hdry # Head in cells that are converted to dry during a simulation
self.npupw = 0 # number of UPW parameters
self.iphdry = iphdry
self.laytyp = util_2d(model,(nlay,),np.int,laytyp,name='laytyp')
self.layavg = util_2d(model,(nlay,),np.int,layavg,name='layavg')
self.chani = util_2d(model,(nlay,),np.int,chani,name='chani')
self.layvka = util_2d(model,(nlay,),np.int,layvka,name='vka')
self.laywet = util_2d(model,(nlay,),np.int,laywet,name='laywet')
self.options = ' '
i
|
f noparcheck: self.options = self.options + 'NOPARCHECK '
self.hk = util_3d(model,(nlay,nrow,ncol),np.float32,hk,name='hk',locat=self.unit_number[0])
self.hani = util
|
_3d(model,(nlay,nrow,ncol),np.float32,hani,name='hani',locat=self.unit_number[0])
self.vka = util_3d(model,(nlay,nrow,ncol),np.float32,vka,name='vka',locat=self.unit_number[0])
self.ss = util_3d(model,(nlay,nrow,ncol),np.float32,ss,name='ss',locat=self.unit_number[0])
self.sy = util_3d(model,(nlay,nrow,ncol),np.float32,sy,name='sy',locat=self.unit_number[0])
self.vkcb = util_3d(model,(nlay,nrow,ncol),np.float32,vkcb,name='vkcb',locat=self.unit_number[0])
self.parent.add_package(self)
def write_file(self):
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
# Open file for writing
f_upw = open(self.fn_path, 'w')
# Item 0: text
f_upw.write('%s\n' % self.heading)
# Item 1: IBCFCB, HDRY, NPLPF
f_upw.write('{0:10d}{1:10.3G}{2:10d}{3:10d}{4:s}\n'.format(self.iupwcb,self.hdry,self.npupw,self.iphdry,self.options))
# LAYTYP array
f_upw.write(self.laytyp.string);
# LAYAVG array
f_upw.write(self.layavg.string);
# CHANI array
f_upw.write(self.chani.string);
# LAYVKA array
f_upw.write(self.layvka.string)
# LAYWET array
f_upw.write(self.laywet.string);
# Item 7: WETFCT, IWETIT, IHDWET
iwetdry = self.laywet.sum()
if iwetdry > 0:
raise Exception, 'LAYWET should be 0 for UPW'
transient = not self.parent.get_package('DIS').steady.all()
for k in range(nlay):
f_upw.write(self.hk[k].get_file_entry())
if self.chani[k] < 1:
f_upw.write(self.hani[k].get_file_entry())
f_upw.write(self.vka[k].get_file_entry())
if transient == True:
f_upw.write(self.ss[k].get_file_entry())
if self.laytyp[k] !=0:
f_upw.write(self.sy[k].get_file_entry())
if self.parent.get_package('DIS').laycbd[k] > 0:
f_upw.write(self.vkcb[k].get_file_entry())
if (self.laywet[k] != 0 and self.laytyp[k] != 0):
f_upw.write(self.laywet[k].get_file_entry())
f_upw.close()
@staticmethod
def load(f, model, ext_unit_dict=None):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
Returns
-------
dis : ModflowUPW object
ModflowLpf object.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> lpf = flopy.modflow.ModflowUpw.load('test.upw', m)
"""
if model.verbose:
sys.stdout.write('loading upw package file...\n')
if type(f) is not file:
filename = f
f = open(filename, 'r')
#dataset 0 -- header
while True:
line = f.readline()
if line[0] != '#':
break
# determine problem dimensions
nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
# Item 1: IBCFCB, HDRY, NPLPF - line already read above
if model.verbose:
print ' loading IUPWCB, HDRY, NPUPW, IPHDRY...'
t = line.strip().split()
iupwcb, hdry, npupw, iphdry = int(t[0]), float(t[1]), int(t[2]), int(t[3])
if iupwcb != 0:
model.add_pop_key_list(iupwcb)
iupwcb = 53
# options
noparcheck = False
if len(t) > 3:
for k in xrange(3,len(t)):
if 'NOPARCHECK' in t[k].upper():
noparcheck = True
# LAYTYP array
if model.verbose:
print ' loading LAYTYP...'
line = f.readline()
t = line.strip().split()
laytyp = np.array((t[0:nlay]),dtype=np.int)
# LAYAVG array
if model.verbose:
print ' loading LAYAVG...'
line = f.readline()
t = line.strip().split()
layavg = np.array((t[0:nlay]),dtype=np.int)
# CHANI array
if model.verbose:
print ' loading CHANI...'
line = f.readline()
t = line.strip().split()
chani = np.array((t[0:nlay]),dtype=np.float32)
# LAYVKA array
if model.verbose:
print ' loading LAYVKA...'
line = f.readline()
t = line.strip().split()
layvka = np.array((t[0:nlay]),dtype=np.int)
# LAYWET array
if model.verbose:
print ' loading LAYWET...'
line = f.readline()
t = line.strip().split()
laywet = np.array((t[0:nlay]),dtype=np.int)
# Item 7: WETFCT, IWETIT, IHDWET
wetfct,iwetit,ihdwet = None,None,None
iwetdry = laywet.sum()
if iwetdry > 0:
raise Exception, 'LAYWET should be 0 for UPW'
#--get parameters
par_types = []
if npupw > 0:
            par_types, parm_dict = mfpar.load(f, npupw, model.verbose)
#--get arrays
transient = not model.get_package('DIS').steady.all()
hk = [0] * nlay
hani = [0] * nlay
vka = [0] * nlay
ss = [0] * nlay
sy = [0] * nlay
vkcb = [0] * nlay
for k in range(nlay):
if model.verbose:
print ' loading hk layer {0:3d}...'.format(k+1)
if 'hk' not in par_types:
t = util_2d.load(f, model, (nrow,ncol), np.float32, 'hk',
ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(model, (nrow, ncol), 'hk', parm_dict, findlayer=k)
hk[k] = t
|
DonYum/LogAna
|
app/log_analyzer/__init__.py
|
Python
|
mit
| 101
| 0.009901
|
|
from flask import Blueprint
log_analyzer = Blueprint('log_analyzer', __name__)
from . import views
| |
curoverse/libcloud
|
libcloud/test/compute/test_gce.py
|
Python
|
apache-2.0
| 123,700
| 0.001156
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for Google Compute Engine Driver
"""
import sys
import unittest
import datetime
from libcloud.utils.py3 import httplib
from libcloud.compute.drivers.gce import (GCENodeDriver, API_VERSION,
timestamp_to_datetime,
GCEAddress, GCEBackendService,
GCEFirewall, GCEForwardingRule,
GCEHealthCheck, GCENetwork,
GCENodeImage, GCERoute,
GCETargetHttpProxy, GCEUrlMap,
GCEZone)
from libcloud.common.google import (GoogleBaseAuthConnection,
ResourceNotFoundError, ResourceExistsError,
InvalidRequestError, GoogleBaseError)
from libcloud.test.common.test_google import GoogleAuthMockHttp, GoogleTestCase
from libcloud.compute.base import Node, StorageVolume
from libcloud.test import MockHttpTestCase
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import GCE_PARAMS, GCE_KEYWORD_PARAMS
class GCENodeDriverTest(GoogleTestCase, TestCaseMixin):
"""
Google Compute Engine Test Class.
"""
# Mock out a few specific calls that interact with the user, system or
# environment.
GCEZone._now = lambda x: datetime.datetime(2013, 6, 26, 19, 0, 0)
datacenter = 'us-central1-a'
def setUp(self):
GCEMockHttp.test = self
GCENodeDriver.connectionCls.conn_classes = (GCEMockHttp, GCEMockHttp)
GoogleBaseAuthConnection.conn_classes = (GoogleAuthMockHttp,
GoogleAuthMockHttp)
GCEMockHttp.type = None
kwargs = GCE_KEYWORD_PARAMS.copy()
kwargs['auth_type'] = 'IA'
kwargs['datacenter'] = self.datacenter
|
        self.driver = GCENodeDriver(*GCE_PARAMS, **kwargs)
def test_default_scopes(self):
self.assertEqual(self.driver.scopes, None)
def test_timestamp_to_datetime(self):
timestamp1 = '2013-06-26T10:05:19.340-07:00'
datetime1 = datetime.datetime(2013, 6, 26, 17, 5, 19)
self.assertEqual(timestamp_to_datetime(timestamp1), datetime1)
|
        timestamp2 = '2013-06-26T17:43:15.000-00:00'
datetime2 = datetime.datetime(2013, 6, 26, 17, 43, 15)
self.assertEqual(timestamp_to_datetime(timestamp2), datetime2)
def test_get_object_by_kind(self):
obj = self.driver._get_object_by_kind(None)
self.assertIsNone(obj)
obj = self.driver._get_object_by_kind('')
self.assertIsNone(obj)
obj = self.driver._get_object_by_kind(
'https://www.googleapis.com/compute/v1/projects/project_name/'
'global/targetHttpProxies/web-proxy')
self.assertEqual(obj.name, 'web-proxy')
def test_get_region_from_zone(self):
zone1 = self.driver.ex_get_zone('us-central1-a')
expected_region1 = 'us-central1'
region1 = self.driver._get_region_from_zone(zone1)
self.assertEqual(region1.name, expected_region1)
zone2 = self.driver.ex_get_zone('europe-west1-b')
expected_region2 = 'europe-west1'
region2 = self.driver._get_region_from_zone(zone2)
self.assertEqual(region2.name, expected_region2)
def test_find_zone_or_region(self):
zone1 = self.driver._find_zone_or_region('libcloud-demo-np-node',
'instances')
self.assertEqual(zone1.name, 'us-central2-a')
zone2 = self.driver._find_zone_or_region(
'libcloud-demo-europe-np-node', 'instances')
self.assertEqual(zone2.name, 'europe-west1-a')
region = self.driver._find_zone_or_region('libcloud-demo-address',
'addresses', region=True)
self.assertEqual(region.name, 'us-central1')
def test_match_images(self):
project = 'debian-cloud'
image = self.driver._match_images(project, 'debian-7')
self.assertEqual(image.name, 'debian-7-wheezy-v20131120')
image = self.driver._match_images(project, 'backports')
self.assertEqual(image.name, 'backports-debian-7-wheezy-v20131127')
def test_ex_get_serial_output(self):
self.assertRaises(ValueError, self.driver.ex_get_serial_output, 'foo')
node = self.driver.ex_get_node('node-name', 'us-central1-a')
self.assertTrue(self.driver.ex_get_serial_output(node),
'This is some serial\r\noutput for you.')
def test_ex_list(self):
d = self.driver
# Test the default case for all list methods
# (except list_volume_snapshots, which requires an arg)
for list_fn in (d.ex_list_addresses,
d.ex_list_backendservices,
d.ex_list_disktypes,
d.ex_list_firewalls,
d.ex_list_forwarding_rules,
d.ex_list_healthchecks,
d.ex_list_networks,
d.ex_list_project_images,
d.ex_list_regions,
d.ex_list_routes,
d.ex_list_snapshots,
d.ex_list_targethttpproxies,
d.ex_list_targetinstances,
d.ex_list_targetpools,
d.ex_list_urlmaps,
d.ex_list_zones,
d.list_images,
d.list_locations,
d.list_nodes,
d.list_sizes,
d.list_volumes):
full_list = [item.name for item in list_fn()]
li = d.ex_list(list_fn)
iter_list = [item.name for sublist in li for item in sublist]
self.assertEqual(full_list, iter_list)
# Test paging & filtering with a single list function as they require
# additional test fixtures
list_fn = d.ex_list_regions
for count, sublist in zip((2, 1), d.ex_list(list_fn).page(2)):
self.assertTrue(len(sublist) == count)
for sublist in d.ex_list(list_fn).filter('name eq us-central1'):
self.assertTrue(len(sublist) == 1)
self.assertEqual(sublist[0].name, 'us-central1')
def test_ex_list_addresses(self):
address_list = self.driver.ex_list_addresses()
address_list_all = self.driver.ex_list_addresses('all')
address_list_uc1 = self.driver.ex_list_addresses('us-central1')
address_list_global = self.driver.ex_list_addresses('global')
self.assertEqual(len(address_list), 2)
self.assertEqual(len(address_list_all), 5)
self.assertEqual(len(address_list_global), 1)
self.assertEqual(address_list[0].name, 'libcloud-demo-address')
self.assertEqual(address_list_uc1[0].name, 'libcloud-demo-address')
self.assertEqual(address_list_global[0].name, 'lcaddressglobal')
names = [a.name for a in address_list_all]
self.assertTrue('libcloud-demo-address' in names)
def test_ex_list_backendservices(self):
self.backendservices_mock = 'empty'
backendservices_list = self.driver.ex_list_back
|
ancho85/pylint-playero-plugin
|
tests/fulltest.py
|
Python
|
gpl-2.0
| 2,139
| 0.004675
|
import os
import sys
import unittest
from logilab.common import testlib
from pylint.testutils import make_tests, LintTestUsingFile, cb_test_gen, linter
import ConfigParser
HERE = os.path.dirname(os.path.abspath(__file__))
PLUGINPATH = os.path.join(HERE, "..")
linter.prepare_import_path(PLUGINPATH)
linter.load_plugin_modules(['Playero'])
linter.global_set_option('required-attributes', ()) # remove required __revision__
|
linter.load_file_configuration(os.path.join(HERE, "..", "config", ".pylintrc"))
convs = ['C0111', 'C0103', 'C0301', 'C0303', 'C0304', 'C0321']
warns = ['W0141', 'W0142', 'W0212', 'W0312', 'W0401', 'W0403', 'W0511', 'W0512', 'W0614', 'W0622']
refac = ['R0903', 'R0904', 'R0913']
for disabled in convs + warns + refac:
|
    linter.disable(disabled)
config = ConfigParser.SafeConfigParser()
config.read(os.path.join(HERE, "..", "config", "playero.cfg"))
PLAYEROPATH = config.get('paths', os.name)
sys.path.append(os.path.join(PLAYEROPATH, "core"))
for scriptdir in ["base", "standard", "extra/StdPy"]:
for pydir in ['records', 'windows', 'reports', 'routines', 'documents','tools']:
sys.path.append(os.path.join(PLAYEROPATH, scriptdir, pydir))
sys.path.append(os.path.join(PLUGINPATH, "corepy", "embedded"))
def tests():
callbacks = [cb_test_gen(LintTestUsingFile)]
input_dir = os.path.join(HERE, 'input')
messages_dir = os.path.join(HERE, 'messages')
return make_tests(input_dir, messages_dir, None, callbacks)
def additional_tests():
suites = unittest.TestSuite()
for fn in os.listdir(os.path.dirname(__file__)):
if fn.endswith('.py') and fn not in ('__init__.py', 'fulltest.py'):
name = os.path.splitext(fn)[0]
module = __import__(name, globals(), locals(), [name])
if hasattr(module, 'test_suite'):
suites.addTests(module.test_suite())
return suites
def suite():
default = [unittest.makeSuite(test, suiteClass=testlib.TestSuite) for test in tests()]
default.append(additional_tests())
return testlib.TestSuite(default)
if __name__ == '__main__':
testlib.unittest_main(defaultTest='suite')
|
EddyCodeIt/SPA_Project_2016_Data_Rep-Quering
|
db_downgrade.py
|
Python
|
apache-2.0
| 600
| 0.006667
|
# code source: https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-iv-database
|
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
api.downgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, v - 1)
|
print 'Current database version: ' + str(api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO))
# This script downgrades database by 1 revision every time it runs.
# To downgrade in multiple revisions, run script as many as needed.
|
zaneb/heat-convergence-prototype
|
scenarios/basic_create_rollback.py
|
Python
|
apache-2.0
| 358
| 0
|
|
example_template = Template({
'A': RsrcDef({}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'a': '4alpha'}, ['A', 'B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', 'a')}, []),
})
engine.create_stack('foo', example_template)
engine.noop(3)
engine.rollback_stack('foo')
|
engine.noop(6)
engine.call(verify, Template())
|
SlateScience/MozillaJS
|
js/src/python/mozboot/mozboot/openbsd.py
|
Python
|
mpl-2.0
| 954
| 0.013627
|
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
from mozboot.base import BaseBootstrapper
class OpenBSDBootstrapper(BaseBootstrapper):
def __init__(self, version):
BaseBootstrapper.__init__(self)
def install_system_packages(self):
|
        # we use -z because there's no other way to say "any autoconf-2.13"
self.run_as_root(['pkg_add', '-z',
'mercurial',
'llvm',
'autoconf-2.13',
'yasm',
'gtk+2',
'libIDL',
'gmake',
'gtar',
'wget',
'unzip',
'zip'])
def _update_package_manager(self):
self.run_as_root(['port', 'sync'])
def upgrade_mercurial(self, current):
self.run_as_root(['pkg_add', '-u', 'mercurial'])
|
Rosebotics/cwc-projects
|
lego-ev3/examples/analog_sensors/ir_sensor/print_beacon_seeking.py
|
Python
|
gpl-3.0
| 1,582
| 0.003793
|
#!/usr/bin/env python3
"""
The goal of this example is to show you the syntax for IR seeking readings. When using
IR-SEEK with a remote control you get both heading and distance data. The code below
shows the syntax for beacon seeking. Additionally it's good to play with a demo so that
you can see how well or not well a sensor behaves.
To test this module, put the IR Remote into beacon mode by pressing the button at the top
of the remote and making sure the green LED is on. Use channel 1 for this module. Move
the beacon around and watch the values that are printed.
Authors: David Fisher and PUT_YOUR_NAME_HERE. February 2017.
"""
import ev3dev.ev3 as ev3
import time
def main():
print("--------------------------------------------")
print(" Printing beacon seeking data")
|
    print(" Press the touch sensor to exit")
print("--------------------------------------------")
|
    ev3.Sound.speak("Printing beacon seeking").wait()
touch_sensor = ev3.TouchSensor()
ir_sensor = ev3.InfraredSensor()
assert touch_sensor
assert ir_sensor
ir_sensor.mode = "IR-SEEK"
while not touch_sensor.is_pressed:
current_heading = ir_sensor.value(0)
current_distance = ir_sensor.value(1)
print("IR Heading = {} Distance = {}".format(current_heading, current_distance))
time.sleep(0.5)
ev3.Sound.speak("Goodbye")
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
|
artur-shaik/qutebrowser
|
tests/unit/misc/test_split.py
|
Python
|
gpl-3.0
| 6,878
| 0.000583
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.misc.split."""
import collections
import pytest
from qutebrowser.misc import split
# Most tests copied from Python's shlex.
# The original test data set was from shellwords, by Hartmut Goebel.
# Format: input/split|output|without|keep/split|output|with|keep/
test_data_str = r"""
one two/one|two/one| two/
one "two three" four/one|two three|four/one| "two three"| four/
one 'two three' four/one|two three|four/one| 'two three'| four/
one "two\" three" four/one|two" three|four/one| "two\" three"| four/
one 'two'\'' three' four/one|two' three|four/one| 'two'\'' three'| four/
one "two three/one|two three/one| "two three/
one 'two three/one|two three/one| 'two three/
one\/one\/one\/
one "two\/one|two\/one| "two\/
one /one/one| /
open -t i/open|-t|i/open| -t| i/
foo bar/foo|bar/foo| bar/
foo bar/foo|bar/ foo| bar/
foo bar /foo|bar/ foo| bar| /
foo bar bla fasel/foo|bar|bla|fasel/foo| bar| bla| fasel/
x y z xxxx/x|y|z|xxxx/x| y| z| xxxx/
\x bar/x|bar/\x| bar/
\ x bar/ x|bar/\ x| bar/
\ bar/ bar/\ bar/
foo \x bar/foo|x|bar/foo| \x| bar/
foo \ x bar/foo| x|bar/foo| \ x| bar/
foo \ bar/foo| bar/foo| \ bar/
foo "bar" bla/foo|bar|bla/foo| "bar"| bla/
"foo" "bar" "bla"/foo|bar|bla/"foo"| "bar"| "bla"/
"foo" bar "bla"/foo|bar|bla/"foo"| bar| "bla"/
"foo" bar bla/foo|bar|bla/"foo"| bar| bla/
foo 'bar' bla/foo|bar|bla/foo| 'bar'| bla/
'foo' 'bar' 'bla'/foo|bar|bla/'foo'| 'bar'| 'bla'/
'foo' bar 'bla'/foo|bar|bla/'foo'| bar| 'bla'/
'foo' bar bla/foo|bar|bla/'foo'| bar| bla/
blurb foo"bar"bar"fasel" baz/blurb|foobarbarfasel|baz/blurb| foo"bar"bar"fasel"| baz/
blurb foo'bar'bar'fasel' baz/blurb|foobarbarfasel|baz/blurb| foo'bar'bar'fasel'| baz/
""//""/
''//''/
foo "" bar/foo||bar/foo| ""| bar/
foo '' bar/foo||bar/foo| ''| bar/
foo "" "" "" bar/foo||||bar/foo| ""| ""| ""| bar/
foo '' '' '' bar/foo||||bar/foo| ''| ''| ''| bar/
\"/"/\"/
"\""/"/"\""/
"foo\ bar"/foo\ bar/"foo\ bar"/
"foo\\ bar"/foo\ bar/"foo\\ bar"/
"foo\\ bar\""/foo\ bar"/"foo\\ bar\""/
"foo\\" bar\"/foo\|bar"/"foo\\"| bar\"/
"foo\\ bar\" dfadf"/foo\ bar" dfadf/"foo\\ bar\" dfadf"/
"foo\\\ bar\" dfadf"/foo\\ bar" dfadf/"foo\\\ bar\" dfadf"/
"foo\\\x bar\" dfadf"/foo\\x bar" dfadf/"foo\\\x bar\" dfadf"/
"foo\x bar\" dfadf"/foo\x bar" dfadf/"foo\x bar\" dfadf"/
\'/'/\'/
'foo\ bar'/foo\ bar/'foo\ bar'/
'foo\\ bar'/foo\\ bar/'foo\\ bar'/
"foo\\\x bar\" df'a\ 'df"/foo\\x bar" df'a\ 'df/"foo\\\x bar\" df'a\ 'df"/
\"foo/"foo/\"foo/
\"foo\x/"foox/\"foo\x/
"foo\x"/foo\x/"foo\x"/
"foo\ "/foo\ /"foo\ "/
foo\ xx/foo xx/foo\ xx/
foo\ x\x/foo xx/foo\ x\x/
foo\ x\x\"/foo xx"/foo\ x\x\"/
"foo\ x\x"/foo\ x\x/"foo\ x\x"/
"foo\ x\x\\"/foo\ x\x\/"foo\ x\x\\"/
"foo\ x\x\\""foobar"/foo\ x\x\foobar/"foo\ x\x\\""foobar"/
"foo\ x\x\\"\'"foobar"/foo\ x\x\'foobar/"foo\ x\x\\"\'"foobar"/
"foo\ x\x\\"\'"fo'obar"/foo\ x\x\'fo'obar/"foo\ x\x\\"\'"fo'obar"/
"foo\ x\x\\"\'"fo'obar" 'don'\''t'/foo\ x\x\'fo'obar|don't/"foo\ x\x\\"\'"fo'obar"| 'don'\''t'/
"foo\ x\x\\"\'"fo'obar" 'don'\''t' \\/foo\ x\x\'fo'obar|don't|\/"foo\ x\x\\"\'"fo'obar"| 'don'\''t'| \\/
foo\ bar/foo bar/foo\ bar/
:-) ;-)/:-)|;-)/:-)| ;-)/
áéíóú/áéíóú/áéíóú/
"""
def _parse_split_test_data_str():
"""
Parse the test data set into a namedtuple to use in tests.
Returns:
A list of namedtuples with str attributes: input, keep, no_keep
"""
tuple_class = collections.namedtuple('TestCase', 'input, keep, no_keep')
for line in test_data_str.splitlines():
if not line:
continue
data = line.split('/')
item = tuple_class(input=data[0], keep=data[1].split('|'),
no_keep=data[2].split('|'))
yield item
yield tuple_class(input='', keep=[], no_keep=[])
class TestSplit:
"""Test split."""
@pytest.fixture(params=_parse_split_test_data_str(), ids=lambda e: e.input)
def split_test_case(self, request):
"""Fixture to automatically parametrize all depending tests.
It will use the test data from test_data_str, parsed using
_parse_split_test_data_str().
"""
return request.param
def test_split(self, split_test_case):
"""Test splitting."""
items = split.split(split_test_case.input)
assert items == split_test_case.keep
def test_split_keep_original(self, split_test_case):
"""Test if splitting with keep=True yields the original string."""
|
        items = split.split(split_test_case.input, keep=True)
assert ''.join(items) == split_test_case.input
def test_split_keep(self, split_test_case):
"""Test splitting with keep=True."""
items = split.split(split_test_case.input, keep=True)
assert items == split_test_case.no_keep
class TestSimpleSplit:
"""Test simple_split."""
TESTS = {
' foo bar': [' foo', ' bar'],
'foobar': ['foobar'],
' foo bar baz ': [' foo', ' bar', ' baz', ' '],
|
        'f\ti\ts\th': ['f', '\ti', '\ts', '\th'],
'foo\nbar': ['foo', '\nbar'],
}
@pytest.mark.parametrize('test', TESTS)
def test_str_split(self, test):
"""Test if the behavior matches str.split."""
assert split.simple_split(test) == test.rstrip().split()
@pytest.mark.parametrize('s, maxsplit',
[("foo bar baz", 1), (" foo bar baz ", 0)])
def test_str_split_maxsplit(self, s, maxsplit):
"""Test if the behavior matches str.split with given maxsplit."""
actual = split.simple_split(s, maxsplit=maxsplit)
expected = s.rstrip().split(maxsplit=maxsplit)
assert actual == expected
@pytest.mark.parametrize('test, expected', TESTS.items())
def test_split_keep(self, test, expected):
"""Test splitting with keep=True."""
assert split.simple_split(test, keep=True) == expected
def test_maxsplit_0_keep(self):
"""Test special case with maxsplit=0 and keep=True."""
s = "foo bar"
assert split.simple_split(s, keep=True, maxsplit=0) == [s]
|
cinp/python
|
cinp/common.py
|
Python
|
apache-2.0
| 4,279
| 0.038093
|
import re
import sys
class URI():
|
  def __init__( self, root_path ):
super().__init__()
|
    if root_path[-1] != '/' or root_path[0] != '/':
raise ValueError( 'root_path must start and end with "/"' )
self.root_path = root_path
self.uri_regex = re.compile( r'^({0}|/)(([a-zA-Z0-9\-_.!~*<>]+/)*)([a-zA-Z0-9\-_.!~*<>]+)?(:([a-zA-Z0-9\-_.!~*\'<>]*:)*)?(\([a-zA-Z0-9\-_.!~*<>]+\))?$'.format( self.root_path ) )
def split( self, uri, root_optional=False ):
uri_match = self.uri_regex.match( uri )
if not uri_match:
raise ValueError( 'Unable to parse URI "{0}"'.format( uri ) )
( root, namespace, _, model, rec_id, _, action ) = uri_match.groups()
if root != self.root_path and not root_optional:
raise ValueError( 'URI does not start in the root_path' )
if namespace != '':
namespace_list = namespace.rstrip( '/' ).split( '/' )
else:
namespace_list = []
if rec_id is not None:
id_list = rec_id.strip( ':' ).split( ':' )
multi = len( id_list ) > 1
else:
id_list = None # id_list = [] is an empty list of ids, where None means the list is not even present
multi = False
if action is not None:
action = action[ 1:-1 ]
return ( namespace_list, model, action, id_list, multi )
def build( self, namespace=None, model=None, action=None, id_list=None, in_root=True ):
"""
    build a uri, NOTE: if model is None, id_list and action are skipped
"""
if in_root:
result = self.root_path
else:
result = '/'
if namespace is not None:
if not isinstance( namespace, list ):
namespace = [ namespace ]
if len( namespace ) > 0:
result = '{0}{1}/'.format( result, '/'.join( namespace ) )
if model is None:
return result
result = '{0}{1}'.format( result, model )
if id_list is not None and id_list != []:
if not isinstance( id_list, list ):
id_list = [ id_list ]
result = '{0}:{1}:'.format( result, ':'.join( id_list ) )
if action is not None:
result = '{0}({1})'.format( result, action )
return result
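  # Editor-added illustration (not part of the original module): a minimal sketch of
  # how split() and build() round-trip, assuming a hypothetical root_path of '/api/':
  #
  #   uri = URI( '/api/' )
  #   uri.split( '/api/ns1/ns2/Model:12:34:(action)' )
  #   # -> ( ['ns1', 'ns2'], 'Model', 'action', ['12', '34'], True )
  #   uri.build( ['ns1', 'ns2'], 'Model', 'action', ['12', '34'] )
  #   # -> '/api/ns1/ns2/Model:12:34:(action)'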
def extractIds( self, uri_list ): # TODO: should we make sure the namespace/model do not change in the list?
"""
    extract the record IDs from the URIs in uri_list, can handle some/all/none
    of the URIs having multiple IDs in them already, does not force uniqueness,
    order should remain intact
"""
if isinstance( uri_list, str ):
uri_list = [ uri_list ]
if not isinstance( uri_list, list ):
raise ValueError( 'uri_list must be string or list of strings' )
result = []
for uri in uri_list:
uri_match = self.uri_regex.match( uri )
if not uri_match:
raise ValueError( 'Unable to parse URI "{0}"'.format( uri ) )
( _, _, _, _, rec_id, _, _ ) = uri_match.groups()
if rec_id is None:
continue
result += rec_id.strip( ':' ).split( ':' )
return result
def uriListToMultiURI( self, uri_list ):
"""
runs extract Ids on the list, then takes the first uri and applies all
the ids to it
"""
if not uri_list:
return []
id_list = self.extractIds( uri_list )
if not id_list:
return []
( namespace_list, model, action, _, _ ) = self.split( uri_list[0] )
return self.build( namespace_list, model, action, id_list, True )
# borrowed from https://www.python.org/dev/peps/pep-0257/
def doccstring_prep( docstring ):
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxsize
for line in lines[ 1: ]:
stripped = line.lstrip()
if stripped:
indent = min( indent, len( line ) - len( stripped ) )
# Remove indentation (first line is special):
trimmed = [ lines[0].strip() ]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append( line[ indent: ].rstrip() )
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop( 0 )
# Return a single string:
return '\n'.join( trimmed )
|
sgerhart/ansible
|
test/units/modules/network/cnos/cnos_module.py
|
Python
|
mit
| 3,502
| 0.000571
|
|
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
|
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import tempfile
from units.compat import unittest
from units.compat.mock import patch
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except:
pass
fixture_data[path] = data
return data
class AnsibleExitJson(Exception):
pass
class AnsibleFailJson(Exception):
pass
class TestCnosModule(unittest.TestCase):
def setUp(self):
super(TestCnosModule, self).setUp()
self.test_log = tempfile.mkstemp(prefix='ansible-test-cnos-module-', suffix='.log')[1]
def tearDown(self):
super(TestCnosModule, self).tearDown()
os.remove(self.test_log)
def execute_module(self, failed=False, changed=False, commands=None,
sort=True, defaults=False):
self.load_fixtures(commands)
if failed:
result = self.failed()
self.assertTrue(result['failed'], result)
else:
result = self.changed(changed)
self.assertEqual(result['changed'], changed, result)
if commands is not None:
if sort:
self.assertEqual(sorted(commands), sorted(result['commands']),
result['commands'])
else:
self.assertEqual(commands, result['commands'],
result['commands'])
return result
def failed(self):
def fail_json(*args, **kwargs):
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
with patch.object(basic.AnsibleModule, 'fail_json', fail_json):
with self.assertRaises(AnsibleFailJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertTrue(result['failed'], result)
return result
def changed(self, changed=False):
def exit_json(*args, **kwargs):
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
with patch.object(basic.AnsibleModule, 'exit_json', exit_json):
with self.assertRaises(AnsibleExitJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertEqual(result['changed'], changed, result)
return result
def load_fixtures(self, commands=None):
pass
|
eunchong/build
|
masters/master.client.mojo/master_source_cfg.py
|
Python
|
bsd-3-clause
| 386
| 0.005181
|
# Copyright 2014 The Chromium Authors. All rights reserved.
|
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from master import gitiles_poller
def Update(config, active_master, c):
master_poller = gitiles_poller.GitilesPoller(
|
      'https://chromium.googlesource.com/external/mojo')
c['change_source'].append(master_poller)
|
bm2-lab/MLClass
|
cgh_deep_learning/mnist_mlp.py
|
Python
|
apache-2.0
| 1,723
| 0.001741
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('data', one_hot=True)
mnist_train = mnist.train
mnist_val = mnist.validation
p = 28 * 28
n = 10
h1 = 300
func_act = tf.nn.sigmoid
x_pl = tf.placeholder(dtype=tf.float32, shape=[None, p])
y_pl = tf.placeholder(dtype=tf.float32, shape=[None, n])
w1 = tf.Variable(tf.truncated_normal(shape=[p, h1], stddev=0.1))
b1 = tf.Variable(tf.zeros(shape=[h1]))
w2 = tf.Variable(tf.truncated_normal(shape=[h1, n], stddev=0.1))
b2 = tf.Variable(tf.zeros(shape=[n]))
hidden1 = func_act(tf.matmul(x_pl, w1) + b1)
y_pre = tf.matmul(hidden1, w2) + b2
y_ = tf.nn.softmax(y_pre)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_pl, logits=y_pre))
correct_prediction = tf.equal(tf.argmax(y_pl, 1), tf.argmax(y_, 1))
|
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
eta = 0.3
train_op = tf.train.AdagradOptimizer(learning_rate=0.3).minimize(cross_entropy)
batch_size = 50
batch_per_epoch = mnist_train.num_examples // batch_size
epoch = 2
with tf.Session() as sess:
tf.global_variables_initializer().run()
x_val = mnist_val.images
y_val = mnist_val.labels
val_fd = {x_pl: x_val, y_pl: y_val}
for ep in range(epoch):
print(f'Epoch {ep+1}:')
|
for sp in range(batch_per_epoch):
xtr, ytr = mnist_train.next_batch(batch_size)
loss_value, _ = sess.run([cross_entropy, train_op], feed_dict={x_pl: xtr, y_pl: ytr})
if sp == 0 or (sp + 1) % 100 == 0:
print(f'Loss: {loss_value:.4f}')
acc = sess.run(accuracy, feed_dict=val_fd)
print(f'Validation Acc: {acc:.4f}')
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/carpet/baxis/_tickvals.py
|
Python
|
mit
| 407
| 0.002457
|
import _plotly_utils.basevalidators
class TickvalsValidator(_plotly_utils.basevalidators.DataArrayValidator):
|
    def __init__(self, plotly_name="tickvals", parent_name="carpet.baxis", **kwargs):
super(TickvalsValidator, self).__init__(
|
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
|
StepicOrg/django-oauth-toolkit
|
oauth2_provider/admin.py
|
Python
|
bsd-2-clause
| 1,114
| 0.000898
|
from django.contrib import admin
from .models import Grant, AccessToken, RefreshToken, get_application_model
class ApplicationAdmin(admin.ModelAdmin):
|
    list_display = ("name", "user", "client_type", "authorization_grant_type")
list_filter = ("client_type", "authorization_grant_type", "skip_authorization")
radio_fields = {
"client_type": admin.HORIZONTAL,
"authorization_grant_type": admin.VERTICAL,
}
raw_id_fields = ("user", )
class GrantAdmin(admin.ModelAdmin):
list_display = ("code", "application", "user", "expires")
raw_id_fields = ("user", )
class AccessTokenAdmin(admin.ModelAdmin):
|
    list_display = ("token", "user", "application", "expires")
raw_id_fields = ("user", )
class RefreshTokenAdmin(admin.ModelAdmin):
list_display = ("token", "user", "application")
raw_id_fields = ("user", "access_token")
Application = get_application_model()
admin.site.register(Application, ApplicationAdmin)
admin.site.register(Grant, GrantAdmin)
admin.site.register(AccessToken, AccessTokenAdmin)
admin.site.register(RefreshToken, RefreshTokenAdmin)
|
lmotta/Roam
|
tests/__init__.py
|
Python
|
gpl-2.0
| 234
| 0.008547
|
import os
import sys
# Add parent directory to path to make test aware of other modules
|
srcfolder = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', "src"))
if srcfolder not in sys.path:
|
    sys.path.append(srcfolder)
|
gamechanger/kafka-python
|
test/service.py
|
Python
|
apache-2.0
| 3,648
| 0.003289
|
import logging
import os
import re
import select
import subprocess
import threading
import time
__all__ = [
'ExternalService',
'SpawnedService',
]
log = logging.getLogger(__name__)
class ExternalService(object):
def __init__(self, host, port):
log.info("Using already running service at %s:%d", host, port)
self.host = host
self.port = port
def open(self):
pass
def close(self):
pass
class SpawnedService(threading.Thread):
def __init__(self, args=None, env=None):
threading.Thread.__init__(self)
if args is None:
raise TypeError("args parameter is required")
self.args = args
self.env = env
self.captured_stdout = []
self.captured_stderr = []
self.should_die = threading.Event()
self.child = None
self.alive = False
def run(self):
self.run_with_handles()
def _spawn(self):
if self.alive: return
if self.child and self.child.poll() is None: return
self.child = subprocess.Popen(
self.args,
preexec_fn=os.setsid, # to avoid propagating signals
env=self.env,
bufsize=1,
stdout=subprocess.PIPE,
|
stderr=subprocess.PIPE)
self.alive = True
|
    def _despawn(self):
if self.child.poll() is None:
self.child.terminate()
self.alive = False
for _ in range(50):
if self.child.poll() is not None:
self.child = None
break
time.sleep(0.1)
else:
self.child.kill()
def run_with_handles(self):
self._spawn()
while True:
(rds, _, _) = select.select([self.child.stdout, self.child.stderr], [], [], 1)
if self.child.stdout in rds:
line = self.child.stdout.readline()
self.captured_stdout.append(line.decode('utf-8'))
if self.child.stderr in rds:
line = self.child.stderr.readline()
self.captured_stderr.append(line.decode('utf-8'))
if self.child.poll() is not None:
self.dump_logs()
self._spawn()
if self.should_die.is_set():
self._despawn()
break
def dump_logs(self):
log.critical('stderr')
for line in self.captured_stderr:
log.critical(line.rstrip())
log.critical('stdout')
for line in self.captured_stdout:
log.critical(line.rstrip())
def wait_for(self, pattern, timeout=30):
t1 = time.time()
while True:
t2 = time.time()
if t2 - t1 >= timeout:
try:
self.child.kill()
except:
log.exception("Received exception when killing child process")
self.dump_logs()
log.error("Waiting for %r timed out after %d seconds", pattern, timeout)
return False
if re.search(pattern, '\n'.join(self.captured_stdout), re.IGNORECASE) is not None:
log.info("Found pattern %r in %d seconds via stdout", pattern, (t2 - t1))
return True
if re.search(pattern, '\n'.join(self.captured_stderr), re.IGNORECASE) is not None:
log.info("Found pattern %r in %d seconds via stderr", pattern, (t2 - t1))
return True
time.sleep(0.1)
def start(self):
threading.Thread.start(self)
def stop(self):
self.should_die.set()
self.join()
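# Editor-added illustration (not part of the original module; the command and
# log pattern below are hypothetical): typical use of SpawnedService is to spawn
# a process, block until its output matches a pattern, then shut it down:
#
#   svc = SpawnedService(args=['some-server', '--config', 'server.properties'])
#   svc.start()
#   if svc.wait_for(r'server started', timeout=60):
#       pass  # interact with the running process here
#   svc.stop()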
|
yejia/order_system
|
service_order/urls.py
|
Python
|
mit
| 542
| 0.00369
|
from django.conf.urls import patterns, include, url
from service_order import views, data_views
urlpatterns = patterns('',
url(r'^$', views.index),
|
    url(r'^order_state_machine/$', views.order_state_machine),
url(r'^make_order/$', views.make_order),
url(r'^make_order2/$', views.make_order2),
url(r'^make_order3/$', views.make_order3),
url(r'^create_order/$', views.create_order),
url(r'^view_refund_sheet/$', views.view_refund_sheet),
|
    url(r'^data/validate_code/', data_views.validate_code),
)
|
PetePriority/home-assistant
|
tests/components/google/test_calendar.py
|
Python
|
apache-2.0
| 16,362
| 0
|
"""The tests for the google calendar component."""
# pylint: disable=protected-access
import logging
import unittest
from unittest.mock import patch, Mock
import pytest
import homeassistant.components.calendar as calendar_base
from homeassistant.components.google import calendar
import homeassistant.util.dt as dt_util
from homeassistant.const import CONF_PLATFORM, STATE_OFF, STATE_ON
from homeassistant.helpers.template import DATE_STR_FORMAT
from tests.common import get_test_home_assistant, MockDependency
TEST_PLATFORM = {calendar_base.DOMAIN: {CONF_PLATFORM: 'test'}}
_LOGGER = logging.getLogger(__name__)
class TestComponentsGoogleCalendar(unittest.TestCase):
"""Test the Google calendar."""
hass = None # HomeAssistant
# pylint: disable=invalid-name
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.http = Mock()
# Set our timezone to CST/Regina so we can check calculations
# This keeps UTC-6 all year round
dt_util.set_default_time_zone(dt_util.get_time_zone('America/Regina'))
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
dt_util.set_default_time_zone(dt_util.get_time_zone('UTC'))
self.hass.stop()
@patch('homeassistant.components.google.calendar.GoogleCalendarData')
def test_all_day_event(self, mock_next_event):
"""Test that we can create an event trigger on device."""
week_from_today = dt_util.dt.date.today() \
+ dt_util.dt.timedelta(days=7)
event = {
'summary': 'Test All Day Event',
'start': {
'date': week_from_today.isoformat()
},
'end': {
'date': (week_from_today + dt_util.dt.timedelta(days=1))
.isoformat()
},
'location': 'Test Cases',
'description': 'We\'re just testing that all day events get setup '
'correctly',
'kind': 'calendar#event',
'created': '2016-06-23T16:37:57.000Z',
'transparency': 'transparent',
'updated': '2016-06-24T01:57:21.045Z',
'reminders': {'useDefault': True},
'organizer': {
'email': 'uvrttabwegnui4gtia3vyqb@import.calendar.google.com',
'displayName': 'Organizer Name',
'self': True
},
'sequence': 0,
'creator': {
'email': 'uvrttabwegnui4gtia3vyqb@import.calendar.google.com',
'displayName': 'Organizer Name',
'self': True
},
'id': '_c8rinwq863h45qnucyoi43ny8',
'etag': '"2933466882090000"',
'htmlLink': 'https://www.google.com/calendar/event?eid=*******',
'iCalUID': 'cydrevtfuybguinhomj@google.com',
'status': 'confirmed'
}
|
        mock_next_event.return_value.event = event
device_name = 'Test All Day'
cal = calendar.GoogleCalendarEventDevice(self.hass, None,
'', {'name': device_name})
assert cal.name == device_name
assert cal.state == STATE_OFF
assert not cal.offset_reached()
assert cal.device_state_attributes == {
'message': event['summary'],
'all_day': True,
|
'offset_reached': False,
'start_time': '{} 00:00:00'.format(event['start']['date']),
'end_time': '{} 00:00:00'.format(event['end']['date']),
'location': event['location'],
'description': event['description'],
}
@patch('homeassistant.components.google.calendar.GoogleCalendarData')
def test_future_event(self, mock_next_event):
"""Test that we can create an event trigger on device."""
one_hour_from_now = dt_util.now() \
+ dt_util.dt.timedelta(minutes=30)
event = {
'start': {
'dateTime': one_hour_from_now.isoformat()
},
'end': {
'dateTime': (one_hour_from_now
+ dt_util.dt.timedelta(minutes=60))
.isoformat()
},
'summary': 'Test Event in 30 minutes',
'reminders': {'useDefault': True},
'id': 'aioehgni435lihje',
'status': 'confirmed',
'updated': '2016-11-05T15:52:07.329Z',
'organizer': {
'email': 'uvrttabwegnui4gtia3vyqb@import.calendar.google.com',
'displayName': 'Organizer Name',
'self': True,
},
'created': '2016-11-05T15:52:07.000Z',
'iCalUID': 'dsfohuygtfvgbhnuju@google.com',
'sequence': 0,
'creator': {
'email': 'uvrttabwegnui4gtia3vyqb@import.calendar.google.com',
'displayName': 'Organizer Name',
},
'etag': '"2956722254658000"',
'kind': 'calendar#event',
'htmlLink': 'https://www.google.com/calendar/event?eid=*******',
}
mock_next_event.return_value.event = event
device_name = 'Test Future Event'
device_id = 'test_future_event'
cal = calendar.GoogleCalendarEventDevice(self.hass, None, device_id,
{'name': device_name})
assert cal.name == device_name
assert cal.state == STATE_OFF
assert not cal.offset_reached()
assert cal.device_state_attributes == {
'message': event['summary'],
'all_day': False,
'offset_reached': False,
'start_time': one_hour_from_now.strftime(DATE_STR_FORMAT),
'end_time':
(one_hour_from_now + dt_util.dt.timedelta(minutes=60))
.strftime(DATE_STR_FORMAT),
'location': '',
'description': '',
}
@patch('homeassistant.components.google.calendar.GoogleCalendarData')
def test_in_progress_event(self, mock_next_event):
"""Test that we can create an event trigger on device."""
middle_of_event = dt_util.now() \
- dt_util.dt.timedelta(minutes=30)
event = {
'start': {
'dateTime': middle_of_event.isoformat()
},
'end': {
'dateTime': (middle_of_event + dt_util.dt
.timedelta(minutes=60))
.isoformat()
},
'summary': 'Test Event in Progress',
'reminders': {'useDefault': True},
'id': 'aioehgni435lihje',
'status': 'confirmed',
'updated': '2016-11-05T15:52:07.329Z',
'organizer': {
'email': 'uvrttabwegnui4gtia3vyqb@import.calendar.google.com',
'displayName': 'Organizer Name',
'self': True,
},
'created': '2016-11-05T15:52:07.000Z',
'iCalUID': 'dsfohuygtfvgbhnuju@google.com',
'sequence': 0,
'creator': {
'email': 'uvrttabwegnui4gtia3vyqb@import.calendar.google.com',
'displayName': 'Organizer Name',
},
'etag': '"2956722254658000"',
'kind': 'calendar#event',
'htmlLink': 'https://www.google.com/calendar/event?eid=*******',
}
mock_next_event.return_value.event = event
device_name = 'Test Event in Progress'
device_id = 'test_event_in_progress'
cal = calendar.GoogleCalendarEventDevice(self.hass, None, device_id,
{'name': device_name})
assert cal.name == device_name
assert cal.state == STATE_ON
assert not cal.offset_reached()
assert cal.device_state_attributes == {
'message': event['summary'],
'all_day': False,
'offset_reached': False,
'start_time': middle_of_event.strftime(DATE_STR_FORMAT),
'end_time':
|
AdrianGaudebert/socorro
|
socorro/unittest/cron/jobs/base.py
|
Python
|
mpl-2.0
| 1,178
| 0
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from socorro.unittest.cron.setup_configman import (
get_config_manager_for_crontabber,
)
from crontabber.tests import base
class IntegrationTestBase(base.IntegrationTestCaseBase):
@classmethod
def get_standard_config(cls):
"""this method overrides the crontabber version of the same name.
It is not used by Socorro clients directly, but the base crontabber
|
        class uses this method during setup. By overriding the implementation
        here, we get a default Socorro configuration file with many of the
        standard Socorro defaults already in place: logging, executors, etc.
        This allows the bootstrapping of the integration tests to participate
        fully with the environment variables, commandline arguments, and
        configuration files that the Socorro installation/test system of
|
        Makefiles and shell scripts offers"""
config = get_config_manager_for_crontabber().get_config()
return config
|
uweschmitt/emzed_optimizations
|
tests/test_sample.py
|
Python
|
bsd-3-clause
| 77
| 0
|
|
|
raise Exception("tests were moved to emzed to avoid circular dependencies")
|
rbranson/craystack
|
upload.py
|
Python
|
bsd-3-clause
| 343
| 0.002915
|
import sys
from craystack import cf
if len(sys.argv) < 4:
print "Usage: %s <key> <subkey> <path>" % sys.argv[0]
sys.exit(2)
|
_, key, subkey, filename = sys.argv
with open(filename) as f:
content = f.read()
cf.insert(key, {subkey: content})
|
print "Uploaded %s to %s/%s (%s bytes)" % (filename, key, subkey, len(content))
|
liamfraser/lantex
|
lantex/types.py
|
Python
|
bsd-3-clause
| 19,172
| 0.002921
|
import re
import svgwrite
import math
class LantexBase(object):
def __init__(self):
self.identifier = None
self.description = None
self.properties = [ 'description' ]
def __repr__(self):
out = "{0} {1}:\n".format(type(self).__name__, self.identifier)
for p in self.properties:
val = getattr(self, p)
if val != None:
out += "{0}: {1}\n".format(p, val)
return out + "\n"
def valid_property(self, p):
return p in self.properties
class Drawable(object):
"""
Base class for Drawable things. For simplicity, we're going to use a
rectangle with ports and an identifier as a starting point for every
object.
"""
def __init__(self):
# Holds drawing data
self.drawing = {'margin' : 4,
'port_size': 10}
self.__drawinit__()
def __drawinit__(self):
"""
Adds drawing constants to the drawing dict
"""
raise NotImplementedError("Function hasn't been implemented")
def calc_size(self, env):
"""
Returns the width and height of the object that will be drawn. Used to
work out where to position it. Returns (width, height). If the child
hasn't implemented it we'll just use our base function
"""
return self.calc_size_base(env)
def calc_size_base(self, env):
"""
The width will either be ports_per_row * ports or the width of the
identifier, whichever is larger.
"""
m = self.drawing['margin']
# Identifier width
id_width = len(self.identifier) * env.font.width
id_width_m = m + id_width + m
ppr = None
if 'ports_per_row' in self.drawing:
ppr = self.drawing['ports_per_row']
else:
# Work out the number of ports that can fit on each row if we use
# the identifier width
ppr = math.floor(id_width / (m + self.drawing['port_size']))
self.drawing['ports_per_row'] = ppr
rows = math.ceil(len(self.ports) / ppr)
ports_width = (self.drawing['port_size'] + m) * len(self.ports)
ports_width -= m
port_w = m + ports_width + m
h = m + env.font.height + m + ((self.drawing['port_size'] + m) * rows)
if id_width_m >= port_w:
w = id_width_m
else:
w = port_w
self.drawing['rows'] = rows
self.drawing['w'] = w
self.drawing['h'] = h
self.drawing['ports_width'] = ports_width
return w, h
def draw(self, env):
"""
Env is an instance of DrawEnv. If the child class hasn't implemented the
draw function then we'll use use our base draw function
"""
self.draw_base(env)
def draw_base(self, env):
# Create a group for the object
g = env.dwg.add(env.dwg.g(id='{0}-{1}'.format(self.__class__.__name__,
self.identifier)))
env.dwg_g = g
# Draw the outside rectangle
x, y = env.x, env.y
bgcol = env.colors['bg']['base2'].rgb
stcol = env.colors['bg']['base02'].rgb
g.add(env.dwg.rect(insert=(x, y),
size=(self.drawing['w'], self.drawing['h']),
fill=bgcol,
stroke=stcol))
# Add it's identifier
x += self.drawing['margin']
y += self.drawing['margin'] + env.font.height
g.add(env.dwg.text(self.identifier, insert=(x,y)))
# Work out where to draw first port by working out the width and
# where x needs to be to center it
ports_width = self.drawing['ports_width']
row_startx = env.x + int(round((self.drawing['w'] - ports_width) / 2))
x = row_startx
y += self.drawing['margin']
# Draw each port
for p in self.ports:
            # If we're on a new row then move down and start a new row of ports
if p.identifier % (self.drawing['ports_per_row'] + 1) == 0:
y += (self.drawing['port_size'] + self.drawing['margin'])
x = row_startx
fgcol = env.colors['fg']['green'].rgb
g.add(env.dwg.rect(insert=(x, y),
size=(self.drawing['port_size'],
self.drawing['port_size']),
fill=fgcol,
stroke=stcol))
x += (self.drawing['port_size'] + self.drawing['margin'])
class UnresolvedIdentifier(object):
"""
Represents a variable that might exist in the future but we can't find
it yet.
"""
instance_list = []
@staticmethod
def new(identifier):
"""
If we already have an instance for this identifier, don't make a new
one
"""
for i in UnresolvedIdentifier.instance_list:
if i.identifier == identifier:
return i
# Didn't find it so make a new one
ui = UnresolvedIdentifier(identifier)
UnresolvedIdentifier.instance_list.append(ui)
return ui
@staticmethod
def resolve_all(entities, instance_list):
if len(instance_list) == 0:
raise ValueError("No unresolved identifiers")
for i in instance_list:
found = False
for e in entities:
if i.identifier == e.identifier:
# Found the entity we want
i.resolved = e
found = True
if found == False:
raise ValueError("Couldn't resolve identifier"
" {0}".format(i.identifier))
def __init__(self, identifier):
"""
Should only be called by our static method new
"""
self.identifier = identifier
self.resolved = None
def __repr__(self):
if self.resolved != None:
return "ResolvedIdentifier {0}".format(self.resolved.__repr__())
else:
return "UnresolvedIdentifier {0}".format(self.identifier)
class Connection(object):
"""
Connects a port of an entity to the port of another entity
"""
|
    def __init__(self):
self.from_e = None
self.to_e = None
# 1 based port indexes
self.from_i = None
self.to_i = None
def __repr__(self):
out = "Connection: {0}->{1} : ".format(self.from_e.identifier,
self.from_i)
if self.to_i == None:
out += self.to_e.identifier
else:
out += "{0}->{1}".format(self.to_e.identifier, self.to_i)
|
        return out
def update_ports(self):
"""
Update the port.networks entries for the relevant entity
"""
if self.to_i != None:
self.from_e.ports[self.from_i - 1].networks = self.to_e.ports[self.to_i - 1].networks
else:
self.from_e.ports[self.from_i - 1].networks = [self.to_e]
class Addressable(LantexBase):
def __init__(self):
super().__init__()
self._v4 = None
self._v6 = None
self._v4_range = None
self._v6_range = None
self._v4_gateway = None
self._v6_gateway = None
self.properties.append('v4')
self.properties.append('v6')
self.properties.append('v4_range')
self.properties.append('v6_range')
self.properties.append('v4_gateway')
self.properties.append('v6_gateway')
@property
def v4_gateway(self):
return self._v4_gateway
@v4_gateway.setter
def v4_gateway(self, value):
"""
Can either be an UnresolvedIdentifier or IPv4Addr
"""
try:
ip = IPv4Addr(value)
self._v4_gateway = ip
except ValueError:
self._v4_gateway = UnresolvedIdentifier(value)
@property
def v6_gateway(self):
return self._v6_gateway
@v6_gateway.setter
def v6_gateway(self, value):
"""
Can either be an UnresolvedIdentifier or IPv6Addr
"""
|
jclgoodwin/bustimes.org.uk
|
vosa/management/commands/import_vosa.py
|
Python
|
mpl-2.0
| 11,021
| 0.002087
|
import csv
from datetime import datetime
from django.conf import settings
from django.core.management import BaseCommand
from bustimes.utils import download_if_changed
from ...models import Licence, Registration, Variation
def parse_date(date_string):
if date_string:
return datetime.strptime(date_string, '%d/%m/%y').date()
def download_if_modified(path):
url = f"https://content.mgmt.dvsacloud.uk/olcs.prod.dvsa.aws/data-gov-uk-export/{path}"
return download_if_changed(settings.DATA_DIR / path, url)
class Command(BaseCommand):
@staticmethod
def add_arguments(parser):
parser.add_argument('regions', nargs='?', type=str, default="FBCMKGDH")
def get_rows(self, path):
with open(settings.DATA_DIR / path) as open_file:
yield from csv.DictReader(open_file)
def handle(self, regions, **kwargs):
for region in regions:
modified_1, last_modified_1 = download_if_modified(f"Bus_RegisteredOnly_{region}.csv")
modified_2, last_modified_2 = download_if_modified(f"Bus_Variation_{region}.csv")
if modified_1 or modified_2:
print(region, last_modified_1, last_modified_2)
self.handle_region(region)
def handle_region(self, region):
lics = Licence.objects.filter(traffic_area=region)
lics = lics.in_bulk(field_name="licence_number")
lics_to_update = set()
lics_to_create = []
regs = Registration.objects.filter(licence__traffic_area=region)
regs = regs.in_bulk(field_name="registration_number")
regs_to_update = set()
regs_to_create = []
variations = Variation.objects.filter(registration__licence__traffic_area=region)
variations = variations.select_related('registration').all()
variations_dict = {}
for variation in variations:
reg_no = variation.registration.registration_number
if reg_no in variations_dict:
variations_dict[reg_no][variation.variation_number] = variation
else:
variations_dict[reg_no] = {
variation.variation_number: variation
}
# vars_to_update = set()
vars_to_create = []
# previous_line = None
# cardinals = set()
for line in self.get_rows(f"Bus_Variation_{region}.csv"):
reg_no = line["Reg_No"]
var_no = int(line["Variation Number"])
lic_no = line["Lic_No"]
if lic_no in lics:
licence = lics[lic_no]
if licence.id and licence not in lics_to_update:
licence.trading_name = ''
lics_to_update.add(licence)
else:
licence = Licence(licence_number=lic_no)
lics_to_create.append(licence)
lics[lic_no] = licence
licence.name = line['Op_Name']
# a licence can have multiple trading names
if line['trading_name'] not in licence.trading_name:
if licence.trading_name:
licence.trading_name = f"{licence.trading_name}\n{line['trading_name']}"
else:
licence.trading_name = line['trading_name']
if licence.address != line['Address']:
if licence.address:
print(licence.address, line['Address'])
licence.address = line['Address']
if licence.traffic_area:
assert licence.traffic_area == line['Current Traffic Area']
else:
licence.traffic_area = line['Current Traffic Area']
licence.discs = line['Discs in Possession'] or 0
licence.authorised_discs = line['AUTHDISCS'] or 0
licence.description = line['Description']
licence.granted_date = parse_date(line['Granted_Date'])
licence.expiry_date = parse_date(line['Exp_Date'])
if len(reg_no) > 20:
# PK0000098/PK0000098/364
parts = reg_no.split('/')
assert parts[0] == parts[1]
reg_no = f'{parts[1]}/{parts[2]}'
if reg_no in regs:
registration = regs[reg_no]
if registration.id and registration not in regs_to_update:
regs_to_update.add(registration)
else:
registration = Registration(
registration_number=reg_no,
registered=False
)
regs_to_create.append(registration)
regs[reg_no] = registration
registration.licence = licence
status = line['Registration Status']
registration.registration_status = status
if var_no == 0 and status == 'New':
registration.registered = True
elif status == 'Registered':
registration.registered = True
elif status == 'Cancelled' or status == 'Admin Cancelled' or status == 'Cancellation':
registration.registered = False
registration.start_point = line['start_point']
registration.finish_point = line['finish_point']
registration.via = line['via']
registration.subsidies_description = line['Subsidies_Description']
registration.subsidies_details = line['Subsidies_Details']
registration.traffic_area_office_covered_by_area = line['TAO Covered BY Area']
# a registration can have multiple numbers
if registration.service_number:
if line['Service Number'] not in registration.service_number:
registration.service_number = f"{registration.service_number}\n{line['Service Number']}"
else:
registration.service_number = line['Service Number']
# a registration can have multiple types
if registration.service_type_description:
if line['Service_Type_Description'] not in registration.service_type_description:
registration.service_type_description += f"\n{line['Service_Type_Description']}"
else:
registration.service_type_description = line['Service_Type_Description']
if registration.authority_description:
if line['Auth_Description'] not in registration.authority_description:
registration.authority_description += f"\n{line['Auth_Description']}"
if len(registration.authority_description) > 255:
# some National Express coach services cover many authorities
# print(reg_no)
registration.authority_description = registration.authority_description[:255]
else:
registration.authority_description = line['Auth_Description']
            # if previous_line:
            #     if previous_line["Reg_No"] == reg_no:
            #         if int(previous_line["Variation Number"]) == var_no:
            #             for key in line:
            #                 prev = previous_line[key]
            #                 value = line[key]
            #                 if prev != value:
            #                     if key not in (
            #                         'Auth_Description', 'TAO Covered BY Area',
            #                         'trading_name', 'Pub_Text', 'Registration Status', 'end_date', 'received_date',
            #                         'effective_date', 'short_notice', 'Service_Type_Description'
            #                     ):
            #                         print(reg_no)
            #                         print(f"'{key}': '{prev}', '{value}'")
            #                         cardinals.add(key)
            #
            # print(line)
variation = Variation(registration=registration, variation_number=var_no)
if reg_no in variations_dict:
if var_no in variations_dict[reg_no]:
continue # ?
e
|
tmxdyf/CouchPotatoServer
|
couchpotato/core/providers/info/_modifier/main.py
|
Python
|
gpl-3.0
| 3,402
| 0.004409
|
from couchpotato import get_session
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.variable import mergeDicts, randomString
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Library
import copy
import traceback
log = CPLog(__name__)
class MovieResultModifier(Plugin):
default_info = {
'tmdb_id': 0,
'titles': [],
'original_title': '',
'year': 0,
'images': {
'poster': [],
'backdrop': [],
'poster_original': [],
'backdrop_original': [],
'actors': {}
},
'runtime': 0,
'plot': '',
'tagline': '',
'imdb': '',
'genres': [],
'mpaa': None,
'actors': [],
'actor_roles': {}
}
def __init__(self):
        addEvent('result.modify.info.search', self.returnByType)
addEvent('result.modify.movie.search', self.combineOnIMDB)
addEvent('result.modify.movie.info', self.checkLibrary)
def returnByType(self, results):
new_results = {}
for r in results:
type_name = r.get('type', 'movie') + 's'
if type_name not in new_results:
new_results[type_name] = []
new_results[type_name].append(r)
# Combine movies, needs a cleaner way..
if 'movies' in new_results:
new_results['movies'] = self.combineOnIMDB(new_results['movies'])
return new_results
def combineOnIMDB(self, results):
temp = {}
order = []
# Combine on imdb id
for item in results:
random_string = randomString()
imdb = item.get('imdb', random_string)
imdb = imdb if imdb else random_string
if not temp.get(imdb):
temp[imdb] = self.getLibraryTags(imdb)
order.append(imdb)
# Merge dicts
temp[imdb] = mergeDicts(temp[imdb], item)
# Make it a list again
temp_list = [temp[x] for x in order]
return temp_list
def getLibraryTags(self, imdb):
temp = {
'in_wanted': False,
'in_library': False,
}
# Add release info from current library
db = get_session()
try:
l = db.query(Library).filter_by(identifier = imdb).first()
if l:
# Statuses
active_status, done_status = fireEvent('status.get', ['active', 'done'], single = True)
for movie in l.movies:
if movie.status_id == active_status['id']:
temp['in_wanted'] = fireEvent('media.get', movie.id, single = True)
for release in movie.releases:
if release.status_id == done_status['id']:
temp['in_library'] = fireEvent('media.get', movie.id, single = True)
except:
log.error('Tried getting more info on searched movies: %s', traceback.format_exc())
return temp
def checkLibrary(self, result):
result = mergeDicts(copy.deepcopy(self.default_info), copy.deepcopy(result))
if result and result.get('imdb'):
return mergeDicts(result, self.getLibraryTags(result['imdb']))
return result
|
TylerKirby/cltk
|
cltk/tests/test_nlp/test_prosody.py
|
Python
|
mit
| 3,978
| 0.001078
|
"""Test cltk.prosody."""
__license__ = 'MIT License. See LICENSE.'
from cltk.prosody.latin.scanner import Scansion as ScansionLatin
from cltk.prosody.latin.clausulae_analysis import Clausulae
from cltk.prosody.greek.scanner import Scansion as ScansionGreek
from cltk.prosody.latin.macronizer import Macronizer
import unittest
class TestSequenceFunctions(unittest.TestCase): # pylint: disable=R0904
"""Class for unittest"""
"""greek/scanner.py"""
# Test string for Greek prosody module unit testing
test = "νέος μὲν καὶ ἄπειρος, δικῶν ἔγωγε ἔτι. μὲν καὶ ἄπειρος."
def test_clean_text_greek(self):
"""Test _clean_text method."""
correct = "νέος μὲν καὶ ἄπειρος δικῶν ἔγωγε ἔτι. μὲν καὶ ἄπειρος."
current = ScansionGreek()._clean_text(self.test)
self.assertEqual(current, correct)
def test_clean_accents_greek(self):
"""Test _clean_accents method."""
correct = "νεος μεν και απειρος δικων εγωγε ετι. μεν και απειρος."
current = ScansionGreek()._clean_accents(self.test)
self.assertEqual(current, correct)
def test_tokenize_greek(self):
"""Test _tokenize method."""
correct = [['νεος', 'μεν', 'και', 'απειρος', 'δικων', 'εγωγε', 'ετι.'],
['μεν', 'και', 'απειρος.']]
current = ScansionGreek()._tokenize(self.test)
self.assertEqual(current, correct)
def test_make_syllables_greek(self):
"""Test _make_syllables method."""
correct = [[['νε', 'ος'], ['μεν'], ['και'], ['α', 'πει', 'ρος'],
['δι', 'κων'], ['ε', 'γω', 'γε'], ['ε', 'τι']], [['μεν'],
['και'], ['α', 'πει', 'ρος']]]
current = ScansionGreek()._make_syllables(self.test)
self.assertEqual(current, correct)
def test_scan_text_greek(self):
"""Test scan_text method."""
correct = ['˘¯¯¯˘¯¯˘¯˘¯˘˘x', '¯¯˘¯x']
current = ScansionGreek().scan_text(self.test)
self.assertEqual(current, correct)
"""latin/macronizer.py"""
def test_retrieve_morpheus_entry(self):
""" Text Macronizer()._retrieve_morpheus_tag()"""
correct = [('n-s---fb-', 'puella', 'puellā'), ('n-s---fn-', 'puella', 'puella'), ('n-s---fv-', 'puella', 'puella')]
current = Macronizer("tag_ngram_123_backoff")._retrieve_morpheus_entry("puella")
self.assertEqual(current, correct)
def test_macronize_word(self):
"""Test Macronizer()._macronize_word()"""
correct = ('flumine', 'n-s---nb-', 'flūmine')
current = Macronizer("tag_ngram_123_backoff")._macronize_word(('flumine', 'n-s---nb-'))
self.assertEqual(current, correct)
def test_macronize_tags(self):
"""Test Macronizer().macronize_tags()"""
text = "Quo usque tandem, O Catilina, abutere nostra patientia?"
correct = [('quo', 'd--------', 'quō'), ('usque', 'd--------', 'usque'), ('tandem', 'd--------', 'tandem'), (',', 'u--------', ','), ('o', 'e--------', 'ō'), ('catilina', 'n-s---mb-', 'catilīnā'), (',', 'u--------', ','), ('abutere', 'v2sfip---', 'abūtēre'), ('nostra', 'a-s---fb-', 'nostrā'), ('patientia', 'n-s---fn-', 'patientia'), ('?', None, '?')]
current = Macronizer("tag_ngram_123_backoff").macronize_tags(text)
self.assertEqual(current, correct)
def test_macronize_text(self):
"""Test Macronizer().macronize_text()"""
text = "Quo usque tandem, O Catilina, abutere nostra patientia?"
correct = "quō usque
|
tandem , ō catilīnā , abūtēre nostrā patientia ?"
curr
|
ent = Macronizer("tag_ngram_123_backoff").macronize_text(text)
self.assertEqual(current, correct)
if __name__ == '__main__':
unittest.main()
|
bdh1011/wau
|
venv/lib/python2.7/site-packages/pandas/tseries/tests/test_frequencies.py
|
Python
|
mit
| 16,705
| 0.004071
|
from datetime import datetime, time, timedelta
from pandas.compat import range
import sys
import os
import nose
import numpy as np
from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range
import pandas.tseries.frequencies as frequencies
from pandas.tseries.tools import to_datetime
import pandas.tseries.offsets as offsets
from pandas.tseries.period import PeriodIndex
import pandas.compat as compat
import pandas.util.testing as tm
from pandas import Timedelta
def test_to_offset_multiple():
freqstr = '2h30min'
freqstr2 = '2h 30min'
result = frequencies.to_offset(freqstr)
assert(result == frequencies.to_offset(freqstr2))
expected = offsets.Minute(150)
assert(result == expected)
freqstr = '2h30min15s'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(150 * 60 + 15)
assert(result == expected)
freqstr = '2h 60min'
result = frequencies.to_offset(freqstr)
expected = offsets.Hour(3)
assert(result == expected)
freqstr = '15l500u'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(15500)
assert(result == expected)
freqstr = '10s75L'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(10075)
assert(result == expected)
freqstr = '2800N'
result = frequencies.to_offset(freqstr)
expected = offsets.Nano(2800)
assert(result == expected)
# malformed
try:
frequencies.to_offset('2h20m')
except ValueError:
pass
else:
assert(False)
def test_to_offset_negative():
freqstr = '-1S'
result = frequencies.to_offset(freqstr)
assert(result.n == -1)
freqstr = '-5min10s'
result = frequencies.to_offset(freqstr)
assert(result.n == -310)
def test_to_offset_leading_zero():
freqstr = '00H 00T 01S'
result = frequencies.to_offset(freqstr)
assert(result.n == 1)
freqstr = '-00H 03T 14S'
result = frequencies.to_offset(freqstr)
assert(result.n == -194)
def test_to_offset_pd_timedelta():
# Tests for #9064
td = Timedelta(days=1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(86401)
assert(expected==result)
td = Timedelta(days=-1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(-86399)
assert(expected==result)
td = Timedelta(hours=1, minutes=10)
result = frequencies.to_offset(td)
expected = offsets.Minute(70)
assert(expected==result)
td = Timedelta(hours=1, minutes=-10)
result = frequencies.to_offset(td)
expected = offsets.Minute(50)
assert(expected==result)
td = Timedelta(weeks=1)
result = frequencies.to_offset(td)
expected = offsets.Day(7)
assert(expected==result)
td1 = Timedelta(hours=1)
result1 = frequencies.to_offset(td1)
result2 = frequencies.to_offset('60min')
assert(result1 == result2)
td = Timedelta(microseconds=1)
result = frequencies.to_offset(td)
expected = offsets.Micro(1)
assert(expected == result)
td = Timedelta(microseconds=0)
tm.assertRaises(ValueError, lambda: frequencies.to_offset(td))
def test_anchored_shortcuts():
result = frequencies.to_offset('W')
expected = frequencies.to_offset('W-SUN')
assert(result == expected)
result = frequencies.to_offset('Q')
expected = frequencies.to_offset('Q-DEC')
assert(result == expected)
_dti = DatetimeIndex
class TestFrequencyInference(tm.TestCase):
def test_raise_if_period_index(self):
index = PeriodIndex(start="1/1/1990", periods=20, freq="M")
self.assertRaises(TypeError, frequencies.infer_freq, index)
def test_raise_if_too_few(self):
index = _dti(['12/31/1998', '1/3/1999'])
self.assertRaises(ValueError, frequencies.infer_freq, index)
def test_business_daily(self):
index = _dti(['12/31/1998', '1/3/1999', '1/4/1999'])
self.assertEqual(frequencies.infer_freq(index), 'B')
def test_day(self):
self._check_tick(timedelta(1), 'D')
def test_day_corner(self):
index = _dti(['1/1/2000', '1/2/2000', '1/3/2000'])
self.assertEqual(frequencies.infer_freq(index), 'D')
def test_non_datetimeindex(self):
dates = to_datetime(['1/1/2000', '1/2/2000', '1/3/2000'])
self.assertEqual(frequencies.infer_freq(dates), 'D')
def test_hour(self):
self._check_tick(timedelta(hours=1), 'H')
def test_minute(self):
self._check_tick(timedelta(minutes=1), 'T')
def test_second(self):
self._check_tick(timedelta(seconds=1), 'S')
def test_millisecond(self):
self._check_tick(timedelta(microseconds=1000), 'L')
def test_microsecond(self):
self._check_tick(timedelta(microseconds=1), 'U')
def test_nanosecond(self):
self._check_tick(np.timedelta64(1, 'ns'), 'N')
def _check_tick(self, base_delta, code):
b = Timestamp(datetime.now())
for i in range(1, 5):
inc = base_delta * i
index = _dti([b + inc * j for j in range(3)])
if i > 1:
exp_freq = '%d%s' % (i, code)
else:
exp_freq = code
self.assertEqual(frequencies.infer_freq(index), exp_freq)
index = _dti([b + base_delta * 7] +
[b + base_delta * j for j in range(3)])
self.assertIsNone(frequencies.infer_freq(index))
index = _dti([b + base_delta * j for j in range(3)] +
[b + base_delta * 7])
self.assertIsNone(frequencies.infer_freq(index))
def test_weekly(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
self._check_generated_range('1/1/2000', 'W-%s' % day)
def test_week_of_month(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
for i in range(1, 5):
self._check_generated_range('1/1/2000', 'WOM-%d%s' % (i, day))
def test_fifth_week_of_month(self):
# Only supports freq up to WOM-4. See #9425
func = lambda: date_range('2014-01-01', freq='WOM-5MON')
self.assertRaises(ValueError, func)
def test_fifth_week_of_month_infer(self):
# Only attempts to infer up to WOM-4. See #9425
index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"])
assert frequencies.infer_freq(index) is None
def test_week_of_month_fake(self):
        # All of these dates are on the same day of week and are 4 or 5 weeks apart
index = DatetimeIndex(["2013-08-27","2013-10-01","2013-10-29","2013-11-26"])
assert frequencies.infer_freq(index) != 'WOM-4TUE'
def test_monthly(self):
self._check_generated_range('1/1/2000', 'M')
def test_monthly_ambiguous(self):
rng = _dti(['1/31/2000', '2/29/2000', '3/31/2000'])
self.assertEqual(rng.inferred_freq, 'M')
def test_business_monthly(self):
self._check_generated_range('1/1/2000', 'BM')
def test_business_start_monthly(self):
self._check_generated_range('1/1/2000', 'BMS')
def test_quarterly(self):
        for month in ['JAN', 'FEB', 'MAR']:
            self._check_generated_range('1/1/2000', 'Q-%s' % month)
def test_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'A-%s' % month)
def test_business_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'BA-%s' % month)
def test_annual_ambiguous(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
self.assertEqual(rng.inferred_freq, 'A-JAN')
def _check_generated_range(self, start, freq):
freq = freq.upper()
gen = date_range(start, periods=7, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
else:
inf_freq = frequencies.infer_freq(index)
self.assertTrue((inf_freq == 'Q-DEC' and
gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN'
|
cydenix/OpenGLCffi
|
OpenGLCffi/GL/EXT/GREMEDY/string_marker.py
|
Python
|
mit
| 123
| 0.02439
|
from OpenGLCffi.GL import params
@params(api='gl', prms=['len', 'string'])
def glStringMarkerGREMEDY(len, string):
pass
|
pepincho/Python101-and-Algo1-Courses
|
Programming-101-v3/week6/1-Who-Follows-You-Back/github_person_test.py
|
Python
|
mit
| 185
| 0
|
import unittest
from github_person import GithubPerson
class Test_GithubPerson(unittest.TestCase):
def setUp(self):
pass
if __name__ == '__main__':
    unittest.main()
|
CauldronDevelopmentLLC/dockbot
|
dockbot/Image.py
|
Python
|
gpl-3.0
| 6,147
| 0.00667
|
import shutil
import os
import hashlib
import dockbot
def gen_hash(data):
return hashlib.sha256(data).hexdigest()
class Image(object):
def __init__(self, root, name, path, platform = None, projects = [],
modes = None, slave = False, remote = False):
self.root = root
self.conf = root.conf
self.name = name
self.qname = self.conf['namespace'] + '-' + name
self.platform = platform
self.modes = modes
if not remote:
self.path = path
self.dir = os.path.dirname(path)
self.context = self.conf.get_list('context', platform, slave)
self.containers = []
if not slave:
self.context += [
dockbot.get_resource('dockbot/data/master/nginx.conf')]
self.containers.append(dockbot.Master(self))
return
# Slave only from here
for mode in modes:
self.containers.append(self.create_slave(mode))
# Slave projects
self.projects = set()
for project in projects:
self.projects.update(self.conf.get_project_deps(project))
# Get project overrides
self.project_overrides = \
self.conf.get_sub_key(platform).get('projects', {})
def __eq__(self, other): return self.name == other.name
def __ne__(self, other): return not self.__eq__(other)
def kind(self): return 'Image'
def create_slave(self, mode): return dockbot.Slave(self, mode)
def is_running(self):
for container in self.containers:
if container.is_running(): return True
return False
def get_context_path(self):
return 'run/docker/' + self.name
def get_hash_path(self):
return self.get_context_path() + '.sha256'
def get_data_hash(self):
path = self.get_hash_path()
if os.path.exists(path):
f = None
try:
f = open(path, 'rt')
return f.read()
finally:
if f is not None: f.close()
def is_dirty(self):
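        # The image is considered dirty when the freshly generated Dockerfile no
        # longer matches the hash recorded by the previous build; --force skips
        # the check because callers rebuild unconditionally in that case.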
if dockbot.args.force: return False
return self.get_data_hash() != gen_hash(self.gen_dockerfile())
def gen_dockerfile(self):
libpath = [os.path.dirname(self.path)]
libpath += self.conf.get('libpath', ['lib'])
libpath += [dockbot.get_resource('dockbot/data/lib')]
cmd = ['m4'] + sum([['-I', x] for x in libpath], []) + [self.path]
ret, out, err = dockbot.system(cmd, True)
if ret:
raise dockbot.Error('Failed to construct Docker file: ' +
err.decode('utf-8'))
return out
def get_project(self, name):
import copy
for project in self.conf.projects:
if project['name'] == name:
p = copy.deepcopy(project)
p.update(self.project_overrides.get(name, {}))
return p
raise dockbot.Error('Project "%s" not found' % name)
def exists(self):
return dockbot.inspect(self.qname) != dockbot.NOT_FOUND
def cmd_delete(self):
if self.exists():
for container in self.containers:
container.cmd_delete()
dockbot.status_line(self.qname, *dockbot.DELETING)
dockbot.system(['docker', 'rmi', '--no-prune', self.qname], True,
'remove image')
def container_exists(self):
for container in self.containers:
            if container.exists(): return True
return False
def cmd_status(self):
for container in self.containers:
container.cmd_status()
def cmd_config(self):
for container in self.containers:
container.cmd_config()
def cmd_shell(self):
raise dockbot.Error('Cannot open shell in image')
def cmd_start(self):
for container in self.containers:
            container.cmd_start()
def cmd_stop(self):
for container in self.containers:
container.cmd_stop()
def cmd_restart(self):
self.cmd_stop()
self.cmd_start()
def cmd_build(self):
# Check if image is running
if self.is_running():
if dockbot.args.all and (self.is_dirty() or dockbot.args.force):
self.cmd_stop()
else:
dockbot.status_line(self.qname, *dockbot.RUNNING)
return
if self.is_dirty() or dockbot.args.force:
self.cmd_delete() # Delete image if it exists
elif self.exists():
dockbot.status_line(self.qname, *dockbot.BUILT)
return
dockbot.status_line(self.qname, *dockbot.BUILDING)
# Generate Dockerfile
data = self.gen_dockerfile()
data_hash = gen_hash(data)
# Clean up old context
ctx_path = self.get_context_path()
if os.path.exists(ctx_path): shutil.rmtree(ctx_path)
# Construct Dockerfile
os.makedirs(ctx_path)
dockerfile = ctx_path + '/Dockerfile'
f = None
try:
f = open(dockerfile, 'w')
f.write(data.decode('utf-8'))
f.close()
f = open(self.get_hash_path(), 'w')
f.write(data_hash)
finally:
if f is not None: f.close()
# Link context
for path in self.context:
target = os.path.join(ctx_path, os.path.basename(path))
if dockbot.args.verbose: print('%s -> %s' % (path, target))
shutil.copy(path, target)
# Build command
cmd = ['docker', 'build', '--rm', '-t', self.qname]
# Extra args
cmd += dockbot.args.args
# Do build
dockbot.system(cmd + ['.'], False, 'build ' + self.qname,
cwd = ctx_path)
def cmd_trigger(self):
for container in self.containers:
if isinstance(container, dockbot.Slave):
container.cmd_trigger()
def cmd_publish(self):
for container in self.containers:
container.cmd_publish()
|
ketancmaheshwari/hello-goog
|
src/python/newtonraphson.py
|
Python
|
apache-2.0
| 209
| 0.004785
|
#!/bin/env python
# mainly for sys.argv[], sys.argv[0] is the name of the program
import sys
# mainly for arrays
import numpy as np
def newtraph(fx, fxprime):
    pass  # function body is not present in this excerpt
if __name__ == '__main__':
    print 'hello'
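The newtraph stub above is empty in this excerpt, so the file never shows the actual iteration. Purely as an illustration of what a Newton-Raphson root finder with that signature might look like, here is a minimal sketch; the starting guess, tolerance, and iteration-cap parameters (x0, tol, max_iter) are assumptions and not part of the original file.

# Hypothetical completion of newtraph(); not the original author's implementation.
def newtraph(fx, fxprime, x0=1.0, tol=1e-10, max_iter=100):
    """Return an approximate root of fx using Newton-Raphson iteration."""
    x = float(x0)
    for _ in range(max_iter):
        step = fx(x) / fxprime(x)   # Newton update: x_{n+1} = x_n - f(x_n) / f'(x_n)
        x -= step
        if abs(step) < tol:         # stop once the update is negligibly small
            return x
    raise RuntimeError('Newton-Raphson did not converge after %d iterations' % max_iter)
# Example: the positive root of x**2 - 2 is sqrt(2) ~= 1.4142135623730951
# print(newtraph(lambda x: x * x - 2, lambda x: 2 * x, x0=1.0))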
|