| column | type | values |
|---|---|---|
| commit | stringlengths | 40 to 40 |
| old_file | stringlengths | 4 to 118 |
| new_file | stringlengths | 4 to 118 |
| old_contents | stringlengths | 0 to 2.94k |
| new_contents | stringlengths | 1 to 4.43k |
| subject | stringlengths | 15 to 444 |
| message | stringlengths | 16 to 3.45k |
| lang | stringclasses | 1 value |
| license | stringclasses | 13 values |
| repos | stringlengths | 5 to 43.2k |
| prompt | stringlengths | 17 to 4.58k |
| response | stringlengths | 1 to 4.43k |
| prompt_tagged | stringlengths | 58 to 4.62k |
| response_tagged | stringlengths | 1 to 4.43k |
| text | stringlengths | 132 to 7.29k |
| text_tagged | stringlengths | 173 to 7.33k |
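The rows below follow this schema, one flattened record per commit: the `prompt_tagged` and `text_tagged` columns wrap the commit message in `<commit_before>`, `<commit_msg>` and `<commit_after>` markers, while `response` and `response_tagged` repeat the post-commit file contents from `new_contents`. As a minimal sketch of working with these columns (assuming the dump comes from a dataset in the Hugging Face `datasets` format; the repository id below is a hypothetical placeholder, not the dataset's real name):

```python
# Minimal sketch: load and inspect a commit-to-code dataset with the columns
# listed above. "example-org/commit-code-dump" is a hypothetical placeholder id.
from datasets import load_dataset

ds = load_dataset("example-org/commit-code-dump", split="train")

print(ds.column_names)            # ['commit', 'old_file', 'new_file', 'old_contents', ...]

row = ds[0]
print(row["commit"])              # 40-character commit hash
print(row["subject"])             # one-line commit subject
print(row["new_contents"][:200])  # start of the post-commit file contents
```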
bdea8195a3f06c3e69c655419b63dbcf925a6382
|
test_install.py
|
test_install.py
|
# -*- coding: utf-8 -*-
import requests
import sys
from insight_reloaded.insight_settings import CROP_SIZE
def main():
args = {}
args['crop'] = CROP_SIZE
args['url'] = 'http://www.novapost.fr/emploi/ruby.pdf'
if len(sys.argv) > 1:
args['url'] = sys.argv[1]
if len(sys.argv) > 2:
args['callback'] = sys.argv[2]
if len(sys.argv) > 3:
args['crop'] = sys.argv[3]
print "Running test on: %s" % args
try:
raw_input('Continue?')
except KeyboardInterrupt:
print
print "USAGE: %s [url] [callback] [crop]" % sys.argv[0]
sys.exit(0)
# On lance un job
response = requests.get('http://localhost:8888/', params=args)
# On vérifie que les logs on compris.
if 'Job added to queue' not in response.text:
print "Error: %s - %s" % (response.status_code,
response.text)
sys.exit(1)
response = requests.get('http://localhost:8888/status')
if 'There is' not in response.text:
print "Error: %s - %s" % (response.status_code,
response.text)
sys.exit(2)
# On vérifie que le fichier existe au bon endroit
print "The API works. Check the log file or the callback response"
if __name__ == '__main__':
main()
|
Add a functional test for insight.
|
Add a functional test for insight.
|
Python
|
bsd-3-clause
|
novapost/insight-reloaded
|
Add a functional test for insight.
|
# -*- coding: utf-8 -*-
import requests
import sys
from insight_reloaded.insight_settings import CROP_SIZE
def main():
args = {}
args['crop'] = CROP_SIZE
args['url'] = 'http://www.novapost.fr/emploi/ruby.pdf'
if len(sys.argv) > 1:
args['url'] = sys.argv[1]
if len(sys.argv) > 2:
args['callback'] = sys.argv[2]
if len(sys.argv) > 3:
args['crop'] = sys.argv[3]
print "Running test on: %s" % args
try:
raw_input('Continue?')
except KeyboardInterrupt:
print
print "USAGE: %s [url] [callback] [crop]" % sys.argv[0]
sys.exit(0)
# On lance un job
response = requests.get('http://localhost:8888/', params=args)
# On vérifie que les logs on compris.
if 'Job added to queue' not in response.text:
print "Error: %s - %s" % (response.status_code,
response.text)
sys.exit(1)
response = requests.get('http://localhost:8888/status')
if 'There is' not in response.text:
print "Error: %s - %s" % (response.status_code,
response.text)
sys.exit(2)
# On vérifie que le fichier existe au bon endroit
print "The API works. Check the log file or the callback response"
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a functional test for insight.<commit_after>
|
# -*- coding: utf-8 -*-
import requests
import sys
from insight_reloaded.insight_settings import CROP_SIZE
def main():
args = {}
args['crop'] = CROP_SIZE
args['url'] = 'http://www.novapost.fr/emploi/ruby.pdf'
if len(sys.argv) > 1:
args['url'] = sys.argv[1]
if len(sys.argv) > 2:
args['callback'] = sys.argv[2]
if len(sys.argv) > 3:
args['crop'] = sys.argv[3]
print "Running test on: %s" % args
try:
raw_input('Continue?')
except KeyboardInterrupt:
print
print "USAGE: %s [url] [callback] [crop]" % sys.argv[0]
sys.exit(0)
# On lance un job
response = requests.get('http://localhost:8888/', params=args)
# On vérifie que les logs on compris.
if 'Job added to queue' not in response.text:
print "Error: %s - %s" % (response.status_code,
response.text)
sys.exit(1)
response = requests.get('http://localhost:8888/status')
if 'There is' not in response.text:
print "Error: %s - %s" % (response.status_code,
response.text)
sys.exit(2)
# On vérifie que le fichier existe au bon endroit
print "The API works. Check the log file or the callback response"
if __name__ == '__main__':
main()
|
Add a functional test for insight.# -*- coding: utf-8 -*-
import requests
import sys
from insight_reloaded.insight_settings import CROP_SIZE
def main():
args = {}
args['crop'] = CROP_SIZE
args['url'] = 'http://www.novapost.fr/emploi/ruby.pdf'
if len(sys.argv) > 1:
args['url'] = sys.argv[1]
if len(sys.argv) > 2:
args['callback'] = sys.argv[2]
if len(sys.argv) > 3:
args['crop'] = sys.argv[3]
print "Running test on: %s" % args
try:
raw_input('Continue?')
except KeyboardInterrupt:
print
print "USAGE: %s [url] [callback] [crop]" % sys.argv[0]
sys.exit(0)
# On lance un job
response = requests.get('http://localhost:8888/', params=args)
# On vérifie que les logs on compris.
if 'Job added to queue' not in response.text:
print "Error: %s - %s" % (response.status_code,
response.text)
sys.exit(1)
response = requests.get('http://localhost:8888/status')
if 'There is' not in response.text:
print "Error: %s - %s" % (response.status_code,
response.text)
sys.exit(2)
# On vérifie que le fichier existe au bon endroit
print "The API works. Check the log file or the callback response"
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a functional test for insight.<commit_after># -*- coding: utf-8 -*-
import requests
import sys
from insight_reloaded.insight_settings import CROP_SIZE
def main():
args = {}
args['crop'] = CROP_SIZE
args['url'] = 'http://www.novapost.fr/emploi/ruby.pdf'
if len(sys.argv) > 1:
args['url'] = sys.argv[1]
if len(sys.argv) > 2:
args['callback'] = sys.argv[2]
if len(sys.argv) > 3:
args['crop'] = sys.argv[3]
print "Running test on: %s" % args
try:
raw_input('Continue?')
except KeyboardInterrupt:
print
print "USAGE: %s [url] [callback] [crop]" % sys.argv[0]
sys.exit(0)
# On lance un job
response = requests.get('http://localhost:8888/', params=args)
# On vérifie que les logs on compris.
if 'Job added to queue' not in response.text:
print "Error: %s - %s" % (response.status_code,
response.text)
sys.exit(1)
response = requests.get('http://localhost:8888/status')
if 'There is' not in response.text:
print "Error: %s - %s" % (response.status_code,
response.text)
sys.exit(2)
# On vérifie que le fichier existe au bon endroit
print "The API works. Check the log file or the callback response"
if __name__ == '__main__':
main()
|
|
fef1191dc42a5710f14635182f59b4bc643b549e
|
senlinclient/tests/unit/test_sdk.py
|
senlinclient/tests/unit/test_sdk.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
import testtools
from senlinclient.common import sdk
class TestSdk(testtools.TestCase):
@mock.patch('senlinclient.common.sdk.ProfileAction.set_option')
def test_env(self, mock_set_option):
os.environ['test_senlin_sdk_env'] = '1'
sdk.ProfileAction.env('test_senlin_sdk_env')
mock_set_option.assert_called_once_with('test_senlin_sdk_env', '1')
@mock.patch('senlinclient.common.sdk.ProfileAction.prof')
def test_set_option_set_name(self, mock_prof):
mock_prof.ALL = 'mock_prof.ALL'
sdk.ProfileAction.set_option('name', 'test=val1')
mock_prof.set_name.assert_called_once_with('test', 'val1')
mock_prof.reset_mock()
sdk.ProfileAction.set_option('name', 'val2')
mock_prof.set_name.assert_called_once_with(mock_prof.ALL, 'val2')
@mock.patch('senlinclient.common.sdk.ProfileAction.prof')
def test_set_option_set_region(self, mock_prof):
mock_prof.ALL = 'mock_prof.ALL'
sdk.ProfileAction.set_option('OS_REGION_NAME', 'test=val1')
mock_prof.set_region.assert_called_once_with('test', 'val1')
mock_prof.reset_mock()
sdk.ProfileAction.set_option('OS_REGION_NAME', 'val2')
mock_prof.set_region.assert_called_once_with(mock_prof.ALL, 'val2')
@mock.patch('senlinclient.common.sdk.ProfileAction.prof')
def test_set_option_set_version(self, mock_prof):
mock_prof.ALL = 'mock_prof.ALL'
sdk.ProfileAction.set_option('version', 'test=val1')
mock_prof.set_version.assert_called_once_with('test', 'val1')
@mock.patch('senlinclient.common.sdk.ProfileAction.prof')
def test_set_option_set_visibility(self, mock_prof):
mock_prof.ALL = 'mock_prof.ALL'
sdk.ProfileAction.set_option('visibility', 'test=val1')
mock_prof.set_visibility.assert_called_once_with('test', 'val1')
|
Add unit test cases for sdk module
|
Add unit test cases for sdk module
Add unit test cases for sdk module for class ProfileAction
Still need unit tests for class Resource
Change-Id: I6328e6346bbc2e615d1824aeb223e76cb5c8adf0
Partial-Bug: #1484496
|
Python
|
apache-2.0
|
stackforge/python-senlinclient,stackforge/python-senlinclient,openstack/python-senlinclient,openstack/python-senlinclient
|
Add unit test cases for sdk module
Add unit test cases for sdk module for class ProfileAction
Still need unit tests for class Resource
Change-Id: I6328e6346bbc2e615d1824aeb223e76cb5c8adf0
Partial-Bug: #1484496
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
import testtools
from senlinclient.common import sdk
class TestSdk(testtools.TestCase):
@mock.patch('senlinclient.common.sdk.ProfileAction.set_option')
def test_env(self, mock_set_option):
os.environ['test_senlin_sdk_env'] = '1'
sdk.ProfileAction.env('test_senlin_sdk_env')
mock_set_option.assert_called_once_with('test_senlin_sdk_env', '1')
@mock.patch('senlinclient.common.sdk.ProfileAction.prof')
def test_set_option_set_name(self, mock_prof):
mock_prof.ALL = 'mock_prof.ALL'
sdk.ProfileAction.set_option('name', 'test=val1')
mock_prof.set_name.assert_called_once_with('test', 'val1')
mock_prof.reset_mock()
sdk.ProfileAction.set_option('name', 'val2')
mock_prof.set_name.assert_called_once_with(mock_prof.ALL, 'val2')
@mock.patch('senlinclient.common.sdk.ProfileAction.prof')
def test_set_option_set_region(self, mock_prof):
mock_prof.ALL = 'mock_prof.ALL'
sdk.ProfileAction.set_option('OS_REGION_NAME', 'test=val1')
mock_prof.set_region.assert_called_once_with('test', 'val1')
mock_prof.reset_mock()
sdk.ProfileAction.set_option('OS_REGION_NAME', 'val2')
mock_prof.set_region.assert_called_once_with(mock_prof.ALL, 'val2')
@mock.patch('senlinclient.common.sdk.ProfileAction.prof')
def test_set_option_set_version(self, mock_prof):
mock_prof.ALL = 'mock_prof.ALL'
sdk.ProfileAction.set_option('version', 'test=val1')
mock_prof.set_version.assert_called_once_with('test', 'val1')
@mock.patch('senlinclient.common.sdk.ProfileAction.prof')
def test_set_option_set_visibility(self, mock_prof):
mock_prof.ALL = 'mock_prof.ALL'
sdk.ProfileAction.set_option('visibility', 'test=val1')
mock_prof.set_visibility.assert_called_once_with('test', 'val1')
|
<commit_before><commit_msg>Add unit test cases for sdk module
Add unit test cases for sdk module for class ProfileAction
Still need unit tests for class Resource
Change-Id: I6328e6346bbc2e615d1824aeb223e76cb5c8adf0
Partial-Bug: #1484496<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
import testtools
from senlinclient.common import sdk
class TestSdk(testtools.TestCase):
@mock.patch('senlinclient.common.sdk.ProfileAction.set_option')
def test_env(self, mock_set_option):
os.environ['test_senlin_sdk_env'] = '1'
sdk.ProfileAction.env('test_senlin_sdk_env')
mock_set_option.assert_called_once_with('test_senlin_sdk_env', '1')
@mock.patch('senlinclient.common.sdk.ProfileAction.prof')
def test_set_option_set_name(self, mock_prof):
mock_prof.ALL = 'mock_prof.ALL'
sdk.ProfileAction.set_option('name', 'test=val1')
mock_prof.set_name.assert_called_once_with('test', 'val1')
mock_prof.reset_mock()
sdk.ProfileAction.set_option('name', 'val2')
mock_prof.set_name.assert_called_once_with(mock_prof.ALL, 'val2')
@mock.patch('senlinclient.common.sdk.ProfileAction.prof')
def test_set_option_set_region(self, mock_prof):
mock_prof.ALL = 'mock_prof.ALL'
sdk.ProfileAction.set_option('OS_REGION_NAME', 'test=val1')
mock_prof.set_region.assert_called_once_with('test', 'val1')
mock_prof.reset_mock()
sdk.ProfileAction.set_option('OS_REGION_NAME', 'val2')
mock_prof.set_region.assert_called_once_with(mock_prof.ALL, 'val2')
@mock.patch('senlinclient.common.sdk.ProfileAction.prof')
def test_set_option_set_version(self, mock_prof):
mock_prof.ALL = 'mock_prof.ALL'
sdk.ProfileAction.set_option('version', 'test=val1')
mock_prof.set_version.assert_called_once_with('test', 'val1')
@mock.patch('senlinclient.common.sdk.ProfileAction.prof')
def test_set_option_set_visibility(self, mock_prof):
mock_prof.ALL = 'mock_prof.ALL'
sdk.ProfileAction.set_option('visibility', 'test=val1')
mock_prof.set_visibility.assert_called_once_with('test', 'val1')
|
Add unit test cases for sdk module
Add unit test cases for sdk module for class ProfileAction
Still need unit tests for class Resource
Change-Id: I6328e6346bbc2e615d1824aeb223e76cb5c8adf0
Partial-Bug: #1484496# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
import testtools
from senlinclient.common import sdk
class TestSdk(testtools.TestCase):
@mock.patch('senlinclient.common.sdk.ProfileAction.set_option')
def test_env(self, mock_set_option):
os.environ['test_senlin_sdk_env'] = '1'
sdk.ProfileAction.env('test_senlin_sdk_env')
mock_set_option.assert_called_once_with('test_senlin_sdk_env', '1')
@mock.patch('senlinclient.common.sdk.ProfileAction.prof')
def test_set_option_set_name(self, mock_prof):
mock_prof.ALL = 'mock_prof.ALL'
sdk.ProfileAction.set_option('name', 'test=val1')
mock_prof.set_name.assert_called_once_with('test', 'val1')
mock_prof.reset_mock()
sdk.ProfileAction.set_option('name', 'val2')
mock_prof.set_name.assert_called_once_with(mock_prof.ALL, 'val2')
@mock.patch('senlinclient.common.sdk.ProfileAction.prof')
def test_set_option_set_region(self, mock_prof):
mock_prof.ALL = 'mock_prof.ALL'
sdk.ProfileAction.set_option('OS_REGION_NAME', 'test=val1')
mock_prof.set_region.assert_called_once_with('test', 'val1')
mock_prof.reset_mock()
sdk.ProfileAction.set_option('OS_REGION_NAME', 'val2')
mock_prof.set_region.assert_called_once_with(mock_prof.ALL, 'val2')
@mock.patch('senlinclient.common.sdk.ProfileAction.prof')
def test_set_option_set_version(self, mock_prof):
mock_prof.ALL = 'mock_prof.ALL'
sdk.ProfileAction.set_option('version', 'test=val1')
mock_prof.set_version.assert_called_once_with('test', 'val1')
@mock.patch('senlinclient.common.sdk.ProfileAction.prof')
def test_set_option_set_visibility(self, mock_prof):
mock_prof.ALL = 'mock_prof.ALL'
sdk.ProfileAction.set_option('visibility', 'test=val1')
mock_prof.set_visibility.assert_called_once_with('test', 'val1')
|
<commit_before><commit_msg>Add unit test cases for sdk module
Add unit test cases for sdk module for class ProfileAction
Still need unit tests for class Resource
Change-Id: I6328e6346bbc2e615d1824aeb223e76cb5c8adf0
Partial-Bug: #1484496<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
import testtools
from senlinclient.common import sdk
class TestSdk(testtools.TestCase):
@mock.patch('senlinclient.common.sdk.ProfileAction.set_option')
def test_env(self, mock_set_option):
os.environ['test_senlin_sdk_env'] = '1'
sdk.ProfileAction.env('test_senlin_sdk_env')
mock_set_option.assert_called_once_with('test_senlin_sdk_env', '1')
@mock.patch('senlinclient.common.sdk.ProfileAction.prof')
def test_set_option_set_name(self, mock_prof):
mock_prof.ALL = 'mock_prof.ALL'
sdk.ProfileAction.set_option('name', 'test=val1')
mock_prof.set_name.assert_called_once_with('test', 'val1')
mock_prof.reset_mock()
sdk.ProfileAction.set_option('name', 'val2')
mock_prof.set_name.assert_called_once_with(mock_prof.ALL, 'val2')
@mock.patch('senlinclient.common.sdk.ProfileAction.prof')
def test_set_option_set_region(self, mock_prof):
mock_prof.ALL = 'mock_prof.ALL'
sdk.ProfileAction.set_option('OS_REGION_NAME', 'test=val1')
mock_prof.set_region.assert_called_once_with('test', 'val1')
mock_prof.reset_mock()
sdk.ProfileAction.set_option('OS_REGION_NAME', 'val2')
mock_prof.set_region.assert_called_once_with(mock_prof.ALL, 'val2')
@mock.patch('senlinclient.common.sdk.ProfileAction.prof')
def test_set_option_set_version(self, mock_prof):
mock_prof.ALL = 'mock_prof.ALL'
sdk.ProfileAction.set_option('version', 'test=val1')
mock_prof.set_version.assert_called_once_with('test', 'val1')
@mock.patch('senlinclient.common.sdk.ProfileAction.prof')
def test_set_option_set_visibility(self, mock_prof):
mock_prof.ALL = 'mock_prof.ALL'
sdk.ProfileAction.set_option('visibility', 'test=val1')
mock_prof.set_visibility.assert_called_once_with('test', 'val1')
|
|
2c7b64ea22ebe7e35945550f5726a72a721213b4
|
minitests/litex/src.yosys/missing_bit_report.py
|
minitests/litex/src.yosys/missing_bit_report.py
|
""" Generates a missing feature/bit report for LiteX design.
This script is fairly fragile, because it depends on the specific observation
that all of the remaining bits appear to either belong to HCLK_IOI or IOI3
tiles. A more general version of this script could be created, but that was
not the point of this script.
"""
from fasm import parse_fasm_filename
def main():
fasm_file = 'top.fasm'
fasm_model = list(parse_fasm_filename(fasm_file))
unknown_bits = {
'HCLK_IOI': {},
'IOI3': {},
}
total_unknown = 0
for l in fasm_model:
if l.annotations is None:
continue
annotations = {}
for annotation in l.annotations:
annotations[annotation.name] = annotation.value
if 'unknown_bit' not in annotations:
continue
total_unknown += 1
frame, word, bit = annotations['unknown_bit'].split('_')
frame = int(frame, 16)
word = int(word)
bit = int(bit)
frame_offset = frame % 0x80
base_frame = frame - frame_offset
# All remaining LiteX bits appear to be in this one IO bank, so limit
# the tool this this one IO bank.
assert base_frame == 0x00401580, hex(frame)
SIZE = 4
INITIAL_OFFSET = -2
if word == 50:
group = 'HCLK_IOI'
offset = 50
elif word < 50:
group = 'IOI3'
offset = ((word - INITIAL_OFFSET) // SIZE) * SIZE + INITIAL_OFFSET
else:
group = 'IOI3'
word -= 1
offset = ((word - INITIAL_OFFSET) // SIZE) * SIZE + INITIAL_OFFSET
offset += 1
word += 1
bit = '{}_{:02d}'.format(
frame_offset,
(word - offset) * 32 + bit,
)
if bit not in unknown_bits[group]:
unknown_bits[group][bit] = 0
unknown_bits[group][bit] += 1
print('Total unknown bits: {}'.format(total_unknown))
for group in unknown_bits:
print('Group {} (count = {}):'.format(group, len(unknown_bits[group])))
for bit in sorted(unknown_bits[group]):
print(' {} (count = {})'.format(bit, unknown_bits[group][bit]))
if __name__ == "__main__":
main()
|
Create script for generating remaining bit report.
|
Create script for generating remaining bit report.
This report is fairly fragile, but works well enough for the remaining
LiteX bits.
Signed-off-by: Keith Rothman <1bc19627a439baf17510dc2d0b2d250c96d445a5@users.noreply.github.com>
|
Python
|
isc
|
SymbiFlow/prjxray,SymbiFlow/prjxray,SymbiFlow/prjxray,SymbiFlow/prjxray,SymbiFlow/prjxray
|
Create script for generating remaining bit report.
This report is fairly fragile, but works well enough for the remaining
LiteX bits.
Signed-off-by: Keith Rothman <1bc19627a439baf17510dc2d0b2d250c96d445a5@users.noreply.github.com>
|
""" Generates a missing feature/bit report for LiteX design.
This script is fairly fragile, because it depends on the specific observation
that all of the remaining bits appear to either belong to HCLK_IOI or IOI3
tiles. A more general version of this script could be created, but that was
not the point of this script.
"""
from fasm import parse_fasm_filename
def main():
fasm_file = 'top.fasm'
fasm_model = list(parse_fasm_filename(fasm_file))
unknown_bits = {
'HCLK_IOI': {},
'IOI3': {},
}
total_unknown = 0
for l in fasm_model:
if l.annotations is None:
continue
annotations = {}
for annotation in l.annotations:
annotations[annotation.name] = annotation.value
if 'unknown_bit' not in annotations:
continue
total_unknown += 1
frame, word, bit = annotations['unknown_bit'].split('_')
frame = int(frame, 16)
word = int(word)
bit = int(bit)
frame_offset = frame % 0x80
base_frame = frame - frame_offset
# All remaining LiteX bits appear to be in this one IO bank, so limit
# the tool this this one IO bank.
assert base_frame == 0x00401580, hex(frame)
SIZE = 4
INITIAL_OFFSET = -2
if word == 50:
group = 'HCLK_IOI'
offset = 50
elif word < 50:
group = 'IOI3'
offset = ((word - INITIAL_OFFSET) // SIZE) * SIZE + INITIAL_OFFSET
else:
group = 'IOI3'
word -= 1
offset = ((word - INITIAL_OFFSET) // SIZE) * SIZE + INITIAL_OFFSET
offset += 1
word += 1
bit = '{}_{:02d}'.format(
frame_offset,
(word - offset) * 32 + bit,
)
if bit not in unknown_bits[group]:
unknown_bits[group][bit] = 0
unknown_bits[group][bit] += 1
print('Total unknown bits: {}'.format(total_unknown))
for group in unknown_bits:
print('Group {} (count = {}):'.format(group, len(unknown_bits[group])))
for bit in sorted(unknown_bits[group]):
print(' {} (count = {})'.format(bit, unknown_bits[group][bit]))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Create script for generating remaining bit report.
This report is fairly fragile, but works well enough for the remaining
LiteX bits.
Signed-off-by: Keith Rothman <1bc19627a439baf17510dc2d0b2d250c96d445a5@users.noreply.github.com><commit_after>
|
""" Generates a missing feature/bit report for LiteX design.
This script is fairly fragile, because it depends on the specific observation
that all of the remaining bits appear to either belong to HCLK_IOI or IOI3
tiles. A more general version of this script could be created, but that was
not the point of this script.
"""
from fasm import parse_fasm_filename
def main():
fasm_file = 'top.fasm'
fasm_model = list(parse_fasm_filename(fasm_file))
unknown_bits = {
'HCLK_IOI': {},
'IOI3': {},
}
total_unknown = 0
for l in fasm_model:
if l.annotations is None:
continue
annotations = {}
for annotation in l.annotations:
annotations[annotation.name] = annotation.value
if 'unknown_bit' not in annotations:
continue
total_unknown += 1
frame, word, bit = annotations['unknown_bit'].split('_')
frame = int(frame, 16)
word = int(word)
bit = int(bit)
frame_offset = frame % 0x80
base_frame = frame - frame_offset
# All remaining LiteX bits appear to be in this one IO bank, so limit
# the tool this this one IO bank.
assert base_frame == 0x00401580, hex(frame)
SIZE = 4
INITIAL_OFFSET = -2
if word == 50:
group = 'HCLK_IOI'
offset = 50
elif word < 50:
group = 'IOI3'
offset = ((word - INITIAL_OFFSET) // SIZE) * SIZE + INITIAL_OFFSET
else:
group = 'IOI3'
word -= 1
offset = ((word - INITIAL_OFFSET) // SIZE) * SIZE + INITIAL_OFFSET
offset += 1
word += 1
bit = '{}_{:02d}'.format(
frame_offset,
(word - offset) * 32 + bit,
)
if bit not in unknown_bits[group]:
unknown_bits[group][bit] = 0
unknown_bits[group][bit] += 1
print('Total unknown bits: {}'.format(total_unknown))
for group in unknown_bits:
print('Group {} (count = {}):'.format(group, len(unknown_bits[group])))
for bit in sorted(unknown_bits[group]):
print(' {} (count = {})'.format(bit, unknown_bits[group][bit]))
if __name__ == "__main__":
main()
|
Create script for generating remaining bit report.
This report is fairly fragile, but works well enough for the remaining
LiteX bits.
Signed-off-by: Keith Rothman <1bc19627a439baf17510dc2d0b2d250c96d445a5@users.noreply.github.com>""" Generates a missing feature/bit report for LiteX design.
This script is fairly fragile, because it depends on the specific observation
that all of the remaining bits appear to either belong to HCLK_IOI or IOI3
tiles. A more general version of this script could be created, but that was
not the point of this script.
"""
from fasm import parse_fasm_filename
def main():
fasm_file = 'top.fasm'
fasm_model = list(parse_fasm_filename(fasm_file))
unknown_bits = {
'HCLK_IOI': {},
'IOI3': {},
}
total_unknown = 0
for l in fasm_model:
if l.annotations is None:
continue
annotations = {}
for annotation in l.annotations:
annotations[annotation.name] = annotation.value
if 'unknown_bit' not in annotations:
continue
total_unknown += 1
frame, word, bit = annotations['unknown_bit'].split('_')
frame = int(frame, 16)
word = int(word)
bit = int(bit)
frame_offset = frame % 0x80
base_frame = frame - frame_offset
# All remaining LiteX bits appear to be in this one IO bank, so limit
# the tool this this one IO bank.
assert base_frame == 0x00401580, hex(frame)
SIZE = 4
INITIAL_OFFSET = -2
if word == 50:
group = 'HCLK_IOI'
offset = 50
elif word < 50:
group = 'IOI3'
offset = ((word - INITIAL_OFFSET) // SIZE) * SIZE + INITIAL_OFFSET
else:
group = 'IOI3'
word -= 1
offset = ((word - INITIAL_OFFSET) // SIZE) * SIZE + INITIAL_OFFSET
offset += 1
word += 1
bit = '{}_{:02d}'.format(
frame_offset,
(word - offset) * 32 + bit,
)
if bit not in unknown_bits[group]:
unknown_bits[group][bit] = 0
unknown_bits[group][bit] += 1
print('Total unknown bits: {}'.format(total_unknown))
for group in unknown_bits:
print('Group {} (count = {}):'.format(group, len(unknown_bits[group])))
for bit in sorted(unknown_bits[group]):
print(' {} (count = {})'.format(bit, unknown_bits[group][bit]))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Create script for generating remaining bit report.
This report is fairly fragile, but works well enough for the remaining
LiteX bits.
Signed-off-by: Keith Rothman <1bc19627a439baf17510dc2d0b2d250c96d445a5@users.noreply.github.com><commit_after>""" Generates a missing feature/bit report for LiteX design.
This script is fairly fragile, because it depends on the specific observation
that all of the remaining bits appear to either belong to HCLK_IOI or IOI3
tiles. A more general version of this script could be created, but that was
not the point of this script.
"""
from fasm import parse_fasm_filename
def main():
fasm_file = 'top.fasm'
fasm_model = list(parse_fasm_filename(fasm_file))
unknown_bits = {
'HCLK_IOI': {},
'IOI3': {},
}
total_unknown = 0
for l in fasm_model:
if l.annotations is None:
continue
annotations = {}
for annotation in l.annotations:
annotations[annotation.name] = annotation.value
if 'unknown_bit' not in annotations:
continue
total_unknown += 1
frame, word, bit = annotations['unknown_bit'].split('_')
frame = int(frame, 16)
word = int(word)
bit = int(bit)
frame_offset = frame % 0x80
base_frame = frame - frame_offset
# All remaining LiteX bits appear to be in this one IO bank, so limit
# the tool this this one IO bank.
assert base_frame == 0x00401580, hex(frame)
SIZE = 4
INITIAL_OFFSET = -2
if word == 50:
group = 'HCLK_IOI'
offset = 50
elif word < 50:
group = 'IOI3'
offset = ((word - INITIAL_OFFSET) // SIZE) * SIZE + INITIAL_OFFSET
else:
group = 'IOI3'
word -= 1
offset = ((word - INITIAL_OFFSET) // SIZE) * SIZE + INITIAL_OFFSET
offset += 1
word += 1
bit = '{}_{:02d}'.format(
frame_offset,
(word - offset) * 32 + bit,
)
if bit not in unknown_bits[group]:
unknown_bits[group][bit] = 0
unknown_bits[group][bit] += 1
print('Total unknown bits: {}'.format(total_unknown))
for group in unknown_bits:
print('Group {} (count = {}):'.format(group, len(unknown_bits[group])))
for bit in sorted(unknown_bits[group]):
print(' {} (count = {})'.format(bit, unknown_bits[group][bit]))
if __name__ == "__main__":
main()
|
|
e84abb1b67586d3a3620ce6a21e3314527083218
|
doc/examples/plot_threshold_adaptive.py
|
doc/examples/plot_threshold_adaptive.py
|
"""
=====================
Adaptive Thresholding
=====================
Thresholding is the simplest way to segment objects from a background. If that
background is relatively uniform, then you can use a global threshold value to
binarize the image by pixel-intensity. If there's large variation in the
background intensity, however, adaptive thresholding (a.k.a. local or dynamic
thresholding) may produce better results.
Here, we binarize an image using the `threshold_adaptive` function, which
calculates thresholds in regions of size `block_size` surrounding each pixel
(i.e. local neighborhoods). Each threshold value is the weighted mean of the
local neighborhood minus an offset value.
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.filter import threshold_otsu, threshold_adaptive
image = data.text()
global_thresh = threshold_otsu(image)
binary_global = image > global_thresh
block_size = 40
binary_adaptive = threshold_adaptive(image, block_size, offset=10)
fig, axes = plt.subplots(nrows=3, figsize=(7, 8))
ax0, ax1, ax2 = axes
plt.gray()
ax0.imshow(image)
ax0.set_title('Image')
ax1.imshow(binary_global)
ax1.set_title('Global thresholding')
ax2.imshow(binary_adaptive)
ax2.set_title('Adaptive thresholding')
for ax in axes:
ax.axis('off')
plt.show()
|
Add example of adaptive thresholding.
|
DOC: Add example of adaptive thresholding.
|
Python
|
bsd-3-clause
|
Hiyorimi/scikit-image,ofgulban/scikit-image,rjeli/scikit-image,newville/scikit-image,almarklein/scikit-image,youprofit/scikit-image,chintak/scikit-image,dpshelio/scikit-image,emon10005/scikit-image,vighneshbirodkar/scikit-image,ClinicalGraphics/scikit-image,juliusbierk/scikit-image,robintw/scikit-image,keflavich/scikit-image,ajaybhat/scikit-image,keflavich/scikit-image,warmspringwinds/scikit-image,Midafi/scikit-image,SamHames/scikit-image,robintw/scikit-image,chintak/scikit-image,bennlich/scikit-image,SamHames/scikit-image,paalge/scikit-image,WarrenWeckesser/scikits-image,almarklein/scikit-image,paalge/scikit-image,chriscrosscutler/scikit-image,michaelaye/scikit-image,chintak/scikit-image,ofgulban/scikit-image,almarklein/scikit-image,chriscrosscutler/scikit-image,youprofit/scikit-image,jwiggins/scikit-image,SamHames/scikit-image,juliusbierk/scikit-image,michaelpacer/scikit-image,newville/scikit-image,oew1v07/scikit-image,emmanuelle/scikits.image,dpshelio/scikit-image,bsipocz/scikit-image,bennlich/scikit-image,pratapvardhan/scikit-image,Britefury/scikit-image,emmanuelle/scikits.image,emmanuelle/scikits.image,vighneshbirodkar/scikit-image,ajaybhat/scikit-image,ClinicalGraphics/scikit-image,rjeli/scikit-image,rjeli/scikit-image,emmanuelle/scikits.image,WarrenWeckesser/scikits-image,bsipocz/scikit-image,chintak/scikit-image,Hiyorimi/scikit-image,vighneshbirodkar/scikit-image,GaZ3ll3/scikit-image,almarklein/scikit-image,ofgulban/scikit-image,GaZ3ll3/scikit-image,paalge/scikit-image,Britefury/scikit-image,pratapvardhan/scikit-image,Midafi/scikit-image,warmspringwinds/scikit-image,SamHames/scikit-image,michaelpacer/scikit-image,michaelaye/scikit-image,blink1073/scikit-image,emon10005/scikit-image,blink1073/scikit-image,jwiggins/scikit-image,oew1v07/scikit-image
|
DOC: Add example of adaptive thresholding.
|
"""
=====================
Adaptive Thresholding
=====================
Thresholding is the simplest way to segment objects from a background. If that
background is relatively uniform, then you can use a global threshold value to
binarize the image by pixel-intensity. If there's large variation in the
background intensity, however, adaptive thresholding (a.k.a. local or dynamic
thresholding) may produce better results.
Here, we binarize an image using the `threshold_adaptive` function, which
calculates thresholds in regions of size `block_size` surrounding each pixel
(i.e. local neighborhoods). Each threshold value is the weighted mean of the
local neighborhood minus an offset value.
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.filter import threshold_otsu, threshold_adaptive
image = data.text()
global_thresh = threshold_otsu(image)
binary_global = image > global_thresh
block_size = 40
binary_adaptive = threshold_adaptive(image, block_size, offset=10)
fig, axes = plt.subplots(nrows=3, figsize=(7, 8))
ax0, ax1, ax2 = axes
plt.gray()
ax0.imshow(image)
ax0.set_title('Image')
ax1.imshow(binary_global)
ax1.set_title('Global thresholding')
ax2.imshow(binary_adaptive)
ax2.set_title('Adaptive thresholding')
for ax in axes:
ax.axis('off')
plt.show()
|
<commit_before><commit_msg>DOC: Add example of adaptive thresholding.<commit_after>
|
"""
=====================
Adaptive Thresholding
=====================
Thresholding is the simplest way to segment objects from a background. If that
background is relatively uniform, then you can use a global threshold value to
binarize the image by pixel-intensity. If there's large variation in the
background intensity, however, adaptive thresholding (a.k.a. local or dynamic
thresholding) may produce better results.
Here, we binarize an image using the `threshold_adaptive` function, which
calculates thresholds in regions of size `block_size` surrounding each pixel
(i.e. local neighborhoods). Each threshold value is the weighted mean of the
local neighborhood minus an offset value.
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.filter import threshold_otsu, threshold_adaptive
image = data.text()
global_thresh = threshold_otsu(image)
binary_global = image > global_thresh
block_size = 40
binary_adaptive = threshold_adaptive(image, block_size, offset=10)
fig, axes = plt.subplots(nrows=3, figsize=(7, 8))
ax0, ax1, ax2 = axes
plt.gray()
ax0.imshow(image)
ax0.set_title('Image')
ax1.imshow(binary_global)
ax1.set_title('Global thresholding')
ax2.imshow(binary_adaptive)
ax2.set_title('Adaptive thresholding')
for ax in axes:
ax.axis('off')
plt.show()
|
DOC: Add example of adaptive thresholding."""
=====================
Adaptive Thresholding
=====================
Thresholding is the simplest way to segment objects from a background. If that
background is relatively uniform, then you can use a global threshold value to
binarize the image by pixel-intensity. If there's large variation in the
background intensity, however, adaptive thresholding (a.k.a. local or dynamic
thresholding) may produce better results.
Here, we binarize an image using the `threshold_adaptive` function, which
calculates thresholds in regions of size `block_size` surrounding each pixel
(i.e. local neighborhoods). Each threshold value is the weighted mean of the
local neighborhood minus an offset value.
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.filter import threshold_otsu, threshold_adaptive
image = data.text()
global_thresh = threshold_otsu(image)
binary_global = image > global_thresh
block_size = 40
binary_adaptive = threshold_adaptive(image, block_size, offset=10)
fig, axes = plt.subplots(nrows=3, figsize=(7, 8))
ax0, ax1, ax2 = axes
plt.gray()
ax0.imshow(image)
ax0.set_title('Image')
ax1.imshow(binary_global)
ax1.set_title('Global thresholding')
ax2.imshow(binary_adaptive)
ax2.set_title('Adaptive thresholding')
for ax in axes:
ax.axis('off')
plt.show()
|
<commit_before><commit_msg>DOC: Add example of adaptive thresholding.<commit_after>"""
=====================
Adaptive Thresholding
=====================
Thresholding is the simplest way to segment objects from a background. If that
background is relatively uniform, then you can use a global threshold value to
binarize the image by pixel-intensity. If there's large variation in the
background intensity, however, adaptive thresholding (a.k.a. local or dynamic
thresholding) may produce better results.
Here, we binarize an image using the `threshold_adaptive` function, which
calculates thresholds in regions of size `block_size` surrounding each pixel
(i.e. local neighborhoods). Each threshold value is the weighted mean of the
local neighborhood minus an offset value.
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.filter import threshold_otsu, threshold_adaptive
image = data.text()
global_thresh = threshold_otsu(image)
binary_global = image > global_thresh
block_size = 40
binary_adaptive = threshold_adaptive(image, block_size, offset=10)
fig, axes = plt.subplots(nrows=3, figsize=(7, 8))
ax0, ax1, ax2 = axes
plt.gray()
ax0.imshow(image)
ax0.set_title('Image')
ax1.imshow(binary_global)
ax1.set_title('Global thresholding')
ax2.imshow(binary_adaptive)
ax2.set_title('Adaptive thresholding')
for ax in axes:
ax.axis('off')
plt.show()
|
|
2f37f0ae1fc024b97de4c783c8f162c02c2c3a2d
|
hemresadapter.py
|
hemresadapter.py
|
# coding=utf-8
# description: functions to interact with the newsletter subscriptions in jdwebsite (Hemres),
# via Hemres management commands
import os
import subprocess
import configparser
class HemresAdapter(object):
def __init__(self):
SCRIPTDIR = os.path.dirname(os.path.realpath(__file__))
config_path = os.path.join(SCRIPTDIR, 'ledenlijst.cfg')
assert(os.path.exists(config_path))
config = configparser.RawConfigParser()
config.read(config_path)
website_config = dict(config.items('jdwebsite'))
self.python_bin_filepath = website_config['python_bin_filepath']
assert(os.path.exists(self.python_bin_filepath))
self.jdwebsite_manage_filepath = website_config['jdwebsite_manage_filepath']
assert(os.path.exists(self.jdwebsite_manage_filepath))
def add_member_to_list(self, member_id, list_label):
print('add_member_from_list')
subprocess.call([self.python_bin_filepath, self.jdwebsite_manage_filepath, "janeus_subscribe", str(member_id), list_label])
def remove_member_from_list(self, member_id, list_label):
print('remove_member_from_list')
subprocess.call([self.python_bin_filepath, self.jdwebsite_manage_filepath, "janeus_unsubscribe", str(member_id), list_label])
def move_member(self, member_id, list_label_from, list_label_to):
print('move_member')
self.remove_member_from_list(member_id, list_label_from)
self.add_member_to_list(member_id, list_label_to)
def test():
hemres = HemresAdapter()
hemres.add_member_to_list(1, 'UTRECHT')
hemres.remove_member_from_list(1, 'UTRECHT')
hemres.move_member(1, 'AMSTERDAM', 'UTRECHT')
if __name__ == '__main__':
print('main()')
test();
|
Create hemres (jdwebsite newsletter) interface adapter
|
Create hemres (jdwebsite newsletter) interface adapter
|
Python
|
mit
|
jonge-democraten/jdleden,jonge-democraten/jdleden
|
Create hemres (jdwebsite newsletter) interface adapter
|
# coding=utf-8
# description: functions to interact with the newsletter subscriptions in jdwebsite (Hemres),
# via Hemres management commands
import os
import subprocess
import configparser
class HemresAdapter(object):
def __init__(self):
SCRIPTDIR = os.path.dirname(os.path.realpath(__file__))
config_path = os.path.join(SCRIPTDIR, 'ledenlijst.cfg')
assert(os.path.exists(config_path))
config = configparser.RawConfigParser()
config.read(config_path)
website_config = dict(config.items('jdwebsite'))
self.python_bin_filepath = website_config['python_bin_filepath']
assert(os.path.exists(self.python_bin_filepath))
self.jdwebsite_manage_filepath = website_config['jdwebsite_manage_filepath']
assert(os.path.exists(self.jdwebsite_manage_filepath))
def add_member_to_list(self, member_id, list_label):
print('add_member_from_list')
subprocess.call([self.python_bin_filepath, self.jdwebsite_manage_filepath, "janeus_subscribe", str(member_id), list_label])
def remove_member_from_list(self, member_id, list_label):
print('remove_member_from_list')
subprocess.call([self.python_bin_filepath, self.jdwebsite_manage_filepath, "janeus_unsubscribe", str(member_id), list_label])
def move_member(self, member_id, list_label_from, list_label_to):
print('move_member')
self.remove_member_from_list(member_id, list_label_from)
self.add_member_to_list(member_id, list_label_to)
def test():
hemres = HemresAdapter()
hemres.add_member_to_list(1, 'UTRECHT')
hemres.remove_member_from_list(1, 'UTRECHT')
hemres.move_member(1, 'AMSTERDAM', 'UTRECHT')
if __name__ == '__main__':
print('main()')
test();
|
<commit_before><commit_msg>Create hemres (jdwebsite newsletter) interface adapter<commit_after>
|
# coding=utf-8
# description: functions to interact with the newsletter subscriptions in jdwebsite (Hemres),
# via Hemres management commands
import os
import subprocess
import configparser
class HemresAdapter(object):
def __init__(self):
SCRIPTDIR = os.path.dirname(os.path.realpath(__file__))
config_path = os.path.join(SCRIPTDIR, 'ledenlijst.cfg')
assert(os.path.exists(config_path))
config = configparser.RawConfigParser()
config.read(config_path)
website_config = dict(config.items('jdwebsite'))
self.python_bin_filepath = website_config['python_bin_filepath']
assert(os.path.exists(self.python_bin_filepath))
self.jdwebsite_manage_filepath = website_config['jdwebsite_manage_filepath']
assert(os.path.exists(self.jdwebsite_manage_filepath))
def add_member_to_list(self, member_id, list_label):
print('add_member_from_list')
subprocess.call([self.python_bin_filepath, self.jdwebsite_manage_filepath, "janeus_subscribe", str(member_id), list_label])
def remove_member_from_list(self, member_id, list_label):
print('remove_member_from_list')
subprocess.call([self.python_bin_filepath, self.jdwebsite_manage_filepath, "janeus_unsubscribe", str(member_id), list_label])
def move_member(self, member_id, list_label_from, list_label_to):
print('move_member')
self.remove_member_from_list(member_id, list_label_from)
self.add_member_to_list(member_id, list_label_to)
def test():
hemres = HemresAdapter()
hemres.add_member_to_list(1, 'UTRECHT')
hemres.remove_member_from_list(1, 'UTRECHT')
hemres.move_member(1, 'AMSTERDAM', 'UTRECHT')
if __name__ == '__main__':
print('main()')
test();
|
Create hemres (jdwebsite newsletter) interface adapter# coding=utf-8
# description: functions to interact with the newsletter subscriptions in jdwebsite (Hemres),
# via Hemres management commands
import os
import subprocess
import configparser
class HemresAdapter(object):
def __init__(self):
SCRIPTDIR = os.path.dirname(os.path.realpath(__file__))
config_path = os.path.join(SCRIPTDIR, 'ledenlijst.cfg')
assert(os.path.exists(config_path))
config = configparser.RawConfigParser()
config.read(config_path)
website_config = dict(config.items('jdwebsite'))
self.python_bin_filepath = website_config['python_bin_filepath']
assert(os.path.exists(self.python_bin_filepath))
self.jdwebsite_manage_filepath = website_config['jdwebsite_manage_filepath']
assert(os.path.exists(self.jdwebsite_manage_filepath))
def add_member_to_list(self, member_id, list_label):
print('add_member_from_list')
subprocess.call([self.python_bin_filepath, self.jdwebsite_manage_filepath, "janeus_subscribe", str(member_id), list_label])
def remove_member_from_list(self, member_id, list_label):
print('remove_member_from_list')
subprocess.call([self.python_bin_filepath, self.jdwebsite_manage_filepath, "janeus_unsubscribe", str(member_id), list_label])
def move_member(self, member_id, list_label_from, list_label_to):
print('move_member')
self.remove_member_from_list(member_id, list_label_from)
self.add_member_to_list(member_id, list_label_to)
def test():
hemres = HemresAdapter()
hemres.add_member_to_list(1, 'UTRECHT')
hemres.remove_member_from_list(1, 'UTRECHT')
hemres.move_member(1, 'AMSTERDAM', 'UTRECHT')
if __name__ == '__main__':
print('main()')
test();
|
<commit_before><commit_msg>Create hemres (jdwebsite newsletter) interface adapter<commit_after># coding=utf-8
# description: functions to interact with the newsletter subscriptions in jdwebsite (Hemres),
# via Hemres management commands
import os
import subprocess
import configparser
class HemresAdapter(object):
def __init__(self):
SCRIPTDIR = os.path.dirname(os.path.realpath(__file__))
config_path = os.path.join(SCRIPTDIR, 'ledenlijst.cfg')
assert(os.path.exists(config_path))
config = configparser.RawConfigParser()
config.read(config_path)
website_config = dict(config.items('jdwebsite'))
self.python_bin_filepath = website_config['python_bin_filepath']
assert(os.path.exists(self.python_bin_filepath))
self.jdwebsite_manage_filepath = website_config['jdwebsite_manage_filepath']
assert(os.path.exists(self.jdwebsite_manage_filepath))
def add_member_to_list(self, member_id, list_label):
print('add_member_from_list')
subprocess.call([self.python_bin_filepath, self.jdwebsite_manage_filepath, "janeus_subscribe", str(member_id), list_label])
def remove_member_from_list(self, member_id, list_label):
print('remove_member_from_list')
subprocess.call([self.python_bin_filepath, self.jdwebsite_manage_filepath, "janeus_unsubscribe", str(member_id), list_label])
def move_member(self, member_id, list_label_from, list_label_to):
print('move_member')
self.remove_member_from_list(member_id, list_label_from)
self.add_member_to_list(member_id, list_label_to)
def test():
hemres = HemresAdapter()
hemres.add_member_to_list(1, 'UTRECHT')
hemres.remove_member_from_list(1, 'UTRECHT')
hemres.move_member(1, 'AMSTERDAM', 'UTRECHT')
if __name__ == '__main__':
print('main()')
test();
|
|
38db1be31407ecab2237c3cd75160f5ffeb59cb2
|
Split_Multipoint_CZI.py
|
Split_Multipoint_CZI.py
|
# @File(label="Input file") input
# @File(label="Output folder") output
# Splits multi-point CZI files into multiple TIFFs using Bio-Formats.
#
# Stefan Helfrich (University of Konstaz), 05/09/2016
from ij import IJ
from loci.plugins import BF
from loci.plugins.in import ImporterOptions
import os
srcPath = input.getAbsolutePath()
# using LOCI BioFormats
settings = ImporterOptions()
settings.setId(srcPath)
settings.setOpenAllSeries(True)
settings.setVirtual(True)
settings.setWindowless(True)
imps = BF.openImagePlus(settings)
for i in range(0, len(imps)):
currentImp = imps[i]
filename = os.path.split(srcPath)[1]
filenameWithoutExtension = os.path.splitext(filename)[0]
IJ.saveAs(currentImp, "TIFF", output.getAbsolutePath() + "/" + filenameWithoutExtension + "-" + str(i) + ".tif")
|
Add script to split multi-point CZIs
|
Add script to split multi-point CZIs
|
Python
|
bsd-2-clause
|
bic-kn/imagej-scripts
|
Add script to split multi-point CZIs
|
# @File(label="Input file") input
# @File(label="Output folder") output
# Splits multi-point CZI files into multiple TIFFs using Bio-Formats.
#
# Stefan Helfrich (University of Konstaz), 05/09/2016
from ij import IJ
from loci.plugins import BF
from loci.plugins.in import ImporterOptions
import os
srcPath = input.getAbsolutePath()
# using LOCI BioFormats
settings = ImporterOptions()
settings.setId(srcPath)
settings.setOpenAllSeries(True)
settings.setVirtual(True)
settings.setWindowless(True)
imps = BF.openImagePlus(settings)
for i in range(0, len(imps)):
currentImp = imps[i]
filename = os.path.split(srcPath)[1]
filenameWithoutExtension = os.path.splitext(filename)[0]
IJ.saveAs(currentImp, "TIFF", output.getAbsolutePath() + "/" + filenameWithoutExtension + "-" + str(i) + ".tif")
|
<commit_before><commit_msg>Add script to split multi-point CZIs<commit_after>
|
# @File(label="Input file") input
# @File(label="Output folder") output
# Splits multi-point CZI files into multiple TIFFs using Bio-Formats.
#
# Stefan Helfrich (University of Konstaz), 05/09/2016
from ij import IJ
from loci.plugins import BF
from loci.plugins.in import ImporterOptions
import os
srcPath = input.getAbsolutePath()
# using LOCI BioFormats
settings = ImporterOptions()
settings.setId(srcPath)
settings.setOpenAllSeries(True)
settings.setVirtual(True)
settings.setWindowless(True)
imps = BF.openImagePlus(settings)
for i in range(0, len(imps)):
currentImp = imps[i]
filename = os.path.split(srcPath)[1]
filenameWithoutExtension = os.path.splitext(filename)[0]
IJ.saveAs(currentImp, "TIFF", output.getAbsolutePath() + "/" + filenameWithoutExtension + "-" + str(i) + ".tif")
|
Add script to split multi-point CZIs# @File(label="Input file") input
# @File(label="Output folder") output
# Splits multi-point CZI files into multiple TIFFs using Bio-Formats.
#
# Stefan Helfrich (University of Konstaz), 05/09/2016
from ij import IJ
from loci.plugins import BF
from loci.plugins.in import ImporterOptions
import os
srcPath = input.getAbsolutePath()
# using LOCI BioFormats
settings = ImporterOptions()
settings.setId(srcPath)
settings.setOpenAllSeries(True)
settings.setVirtual(True)
settings.setWindowless(True)
imps = BF.openImagePlus(settings)
for i in range(0, len(imps)):
currentImp = imps[i]
filename = os.path.split(srcPath)[1]
filenameWithoutExtension = os.path.splitext(filename)[0]
IJ.saveAs(currentImp, "TIFF", output.getAbsolutePath() + "/" + filenameWithoutExtension + "-" + str(i) + ".tif")
|
<commit_before><commit_msg>Add script to split multi-point CZIs<commit_after># @File(label="Input file") input
# @File(label="Output folder") output
# Splits multi-point CZI files into multiple TIFFs using Bio-Formats.
#
# Stefan Helfrich (University of Konstaz), 05/09/2016
from ij import IJ
from loci.plugins import BF
from loci.plugins.in import ImporterOptions
import os
srcPath = input.getAbsolutePath()
# using LOCI BioFormats
settings = ImporterOptions()
settings.setId(srcPath)
settings.setOpenAllSeries(True)
settings.setVirtual(True)
settings.setWindowless(True)
imps = BF.openImagePlus(settings)
for i in range(0, len(imps)):
currentImp = imps[i]
filename = os.path.split(srcPath)[1]
filenameWithoutExtension = os.path.splitext(filename)[0]
IJ.saveAs(currentImp, "TIFF", output.getAbsolutePath() + "/" + filenameWithoutExtension + "-" + str(i) + ".tif")
|
|
d8168c182efb9ca6e05c9f8fdf253cb25ff6599d
|
Python/Soma2Numeros.py
|
Python/Soma2Numeros.py
|
# Problem
# Given an array/list A of n numbers and another number x, determines
# if exists two elements in A whose sum is exactly x
# Works only for different values
def solution1(values, expected):
dic = {}
for index, value in enumerate(values):
dic[value] = index
diff = expected - value
if diff not in dic:
continue
if dic[diff] != index:
return True
return False
# Works with repeated values
def solution2(values, expected):
dic = {}
for index, value in enumerate(values):
diff = expected - value
if diff not in dic:
dic[value] = index
continue
return True
return False
if __name__ == "__main__":
values = [42,5,9,9,16,16]
print("Solution 1")
print("Should be TRUE")
print(solution1(values, 14))
print(solution1(values, 25))
print(solution1(values, 47))
print(solution1(values, 58))
print("Should be FALSE")
print(solution1(values, 32))
print(solution1(values, 9))
print(solution1(values, 18))
print(solution1(values, 59))
print(solution2(values, 5))
print(solution2(values, 10))
print(solution2(values, 100))
print("Solution 2")
print("Should be TRUE")
print(solution2(values, 14))
print(solution2(values, 25))
print(solution2(values, 47))
print(solution2(values, 58))
print(solution2(values, 32))
print(solution2(values, 18))
print("Should be FALSE")
print(solution2(values, 10))
print(solution2(values, 9))
print(solution2(values, 59))
print(solution2(values, 5))
print(solution2(values, 100))
|
Add problema da soma de 2 numeros
|
Add problema da soma de 2 numeros
|
Python
|
mit
|
kelvins/Algoritmos-e-Estruturas-de-Dados,kelvins/Algoritmos-e-Estruturas-de-Dados,kelvins/Algoritmos-e-Estruturas-de-Dados,kelvins/Algoritmos-e-Estruturas-de-Dados,kelvins/Algoritmos-e-Estruturas-de-Dados,kelvins/Algoritmos-e-Estruturas-de-Dados,kelvins/Algoritmos-e-Estruturas-de-Dados,kelvins/Algoritmos-e-Estruturas-de-Dados,kelvins/Algoritmos-e-Estruturas-de-Dados,kelvins/Algoritmos-e-Estruturas-de-Dados
|
Add problema da soma de 2 numeros
|
# Problem
# Given an array/list A of n numbers and another number x, determines
# if exists two elements in A whose sum is exactly x
# Works only for different values
def solution1(values, expected):
dic = {}
for index, value in enumerate(values):
dic[value] = index
diff = expected - value
if diff not in dic:
continue
if dic[diff] != index:
return True
return False
# Works with repeated values
def solution2(values, expected):
dic = {}
for index, value in enumerate(values):
diff = expected - value
if diff not in dic:
dic[value] = index
continue
return True
return False
if __name__ == "__main__":
values = [42,5,9,9,16,16]
print("Solution 1")
print("Should be TRUE")
print(solution1(values, 14))
print(solution1(values, 25))
print(solution1(values, 47))
print(solution1(values, 58))
print("Should be FALSE")
print(solution1(values, 32))
print(solution1(values, 9))
print(solution1(values, 18))
print(solution1(values, 59))
print(solution2(values, 5))
print(solution2(values, 10))
print(solution2(values, 100))
print("Solution 2")
print("Should be TRUE")
print(solution2(values, 14))
print(solution2(values, 25))
print(solution2(values, 47))
print(solution2(values, 58))
print(solution2(values, 32))
print(solution2(values, 18))
print("Should be FALSE")
print(solution2(values, 10))
print(solution2(values, 9))
print(solution2(values, 59))
print(solution2(values, 5))
print(solution2(values, 100))
|
<commit_before><commit_msg>Add problema da soma de 2 numeros<commit_after>
|
# Problem
# Given an array/list A of n numbers and another number x, determines
# if exists two elements in A whose sum is exactly x
# Works only for different values
def solution1(values, expected):
dic = {}
for index, value in enumerate(values):
dic[value] = index
diff = expected - value
if diff not in dic:
continue
if dic[diff] != index:
return True
return False
# Works with repeated values
def solution2(values, expected):
dic = {}
for index, value in enumerate(values):
diff = expected - value
if diff not in dic:
dic[value] = index
continue
return True
return False
if __name__ == "__main__":
values = [42,5,9,9,16,16]
print("Solution 1")
print("Should be TRUE")
print(solution1(values, 14))
print(solution1(values, 25))
print(solution1(values, 47))
print(solution1(values, 58))
print("Should be FALSE")
print(solution1(values, 32))
print(solution1(values, 9))
print(solution1(values, 18))
print(solution1(values, 59))
print(solution2(values, 5))
print(solution2(values, 10))
print(solution2(values, 100))
print("Solution 2")
print("Should be TRUE")
print(solution2(values, 14))
print(solution2(values, 25))
print(solution2(values, 47))
print(solution2(values, 58))
print(solution2(values, 32))
print(solution2(values, 18))
print("Should be FALSE")
print(solution2(values, 10))
print(solution2(values, 9))
print(solution2(values, 59))
print(solution2(values, 5))
print(solution2(values, 100))
|
Add problema da soma de 2 numeros
# Problem
# Given an array/list A of n numbers and another number x, determines
# if exists two elements in A whose sum is exactly x
# Works only for different values
def solution1(values, expected):
dic = {}
for index, value in enumerate(values):
dic[value] = index
diff = expected - value
if diff not in dic:
continue
if dic[diff] != index:
return True
return False
# Works with repeated values
def solution2(values, expected):
dic = {}
for index, value in enumerate(values):
diff = expected - value
if diff not in dic:
dic[value] = index
continue
return True
return False
if __name__ == "__main__":
values = [42,5,9,9,16,16]
print("Solution 1")
print("Should be TRUE")
print(solution1(values, 14))
print(solution1(values, 25))
print(solution1(values, 47))
print(solution1(values, 58))
print("Should be FALSE")
print(solution1(values, 32))
print(solution1(values, 9))
print(solution1(values, 18))
print(solution1(values, 59))
print(solution2(values, 5))
print(solution2(values, 10))
print(solution2(values, 100))
print("Solution 2")
print("Should be TRUE")
print(solution2(values, 14))
print(solution2(values, 25))
print(solution2(values, 47))
print(solution2(values, 58))
print(solution2(values, 32))
print(solution2(values, 18))
print("Should be FALSE")
print(solution2(values, 10))
print(solution2(values, 9))
print(solution2(values, 59))
print(solution2(values, 5))
print(solution2(values, 100))
|
<commit_before><commit_msg>Add two-number sum problem<commit_after>
# Problem
# Given an array/list A of n numbers and another number x, determines
# if exists two elements in A whose sum is exactly x
# Works only for different values
def solution1(values, expected):
dic = {}
for index, value in enumerate(values):
dic[value] = index
diff = expected - value
if diff not in dic:
continue
if dic[diff] != index:
return True
return False
# Works with repeated values
def solution2(values, expected):
dic = {}
for index, value in enumerate(values):
diff = expected - value
if diff not in dic:
dic[value] = index
continue
return True
return False
if __name__ == "__main__":
values = [42,5,9,9,16,16]
print("Solution 1")
print("Should be TRUE")
print(solution1(values, 14))
print(solution1(values, 25))
print(solution1(values, 47))
print(solution1(values, 58))
print("Should be FALSE")
print(solution1(values, 32))
print(solution1(values, 9))
print(solution1(values, 18))
print(solution1(values, 59))
print(solution2(values, 5))
print(solution2(values, 10))
print(solution2(values, 100))
print("Solution 2")
print("Should be TRUE")
print(solution2(values, 14))
print(solution2(values, 25))
print(solution2(values, 47))
print(solution2(values, 58))
print(solution2(values, 32))
print(solution2(values, 18))
print("Should be FALSE")
print(solution2(values, 10))
print(solution2(values, 9))
print(solution2(values, 59))
print(solution2(values, 5))
print(solution2(values, 100))
|
|
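An aside on the two-sum record above: the same check is commonly written with a set in a single pass, which handles repeated values the way solution2 does. The sketch below is illustrative only and is not part of the recorded commit.

# Illustrative set-based two-sum (not from the recorded commit).
def has_pair_with_sum(values, expected):
    seen = set()
    for value in values:
        if expected - value in seen:  # the complement was seen earlier
            return True
        seen.add(value)
    return False

# Same sample data as the recorded tests.
assert has_pair_with_sum([42, 5, 9, 9, 16, 16], 32) is True    # 16 + 16
assert has_pair_with_sum([42, 5, 9, 9, 16, 16], 100) is False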
5f0788be20bad0cef4a31a88b7513da58822a157
|
buildbucket.py
|
buildbucket.py
|
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tool for interacting with Buildbucket.
Usage:
$ depot-tools-auth login https://cr-buildbucket.appspot.com
$ buildbucket.py \
put \
--bucket master.tryserver.chromium.linux \
--builder my-builder \
Puts a build into buildbucket for my-builder on tryserver.chromium.linux.
"""
import argparse
import json
import urlparse
import os
import sys
from third_party import httplib2
import auth
BUILDBUCKET_URL = 'https://cr-buildbucket.appspot.com'
PUT_BUILD_URL = urlparse.urljoin(
BUILDBUCKET_URL,
'_ah/api/buildbucket/v1/builds',
)
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
'-v',
'--verbose',
action='store_true',
)
subparsers = parser.add_subparsers(dest='command')
put_parser = subparsers.add_parser('put')
put_parser.add_argument(
'--bucket',
help=(
'The bucket to schedule the build on. Typically the master name, e.g.'
' master.tryserver.chromium.linux.'
),
required=True,
)
put_parser.add_argument(
'-n',
'--builder-name',
help='The builder to schedule the build on.',
required=True,
)
put_parser.add_argument(
'-p',
'--properties',
help='A file to load a JSON dict of properties from.',
)
args = parser.parse_args()
# TODO(smut): When more commands are implemented, refactor this.
assert args.command == 'put'
properties = {}
if args.properties:
try:
with open(args.properties) as fp:
properties.update(json.load(fp))
except (TypeError, ValueError):
sys.stderr.write('%s contained invalid JSON dict.\n' % args.properties)
raise
authenticator = auth.get_authenticator_for_host(
BUILDBUCKET_URL,
auth.make_auth_config(use_oauth2=True),
)
http = authenticator.authorize(httplib2.Http())
http.force_exception_to_status_code = True
response, content = http.request(
PUT_BUILD_URL,
'PUT',
body=json.dumps({
'bucket': args.bucket,
'parameters_json': json.dumps({
'builder_name': args.builder_name,
'properties': properties,
}),
}),
headers={'Content-Type': 'application/json'},
)
if args.verbose:
print content
return response.status != 200
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Add script for triggering Buildbucket builds
|
Add script for triggering Buildbucket builds
BUG=493885
TESTED=See https://paste.googleplex.com/5622248052359168
Review URL: https://codereview.chromium.org/1164363003
git-svn-id: fd409f4bdeea2bb50a5d34bb4d4bfc2046a5a3dd@295569 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
duongbaoduy/gtools,mlufei/depot_tools,aleonliao/depot_tools,azunite/chrome_build,azunite/chrome_build,disigma/depot_tools,hsharsha/depot_tools,chinmaygarde/depot_tools,kaiix/depot_tools,fracting/depot_tools,disigma/depot_tools,fracting/depot_tools,mlufei/depot_tools,aleonliao/depot_tools,ajohnson23/depot_tools,mlufei/depot_tools,Midrya/chromium,CoherentLabs/depot_tools,kaiix/depot_tools,azureplus/chromium_depot_tools,aleonliao/depot_tools,duongbaoduy/gtools,CoherentLabs/depot_tools,duanwujie/depot_tools,gcodetogit/depot_tools,SuYiling/chrome_depot_tools,chinmaygarde/depot_tools,azureplus/chromium_depot_tools,disigma/depot_tools,primiano/depot_tools,chinmaygarde/depot_tools,gcodetogit/depot_tools,SuYiling/chrome_depot_tools,primiano/depot_tools,hsharsha/depot_tools,azunite/chrome_build,fracting/depot_tools,duanwujie/depot_tools,duanwujie/depot_tools,primiano/depot_tools,cpanelli/-git-clone-https-chromium.googlesource.com-chromium-tools-depot_tools,Midrya/chromium,duongbaoduy/gtools,kaiix/depot_tools,hsharsha/depot_tools,SuYiling/chrome_depot_tools,Midrya/chromium,cpanelli/-git-clone-https-chromium.googlesource.com-chromium-tools-depot_tools,gcodetogit/depot_tools,ajohnson23/depot_tools,ajohnson23/depot_tools,azureplus/chromium_depot_tools,cpanelli/-git-clone-https-chromium.googlesource.com-chromium-tools-depot_tools
|
Add script for triggering Buildbucket builds
BUG=493885
TESTED=See https://paste.googleplex.com/5622248052359168
Review URL: https://codereview.chromium.org/1164363003
git-svn-id: fd409f4bdeea2bb50a5d34bb4d4bfc2046a5a3dd@295569 0039d316-1c4b-4281-b951-d872f2087c98
|
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tool for interacting with Buildbucket.
Usage:
$ depot-tools-auth login https://cr-buildbucket.appspot.com
$ buildbucket.py \
put \
--bucket master.tryserver.chromium.linux \
--builder my-builder \
Puts a build into buildbucket for my-builder on tryserver.chromium.linux.
"""
import argparse
import json
import urlparse
import os
import sys
from third_party import httplib2
import auth
BUILDBUCKET_URL = 'https://cr-buildbucket.appspot.com'
PUT_BUILD_URL = urlparse.urljoin(
BUILDBUCKET_URL,
'_ah/api/buildbucket/v1/builds',
)
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
'-v',
'--verbose',
action='store_true',
)
subparsers = parser.add_subparsers(dest='command')
put_parser = subparsers.add_parser('put')
put_parser.add_argument(
'--bucket',
help=(
'The bucket to schedule the build on. Typically the master name, e.g.'
' master.tryserver.chromium.linux.'
),
required=True,
)
put_parser.add_argument(
'-n',
'--builder-name',
help='The builder to schedule the build on.',
required=True,
)
put_parser.add_argument(
'-p',
'--properties',
help='A file to load a JSON dict of properties from.',
)
args = parser.parse_args()
# TODO(smut): When more commands are implemented, refactor this.
assert args.command == 'put'
properties = {}
if args.properties:
try:
with open(args.properties) as fp:
properties.update(json.load(fp))
except (TypeError, ValueError):
sys.stderr.write('%s contained invalid JSON dict.\n' % args.properties)
raise
authenticator = auth.get_authenticator_for_host(
BUILDBUCKET_URL,
auth.make_auth_config(use_oauth2=True),
)
http = authenticator.authorize(httplib2.Http())
http.force_exception_to_status_code = True
response, content = http.request(
PUT_BUILD_URL,
'PUT',
body=json.dumps({
'bucket': args.bucket,
'parameters_json': json.dumps({
'builder_name': args.builder_name,
'properties': properties,
}),
}),
headers={'Content-Type': 'application/json'},
)
if args.verbose:
print content
return response.status != 200
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add script for triggering Buildbucket builds
BUG=493885
TESTED=See https://paste.googleplex.com/5622248052359168
Review URL: https://codereview.chromium.org/1164363003
git-svn-id: fd409f4bdeea2bb50a5d34bb4d4bfc2046a5a3dd@295569 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tool for interacting with Buildbucket.
Usage:
$ depot-tools-auth login https://cr-buildbucket.appspot.com
$ buildbucket.py \
put \
--bucket master.tryserver.chromium.linux \
--builder my-builder \
Puts a build into buildbucket for my-builder on tryserver.chromium.linux.
"""
import argparse
import json
import urlparse
import os
import sys
from third_party import httplib2
import auth
BUILDBUCKET_URL = 'https://cr-buildbucket.appspot.com'
PUT_BUILD_URL = urlparse.urljoin(
BUILDBUCKET_URL,
'_ah/api/buildbucket/v1/builds',
)
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
'-v',
'--verbose',
action='store_true',
)
subparsers = parser.add_subparsers(dest='command')
put_parser = subparsers.add_parser('put')
put_parser.add_argument(
'--bucket',
help=(
'The bucket to schedule the build on. Typically the master name, e.g.'
' master.tryserver.chromium.linux.'
),
required=True,
)
put_parser.add_argument(
'-n',
'--builder-name',
help='The builder to schedule the build on.',
required=True,
)
put_parser.add_argument(
'-p',
'--properties',
help='A file to load a JSON dict of properties from.',
)
args = parser.parse_args()
# TODO(smut): When more commands are implemented, refactor this.
assert args.command == 'put'
properties = {}
if args.properties:
try:
with open(args.properties) as fp:
properties.update(json.load(fp))
except (TypeError, ValueError):
sys.stderr.write('%s contained invalid JSON dict.\n' % args.properties)
raise
authenticator = auth.get_authenticator_for_host(
BUILDBUCKET_URL,
auth.make_auth_config(use_oauth2=True),
)
http = authenticator.authorize(httplib2.Http())
http.force_exception_to_status_code = True
response, content = http.request(
PUT_BUILD_URL,
'PUT',
body=json.dumps({
'bucket': args.bucket,
'parameters_json': json.dumps({
'builder_name': args.builder_name,
'properties': properties,
}),
}),
headers={'Content-Type': 'application/json'},
)
if args.verbose:
print content
return response.status != 200
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Add script for triggering Buildbucket builds
BUG=493885
TESTED=See https://paste.googleplex.com/5622248052359168
Review URL: https://codereview.chromium.org/1164363003
git-svn-id: fd409f4bdeea2bb50a5d34bb4d4bfc2046a5a3dd@295569 0039d316-1c4b-4281-b951-d872f2087c98
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tool for interacting with Buildbucket.
Usage:
$ depot-tools-auth login https://cr-buildbucket.appspot.com
$ buildbucket.py \
put \
--bucket master.tryserver.chromium.linux \
--builder my-builder \
Puts a build into buildbucket for my-builder on tryserver.chromium.linux.
"""
import argparse
import json
import urlparse
import os
import sys
from third_party import httplib2
import auth
BUILDBUCKET_URL = 'https://cr-buildbucket.appspot.com'
PUT_BUILD_URL = urlparse.urljoin(
BUILDBUCKET_URL,
'_ah/api/buildbucket/v1/builds',
)
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
'-v',
'--verbose',
action='store_true',
)
subparsers = parser.add_subparsers(dest='command')
put_parser = subparsers.add_parser('put')
put_parser.add_argument(
'--bucket',
help=(
'The bucket to schedule the build on. Typically the master name, e.g.'
' master.tryserver.chromium.linux.'
),
required=True,
)
put_parser.add_argument(
'-n',
'--builder-name',
help='The builder to schedule the build on.',
required=True,
)
put_parser.add_argument(
'-p',
'--properties',
help='A file to load a JSON dict of properties from.',
)
args = parser.parse_args()
# TODO(smut): When more commands are implemented, refactor this.
assert args.command == 'put'
properties = {}
if args.properties:
try:
with open(args.properties) as fp:
properties.update(json.load(fp))
except (TypeError, ValueError):
sys.stderr.write('%s contained invalid JSON dict.\n' % args.properties)
raise
authenticator = auth.get_authenticator_for_host(
BUILDBUCKET_URL,
auth.make_auth_config(use_oauth2=True),
)
http = authenticator.authorize(httplib2.Http())
http.force_exception_to_status_code = True
response, content = http.request(
PUT_BUILD_URL,
'PUT',
body=json.dumps({
'bucket': args.bucket,
'parameters_json': json.dumps({
'builder_name': args.builder_name,
'properties': properties,
}),
}),
headers={'Content-Type': 'application/json'},
)
if args.verbose:
print content
return response.status != 200
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add script for triggering Buildbucket builds
BUG=493885
TESTED=See https://paste.googleplex.com/5622248052359168
Review URL: https://codereview.chromium.org/1164363003
git-svn-id: fd409f4bdeea2bb50a5d34bb4d4bfc2046a5a3dd@295569 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tool for interacting with Buildbucket.
Usage:
$ depot-tools-auth login https://cr-buildbucket.appspot.com
$ buildbucket.py \
put \
--bucket master.tryserver.chromium.linux \
--builder my-builder \
Puts a build into buildbucket for my-builder on tryserver.chromium.linux.
"""
import argparse
import json
import urlparse
import os
import sys
from third_party import httplib2
import auth
BUILDBUCKET_URL = 'https://cr-buildbucket.appspot.com'
PUT_BUILD_URL = urlparse.urljoin(
BUILDBUCKET_URL,
'_ah/api/buildbucket/v1/builds',
)
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
'-v',
'--verbose',
action='store_true',
)
subparsers = parser.add_subparsers(dest='command')
put_parser = subparsers.add_parser('put')
put_parser.add_argument(
'--bucket',
help=(
'The bucket to schedule the build on. Typically the master name, e.g.'
' master.tryserver.chromium.linux.'
),
required=True,
)
put_parser.add_argument(
'-n',
'--builder-name',
help='The builder to schedule the build on.',
required=True,
)
put_parser.add_argument(
'-p',
'--properties',
help='A file to load a JSON dict of properties from.',
)
args = parser.parse_args()
# TODO(smut): When more commands are implemented, refactor this.
assert args.command == 'put'
properties = {}
if args.properties:
try:
with open(args.properties) as fp:
properties.update(json.load(fp))
except (TypeError, ValueError):
sys.stderr.write('%s contained invalid JSON dict.\n' % args.properties)
raise
authenticator = auth.get_authenticator_for_host(
BUILDBUCKET_URL,
auth.make_auth_config(use_oauth2=True),
)
http = authenticator.authorize(httplib2.Http())
http.force_exception_to_status_code = True
response, content = http.request(
PUT_BUILD_URL,
'PUT',
body=json.dumps({
'bucket': args.bucket,
'parameters_json': json.dumps({
'builder_name': args.builder_name,
'properties': properties,
}),
}),
headers={'Content-Type': 'application/json'},
)
if args.verbose:
print content
return response.status != 200
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
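A note on the buildbucket.py record above: the --properties flag points at a JSON file whose dict is merged into parameters_json before the PUT request. The snippet below sketches the body the script would send for the usage shown in its docstring; the property values are made up and this is not part of the recorded commit.

# Hypothetical request body, built the same way buildbucket.py builds it.
import json

properties = {'revision': 'deadbeef', 'clobber': True}  # made-up contents of the -p file
body = json.dumps({
    'bucket': 'master.tryserver.chromium.linux',
    'parameters_json': json.dumps({
        'builder_name': 'my-builder',
        'properties': properties,
    }),
})
print(body)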
d49b23365a972931502329f47a3aa65b9170477e
|
openstack/common/middleware/catch_errors.py
|
openstack/common/middleware/catch_errors.py
|
# Copyright (c) 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Middleware that provides high-level error handling.
It catches all exceptions from subsequent applications in WSGI pipeline
to hide internal errors from API response.
"""
import webob.dec
import webob.exc
from openstack.common.gettextutils import _ # noqa
from openstack.common import log as logging
from openstack.common.middleware import base
LOG = logging.getLogger(__name__)
class CatchErrorsMiddleware(base.Middleware):
@webob.dec.wsgify
def __call__(self, req):
try:
response = req.get_response(self.application)
except Exception:
LOG.exception(_('An error occurred during '
'processing the request: %s'))
response = webob.exc.HTTPInternalServerError()
return response
|
# Copyright (c) 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Middleware that provides high-level error handling.
It catches all exceptions from subsequent applications in WSGI pipeline
to hide internal errors from API response.
"""
import webob.dec
import webob.exc
from openstack.common.gettextutils import _LE
from openstack.common import log as logging
from openstack.common.middleware import base
LOG = logging.getLogger(__name__)
class CatchErrorsMiddleware(base.Middleware):
@webob.dec.wsgify
def __call__(self, req):
try:
response = req.get_response(self.application)
except Exception:
LOG.exception(_LE('An error occurred during '
'processing the request: %s'))
response = webob.exc.HTTPInternalServerError()
return response
|
Update oslo log messages with translation domains
|
Update oslo log messages with translation domains
Update the incubator code to use different domains for log
messages at different levels.
Update the import exceptions setting for hacking to allow
multiple functions to be imported from gettextutils on one
line.
bp log-messages-translation-domain
Change-Id: I6ce0f4a59438612ce74c46b3ee9398bef24c0c19
|
Python
|
apache-2.0
|
varunarya10/oslo.middleware,openstack/oslo.middleware,chungg/oslo.middleware,JioCloud/oslo.middleware
|
# Copyright (c) 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Middleware that provides high-level error handling.
It catches all exceptions from subsequent applications in WSGI pipeline
to hide internal errors from API response.
"""
import webob.dec
import webob.exc
from openstack.common.gettextutils import _ # noqa
from openstack.common import log as logging
from openstack.common.middleware import base
LOG = logging.getLogger(__name__)
class CatchErrorsMiddleware(base.Middleware):
@webob.dec.wsgify
def __call__(self, req):
try:
response = req.get_response(self.application)
except Exception:
LOG.exception(_('An error occurred during '
'processing the request: %s'))
response = webob.exc.HTTPInternalServerError()
return response
Update oslo log messages with translation domains
Update the incubator code to use different domains for log
messages at different levels.
Update the import exceptions setting for hacking to allow
multiple functions to be imported from gettextutils on one
line.
bp log-messages-translation-domain
Change-Id: I6ce0f4a59438612ce74c46b3ee9398bef24c0c19
|
# Copyright (c) 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Middleware that provides high-level error handling.
It catches all exceptions from subsequent applications in WSGI pipeline
to hide internal errors from API response.
"""
import webob.dec
import webob.exc
from openstack.common.gettextutils import _LE
from openstack.common import log as logging
from openstack.common.middleware import base
LOG = logging.getLogger(__name__)
class CatchErrorsMiddleware(base.Middleware):
@webob.dec.wsgify
def __call__(self, req):
try:
response = req.get_response(self.application)
except Exception:
LOG.exception(_LE('An error occurred during '
'processing the request: %s'))
response = webob.exc.HTTPInternalServerError()
return response
|
<commit_before># Copyright (c) 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Middleware that provides high-level error handling.
It catches all exceptions from subsequent applications in WSGI pipeline
to hide internal errors from API response.
"""
import webob.dec
import webob.exc
from openstack.common.gettextutils import _ # noqa
from openstack.common import log as logging
from openstack.common.middleware import base
LOG = logging.getLogger(__name__)
class CatchErrorsMiddleware(base.Middleware):
@webob.dec.wsgify
def __call__(self, req):
try:
response = req.get_response(self.application)
except Exception:
LOG.exception(_('An error occurred during '
'processing the request: %s'))
response = webob.exc.HTTPInternalServerError()
return response
<commit_msg>Update oslo log messages with translation domains
Update the incubator code to use different domains for log
messages at different levels.
Update the import exceptions setting for hacking to allow
multiple functions to be imported from gettextutils on one
line.
bp log-messages-translation-domain
Change-Id: I6ce0f4a59438612ce74c46b3ee9398bef24c0c19<commit_after>
|
# Copyright (c) 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Middleware that provides high-level error handling.
It catches all exceptions from subsequent applications in WSGI pipeline
to hide internal errors from API response.
"""
import webob.dec
import webob.exc
from openstack.common.gettextutils import _LE
from openstack.common import log as logging
from openstack.common.middleware import base
LOG = logging.getLogger(__name__)
class CatchErrorsMiddleware(base.Middleware):
@webob.dec.wsgify
def __call__(self, req):
try:
response = req.get_response(self.application)
except Exception:
LOG.exception(_LE('An error occurred during '
'processing the request: %s'))
response = webob.exc.HTTPInternalServerError()
return response
|
# Copyright (c) 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Middleware that provides high-level error handling.
It catches all exceptions from subsequent applications in WSGI pipeline
to hide internal errors from API response.
"""
import webob.dec
import webob.exc
from openstack.common.gettextutils import _ # noqa
from openstack.common import log as logging
from openstack.common.middleware import base
LOG = logging.getLogger(__name__)
class CatchErrorsMiddleware(base.Middleware):
@webob.dec.wsgify
def __call__(self, req):
try:
response = req.get_response(self.application)
except Exception:
LOG.exception(_('An error occurred during '
'processing the request: %s'))
response = webob.exc.HTTPInternalServerError()
return response
Update oslo log messages with translation domains
Update the incubator code to use different domains for log
messages at different levels.
Update the import exceptions setting for hacking to allow
multiple functions to be imported from gettextutils on one
line.
bp log-messages-translation-domain
Change-Id: I6ce0f4a59438612ce74c46b3ee9398bef24c0c19
# Copyright (c) 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Middleware that provides high-level error handling.
It catches all exceptions from subsequent applications in WSGI pipeline
to hide internal errors from API response.
"""
import webob.dec
import webob.exc
from openstack.common.gettextutils import _LE
from openstack.common import log as logging
from openstack.common.middleware import base
LOG = logging.getLogger(__name__)
class CatchErrorsMiddleware(base.Middleware):
@webob.dec.wsgify
def __call__(self, req):
try:
response = req.get_response(self.application)
except Exception:
LOG.exception(_LE('An error occurred during '
'processing the request: %s'))
response = webob.exc.HTTPInternalServerError()
return response
|
<commit_before># Copyright (c) 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Middleware that provides high-level error handling.
It catches all exceptions from subsequent applications in WSGI pipeline
to hide internal errors from API response.
"""
import webob.dec
import webob.exc
from openstack.common.gettextutils import _ # noqa
from openstack.common import log as logging
from openstack.common.middleware import base
LOG = logging.getLogger(__name__)
class CatchErrorsMiddleware(base.Middleware):
@webob.dec.wsgify
def __call__(self, req):
try:
response = req.get_response(self.application)
except Exception:
LOG.exception(_('An error occurred during '
'processing the request: %s'))
response = webob.exc.HTTPInternalServerError()
return response
<commit_msg>Update oslo log messages with translation domains
Update the incubator code to use different domains for log
messages at different levels.
Update the import exceptions setting for hacking to allow
multiple functions to be imported from gettextutils on one
line.
bp log-messages-translation-domain
Change-Id: I6ce0f4a59438612ce74c46b3ee9398bef24c0c19<commit_after># Copyright (c) 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Middleware that provides high-level error handling.
It catches all exceptions from subsequent applications in WSGI pipeline
to hide internal errors from API response.
"""
import webob.dec
import webob.exc
from openstack.common.gettextutils import _LE
from openstack.common import log as logging
from openstack.common.middleware import base
LOG = logging.getLogger(__name__)
class CatchErrorsMiddleware(base.Middleware):
@webob.dec.wsgify
def __call__(self, req):
try:
response = req.get_response(self.application)
except Exception:
LOG.exception(_LE('An error occurred during '
'processing the request: %s'))
response = webob.exc.HTTPInternalServerError()
return response
|
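Background for the translation-domain record above: alongside the plain _ marker, the oslo incubator's gettextutils exposes level-specific markers; the record itself imports _LE for error-level messages, and companion markers such as _LI and _LW cover the other levels under the same blueprint. The sketch below illustrates the convention; the messages are made up, it requires the oslo incubator code to be importable, and it is not part of the recorded commit.

# Illustrative use of level-specific translation markers (assumes the oslo
# incubator's gettextutils and log modules are importable; _LI/_LW are the
# companion markers to the _LE seen in the record).
from openstack.common.gettextutils import _, _LI, _LW, _LE
from openstack.common import log as logging

LOG = logging.getLogger(__name__)

LOG.info(_LI('Processing request %s'), 'req-1')                # info-level domain
LOG.warning(_LW('Request %s is taking a long time'), 'req-1')  # warning-level domain
LOG.error(_LE('Request %s failed'), 'req-1')                   # error-level domain
user_message = _('Something went wrong')                       # user-facing text keeps plain _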
0cbce0ddc139dde1367155398d0c6a186408fab3
|
hug/test.py
|
hug/test.py
|
from falcon.testing import StartResponseMock, create_environ
from falcon import HTTP_METHODS
from urllib.parse import urlencode
import json
from hug.run import server
from functools import partial
def call(method, api_module, url, body='', headers=None, **params):
api = server(api_module)
response = StartResponseMock()
result = api(create_environ(path=url, method=method, headers=headers, query_string=urlencode(params)), response)
if result:
return json.loads(result[0].decode('utf8'))
else:
return response.status
for method in HTTP_METHODS:
globals()[method.lower()] = partial(call, method)
|
from falcon.testing import StartResponseMock, create_environ
from falcon import HTTP_METHODS
from urllib.parse import urlencode
import json
from hug.run import server
from functools import partial
def call(method, api_module, url, body='', headers=None, **params):
api = server(api_module)
response = StartResponseMock()
result = api(create_environ(path=url, method=method, headers=headers, query_string=urlencode(params), body=body),
response)
if result:
response.data = result[0].decode('utf8')
response.content_type = response.headers_dict['content-type']
if response.content_type == 'application/json':
response.data = json.loads(response.data)
return response
for method in HTTP_METHODS:
globals()[method.lower()] = partial(call, method)
|
Update call method to return entire response object, and work with non json data
|
Update call method to return entire response object, and work with non json data
|
Python
|
mit
|
shaunstanislaus/hug,janusnic/hug,origingod/hug,shaunstanislaus/hug,giserh/hug,yasoob/hug,timothycrosley/hug,timothycrosley/hug,philiptzou/hug,alisaifee/hug,yasoob/hug,jean/hug,MuhammadAlkarouri/hug,gbn972/hug,giserh/hug,gbn972/hug,alisaifee/hug,MuhammadAlkarouri/hug,STANAPO/hug,janusnic/hug,jean/hug,origingod/hug,STANAPO/hug,MuhammadAlkarouri/hug,philiptzou/hug,timothycrosley/hug
|
from falcon.testing import StartResponseMock, create_environ
from falcon import HTTP_METHODS
from urllib.parse import urlencode
import json
from hug.run import server
from functools import partial
def call(method, api_module, url, body='', headers=None, **params):
api = server(api_module)
response = StartResponseMock()
result = api(create_environ(path=url, method=method, headers=headers, query_string=urlencode(params)), response)
if result:
return json.loads(result[0].decode('utf8'))
else:
return response.status
for method in HTTP_METHODS:
globals()[method.lower()] = partial(call, method)
Update call method to return entire response object, and work with non json data
|
from falcon.testing import StartResponseMock, create_environ
from falcon import HTTP_METHODS
from urllib.parse import urlencode
import json
from hug.run import server
from functools import partial
def call(method, api_module, url, body='', headers=None, **params):
api = server(api_module)
response = StartResponseMock()
result = api(create_environ(path=url, method=method, headers=headers, query_string=urlencode(params), body=body),
response)
if result:
response.data = result[0].decode('utf8')
response.content_type = response.headers_dict['content-type']
if response.content_type == 'application/json':
response.data = json.loads(response.data)
return response
for method in HTTP_METHODS:
globals()[method.lower()] = partial(call, method)
|
<commit_before>from falcon.testing import StartResponseMock, create_environ
from falcon import HTTP_METHODS
from urllib.parse import urlencode
import json
from hug.run import server
from functools import partial
def call(method, api_module, url, body='', headers=None, **params):
api = server(api_module)
response = StartResponseMock()
result = api(create_environ(path=url, method=method, headers=headers, query_string=urlencode(params)), response)
if result:
return json.loads(result[0].decode('utf8'))
else:
return response.status
for method in HTTP_METHODS:
globals()[method.lower()] = partial(call, method)
<commit_msg>Update call method to return entire response object, and work with non json data<commit_after>
|
from falcon.testing import StartResponseMock, create_environ
from falcon import HTTP_METHODS
from urllib.parse import urlencode
import json
from hug.run import server
from functools import partial
def call(method, api_module, url, body='', headers=None, **params):
api = server(api_module)
response = StartResponseMock()
result = api(create_environ(path=url, method=method, headers=headers, query_string=urlencode(params), body=body),
response)
if result:
response.data = result[0].decode('utf8')
response.content_type = response.headers_dict['content-type']
if response.content_type == 'application/json':
response.data = json.loads(response.data)
return response
for method in HTTP_METHODS:
globals()[method.lower()] = partial(call, method)
|
from falcon.testing import StartResponseMock, create_environ
from falcon import HTTP_METHODS
from urllib.parse import urlencode
import json
from hug.run import server
from functools import partial
def call(method, api_module, url, body='', headers=None, **params):
api = server(api_module)
response = StartResponseMock()
result = api(create_environ(path=url, method=method, headers=headers, query_string=urlencode(params)), response)
if result:
return json.loads(result[0].decode('utf8'))
else:
return response.status
for method in HTTP_METHODS:
globals()[method.lower()] = partial(call, method)
Update call method to return entire response object, and work with non json data
from falcon.testing import StartResponseMock, create_environ
from falcon import HTTP_METHODS
from urllib.parse import urlencode
import json
from hug.run import server
from functools import partial
def call(method, api_module, url, body='', headers=None, **params):
api = server(api_module)
response = StartResponseMock()
result = api(create_environ(path=url, method=method, headers=headers, query_string=urlencode(params), body=body),
response)
if result:
response.data = result[0].decode('utf8')
response.content_type = response.headers_dict['content-type']
if response.content_type == 'application/json':
response.data = json.loads(response.data)
return response
for method in HTTP_METHODS:
globals()[method.lower()] = partial(call, method)
|
<commit_before>from falcon.testing import StartResponseMock, create_environ
from falcon import HTTP_METHODS
from urllib.parse import urlencode
import json
from hug.run import server
from functools import partial
def call(method, api_module, url, body='', headers=None, **params):
api = server(api_module)
response = StartResponseMock()
result = api(create_environ(path=url, method=method, headers=headers, query_string=urlencode(params)), response)
if result:
return json.loads(result[0].decode('utf8'))
else:
return response.status
for method in HTTP_METHODS:
globals()[method.lower()] = partial(call, method)
<commit_msg>Update call method to return entire response object, and work with non json data<commit_after>from falcon.testing import StartResponseMock, create_environ
from falcon import HTTP_METHODS
from urllib.parse import urlencode
import json
from hug.run import server
from functools import partial
def call(method, api_module, url, body='', headers=None, **params):
api = server(api_module)
response = StartResponseMock()
result = api(create_environ(path=url, method=method, headers=headers, query_string=urlencode(params), body=body),
response)
if result:
response.data = result[0].decode('utf8')
response.content_type = response.headers_dict['content-type']
if response.content_type == 'application/json':
response.data = json.loads(response.data)
return response
for method in HTTP_METHODS:
globals()[method.lower()] = partial(call, method)
|
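To make the behaviour change in the hug record above concrete: the updated call() decodes the body and only parses it as JSON when the content type says so. The stand-alone sketch below demonstrates that pattern; it is illustrative only and not part of the recorded commit.

# Minimal illustration of the decode-by-content-type pattern used by call().
import json

def decode_body(raw_bytes, content_type):
    data = raw_bytes.decode('utf8')
    if content_type == 'application/json':
        data = json.loads(data)
    return data

assert decode_body(b'{"hello": "world"}', 'application/json') == {'hello': 'world'}
assert decode_body(b'plain text', 'text/plain') == 'plain text'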
e388f4c982d629da12f59626440d0d88e940db8d
|
hug/this.py
|
hug/this.py
|
"""hug/this.py.
The Zen of Hug
Copyright (C) 2019 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
ZEN_OF_HUG = """
Simple Things should be easy, complex things should be possible.
Complex things done often should be made simple.
Magic should be avoided.
Magic isn't magic as soon as its mechanics are universally understood.
Wrong documentation is worse than no documentation.
Everything should be documented.
All code should be tested.
All tests should be meaningful.
Consistency is more important than perfection.
It's okay to break consistency for practicality.
Clarity is more important than performance.
If we do our job right, there shouldn't need to be a choice.
Interfaces are one honking great idea -- let's do more of those!
"""
print(ZEN_OF_HUG)
|
Add initial Zen of Hug to satisfy HOPE-20
|
Add initial Zen of Hug to satisfy HOPE-20
|
Python
|
mit
|
timothycrosley/hug,timothycrosley/hug,timothycrosley/hug
|
Add initial Zen of Hug to satisfy HOPE-20
|
"""hug/this.py.
The Zen of Hug
Copyright (C) 2019 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
ZEN_OF_HUG = """
Simple Things should be easy, complex things should be possible.
Complex things done often should be made simple.
Magic should be avoided.
Magic isn't magic as soon as its mechanics are universally understood.
Wrong documentation is worse than no documentation.
Everything should be documented.
All code should be tested.
All tests should be meaningful.
Consistency is more important than perfection.
It's okay to break consistency for practicality.
Clarity is more important than performance.
If we do our job right, there shouldn't need to be a choice.
Interfaces are one honking great idea -- let's do more of those!
"""
print(ZEN_OF_HUG)
|
<commit_before><commit_msg>Add initial Zen of Hug to satisfy HOPE-20<commit_after>
|
"""hug/this.py.
The Zen of Hug
Copyright (C) 2019 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
ZEN_OF_HUG = """
Simple Things should be easy, complex things should be possible.
Complex things done often should be made simple.
Magic should be avoided.
Magic isn't magic as soon as its mechanics are universally understood.
Wrong documentation is worse than no documentation.
Everything should be documented.
All code should be tested.
All tests should be meaningful.
Consistency is more important than perfection.
It's okay to break consistency for practicality.
Clarity is more important than performance.
If we do our job right, there shouldn't need to be a choice.
Interfaces are one honking great idea -- let's do more of those!
"""
print(ZEN_OF_HUG)
|
Add initial Zen of Hug to satisfy HOPE-20
"""hug/this.py.
The Zen of Hug
Copyright (C) 2019 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
ZEN_OF_HUG = """
Simple Things should be easy, complex things should be possible.
Complex things done often should be made simple.
Magic should be avoided.
Magic isn't magic as soon as its mechanics are universally understood.
Wrong documentation is worse than no documentation.
Everything should be documented.
All code should be tested.
All tests should be meaningful.
Consistency is more important than perfection.
It's okay to break consistency for practicality.
Clarity is more important than performance.
If we do our job right, there shouldn't need to be a choice.
Interfaces are one honking great idea -- let's do more of those!
"""
print(ZEN_OF_HUG)
|
<commit_before><commit_msg>Add initial Zen of Hug to satisfy HOPE-20<commit_after>"""hug/this.py.
The Zen of Hug
Copyright (C) 2019 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
ZEN_OF_HUG = """
Simple Things should be easy, complex things should be possible.
Complex things done often should be made simple.
Magic should be avoided.
Magic isn't magic as soon as its mechanics are universally understood.
Wrong documentation is worse than no documentation.
Everything should be documented.
All code should be tested.
All tests should be meaningful.
Consistency is more important than perfection.
It's okay to break consistency for practicality.
Clarity is more important than performance.
If we do our job right, there shouldn't need to be a choice.
Interfaces are one honking great idea -- let's do more of those!
"""
print(ZEN_OF_HUG)
|
|
11f16de1876e76b3c52494397d5613aa80530736
|
sidewinder/streaming/map_and_reduce.py
|
sidewinder/streaming/map_and_reduce.py
|
#!/usr/bin/env python2.7
# coding=utf-8
LST = [4, 2, 3, 2, 1, 1, 5]
def map_reduce(lst):
"""
Map values and reduce based on key.
:param lst: list of values.
:return: map of values.
"""
dct = {}
for num in lst:
if num in dct:
print '{} key exists. updating value'.format(num)
dct[num] += 1
else:
print 'new key {}'.format(num)
dct[num] = 1
return dct
print map_reduce(LST)
|
Add map and reduce example.
|
Add map and reduce example.
|
Python
|
mit
|
vsamov/sidewinder,vsamov/sidewinder,vsamov/sidewinder
|
Add map and reduce example.
|
#!/usr/bin/env python2.7
# coding=utf-8
LST = [4, 2, 3, 2, 1, 1, 5]
def map_reduce(lst):
"""
Map values and reduce based on key.
:param lst: list of values.
:return: map of values.
"""
dct = {}
for num in lst:
if num in dct:
print '{} key exists. updating value'.format(num)
dct[num] += 1
else:
print 'new key {}'.format(num)
dct[num] = 1
return dct
print map_reduce(LST)
|
<commit_before><commit_msg>Add map and reduce example.<commit_after>
|
#!/usr/bin/env python2.7
# coding=utf-8
LST = [4, 2, 3, 2, 1, 1, 5]
def map_reduce(lst):
"""
Map values and reduce based on key.
:param lst: list of values.
:return: map of values.
"""
dct = {}
for num in lst:
if num in dct:
print '{} key exists. updating value'.format(num)
dct[num] += 1
else:
print 'new key {}'.format(num)
dct[num] = 1
return dct
print map_reduce(LST)
|
Add map and reduce example.
#!/usr/bin/env python2.7
# coding=utf-8
LST = [4, 2, 3, 2, 1, 1, 5]
def map_reduce(lst):
"""
Map values and reduce based on key.
:param lst: list of values.
:return: map of values.
"""
dct = {}
for num in lst:
if num in dct:
print '{} key exists. updating value'.format(num)
dct[num] += 1
else:
print 'new key {}'.format(num)
dct[num] = 1
return dct
print map_reduce(LST)
|
<commit_before><commit_msg>Add map and reduce example.<commit_after>#!/usr/bin/env python2.7
# coding=utf-8
LST = [4, 2, 3, 2, 1, 1, 5]
def map_reduce(lst):
"""
Map values and reduce based on key.
:param lst: list of values.
:return: map of values.
"""
dct = {}
for num in lst:
if num in dct:
print '{} key exists. updating value'.format(num)
dct[num] += 1
else:
print 'new key {}'.format(num)
dct[num] = 1
return dct
print map_reduce(LST)
|
|
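The counting idiom in the map/reduce record above is what collections.Counter provides directly; the comparison below is illustrative only and is not part of the recorded commit.

# Equivalent counting with collections.Counter (works on Python 2.7 and 3.x).
from collections import Counter

LST = [4, 2, 3, 2, 1, 1, 5]
assert dict(Counter(LST)) == {4: 1, 2: 2, 3: 1, 1: 2, 5: 1}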
f8d86e2339102232fe31aa4828f22a4338f803a0
|
src/poliastro/tests/test_frames.py
|
src/poliastro/tests/test_frames.py
|
import pytest
from astropy import units as u
from astropy.coordinates import (
CartesianRepresentation,
get_body_barycentric, solar_system_ephemeris
)
from astropy.tests.helper import assert_quantity_allclose
from poliastro.constants import J2000
from poliastro.bodies import (
Sun, Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, Neptune, Pluto,
)
from poliastro.frames import (
ICRS,
HCRS, MercuryICRS, VenusICRS, GCRS, MarsICRS, JupiterICRS, SaturnICRS, UranusICRS, NeptuneICRS, PlutoICRS
)
@pytest.mark.parametrize("body, frame", [
(Sun, HCRS),
(Mercury, MercuryICRS),
(Venus, VenusICRS),
(Earth, GCRS),
(Mars, MarsICRS),
(Jupiter, JupiterICRS),
(Saturn, SaturnICRS),
(Uranus, UranusICRS),
(Neptune, NeptuneICRS),
(Pluto, PlutoICRS),
])
def test_planetary_icrs_frame_is_just_translation(body, frame):
with solar_system_ephemeris.set("de432s"):
epoch = J2000
vector = CartesianRepresentation(x=100 * u.km, y=100 * u.km, z=100 * u.km)
vector_result = frame(vector, obstime=epoch).transform_to(ICRS).represent_as(CartesianRepresentation)
expected_result = get_body_barycentric(body.name, epoch) + vector
assert_quantity_allclose(vector_result.xyz, expected_result.xyz)
@pytest.mark.parametrize("body, frame", [
(Sun, HCRS),
(Mercury, MercuryICRS),
(Venus, VenusICRS),
(Earth, GCRS),
(Mars, MarsICRS),
(Jupiter, JupiterICRS),
(Saturn, SaturnICRS),
(Uranus, UranusICRS),
(Neptune, NeptuneICRS),
(Pluto, PlutoICRS),
])
def test_icrs_body_position_to_planetary_frame_yields_zeros(body, frame):
with solar_system_ephemeris.set("de432s"):
epoch = J2000
vector = get_body_barycentric(body.name, epoch)
vector_result = ICRS(vector).transform_to(frame(obstime=epoch)).represent_as(CartesianRepresentation)
assert_quantity_allclose(vector_result.xyz, [0, 0, 0] * u.km, atol=1e-7 * u.km)
|
Add tests for ICRS frames
|
Add tests for ICRS frames
|
Python
|
mit
|
Juanlu001/poliastro,poliastro/poliastro,newlawrence/poliastro,Juanlu001/poliastro,Juanlu001/poliastro,newlawrence/poliastro,newlawrence/poliastro
|
Add tests for ICRS frames
|
import pytest
from astropy import units as u
from astropy.coordinates import (
CartesianRepresentation,
get_body_barycentric, solar_system_ephemeris
)
from astropy.tests.helper import assert_quantity_allclose
from poliastro.constants import J2000
from poliastro.bodies import (
Sun, Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, Neptune, Pluto,
)
from poliastro.frames import (
ICRS,
HCRS, MercuryICRS, VenusICRS, GCRS, MarsICRS, JupiterICRS, SaturnICRS, UranusICRS, NeptuneICRS, PlutoICRS
)
@pytest.mark.parametrize("body, frame", [
(Sun, HCRS),
(Mercury, MercuryICRS),
(Venus, VenusICRS),
(Earth, GCRS),
(Mars, MarsICRS),
(Jupiter, JupiterICRS),
(Saturn, SaturnICRS),
(Uranus, UranusICRS),
(Neptune, NeptuneICRS),
(Pluto, PlutoICRS),
])
def test_planetary_icrs_frame_is_just_translation(body, frame):
with solar_system_ephemeris.set("de432s"):
epoch = J2000
vector = CartesianRepresentation(x=100 * u.km, y=100 * u.km, z=100 * u.km)
vector_result = frame(vector, obstime=epoch).transform_to(ICRS).represent_as(CartesianRepresentation)
expected_result = get_body_barycentric(body.name, epoch) + vector
assert_quantity_allclose(vector_result.xyz, expected_result.xyz)
@pytest.mark.parametrize("body, frame", [
(Sun, HCRS),
(Mercury, MercuryICRS),
(Venus, VenusICRS),
(Earth, GCRS),
(Mars, MarsICRS),
(Jupiter, JupiterICRS),
(Saturn, SaturnICRS),
(Uranus, UranusICRS),
(Neptune, NeptuneICRS),
(Pluto, PlutoICRS),
])
def test_icrs_body_position_to_planetary_frame_yields_zeros(body, frame):
with solar_system_ephemeris.set("de432s"):
epoch = J2000
vector = get_body_barycentric(body.name, epoch)
vector_result = ICRS(vector).transform_to(frame(obstime=epoch)).represent_as(CartesianRepresentation)
assert_quantity_allclose(vector_result.xyz, [0, 0, 0] * u.km, atol=1e-7 * u.km)
|
<commit_before><commit_msg>Add tests for ICRS frames<commit_after>
|
import pytest
from astropy import units as u
from astropy.coordinates import (
CartesianRepresentation,
get_body_barycentric, solar_system_ephemeris
)
from astropy.tests.helper import assert_quantity_allclose
from poliastro.constants import J2000
from poliastro.bodies import (
Sun, Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, Neptune, Pluto,
)
from poliastro.frames import (
ICRS,
HCRS, MercuryICRS, VenusICRS, GCRS, MarsICRS, JupiterICRS, SaturnICRS, UranusICRS, NeptuneICRS, PlutoICRS
)
@pytest.mark.parametrize("body, frame", [
(Sun, HCRS),
(Mercury, MercuryICRS),
(Venus, VenusICRS),
(Earth, GCRS),
(Mars, MarsICRS),
(Jupiter, JupiterICRS),
(Saturn, SaturnICRS),
(Uranus, UranusICRS),
(Neptune, NeptuneICRS),
(Pluto, PlutoICRS),
])
def test_planetary_icrs_frame_is_just_translation(body, frame):
with solar_system_ephemeris.set("de432s"):
epoch = J2000
vector = CartesianRepresentation(x=100 * u.km, y=100 * u.km, z=100 * u.km)
vector_result = frame(vector, obstime=epoch).transform_to(ICRS).represent_as(CartesianRepresentation)
expected_result = get_body_barycentric(body.name, epoch) + vector
assert_quantity_allclose(vector_result.xyz, expected_result.xyz)
@pytest.mark.parametrize("body, frame", [
(Sun, HCRS),
(Mercury, MercuryICRS),
(Venus, VenusICRS),
(Earth, GCRS),
(Mars, MarsICRS),
(Jupiter, JupiterICRS),
(Saturn, SaturnICRS),
(Uranus, UranusICRS),
(Neptune, NeptuneICRS),
(Pluto, PlutoICRS),
])
def test_icrs_body_position_to_planetary_frame_yields_zeros(body, frame):
with solar_system_ephemeris.set("de432s"):
epoch = J2000
vector = get_body_barycentric(body.name, epoch)
vector_result = ICRS(vector).transform_to(frame(obstime=epoch)).represent_as(CartesianRepresentation)
assert_quantity_allclose(vector_result.xyz, [0, 0, 0] * u.km, atol=1e-7 * u.km)
|
Add tests for ICRS frames
import pytest
from astropy import units as u
from astropy.coordinates import (
CartesianRepresentation,
get_body_barycentric, solar_system_ephemeris
)
from astropy.tests.helper import assert_quantity_allclose
from poliastro.constants import J2000
from poliastro.bodies import (
Sun, Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, Neptune, Pluto,
)
from poliastro.frames import (
ICRS,
HCRS, MercuryICRS, VenusICRS, GCRS, MarsICRS, JupiterICRS, SaturnICRS, UranusICRS, NeptuneICRS, PlutoICRS
)
@pytest.mark.parametrize("body, frame", [
(Sun, HCRS),
(Mercury, MercuryICRS),
(Venus, VenusICRS),
(Earth, GCRS),
(Mars, MarsICRS),
(Jupiter, JupiterICRS),
(Saturn, SaturnICRS),
(Uranus, UranusICRS),
(Neptune, NeptuneICRS),
(Pluto, PlutoICRS),
])
def test_planetary_icrs_frame_is_just_translation(body, frame):
with solar_system_ephemeris.set("de432s"):
epoch = J2000
vector = CartesianRepresentation(x=100 * u.km, y=100 * u.km, z=100 * u.km)
vector_result = frame(vector, obstime=epoch).transform_to(ICRS).represent_as(CartesianRepresentation)
expected_result = get_body_barycentric(body.name, epoch) + vector
assert_quantity_allclose(vector_result.xyz, expected_result.xyz)
@pytest.mark.parametrize("body, frame", [
(Sun, HCRS),
(Mercury, MercuryICRS),
(Venus, VenusICRS),
(Earth, GCRS),
(Mars, MarsICRS),
(Jupiter, JupiterICRS),
(Saturn, SaturnICRS),
(Uranus, UranusICRS),
(Neptune, NeptuneICRS),
(Pluto, PlutoICRS),
])
def test_icrs_body_position_to_planetary_frame_yields_zeros(body, frame):
with solar_system_ephemeris.set("de432s"):
epoch = J2000
vector = get_body_barycentric(body.name, epoch)
vector_result = ICRS(vector).transform_to(frame(obstime=epoch)).represent_as(CartesianRepresentation)
assert_quantity_allclose(vector_result.xyz, [0, 0, 0] * u.km, atol=1e-7 * u.km)
|
<commit_before><commit_msg>Add tests for ICRS frames<commit_after>import pytest
from astropy import units as u
from astropy.coordinates import (
CartesianRepresentation,
get_body_barycentric, solar_system_ephemeris
)
from astropy.tests.helper import assert_quantity_allclose
from poliastro.constants import J2000
from poliastro.bodies import (
Sun, Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, Neptune, Pluto,
)
from poliastro.frames import (
ICRS,
HCRS, MercuryICRS, VenusICRS, GCRS, MarsICRS, JupiterICRS, SaturnICRS, UranusICRS, NeptuneICRS, PlutoICRS
)
@pytest.mark.parametrize("body, frame", [
(Sun, HCRS),
(Mercury, MercuryICRS),
(Venus, VenusICRS),
(Earth, GCRS),
(Mars, MarsICRS),
(Jupiter, JupiterICRS),
(Saturn, SaturnICRS),
(Uranus, UranusICRS),
(Neptune, NeptuneICRS),
(Pluto, PlutoICRS),
])
def test_planetary_icrs_frame_is_just_translation(body, frame):
with solar_system_ephemeris.set("de432s"):
epoch = J2000
vector = CartesianRepresentation(x=100 * u.km, y=100 * u.km, z=100 * u.km)
vector_result = frame(vector, obstime=epoch).transform_to(ICRS).represent_as(CartesianRepresentation)
expected_result = get_body_barycentric(body.name, epoch) + vector
assert_quantity_allclose(vector_result.xyz, expected_result.xyz)
@pytest.mark.parametrize("body, frame", [
(Sun, HCRS),
(Mercury, MercuryICRS),
(Venus, VenusICRS),
(Earth, GCRS),
(Mars, MarsICRS),
(Jupiter, JupiterICRS),
(Saturn, SaturnICRS),
(Uranus, UranusICRS),
(Neptune, NeptuneICRS),
(Pluto, PlutoICRS),
])
def test_icrs_body_position_to_planetary_frame_yields_zeros(body, frame):
with solar_system_ephemeris.set("de432s"):
epoch = J2000
vector = get_body_barycentric(body.name, epoch)
vector_result = ICRS(vector).transform_to(frame(obstime=epoch)).represent_as(CartesianRepresentation)
assert_quantity_allclose(vector_result.xyz, [0, 0, 0] * u.km, atol=1e-7 * u.km)
|
|
2648944859e4b727855eb31eb337f0d27258b472
|
iatidq/dqpublishercondition.py
|
iatidq/dqpublishercondition.py
|
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from flask import request
from iatidq import db
import models
def configure_organisation_condition(pc):
with db.session.begin():
pc.description = request.form['description']
pc.organisation_id = int(request.form['organisation_id'])
pc.test_id = int(request.form['test_id'])
pc.operation = int(request.form['operation'])
pc.condition = request.form['condition']
pc.condition_value = request.form['condition_value']
pc.file = request.form['file']
pc.line = int(request.form['line'])
pc.active = bool(request.form['active'])
db.session.add(pc)
def get_publisher_condition(pc_id):
return db.session.query(
models.OrganisationCondition.id,
models.OrganisationCondition.description,
models.OrganisationCondition.operation,
models.OrganisationCondition.condition,
models.OrganisationCondition.condition_value,
models.Organisation.organisation_name.label("organisation_name"),
models.Organisation.organisation_code.label("organisation_code"),
models.Organisation.id.label("organisation_id"),
models.Test.name.label("test_name"),
models.Test.description.label("test_description"),
models.Test.id.label("test_id")
).filter_by(id=pc_id
).join(models.Organisation, models.Test).first()
def get_publisher_conditions():
return db.session.query(
models.OrganisationCondition.id,
models.OrganisationCondition.description,
models.Organisation.organisation_name.label("organisation_name"),
models.Organisation.organisation_code.label("organisation_code"),
models.Organisation.id.label("organisation_id"),
models.Test.name.label("test_name"),
models.Test.description.label("test_description"),
models.Test.id.label("test_id")
).order_by(
models.OrganisationCondition.id
).join(models.Organisation, models.Test
).all()
def get_publisher_feedback():
return db.session.query(
models.OrganisationConditionFeedback,
models.Organisation
).join(models.Organisation
).all()
def delete_publisher_condition(id):
pc = db.session.query(
models.OrganisationCondition
).filter(
models.OrganisationCondition.id==id
).first()
with db.session.begin():
db.session.delete(pc)
def delete_publisher_feedback(feedback):
with db.session.begin():
for fb in feedback:
db.session.delete(fb.OrganisationConditionFeedback)
|
Move database functions out to iatidq
|
Move database functions out to iatidq
|
Python
|
agpl-3.0
|
pwyf/IATI-Data-Quality,pwyf/IATI-Data-Quality,pwyf/IATI-Data-Quality,pwyf/IATI-Data-Quality
|
Move database functions out to iatidq
|
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from flask import request
from iatidq import db
import models
def configure_organisation_condition(pc):
with db.session.begin():
pc.description = request.form['description']
pc.organisation_id = int(request.form['organisation_id'])
pc.test_id = int(request.form['test_id'])
pc.operation = int(request.form['operation'])
pc.condition = request.form['condition']
pc.condition_value = request.form['condition_value']
pc.file = request.form['file']
pc.line = int(request.form['line'])
pc.active = bool(request.form['active'])
db.session.add(pc)
def get_publisher_condition(pc_id):
return db.session.query(
models.OrganisationCondition.id,
models.OrganisationCondition.description,
models.OrganisationCondition.operation,
models.OrganisationCondition.condition,
models.OrganisationCondition.condition_value,
models.Organisation.organisation_name.label("organisation_name"),
models.Organisation.organisation_code.label("organisation_code"),
models.Organisation.id.label("organisation_id"),
models.Test.name.label("test_name"),
models.Test.description.label("test_description"),
models.Test.id.label("test_id")
).filter_by(id=pc_id
).join(models.Organisation, models.Test).first()
def get_publisher_conditions():
return db.session.query(
models.OrganisationCondition.id,
models.OrganisationCondition.description,
models.Organisation.organisation_name.label("organisation_name"),
models.Organisation.organisation_code.label("organisation_code"),
models.Organisation.id.label("organisation_id"),
models.Test.name.label("test_name"),
models.Test.description.label("test_description"),
models.Test.id.label("test_id")
).order_by(
models.OrganisationCondition.id
).join(models.Organisation, models.Test
).all()
def get_publisher_feedback():
return db.session.query(
models.OrganisationConditionFeedback,
models.Organisation
).join(models.Organisation
).all()
def delete_publisher_condition(id):
pc = db.session.query(
models.OrganisationCondition
).filter(
models.OrganisationCondition.id==id
).first()
with db.session.begin():
db.session.delete(pc)
def delete_publisher_feedback(feedback):
with db.session.begin():
for fb in feedback:
db.session.delete(fb.OrganisationConditionFeedback)
|
<commit_before><commit_msg>Move database functions out to iatidq<commit_after>
|
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from flask import request
from iatidq import db
import models
def configure_organisation_condition(pc):
with db.session.begin():
pc.description = request.form['description']
pc.organisation_id = int(request.form['organisation_id'])
pc.test_id = int(request.form['test_id'])
pc.operation = int(request.form['operation'])
pc.condition = request.form['condition']
pc.condition_value = request.form['condition_value']
pc.file = request.form['file']
pc.line = int(request.form['line'])
pc.active = bool(request.form['active'])
db.session.add(pc)
def get_publisher_condition(pc_id):
return db.session.query(
models.OrganisationCondition.id,
models.OrganisationCondition.description,
models.OrganisationCondition.operation,
models.OrganisationCondition.condition,
models.OrganisationCondition.condition_value,
models.Organisation.organisation_name.label("organisation_name"),
models.Organisation.organisation_code.label("organisation_code"),
models.Organisation.id.label("organisation_id"),
models.Test.name.label("test_name"),
models.Test.description.label("test_description"),
models.Test.id.label("test_id")
).filter_by(id=pc_id
).join(models.Organisation, models.Test).first()
def get_publisher_conditions():
return db.session.query(
models.OrganisationCondition.id,
models.OrganisationCondition.description,
models.Organisation.organisation_name.label("organisation_name"),
models.Organisation.organisation_code.label("organisation_code"),
models.Organisation.id.label("organisation_id"),
models.Test.name.label("test_name"),
models.Test.description.label("test_description"),
models.Test.id.label("test_id")
).order_by(
models.OrganisationCondition.id
).join(models.Organisation, models.Test
).all()
def get_publisher_feedback():
return db.session.query(
models.OrganisationConditionFeedback,
models.Organisation
).join(models.Organisation
).all()
def delete_publisher_condition(id):
pc = db.session.query(
models.OrganisationCondition
).filter(
models.OrganisationCondition.id==id
).first()
with db.session.begin():
db.session.delete(pc)
def delete_publisher_feedback(feedback):
with db.session.begin():
for fb in feedback:
db.session.delete(fb.OrganisationConditionFeedback)
|
Move database functions out to iatidq
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from flask import request
from iatidq import db
import models
def configure_organisation_condition(pc):
with db.session.begin():
pc.description = request.form['description']
pc.organisation_id = int(request.form['organisation_id'])
pc.test_id = int(request.form['test_id'])
pc.operation = int(request.form['operation'])
pc.condition = request.form['condition']
pc.condition_value = request.form['condition_value']
pc.file = request.form['file']
pc.line = int(request.form['line'])
pc.active = bool(request.form['active'])
db.session.add(pc)
def get_publisher_condition(pc_id):
return db.session.query(
models.OrganisationCondition.id,
models.OrganisationCondition.description,
models.OrganisationCondition.operation,
models.OrganisationCondition.condition,
models.OrganisationCondition.condition_value,
models.Organisation.organisation_name.label("organisation_name"),
models.Organisation.organisation_code.label("organisation_code"),
models.Organisation.id.label("organisation_id"),
models.Test.name.label("test_name"),
models.Test.description.label("test_description"),
models.Test.id.label("test_id")
).filter_by(id=pc_id
).join(models.Organisation, models.Test).first()
def get_publisher_conditions():
return db.session.query(
models.OrganisationCondition.id,
models.OrganisationCondition.description,
models.Organisation.organisation_name.label("organisation_name"),
models.Organisation.organisation_code.label("organisation_code"),
models.Organisation.id.label("organisation_id"),
models.Test.name.label("test_name"),
models.Test.description.label("test_description"),
models.Test.id.label("test_id")
).order_by(
models.OrganisationCondition.id
).join(models.Organisation, models.Test
).all()
def get_publisher_feedback():
return db.session.query(
models.OrganisationConditionFeedback,
models.Organisation
).join(models.Organisation
).all()
def delete_publisher_condition(id):
pc = db.session.query(
models.OrganisationCondition
).filter(
models.OrganisationCondition.id==id
).first()
with db.session.begin():
db.session.delete(pc)
def delete_publisher_feedback(feedback):
with db.session.begin():
for fb in feedback:
db.session.delete(fb.OrganisationConditionFeedback)
|
<commit_before><commit_msg>Move database functions out to iatidq<commit_after>
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from flask import request
from iatidq import db
import models
def configure_organisation_condition(pc):
with db.session.begin():
pc.description = request.form['description']
pc.organisation_id = int(request.form['organisation_id'])
pc.test_id = int(request.form['test_id'])
pc.operation = int(request.form['operation'])
pc.condition = request.form['condition']
pc.condition_value = request.form['condition_value']
pc.file = request.form['file']
pc.line = int(request.form['line'])
pc.active = bool(request.form['active'])
db.session.add(pc)
def get_publisher_condition(pc_id):
return db.session.query(
models.OrganisationCondition.id,
models.OrganisationCondition.description,
models.OrganisationCondition.operation,
models.OrganisationCondition.condition,
models.OrganisationCondition.condition_value,
models.Organisation.organisation_name.label("organisation_name"),
models.Organisation.organisation_code.label("organisation_code"),
models.Organisation.id.label("organisation_id"),
models.Test.name.label("test_name"),
models.Test.description.label("test_description"),
models.Test.id.label("test_id")
).filter_by(id=pc_id
).join(models.Organisation, models.Test).first()
def get_publisher_conditions():
return db.session.query(
models.OrganisationCondition.id,
models.OrganisationCondition.description,
models.Organisation.organisation_name.label("organisation_name"),
models.Organisation.organisation_code.label("organisation_code"),
models.Organisation.id.label("organisation_id"),
models.Test.name.label("test_name"),
models.Test.description.label("test_description"),
models.Test.id.label("test_id")
).order_by(
models.OrganisationCondition.id
).join(models.Organisation, models.Test
).all()
def get_publisher_feedback():
return db.session.query(
models.OrganisationConditionFeedback,
models.Organisation
).join(models.Organisation
).all()
def delete_publisher_condition(id):
pc = db.session.query(
models.OrganisationCondition
).filter(
models.OrganisationCondition.id==id
).first()
with db.session.begin():
db.session.delete(pc)
def delete_publisher_feedback(feedback):
with db.session.begin():
for fb in feedback:
db.session.delete(fb.OrganisationConditionFeedback)
|
|
18dfdce4307e0f678f9a46fa4bccb51e31c4ff69
|
red.py
|
red.py
|
from logipy import logi_led
import time
import ctypes
logi_led.logi_led_init()
time.sleep(1) # Give the SDK a second to initialize
logi_led.logi_led_set_lighting(100, 0, 0)
logi_led.logi_led_shutdown()
|
Add simple script from the logipy page
|
Add simple script from the logipy page
|
Python
|
mit
|
louismerlin/keyboard-games,louismerlin/keyboard-games
|
Add simple script from the logipy page
|
from logipy import logi_led
import time
import ctypes
logi_led.logi_led_init()
time.sleep(1) # Give the SDK a second to initialize
logi_led.logi_led_set_lighting(100, 0, 0)
logi_led.logi_led_shutdown()
|
<commit_before><commit_msg>Add simple script from the logipy page<commit_after>
|
from logipy import logi_led
import time
import ctypes
logi_led.logi_led_init()
time.sleep(1) # Give the SDK a second to initialize
logi_led.logi_led_set_lighting(100, 0, 0)
logi_led.logi_led_shutdown()
|
Add simple script from the logipy page
from logipy import logi_led
import time
import ctypes
logi_led.logi_led_init()
time.sleep(1) # Give the SDK a second to initialize
logi_led.logi_led_set_lighting(100, 0, 0)
logi_led.logi_led_shutdown()
|
<commit_before><commit_msg>Add simple script from the logipy page<commit_after>from logipy import logi_led
import time
import ctypes
logi_led.logi_led_init()
time.sleep(1) # Give the SDK a second to initialize
logi_led.logi_led_set_lighting(100, 0, 0)
logi_led.logi_led_shutdown()
|
|
190dae03cc027e397c387294075e1a38ad8647da
|
migrations/versions/2017-07-14_11:37:46__df33f3613823.py
|
migrations/versions/2017-07-14_11:37:46__df33f3613823.py
|
"""empty message
Revision ID: df33f3613823
Revises: 15741cc426db
Create Date: 2017-07-14 11:37:46.347709
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'df33f3613823'
down_revision = '15741cc426db'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('patients', sa.Column('age', sa.Integer(), nullable=True))
op.add_column('patients', sa.Column('age_unit', sa.String(length=1), nullable=True))
op.add_column('patients', sa.Column('birth_date', sa.Date(), nullable=True))
op.add_column('patients', sa.Column('gender', sa.String(length=1), nullable=True))
op.add_column('users', sa.Column('password_hash', sa.String(length=128), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'password_hash')
op.drop_column('patients', 'gender')
op.drop_column('patients', 'birth_date')
op.drop_column('patients', 'age_unit')
op.drop_column('patients', 'age')
# ### end Alembic commands ###
|
Add migration for patient model
|
:rocket: Add migration for patient model
|
Python
|
mit
|
gems-uff/labsys,gems-uff/labsys,gems-uff/labsys
|
:rocket: Add migration for patient model
|
"""empty message
Revision ID: df33f3613823
Revises: 15741cc426db
Create Date: 2017-07-14 11:37:46.347709
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'df33f3613823'
down_revision = '15741cc426db'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('patients', sa.Column('age', sa.Integer(), nullable=True))
op.add_column('patients', sa.Column('age_unit', sa.String(length=1), nullable=True))
op.add_column('patients', sa.Column('birth_date', sa.Date(), nullable=True))
op.add_column('patients', sa.Column('gender', sa.String(length=1), nullable=True))
op.add_column('users', sa.Column('password_hash', sa.String(length=128), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'password_hash')
op.drop_column('patients', 'gender')
op.drop_column('patients', 'birth_date')
op.drop_column('patients', 'age_unit')
op.drop_column('patients', 'age')
# ### end Alembic commands ###
|
<commit_before><commit_msg>:rocket: Add migration for patient model<commit_after>
|
"""empty message
Revision ID: df33f3613823
Revises: 15741cc426db
Create Date: 2017-07-14 11:37:46.347709
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'df33f3613823'
down_revision = '15741cc426db'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('patients', sa.Column('age', sa.Integer(), nullable=True))
op.add_column('patients', sa.Column('age_unit', sa.String(length=1), nullable=True))
op.add_column('patients', sa.Column('birth_date', sa.Date(), nullable=True))
op.add_column('patients', sa.Column('gender', sa.String(length=1), nullable=True))
op.add_column('users', sa.Column('password_hash', sa.String(length=128), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'password_hash')
op.drop_column('patients', 'gender')
op.drop_column('patients', 'birth_date')
op.drop_column('patients', 'age_unit')
op.drop_column('patients', 'age')
# ### end Alembic commands ###
|
:rocket: Add migration for patient model"""empty message
Revision ID: df33f3613823
Revises: 15741cc426db
Create Date: 2017-07-14 11:37:46.347709
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'df33f3613823'
down_revision = '15741cc426db'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('patients', sa.Column('age', sa.Integer(), nullable=True))
op.add_column('patients', sa.Column('age_unit', sa.String(length=1), nullable=True))
op.add_column('patients', sa.Column('birth_date', sa.Date(), nullable=True))
op.add_column('patients', sa.Column('gender', sa.String(length=1), nullable=True))
op.add_column('users', sa.Column('password_hash', sa.String(length=128), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'password_hash')
op.drop_column('patients', 'gender')
op.drop_column('patients', 'birth_date')
op.drop_column('patients', 'age_unit')
op.drop_column('patients', 'age')
# ### end Alembic commands ###
|
<commit_before><commit_msg>:rocket: Add migration for patient model<commit_after>"""empty message
Revision ID: df33f3613823
Revises: 15741cc426db
Create Date: 2017-07-14 11:37:46.347709
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'df33f3613823'
down_revision = '15741cc426db'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('patients', sa.Column('age', sa.Integer(), nullable=True))
op.add_column('patients', sa.Column('age_unit', sa.String(length=1), nullable=True))
op.add_column('patients', sa.Column('birth_date', sa.Date(), nullable=True))
op.add_column('patients', sa.Column('gender', sa.String(length=1), nullable=True))
op.add_column('users', sa.Column('password_hash', sa.String(length=128), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'password_hash')
op.drop_column('patients', 'gender')
op.drop_column('patients', 'birth_date')
op.drop_column('patients', 'age_unit')
op.drop_column('patients', 'age')
# ### end Alembic commands ###
|
|
3aa10577f292a2962f66926fb021ed4a1b6fc493
|
lib/header/colors.py
|
lib/header/colors.py
|
"""ANSI escape sequence objects.
This is designed to place ANSI escape sequences in format strings.
Colors and attributes are selected by using object attributes in
format strings.
For example,
'{0.red}Hello, {0.green}World!{0.reset}'.format(colors())
This prints "Hello, " in red, and "World" in green, and resets the
color so it doesn't spill onto the next line. You can chain
attributes:
'{0.red.underline}Red, underline{0.reset}'.format(colors())
Colors will also be disabled if stdout is not a tty. If you want to
print to a different file descriptor, you can specify that file
instead, as an argument:
fp.write('{0.red}Hello{0.reset}\n'.format(colors(fp)))
"""
import sys
class ANSIColor(object):
reset = 0
bold = 1
italics = 3
underline = 4
inverse = 7
strikethrough = 9
nobold = 22
noitalics = 23
nounderline = 24
noinverse = 27
nostrikethrough = 29
black = 30
red = 31
green = 32
yellow = 33
blue = 34
magenta = 35
cyan = 36
white = 37
default = 39
bg_black = 40
bg_red = 41
bg_green = 42
bg_yellow = 43
bg_blue = 44
bg_magenta = 45
bg_cyan = 46
bg_white = 47
bg_default = 49
class _ANSIColors(object):
__slots__ = ['_colors']
def __init__(self, colors):
self._colors = tuple(colors)
def __getattr__(self, attr):
value = getattr(ANSIColor, attr)
return _ANSIColors(self._colors + (value,))
def __str__(self):
return '\x1b[{}m'.format(';'.join([str(c) for c in self._colors]))
def __repr__(self):
return '_Colors({!r})'.format(self._colors)
class _NoColors(object):
__slots__ = []
def __getattr__(self, attr):
getattr(ANSIColor, attr)
return self
def __str__(self):
return ''
def __repr__(self):
return '_NoColors()'
def colors(fp=None):
if fp is None:
fp = sys.stdout
if fp.isatty():
return _ANSIColors(())
return _NoColors()
|
Add ANSI escape sequence library
|
Add ANSI escape sequence library
|
Python
|
bsd-2-clause
|
depp/headerfix,depp/headerfix,depp/headerfix,depp/headerfix
|
Add ANSI escape sequence library
|
"""ANSI escape sequence objects.
This is designed to place ANSI escape sequences in format strings.
Colors and attributes are selected by using object attributes in
format strings.
For example,
'{0.red}Hello, {0.green}World!{0.reset}'.format(colors())
This prints "Hello, " in red, and "World" in green, and resets the
color so it doesn't spill onto the next line. You can chain
attributes:
'{0.red.underline}Red, underline{0.reset}'.format(colors())
Colors will also be disabled if stdout is not a tty. If you want to
print to a different file descriptor, you can specify that file
instead, as an argument:
fp.write('{0.red}Hello{0.reset}\n'.format(colors(fp)))
"""
import sys
class ANSIColor(object):
reset = 0
bold = 1
italics = 3
underline = 4
inverse = 7
strikethrough = 9
nobold = 22
noitalics = 23
nounderline = 24
noinverse = 27
nostrikethrough = 29
black = 30
red = 31
green = 32
yellow = 33
blue = 34
magenta = 35
cyan = 36
white = 37
default = 39
bg_black = 40
bg_red = 41
bg_green = 42
bg_yellow = 43
bg_blue = 44
bg_magenta = 45
bg_cyan = 46
bg_white = 47
bg_default = 49
class _ANSIColors(object):
__slots__ = ['_colors']
def __init__(self, colors):
self._colors = tuple(colors)
def __getattr__(self, attr):
value = getattr(ANSIColor, attr)
return _ANSIColors(self._colors + (value,))
def __str__(self):
return '\x1b[{}m'.format(';'.join([str(c) for c in self._colors]))
def __repr__(self):
return '_Colors({!r})'.format(self._colors)
class _NoColors(object):
__slots__ = []
def __getattr__(self, attr):
getattr(ANSIColor, attr)
return self
def __str__(self):
return ''
def __repr__(self):
return '_NoColors()'
def colors(fp=None):
if fp is None:
fp = sys.stdout
if fp.isatty():
return _ANSIColors(())
return _NoColors()
|
<commit_before><commit_msg>Add ANSI escape sequence library<commit_after>
|
"""ANSI escape sequence objects.
This is designed to place ANSI escape sequences in format strings.
Colors and attributes are selected by using object attributes in
format strings.
For example,
'{0.red}Hello, {0.green}World!{0.reset}'.format(colors())
This prints "Hello, " in red, and "World" in green, and resets the
color so it doesn't spill onto the next line. You can chain
attributes:
'{0.red.underline}Red, underline{0.reset}'.format(colors())
Colors will also be disabled if stdout is not a tty. If you want to
print to a different file descriptor, you can specify that file
instead, as an argument:
fp.write('{0.red}Hello{0.reset}\n'.format(colors(fp)))
"""
import sys
class ANSIColor(object):
reset = 0
bold = 1
italics = 3
underline = 4
inverse = 7
strikethrough = 9
nobold = 22
noitalics = 23
nounderline = 24
noinverse = 27
nostrikethrough = 29
black = 30
red = 31
green = 32
yellow = 33
blue = 34
magenta = 35
cyan = 36
white = 37
default = 39
bg_black = 40
bg_red = 41
bg_green = 42
bg_yellow = 43
bg_blue = 44
bg_magenta = 45
bg_cyan = 46
bg_white = 47
bg_default = 49
class _ANSIColors(object):
__slots__ = ['_colors']
def __init__(self, colors):
self._colors = tuple(colors)
def __getattr__(self, attr):
value = getattr(ANSIColor, attr)
return _ANSIColors(self._colors + (value,))
def __str__(self):
return '\x1b[{}m'.format(';'.join([str(c) for c in self._colors]))
def __repr__(self):
return '_Colors({!r})'.format(self._colors)
class _NoColors(object):
__slots__ = []
def __getattr__(self, attr):
getattr(ANSIColor, attr)
return self
def __str__(self):
return ''
def __repr__(self):
return '_NoColors()'
def colors(fp=None):
if fp is None:
fp = sys.stdout
if fp.isatty():
return _ANSIColors(())
return _NoColors()
|
Add ANSI escape sequence library"""ANSI escape sequence objects.
This is designed to place ANSI escape sequences in format strings.
Colors and attributes are selected by using object attributes in
format strings.
For example,
'{0.red}Hello, {0.green}World!{0.reset}'.format(colors())
This prints "Hello, " in red, and "World" in green, and resets the
color so it doesn't spill onto the next line. You can chain
attributes:
'{0.red.underline}Red, underline{0.reset}'.format(colors())
Colors will also be disabled if stdout is not a tty. If you want to
print to a different file descriptor, you can specify that file
instead, as an argument:
fp.write('{0.red}Hello{0.reset}\n'.format(colors(fp)))
"""
import sys
class ANSIColor(object):
reset = 0
bold = 1
italics = 3
underline = 4
inverse = 7
strikethrough = 9
nobold = 22
noitalics = 23
nounderline = 24
noinverse = 27
nostrikethrough = 29
black = 30
red = 31
green = 32
yellow = 33
blue = 34
magenta = 35
cyan = 36
white = 37
default = 39
bg_black = 40
bg_red = 41
bg_green = 42
bg_yellow = 43
bg_blue = 44
bg_magenta = 45
bg_cyan = 46
bg_white = 47
bg_default = 49
class _ANSIColors(object):
__slots__ = ['_colors']
def __init__(self, colors):
self._colors = tuple(colors)
def __getattr__(self, attr):
value = getattr(ANSIColor, attr)
return _ANSIColors(self._colors + (value,))
def __str__(self):
return '\x1b[{}m'.format(';'.join([str(c) for c in self._colors]))
def __repr__(self):
return '_Colors({!r})'.format(self._colors)
class _NoColors(object):
__slots__ = []
def __getattr__(self, attr):
getattr(ANSIColor, attr)
return self
def __str__(self):
return ''
def __repr__(self):
return '_NoColors()'
def colors(fp=None):
if fp is None:
fp = sys.stdout
if fp.isatty():
return _ANSIColors(())
return _NoColors()
|
<commit_before><commit_msg>Add ANSI escape sequence library<commit_after>"""ANSI escape sequence objects.
This is designed to place ANSI escape sequences in format strings.
Colors and attributes are selected by using object attributes in
format strings.
For example,
'{0.red}Hello, {0.green}World!{0.reset}'.format(colors())
This prints "Hello, " in red, and "World" in green, and resets the
color so it doesn't spill onto the next line. You can chain
attributes:
'{0.red.underline}Red, underline{0.reset}'.format(colors())
Colors will also be disabled if stdout is not a tty. If you want to
print to a different file descriptor, you can specify that file
instead, as an argument:
fp.write('{0.red}Hello{0.reset}\n'.format(colors(fp)))
"""
import sys
class ANSIColor(object):
reset = 0
bold = 1
italics = 3
underline = 4
inverse = 7
strikethrough = 9
nobold = 22
noitalics = 23
nounderline = 24
noinverse = 27
nostrikethrough = 29
black = 30
red = 31
green = 32
yellow = 33
blue = 34
magenta = 35
cyan = 36
white = 37
default = 39
bg_black = 40
bg_red = 41
bg_green = 42
bg_yellow = 43
bg_blue = 44
bg_magenta = 45
bg_cyan = 46
bg_white = 47
bg_default = 49
class _ANSIColors(object):
__slots__ = ['_colors']
def __init__(self, colors):
self._colors = tuple(colors)
def __getattr__(self, attr):
value = getattr(ANSIColor, attr)
return _ANSIColors(self._colors + (value,))
def __str__(self):
return '\x1b[{}m'.format(';'.join([str(c) for c in self._colors]))
def __repr__(self):
return '_Colors({!r})'.format(self._colors)
class _NoColors(object):
__slots__ = []
def __getattr__(self, attr):
getattr(ANSIColor, attr)
return self
def __str__(self):
return ''
def __repr__(self):
return '_NoColors()'
def colors(fp=None):
if fp is None:
fp = sys.stdout
if fp.isatty():
return _ANSIColors(())
return _NoColors()
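A minimal usage sketch for the helper above (illustrative only, not part of the recorded commit; the import path `colors` and the `report` helper are assumed for the example):
# illustrative sketch -- assumes the module above is importable as `colors`
from colors import colors

def report(ok, label, fp=None):
    # Chained attributes build a single escape sequence; when the target
    # is not a tty, _NoColors turns every attribute into an empty string.
    c = colors(fp)
    status = '{0.bold.green}OK{0.reset}' if ok else '{0.bold.red}FAIL{0.reset}'
    print(('[' + status + '] {1}').format(c, label))

report(True, 'build')
report(False, 'tests')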
|
|
6ac16fc33a6f887535a143cc7155c7ff910ca835
|
control/utils.py
|
control/utils.py
|
import csv
from datetime import datetime
import itertools
from django.http import StreamingHttpResponse
from django_object_actions import DjangoObjectActions
class Echo(object):
'''An object that implements just the write method of the file-like
interface.'''
def write(self, value):
'''Write the value by returning it, instead of storing it in a
buffer.'''
return value
class CsvExportAdminMixin(DjangoObjectActions):
'''A mix-in class for adding a CSV export button to a Django Admin page.'''
csv_header = None
def clean_csv_line(self, obj):
'''Subclass to override. Gets a model object, and returns a list
representing a line in the CSV file.'''
def get_csv_header(self):
'''Subclass can override. Returns a list representing the header of the
CSV. Can also set the `csv_header` class variable.'''
return self.csv_header
def export_csv(self, request, queryset):
rows = itertools.chain(
(self.get_csv_header(), ),
(self.clean_csv_line(obj) for obj in queryset)
)
pseudo_buffer = Echo()
writer = csv.writer(pseudo_buffer)
response = StreamingHttpResponse(
(writer.writerow(row) for row in rows),
content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s-%s.csv' % (
self.model.__name__, datetime.now().strftime('%Y-%m-%d'))
return response
export_csv.label = "Download"
export_csv.short_description = "Download an export of the data as CSV"
changelist_actions = ('export_csv', )
|
Add django admin mixin for exporting csvs
|
Add django admin mixin for exporting csvs
|
Python
|
bsd-3-clause
|
praekelt/ndoh-control,praekelt/ndoh-control,praekelt/ndoh-control,praekelt/ndoh-control
|
Add django admin mixin for exporting csvs
|
import csv
from datetime import datetime
import itertools
from django.http import StreamingHttpResponse
from django_object_actions import DjangoObjectActions
class Echo(object):
'''An object that implements just the write method of the file-like
interface.'''
def write(self, value):
'''Write the value by returning it, instead of storing it in a
buffer.'''
return value
class CsvExportAdminMixin(DjangoObjectActions):
'''A mix-in class for adding a CSV export button to a Django Admin page.'''
csv_header = None
def clean_csv_line(self, obj):
'''Subclass to override. Gets a model object, and returns a list
representing a line in the CSV file.'''
def get_csv_header(self):
'''Subclass can override. Returns a list representing the header of the
CSV. Can also set the `csv_header` class variable.'''
return self.csv_header
def export_csv(self, request, queryset):
rows = itertools.chain(
(self.get_csv_header(), ),
(self.clean_csv_line(obj) for obj in queryset)
)
pseudo_buffer = Echo()
writer = csv.writer(pseudo_buffer)
response = StreamingHttpResponse(
(writer.writerow(row) for row in rows),
content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s-%s.csv' % (
self.model.__name__, datetime.now().strftime('%Y-%m-%d'))
return response
export_csv.label = "Download"
export_csv.short_description = "Download an export of the data as CSV"
changelist_actions = ('export_csv', )
|
<commit_before><commit_msg>Add django admin mixin for exporting csvs<commit_after>
|
import csv
from datetime import datetime
import itertools
from django.http import StreamingHttpResponse
from django_object_actions import DjangoObjectActions
class Echo(object):
'''An object that implements just the write method of the file-like
interface.'''
def write(self, value):
'''Write the value by returning it, instead of storing it in a
buffer.'''
return value
class CsvExportAdminMixin(DjangoObjectActions):
'''A mix-in class for adding a CSV export button to a Django Admin page.'''
csv_header = None
def clean_csv_line(self, obj):
'''Subclass to override. Gets a model object, and returns a list
representing a line in the CSV file.'''
def get_csv_header(self):
'''Subclass can override. Returns a list representing the header of the
CSV. Can also set the `csv_header` class variable.'''
return self.csv_header
def export_csv(self, request, queryset):
rows = itertools.chain(
(self.get_csv_header(), ),
(self.clean_csv_line(obj) for obj in queryset)
)
pseudo_buffer = Echo()
writer = csv.writer(pseudo_buffer)
response = StreamingHttpResponse(
(writer.writerow(row) for row in rows),
content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s-%s.csv' % (
self.model.__name__, datetime.now().strftime('%Y-%m-%d'))
return response
export_csv.label = "Download"
export_csv.short_description = "Download an export of the data as CSV"
changelist_actions = ('export_csv', )
|
Add django admin mixin for exporting csvs
import csv
from datetime import datetime
import itertools
from django.http import StreamingHttpResponse
from django_object_actions import DjangoObjectActions
class Echo(object):
'''An object that implements just the write method of the file-like
interface.'''
def write(self, value):
'''Write the value by returning it, instead of storing it in a
buffer.'''
return value
class CsvExportAdminMixin(DjangoObjectActions):
'''A mix-in class for adding a CSV export button to a Django Admin page.'''
csv_header = None
def clean_csv_line(self, obj):
'''Subclass to override. Gets a model object, and returns a list
representing a line in the CSV file.'''
def get_csv_header(self):
'''Subclass can override. Returns a list representing the header of the
CSV. Can also set the `csv_header` class variable.'''
return self.csv_header
def export_csv(self, request, queryset):
rows = itertools.chain(
(self.get_csv_header(), ),
(self.clean_csv_line(obj) for obj in queryset)
)
pseudo_buffer = Echo()
writer = csv.writer(pseudo_buffer)
response = StreamingHttpResponse(
(writer.writerow(row) for row in rows),
content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s-%s.csv' % (
self.model.__name__, datetime.now().strftime('%Y-%m-%d'))
return response
export_csv.label = "Download"
export_csv.short_description = "Download an export of the data as CSV"
changelist_actions = ('export_csv', )
|
<commit_before><commit_msg>Add django admin mixin for exporting csvs<commit_after>import csv
from datetime import datetime
import itertools
from django.http import StreamingHttpResponse
from django_object_actions import DjangoObjectActions
class Echo(object):
'''An object that implements just the write method of the file-like
interface.'''
def write(self, value):
'''Write the value by returning it, instead of storing it in a
buffer.'''
return value
class CsvExportAdminMixin(DjangoObjectActions):
'''A mix-in class for adding a CSV export button to a Django Admin page.'''
csv_header = None
def clean_csv_line(self, obj):
'''Subclass to override. Gets a model object, and returns a list
representing a line in the CSV file.'''
def get_csv_header(self):
'''Subclass can override. Returns a list representing the header of the
CSV. Can also set the `csv_header` class variable.'''
return self.csv_header
def export_csv(self, request, queryset):
rows = itertools.chain(
(self.get_csv_header(), ),
(self.clean_csv_line(obj) for obj in queryset)
)
pseudo_buffer = Echo()
writer = csv.writer(pseudo_buffer)
response = StreamingHttpResponse(
(writer.writerow(row) for row in rows),
content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s-%s.csv' % (
self.model.__name__, datetime.now().strftime('%Y-%m-%d'))
return response
export_csv.label = "Download"
export_csv.short_description = "Download an export of the data as CSV"
changelist_actions = ('export_csv', )
|
|
e107e3b47f015523861bafaab5cacce7deff6366
|
Lib/test/crashers/multithreaded_close.py
|
Lib/test/crashers/multithreaded_close.py
|
# f.close() is not thread-safe: calling it at the same time as another
# operation (or another close) on the same file, but done from another
# thread, causes crashes. The issue is more complicated than it seems,
# witness the discussions in:
#
# http://bugs.python.org/issue595601
# http://bugs.python.org/issue815646
import thread
while 1:
f = open("multithreaded_close.tmp", "w")
thread.start_new_thread(f.close, ())
f.close()
|
Add a crasher for the long-standing issue with closing a file while another thread uses it.
|
Add a crasher for the long-standing issue with closing a file
while another thread uses it.
|
Python
|
mit
|
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
|
Add a crasher for the long-standing issue with closing a file
while another thread uses it.
|
# f.close() is not thread-safe: calling it at the same time as another
# operation (or another close) on the same file, but done from another
# thread, causes crashes. The issue is more complicated than it seems,
# witness the discussions in:
#
# http://bugs.python.org/issue595601
# http://bugs.python.org/issue815646
import thread
while 1:
f = open("multithreaded_close.tmp", "w")
thread.start_new_thread(f.close, ())
f.close()
|
<commit_before><commit_msg>Add a crasher for the long-standing issue with closing a file
while another thread uses it.<commit_after>
|
# f.close() is not thread-safe: calling it at the same time as another
# operation (or another close) on the same file, but done from another
# thread, causes crashes. The issue is more complicated than it seems,
# witness the discussions in:
#
# http://bugs.python.org/issue595601
# http://bugs.python.org/issue815646
import thread
while 1:
f = open("multithreaded_close.tmp", "w")
thread.start_new_thread(f.close, ())
f.close()
|
Add a crasher for the long-standing issue with closing a file
while another thread uses it.
# f.close() is not thread-safe: calling it at the same time as another
# operation (or another close) on the same file, but done from another
# thread, causes crashes. The issue is more complicated than it seems,
# witness the discussions in:
#
# http://bugs.python.org/issue595601
# http://bugs.python.org/issue815646
import thread
while 1:
f = open("multithreaded_close.tmp", "w")
thread.start_new_thread(f.close, ())
f.close()
|
<commit_before><commit_msg>Add a crasher for the long-standing issue with closing a file
while another thread uses it.<commit_after># f.close() is not thread-safe: calling it at the same time as another
# operation (or another close) on the same file, but done from another
# thread, causes crashes. The issue is more complicated than it seems,
# witness the discussions in:
#
# http://bugs.python.org/issue595601
# http://bugs.python.org/issue815646
import thread
while 1:
f = open("multithreaded_close.tmp", "w")
thread.start_new_thread(f.close, ())
f.close()
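For comparison, the same access pattern written against the Python 3 `threading` module -- an illustrative sketch only, not part of the recorded crasher (CPython 3's io layer handles this case differently):
# illustrative Python 3 rendition of the same close-vs-close race
import threading

while True:
    f = open("multithreaded_close.tmp", "w")
    t = threading.Thread(target=f.close)
    t.start()
    f.close()
    t.join()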
|
|
c7a4ffbbb2023ebfb470ad79dd8b0d8217af0ff7
|
pyecore/utils.py
|
pyecore/utils.py
|
"""
This module gathers utility classes and functions that can ease metamodel and
model manipulation.
"""
from .ecore import EPackage, BadValueError
from .notification import EObserver, Kind
class DynamicEPackage(EObserver):
"""A DynamicEPackage gives the ability to directly handle metaclasses
from a metamodel as if it were a statically generated EPackage.
Usage from an existing dynamic EPackage named 'myroot' that defines two
EClass: 'A' and 'B'
>>> from pyecore.utils import DynamicEPackage
>>> MyAPI = DynamicEPackage(myroot)
>>> MyAPI.A
<EClass name="A">
>>> a = MyAPI.A()
>>> a
<pyecore.ecore.A object at 0x7f118de363c8>
"""
def __init__(self, package):
if not isinstance(package, EPackage):
            raise BadValueError(got=package, expected=EPackage)
super().__init__(notifier=package)
for eclass in package.eClassifiers:
setattr(self, eclass.name, eclass)
for subpackage in package.eSubpackages:
setattr(self, subpackage.name, DynamicEPackage(subpackage))
def notifyChanged(self, notification):
kind = notification.kind
if notification.feature is EPackage.eClassifiers:
if kind == Kind.ADD:
new = notification.new
setattr(self, new.name, new)
elif kind == Kind.ADD_MANY:
for new in notification.new:
setattr(self, new.name, new)
elif kind == Kind.REMOVE and notification.old.eResource is None:
try:
delattr(self, notification.old.name)
except AttributeError:
pass
# REMOVE_MANY is not yet well supported
|
Add new class to ease dynamic metamodel handling
|
Add new class to ease dynamic metamodel handling
The dynamic metamodel manipulation is a little bit cumbersome when it
comes to extracting all the existing EClass from a loaded
EPackage. Currently, a solution is to gather all the elements by hand
in dedicated variables:
>>> A = dynamic_epackage.getEClassifier('A')
>>> B = dynamic_epackage.getEClassifier('B')
>>> a1 = A()
>>> b1 = B()
This commit introduces a new class: 'DynamicEPackage' which
constructs, using reflection, an object that has direct references to
each EClass/sub-EPackage by name:
>>> from pyecore.utils import DynamicEPackage
>>> mylib = DynamicEPackage(dynamic_epackage)
>>> a1 = mylib.A()
>>> b1 = mylib.B()
This greatly helps the user to easily call and get EClass from a
freshly loaded dynamic EPackage.
|
Python
|
bsd-3-clause
|
aranega/pyecore,pyecore/pyecore
|
Add new class to ease dynamic metamodel handling
The dynamic metamodel manipulation is a little bit cumbersome when it
comes to extracting all the existing EClass from a loaded
EPackage. Currently, a solution is to gather all the elements by hand
in dedicated variables:
>>> A = dynamic_epackage.getEClassifier('A')
>>> B = dynamic_epackage.getEClassifier('B')
>>> a1 = A()
>>> b1 = B()
This commit introduces a new class: 'DynamicEPackage' which
constructs, using reflection, an object that has direct references to
each EClass/sub-EPackage by name:
>>> from pyecore.utils import DynamicEPackage
>>> mylib = DynamicEPackage(dynamic_epackage)
>>> a1 = mylib.A()
>>> b1 = mylib.B()
This greatly helps the user to easily call and get EClass from a
freshly loaded dynamic EPackage.
|
"""
This module gathers utility classes and functions that can ease metamodel and
model manipulation.
"""
from .ecore import EPackage, BadValueError
from .notification import EObserver, Kind
class DynamicEPackage(EObserver):
"""A DynamicEPackage gives the ability to directly handle metaclasses
from a metamodel as if it were a statically generated EPackage.
Usage from an existing dynamic EPackage named 'myroot' that defines two
EClass: 'A' and 'B'
>>> from pyecore.utils import DynamicEPackage
>>> MyAPI = DynamicEPackage(myroot)
>>> MyAPI.A
<EClass name="A">
>>> a = MyAPI.A()
>>> a
<pyecore.ecore.A object at 0x7f118de363c8>
"""
def __init__(self, package):
if not isinstance(package, EPackage):
            raise BadValueError(got=package, expected=EPackage)
super().__init__(notifier=package)
for eclass in package.eClassifiers:
setattr(self, eclass.name, eclass)
for subpackage in package.eSubpackages:
setattr(self, subpackage.name, DynamicEPackage(subpackage))
def notifyChanged(self, notification):
kind = notification.kind
if notification.feature is EPackage.eClassifiers:
if kind == Kind.ADD:
new = notification.new
setattr(self, new.name, new)
elif kind == Kind.ADD_MANY:
for new in notification.new:
setattr(self, new.name, new)
elif kind == Kind.REMOVE and notification.old.eResource is None:
try:
delattr(self, notification.old.name)
except AttributeError:
pass
# REMOVE_MANY is not yet well supported
|
<commit_before><commit_msg>Add new class to ease dynamic metamodel handling
The dynamic metamodel manipulation is a little bit cumbersome when it
comes to extracting all the existing EClass from a loaded
EPackage. Currently, a solution is to gather all the elements by hand
in dedicated variables:
>>> A = dynamic_epackage.getEClassifier('A')
>>> B = dynamic_epackage.getEClassifier('B')
>>> a1 = A()
>>> b1 = B()
This commit introduces a new class: 'DynamicEPackage' which
constructs, using reflection, an object that has direct references to
each EClass/sub-EPackage by name:
>>> from pyecore.utils import DynamicEPackage
>>> mylib = DynamicEPackage(dynamic_epackage)
>>> a1 = mylib.A()
>>> b1 = mylib.B()
This greatly helps the user to easily call and get EClass from a
freshly loaded dynamic EPackage.<commit_after>
|
"""
This module gathers utility classes and functions that can ease metamodel and
model manipulation.
"""
from .ecore import EPackage, BadValueError
from .notification import EObserver, Kind
class DynamicEPackage(EObserver):
"""A DynamicEPackage gives the ability to directly handle metaclasses
from a metamodel as if it were a statically generated EPackage.
Usage from an existing dynamic EPackage named 'myroot' that defines two
EClass: 'A' and 'B'
>>> from pyecore.utils import DynamicEPackage
>>> MyAPI = DynamicEPackage(myroot)
>>> MyAPI.A
<EClass name="A">
>>> a = MyAPI.A()
>>> a
<pyecore.ecore.A object at 0x7f118de363c8>
"""
def __init__(self, package):
if not isinstance(package, EPackage):
            raise BadValueError(got=package, expected=EPackage)
super().__init__(notifier=package)
for eclass in package.eClassifiers:
setattr(self, eclass.name, eclass)
for subpackage in package.eSubpackages:
setattr(self, subpackage.name, DynamicEPackage(subpackage))
def notifyChanged(self, notification):
kind = notification.kind
if notification.feature is EPackage.eClassifiers:
if kind == Kind.ADD:
new = notification.new
setattr(self, new.name, new)
elif kind == Kind.ADD_MANY:
for new in notification.new:
setattr(self, new.name, new)
elif kind == Kind.REMOVE and notification.old.eResource is None:
try:
delattr(self, notification.old.name)
except AttributeError:
pass
# REMOVE_MANY is not yet well supported
|
Add new class to ease dynamic metamodel handling
The dynamic metamodel manipulation is a little bit cumbersome when it
comes to extracting all the existing EClass from a loaded
EPackage. Currently, a solution is to gather all the elements by hand
in dedicated variables:
>>> A = dynamic_epackage.getEClassifier('A')
>>> B = dynamic_epackage.getEClassifier('B')
>>> a1 = A()
>>> b1 = B()
This commit introduces a new class: 'DynamicEPackage' which
constructs, using reflection, an object that has direct references to
each EClass/sub-EPackage by name:
>>> from pyecore.utils import DynamicEPackage
>>> mylib = DynamicEPackage(dynamic_epackage)
>>> a1 = mylib.A()
>>> b1 = mylib.B()
This greatly helps the user to easily call and get EClass from a
freshly loaded dynamic EPackage."""
This module gathers utility classes and functions that can ease metamodel and
model manipulation.
"""
from .ecore import EPackage, BadValueError
from .notification import EObserver, Kind
class DynamicEPackage(EObserver):
"""A DynamicEPackage gives the ability to directly handle metaclasses
from a metamodel as if it were a statically generated EPackage.
Usage from an existing dynamic EPackage named 'myroot' that defines two
EClass: 'A' and 'B'
>>> from pyecore.utils import DynamicEPackage
>>> MyAPI = DynamicEPackage(myroot)
>>> MyAPI.A
<EClass name="A">
>>> a = MyAPI.A()
>>> a
<pyecore.ecore.A object at 0x7f118de363c8>
"""
def __init__(self, package):
if not isinstance(package, EPackage):
            raise BadValueError(got=package, expected=EPackage)
super().__init__(notifier=package)
for eclass in package.eClassifiers:
setattr(self, eclass.name, eclass)
for subpackage in package.eSubpackages:
setattr(self, subpackage.name, DynamicEPackage(subpackage))
def notifyChanged(self, notification):
kind = notification.kind
if notification.feature is EPackage.eClassifiers:
if kind == Kind.ADD:
new = notification.new
setattr(self, new.name, new)
elif kind == Kind.ADD_MANY:
for new in notification.new:
setattr(self, new.name, new)
elif kind == Kind.REMOVE and notification.old.eResource is None:
try:
delattr(self, notification.old.name)
except AttributeError:
pass
# REMOVE_MANY is not yet well supported
|
<commit_before><commit_msg>Add new class to ease dynamic metamodel handling
The dynamic metamodel manipulation is a little bit cumbersome when it
comes to extracting all the existing EClass from a loaded
EPackage. Currently, a solution is to gather all the elements by hand
in dedicated variables:
>>> A = dynamic_epackage.getEClassifier('A')
>>> B = dynamic_epackage.getEClassifier('B')
>>> a1 = A()
>>> b1 = B()
This commit introduces a new class: 'DynamicEPackage' which
constructs, using reflection, an object that has direct references to
each EClass/sub-EPackage by name:
>>> from pyecore.utils import DynamicEPackage
>>> mylib = DynamicEPackage(dynamic_epackage)
>>> a1 = mylib.A()
>>> b1 = mylib.B()
This greatly helps the user to easily call and get EClass from a
freshly loaded dynamic EPackage.<commit_after>"""
This module gathers utility classes and functions that can ease metamodel and
model manipulation.
"""
from .ecore import EPackage, BadValueError
from .notification import EObserver, Kind
class DynamicEPackage(EObserver):
"""A DynamicEPackage gives the ability to directly handle metaclasses
from a metamodel as if it were a statically generated EPackage.
Usage from an existing dynamic EPackage named 'myroot' that defines two
EClass: 'A' and 'B'
>>> from pyecore.utils import DynamicEPackage
>>> MyAPI = DynamicEPackage(myroot)
>>> MyAPI.A
<EClass name="A">
>>> a = MyAPI.A()
>>> a
<pyecore.ecore.A object at 0x7f118de363c8>
"""
def __init__(self, package):
if not isinstance(package, EPackage):
            raise BadValueError(got=package, expected=EPackage)
super().__init__(notifier=package)
for eclass in package.eClassifiers:
setattr(self, eclass.name, eclass)
for subpackage in package.eSubpackages:
setattr(self, subpackage.name, DynamicEPackage(subpackage))
def notifyChanged(self, notification):
kind = notification.kind
if notification.feature is EPackage.eClassifiers:
if kind == Kind.ADD:
new = notification.new
setattr(self, new.name, new)
elif kind == Kind.ADD_MANY:
for new in notification.new:
setattr(self, new.name, new)
elif kind == Kind.REMOVE and notification.old.eResource is None:
try:
delattr(self, notification.old.name)
except AttributeError:
pass
# REMOVE_MANY is not yet well supported
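A short sketch of the observer behaviour described above (illustrative; the package and classifier names are invented for the example):
# illustrative sketch: attribute access tracks eClassifiers additions
from pyecore.ecore import EPackage, EClass
from pyecore.utils import DynamicEPackage

mylib = EPackage('mylib', nsURI='http://mylib/1.0', nsPrefix='mylib')
mylib.eClassifiers.append(EClass('A'))

api = DynamicEPackage(mylib)
a = api.A()                              # EClass looked up as a plain attribute

mylib.eClassifiers.append(EClass('B'))   # notifyChanged registers the new name
b = api.B()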
|
|
68b17dc4fa6cdf1c04922d9d4fae77252b6712cd
|
theanets/__init__.py
|
theanets/__init__.py
|
'''This package groups together a bunch of theano code for neural nets.'''
from .feedforward import Autoencoder, Regressor, Classifier
from .graph import Network
from .main import Experiment
from . import layers
from . import recurrent
from . import trainer
__version__ = '0.6.0pre'
|
'''This package groups together a bunch of theano code for neural nets.'''
from .feedforward import Autoencoder, Regressor, Classifier
from .graph import Network
from .main import Experiment
from . import layers
from . import recurrent
from . import trainer
__version__ = '0.7.0pre'
|
Fix package version for git master.
|
Fix package version for git master. [ci skip]
|
Python
|
mit
|
chrinide/theanets,lmjohns3/theanets,devdoer/theanets
|
'''This package groups together a bunch of theano code for neural nets.'''
from .feedforward import Autoencoder, Regressor, Classifier
from .graph import Network
from .main import Experiment
from . import layers
from . import recurrent
from . import trainer
__version__ = '0.6.0pre'
Fix package version for git master. [ci skip]
|
'''This package groups together a bunch of theano code for neural nets.'''
from .feedforward import Autoencoder, Regressor, Classifier
from .graph import Network
from .main import Experiment
from . import layers
from . import recurrent
from . import trainer
__version__ = '0.7.0pre'
|
<commit_before>'''This package groups together a bunch of theano code for neural nets.'''
from .feedforward import Autoencoder, Regressor, Classifier
from .graph import Network
from .main import Experiment
from . import layers
from . import recurrent
from . import trainer
__version__ = '0.6.0pre'
<commit_msg>Fix package version for git master. [ci skip]<commit_after>
|
'''This package groups together a bunch of theano code for neural nets.'''
from .feedforward import Autoencoder, Regressor, Classifier
from .graph import Network
from .main import Experiment
from . import layers
from . import recurrent
from . import trainer
__version__ = '0.7.0pre'
|
'''This package groups together a bunch of theano code for neural nets.'''
from .feedforward import Autoencoder, Regressor, Classifier
from .graph import Network
from .main import Experiment
from . import layers
from . import recurrent
from . import trainer
__version__ = '0.6.0pre'
Fix package version for git master. [ci skip]
'''This package groups together a bunch of theano code for neural nets.'''
from .feedforward import Autoencoder, Regressor, Classifier
from .graph import Network
from .main import Experiment
from . import layers
from . import recurrent
from . import trainer
__version__ = '0.7.0pre'
|
<commit_before>'''This package groups together a bunch of theano code for neural nets.'''
from .feedforward import Autoencoder, Regressor, Classifier
from .graph import Network
from .main import Experiment
from . import layers
from . import recurrent
from . import trainer
__version__ = '0.6.0pre'
<commit_msg>Fix package version for git master. [ci skip]<commit_after>'''This package groups together a bunch of theano code for neural nets.'''
from .feedforward import Autoencoder, Regressor, Classifier
from .graph import Network
from .main import Experiment
from . import layers
from . import recurrent
from . import trainer
__version__ = '0.7.0pre'
|
22cfab045c15685f18baa8fc5d8b1b9036888f41
|
scrapi/harvesters/cambridge.py
|
scrapi/harvesters/cambridge.py
|
'''
Harvester for the DSpace at Cambridge (production) for the SHARE project
Example API call: https://www.repository.cam.ac.uk/oai/request?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class CambridgeHarvester(OAIHarvester):
short_name = 'cambridge'
long_name = 'DSpace at Cambridge (production)'
url = 'https://www.repository.cam.ac.uk'
base_url = 'https://www.repository.cam.ac.uk/oai/request'
property_list = ['date', 'identifier', 'type', 'format', 'setSpec']
timezone_granularity = True
|
Add harvester for the University of Cambridge
|
Add harvester for the University of Cambridge
Closes [#SHARE-103]
|
Python
|
apache-2.0
|
fabianvf/scrapi,fabianvf/scrapi,felliott/scrapi,erinspace/scrapi,felliott/scrapi,CenterForOpenScience/scrapi,CenterForOpenScience/scrapi,erinspace/scrapi
|
Add harvester for the University of Cambridge
Closes [#SHARE-103]
|
'''
Harvester for the DSpace at Cambridge (production) for the SHARE project
Example API call: https://www.repository.cam.ac.uk/oai/request?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class CambridgeHarvester(OAIHarvester):
short_name = 'cambridge'
long_name = 'DSpace at Cambridge (production)'
url = 'https://www.repository.cam.ac.uk'
base_url = 'https://www.repository.cam.ac.uk/oai/request'
property_list = ['date', 'identifier', 'type', 'format', 'setSpec']
timezone_granularity = True
|
<commit_before><commit_msg>Add harvester for the University of Cambridge
Closes [#SHARE-103]<commit_after>
|
'''
Harvester for the DSpace at Cambridge (production) for the SHARE project
Example API call: https://www.repository.cam.ac.uk/oai/request?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class CambridgeHarvester(OAIHarvester):
short_name = 'cambridge'
long_name = 'DSpace at Cambridge (production)'
url = 'https://www.repository.cam.ac.uk'
base_url = 'https://www.repository.cam.ac.uk/oai/request'
property_list = ['date', 'identifier', 'type', 'format', 'setSpec']
timezone_granularity = True
|
Add harvester for the University of Cambridge
Closes [#SHARE-103]'''
Harvester for the DSpace at Cambridge (production) for the SHARE project
Example API call: https://www.repository.cam.ac.uk/oai/request?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class CambridgeHarvester(OAIHarvester):
short_name = 'cambridge'
long_name = 'DSpace at Cambridge (production)'
url = 'https://www.repository.cam.ac.uk'
base_url = 'https://www.repository.cam.ac.uk/oai/request'
property_list = ['date', 'identifier', 'type', 'format', 'setSpec']
timezone_granularity = True
|
<commit_before><commit_msg>Add harvester for the University of Cambridge
Closes [#SHARE-103]<commit_after>'''
Harvester for the DSpace at Cambridge (production) for the SHARE project
Example API call: https://www.repository.cam.ac.uk/oai/request?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class CambridgeHarvester(OAIHarvester):
short_name = 'cambridge'
long_name = 'DSpace at Cambridge (production)'
url = 'https://www.repository.cam.ac.uk'
base_url = 'https://www.repository.cam.ac.uk/oai/request'
property_list = ['date', 'identifier', 'type', 'format', 'setSpec']
timezone_granularity = True
|
|
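The example API call quoted in the harvester docstring above can be reproduced independently of scrapi, e.g. with requests (an illustrative sketch only; the harvester itself drives this through the OAIHarvester base class):
import requests

# Same endpoint and parameters as the docstring's example API call.
resp = requests.get(
    'https://www.repository.cam.ac.uk/oai/request',
    params={'verb': 'ListRecords', 'metadataPrefix': 'oai_dc'},
)
resp.raise_for_status()
print(resp.text[:500])  # OAI-PMH XML listing of Dublin Core records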
911edb36fa6810dba3264dc3ba40648aeb9d436f
|
sv-comp-run.py
|
sv-comp-run.py
|
#!/usr/bin/python3
import sys
import glob
import subprocess
import re
import collections
import os.path
def str2bool(s):
return {
"true": True,
"false": False
}[s]
def extract_bool(p, s):
m = re.search(p, s)
return str2bool(m.group(1)) if m else None
set_filename = sys.argv[1]
with open(set_filename) as set_file:
stats = collections.defaultdict(int)
for pattern in set_file:
pattern = pattern.strip()
pattern = os.path.join(os.path.dirname(set_filename), pattern)
if pattern:
for code_filename in glob.iglob(pattern):
print(f"{code_filename}: ", end="", flush=True)
expected = extract_bool(r"_(false|true)-unreach-call", code_filename)
p = subprocess.run(f"~/Desktop/sv-comp/goblint/goblint --enable ana.sv-comp --enable dbg.debug {code_filename}", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8")
actual = extract_bool(r"__VERIFIER_error unreach2: (false|true)", p.stdout)
missing_funcs = False
for m in re.finditer(r"Function definition missing for (__VERIFIER_\S+)", p.stdout):
missing_funcs = True
print(f"MISSING FUNC {m.group(1)}")
if missing_funcs:
sys.exit(0)
text = None
if expected is None or actual is None:
text = f"NONE expected {expected}, actual {actual}"
elif actual == expected:
text = f"CORRECT {expected}"
else:
text = f"INCORRECT expected {expected}, actual {actual}"
print(text)
stats[text] += 1
print("-" * 80)
for text, count in stats.items():
print(f"{text}: {count}")
|
Add own SV-COMP unreach-call set runner script
|
Add own SV-COMP unreach-call set runner script
|
Python
|
mit
|
goblint/analyzer,goblint/analyzer,goblint/analyzer,goblint/analyzer,goblint/analyzer
|
Add own SV-COMP unreach-call set runner script
|
#!/usr/bin/python3
import sys
import glob
import subprocess
import re
import collections
import os.path
def str2bool(s):
return {
"true": True,
"false": False
}[s]
def extract_bool(p, s):
m = re.search(p, s)
return str2bool(m.group(1)) if m else None
set_filename = sys.argv[1]
with open(set_filename) as set_file:
stats = collections.defaultdict(int)
for pattern in set_file:
pattern = pattern.strip()
pattern = os.path.join(os.path.dirname(set_filename), pattern)
if pattern:
for code_filename in glob.iglob(pattern):
print(f"{code_filename}: ", end="", flush=True)
expected = extract_bool(r"_(false|true)-unreach-call", code_filename)
p = subprocess.run(f"~/Desktop/sv-comp/goblint/goblint --enable ana.sv-comp --enable dbg.debug {code_filename}", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8")
actual = extract_bool(r"__VERIFIER_error unreach2: (false|true)", p.stdout)
missing_funcs = False
for m in re.finditer(r"Function definition missing for (__VERIFIER_\S+)", p.stdout):
missing_funcs = True
print(f"MISSING FUNC {m.group(1)}")
if missing_funcs:
sys.exit(0)
text = None
if expected is None or actual is None:
text = f"NONE expected {expected}, actual {actual}"
elif actual == expected:
text = f"CORRECT {expected}"
else:
text = f"INCORRECT expected {expected}, actual {actual}"
print(text)
stats[text] += 1
print("-" * 80)
for text, count in stats.items():
print(f"{text}: {count}")
|
<commit_before><commit_msg>Add own SV-COMP unreach-call set runner script<commit_after>
|
#!/usr/bin/python3
import sys
import glob
import subprocess
import re
import collections
import os.path
def str2bool(s):
return {
"true": True,
"false": False
}[s]
def extract_bool(p, s):
m = re.search(p, s)
return str2bool(m.group(1)) if m else None
set_filename = sys.argv[1]
with open(set_filename) as set_file:
stats = collections.defaultdict(int)
for pattern in set_file:
pattern = pattern.strip()
pattern = os.path.join(os.path.dirname(set_filename), pattern)
if pattern:
for code_filename in glob.iglob(pattern):
print(f"{code_filename}: ", end="", flush=True)
expected = extract_bool(r"_(false|true)-unreach-call", code_filename)
p = subprocess.run(f"~/Desktop/sv-comp/goblint/goblint --enable ana.sv-comp --enable dbg.debug {code_filename}", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8")
actual = extract_bool(r"__VERIFIER_error unreach2: (false|true)", p.stdout)
missing_funcs = False
for m in re.finditer(r"Function definition missing for (__VERIFIER_\S+)", p.stdout):
missing_funcs = True
print(f"MISSING FUNC {m.group(1)}")
if missing_funcs:
sys.exit(0)
text = None
if expected is None or actual is None:
text = f"NONE expected {expected}, actual {actual}"
elif actual == expected:
text = f"CORRECT {expected}"
else:
text = f"INCORRECT expected {expected}, actual {actual}"
print(text)
stats[text] += 1
print("-" * 80)
for text, count in stats.items():
print(f"{text}: {count}")
|
Add own SV-COMP unreach-call set runner script#!/usr/bin/python3
import sys
import glob
import subprocess
import re
import collections
import os.path
def str2bool(s):
return {
"true": True,
"false": False
}[s]
def extract_bool(p, s):
m = re.search(p, s)
return str2bool(m.group(1)) if m else None
set_filename = sys.argv[1]
with open(set_filename) as set_file:
stats = collections.defaultdict(int)
for pattern in set_file:
pattern = pattern.strip()
pattern = os.path.join(os.path.dirname(set_filename), pattern)
if pattern:
for code_filename in glob.iglob(pattern):
print(f"{code_filename}: ", end="", flush=True)
expected = extract_bool(r"_(false|true)-unreach-call", code_filename)
p = subprocess.run(f"~/Desktop/sv-comp/goblint/goblint --enable ana.sv-comp --enable dbg.debug {code_filename}", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8")
actual = extract_bool(r"__VERIFIER_error unreach2: (false|true)", p.stdout)
missing_funcs = False
for m in re.finditer(r"Function definition missing for (__VERIFIER_\S+)", p.stdout):
missing_funcs = True
print(f"MISSING FUNC {m.group(1)}")
if missing_funcs:
sys.exit(0)
text = None
if expected is None or actual is None:
text = f"NONE expected {expected}, actual {actual}"
elif actual == expected:
text = f"CORRECT {expected}"
else:
text = f"INCORRECT expected {expected}, actual {actual}"
print(text)
stats[text] += 1
print("-" * 80)
for text, count in stats.items():
print(f"{text}: {count}")
|
<commit_before><commit_msg>Add own SV-COMP unreach-call set runner script<commit_after>#!/usr/bin/python3
import sys
import glob
import subprocess
import re
import collections
import os.path
def str2bool(s):
return {
"true": True,
"false": False
}[s]
def extract_bool(p, s):
m = re.search(p, s)
return str2bool(m.group(1)) if m else None
set_filename = sys.argv[1]
with open(set_filename) as set_file:
stats = collections.defaultdict(int)
for pattern in set_file:
pattern = pattern.strip()
pattern = os.path.join(os.path.dirname(set_filename), pattern)
if pattern:
for code_filename in glob.iglob(pattern):
print(f"{code_filename}: ", end="", flush=True)
expected = extract_bool(r"_(false|true)-unreach-call", code_filename)
p = subprocess.run(f"~/Desktop/sv-comp/goblint/goblint --enable ana.sv-comp --enable dbg.debug {code_filename}", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8")
actual = extract_bool(r"__VERIFIER_error unreach2: (false|true)", p.stdout)
missing_funcs = False
for m in re.finditer(r"Function definition missing for (__VERIFIER_\S+)", p.stdout):
missing_funcs = True
print(f"MISSING FUNC {m.group(1)}")
if missing_funcs:
sys.exit(0)
text = None
if expected is None or actual is None:
text = f"NONE expected {expected}, actual {actual}"
elif actual == expected:
text = f"CORRECT {expected}"
else:
text = f"INCORRECT expected {expected}, actual {actual}"
print(text)
stats[text] += 1
print("-" * 80)
for text, count in stats.items():
print(f"{text}: {count}")
|
|
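For clarity, the verdict extraction in the runner above rests on two regexes: one applied to the task filename (expected verdict) and one applied to Goblint's output (actual verdict). A self-contained check, using the same helpers and a hypothetical filename:
import re

def str2bool(s):
    return {"true": True, "false": False}[s]

def extract_bool(p, s):
    m = re.search(p, s)
    return str2bool(m.group(1)) if m else None

# Expected verdict from the task filename.
print(extract_bool(r"_(false|true)-unreach-call", "example_false-unreach-call.c"))  # False
# Actual verdict from the analyzer's output line.
print(extract_bool(r"__VERIFIER_error unreach2: (false|true)", "__VERIFIER_error unreach2: true"))  # True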
473ad12b2d77b00073c3712e8d7593d290976248
|
technician_tests/test_timeout.py
|
technician_tests/test_timeout.py
|
# vim:ai:et:ff=unix:fileencoding=utf-8:sw=4:ts=4:
from __future__ import (absolute_import, print_function, unicode_literals)
try:
import unittest2 as unittest
except ImportError:
import unittest
import makerbot_driver
class TimeoutTest(unittest.TestCase):
def test_timeout(self):
'''Test that the pyserial layer can correctly connect to a bot without
getting any timeout errors.
'''
port = raw_input('specify a real active port on your OS to test> ')
s3g = makerbot_driver.s3g.from_filename(port)
s3g.get_version()
if '__main__' == __name__:
unittest.main()
|
Add technical test for serial port timeouts.
|
Add technical test for serial port timeouts.
|
Python
|
agpl-3.0
|
makerbot/s3g,Jnesselr/s3g,makerbot/s3g,makerbot/s3g,Jnesselr/s3g,makerbot/s3g
|
Add technical test for serial port timeouts.
|
# vim:ai:et:ff=unix:fileencoding=utf-8:sw=4:ts=4:
from __future__ import (absolute_import, print_function, unicode_literals)
try:
import unittest2 as unittest
except ImportError:
import unittest
import makerbot_driver
class TimeoutTest(unittest.TestCase):
def test_timeout(self):
'''Test that the pyserial layer can correctly connect to a bot without
getting any timeout errors.
'''
port = raw_input('specify a real active port on your OS to test> ')
s3g = makerbot_driver.s3g.from_filename(port)
s3g.get_version()
if '__main__' == __name__:
unittest.main()
|
<commit_before><commit_msg>Add technical test for serial port timeouts.<commit_after>
|
# vim:ai:et:ff=unix:fileencoding=utf-8:sw=4:ts=4:
from __future__ import (absolute_import, print_function, unicode_literals)
try:
import unittest2 as unittest
except ImportError:
import unittest
import makerbot_driver
class TimeoutTest(unittest.TestCase):
def test_timeout(self):
'''Test that the pyserial layer can correctly connect to a bot without
getting any timeout errors.
'''
port = raw_input('specify a real active port on your OS to test> ')
s3g = makerbot_driver.s3g.from_filename(port)
s3g.get_version()
if '__main__' == __name__:
unittest.main()
|
Add technical test for serial port timeouts.# vim:ai:et:ff=unix:fileencoding=utf-8:sw=4:ts=4:
from __future__ import (absolute_import, print_function, unicode_literals)
try:
import unittest2 as unittest
except ImportError:
import unittest
import makerbot_driver
class TimeoutTest(unittest.TestCase):
def test_timeout(self):
'''Test that the pyserial layer can correctly connect to a bot without
getting any timeout errors.
'''
port = raw_input('specify a real active port on your OS to test> ')
s3g = makerbot_driver.s3g.from_filename(port)
s3g.get_version()
if '__main__' == __name__:
unittest.main()
|
<commit_before><commit_msg>Add technical test for serial port timeouts.<commit_after># vim:ai:et:ff=unix:fileencoding=utf-8:sw=4:ts=4:
from __future__ import (absolute_import, print_function, unicode_literals)
try:
import unittest2 as unittest
except ImportError:
import unittest
import makerbot_driver
class TimeoutTest(unittest.TestCase):
def test_timeout(self):
'''Test that the pyserial layer can correctly connect to a bot without
getting any timeout errors.
'''
port = raw_input('specify a real active port on your OS to test> ')
s3g = makerbot_driver.s3g.from_filename(port)
s3g.get_version()
if '__main__' == __name__:
unittest.main()
|
|
413295160d16c99f72ed37794cc823eccdb5676f
|
test/test_panels/test_checker.py
|
test/test_panels/test_checker.py
|
from pyqode.core import modes
from pyqode.core import panels
from pyqode.core.api import TextHelper
from pyqode.core.qt import QtCore
from pyqode.core.qt.QtTest import QTest
from test.helpers import editor_open
def get_panel(editor):
return editor.panels.get(panels.CheckerPanel)
def get_mode(editor):
try:
mode = editor.modes.get(modes.CheckerMode)
except KeyError:
mode = modes.CheckerMode(check)
editor.modes.append(mode)
return mode
def test_enabled(editor):
panel = get_panel(editor)
assert panel.enabled
panel.enabled = False
panel.enabled = True
@editor_open(__file__)
def test_leave_event(editor):
panel = get_panel(editor)
panel.leaveEvent()
@editor_open(__file__)
def test_mouse_press(editor):
mode = get_mode(editor)
mode.request_analysis()
QTest.qWait(2000)
panel = get_panel(editor)
y_pos = TextHelper(editor).line_pos_from_number(1)
QTest.mousePress(panel, QtCore.Qt.RightButton, QtCore.Qt.NoModifier,
QtCore.QPoint(1000, 1000))
QTest.mousePress(panel, QtCore.Qt.RightButton, QtCore.Qt.NoModifier,
QtCore.QPoint(3, y_pos))
mode.clear_messages()
@editor_open(__file__)
def test_mouse_move(editor):
mode = get_mode(editor)
mode.request_analysis()
QTest.qWait(2000)
panel = get_panel(editor)
y_pos = TextHelper(editor).line_pos_from_number(1)
QTest.mouseMove(panel, QtCore.QPoint(3, y_pos))
QTest.qWait(1000)
QTest.mouseMove(panel, QtCore.QPoint(1000, 1000))
mode.clear_messages()
def check(data):
return True, [('desc', i % 3, i + 1) for i in range(20)]
|
Add basic tests for checker panel (improve coverage)
|
Add basic tests for checker panel (improve coverage)
|
Python
|
mit
|
zwadar/pyqode.core,pyQode/pyqode.core,pyQode/pyqode.core
|
Add basic tests for checker panel (improve coverage)
|
from pyqode.core import modes
from pyqode.core import panels
from pyqode.core.api import TextHelper
from pyqode.core.qt import QtCore
from pyqode.core.qt.QtTest import QTest
from test.helpers import editor_open
def get_panel(editor):
return editor.panels.get(panels.CheckerPanel)
def get_mode(editor):
try:
mode = editor.modes.get(modes.CheckerMode)
except KeyError:
mode = modes.CheckerMode(check)
editor.modes.append(mode)
return mode
def test_enabled(editor):
panel = get_panel(editor)
assert panel.enabled
panel.enabled = False
panel.enabled = True
@editor_open(__file__)
def test_leave_event(editor):
panel = get_panel(editor)
panel.leaveEvent()
@editor_open(__file__)
def test_mouse_press(editor):
mode = get_mode(editor)
mode.request_analysis()
QTest.qWait(2000)
panel = get_panel(editor)
y_pos = TextHelper(editor).line_pos_from_number(1)
QTest.mousePress(panel, QtCore.Qt.RightButton, QtCore.Qt.NoModifier,
QtCore.QPoint(1000, 1000))
QTest.mousePress(panel, QtCore.Qt.RightButton, QtCore.Qt.NoModifier,
QtCore.QPoint(3, y_pos))
mode.clear_messages()
@editor_open(__file__)
def test_mouse_move(editor):
mode = get_mode(editor)
mode.request_analysis()
QTest.qWait(2000)
panel = get_panel(editor)
y_pos = TextHelper(editor).line_pos_from_number(1)
QTest.mouseMove(panel, QtCore.QPoint(3, y_pos))
QTest.qWait(1000)
QTest.mouseMove(panel, QtCore.QPoint(1000, 1000))
mode.clear_messages()
def check(data):
return True, [('desc', i % 3, i + 1) for i in range(20)]
|
<commit_before><commit_msg>Add basic tests for checker panel (improve coverage)<commit_after>
|
from pyqode.core import modes
from pyqode.core import panels
from pyqode.core.api import TextHelper
from pyqode.core.qt import QtCore
from pyqode.core.qt.QtTest import QTest
from test.helpers import editor_open
def get_panel(editor):
return editor.panels.get(panels.CheckerPanel)
def get_mode(editor):
try:
mode = editor.modes.get(modes.CheckerMode)
except KeyError:
mode = modes.CheckerMode(check)
editor.modes.append(mode)
return mode
def test_enabled(editor):
panel = get_panel(editor)
assert panel.enabled
panel.enabled = False
panel.enabled = True
@editor_open(__file__)
def test_leave_event(editor):
panel = get_panel(editor)
panel.leaveEvent()
@editor_open(__file__)
def test_mouse_press(editor):
mode = get_mode(editor)
mode.request_analysis()
QTest.qWait(2000)
panel = get_panel(editor)
y_pos = TextHelper(editor).line_pos_from_number(1)
QTest.mousePress(panel, QtCore.Qt.RightButton, QtCore.Qt.NoModifier,
QtCore.QPoint(1000, 1000))
QTest.mousePress(panel, QtCore.Qt.RightButton, QtCore.Qt.NoModifier,
QtCore.QPoint(3, y_pos))
mode.clear_messages()
@editor_open(__file__)
def test_mouse_move(editor):
mode = get_mode(editor)
mode.request_analysis()
QTest.qWait(2000)
panel = get_panel(editor)
y_pos = TextHelper(editor).line_pos_from_number(1)
QTest.mouseMove(panel, QtCore.QPoint(3, y_pos))
QTest.qWait(1000)
QTest.mouseMove(panel, QtCore.QPoint(1000, 1000))
mode.clear_messages()
def check(data):
return True, [('desc', i % 3, i + 1) for i in range(20)]
|
Add basic tests for checker panel (improve coverage)from pyqode.core import modes
from pyqode.core import panels
from pyqode.core.api import TextHelper
from pyqode.core.qt import QtCore
from pyqode.core.qt.QtTest import QTest
from test.helpers import editor_open
def get_panel(editor):
return editor.panels.get(panels.CheckerPanel)
def get_mode(editor):
try:
mode = editor.modes.get(modes.CheckerMode)
except KeyError:
mode = modes.CheckerMode(check)
editor.modes.append(mode)
return mode
def test_enabled(editor):
panel = get_panel(editor)
assert panel.enabled
panel.enabled = False
panel.enabled = True
@editor_open(__file__)
def test_leave_event(editor):
panel = get_panel(editor)
panel.leaveEvent()
@editor_open(__file__)
def test_mouse_press(editor):
mode = get_mode(editor)
mode.request_analysis()
QTest.qWait(2000)
panel = get_panel(editor)
y_pos = TextHelper(editor).line_pos_from_number(1)
QTest.mousePress(panel, QtCore.Qt.RightButton, QtCore.Qt.NoModifier,
QtCore.QPoint(1000, 1000))
QTest.mousePress(panel, QtCore.Qt.RightButton, QtCore.Qt.NoModifier,
QtCore.QPoint(3, y_pos))
mode.clear_messages()
@editor_open(__file__)
def test_mouse_move(editor):
mode = get_mode(editor)
mode.request_analysis()
QTest.qWait(2000)
panel = get_panel(editor)
y_pos = TextHelper(editor).line_pos_from_number(1)
QTest.mouseMove(panel, QtCore.QPoint(3, y_pos))
QTest.qWait(1000)
QTest.mouseMove(panel, QtCore.QPoint(1000, 1000))
mode.clear_messages()
def check(data):
return True, [('desc', i % 3, i + 1) for i in range(20)]
|
<commit_before><commit_msg>Add basic tests for checker panel (improve coverage)<commit_after>from pyqode.core import modes
from pyqode.core import panels
from pyqode.core.api import TextHelper
from pyqode.core.qt import QtCore
from pyqode.core.qt.QtTest import QTest
from test.helpers import editor_open
def get_panel(editor):
return editor.panels.get(panels.CheckerPanel)
def get_mode(editor):
try:
mode = editor.modes.get(modes.CheckerMode)
except KeyError:
mode = modes.CheckerMode(check)
editor.modes.append(mode)
return mode
def test_enabled(editor):
panel = get_panel(editor)
assert panel.enabled
panel.enabled = False
panel.enabled = True
@editor_open(__file__)
def test_leave_event(editor):
panel = get_panel(editor)
panel.leaveEvent()
@editor_open(__file__)
def test_mouse_press(editor):
mode = get_mode(editor)
mode.request_analysis()
QTest.qWait(2000)
panel = get_panel(editor)
y_pos = TextHelper(editor).line_pos_from_number(1)
QTest.mousePress(panel, QtCore.Qt.RightButton, QtCore.Qt.NoModifier,
QtCore.QPoint(1000, 1000))
QTest.mousePress(panel, QtCore.Qt.RightButton, QtCore.Qt.NoModifier,
QtCore.QPoint(3, y_pos))
mode.clear_messages()
@editor_open(__file__)
def test_mouse_move(editor):
mode = get_mode(editor)
mode.request_analysis()
QTest.qWait(2000)
panel = get_panel(editor)
y_pos = TextHelper(editor).line_pos_from_number(1)
QTest.mouseMove(panel, QtCore.QPoint(3, y_pos))
QTest.qWait(1000)
QTest.mouseMove(panel, QtCore.QPoint(1000, 1000))
mode.clear_messages()
def check(data):
return True, [('desc', i % 3, i + 1) for i in range(20)]
|
|
d0008f639a12c384d6d9faa58e6370d834bef2de
|
cms/migrations_django/0002_auto_20140807_2306.py
|
cms/migrations_django/0002_auto_20140807_2306.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='placeholder',
name='slot',
field=models.CharField(verbose_name='slot', max_length=255, editable=False, db_index=True),
),
]
|
Add django 1.7 migration for modified slot length
|
Add django 1.7 migration for modified slot length
|
Python
|
bsd-3-clause
|
cyberintruder/django-cms,nostalgiaz/django-cms,keimlink/django-cms,jeffreylu9/django-cms,wuzhihui1123/django-cms,frnhr/django-cms,SachaMPS/django-cms,kk9599/django-cms,sznekol/django-cms,vstoykov/django-cms,donce/django-cms,bittner/django-cms,stefanw/django-cms,takeshineshiro/django-cms,Vegasvikk/django-cms,benzkji/django-cms,qnub/django-cms,intip/django-cms,chkir/django-cms,SmithsonianEnterprises/django-cms,stefanfoulis/django-cms,Livefyre/django-cms,jrclaramunt/django-cms,josjevv/django-cms,stefanw/django-cms,vad/django-cms,petecummings/django-cms,DylannCordel/django-cms,nimbis/django-cms,irudayarajisawa/django-cms,benzkji/django-cms,datakortet/django-cms,jproffitt/django-cms,isotoma/django-cms,vad/django-cms,Vegasvikk/django-cms,iddqd1/django-cms,irudayarajisawa/django-cms,vad/django-cms,FinalAngel/django-cms,andyzsf/django-cms,wyg3958/django-cms,cyberintruder/django-cms,dhorelik/django-cms,stefanfoulis/django-cms,sephii/django-cms,rscnt/django-cms,liuyisiyisi/django-cms,vstoykov/django-cms,yakky/django-cms,AlexProfi/django-cms,intip/django-cms,philippze/django-cms,youprofit/django-cms,datakortet/django-cms,josjevv/django-cms,evildmp/django-cms,MagicSolutions/django-cms,irudayarajisawa/django-cms,divio/django-cms,andyzsf/django-cms,isotoma/django-cms,yakky/django-cms,vxsx/django-cms,chmberl/django-cms,rscnt/django-cms,farhaadila/django-cms,vad/django-cms,owers19856/django-cms,jeffreylu9/django-cms,stefanw/django-cms,astagi/django-cms,evildmp/django-cms,dhorelik/django-cms,jeffreylu9/django-cms,sephii/django-cms,intip/django-cms,rsalmaso/django-cms,FinalAngel/django-cms,stefanfoulis/django-cms,timgraham/django-cms,divio/django-cms,bittner/django-cms,takeshineshiro/django-cms,Jaccorot/django-cms,nimbis/django-cms,Vegasvikk/django-cms,bittner/django-cms,josjevv/django-cms,owers19856/django-cms,Livefyre/django-cms,nostalgiaz/django-cms,DylannCordel/django-cms,astagi/django-cms,timgraham/django-cms,keimlink/django-cms,SmithsonianEnterprises/django-cms,frnhr/django-cms,jsma/django-cms,saintbird/django-cms,memnonila/django-cms,wuzhihui1123/django-cms,frnhr/django-cms,webu/django-cms,rryan/django-cms,SofiaReis/django-cms,mkoistinen/django-cms,timgraham/django-cms,evildmp/django-cms,netzkolchose/django-cms,datakortet/django-cms,nostalgiaz/django-cms,360youlun/django-cms,vxsx/django-cms,wuzhihui1123/django-cms,nostalgiaz/django-cms,sephii/django-cms,vxsx/django-cms,netzkolchose/django-cms,divio/django-cms,yakky/django-cms,dhorelik/django-cms,wuzhihui1123/django-cms,MagicSolutions/django-cms,rryan/django-cms,sznekol/django-cms,bittner/django-cms,andyzsf/django-cms,benzkji/django-cms,qnub/django-cms,youprofit/django-cms,AlexProfi/django-cms,keimlink/django-cms,MagicSolutions/django-cms,andyzsf/django-cms,czpython/django-cms,SachaMPS/django-cms,youprofit/django-cms,qnub/django-cms,divio/django-cms,jeffreylu9/django-cms,360youlun/django-cms,donce/django-cms,jsma/django-cms,owers19856/django-cms,mkoistinen/django-cms,nimbis/django-cms,AlexProfi/django-cms,Livefyre/django-cms,sznekol/django-cms,netzkolchose/django-cms,360youlun/django-cms,donce/django-cms,mkoistinen/django-cms,kk9599/django-cms,FinalAngel/django-cms,robmagee/django-cms,philippze/django-cms,netzkolchose/django-cms,chkir/django-cms,wyg3958/django-cms,jrclaramunt/django-cms,frnhr/django-cms,stefanw/django-cms,leture/django-cms,jproffitt/django-cms,iddqd1/django-cms,czpython/django-cms,ScholzVolkmer/django-cms,Jaccorot/django-cms,leture/django-cms,wyg3958/django-cms,kk9599/django-cms,czpython/django-cms,chkir/django-cms,petecummings/django-
cms,rryan/django-cms,liuyisiyisi/django-cms,jproffitt/django-cms,sephii/django-cms,farhaadila/django-cms,isotoma/django-cms,jproffitt/django-cms,czpython/django-cms,leture/django-cms,petecummings/django-cms,SofiaReis/django-cms,ScholzVolkmer/django-cms,yakky/django-cms,rsalmaso/django-cms,takeshineshiro/django-cms,memnonila/django-cms,liuyisiyisi/django-cms,vstoykov/django-cms,robmagee/django-cms,chmberl/django-cms,webu/django-cms,SachaMPS/django-cms,intip/django-cms,FinalAngel/django-cms,rscnt/django-cms,robmagee/django-cms,rsalmaso/django-cms,ScholzVolkmer/django-cms,philippze/django-cms,datakortet/django-cms,stefanfoulis/django-cms,jsma/django-cms,iddqd1/django-cms,jsma/django-cms,Livefyre/django-cms,benzkji/django-cms,evildmp/django-cms,rsalmaso/django-cms,SofiaReis/django-cms,DylannCordel/django-cms,Jaccorot/django-cms,nimbis/django-cms,astagi/django-cms,vxsx/django-cms,memnonila/django-cms,saintbird/django-cms,saintbird/django-cms,rryan/django-cms,mkoistinen/django-cms,SmithsonianEnterprises/django-cms,chmberl/django-cms,jrclaramunt/django-cms,webu/django-cms,isotoma/django-cms,farhaadila/django-cms,cyberintruder/django-cms
|
Add django 1.7 migration for modified slot length
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='placeholder',
name='slot',
field=models.CharField(verbose_name='slot', max_length=255, editable=False, db_index=True),
),
]
|
<commit_before><commit_msg>Add django 1.7 migration for modified slot length<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='placeholder',
name='slot',
field=models.CharField(verbose_name='slot', max_length=255, editable=False, db_index=True),
),
]
|
Add django 1.7 migration for modified slot length# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='placeholder',
name='slot',
field=models.CharField(verbose_name='slot', max_length=255, editable=False, db_index=True),
),
]
|
<commit_before><commit_msg>Add django 1.7 migration for modified slot length<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='placeholder',
name='slot',
field=models.CharField(verbose_name='slot', max_length=255, editable=False, db_index=True),
),
]
|
|
c4cad482f38fb4dca00d00d8374833268dbc4545
|
testing/test_change_directory.py
|
testing/test_change_directory.py
|
import sys
import os
sys.path.insert(0, os.path.realpath('.'))
import pytest
from create_movie import change_directory
def test_change_directory_existing_directory(tmpdir):
newdir = tmpdir.mkdir('subdir')
with change_directory(str(newdir)):
assert os.getcwd() == os.path.realpath(str(newdir))
assert os.getcwd() != os.path.realpath(str(newdir))
def test_change_directory_non_existing_directory(tmpdir):
pathname = tmpdir.join('subdir')
assert not os.path.exists(str(pathname))
with pytest.raises(OSError) as err:
with change_directory(str(pathname)):
pass
assert str(pathname) in str(err)
|
Add tests for change directory
|
Add tests for change directory
|
Python
|
mit
|
NGTS/frame-movies,NGTS/frame-movies
|
Add tests for change directory
|
import sys
import os
sys.path.insert(0, os.path.realpath('.'))
import pytest
from create_movie import change_directory
def test_change_directory_existing_directory(tmpdir):
newdir = tmpdir.mkdir('subdir')
with change_directory(str(newdir)):
assert os.getcwd() == os.path.realpath(str(newdir))
assert os.getcwd() != os.path.realpath(str(newdir))
def test_change_directory_non_existing_directory(tmpdir):
pathname = tmpdir.join('subdir')
assert not os.path.exists(str(pathname))
with pytest.raises(OSError) as err:
with change_directory(str(pathname)):
pass
assert str(pathname) in str(err)
|
<commit_before><commit_msg>Add tests for change directory<commit_after>
|
import sys
import os
sys.path.insert(0, os.path.realpath('.'))
import pytest
from create_movie import change_directory
def test_change_directory_existing_directory(tmpdir):
newdir = tmpdir.mkdir('subdir')
with change_directory(str(newdir)):
assert os.getcwd() == os.path.realpath(str(newdir))
assert os.getcwd() != os.path.realpath(str(newdir))
def test_change_directory_non_existing_directory(tmpdir):
pathname = tmpdir.join('subdir')
assert not os.path.exists(str(pathname))
with pytest.raises(OSError) as err:
with change_directory(str(pathname)):
pass
assert str(pathname) in str(err)
|
Add tests for change directoryimport sys
import os
sys.path.insert(0, os.path.realpath('.'))
import pytest
from create_movie import change_directory
def test_change_directory_existing_directory(tmpdir):
newdir = tmpdir.mkdir('subdir')
with change_directory(str(newdir)):
assert os.getcwd() == os.path.realpath(str(newdir))
assert os.getcwd() != os.path.realpath(str(newdir))
def test_change_directory_non_existing_directory(tmpdir):
pathname = tmpdir.join('subdir')
assert not os.path.exists(str(pathname))
with pytest.raises(OSError) as err:
with change_directory(str(pathname)):
pass
assert str(pathname) in str(err)
|
<commit_before><commit_msg>Add tests for change directory<commit_after>import sys
import os
sys.path.insert(0, os.path.realpath('.'))
import pytest
from create_movie import change_directory
def test_change_directory_existing_directory(tmpdir):
newdir = tmpdir.mkdir('subdir')
with change_directory(str(newdir)):
assert os.getcwd() == os.path.realpath(str(newdir))
assert os.getcwd() != os.path.realpath(str(newdir))
def test_change_directory_non_existing_directory(tmpdir):
pathname = tmpdir.join('subdir')
assert not os.path.exists(str(pathname))
with pytest.raises(OSError) as err:
with change_directory(str(pathname)):
pass
assert str(pathname) in str(err)
|
|
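The two tests above pin down the contract of change_directory: switch into an existing directory for the duration of the with-block, restore the previous working directory afterwards, and let os.chdir's OSError propagate (with the path in its message) for a missing directory. A typical implementation satisfying that contract — a sketch, not necessarily the project's actual code — looks like:
import os
from contextlib import contextmanager

@contextmanager
def change_directory(path):
    original = os.getcwd()
    os.chdir(path)  # raises OSError if `path` does not exist
    try:
        yield
    finally:
        os.chdir(original)  # always restore the previous cwd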
304c6cf5314ae3050ca5f47d341323c03e75aea6
|
actions/cloudbolt_plugins/multi_user_approval/multi_group_approval.py
|
actions/cloudbolt_plugins/multi_user_approval/multi_group_approval.py
|
"""
Multiple Group Approval
~~~~~~~~~~~~~~~~~~~~~~~
Overrides CloudBolt's standard Order Approval workflow. This Orchestration
Action requires users from two separate Groups approve an Order before it
becomes Active.
Configuration
~~~~~~~~~~~~~
If the user that submitted this Order belongs to Group_A, this plugin requires
one of two scenarios:
1) Both Group_B and Group_C have "Approval Permission" for Group_A;
2) The approving users are Approvers in both Group_A and Group_B/Group_C.
Version Req.
~~~~~~~~~~~~
CloudBolt 8.8
"""
from accounts.models import Group
def run(order, *args, **kwargs):
approval_groups = Group.objects.filter(name__in=["Group_B", "Group_C"])
if len(order.approvers) < 2:
order.set_pending()
if not bool(approval_groups & order.approvers.groups):
order.set_pending()
return "SUCCESS", "", ""
|
Add Multiple Group Approval Orch Action
|
Add Multiple Group Approval Orch Action
[DEV-12140]
|
Python
|
apache-2.0
|
CloudBoltSoftware/cloudbolt-forge,CloudBoltSoftware/cloudbolt-forge,CloudBoltSoftware/cloudbolt-forge,CloudBoltSoftware/cloudbolt-forge
|
Add Multiple Group Approval Orch Action
[DEV-12140]
|
"""
Multiple Group Approval
~~~~~~~~~~~~~~~~~~~~~~~
Overrides CloudBolt's standard Order Approval workflow. This Orchestration
Action requires users from two separate Groups approve an Order before it
becomes Active.
Configuration
~~~~~~~~~~~~~
If the user that submitted this Order belongs to Group_A, this plugin requires
one of two scenarios:
1) Both Group_B and Group_C have "Approval Permission" for Group_A;
2) The approving users are Approvers in both Group_A and Group_B/Group_C.
Version Req.
~~~~~~~~~~~~
CloudBolt 8.8
"""
from accounts.models import Group
def run(order, *args, **kwargs):
approval_groups = Group.objects.filter(name__in=["Group_B", "Group_C"])
if len(order.approvers) < 2:
order.set_pending()
if not bool(approval_groups & order.approvers.groups):
order.set_pending()
return "SUCCESS", "", ""
|
<commit_before><commit_msg>Add Multiple Group Approval Orch Action
[DEV-12140]<commit_after>
|
"""
Multiple Group Approval
~~~~~~~~~~~~~~~~~~~~~~~
Overrides CloudBolt's standard Order Approval workflow. This Orchestration
Action requires users from two separate Groups approve an Order before it
becomes Active.
Configuration
~~~~~~~~~~~~~
If the user that submitted this Order belongs to Group_A, this plugin requires
one of two scenarios:
1) Both Group_B and Group_C have "Approval Permission" for Group_A;
2) The approving users are Approvers in both Group_A and Group_B/Group_C.
Version Req.
~~~~~~~~~~~~
CloudBolt 8.8
"""
from accounts.models import Group
def run(order, *args, **kwargs):
approval_groups = Group.objects.filter(name__in=["Group_B", "Group_C"])
if len(order.approvers) < 2:
order.set_pending()
if not bool(approval_groups & order.approvers.groups):
order.set_pending()
return "SUCCESS", "", ""
|
Add Multiple Group Approval Orch Action
[DEV-12140]"""
Multiple Group Approval
~~~~~~~~~~~~~~~~~~~~~~~
Overrides CloudBolt's standard Order Approval workflow. This Orchestration
Action requires users from two separate Groups approve an Order before it
becomes Active.
Configuration
~~~~~~~~~~~~~
If the user that submitted this Order belongs to Group_A, this plugin requires
one of two scenarios:
1) Both Group_B and Group_C have "Approval Permission" for Group_A;
2) The approving users are Approvers in both Group_A and Group_B/Group_C.
Version Req.
~~~~~~~~~~~~
CloudBolt 8.8
"""
from accounts.models import Group
def run(order, *args, **kwargs):
approval_groups = Group.objects.filter(name__in=["Group_B", "Group_C"])
if len(order.approvers) < 2:
order.set_pending()
if not bool(approval_groups & order.approvers.groups):
order.set_pending()
return "SUCCESS", "", ""
|
<commit_before><commit_msg>Add Multiple Group Approval Orch Action
[DEV-12140]<commit_after>"""
Multiple Group Approval
~~~~~~~~~~~~~~~~~~~~~~~
Overrides CloudBolt's standard Order Approval workflow. This Orchestration
Action requires users from two separate Groups approve an Order before it
becomes Active.
Configuration
~~~~~~~~~~~~~
If the user that submitted this Order belongs to Group_A, this plugin requires
one of two scenarios:
1) Both Group_B and Group_C have "Approval Permission" for Group_A;
2) The approving users are Approvers in both Group_A and Group_B/Group_C.
Version Req.
~~~~~~~~~~~~
CloudBolt 8.8
"""
from accounts.models import Group
def run(order, *args, **kwargs):
approval_groups = Group.objects.filter(name__in=["Group_B", "Group_C"])
if len(order.approvers) < 2:
order.set_pending()
if not bool(approval_groups & order.approvers.groups):
order.set_pending()
return "SUCCESS", "", ""
|
|
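The gate implemented by the plugin above reduces to two conditions: at least two approvers, and at least one approver group among Group_B/Group_C. Sketched with plain Python values (stand-ins for the CloudBolt order and group objects, not the real models):
required_groups = {"Group_B", "Group_C"}

def order_stays_pending(approver_count, approver_group_names):
    # Fewer than two approvers keeps the order pending...
    if approver_count < 2:
        return True
    # ...as does having no approver from Group_B or Group_C.
    return not (required_groups & set(approver_group_names))

print(order_stays_pending(1, ["Group_A"]))             # True (still pending)
print(order_stays_pending(2, ["Group_A", "Group_B"]))  # False (may activate)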
09f077b5bc78d5fe382fdb1b3317ee68b0f619b5
|
streamz/dataframe/tests/test_dataframe_utils.py
|
streamz/dataframe/tests/test_dataframe_utils.py
|
import pytest
from streamz.dataframe.utils import is_dataframe_like, is_series_like, is_index_like, get_base_frame_type
import pandas as pd
import numpy as np
def test_utils_is_dataframe_like():
test_utils_dataframe = pytest.importorskip('dask.dataframe.tests.test_utils_dataframe')
test_utils_dataframe.test_is_dataframe_like()
def test_utils_get_base_frame_type_pandas():
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, None)
df = pd.DataFrame({'x': np.arange(10, dtype=float), 'y': [1.0, 2.0] * 5})
assert pd.DataFrame == get_base_frame_type("DataFrame", is_dataframe_like, df)
with pytest.raises(TypeError):
get_base_frame_type("Series", is_series_like, df)
with pytest.raises(TypeError):
get_base_frame_type("Index", is_index_like, df)
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, df.x)
assert pd.Series == get_base_frame_type("Series", is_series_like, df.x)
with pytest.raises(TypeError):
get_base_frame_type("Index", is_index_like, df.x)
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, df.index)
with pytest.raises(TypeError):
get_base_frame_type("Series", is_series_like, df.index)
assert issubclass(get_base_frame_type("Index", is_index_like, df.index), pd.Index)
def test_utils_get_base_frame_type_cudf():
cudf = pytest.importorskip("cudf")
df = cudf.DataFrame({'x': np.arange(10, dtype=float), 'y': [1.0, 2.0] * 5})
assert cudf.DataFrame == get_base_frame_type("DataFrame", is_dataframe_like, df)
with pytest.raises(TypeError):
get_base_frame_type("Series", is_series_like, df)
with pytest.raises(TypeError):
get_base_frame_type("Index", is_index_like, df)
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, df.x)
assert cudf.Series == get_base_frame_type("Series", is_series_like, df.x)
with pytest.raises(TypeError):
get_base_frame_type("Index", is_index_like, df.x)
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, df.index)
with pytest.raises(TypeError):
get_base_frame_type("Series", is_series_like, df.index)
assert issubclass(get_base_frame_type("Index", is_index_like, df.index), cudf.Index)
|
Add tests for dataframe utils
|
Add tests for dataframe utils
|
Python
|
bsd-3-clause
|
mrocklin/streams
|
Add tests for dataframe utils
|
import pytest
from streamz.dataframe.utils import is_dataframe_like, is_series_like, is_index_like, get_base_frame_type
import pandas as pd
import numpy as np
def test_utils_is_dataframe_like():
test_utils_dataframe = pytest.importorskip('dask.dataframe.tests.test_utils_dataframe')
test_utils_dataframe.test_is_dataframe_like()
def test_utils_get_base_frame_type_pandas():
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, None)
df = pd.DataFrame({'x': np.arange(10, dtype=float), 'y': [1.0, 2.0] * 5})
assert pd.DataFrame == get_base_frame_type("DataFrame", is_dataframe_like, df)
with pytest.raises(TypeError):
get_base_frame_type("Series", is_series_like, df)
with pytest.raises(TypeError):
get_base_frame_type("Index", is_index_like, df)
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, df.x)
assert pd.Series == get_base_frame_type("Series", is_series_like, df.x)
with pytest.raises(TypeError):
get_base_frame_type("Index", is_index_like, df.x)
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, df.index)
with pytest.raises(TypeError):
get_base_frame_type("Series", is_series_like, df.index)
assert issubclass(get_base_frame_type("Index", is_index_like, df.index), pd.Index)
def test_utils_get_base_frame_type_cudf():
cudf = pytest.importorskip("cudf")
df = cudf.DataFrame({'x': np.arange(10, dtype=float), 'y': [1.0, 2.0] * 5})
assert cudf.DataFrame == get_base_frame_type("DataFrame", is_dataframe_like, df)
with pytest.raises(TypeError):
get_base_frame_type("Series", is_series_like, df)
with pytest.raises(TypeError):
get_base_frame_type("Index", is_index_like, df)
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, df.x)
assert cudf.Series == get_base_frame_type("Series", is_series_like, df.x)
with pytest.raises(TypeError):
get_base_frame_type("Index", is_index_like, df.x)
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, df.index)
with pytest.raises(TypeError):
get_base_frame_type("Series", is_series_like, df.index)
assert issubclass(get_base_frame_type("Index", is_index_like, df.index), cudf.Index)
|
<commit_before><commit_msg>Add tests for dataframe utils<commit_after>
|
import pytest
from streamz.dataframe.utils import is_dataframe_like, is_series_like, is_index_like, get_base_frame_type
import pandas as pd
import numpy as np
def test_utils_is_dataframe_like():
test_utils_dataframe = pytest.importorskip('dask.dataframe.tests.test_utils_dataframe')
test_utils_dataframe.test_is_dataframe_like()
def test_utils_get_base_frame_type_pandas():
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, None)
df = pd.DataFrame({'x': np.arange(10, dtype=float), 'y': [1.0, 2.0] * 5})
assert pd.DataFrame == get_base_frame_type("DataFrame", is_dataframe_like, df)
with pytest.raises(TypeError):
get_base_frame_type("Series", is_series_like, df)
with pytest.raises(TypeError):
get_base_frame_type("Index", is_index_like, df)
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, df.x)
assert pd.Series == get_base_frame_type("Series", is_series_like, df.x)
with pytest.raises(TypeError):
get_base_frame_type("Index", is_index_like, df.x)
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, df.index)
with pytest.raises(TypeError):
get_base_frame_type("Series", is_series_like, df.index)
assert issubclass(get_base_frame_type("Index", is_index_like, df.index), pd.Index)
def test_utils_get_base_frame_type_cudf():
cudf = pytest.importorskip("cudf")
df = cudf.DataFrame({'x': np.arange(10, dtype=float), 'y': [1.0, 2.0] * 5})
assert cudf.DataFrame == get_base_frame_type("DataFrame", is_dataframe_like, df)
with pytest.raises(TypeError):
get_base_frame_type("Series", is_series_like, df)
with pytest.raises(TypeError):
get_base_frame_type("Index", is_index_like, df)
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, df.x)
assert cudf.Series == get_base_frame_type("Series", is_series_like, df.x)
with pytest.raises(TypeError):
get_base_frame_type("Index", is_index_like, df.x)
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, df.index)
with pytest.raises(TypeError):
get_base_frame_type("Series", is_series_like, df.index)
assert issubclass(get_base_frame_type("Index", is_index_like, df.index), cudf.Index)
|
Add tests for dataframe utilsimport pytest
from streamz.dataframe.utils import is_dataframe_like, is_series_like, is_index_like, get_base_frame_type
import pandas as pd
import numpy as np
def test_utils_is_dataframe_like():
test_utils_dataframe = pytest.importorskip('dask.dataframe.tests.test_utils_dataframe')
test_utils_dataframe.test_is_dataframe_like()
def test_utils_get_base_frame_type_pandas():
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, None)
df = pd.DataFrame({'x': np.arange(10, dtype=float), 'y': [1.0, 2.0] * 5})
assert pd.DataFrame == get_base_frame_type("DataFrame", is_dataframe_like, df)
with pytest.raises(TypeError):
get_base_frame_type("Series", is_series_like, df)
with pytest.raises(TypeError):
get_base_frame_type("Index", is_index_like, df)
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, df.x)
assert pd.Series == get_base_frame_type("Series", is_series_like, df.x)
with pytest.raises(TypeError):
get_base_frame_type("Index", is_index_like, df.x)
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, df.index)
with pytest.raises(TypeError):
get_base_frame_type("Series", is_series_like, df.index)
assert issubclass(get_base_frame_type("Index", is_index_like, df.index), pd.Index)
def test_utils_get_base_frame_type_cudf():
cudf = pytest.importorskip("cudf")
df = cudf.DataFrame({'x': np.arange(10, dtype=float), 'y': [1.0, 2.0] * 5})
assert cudf.DataFrame == get_base_frame_type("DataFrame", is_dataframe_like, df)
with pytest.raises(TypeError):
get_base_frame_type("Series", is_series_like, df)
with pytest.raises(TypeError):
get_base_frame_type("Index", is_index_like, df)
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, df.x)
assert cudf.Series == get_base_frame_type("Series", is_series_like, df.x)
with pytest.raises(TypeError):
get_base_frame_type("Index", is_index_like, df.x)
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, df.index)
with pytest.raises(TypeError):
get_base_frame_type("Series", is_series_like, df.index)
assert issubclass(get_base_frame_type("Index", is_index_like, df.index), cudf.Index)
|
<commit_before><commit_msg>Add tests for dataframe utils<commit_after>import pytest
from streamz.dataframe.utils import is_dataframe_like, is_series_like, is_index_like, get_base_frame_type
import pandas as pd
import numpy as np
def test_utils_is_dataframe_like():
test_utils_dataframe = pytest.importorskip('dask.dataframe.tests.test_utils_dataframe')
test_utils_dataframe.test_is_dataframe_like()
def test_utils_get_base_frame_type_pandas():
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, None)
df = pd.DataFrame({'x': np.arange(10, dtype=float), 'y': [1.0, 2.0] * 5})
assert pd.DataFrame == get_base_frame_type("DataFrame", is_dataframe_like, df)
with pytest.raises(TypeError):
get_base_frame_type("Series", is_series_like, df)
with pytest.raises(TypeError):
get_base_frame_type("Index", is_index_like, df)
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, df.x)
assert pd.Series == get_base_frame_type("Series", is_series_like, df.x)
with pytest.raises(TypeError):
get_base_frame_type("Index", is_index_like, df.x)
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, df.index)
with pytest.raises(TypeError):
get_base_frame_type("Series", is_series_like, df.index)
assert issubclass(get_base_frame_type("Index", is_index_like, df.index), pd.Index)
def test_utils_get_base_frame_type_cudf():
cudf = pytest.importorskip("cudf")
df = cudf.DataFrame({'x': np.arange(10, dtype=float), 'y': [1.0, 2.0] * 5})
assert cudf.DataFrame == get_base_frame_type("DataFrame", is_dataframe_like, df)
with pytest.raises(TypeError):
get_base_frame_type("Series", is_series_like, df)
with pytest.raises(TypeError):
get_base_frame_type("Index", is_index_like, df)
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, df.x)
assert cudf.Series == get_base_frame_type("Series", is_series_like, df.x)
with pytest.raises(TypeError):
get_base_frame_type("Index", is_index_like, df.x)
with pytest.raises(TypeError):
get_base_frame_type("DataFrame", is_dataframe_like, df.index)
with pytest.raises(TypeError):
get_base_frame_type("Series", is_series_like, df.index)
assert issubclass(get_base_frame_type("Index", is_index_like, df.index), cudf.Index)
|
|
b212d7391c07457ed23a499d6fd113e08f173999
|
Algebra_Geometry/2d_homework.py
|
Algebra_Geometry/2d_homework.py
|
#!/usr/bin/python
import os
import numpy as np
import math
print "This script solves the excercises propossed in the Linear Algebra & 2D Geometry Lectures"
# --------------------------------------------------------------------------------------------------
# Length and Normalized Vector
v = np.array([4, 8, -4])
len_v = math.sqrt(pow(v[0], 2) + pow(v[1], 2) + pow(v[2], 2))
print "Length of v=" + str(v) + " is = " + str(len_v)
v_norm = v / len_v
print "Normalized vector" + str(v_norm) + ",length of normalized v =" + str(np.linalg.norm(v_norm))
# --------------------------------------------------------------------------------------------------
# Scalar and cross product
x1 = np.array([2, -4, 1])
x2 = np.array([2, 1, -2])
dot_product = x1[0] * x2[0] + x1[1] * x2[1] + x1[2] * x2[2]
print "The scalar product of x1= " + str(x1) + ",and x2= " + str(x2) + " ,is = " + str(dot_product)
dot_product = np.dot(x1, x2)
print "The scalar product of x1= " + str(x1) + ",and x2= " + str(x2) + " ,is = " + str(dot_product)
cross_product = np.cross(x1, x2)
print "The cross product of x1= " + str(x1) + ",and x2= " + str(x2) + " ,is = " + str(cross_product)
|
Add resolution for the 2dGeometry Homework
|
Add resolution for the 2dGeometry Homework
|
Python
|
mit
|
nachovizzo/AUTONAVx,nachovizzo/AUTONAVx,nachovizzo/AUTONAVx
|
Add resolution for the 2dGeometry Homework
|
#!/usr/bin/python
import os
import numpy as np
import math
print "This script solves the excercises propossed in the Linear Algebra & 2D Geometry Lectures"
# --------------------------------------------------------------------------------------------------
# Length and Normalized Vector
v = np.array([4, 8, -4])
len_v = math.sqrt(pow(v[0], 2) + pow(v[1], 2) + pow(v[2], 2))
print "Length of v=" + str(v) + " is = " + str(len_v)
v_norm = v / len_v
print "Normalized vector" + str(v_norm) + ",length of normalized v =" + str(np.linalg.norm(v_norm))
# --------------------------------------------------------------------------------------------------
# Scalar and cross product
x1 = np.array([2, -4, 1])
x2 = np.array([2, 1, -2])
dot_product = x1[0] * x2[0] + x1[1] * x2[1] + x1[2] * x2[2]
print "The scalar product of x1= " + str(x1) + ",and x2= " + str(x2) + " ,is = " + str(dot_product)
dot_product = np.dot(x1, x2)
print "The scalar product of x1= " + str(x1) + ",and x2= " + str(x2) + " ,is = " + str(dot_product)
cross_product = np.cross(x1, x2)
print "The cross product of x1= " + str(x1) + ",and x2= " + str(x2) + " ,is = " + str(cross_product)
|
<commit_before><commit_msg>Add resolution for the 2dGeometry Homework<commit_after>
|
#!/usr/bin/python
import os
import numpy as np
import math
print "This script solves the excercises propossed in the Linear Algebra & 2D Geometry Lectures"
# --------------------------------------------------------------------------------------------------
# Length and Normalized Vector
v = np.array([4, 8, -4])
len_v = math.sqrt(pow(v[0], 2) + pow(v[1], 2) + pow(v[2], 2))
print "Length of v=" + str(v) + " is = " + str(len_v)
v_norm = v / len_v
print "Normalized vector" + str(v_norm) + ",length of normalized v =" + str(np.linalg.norm(v_norm))
# --------------------------------------------------------------------------------------------------
# Scalar and cross product
x1 = np.array([2, -4, 1])
x2 = np.array([2, 1, -2])
dot_product = x1[0] * x2[0] + x1[1] * x2[1] + x1[2] * x2[2]
print "The scalar product of x1= " + str(x1) + ",and x2= " + str(x2) + " ,is = " + str(dot_product)
dot_product = np.dot(x1, x2)
print "The scalar product of x1= " + str(x1) + ",and x2= " + str(x2) + " ,is = " + str(dot_product)
cross_product = np.cross(x1, x2)
print "The cross product of x1= " + str(x1) + ",and x2= " + str(x2) + " ,is = " + str(cross_product)
|
Add resolution for the 2dGeometry Homework#!/usr/bin/python
import os
import numpy as np
import math
print "This script solves the excercises propossed in the Linear Algebra & 2D Geometry Lectures"
# --------------------------------------------------------------------------------------------------
# Length and Normalized Vector
v = np.array([4, 8, -4])
len_v = math.sqrt(pow(v[0], 2) + pow(v[1], 2) + pow(v[2], 2))
print "Length of v=" + str(v) + " is = " + str(len_v)
v_norm = v / len_v
print "Normalized vector" + str(v_norm) + ",length of normalized v =" + str(np.linalg.norm(v_norm))
# --------------------------------------------------------------------------------------------------
# Scalar and cross product
x1 = np.array([2, -4, 1])
x2 = np.array([2, 1, -2])
dot_product = x1[0] * x2[0] + x1[1] * x2[1] + x1[2] * x2[2]
print "The scalar product of x1= " + str(x1) + ",and x2= " + str(x2) + " ,is = " + str(dot_product)
dot_product = np.dot(x1, x2)
print "The scalar product of x1= " + str(x1) + ",and x2= " + str(x2) + " ,is = " + str(dot_product)
cross_product = np.cross(x1, x2)
print "The cross product of x1= " + str(x1) + ",and x2= " + str(x2) + " ,is = " + str(cross_product)
|
<commit_before><commit_msg>Add resolution for the 2dGeometry Homework<commit_after>#!/usr/bin/python
import os
import numpy as np
import math
print "This script solves the excercises propossed in the Linear Algebra & 2D Geometry Lectures"
# --------------------------------------------------------------------------------------------------
# Length and Normalized Vector
v = np.array([4, 8, -4])
len_v = math.sqrt(pow(v[0], 2) + pow(v[1], 2) + pow(v[2], 2))
print "Length of v=" + str(v) + " is = " + str(len_v)
v_norm = v / len_v
print "Normalized vector" + str(v_norm) + ",length of normalized v =" + str(np.linalg.norm(v_norm))
# --------------------------------------------------------------------------------------------------
# Scalar and cross product
x1 = np.array([2, -4, 1])
x2 = np.array([2, 1, -2])
dot_product = x1[0] * x2[0] + x1[1] * x2[1] + x1[2] * x2[2]
print "The scalar product of x1= " + str(x1) + ",and x2= " + str(x2) + " ,is = " + str(dot_product)
dot_product = np.dot(x1, x2)
print "The scalar product of x1= " + str(x1) + ",and x2= " + str(x2) + " ,is = " + str(dot_product)
cross_product = np.cross(x1, x2)
print "The cross product of x1= " + str(x1) + ",and x2= " + str(x2) + " ,is = " + str(cross_product)
|
|
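As a hand check of the numbers the homework script above prints: for v = (4, 8, -4) the length is sqrt(4^2 + 8^2 + (-4)^2) = sqrt(96) ≈ 9.798, so v/|v| ≈ (0.408, 0.816, -0.408) has unit length; for x1 = (2, -4, 1) and x2 = (2, 1, -2) the scalar product is 2*2 + (-4)*1 + 1*(-2) = -2, and the cross product is ((-4)(-2) - 1*1, 1*2 - 2*(-2), 2*1 - (-4)*2) = (7, 6, 10).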
8a797d3abd43a96df1d4dab8ae52cc21da3930a4
|
compatibility_server/loadtest/locustfile.py
|
compatibility_server/loadtest/locustfile.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform a load test on the compatibility server. Usage:
$ pip install locustio
$ locust --host=http://104.197.8.72
"""
import random
import urllib.parse
import locust
PYTHON2_PACKAGES = [
'apache-beam[gcp]',
'google-cloud-bigtable',
'google-cloud-dns',
'google-cloud-vision',
'tensorboard',
'tensorflow',
]
PYTHON3_PACKAGES = [
'google-cloud-bigtable',
'google-cloud-dns',
'google-cloud-vision',
'tensorboard',
'tensorflow',
]
class CompatibilityCheck(locust.TaskSet):
@locust.task
def single_python2(self):
query = urllib.parse.urlencode(
{'python-version': '2',
'package': random.choice(PYTHON2_PACKAGES)})
self.client.get('/?%s' % query)
@locust.task
def single_python3(self):
query = urllib.parse.urlencode(
{'python-version': '3',
'package': random.choice(PYTHON3_PACKAGES)})
self.client.get('/?%s' % query)
@locust.task
def double_python2(self):
package1 = random.choice(PYTHON2_PACKAGES)
package2 = random.choice(list(set(PYTHON2_PACKAGES) - {package1}))
query = urllib.parse.urlencode([('python-version', '2'),
('package', package1),
('package', package2)])
self.client.get('/?%s' % query)
@locust.task
def double_python3(self):
package1 = random.choice(PYTHON3_PACKAGES)
package2 = random.choice(list(set(PYTHON3_PACKAGES) - {package1}))
query = urllib.parse.urlencode([('python-version', '3'),
('package', package1),
('package', package2)])
self.client.get('/?%s' % query)
class CompatibilityChecker(locust.HttpLocust):
task_set = CompatibilityCheck
min_wait = 0
max_wait = 0
|
Create a simple load test skeleton
|
Create a simple load test skeleton
|
Python
|
apache-2.0
|
GoogleCloudPlatform/cloud-opensource-python,GoogleCloudPlatform/cloud-opensource-python,GoogleCloudPlatform/cloud-opensource-python,GoogleCloudPlatform/cloud-opensource-python
|
Create a simple load test skeleton
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform a load test on the compatibility server. Usage:
$ pip install locustio
$ locust --host=http://104.197.8.72
"""
import random
import urllib.parse
import locust
PYTHON2_PACKAGES = [
'apache-beam[gcp]',
'google-cloud-bigtable',
'google-cloud-dns',
'google-cloud-vision',
'tensorboard',
'tensorflow',
]
PYTHON3_PACKAGES = [
'google-cloud-bigtable',
'google-cloud-dns',
'google-cloud-vision',
'tensorboard',
'tensorflow',
]
class CompatibilityCheck(locust.TaskSet):
@locust.task
def single_python2(self):
query = urllib.parse.urlencode(
{'python-version': '2',
'package': random.choice(PYTHON2_PACKAGES)})
self.client.get('/?%s' % query)
@locust.task
def single_python3(self):
query = urllib.parse.urlencode(
{'python-version': '3',
'package': random.choice(PYTHON3_PACKAGES)})
self.client.get('/?%s' % query)
@locust.task
def double_python2(self):
package1 = random.choice(PYTHON2_PACKAGES)
package2 = random.choice(list(set(PYTHON2_PACKAGES) - {package1}))
query = urllib.parse.urlencode([('python-version', '2'),
('package', package1),
('package', package2)])
self.client.get('/?%s' % query)
@locust.task
def double_python3(self):
package1 = random.choice(PYTHON3_PACKAGES)
package2 = random.choice(list(set(PYTHON3_PACKAGES) - {package1}))
query = urllib.parse.urlencode([('python-version', '3'),
('package', package1),
('package', package2)])
self.client.get('/?%s' % query)
class CompatibilityChecker(locust.HttpLocust):
task_set = CompatibilityCheck
min_wait = 0
max_wait = 0
|
<commit_before><commit_msg>Create a simple load test skeleton<commit_after>
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform a load test on the compatibility server. Usage:
$ pip install locustio
$ locust --host=http://104.197.8.72
"""
import random
import urllib.parse
import locust
PYTHON2_PACKAGES = [
'apache-beam[gcp]',
'google-cloud-bigtable',
'google-cloud-dns',
'google-cloud-vision',
'tensorboard',
'tensorflow',
]
PYTHON3_PACKAGES = [
'google-cloud-bigtable',
'google-cloud-dns',
'google-cloud-vision',
'tensorboard',
'tensorflow',
]
class CompatibilityCheck(locust.TaskSet):
@locust.task
def single_python2(self):
query = urllib.parse.urlencode(
{'python-version': '2',
'package': random.choice(PYTHON2_PACKAGES)})
self.client.get('/?%s' % query)
@locust.task
def single_python3(self):
query = urllib.parse.urlencode(
{'python-version': '3',
'package': random.choice(PYTHON3_PACKAGES)})
self.client.get('/?%s' % query)
@locust.task
def double_python2(self):
package1 = random.choice(PYTHON2_PACKAGES)
package2 = random.choice(list(set(PYTHON2_PACKAGES) - {package1}))
query = urllib.parse.urlencode([('python-version', '2'),
('package', package1),
('package', package2)])
self.client.get('/?%s' % query)
@locust.task
def double_python3(self):
package1 = random.choice(PYTHON3_PACKAGES)
package2 = random.choice(list(set(PYTHON3_PACKAGES) - {package1}))
query = urllib.parse.urlencode([('python-version', '3'),
('package', package1),
('package', package2)])
self.client.get('/?%s' % query)
class CompatibilityChecker(locust.HttpLocust):
task_set = CompatibilityCheck
min_wait = 0
max_wait = 0
|
Create a simple load test skeleton# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform a load test on the compatibility server. Usage:
$ pip install locustio
$ locust --host=http://104.197.8.72
"""
import random
import urllib.parse
import locust
PYTHON2_PACKAGES = [
'apache-beam[gcp]',
'google-cloud-bigtable',
'google-cloud-dns',
'google-cloud-vision',
'tensorboard',
'tensorflow',
]
PYTHON3_PACKAGES = [
'google-cloud-bigtable',
'google-cloud-dns',
'google-cloud-vision',
'tensorboard',
'tensorflow',
]
class CompatibilityCheck(locust.TaskSet):
@locust.task
def single_python2(self):
query = urllib.parse.urlencode(
{'python-version': '2',
'package': random.choice(PYTHON2_PACKAGES)})
self.client.get('/?%s' % query)
@locust.task
def single_python3(self):
query = urllib.parse.urlencode(
{'python-version': '3',
'package': random.choice(PYTHON3_PACKAGES)})
self.client.get('/?%s' % query)
@locust.task
def double_python2(self):
package1 = random.choice(PYTHON2_PACKAGES)
package2 = random.choice(list(set(PYTHON2_PACKAGES) - {package1}))
query = urllib.parse.urlencode([('python-version', '2'),
('package', package1),
('package', package2)])
self.client.get('/?%s' % query)
@locust.task
def double_python3(self):
package1 = random.choice(PYTHON3_PACKAGES)
package2 = random.choice(list(set(PYTHON3_PACKAGES) - {package1}))
query = urllib.parse.urlencode([('python-version', '3'),
('package', package1),
('package', package2)])
self.client.get('/?%s' % query)
class CompatibilityChecker(locust.HttpLocust):
task_set = CompatibilityCheck
min_wait = 0
max_wait = 0
|
<commit_before><commit_msg>Create a simple load test skeleton<commit_after># Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform a load test on the compatibility server. Usage:
$ pip install locustio
$ locust --host=http://104.197.8.72
"""
import random
import urllib.parse
import locust
PYTHON2_PACKAGES = [
'apache-beam[gcp]',
'google-cloud-bigtable',
'google-cloud-dns',
'google-cloud-vision',
'tensorboard',
'tensorflow',
]
PYTHON3_PACKAGES = [
'google-cloud-bigtable',
'google-cloud-dns',
'google-cloud-vision',
'tensorboard',
'tensorflow',
]
class CompatibilityCheck(locust.TaskSet):
@locust.task
def single_python2(self):
query = urllib.parse.urlencode(
{'python-version': '2',
'package': random.choice(PYTHON2_PACKAGES)})
self.client.get('/?%s' % query)
@locust.task
def single_python3(self):
query = urllib.parse.urlencode(
{'python-version': '3',
'package': random.choice(PYTHON3_PACKAGES)})
self.client.get('/?%s' % query)
@locust.task
def double_python2(self):
package1 = random.choice(PYTHON2_PACKAGES)
package2 = random.choice(list(set(PYTHON2_PACKAGES) - {package1}))
query = urllib.parse.urlencode([('python-version', '2'),
('package', package1),
('package', package2)])
self.client.get('/?%s' % query)
@locust.task
def double_python3(self):
package1 = random.choice(PYTHON3_PACKAGES)
package2 = random.choice(list(set(PYTHON3_PACKAGES) - {package1}))
query = urllib.parse.urlencode([('python-version', '3'),
('package', package1),
('package', package2)])
self.client.get('/?%s' % query)
class CompatibilityChecker(locust.HttpLocust):
task_set = CompatibilityCheck
min_wait = 0
max_wait = 0
|
|
55a0797aa70ef91ce1ad9d29f313fa58a1db761d
|
read_forms.py
|
read_forms.py
|
import csv
import argparse
import itertools
from thermo_utils import csv_row_writer, read_csv_rows
# Read input/output arguments
parser = argparse.ArgumentParser()
parser.add_argument('-ip','--inputpoints',required=True)
parser.add_argument('-ie','--inputeqns',required=True)
parser.add_argument('-v','--version',required=False)
args = parser.parse_args()
print('Input points file: %s' % args.inputpoints)
print('Input equations file: %s' % args.inputeqns)
# Read in points file
filePts = read_csv_rows(args.inputpoints)
# Read in equations file
fileEqns = read_csv_rows(args.inputeqns)
# Create property vectors from points file
for ptsRow in range(0,len(filePts)):
filePts[ptsRow]=filePts[ptsRow].split(",")
if ptsRow>0:
if filePts[ptsRow][0]=='mdot':
mdot_v=filePts[ptsRow]
elif filePts[ptsRow][0]=='h':
h_v=filePts[ptsRow]
elif filePts[ptsRow][0]=='T':
T_v=filePts[ptsRow]
elif filePts[ptsRow][0]=='P':
P_v=filePts[ptsRow]
elif filePts[ptsRow][0]=='s':
s_v=filePts[ptsRow]
print(mdot_v)
print(h_v)
print(T_v)
print(P_v)
print(s_v)
|
Create file to read in parameter vectors from user forms
|
Create file to read in parameter vectors from user forms
|
Python
|
mit
|
ndebuhr/thermo-state-solver,ndebuhr/thermo-state-solver
|
Create file to read in parameter vectors from user forms
|
import csv
import argparse
import itertools
from thermo_utils import csv_row_writer, read_csv_rows
# Read input/output arguments
parser = argparse.ArgumentParser()
parser.add_argument('-ip','--inputpoints',required=True)
parser.add_argument('-ie','--inputeqns',required=True)
parser.add_argument('-v','--version',required=False)
args = parser.parse_args()
print('Input points file: %s' % args.inputpoints)
print('Input equations file: %s' % args.inputeqns)
# Read in points file
filePts = read_csv_rows(args.inputpoints)
# Read in equations file
fileEqns = read_csv_rows(args.inputeqns)
# Create property vectors from points file
for ptsRow in range(0,len(filePts)):
filePts[ptsRow]=filePts[ptsRow].split(",")
if ptsRow>0:
if filePts[ptsRow][0]=='mdot':
mdot_v=filePts[ptsRow]
elif filePts[ptsRow][0]=='h':
h_v=filePts[ptsRow]
elif filePts[ptsRow][0]=='T':
T_v=filePts[ptsRow]
elif filePts[ptsRow][0]=='P':
P_v=filePts[ptsRow]
elif filePts[ptsRow][0]=='s':
s_v=filePts[ptsRow]
print(mdot_v)
print(h_v)
print(T_v)
print(P_v)
print(s_v)
|
<commit_before><commit_msg>Create file to read in parameter vectors from user forms<commit_after>
|
import csv
import argparse
import itertools
from thermo_utils import csv_row_writer, read_csv_rows
# Read input/output arguments
parser = argparse.ArgumentParser()
parser.add_argument('-ip','--inputpoints',required=True)
parser.add_argument('-ie','--inputeqns',required=True)
parser.add_argument('-v','--version',required=False)
args = parser.parse_args()
print('Input points file: %s' % args.inputpoints)
print('Input equations file: %s' % args.inputeqns)
# Read in points file
filePts = read_csv_rows(args.inputpoints)
# Read in equations file
fileEqns = read_csv_rows(args.inputeqns)
# Create property vectors from points file
for ptsRow in range(0,len(filePts)):
filePts[ptsRow]=filePts[ptsRow].split(",")
if ptsRow>0:
if filePts[ptsRow][0]=='mdot':
mdot_v=filePts[ptsRow]
elif filePts[ptsRow][0]=='h':
h_v=filePts[ptsRow]
elif filePts[ptsRow][0]=='T':
T_v=filePts[ptsRow]
elif filePts[ptsRow][0]=='P':
P_v=filePts[ptsRow]
elif filePts[ptsRow][0]=='s':
s_v=filePts[ptsRow]
print(mdot_v)
print(h_v)
print(T_v)
print(P_v)
print(s_v)
|
Create file to read in parameter vectors from user formsimport csv
import argparse
import itertools
from thermo_utils import csv_row_writer, read_csv_rows
# Read input/output arguments
parser = argparse.ArgumentParser()
parser.add_argument('-ip','--inputpoints',required=True)
parser.add_argument('-ie','--inputeqns',required=True)
parser.add_argument('-v','--version',required=False)
args = parser.parse_args()
print('Input points file: %s' % args.inputpoints)
print('Input equations file: %s' % args.inputeqns)
# Read in points file
filePts = read_csv_rows(args.inputpoints)
# Read in equations file
fileEqns = read_csv_rows(args.inputeqns)
# Create property vectors from points file
for ptsRow in range(0,len(filePts)):
filePts[ptsRow]=filePts[ptsRow].split(",")
if ptsRow>0:
if filePts[ptsRow][0]=='mdot':
mdot_v=filePts[ptsRow]
elif filePts[ptsRow][0]=='h':
h_v=filePts[ptsRow]
elif filePts[ptsRow][0]=='T':
T_v=filePts[ptsRow]
elif filePts[ptsRow][0]=='P':
P_v=filePts[ptsRow]
elif filePts[ptsRow][0]=='s':
s_v=filePts[ptsRow]
print(mdot_v)
print(h_v)
print(T_v)
print(P_v)
print(s_v)
|
<commit_before><commit_msg>Create file to read in parameter vectors from user forms<commit_after>import csv
import argparse
import itertools
from thermo_utils import csv_row_writer, read_csv_rows
# Read input/output arguments
parser = argparse.ArgumentParser()
parser.add_argument('-ip','--inputpoints',required=True)
parser.add_argument('-ie','--inputeqns',required=True)
parser.add_argument('-v','--version',required=False)
args = parser.parse_args()
print('Input points file: %s' % args.inputpoints)
print('Input equations file: %s' % args.inputeqns)
# Read in points file
filePts = read_csv_rows(args.inputpoints)
# Read in equations file
fileEqns = read_csv_rows(args.inputeqns)
# Create property vectors from points file
for ptsRow in range(0,len(filePts)):
filePts[ptsRow]=filePts[ptsRow].split(",")
if ptsRow>0:
if filePts[ptsRow][0]=='mdot':
mdot_v=filePts[ptsRow]
elif filePts[ptsRow][0]=='h':
h_v=filePts[ptsRow]
elif filePts[ptsRow][0]=='T':
T_v=filePts[ptsRow]
elif filePts[ptsRow][0]=='P':
P_v=filePts[ptsRow]
elif filePts[ptsRow][0]=='s':
s_v=filePts[ptsRow]
print(mdot_v)
print(h_v)
print(T_v)
print(P_v)
print(s_v)
|
|
1a5806d2a69553d9116836cbc8819ac60756e462
|
test_files_by_user.py
|
test_files_by_user.py
|
import random
from dmp import dmp
users = ["adam", "ben", "chris", "denis", "eric"]
da = dmp()
for u in users:
results = da.get_files_by_user(u)
print u, len(results)
|
Test script for requesting by user
|
Test script for requesting by user
|
Python
|
apache-2.0
|
Multiscale-Genomics/mg-dm-api,Multiscale-Genomics/mg-dm-api
|
Test script for requesting by user
|
import random
from dmp import dmp
users = ["adam", "ben", "chris", "denis", "eric"]
da = dmp()
for u in users:
results = da.get_files_by_user(u)
print u, len(results)
|
<commit_before><commit_msg>Test script for requesting by user<commit_after>
|
import random
from dmp import dmp
users = ["adam", "ben", "chris", "denis", "eric"]
da = dmp()
for u in users:
results = da.get_files_by_user(u)
print u, len(results)
|
Test script for requesting by userimport random
from dmp import dmp
users = ["adam", "ben", "chris", "denis", "eric"]
da = dmp()
for u in users:
results = da.get_files_by_user(u)
print u, len(results)
|
<commit_before><commit_msg>Test script for requesting by user<commit_after>import random
from dmp import dmp
users = ["adam", "ben", "chris", "denis", "eric"]
da = dmp()
for u in users:
results = da.get_files_by_user(u)
print u, len(results)
|
|
99b7ff50e77483f7a2ec0d3ca99b9cc74d426edf
|
tests/handler_test.py
|
tests/handler_test.py
|
import requests
from fixtures import test_server
def test(test_server):
response = requests.get(test_server('/'))
assert response.status_code == 200
json = response.json()
assert 'status' in json
assert json['status'] == 'ok'
|
Add test for the Handler
|
Add test for the Handler
|
Python
|
mit
|
piotrekw/tornado-pytest
|
Add test for the Handler
|
import requests
from fixtures import test_server
def test(test_server):
response = requests.get(test_server('/'))
assert response.status_code == 200
json = response.json()
assert 'status' in json
assert json['status'] == 'ok'
|
<commit_before><commit_msg>Add test for the Handler<commit_after>
|
import requests
from fixtures import test_server
def test(test_server):
response = requests.get(test_server('/'))
assert response.status_code == 200
json = response.json()
assert 'status' in json
assert json['status'] == 'ok'
|
Add test for the Handlerimport requests
from fixtures import test_server
def test(test_server):
response = requests.get(test_server('/'))
assert response.status_code == 200
json = response.json()
assert 'status' in json
assert json['status'] == 'ok'
|
<commit_before><commit_msg>Add test for the Handler<commit_after>import requests
from fixtures import test_server
def test(test_server):
response = requests.get(test_server('/'))
assert response.status_code == 200
json = response.json()
assert 'status' in json
assert json['status'] == 'ok'
|
|
4473354da6d029964258996067adbd97eb06baa8
|
py/degree-of-an-array.py
|
py/degree-of-an-array.py
|
from collections import Counter
class Solution(object):
def findShortestSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
first = dict()
last = dict()
c = Counter()
m = 0
possible_values = []
for i, v in enumerate(nums):
first.setdefault(v, i)
last[v] = i
c[v] += 1
if c[v] == m:
possible_values.append(v)
elif c[v] > m:
possible_values = [v]
m = c[v]
return min(last[x] - first[x] + 1 for x in possible_values)
|
Add py solution for 697. Degree of an Array
|
Add py solution for 697. Degree of an Array
697. Degree of an Array: https://leetcode.com/problems/degree-of-an-array/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 697. Degree of an Array
697. Degree of an Array: https://leetcode.com/problems/degree-of-an-array/
|
from collections import Counter
class Solution(object):
def findShortestSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
first = dict()
last = dict()
c = Counter()
m = 0
possible_values = []
for i, v in enumerate(nums):
first.setdefault(v, i)
last[v] = i
c[v] += 1
if c[v] == m:
possible_values.append(v)
elif c[v] > m:
possible_values = [v]
m = c[v]
return min(last[x] - first[x] + 1 for x in possible_values)
|
<commit_before><commit_msg>Add py solution for 697. Degree of an Array
697. Degree of an Array: https://leetcode.com/problems/degree-of-an-array/<commit_after>
|
from collections import Counter
class Solution(object):
def findShortestSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
first = dict()
last = dict()
c = Counter()
m = 0
possible_values = []
for i, v in enumerate(nums):
first.setdefault(v, i)
last[v] = i
c[v] += 1
if c[v] == m:
possible_values.append(v)
elif c[v] > m:
possible_values = [v]
m = c[v]
return min(last[x] - first[x] + 1 for x in possible_values)
|
Add py solution for 697. Degree of an Array
697. Degree of an Array: https://leetcode.com/problems/degree-of-an-array/from collections import Counter
class Solution(object):
def findShortestSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
first = dict()
last = dict()
c = Counter()
m = 0
possible_values = []
for i, v in enumerate(nums):
first.setdefault(v, i)
last[v] = i
c[v] += 1
if c[v] == m:
possible_values.append(v)
elif c[v] > m:
possible_values = [v]
m = c[v]
return min(last[x] - first[x] + 1 for x in possible_values)
|
<commit_before><commit_msg>Add py solution for 697. Degree of an Array
697. Degree of an Array: https://leetcode.com/problems/degree-of-an-array/<commit_after>from collections import Counter
class Solution(object):
def findShortestSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
first = dict()
last = dict()
c = Counter()
m = 0
possible_values = []
for i, v in enumerate(nums):
first.setdefault(v, i)
last[v] = i
c[v] += 1
if c[v] == m:
possible_values.append(v)
elif c[v] > m:
possible_values = [v]
m = c[v]
return min(last[x] - first[x] + 1 for x in possible_values)
|
|
35aafe82f73c32e2d3db76d2fb3b988a3c4ade01
|
tests/test_project.py
|
tests/test_project.py
|
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import yaml
from unittest import TestCase
from project_generator.project import Project
from project_generator.workspace import PgenWorkspace
project_1_yaml = {
'common': {
'sources': ['sources/main.cpp'],
'includes': ['includes/header1.h'],
'macros': ['MACRO1', 'MACRO2'],
'target': ['target1'],
'core': ['core1'],
'tools_supported': ['iar_arm', 'uvision', 'coide', 'unknown'],
'output_type': ['exe'],
'debugger': ['debugger_1'],
}
}
projects_yaml = {
'projects': {
'project_1' : ['test_workspace/project_1.yaml']
},
'settings' : {
'definitions_dir': ['notpg/path/somewhere'],
'export_dir': ['not_generated_projects']
}
}
class TestProject(TestCase):
"""test things related to the Project class"""
def setUp(self):
if not os.path.exists('test_workspace'):
os.makedirs('test_workspace')
# write project file
with open(os.path.join(os.getcwd(), 'test_workspace/project_1.yaml'), 'wt') as f:
f.write(yaml.dump(project_1_yaml, default_flow_style=False))
# write projects file
with open(os.path.join(os.getcwd(), 'test_workspace/projects.yaml'), 'wt') as f:
f.write(yaml.dump(projects_yaml, default_flow_style=False))
self.project = Project('project_1',['test_workspace/project_1.yaml'], PgenWorkspace('test_workspace/projects.yaml'))
def tearDown(self):
# remove created directory
shutil.rmtree('test_workspace', ignore_errors=True)
def test_name(self):
assert self.project.name == 'project_1'
|
Test project - basic template for project to be tested
|
Test project - basic template for project to be tested
|
Python
|
apache-2.0
|
sarahmarshy/project_generator,molejar/project_generator,hwfwgrp/project_generator,0xc0170/project_generator,project-generator/project_generator,ohagendorf/project_generator
|
Test project - basic template for project to be tested
|
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import yaml
from unittest import TestCase
from project_generator.project import Project
from project_generator.workspace import PgenWorkspace
project_1_yaml = {
'common': {
'sources': ['sources/main.cpp'],
'includes': ['includes/header1.h'],
'macros': ['MACRO1', 'MACRO2'],
'target': ['target1'],
'core': ['core1'],
'tools_supported': ['iar_arm', 'uvision', 'coide', 'unknown'],
'output_type': ['exe'],
'debugger': ['debugger_1'],
}
}
projects_yaml = {
'projects': {
'project_1' : ['test_workspace/project_1.yaml']
},
'settings' : {
'definitions_dir': ['notpg/path/somewhere'],
'export_dir': ['not_generated_projects']
}
}
class TestProject(TestCase):
"""test things related to the Project class"""
def setUp(self):
if not os.path.exists('test_workspace'):
os.makedirs('test_workspace')
# write project file
with open(os.path.join(os.getcwd(), 'test_workspace/project_1.yaml'), 'wt') as f:
f.write(yaml.dump(project_1_yaml, default_flow_style=False))
# write projects file
with open(os.path.join(os.getcwd(), 'test_workspace/projects.yaml'), 'wt') as f:
f.write(yaml.dump(projects_yaml, default_flow_style=False))
self.project = Project('project_1',['test_workspace/project_1.yaml'], PgenWorkspace('test_workspace/projects.yaml'))
def tearDown(self):
# remove created directory
shutil.rmtree('test_workspace', ignore_errors=True)
def test_name(self):
assert self.project.name == 'project_1'
|
<commit_before><commit_msg>Test project - basic template for project to be tested<commit_after>
|
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import yaml
from unittest import TestCase
from project_generator.project import Project
from project_generator.workspace import PgenWorkspace
project_1_yaml = {
'common': {
'sources': ['sources/main.cpp'],
'includes': ['includes/header1.h'],
'macros': ['MACRO1', 'MACRO2'],
'target': ['target1'],
'core': ['core1'],
'tools_supported': ['iar_arm', 'uvision', 'coide', 'unknown'],
'output_type': ['exe'],
'debugger': ['debugger_1'],
}
}
projects_yaml = {
'projects': {
'project_1' : ['test_workspace/project_1.yaml']
},
'settings' : {
'definitions_dir': ['notpg/path/somewhere'],
'export_dir': ['not_generated_projects']
}
}
class TestProject(TestCase):
"""test things related to the Project class"""
def setUp(self):
if not os.path.exists('test_workspace'):
os.makedirs('test_workspace')
# write project file
with open(os.path.join(os.getcwd(), 'test_workspace/project_1.yaml'), 'wt') as f:
f.write(yaml.dump(project_1_yaml, default_flow_style=False))
# write projects file
with open(os.path.join(os.getcwd(), 'test_workspace/projects.yaml'), 'wt') as f:
f.write(yaml.dump(projects_yaml, default_flow_style=False))
self.project = Project('project_1',['test_workspace/project_1.yaml'], PgenWorkspace('test_workspace/projects.yaml'))
def tearDown(self):
# remove created directory
shutil.rmtree('test_workspace', ignore_errors=True)
def test_name(self):
assert self.project.name == 'project_1'
|
Test project - basic template for project to be tested# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import yaml
from unittest import TestCase
from project_generator.project import Project
from project_generator.workspace import PgenWorkspace
project_1_yaml = {
'common': {
'sources': ['sources/main.cpp'],
'includes': ['includes/header1.h'],
'macros': ['MACRO1', 'MACRO2'],
'target': ['target1'],
'core': ['core1'],
'tools_supported': ['iar_arm', 'uvision', 'coide', 'unknown'],
'output_type': ['exe'],
'debugger': ['debugger_1'],
}
}
projects_yaml = {
'projects': {
'project_1' : ['test_workspace/project_1.yaml']
},
'settings' : {
'definitions_dir': ['notpg/path/somewhere'],
'export_dir': ['not_generated_projects']
}
}
class TestProject(TestCase):
"""test things related to the Project class"""
def setUp(self):
if not os.path.exists('test_workspace'):
os.makedirs('test_workspace')
# write project file
with open(os.path.join(os.getcwd(), 'test_workspace/project_1.yaml'), 'wt') as f:
f.write(yaml.dump(project_1_yaml, default_flow_style=False))
# write projects file
with open(os.path.join(os.getcwd(), 'test_workspace/projects.yaml'), 'wt') as f:
f.write(yaml.dump(projects_yaml, default_flow_style=False))
self.project = Project('project_1',['test_workspace/project_1.yaml'], PgenWorkspace('test_workspace/projects.yaml'))
def tearDown(self):
# remove created directory
shutil.rmtree('test_workspace', ignore_errors=True)
def test_name(self):
assert self.project.name == 'project_1'
|
<commit_before><commit_msg>Test project - basic template for project to be tested<commit_after># Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import yaml
from unittest import TestCase
from project_generator.project import Project
from project_generator.workspace import PgenWorkspace
project_1_yaml = {
'common': {
'sources': ['sources/main.cpp'],
'includes': ['includes/header1.h'],
'macros': ['MACRO1', 'MACRO2'],
'target': ['target1'],
'core': ['core1'],
'tools_supported': ['iar_arm', 'uvision', 'coide', 'unknown'],
'output_type': ['exe'],
'debugger': ['debugger_1'],
}
}
projects_yaml = {
'projects': {
'project_1' : ['test_workspace/project_1.yaml']
},
'settings' : {
'definitions_dir': ['notpg/path/somewhere'],
'export_dir': ['not_generated_projects']
}
}
class TestProject(TestCase):
"""test things related to the Project class"""
def setUp(self):
if not os.path.exists('test_workspace'):
os.makedirs('test_workspace')
# write project file
with open(os.path.join(os.getcwd(), 'test_workspace/project_1.yaml'), 'wt') as f:
f.write(yaml.dump(project_1_yaml, default_flow_style=False))
# write projects file
with open(os.path.join(os.getcwd(), 'test_workspace/projects.yaml'), 'wt') as f:
f.write(yaml.dump(projects_yaml, default_flow_style=False))
self.project = Project('project_1',['test_workspace/project_1.yaml'], PgenWorkspace('test_workspace/projects.yaml'))
def tearDown(self):
# remove created directory
shutil.rmtree('test_workspace', ignore_errors=True)
def test_name(self):
assert self.project.name == 'project_1'
|
|
49539431d0e4172c5ce83c20f436b58e41d0fa02
|
heavy-ion-luminosity.py
|
heavy-ion-luminosity.py
|
__author__ = 'jacob'
import ROOT
import numpy as np
import os
from root_numpy import root2array, root2rec, tree2rec
# Look at r284484 data
filename = os.path.join("data", "r284484.root")
# Convert a TTree in a ROOT file into a NumPy structured array
arr = root2array(filename)
for element in arr:
print(element)
# The TTree name is always optional if there is only one TTree in the file
# Convert a TTree in a ROOT file into a NumPy record array
rec = root2rec(filename)
# Get the TTree from the ROOT file
rfile = ROOT.TFile(filename)
|
Add getting data from .root file into Numpy
|
Add getting data from .root file into Numpy
|
Python
|
mit
|
jacobbieker/ATLAS-Luminosity
|
Add getting data from .root file into Numpy
|
__author__ = 'jacob'
import ROOT
import numpy as np
import os
from root_numpy import root2array, root2rec, tree2rec
# Look at r284484 data
filename = os.path.join("data", "r284484.root")
# Convert a TTree in a ROOT file into a NumPy structured array
arr = root2array(filename)
for element in arr:
print(element)
# The TTree name is always optional if there is only one TTree in the file
# Convert a TTree in a ROOT file into a NumPy record array
rec = root2rec(filename)
# Get the TTree from the ROOT file
rfile = ROOT.TFile(filename)
|
<commit_before><commit_msg>Add getting data from .root file into Numpy<commit_after>
|
__author__ = 'jacob'
import ROOT
import numpy as np
import os
from root_numpy import root2array, root2rec, tree2rec
# Look at r284484 data
filename = os.path.join("data", "r284484.root")
# Convert a TTree in a ROOT file into a NumPy structured array
arr = root2array(filename)
for element in arr:
print(element)
# The TTree name is always optional if there is only one TTree in the file
# Convert a TTree in a ROOT file into a NumPy record array
rec = root2rec(filename)
# Get the TTree from the ROOT file
rfile = ROOT.TFile(filename)
|
Add getting data from .root file into Numpy__author__ = 'jacob'
import ROOT
import numpy as np
import os
from root_numpy import root2array, root2rec, tree2rec
# Look at r284484 data
filename = os.path.join("data", "r284484.root")
# Convert a TTree in a ROOT file into a NumPy structured array
arr = root2array(filename)
for element in arr:
print(element)
# The TTree name is always optional if there is only one TTree in the file
# Convert a TTree in a ROOT file into a NumPy record array
rec = root2rec(filename)
# Get the TTree from the ROOT file
rfile = ROOT.TFile(filename)
|
<commit_before><commit_msg>Add getting data from .root file into Numpy<commit_after>__author__ = 'jacob'
import ROOT
import numpy as np
import os
from root_numpy import root2array, root2rec, tree2rec
# Look at r284484 data
filename = os.path.join("data", "r284484.root")
# Convert a TTree in a ROOT file into a NumPy structured array
arr = root2array(filename)
for element in arr:
print(element)
# The TTree name is always optional if there is only one TTree in the file
# Convert a TTree in a ROOT file into a NumPy record array
rec = root2rec(filename)
# Get the TTree from the ROOT file
rfile = ROOT.TFile(filename)
|
|
504930ad343c074d34d4e4ac1774d909d11a5a63
|
tests/functional/test_admin.py
|
tests/functional/test_admin.py
|
from django.contrib.auth.models import User
from django.conf import settings
from website.models import Db
from .testcases import TestCase
def make_default_db():
db = settings.DATABASES['default']
return Db.objects.create(
name_short="default",
name_long="default",
type="MySQL",
host=db['HOST'],
db=db['NAME'],
port=db['PORT'] or "3306",
username=db['NAME'],
password_encrypted=db['PASSWORD'],
)
class AdminQueryPageTest(TestCase):
username = "username"
password = "password"
initial_url = "/admin/"
def setUp(self):
super(AdminQueryPageTest, self).setUp()
self.user = self.create_user()
self.login()
def create_user(self):
return User.objects.create_superuser(
username=self.username,
password=self.password,
email="u@example.com",
)
def login(self):
self.browser.fill('username', self.username)
self.browser.fill('password', self.password)
self.browser.find_by_value('Log in').click()
def test_invalid_query_id(self):
db = make_default_db()
self.browser.find_link_by_href("/admin/website/query/add/").click()
self.browser.fill_form({
'title': "GDP",
'description': "National GDP",
'query_text': """
select
lower(code) country_code,
gdp
from
scratch.country_data
order by
gdp desc""",
'db': str(db.id),
'owner': str(self.user.id),
'chart_type': "country",
'graph_extra': "{}",
})
self.browser.find_by_value("Save").click()
self.assertIn("added", self.browser.find_by_css('.success').text)
|
Add admin test for adding a query
|
Add admin test for adding a query
|
Python
|
mit
|
sqlviz/sqlviz,sqlviz/sqlviz,sqlviz/sqlviz,sqlviz/sqlviz,sqlviz/sqlviz,sqlviz/sqlviz,sqlviz/sqlviz,sqlviz/sqlviz,sqlviz/sqlviz,sqlviz/sqlviz,sqlviz/sqlviz,sqlviz/sqlviz,sqlviz/sqlviz,sqlviz/sqlviz,sqlviz/sqlviz,sqlviz/sqlviz,sqlviz/sqlviz
|
Add admin test for adding a query
|
from django.contrib.auth.models import User
from django.conf import settings
from website.models import Db
from .testcases import TestCase
def make_default_db():
db = settings.DATABASES['default']
return Db.objects.create(
name_short="default",
name_long="default",
type="MySQL",
host=db['HOST'],
db=db['NAME'],
port=db['PORT'] or "3306",
username=db['NAME'],
password_encrypted=db['PASSWORD'],
)
class AdminQueryPageTest(TestCase):
username = "username"
password = "password"
initial_url = "/admin/"
def setUp(self):
super(AdminQueryPageTest, self).setUp()
self.user = self.create_user()
self.login()
def create_user(self):
return User.objects.create_superuser(
username=self.username,
password=self.password,
email="u@example.com",
)
def login(self):
self.browser.fill('username', self.username)
self.browser.fill('password', self.password)
self.browser.find_by_value('Log in').click()
def test_invalid_query_id(self):
db = make_default_db()
self.browser.find_link_by_href("/admin/website/query/add/").click()
self.browser.fill_form({
'title': "GDP",
'description': "National GDP",
'query_text': """
select
lower(code) country_code,
gdp
from
scratch.country_data
order by
gdp desc""",
'db': str(db.id),
'owner': str(self.user.id),
'chart_type': "country",
'graph_extra': "{}",
})
self.browser.find_by_value("Save").click()
self.assertIn("added", self.browser.find_by_css('.success').text)
|
<commit_before><commit_msg>Add admin test for adding a query<commit_after>
|
from django.contrib.auth.models import User
from django.conf import settings
from website.models import Db
from .testcases import TestCase
def make_default_db():
db = settings.DATABASES['default']
return Db.objects.create(
name_short="default",
name_long="default",
type="MySQL",
host=db['HOST'],
db=db['NAME'],
port=db['PORT'] or "3306",
username=db['NAME'],
password_encrypted=db['PASSWORD'],
)
class AdminQueryPageTest(TestCase):
username = "username"
password = "password"
initial_url = "/admin/"
def setUp(self):
super(AdminQueryPageTest, self).setUp()
self.user = self.create_user()
self.login()
def create_user(self):
return User.objects.create_superuser(
username=self.username,
password=self.password,
email="u@example.com",
)
def login(self):
self.browser.fill('username', self.username)
self.browser.fill('password', self.password)
self.browser.find_by_value('Log in').click()
def test_invalid_query_id(self):
db = make_default_db()
self.browser.find_link_by_href("/admin/website/query/add/").click()
self.browser.fill_form({
'title': "GDP",
'description': "National GDP",
'query_text': """
select
lower(code) country_code,
gdp
from
scratch.country_data
order by
gdp desc""",
'db': str(db.id),
'owner': str(self.user.id),
'chart_type': "country",
'graph_extra': "{}",
})
self.browser.find_by_value("Save").click()
self.assertIn("added", self.browser.find_by_css('.success').text)
|
Add admin test for adding a queryfrom django.contrib.auth.models import User
from django.conf import settings
from website.models import Db
from .testcases import TestCase
def make_default_db():
db = settings.DATABASES['default']
return Db.objects.create(
name_short="default",
name_long="default",
type="MySQL",
host=db['HOST'],
db=db['NAME'],
port=db['PORT'] or "3306",
username=db['NAME'],
password_encrypted=db['PASSWORD'],
)
class AdminQueryPageTest(TestCase):
username = "username"
password = "password"
initial_url = "/admin/"
def setUp(self):
super(AdminQueryPageTest, self).setUp()
self.user = self.create_user()
self.login()
def create_user(self):
return User.objects.create_superuser(
username=self.username,
password=self.password,
email="u@example.com",
)
def login(self):
self.browser.fill('username', self.username)
self.browser.fill('password', self.password)
self.browser.find_by_value('Log in').click()
def test_invalid_query_id(self):
db = make_default_db()
self.browser.find_link_by_href("/admin/website/query/add/").click()
self.browser.fill_form({
'title': "GDP",
'description': "National GDP",
'query_text': """
select
lower(code) country_code,
gdp
from
scratch.country_data
order by
gdp desc""",
'db': str(db.id),
'owner': str(self.user.id),
'chart_type': "country",
'graph_extra': "{}",
})
self.browser.find_by_value("Save").click()
self.assertIn("added", self.browser.find_by_css('.success').text)
|
<commit_before><commit_msg>Add admin test for adding a query<commit_after>from django.contrib.auth.models import User
from django.conf import settings
from website.models import Db
from .testcases import TestCase
def make_default_db():
db = settings.DATABASES['default']
return Db.objects.create(
name_short="default",
name_long="default",
type="MySQL",
host=db['HOST'],
db=db['NAME'],
port=db['PORT'] or "3306",
username=db['NAME'],
password_encrypted=db['PASSWORD'],
)
class AdminQueryPageTest(TestCase):
username = "username"
password = "password"
initial_url = "/admin/"
def setUp(self):
super(AdminQueryPageTest, self).setUp()
self.user = self.create_user()
self.login()
def create_user(self):
return User.objects.create_superuser(
username=self.username,
password=self.password,
email="u@example.com",
)
def login(self):
self.browser.fill('username', self.username)
self.browser.fill('password', self.password)
self.browser.find_by_value('Log in').click()
def test_invalid_query_id(self):
db = make_default_db()
self.browser.find_link_by_href("/admin/website/query/add/").click()
self.browser.fill_form({
'title': "GDP",
'description': "National GDP",
'query_text': """
select
lower(code) country_code,
gdp
from
scratch.country_data
order by
gdp desc""",
'db': str(db.id),
'owner': str(self.user.id),
'chart_type': "country",
'graph_extra': "{}",
})
self.browser.find_by_value("Save").click()
self.assertIn("added", self.browser.find_by_css('.success').text)
|
|
d9b3d312543268d4305b102d5f28bd7446e00da8
|
tests/test_emit_movie_queue.py
|
tests/test_emit_movie_queue.py
|
from __future__ import unicode_literals, division, absolute_import
from datetime import timedelta, datetime
from flexget.manager import Session
from flexget.plugins.filter.movie_queue import queue_add, QueuedMovie
from tests import FlexGetBase
def age_last_emit(**kwargs):
session = Session()
for item in session.query(QueuedMovie).all():
item.last_emit = datetime.utcnow() - timedelta(**kwargs)
session.commit()
class TestEmitMovieQueue(FlexGetBase):
__yaml__ = """
tasks:
test_default:
emit_movie_queue: yes
test_try_every:
emit_movie_queue:
try_every: 1 day
"""
def test_default(self):
queue_add(title='The Matrix 1999', imdb_id='tt0133093', tmdb_id=603)
self.execute_task('test_default')
assert len(self.task.entries) == 1
# Movie ids should be provided on the entry without needing lookups
entry = self.task.entries[0]
assert entry.get('imdb_id', eval_lazy=False) == 'tt0133093'
assert entry.get('tmdb_id', eval_lazy=False) == 603
self.execute_task('test_default')
assert len(self.task.entries) == 1, 'Movie should be emitted every run'
def test_try_every(self):
queue_add(title='The Matrix 1999', imdb_id='tt0133093')
self.execute_task('test_try_every')
assert len(self.task.entries) == 1
age_last_emit(hours=12)
self.execute_task('test_try_every')
assert not self.task.entries, 'Movie should not be emitted until try_every has passed'
age_last_emit(days=1)
self.execute_task('test_try_every')
assert len(self.task.entries) == 1, 'Movie should be emitted again after try_every has passed'
|
Add some unit tests for emit_movie_queue
|
Add some unit tests for emit_movie_queue
|
Python
|
mit
|
antivirtel/Flexget,crawln45/Flexget,sean797/Flexget,LynxyssCZ/Flexget,tsnoam/Flexget,drwyrm/Flexget,tsnoam/Flexget,sean797/Flexget,offbyone/Flexget,qvazzler/Flexget,crawln45/Flexget,ianstalk/Flexget,Danfocus/Flexget,grrr2/Flexget,jacobmetrick/Flexget,vfrc2/Flexget,JorisDeRieck/Flexget,malkavi/Flexget,qk4l/Flexget,vfrc2/Flexget,patsissons/Flexget,spencerjanssen/Flexget,antivirtel/Flexget,qk4l/Flexget,qk4l/Flexget,jawilson/Flexget,ianstalk/Flexget,patsissons/Flexget,vfrc2/Flexget,ibrahimkarahan/Flexget,gazpachoking/Flexget,xfouloux/Flexget,JorisDeRieck/Flexget,Danfocus/Flexget,tarzasai/Flexget,OmgOhnoes/Flexget,jacobmetrick/Flexget,tsnoam/Flexget,JorisDeRieck/Flexget,tarzasai/Flexget,v17al/Flexget,spencerjanssen/Flexget,JorisDeRieck/Flexget,drwyrm/Flexget,drwyrm/Flexget,ratoaq2/Flexget,ibrahimkarahan/Flexget,Danfocus/Flexget,dsemi/Flexget,ratoaq2/Flexget,X-dark/Flexget,jawilson/Flexget,camon/Flexget,X-dark/Flexget,tobinjt/Flexget,ZefQ/Flexget,tobinjt/Flexget,Danfocus/Flexget,poulpito/Flexget,malkavi/Flexget,sean797/Flexget,Pretagonist/Flexget,gazpachoking/Flexget,ZefQ/Flexget,tvcsantos/Flexget,oxc/Flexget,Flexget/Flexget,oxc/Flexget,v17al/Flexget,thalamus/Flexget,OmgOhnoes/Flexget,asm0dey/Flexget,Flexget/Flexget,LynxyssCZ/Flexget,tobinjt/Flexget,xfouloux/Flexget,voriux/Flexget,lildadou/Flexget,LynxyssCZ/Flexget,thalamus/Flexget,cvium/Flexget,tarzasai/Flexget,Flexget/Flexget,ratoaq2/Flexget,X-dark/Flexget,qvazzler/Flexget,cvium/Flexget,spencerjanssen/Flexget,jacobmetrick/Flexget,grrr2/Flexget,ianstalk/Flexget,dsemi/Flexget,malkavi/Flexget,crawln45/Flexget,jawilson/Flexget,lildadou/Flexget,offbyone/Flexget,poulpito/Flexget,ibrahimkarahan/Flexget,tvcsantos/Flexget,v17al/Flexget,dsemi/Flexget,patsissons/Flexget,Flexget/Flexget,oxc/Flexget,asm0dey/Flexget,cvium/Flexget,xfouloux/Flexget,ZefQ/Flexget,lildadou/Flexget,jawilson/Flexget,OmgOhnoes/Flexget,crawln45/Flexget,antivirtel/Flexget,poulpito/Flexget,tobinjt/Flexget,offbyone/Flexget,grrr2/Flexget,Pretagonist/Flexget,camon/Flexget,Pretagonist/Flexget,thalamus/Flexget,asm0dey/Flexget,qvazzler/Flexget,voriux/Flexget,LynxyssCZ/Flexget,malkavi/Flexget
|
Add some unit tests for emit_movie_queue
|
from __future__ import unicode_literals, division, absolute_import
from datetime import timedelta, datetime
from flexget.manager import Session
from flexget.plugins.filter.movie_queue import queue_add, QueuedMovie
from tests import FlexGetBase
def age_last_emit(**kwargs):
session = Session()
for item in session.query(QueuedMovie).all():
item.last_emit = datetime.utcnow() - timedelta(**kwargs)
session.commit()
class TestEmitMovieQueue(FlexGetBase):
__yaml__ = """
tasks:
test_default:
emit_movie_queue: yes
test_try_every:
emit_movie_queue:
try_every: 1 day
"""
def test_default(self):
queue_add(title='The Matrix 1999', imdb_id='tt0133093', tmdb_id=603)
self.execute_task('test_default')
assert len(self.task.entries) == 1
# Movie ids should be provided on the entry without needing lookups
entry = self.task.entries[0]
assert entry.get('imdb_id', eval_lazy=False) == 'tt0133093'
assert entry.get('tmdb_id', eval_lazy=False) == 603
self.execute_task('test_default')
assert len(self.task.entries) == 1, 'Movie should be emitted every run'
def test_try_every(self):
queue_add(title='The Matrix 1999', imdb_id='tt0133093')
self.execute_task('test_try_every')
assert len(self.task.entries) == 1
age_last_emit(hours=12)
self.execute_task('test_try_every')
assert not self.task.entries, 'Movie should not be emitted until try_every has passed'
age_last_emit(days=1)
self.execute_task('test_try_every')
assert len(self.task.entries) == 1, 'Movie should be emitted again after try_every has passed'
|
<commit_before><commit_msg>Add some unit tests for emit_movie_queue<commit_after>
|
from __future__ import unicode_literals, division, absolute_import
from datetime import timedelta, datetime
from flexget.manager import Session
from flexget.plugins.filter.movie_queue import queue_add, QueuedMovie
from tests import FlexGetBase
def age_last_emit(**kwargs):
session = Session()
for item in session.query(QueuedMovie).all():
item.last_emit = datetime.utcnow() - timedelta(**kwargs)
session.commit()
class TestEmitMovieQueue(FlexGetBase):
__yaml__ = """
tasks:
test_default:
emit_movie_queue: yes
test_try_every:
emit_movie_queue:
try_every: 1 day
"""
def test_default(self):
queue_add(title='The Matrix 1999', imdb_id='tt0133093', tmdb_id=603)
self.execute_task('test_default')
assert len(self.task.entries) == 1
# Movie ids should be provided on the entry without needing lookups
entry = self.task.entries[0]
assert entry.get('imdb_id', eval_lazy=False) == 'tt0133093'
assert entry.get('tmdb_id', eval_lazy=False) == 603
self.execute_task('test_default')
assert len(self.task.entries) == 1, 'Movie should be emitted every run'
def test_try_every(self):
queue_add(title='The Matrix 1999', imdb_id='tt0133093')
self.execute_task('test_try_every')
assert len(self.task.entries) == 1
age_last_emit(hours=12)
self.execute_task('test_try_every')
assert not self.task.entries, 'Movie should not be emitted until try_every has passed'
age_last_emit(days=1)
self.execute_task('test_try_every')
assert len(self.task.entries) == 1, 'Movie should be emitted again after try_every has passed'
|
Add some unit tests for emit_movie_queuefrom __future__ import unicode_literals, division, absolute_import
from datetime import timedelta, datetime
from flexget.manager import Session
from flexget.plugins.filter.movie_queue import queue_add, QueuedMovie
from tests import FlexGetBase
def age_last_emit(**kwargs):
session = Session()
for item in session.query(QueuedMovie).all():
item.last_emit = datetime.utcnow() - timedelta(**kwargs)
session.commit()
class TestEmitMovieQueue(FlexGetBase):
__yaml__ = """
tasks:
test_default:
emit_movie_queue: yes
test_try_every:
emit_movie_queue:
try_every: 1 day
"""
def test_default(self):
queue_add(title='The Matrix 1999', imdb_id='tt0133093', tmdb_id=603)
self.execute_task('test_default')
assert len(self.task.entries) == 1
# Movie ids should be provided on the entry without needing lookups
entry = self.task.entries[0]
assert entry.get('imdb_id', eval_lazy=False) == 'tt0133093'
assert entry.get('tmdb_id', eval_lazy=False) == 603
self.execute_task('test_default')
assert len(self.task.entries) == 1, 'Movie should be emitted every run'
def test_try_every(self):
queue_add(title='The Matrix 1999', imdb_id='tt0133093')
self.execute_task('test_try_every')
assert len(self.task.entries) == 1
age_last_emit(hours=12)
self.execute_task('test_try_every')
assert not self.task.entries, 'Movie should not be emitted until try_every has passed'
age_last_emit(days=1)
self.execute_task('test_try_every')
assert len(self.task.entries) == 1, 'Movie should be emitted again after try_every has passed'
|
<commit_before><commit_msg>Add some unit tests for emit_movie_queue<commit_after>from __future__ import unicode_literals, division, absolute_import
from datetime import timedelta, datetime
from flexget.manager import Session
from flexget.plugins.filter.movie_queue import queue_add, QueuedMovie
from tests import FlexGetBase
def age_last_emit(**kwargs):
session = Session()
for item in session.query(QueuedMovie).all():
item.last_emit = datetime.utcnow() - timedelta(**kwargs)
session.commit()
class TestEmitMovieQueue(FlexGetBase):
__yaml__ = """
tasks:
test_default:
emit_movie_queue: yes
test_try_every:
emit_movie_queue:
try_every: 1 day
"""
def test_default(self):
queue_add(title='The Matrix 1999', imdb_id='tt0133093', tmdb_id=603)
self.execute_task('test_default')
assert len(self.task.entries) == 1
# Movie ids should be provided on the entry without needing lookups
entry = self.task.entries[0]
assert entry.get('imdb_id', eval_lazy=False) == 'tt0133093'
assert entry.get('tmdb_id', eval_lazy=False) == 603
self.execute_task('test_default')
assert len(self.task.entries) == 1, 'Movie should be emitted every run'
def test_try_every(self):
queue_add(title='The Matrix 1999', imdb_id='tt0133093')
self.execute_task('test_try_every')
assert len(self.task.entries) == 1
age_last_emit(hours=12)
self.execute_task('test_try_every')
assert not self.task.entries, 'Movie should not be emitted until try_every has passed'
age_last_emit(days=1)
self.execute_task('test_try_every')
assert len(self.task.entries) == 1, 'Movie should be emitted again after try_every has passed'
|
|
b064392b2e357bb5725c61f6a07389290954e411
|
user_test.py
|
user_test.py
|
#!/usr/bin/env python
try:
import sympy
except ImportError:
print("sympy is required")
else:
if sympy.__version__ < '0.7.5':
print("SymPy version 0.7.5 or newer is required. You have", sympy.__version__)
if sympy.__version__ != '0.7.5':
print("The stable SymPy version 0.7.5 is recommended. You have", sympy.__version__)
try:
import matplotlib
except ImportError:
print("matplotlib is required for the plotting section of the tutorial")
try:
import IPython
except ImportError:
print("IPython notebook is required.")
else:
if IPython.__version__ < '2.1.0':
print("The latest version of IPython is recommended. You have", IPython.__version__)
print("""A fortran and/or C compiler is required for the code generation portion
of the tutorial. However, if you do not have one, you should not worry, as it
will not be a large part of the tutorial.""")
|
Add a user test script
|
Add a user test script
|
Python
|
bsd-3-clause
|
asmeurer/scipy-2014-tutorial,Sumith1896/scipy-2014-tutorial,leosartaj/scipy-2016-tutorial,aktech/scipy-2016-tutorial,Shekharrajak/scipy-2014-tutorial
|
Add a user test script
|
#!/usr/bin/env python
try:
import sympy
except ImportError:
print("sympy is required")
else:
if sympy.__version__ < '0.7.5':
print("SymPy version 0.7.5 or newer is required. You have", sympy.__version__)
if sympy.__version__ != '0.7.5':
print("The stable SymPy version 0.7.5 is recommended. You have", sympy.__version__)
try:
import matplotlib
except ImportError:
print("matplotlib is required for the plotting section of the tutorial")
try:
import IPython
except ImportError:
print("IPython notebook is required.")
else:
if IPython.__version__ < '2.1.0':
print("The latest version of IPython is recommended. You have", IPython.__version__)
print("""A fortran and/or C compiler is required for the code generation portion
of the tutorial. However, if you do not have one, you should not worry, as it
will not be a large part of the tutorial.""")
|
<commit_before><commit_msg>Add a user test script<commit_after>
|
#!/usr/bin/env python
try:
import sympy
except ImportError:
print("sympy is required")
else:
if sympy.__version__ < '0.7.5':
print("SymPy version 0.7.5 or newer is required. You have", sympy.__version__)
if sympy.__version__ != '0.7.5':
print("The stable SymPy version 0.7.5 is recommended. You have", sympy.__version__)
try:
import matplotlib
except ImportError:
print("matplotlib is required for the plotting section of the tutorial")
try:
import IPython
except ImportError:
print("IPython notebook is required.")
else:
if IPython.__version__ < '2.1.0':
print("The latest version of IPython is recommended. You have", IPython.__version__)
print("""A fortran and/or C compiler is required for the code generation portion
of the tutorial. However, if you do not have one, you should not worry, as it
will not be a large part of the tutorial.""")
|
Add a user test script#!/usr/bin/env python
try:
import sympy
except ImportError:
print("sympy is required")
else:
if sympy.__version__ < '0.7.5':
print("SymPy version 0.7.5 or newer is required. You have", sympy.__version__)
if sympy.__version__ != '0.7.5':
print("The stable SymPy version 0.7.5 is recommended. You have", sympy.__version__)
try:
import matplotlib
except ImportError:
print("matplotlib is required for the plotting section of the tutorial")
try:
import IPython
except ImportError:
print("IPython notebook is required.")
else:
if IPython.__version__ < '2.1.0':
print("The latest version of IPython is recommended. You have", IPython.__version__)
print("""A fortran and/or C compiler is required for the code generation portion
of the tutorial. However, if you do not have one, you should not worry, as it
will not be a large part of the tutorial.""")
|
<commit_before><commit_msg>Add a user test script<commit_after>#!/usr/bin/env python
try:
import sympy
except ImportError:
print("sympy is required")
else:
if sympy.__version__ < '0.7.5':
print("SymPy version 0.7.5 or newer is required. You have", sympy.__version__)
if sympy.__version__ != '0.7.5':
print("The stable SymPy version 0.7.5 is recommended. You have", sympy.__version__)
try:
import matplotlib
except ImportError:
print("matplotlib is required for the plotting section of the tutorial")
try:
import IPython
except ImportError:
print("IPython notebook is required.")
else:
if IPython.__version__ < '2.1.0':
print("The latest version of IPython is recommended. You have", IPython.__version__)
print("""A fortran and/or C compiler is required for the code generation portion
of the tutorial. However, if you do not have one, you should not worry, as it
will not be a large part of the tutorial.""")
|
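The version checks in the script above compare version strings lexicographically (for example sympy.__version__ < '0.7.5'), which misorders releases such as 0.10.0. The following sketch is illustrative only and not part of the recorded commit; it uses the standard-library LooseVersion and hypothetical version strings.

from distutils.version import LooseVersion

def version_at_least(installed, minimum):
    # Compare release components numerically instead of character by character,
    # so '0.10.0' correctly sorts after '0.7.5'.
    return LooseVersion(installed) >= LooseVersion(minimum)

assert version_at_least('0.10.0', '0.7.5')     # plain string comparison gets this wrong
assert not version_at_least('0.7.4', '0.7.5')
print("version comparison behaves as expected")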
|
ab5c755f0e8aec37e284a0a2c4eacbb2892f5b11
|
jupyter_notebook_config.partial.py
|
jupyter_notebook_config.partial.py
|
# Configuration file for Jupyter-notebook.
# https://github.com/jupyter/docker-demo-images/blob/master/resources/jupyter_notebook_config.partial.py
#c = get_config()
#c.NotebookApp.ip = '*'
#c.NotebookApp.open_browser = False
#c.NotebookApp.port = 8888 #9999
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Include our extra templates
c.NotebookApp.extra_template_paths = ['/srv/templates/']
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
#c.NotebookApp.tornado_settings = {
# 'headers': {
# 'X-Frame-Options': 'ALLOW FROM nature.com'
# },
# 'template_path':['/srv/ga/', '/srv/ipython/IPython/html',
# '/srv/ipython/IPython/html/templates']
#}
#c.Spawner.env_keep = ['PATH', 'PYTHONPATH', 'CONDA_ROOT', 'CONDA_DEFAULT_ENV', 'VIRTUAL_ENV', 'LANG', 'LC_ALL', 'NRN_NMODL_PATH']
# http://www.harrisgeospatial.com/Support/HelpArticlesDetail/TabId/219/ArtMID/900/ArticleID/14776/Integrating-the-Jupyter-Notebook-with-ESE.aspx
# We need to create an exception in the Jupyter Notebook security that will allow the Jupyter web page to be embedded in an HTML iframe
c.NotebookApp.tornado_settings = {
'headers': {
'Content-Security-Policy': "frame-ancestors 'self' http://*.projectpyrho.org "
}
'static_url_prefix': 'https://cdn.jupyter.org/notebook/%s/' % notebook.__version__
}
#'http://yourhostname:9191/Jupyter/'
|
Add extra notebook config to append to base-notebook config
|
Add extra notebook config to append to base-notebook config
|
Python
|
bsd-3-clause
|
ProjectPyRhO/Prometheus,ProjectPyRhO/Prometheus,ProjectPyRhO/Prometheus,ProjectPyRhO/Prometheus
|
Add extra notebook config to append to base-notebook config
|
# Configuration file for Jupyter-notebook.
# https://github.com/jupyter/docker-demo-images/blob/master/resources/jupyter_notebook_config.partial.py
#c = get_config()
#c.NotebookApp.ip = '*'
#c.NotebookApp.open_browser = False
#c.NotebookApp.port = 8888 #9999
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Include our extra templates
c.NotebookApp.extra_template_paths = ['/srv/templates/']
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
#c.NotebookApp.tornado_settings = {
# 'headers': {
# 'X-Frame-Options': 'ALLOW FROM nature.com'
# },
# 'template_path':['/srv/ga/', '/srv/ipython/IPython/html',
# '/srv/ipython/IPython/html/templates']
#}
#c.Spawner.env_keep = ['PATH', 'PYTHONPATH', 'CONDA_ROOT', 'CONDA_DEFAULT_ENV', 'VIRTUAL_ENV', 'LANG', 'LC_ALL', 'NRN_NMODL_PATH']
# http://www.harrisgeospatial.com/Support/HelpArticlesDetail/TabId/219/ArtMID/900/ArticleID/14776/Integrating-the-Jupyter-Notebook-with-ESE.aspx
# We need to create an exception in the Jupyter Notebook security that will allow the Jupyter web page to be embedded in an HTML iframe
c.NotebookApp.tornado_settings = {
'headers': {
'Content-Security-Policy': "frame-ancestors 'self' http://*.projectpyrho.org "
}
'static_url_prefix': 'https://cdn.jupyter.org/notebook/%s/' % notebook.__version__
}
#'http://yourhostname:9191/Jupyter/'
|
<commit_before><commit_msg>Add extra notebook config to append to base-notebook config<commit_after>
|
# Configuration file for Jupyter-notebook.
# https://github.com/jupyter/docker-demo-images/blob/master/resources/jupyter_notebook_config.partial.py
#c = get_config()
#c.NotebookApp.ip = '*'
#c.NotebookApp.open_browser = False
#c.NotebookApp.port = 8888 #9999
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Include our extra templates
c.NotebookApp.extra_template_paths = ['/srv/templates/']
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
#c.NotebookApp.tornado_settings = {
# 'headers': {
# 'X-Frame-Options': 'ALLOW FROM nature.com'
# },
# 'template_path':['/srv/ga/', '/srv/ipython/IPython/html',
# '/srv/ipython/IPython/html/templates']
#}
#c.Spawner.env_keep = ['PATH', 'PYTHONPATH', 'CONDA_ROOT', 'CONDA_DEFAULT_ENV', 'VIRTUAL_ENV', 'LANG', 'LC_ALL', 'NRN_NMODL_PATH']
# http://www.harrisgeospatial.com/Support/HelpArticlesDetail/TabId/219/ArtMID/900/ArticleID/14776/Integrating-the-Jupyter-Notebook-with-ESE.aspx
# We need to create an exception in the Jupyter Notebook security that will allow the Jupyter web page to be embedded in an HTML iframe
c.NotebookApp.tornado_settings = {
'headers': {
'Content-Security-Policy': "frame-ancestors 'self' http://*.projectpyrho.org "
}
'static_url_prefix': 'https://cdn.jupyter.org/notebook/%s/' % notebook.__version__
}
#'http://yourhostname:9191/Jupyter/'
|
Add extra notebook config to append to base-notebook config# Configuration file for Jupyter-notebook.
# https://github.com/jupyter/docker-demo-images/blob/master/resources/jupyter_notebook_config.partial.py
#c = get_config()
#c.NotebookApp.ip = '*'
#c.NotebookApp.open_browser = False
#c.NotebookApp.port = 8888 #9999
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Include our extra templates
c.NotebookApp.extra_template_paths = ['/srv/templates/']
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
#c.NotebookApp.tornado_settings = {
# 'headers': {
# 'X-Frame-Options': 'ALLOW FROM nature.com'
# },
# 'template_path':['/srv/ga/', '/srv/ipython/IPython/html',
# '/srv/ipython/IPython/html/templates']
#}
#c.Spawner.env_keep = ['PATH', 'PYTHONPATH', 'CONDA_ROOT', 'CONDA_DEFAULT_ENV', 'VIRTUAL_ENV', 'LANG', 'LC_ALL', 'NRN_NMODL_PATH']
# http://www.harrisgeospatial.com/Support/HelpArticlesDetail/TabId/219/ArtMID/900/ArticleID/14776/Integrating-the-Jupyter-Notebook-with-ESE.aspx
# We need to create an exception in the Jupyter Notebook security that will allow the Jupyter web page to be embedded in an HTML iframe
c.NotebookApp.tornado_settings = {
'headers': {
'Content-Security-Policy': "frame-ancestors 'self' http://*.projectpyrho.org "
}
'static_url_prefix': 'https://cdn.jupyter.org/notebook/%s/' % notebook.__version__
}
#'http://yourhostname:9191/Jupyter/'
|
<commit_before><commit_msg>Add extra notebook config to append to base-notebook config<commit_after># Configuration file for Jupyter-notebook.
# https://github.com/jupyter/docker-demo-images/blob/master/resources/jupyter_notebook_config.partial.py
#c = get_config()
#c.NotebookApp.ip = '*'
#c.NotebookApp.open_browser = False
#c.NotebookApp.port = 8888 #9999
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Include our extra templates
c.NotebookApp.extra_template_paths = ['/srv/templates/']
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
#c.NotebookApp.tornado_settings = {
# 'headers': {
# 'X-Frame-Options': 'ALLOW FROM nature.com'
# },
# 'template_path':['/srv/ga/', '/srv/ipython/IPython/html',
# '/srv/ipython/IPython/html/templates']
#}
#c.Spawner.env_keep = ['PATH', 'PYTHONPATH', 'CONDA_ROOT', 'CONDA_DEFAULT_ENV', 'VIRTUAL_ENV', 'LANG', 'LC_ALL', 'NRN_NMODL_PATH']
# http://www.harrisgeospatial.com/Support/HelpArticlesDetail/TabId/219/ArtMID/900/ArticleID/14776/Integrating-the-Jupyter-Notebook-with-ESE.aspx
# We need to create an exception in the Jupyter Notebook security that will allow the Jupyter web page to be embedded in an HTML iframe
c.NotebookApp.tornado_settings = {
'headers': {
'Content-Security-Policy': "frame-ancestors 'self' http://*.projectpyrho.org "
}
'static_url_prefix': 'https://cdn.jupyter.org/notebook/%s/' % notebook.__version__
}
#'http://yourhostname:9191/Jupyter/'
|
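As committed, the final c.NotebookApp.tornado_settings block above is missing a comma after the 'headers' entry and refers to notebook.__version__ without importing the notebook package. The sketch below is a syntactically valid rendering of the same settings; it is illustrative, not part of the commit, and assumes the file is executed by Jupyter's config loader, which supplies get_config().

import notebook  # assumed importable; provides notebook.__version__

c = get_config()  # injected by Jupyter when the config file is loaded
c.NotebookApp.tornado_settings = {
    'headers': {
        'Content-Security-Policy': "frame-ancestors 'self' http://*.projectpyrho.org "
    },
    'static_url_prefix': 'https://cdn.jupyter.org/notebook/%s/' % notebook.__version__,
}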
|
488c0bc477920b7be419f1bfa85be1cbda661db2
|
examples/SteinerTripleSystems.py
|
examples/SteinerTripleSystems.py
|
# Steiner Triple Systems
# The ternary Steiner problem of order n consists of finding a set of n(n-1)/6
# triples of distinct integer elements in {1,...,n} such that any two triples
# have at most one common element. It is a hypergraph problem coming from
# combinatorial mathematics where n modulo 6 has to be equal to 1 or 3.
# CSPLib Problem 044 - http://www.csplib.org/Problems/prob044/
from Numberjack import *
def get_model(N):
assert N >= 3, "Error: N must be at least 3."
assert N % 6 in [1, 3], "Error: N % 6 must be 1 or 3."
N_ROWS = N * (N - 1) / 6
matrix = Matrix(N_ROWS, N, 0, 1)
model = Model()
for row in matrix.row:
model += Sum(row) == 3
for i in range(N_ROWS-1):
for j in range(i + 1, N_ROWS):
model += Sum([matrix[i][k] * matrix[j][k] for k in range(N)]) <= 1
# Symmetry breaking
model += LeqLex(matrix.row[i], matrix.row[i+1])
return matrix, model
def solve(param):
matrix, model = get_model(param['N'])
solver = model.load(param['solver'])
solver.setHeuristic(param["var"], param["val"])
solver.setVerbosity(param['verbose'])
solver.solveAndRestart()
if solver.is_sat():
for row in matrix:
triple = [i + 1 for i, x in enumerate(row) if x.get_value() == 1]
print triple
elif solver.is_unsat():
print "Unsatisfiable"
else:
print "Unknown"
print solver.getNodes()
if __name__ == '__main__':
default = {'N': 7, 'solver': 'Mistral', 'verbose': 0, "var": "Lex", "val": "Lex"}
param = input(default)
solve(param)
|
Add example model of Steiner Triple Systems
|
Add example model of Steiner Triple Systems
|
Python
|
lgpl-2.1
|
eomahony/Numberjack,eomahony/Numberjack,eomahony/Numberjack,eomahony/Numberjack,eomahony/Numberjack
|
Add example model of Steiner Triple Systems
|
# Steiner Triple Systems
# The ternary Steiner problem of order n consists of finding a set of n(n-1)/6
# triples of distinct integer elements in {1,...,n} such that any two triples
# have at most one common element. It is a hypergraph problem coming from
# combinatorial mathematics where n modulo 6 has to be equal to 1 or 3.
# CSPLib Problem 044 - http://www.csplib.org/Problems/prob044/
from Numberjack import *
def get_model(N):
assert N >= 3, "Error: N must be at least 3."
assert N % 6 in [1, 3], "Error: N % 6 must be 1 or 3."
N_ROWS = N * (N - 1) / 6
matrix = Matrix(N_ROWS, N, 0, 1)
model = Model()
for row in matrix.row:
model += Sum(row) == 3
for i in range(N_ROWS-1):
for j in range(i + 1, N_ROWS):
model += Sum([matrix[i][k] * matrix[j][k] for k in range(N)]) <= 1
# Symmetry breaking
model += LeqLex(matrix.row[i], matrix.row[i+1])
return matrix, model
def solve(param):
matrix, model = get_model(param['N'])
solver = model.load(param['solver'])
solver.setHeuristic(param["var"], param["val"])
solver.setVerbosity(param['verbose'])
solver.solveAndRestart()
if solver.is_sat():
for row in matrix:
triple = [i + 1 for i, x in enumerate(row) if x.get_value() == 1]
print triple
elif solver.is_unsat():
print "Unsatisfiable"
else:
print "Unknown"
print solver.getNodes()
if __name__ == '__main__':
default = {'N': 7, 'solver': 'Mistral', 'verbose': 0, "var": "Lex", "val": "Lex"}
param = input(default)
solve(param)
|
<commit_before><commit_msg>Add example model of Steiner Triple Systems<commit_after>
|
# Steiner Triple Systems
# The ternary Steiner problem of order n consists of finding a set of n(n-1)/6
# triples of distinct integer elements in {1,...,n} such that any two triples
# have at most one common element. It is a hypergraph problem coming from
# combinatorial mathematics where n modulo 6 has to be equal to 1 or 3.
# CSPLib Problem 044 - http://www.csplib.org/Problems/prob044/
from Numberjack import *
def get_model(N):
assert N >= 3, "Error: N must be at least 3."
assert N % 6 in [1, 3], "Error: N % 6 must be 1 or 3."
N_ROWS = N * (N - 1) / 6
matrix = Matrix(N_ROWS, N, 0, 1)
model = Model()
for row in matrix.row:
model += Sum(row) == 3
for i in range(N_ROWS-1):
for j in range(i + 1, N_ROWS):
model += Sum([matrix[i][k] * matrix[j][k] for k in range(N)]) <= 1
# Symmetry breaking
model += LeqLex(matrix.row[i], matrix.row[i+1])
return matrix, model
def solve(param):
matrix, model = get_model(param['N'])
solver = model.load(param['solver'])
solver.setHeuristic(param["var"], param["val"])
solver.setVerbosity(param['verbose'])
solver.solveAndRestart()
if solver.is_sat():
for row in matrix:
triple = [i + 1 for i, x in enumerate(row) if x.get_value() == 1]
print triple
elif solver.is_unsat():
print "Unsatisfiable"
else:
print "Unknown"
print solver.getNodes()
if __name__ == '__main__':
default = {'N': 7, 'solver': 'Mistral', 'verbose': 0, "var": "Lex", "val": "Lex"}
param = input(default)
solve(param)
|
Add example model of Steiner Triple Systems# Steiner Triple Systems
# The ternary Steiner problem of order n consists of finding a set of n(n-1)/6
# triples of distinct integer elements in {1,...,n} such that any two triples
# have at most one common element. It is a hypergraph problem coming from
# combinatorial mathematics where n modulo 6 has to be equal to 1 or 3.
# CSPLib Problem 044 - http://www.csplib.org/Problems/prob044/
from Numberjack import *
def get_model(N):
assert N >= 3, "Error: N must be at least 3."
assert N % 6 in [1, 3], "Error: N % 6 must be 1 or 3."
N_ROWS = N * (N - 1) / 6
matrix = Matrix(N_ROWS, N, 0, 1)
model = Model()
for row in matrix.row:
model += Sum(row) == 3
for i in range(N_ROWS-1):
for j in range(i + 1, N_ROWS):
model += Sum([matrix[i][k] * matrix[j][k] for k in range(N)]) <= 1
# Symmetry breaking
model += LeqLex(matrix.row[i], matrix.row[i+1])
return matrix, model
def solve(param):
matrix, model = get_model(param['N'])
solver = model.load(param['solver'])
solver.setHeuristic(param["var"], param["val"])
solver.setVerbosity(param['verbose'])
solver.solveAndRestart()
if solver.is_sat():
for row in matrix:
triple = [i + 1 for i, x in enumerate(row) if x.get_value() == 1]
print triple
elif solver.is_unsat():
print "Unsatisfiable"
else:
print "Unknown"
print solver.getNodes()
if __name__ == '__main__':
default = {'N': 7, 'solver': 'Mistral', 'verbose': 0, "var": "Lex", "val": "Lex"}
param = input(default)
solve(param)
|
<commit_before><commit_msg>Add example model of Steiner Triple Systems<commit_after># Steiner Triple Systems
# The ternary Steiner problem of order n consists of finding a set of n(n-1)/6
# triples of distinct integer elements in {1,...,n} such that any two triples
# have at most one common element. It is a hypergraph problem coming from
# combinatorial mathematics where n modulo 6 has to be equal to 1 or 3.
# CSPLib Problem 044 - http://www.csplib.org/Problems/prob044/
from Numberjack import *
def get_model(N):
assert N >= 3, "Error: N must be at least 3."
assert N % 6 in [1, 3], "Error: N % 6 must be 1 or 3."
N_ROWS = N * (N - 1) / 6
matrix = Matrix(N_ROWS, N, 0, 1)
model = Model()
for row in matrix.row:
model += Sum(row) == 3
for i in range(N_ROWS-1):
for j in range(i + 1, N_ROWS):
model += Sum([matrix[i][k] * matrix[j][k] for k in range(N)]) <= 1
# Symmetry breaking
model += LeqLex(matrix.row[i], matrix.row[i+1])
return matrix, model
def solve(param):
matrix, model = get_model(param['N'])
solver = model.load(param['solver'])
solver.setHeuristic(param["var"], param["val"])
solver.setVerbosity(param['verbose'])
solver.solveAndRestart()
if solver.is_sat():
for row in matrix:
triple = [i + 1 for i, x in enumerate(row) if x.get_value() == 1]
print triple
elif solver.is_unsat():
print "Unsatisfiable"
else:
print "Unknown"
print solver.getNodes()
if __name__ == '__main__':
default = {'N': 7, 'solver': 'Mistral', 'verbose': 0, "var": "Lex", "val": "Lex"}
param = input(default)
solve(param)
|
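For context on what the model above encodes: with N = 7 it should find N*(N-1)/6 = 7 triples such that any two triples share at most one element. The check below is illustrative only and does not use Numberjack; it verifies the well-known Fano-plane system against the same conditions.

from itertools import combinations

fano = [(1, 2, 3), (1, 4, 5), (1, 6, 7), (2, 4, 6), (2, 5, 7), (3, 4, 7), (3, 5, 6)]

assert len(fano) == 7 * (7 - 1) // 6               # n(n-1)/6 triples
assert all(len(set(t)) == 3 for t in fano)         # three distinct elements per triple
for a, b in combinations(fano, 2):
    assert len(set(a) & set(b)) <= 1               # any two triples share at most one element
print("Fano plane satisfies the Steiner triple constraints")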
|
00f0ec1740109cc12815b9aaed88023cc91881fe
|
velruse/store/mongodb_store.py
|
velruse/store/mongodb_store.py
|
"""MongoDB UserStore implementation"""
try:
import cPickle as pickle
except ImportError:
import pickle
import pymongo
from pymongo import Connection
from pymongo.errors import ConnectionFailure
from pymongo.binary import Binary
from pymongo.errors import OperationFailure
from velruse.store.interface import UserStore
from velruse.utils import cached_property
class MongoDBStore(UserStore):
"""MongoDB Storage for Auth Provider"""
def __init__(self, host='localhost', port=27017, db="mongo_db_name", collection='velruse_ustore'):
self.host = host
self.port = port
self.db = db
self.collection = collection
@classmethod
def load_from_config(cls, config):
"""Load the MongoDBStore based on the config"""
params = {}
for k, v in config.items():
key = k.lower()
if key not in ['host', 'port', 'db', 'collection']:
continue
params[key] = v
return cls(**params)
@cached_property #Fix this later -htormey
def _conn(self):
"""The MongoDB connection, cached for this call"""
try:
db_conn = Connection(self.host, self.port, slave_okay=False)
except ConnectionFailure:
raise Exception('Unable to connect to MongoDB')
conn = db_conn[self.db]
return conn
def retrieve(self, key):
data = self._conn[self.collection].find_one({'key' : key })
if data:
return pickle.loads(data['value'])
else:
return None
def store(self, key, value, expires=None):
try:
r = self._conn[self.collection].update({ 'key': key},
{ '$set' : { "value" : Binary(pickle.dumps(value)) }},
upsert=True, safe=True)
except OperationFailure:
return False
else:
return True
def delete(self, key):
try:
self._conn[self.collection].remove({'key' : key })
except OperationFailure:
return False
else:
return True
def purge_expired(self):
pass
|
Add in support for mongodb store. Add dependencies for pymongo/redis.
|
Add in support for mongodb store. Add dependencies for pymongo/redis.
|
Python
|
mit
|
miedzinski/velruse,bbangert/velruse,bbangert/velruse,ImaginationForPeople/velruse,miedzinski/velruse,ImaginationForPeople/velruse
|
Add in support for mongodb store. Add dependencies for pymongo/redis.
|
"""MongoDB UserStore implementation"""
try:
import cPickle as pickle
except ImportError:
import pickle
import pymongo
from pymongo import Connection
from pymongo.errors import ConnectionFailure
from pymongo.binary import Binary
from pymongo.errors import OperationFailure
from velruse.store.interface import UserStore
from velruse.utils import cached_property
class MongoDBStore(UserStore):
"""MongoDB Storage for Auth Provider"""
def __init__(self, host='localhost', port=27017, db="mongo_db_name", collection='velruse_ustore'):
self.host = host
self.port = port
self.db = db
self.collection = collection
@classmethod
def load_from_config(cls, config):
"""Load the MongoDBStore based on the config"""
params = {}
for k, v in config.items():
key = k.lower()
if key not in ['host', 'port', 'db', 'collection']:
continue
params[key] = v
return cls(**params)
@cached_property #Fix this later -htormey
def _conn(self):
"""The MongoDB connection, cached for this call"""
try:
db_conn = Connection(self.host, self.port, slave_okay=False)
except ConnectionFailure:
raise Exception('Unable to connect to MongoDB')
conn = db_conn[self.db]
return conn
def retrieve(self, key):
data = self._conn[self.collection].find_one({'key' : key })
if data:
return pickle.loads(data['value'])
else:
return None
def store(self, key, value, expires=None):
try:
r = self._conn[self.collection].update({ 'key': key},
{ '$set' : { "value" : Binary(pickle.dumps(value)) }},
upsert=True, safe=True)
except OperationFailure:
return False
else:
return True
def delete(self, key):
try:
self._conn[self.collection].remove({'key' : key })
except OperationFailure:
return False
else:
return True
def purge_expired(self):
pass
|
<commit_before><commit_msg>Add in support for mongodb store. Add dependencies for pymongo/redis.<commit_after>
|
"""MongoDB UserStore implementation"""
try:
import cPickle as pickle
except ImportError:
import pickle
import pymongo
from pymongo import Connection
from pymongo.errors import ConnectionFailure
from pymongo.binary import Binary
from pymongo.errors import OperationFailure
from velruse.store.interface import UserStore
from velruse.utils import cached_property
class MongoDBStore(UserStore):
"""MongoDB Storage for Auth Provider"""
def __init__(self, host='localhost', port=27017, db="mongo_db_name", collection='velruse_ustore'):
self.host = host
self.port = port
self.db = db
self.collection = collection
@classmethod
def load_from_config(cls, config):
"""Load the MongoDBStore based on the config"""
params = {}
for k, v in config.items():
key = k.lower()
if key not in ['host', 'port', 'db', 'collection']:
continue
params[key] = v
return cls(**params)
@cached_property #Fix this later -htormey
def _conn(self):
"""The MongoDB connection, cached for this call"""
try:
db_conn = Connection(self.host, self.port, slave_okay=False)
except ConnectionFailure:
raise Exception('Unable to connect to MongoDB')
conn = db_conn[self.db]
return conn
def retrieve(self, key):
data = self._conn[self.collection].find_one({'key' : key })
if data:
return pickle.loads(data['value'])
else:
return None
def store(self, key, value, expires=None):
try:
r = self._conn[self.collection].update({ 'key': key},
{ '$set' : { "value" : Binary(pickle.dumps(value)) }},
upsert=True, safe=True)
except OperationFailure:
return False
else:
return True
def delete(self, key):
try:
self._conn[self.collection].remove({'key' : key })
except OperationFailure:
return False
else:
return True
def purge_expired(self):
pass
|
Add in support for mongodb store. Add dependencies for pymongo/redis."""MongoDB UserStore implementation"""
try:
import cPickle as pickle
except ImportError:
import pickle
import pymongo
from pymongo import Connection
from pymongo.errors import ConnectionFailure
from pymongo.binary import Binary
from pymongo.errors import OperationFailure
from velruse.store.interface import UserStore
from velruse.utils import cached_property
class MongoDBStore(UserStore):
"""MongoDB Storage for Auth Provider"""
def __init__(self, host='localhost', port=27017, db="mongo_db_name", collection='velruse_ustore'):
self.host = host
self.port = port
self.db = db
self.collection = collection
@classmethod
def load_from_config(cls, config):
"""Load the MongoDBStore based on the config"""
params = {}
for k, v in config.items():
key = k.lower()
if key not in ['host', 'port', 'db', 'collection']:
continue
params[key] = v
return cls(**params)
@cached_property #Fix this later -htormey
def _conn(self):
"""The MongoDB connection, cached for this call"""
try:
db_conn = Connection(self.host, self.port, slave_okay=False)
except ConnectionFailure:
raise Exception('Unable to connect to MongoDB')
conn = db_conn[self.db]
return conn
def retrieve(self, key):
data = self._conn[self.collection].find_one({'key' : key })
if data:
return pickle.loads(data['value'])
else:
return None
def store(self, key, value, expires=None):
try:
r = self._conn[self.collection].update({ 'key': key},
{ '$set' : { "value" : Binary(pickle.dumps(value)) }},
upsert=True, safe=True)
except OperationFailure:
return False
else:
return True
def delete(self, key):
try:
self._conn[self.collection].remove({'key' : key })
except OperationFailure:
return False
else:
return True
def purge_expired(self):
pass
|
<commit_before><commit_msg>Add in support for mongodb store. Add dependencies for pymongo/redis.<commit_after>"""MongoDB UserStore implementation"""
try:
import cPickle as pickle
except ImportError:
import pickle
import pymongo
from pymongo import Connection
from pymongo.errors import ConnectionFailure
from pymongo.binary import Binary
from pymongo.errors import OperationFailure
from velruse.store.interface import UserStore
from velruse.utils import cached_property
class MongoDBStore(UserStore):
"""MongoDB Storage for Auth Provider"""
def __init__(self, host='localhost', port=27017, db="mongo_db_name", collection='velruse_ustore'):
self.host = host
self.port = port
self.db = db
self.collection = collection
@classmethod
def load_from_config(cls, config):
"""Load the MongoDBStore based on the config"""
params = {}
for k, v in config.items():
key = k.lower()
if key not in ['host', 'port', 'db', 'collection']:
continue
params[key] = v
return cls(**params)
@cached_property #Fix this later -htormey
def _conn(self):
"""The MongoDB connection, cached for this call"""
try:
db_conn = Connection(self.host, self.port, slave_okay=False)
except ConnectionFailure:
raise Exception('Unable to connect to MongoDB')
conn = db_conn[self.db]
return conn
def retrieve(self, key):
data = self._conn[self.collection].find_one({'key' : key })
if data:
return pickle.loads(data['value'])
else:
return None
def store(self, key, value, expires=None):
try:
r = self._conn[self.collection].update({ 'key': key},
{ '$set' : { "value" : Binary(pickle.dumps(value)) }},
upsert=True, safe=True)
except OperationFailure:
return False
else:
return True
def delete(self, key):
try:
self._conn[self.collection].remove({'key' : key })
except OperationFailure:
return False
else:
return True
def purge_expired(self):
pass
|
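An illustrative usage sketch for the store above, separate from the commit: it assumes a MongoDB server reachable on localhost:27017 and that the module is importable as velruse.store.mongodb_store.

from velruse.store.mongodb_store import MongoDBStore

store = MongoDBStore(host='localhost', port=27017, db='velruse', collection='velruse_ustore')
store.store('user123', {'displayName': 'Example User'})   # value is pickled and upserted
print(store.retrieve('user123'))                          # -> {'displayName': 'Example User'}
store.delete('user123')
print(store.retrieve('user123'))                          # -> None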
|
70e4843ccb2dde8a1fa2b50dcfb10ae10c3c3d9a
|
utils/Layers.py
|
utils/Layers.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a collection of helpful utilitys
"""
import tensorflow as tf
from tensorflow.keras.layers import Conv2DTranspose
from tensorflow.keras import Model
class Conv1DTranspose(Model):
"""Implementation of a 1-dimentional transpose convolution layer.
This implementation is supose to emulate the interface of
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv1DTranspose,
so that it can be easily swapped out.
NOTE: When this was implemented 'tf.keras.layers.Conv1DTranspose' was
only available in the nightly build. Hence, this implementation using
2D Transpose convolution.
"""
def __init__(self, filters, kernel_size, strides, padding='same', **kwargs):
super(Conv1DTranspose, self).__init__()
self.conv_2d = Conv2DTranspose(filters=filters, kernel_size=(kernel_size, 1), strides=(strides, 1), padding=padding, **kwargs )
def call(self, x):
x = tf.expand_dims(x, axis=2)
x_up = self.conv_2d(x)
x_up = tf.squeeze(x_up, axis=2)
return x_up
|
Add 1D Transpose Convolution Implementation Official implementation of transpose 1d convolution is only available in nightly build at the moment
|
Add 1D Transpose Convolution Implementation
Official implementation of transpose 1d convolution is only
available in nightly build at the moment
|
Python
|
apache-2.0
|
googleinterns/audio_synthesis
|
Add 1D Transpose Convolution Implementation
Official implementation of transpose 1d convolution is only
available in nightly build at the moment
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a collection of helpful utilitys
"""
import tensorflow as tf
from tensorflow.keras.layers import Conv2DTranspose
from tensorflow.keras import Model
class Conv1DTranspose(Model):
"""Implementation of a 1-dimentional transpose convolution layer.
This implementation is supose to emulate the interface of
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv1DTranspose,
so that it can be easily swapped out.
NOTE: When this was implemented 'tf.keras.layers.Conv1DTranspose' was
only available in the nightly build. Hence, this implementation using
2D Transpose convolution.
"""
def __init__(self, filters, kernel_size, strides, padding='same', **kwargs):
super(Conv1DTranspose, self).__init__()
self.conv_2d = Conv2DTranspose(filters=filters, kernel_size=(kernel_size, 1), strides=(strides, 1), padding=padding, **kwargs )
def call(self, x):
x = tf.expand_dims(x, axis=2)
x_up = self.conv_2d(x)
x_up = tf.squeeze(x_up, axis=2)
return x_up
|
<commit_before><commit_msg>Add 1D Transpose Convolution Implementation
Official implementation of transpose 1d convolution is only
available in nightly build at the moment<commit_after>
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a collection of helpful utilitys
"""
import tensorflow as tf
from tensorflow.keras.layers import Conv2DTranspose
from tensorflow.keras import Model
class Conv1DTranspose(Model):
"""Implementation of a 1-dimentional transpose convolution layer.
This implementation is supose to emulate the interface of
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv1DTranspose,
so that it can be easily swapped out.
NOTE: When this was implemented 'tf.keras.layers.Conv1DTranspose' was
only available in the nightly build. Hence, this implementation using
2D Transpose convolution.
"""
def __init__(self, filters, kernel_size, strides, padding='same', **kwargs):
super(Conv1DTranspose, self).__init__()
self.conv_2d = Conv2DTranspose(filters=filters, kernel_size=(kernel_size, 1), strides=(strides, 1), padding=padding, **kwargs )
def call(self, x):
x = tf.expand_dims(x, axis=2)
x_up = self.conv_2d(x)
x_up = tf.squeeze(x_up, axis=2)
return x_up
|
Add 1D Transpose Convolution Implementation
Official implementation of transpose 1d convolution is only
available in nightly build at the moment# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a collection of helpful utilitys
"""
import tensorflow as tf
from tensorflow.keras.layers import Conv2DTranspose
from tensorflow.keras import Model
class Conv1DTranspose(Model):
"""Implementation of a 1-dimentional transpose convolution layer.
This implementation is supose to emulate the interface of
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv1DTranspose,
so that it can be easily swapped out.
NOTE: When this was implemented 'tf.keras.layers.Conv1DTranspose' was
only available in the nightly build. Hence, this implementation using
2D Transpose convolution.
"""
def __init__(self, filters, kernel_size, strides, padding='same', **kwargs):
super(Conv1DTranspose, self).__init__()
self.conv_2d = Conv2DTranspose(filters=filters, kernel_size=(kernel_size, 1), strides=(strides, 1), padding=padding, **kwargs )
def call(self, x):
x = tf.expand_dims(x, axis=2)
x_up = self.conv_2d(x)
x_up = tf.squeeze(x_up, axis=2)
return x_up
|
<commit_before><commit_msg>Add 1D Transpose Convolution Implementation
Official implementation of transpose 1d convolution is only
available in nightly build at the moment<commit_after># Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a collection of helpful utilitys
"""
import tensorflow as tf
from tensorflow.keras.layers import Conv2DTranspose
from tensorflow.keras import Model
class Conv1DTranspose(Model):
"""Implementation of a 1-dimentional transpose convolution layer.
This implementation is supose to emulate the interface of
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv1DTranspose,
so that it can be easily swapped out.
NOTE: When this was implemented 'tf.keras.layers.Conv1DTranspose' was
only available in the nightly build. Hence, this implementation using
2D Transpose convolution.
"""
def __init__(self, filters, kernel_size, strides, padding='same', **kwargs):
super(Conv1DTranspose, self).__init__()
self.conv_2d = Conv2DTranspose(filters=filters, kernel_size=(kernel_size, 1), strides=(strides, 1), padding=padding, **kwargs )
def call(self, x):
x = tf.expand_dims(x, axis=2)
x_up = self.conv_2d(x)
x_up = tf.squeeze(x_up, axis=2)
return x_up
|
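A shape-level usage sketch for the layer above, illustrative only: it assumes TensorFlow 2.x and that the class is importable from utils.Layers as committed. The time axis is upsampled by the stride.

import tensorflow as tf
from utils.Layers import Conv1DTranspose  # module path as committed above

upsample = Conv1DTranspose(filters=32, kernel_size=25, strides=4, padding='same')
x = tf.random.normal([8, 128, 64])        # (batch, time, channels)
y = upsample(x)
print(y.shape)                            # expected: (8, 512, 32), time upsampled by 4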
|
ae2e437b5bdf5202d0fc38d8f0d2dbe41bd68b11
|
sleep_wake_heatmap.py
|
sleep_wake_heatmap.py
|
import plotly as py
import plotly.graph_objs as go
import datetime
from sys import argv
import names
from csvparser import parse
data_file = argv[1]
raw_data = parse(data_file)
h = 12
w = len(raw_data)
start_time = 3
grid = [[0] * w for i in range(0, h)]
#def datetime_to_coords(dt):
# x = 0 # temp
# y = round((dt.hour + dt.minute / 60.0 - start_time) / 24.0 * h)
#
# return (x, y)
for date, rests in raw_data.items():
for r in rests:
rest, wake, is_nap = r
x1, y1 = datetime_to_coords(rest)
x2, y2 = datetime_to_coords(wake)
# while x1 != x2 or y1 != y2:
# grid[y1][x1] = 1
#
# if y1 + 1 == h:
# y1 = 0
# x1 += 1
# else:
# y1 += 1
|
Add heatmap with working ish pseudocode
|
Add heatmap with working ish pseudocode
|
Python
|
mit
|
f-jiang/sleep-pattern-grapher
|
Add heatmap with working ish pseudocode
|
import plotly as py
import plotly.graph_objs as go
import datetime
from sys import argv
import names
from csvparser import parse
data_file = argv[1]
raw_data = parse(data_file)
h = 12
w = len(raw_data)
start_time = 3
grid = [[0] * w for i in range(0, h)]
#def datetime_to_coords(dt):
# x = 0 # temp
# y = round((dt.hour + dt.minute / 60.0 - start_time) / 24.0 * h)
#
# return (x, y)
for date, rests in raw_data.items():
for r in rests:
rest, wake, is_nap = r
x1, y1 = datetime_to_coords(rest)
x2, y2 = datetime_to_coords(wake)
# while x1 != x2 or y1 != y2:
# grid[y1][x1] = 1
#
# if y1 + 1 == h:
# y1 = 0
# x1 += 1
# else:
# y1 += 1
|
<commit_before><commit_msg>Add heatmap with working ish pseudocode<commit_after>
|
import plotly as py
import plotly.graph_objs as go
import datetime
from sys import argv
import names
from csvparser import parse
data_file = argv[1]
raw_data = parse(data_file)
h = 12
w = len(raw_data)
start_time = 3
grid = [[0] * w for i in range(0, h)]
#def datetime_to_coords(dt):
# x = 0 # temp
# y = round((dt.hour + dt.minute / 60.0 - start_time) / 24.0 * h)
#
# return (x, y)
for date, rests in raw_data.items():
for r in rests:
rest, wake, is_nap = r
x1, y1 = datetime_to_coords(rest)
x2, y2 = datetime_to_coords(wake)
# while x1 != x2 or y1 != y2:
# grid[y1][x1] = 1
#
# if y1 + 1 == h:
# y1 = 0
# x1 += 1
# else:
# y1 += 1
|
Add heatmap with working ish pseudocodeimport plotly as py
import plotly.graph_objs as go
import datetime
from sys import argv
import names
from csvparser import parse
data_file = argv[1]
raw_data = parse(data_file)
h = 12
w = len(raw_data)
start_time = 3
grid = [[0] * w for i in range(0, h)]
#def datetime_to_coords(dt):
# x = 0 # temp
# y = round((dt.hour + dt.minute / 60.0 - start_time) / 24.0 * h)
#
# return (x, y)
for date, rests in raw_data.items():
for r in rests:
rest, wake, is_nap = r
x1, y1 = datetime_to_coords(rest)
x2, y2 = datetime_to_coords(wake)
# while x1 != x2 or y1 != y2:
# grid[y1][x1] = 1
#
# if y1 + 1 == h:
# y1 = 0
# x1 += 1
# else:
# y1 += 1
|
<commit_before><commit_msg>Add heatmap with working ish pseudocode<commit_after>import plotly as py
import plotly.graph_objs as go
import datetime
from sys import argv
import names
from csvparser import parse
data_file = argv[1]
raw_data = parse(data_file)
h = 12
w = len(raw_data)
start_time = 3
grid = [[0] * w for i in range(0, h)]
#def datetime_to_coords(dt):
# x = 0 # temp
# y = round((dt.hour + dt.minute / 60.0 - start_time) / 24.0 * h)
#
# return (x, y)
for date, rests in raw_data.items():
for r in rests:
rest, wake, is_nap = r
x1, y1 = datetime_to_coords(rest)
x2, y2 = datetime_to_coords(wake)
# while x1 != x2 or y1 != y2:
# grid[y1][x1] = 1
#
# if y1 + 1 == h:
# y1 = 0
# x1 += 1
# else:
# y1 += 1
|
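The commit above leaves datetime_to_coords commented out, so the loop would raise a NameError if executed as-is. One possible completion is sketched below; it is an assumption rather than the author's implementation, and it treats the earliest date in the data as the first column.

def datetime_to_coords(dt, first_date, h=12, start_time=3):
    # Column: whole days elapsed since the first recorded date.
    x = (dt.date() - first_date).days
    # Row: slot of the time of day among h slots, with the day starting at start_time o'clock.
    hours_into_day = (dt.hour + dt.minute / 60.0 - start_time) % 24
    y = int(hours_into_day / 24.0 * h)
    return x, min(y, h - 1)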
|
da372725ea236b971829e4a07e4c18d7d094152c
|
api_tests/logs/views/test_log_embeds.py
|
api_tests/logs/views/test_log_embeds.py
|
from nose.tools import * # flake8: noqa
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import (
ProjectFactory,
AuthUserFactory,
RegistrationFactory
)
class TestLogEmbeds(ApiTestCase):
def setUp(self):
super(TestLogEmbeds, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(is_public=True, creator=self.user)
self.registration = RegistrationFactory(project=self.project, creator=self.user, is_public=True)
self.first_reg_log = list(self.registration.logs)[0]
def test_embed_original_node(self):
registration_log_url = '/{}logs/{}/?embed=original_node'.format(API_BASE, self.first_reg_log._id)
res = self.app.get(registration_log_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['action'], 'project_created')
embeds = res.json['data']['embeds']['original_node']
assert_equal(embeds['data']['id'], self.project._id)
def test_embed_node(self):
registration_log_url = '/{}logs/{}/?embed=node'.format(API_BASE, self.first_reg_log._id)
res = self.app.get(registration_log_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['action'], 'project_created')
embeds = res.json['data']['embeds']['node']
assert_equal(embeds['data']['id'], self.registration._id)
def test_embed_user(self):
registration_log_url = '/{}logs/{}/?embed=user'.format(API_BASE, self.first_reg_log._id)
res = self.app.get(registration_log_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['action'], 'project_created')
embeds = res.json['data']['embeds']['user']
assert_equal(embeds['data']['id'], self.user._id)
def test_embed_attributes_not_relationships(self):
url = '/{}logs/{}/?embed=action'.format(API_BASE, self.first_reg_log._id)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "The following fields are not embeddable: action")
|
Add tests for testing log embeds.
|
Add tests for testing log embeds.
|
Python
|
apache-2.0
|
alexschiller/osf.io,samchrisinger/osf.io,mattclark/osf.io,felliott/osf.io,saradbowman/osf.io,kwierman/osf.io,emetsger/osf.io,cslzchen/osf.io,leb2dg/osf.io,caneruguz/osf.io,caneruguz/osf.io,rdhyee/osf.io,zachjanicki/osf.io,saradbowman/osf.io,adlius/osf.io,doublebits/osf.io,SSJohns/osf.io,cwisecarver/osf.io,DanielSBrown/osf.io,chrisseto/osf.io,monikagrabowska/osf.io,mluo613/osf.io,wearpants/osf.io,kch8qx/osf.io,CenterForOpenScience/osf.io,erinspace/osf.io,RomanZWang/osf.io,SSJohns/osf.io,emetsger/osf.io,DanielSBrown/osf.io,abought/osf.io,crcresearch/osf.io,emetsger/osf.io,doublebits/osf.io,CenterForOpenScience/osf.io,leb2dg/osf.io,jnayak1/osf.io,TomHeatwole/osf.io,cwisecarver/osf.io,mluke93/osf.io,HalcyonChimera/osf.io,mluke93/osf.io,mluke93/osf.io,monikagrabowska/osf.io,cwisecarver/osf.io,icereval/osf.io,wearpants/osf.io,abought/osf.io,rdhyee/osf.io,doublebits/osf.io,wearpants/osf.io,baylee-d/osf.io,leb2dg/osf.io,monikagrabowska/osf.io,asanfilippo7/osf.io,zachjanicki/osf.io,alexschiller/osf.io,SSJohns/osf.io,HalcyonChimera/osf.io,HalcyonChimera/osf.io,mluo613/osf.io,amyshi188/osf.io,alexschiller/osf.io,kch8qx/osf.io,mfraezz/osf.io,zachjanicki/osf.io,TomHeatwole/osf.io,hmoco/osf.io,chrisseto/osf.io,sloria/osf.io,mluke93/osf.io,laurenrevere/osf.io,adlius/osf.io,kch8qx/osf.io,hmoco/osf.io,laurenrevere/osf.io,laurenrevere/osf.io,SSJohns/osf.io,CenterForOpenScience/osf.io,RomanZWang/osf.io,caseyrollins/osf.io,asanfilippo7/osf.io,acshi/osf.io,jnayak1/osf.io,felliott/osf.io,chrisseto/osf.io,rdhyee/osf.io,kwierman/osf.io,chennan47/osf.io,RomanZWang/osf.io,zamattiac/osf.io,binoculars/osf.io,DanielSBrown/osf.io,HalcyonChimera/osf.io,asanfilippo7/osf.io,doublebits/osf.io,wearpants/osf.io,TomHeatwole/osf.io,crcresearch/osf.io,TomBaxter/osf.io,kwierman/osf.io,zamattiac/osf.io,icereval/osf.io,mfraezz/osf.io,kch8qx/osf.io,kch8qx/osf.io,samchrisinger/osf.io,chennan47/osf.io,cslzchen/osf.io,mluo613/osf.io,felliott/osf.io,erinspace/osf.io,CenterForOpenScience/osf.io,zachjanicki/osf.io,cwisecarver/osf.io,baylee-d/osf.io,erinspace/osf.io,RomanZWang/osf.io,Johnetordoff/osf.io,Johnetordoff/osf.io,acshi/osf.io,abought/osf.io,felliott/osf.io,jnayak1/osf.io,hmoco/osf.io,chennan47/osf.io,caneruguz/osf.io,caseyrollins/osf.io,emetsger/osf.io,monikagrabowska/osf.io,zamattiac/osf.io,aaxelb/osf.io,pattisdr/osf.io,hmoco/osf.io,TomHeatwole/osf.io,binoculars/osf.io,TomBaxter/osf.io,mluo613/osf.io,monikagrabowska/osf.io,Johnetordoff/osf.io,amyshi188/osf.io,mattclark/osf.io,acshi/osf.io,cslzchen/osf.io,acshi/osf.io,asanfilippo7/osf.io,sloria/osf.io,leb2dg/osf.io,pattisdr/osf.io,doublebits/osf.io,caseyrollins/osf.io,acshi/osf.io,Nesiehr/osf.io,alexschiller/osf.io,samchrisinger/osf.io,mfraezz/osf.io,adlius/osf.io,aaxelb/osf.io,alexschiller/osf.io,mattclark/osf.io,zamattiac/osf.io,brianjgeiger/osf.io,RomanZWang/osf.io,Johnetordoff/osf.io,binoculars/osf.io,samchrisinger/osf.io,amyshi188/osf.io,sloria/osf.io,aaxelb/osf.io,mluo613/osf.io,caneruguz/osf.io,brianjgeiger/osf.io,rdhyee/osf.io,amyshi188/osf.io,baylee-d/osf.io,kwierman/osf.io,chrisseto/osf.io,brianjgeiger/osf.io,Nesiehr/osf.io,DanielSBrown/osf.io,cslzchen/osf.io,abought/osf.io,mfraezz/osf.io,jnayak1/osf.io,adlius/osf.io,brianjgeiger/osf.io,TomBaxter/osf.io,pattisdr/osf.io,aaxelb/osf.io,icereval/osf.io,Nesiehr/osf.io,crcresearch/osf.io,Nesiehr/osf.io
|
Add tests for testing log embeds.
|
from nose.tools import * # flake8: noqa
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import (
ProjectFactory,
AuthUserFactory,
RegistrationFactory
)
class TestLogEmbeds(ApiTestCase):
def setUp(self):
super(TestLogEmbeds, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(is_public=True, creator=self.user)
self.registration = RegistrationFactory(project=self.project, creator=self.user, is_public=True)
self.first_reg_log = list(self.registration.logs)[0]
def test_embed_original_node(self):
registration_log_url = '/{}logs/{}/?embed=original_node'.format(API_BASE, self.first_reg_log._id)
res = self.app.get(registration_log_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['action'], 'project_created')
embeds = res.json['data']['embeds']['original_node']
assert_equal(embeds['data']['id'], self.project._id)
def test_embed_node(self):
registration_log_url = '/{}logs/{}/?embed=node'.format(API_BASE, self.first_reg_log._id)
res = self.app.get(registration_log_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['action'], 'project_created')
embeds = res.json['data']['embeds']['node']
assert_equal(embeds['data']['id'], self.registration._id)
def test_embed_user(self):
registration_log_url = '/{}logs/{}/?embed=user'.format(API_BASE, self.first_reg_log._id)
res = self.app.get(registration_log_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['action'], 'project_created')
embeds = res.json['data']['embeds']['user']
assert_equal(embeds['data']['id'], self.user._id)
def test_embed_attributes_not_relationships(self):
url = '/{}logs/{}/?embed=action'.format(API_BASE, self.first_reg_log._id)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "The following fields are not embeddable: action")
|
<commit_before><commit_msg>Add tests for testing log embeds.<commit_after>
|
from nose.tools import * # flake8: noqa
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import (
ProjectFactory,
AuthUserFactory,
RegistrationFactory
)
class TestLogEmbeds(ApiTestCase):
def setUp(self):
super(TestLogEmbeds, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(is_public=True, creator=self.user)
self.registration = RegistrationFactory(project=self.project, creator=self.user, is_public=True)
self.first_reg_log = list(self.registration.logs)[0]
def test_embed_original_node(self):
registration_log_url = '/{}logs/{}/?embed=original_node'.format(API_BASE, self.first_reg_log._id)
res = self.app.get(registration_log_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['action'], 'project_created')
embeds = res.json['data']['embeds']['original_node']
assert_equal(embeds['data']['id'], self.project._id)
def test_embed_node(self):
registration_log_url = '/{}logs/{}/?embed=node'.format(API_BASE, self.first_reg_log._id)
res = self.app.get(registration_log_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['action'], 'project_created')
embeds = res.json['data']['embeds']['node']
assert_equal(embeds['data']['id'], self.registration._id)
def test_embed_user(self):
registration_log_url = '/{}logs/{}/?embed=user'.format(API_BASE, self.first_reg_log._id)
res = self.app.get(registration_log_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['action'], 'project_created')
embeds = res.json['data']['embeds']['user']
assert_equal(embeds['data']['id'], self.user._id)
def test_embed_attributes_not_relationships(self):
url = '/{}logs/{}/?embed=action'.format(API_BASE, self.first_reg_log._id)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "The following fields are not embeddable: action")
|
Add tests for testing log embeds.from nose.tools import * # flake8: noqa
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import (
ProjectFactory,
AuthUserFactory,
RegistrationFactory
)
class TestLogEmbeds(ApiTestCase):
def setUp(self):
super(TestLogEmbeds, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(is_public=True, creator=self.user)
self.registration = RegistrationFactory(project=self.project, creator=self.user, is_public=True)
self.first_reg_log = list(self.registration.logs)[0]
def test_embed_original_node(self):
registration_log_url = '/{}logs/{}/?embed=original_node'.format(API_BASE, self.first_reg_log._id)
res = self.app.get(registration_log_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['action'], 'project_created')
embeds = res.json['data']['embeds']['original_node']
assert_equal(embeds['data']['id'], self.project._id)
def test_embed_node(self):
registration_log_url = '/{}logs/{}/?embed=node'.format(API_BASE, self.first_reg_log._id)
res = self.app.get(registration_log_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['action'], 'project_created')
embeds = res.json['data']['embeds']['node']
assert_equal(embeds['data']['id'], self.registration._id)
def test_embed_user(self):
registration_log_url = '/{}logs/{}/?embed=user'.format(API_BASE, self.first_reg_log._id)
res = self.app.get(registration_log_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['action'], 'project_created')
embeds = res.json['data']['embeds']['user']
assert_equal(embeds['data']['id'], self.user._id)
def test_embed_attributes_not_relationships(self):
url = '/{}logs/{}/?embed=action'.format(API_BASE, self.first_reg_log._id)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "The following fields are not embeddable: action")
|
<commit_before><commit_msg>Add tests for testing log embeds.<commit_after>from nose.tools import * # flake8: noqa
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import (
ProjectFactory,
AuthUserFactory,
RegistrationFactory
)
class TestLogEmbeds(ApiTestCase):
def setUp(self):
super(TestLogEmbeds, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(is_public=True, creator=self.user)
self.registration = RegistrationFactory(project=self.project, creator=self.user, is_public=True)
self.first_reg_log = list(self.registration.logs)[0]
def test_embed_original_node(self):
registration_log_url = '/{}logs/{}/?embed=original_node'.format(API_BASE, self.first_reg_log._id)
res = self.app.get(registration_log_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['action'], 'project_created')
embeds = res.json['data']['embeds']['original_node']
assert_equal(embeds['data']['id'], self.project._id)
def test_embed_node(self):
registration_log_url = '/{}logs/{}/?embed=node'.format(API_BASE, self.first_reg_log._id)
res = self.app.get(registration_log_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['action'], 'project_created')
embeds = res.json['data']['embeds']['node']
assert_equal(embeds['data']['id'], self.registration._id)
def test_embed_user(self):
registration_log_url = '/{}logs/{}/?embed=user'.format(API_BASE, self.first_reg_log._id)
res = self.app.get(registration_log_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['action'], 'project_created')
embeds = res.json['data']['embeds']['user']
assert_equal(embeds['data']['id'], self.user._id)
def test_embed_attributes_not_relationships(self):
url = '/{}logs/{}/?embed=action'.format(API_BASE, self.first_reg_log._id)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "The following fields are not embeddable: action")
|
|
3ec610d7f24aec622e13284088622c7f98b454b1
|
euler025.py
|
euler025.py
|
#!/usr/bin/python
from math import ceil, log10
PHI = 1.6180339887
x = 0
i = 0
while x < 1000:
i += 1
x = ceil(i * log10(PHI) - log10(5) / 2)
print(i, x)
print(i)
|
Add solution for problem 25
|
Add solution for problem 25
|
Python
|
mit
|
cifvts/PyEuler
|
Add solution for problem 25
|
#!/usr/bin/python
from math import ceil, log10
PHI = 1.6180339887
x = 0
i = 0
while x < 1000:
i += 1
x = ceil(i * log10(PHI) - log10(5) / 2)
print(i, x)
print(i)
|
<commit_before><commit_msg>Add solution for problem 25<commit_after>
|
#!/usr/bin/python
from math import ceil, log10
PHI = 1.6180339887
x = 0
i = 0
while x < 1000:
i += 1
x = ceil(i * log10(PHI) - log10(5) / 2)
print(i, x)
print(i)
|
Add solution for problem 25#!/usr/bin/python
from math import ceil, log10
PHI = 1.6180339887
x = 0
i = 0
while x < 1000:
i += 1
x = ceil(i * log10(PHI) - log10(5) / 2)
print(i, x)
print(i)
|
<commit_before><commit_msg>Add solution for problem 25<commit_after>#!/usr/bin/python
from math import ceil, log10
PHI = 1.6180339887
x = 0
i = 0
while x < 1000:
i += 1
x = ceil(i * log10(PHI) - log10(5) / 2)
print(i, x)
print(i)
|
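The loop above rests on Binet's approximation F(i) is roughly phi**i / sqrt(5), under which F(i) has ceil(i*log10(phi) - log10(5)/2) digits. The exact integer cross-check below is illustrative only and agrees with the closed form.

def first_fib_index_with_digits(d):
    # Iterate Fibonacci numbers exactly; b holds F(n) with F(1) = F(2) = 1.
    a, b, n = 1, 1, 2
    while len(str(b)) < d:
        a, b, n = b, a + b, n + 1
    return n

print(first_fib_index_with_digits(1000))   # expected: 4782, matching the logarithmic loop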
|
7ee1168509ecdef6ba6cc81faeb3b6c9674ddc67
|
tests/test_S3.py
|
tests/test_S3.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from keyring.tests.test_backend import BackendBasicTests
from keyring.tests.py30compat import unittest
from s3keyring import S3
@unittest.skipUnless(S3.supported(),
"You need to configure the AWS credentials")
class S3PlaintextKeychainTestCase(BackendBasicTests, unittest.TestCase):
def init_keyring(self):
return S3.S3Keyring()
|
Add tests for the backend
|
Add tests for the backend
|
Python
|
mit
|
InnovativeTravel/s3-keyring
|
Add tests for the backend
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from keyring.tests.test_backend import BackendBasicTests
from keyring.tests.py30compat import unittest
from s3keyring import S3
@unittest.skipUnless(S3.supported(),
"You need to configure the AWS credentials")
class S3PlaintextKeychainTestCase(BackendBasicTests, unittest.TestCase):
def init_keyring(self):
return S3.S3Keyring()
|
<commit_before><commit_msg>Add tests for the backend<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from keyring.tests.test_backend import BackendBasicTests
from keyring.tests.py30compat import unittest
from s3keyring import S3
@unittest.skipUnless(S3.supported(),
"You need to configure the AWS credentials")
class S3PlaintextKeychainTestCase(BackendBasicTests, unittest.TestCase):
def init_keyring(self):
return S3.S3Keyring()
|
Add tests for the backend#!/usr/bin/env python
# -*- coding: utf-8 -*-
from keyring.tests.test_backend import BackendBasicTests
from keyring.tests.py30compat import unittest
from s3keyring import S3
@unittest.skipUnless(S3.supported(),
"You need to configure the AWS credentials")
class S3PlaintextKeychainTestCase(BackendBasicTests, unittest.TestCase):
def init_keyring(self):
return S3.S3Keyring()
|
<commit_before><commit_msg>Add tests for the backend<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from keyring.tests.test_backend import BackendBasicTests
from keyring.tests.py30compat import unittest
from s3keyring import S3
@unittest.skipUnless(S3.supported(),
"You need to configure the AWS credentials")
class S3PlaintextKeychainTestCase(BackendBasicTests, unittest.TestCase):
def init_keyring(self):
return S3.S3Keyring()
|
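Beyond the test case above, an illustrative usage sketch, assuming the s3keyring package and AWS credentials are configured: the backend can be installed as the active keyring and driven through the standard keyring API.

import keyring
from s3keyring import S3

keyring.set_keyring(S3.S3Keyring())                     # make the S3 backend the active keyring
keyring.set_password('my-service', 'alice', 's3cret')   # stored through the S3 backend
print(keyring.get_password('my-service', 'alice'))      # -> 's3cret'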
|
7027680e509055503c86544193d9ff46530be3d3
|
indra/databases/relevance_client.py
|
indra/databases/relevance_client.py
|
import logging
from indra.databases import ndex_client
logging = logging.getLogger('relevance')
ndex_relevance = 'http://general.bigmech.ndexbio.org:8080'
def get_heat_kernel(network_id):
"""Return the identifier of a heat kernel calculated for a given network.
Parameters
----------
network_id : str
The UUID of the network in NDEx.
Returns
-------
kernel_id : str
The identifier of the heat kernel calculated for the given network.
"""
url = ndex_relevance + '/%s/generate_ndex_heat_kernel' % network_id
res = ndex_client.send_request(url, {}, is_json=True, use_get=True)
kernel_id = res.get('kernel_id')
if kernel_id is None:
logging.error('Could not get heat kernel for network.')
return None
return kernel_id
def get_relevant_nodes(network_id, query_nodes):
"""Return a set of network nodes relevant to a given query set.
A heat diffusion algorithm is used on a pre-computed heat kernel for the
given network which starts from the given query nodes. The nodes
in the network are ranked according to heat score which is a measure
of relevance with respect to the query nodes.
Parameters
----------
network_id : str
The UUID of the network in NDEx.
query_nodes : list[str]
A list of node names with respect to which relevance is queried.
Returns
-------
ranked_entities : list[(str, float)]
A list containing pairs of node names and their relevance scores.
"""
url = ndex_relevance + '/rank_entities'
kernel_id = get_heat_kernel(network_id)
if isinstance(query_nodes, basestring):
query_nodes = [query_nodes]
params = {'identifier_set': query_nodes,
'kernel_id': kernel_id}
res = ndex_client.send_request(url, params, is_json=True)
ranked_entities = res.get('ranked_entities')
if ranked_entities is None:
logging.error('Could not get ranked entities.')
return None
return ranked_entities
|
Add relevance client based on NDEx service
|
Add relevance client based on NDEx service
|
Python
|
bsd-2-clause
|
jmuhlich/indra,pvtodorov/indra,bgyori/indra,pvtodorov/indra,bgyori/indra,sorgerlab/belpy,sorgerlab/indra,johnbachman/belpy,johnbachman/indra,jmuhlich/indra,sorgerlab/belpy,pvtodorov/indra,jmuhlich/indra,johnbachman/indra,sorgerlab/belpy,sorgerlab/indra,johnbachman/indra,sorgerlab/indra,johnbachman/belpy,pvtodorov/indra,bgyori/indra,johnbachman/belpy
|
Add relevance client based on NDEx service
|
import logging
from indra.databases import ndex_client
logging = logging.getLogger('relevance')
ndex_relevance = 'http://general.bigmech.ndexbio.org:8080'
def get_heat_kernel(network_id):
"""Return the identifier of a heat kernel calculated for a given network.
Parameters
----------
network_id : str
The UUID of the network in NDEx.
Returns
-------
kernel_id : str
The identifier of the heat kernel calculated for the given network.
"""
url = ndex_relevance + '/%s/generate_ndex_heat_kernel' % network_id
res = ndex_client.send_request(url, {}, is_json=True, use_get=True)
kernel_id = res.get('kernel_id')
if kernel_id is None:
logging.error('Could not get heat kernel for network.')
return None
return kernel_id
def get_relevant_nodes(network_id, query_nodes):
"""Return a set of network nodes relevant to a given query set.
A heat diffusion algorithm is used on a pre-computed heat kernel for the
given network which starts from the given query nodes. The nodes
in the network are ranked according to heat score which is a measure
of relevance with respect to the query nodes.
Parameters
----------
network_id : str
The UUID of the network in NDEx.
query_nodes : list[str]
A list of node names with respect to which relevance is queried.
Returns
-------
ranked_entities : list[(str, float)]
A list containing pairs of node names and their relevance scores.
"""
url = ndex_relevance + '/rank_entities'
kernel_id = get_heat_kernel(network_id)
if isinstance(query_nodes, basestring):
query_nodes = [query_nodes]
params = {'identifier_set': query_nodes,
'kernel_id': kernel_id}
res = ndex_client.send_request(url, params, is_json=True)
ranked_entities = res.get('ranked_entities')
if ranked_entities is None:
logging.error('Could not get ranked entities.')
return None
return ranked_entities
|
<commit_before><commit_msg>Add relevance client based on NDEx service<commit_after>
|
import logging
from indra.databases import ndex_client
logging = logging.getLogger('relevance')
ndex_relevance = 'http://general.bigmech.ndexbio.org:8080'
def get_heat_kernel(network_id):
"""Return the identifier of a heat kernel calculated for a given network.
Parameters
----------
network_id : str
The UUID of the network in NDEx.
Returns
-------
kernel_id : str
The identifier of the heat kernel calculated for the given network.
"""
url = ndex_relevance + '/%s/generate_ndex_heat_kernel' % network_id
res = ndex_client.send_request(url, {}, is_json=True, use_get=True)
kernel_id = res.get('kernel_id')
if kernel_id is None:
logging.error('Could not get heat kernel for network.')
return None
return kernel_id
def get_relevant_nodes(network_id, query_nodes):
"""Return a set of network nodes relevant to a given query set.
A heat diffusion algorithm is used on a pre-computed heat kernel for the
given network which starts from the given query nodes. The nodes
in the network are ranked according to heat score which is a measure
of relevance with respect to the query nodes.
Parameters
----------
network_id : str
The UUID of the network in NDEx.
query_nodes : list[str]
A list of node names with respect to which relevance is queried.
Returns
-------
ranked_entities : list[(str, float)]
A list containing pairs of node names and their relevance scores.
"""
url = ndex_relevance + '/rank_entities'
kernel_id = get_heat_kernel(network_id)
if isinstance(query_nodes, basestring):
query_nodes = [query_nodes]
params = {'identifier_set': query_nodes,
'kernel_id': kernel_id}
res = ndex_client.send_request(url, params, is_json=True)
ranked_entities = res.get('ranked_entities')
if ranked_entities is None:
logging.error('Could not get ranked entities.')
return None
return ranked_entities
|
Add relevance client based on NDEx serviceimport logging
from indra.databases import ndex_client
logging = logging.getLogger('relevance')
ndex_relevance = 'http://general.bigmech.ndexbio.org:8080'
def get_heat_kernel(network_id):
"""Return the identifier of a heat kernel calculated for a given network.
Parameters
----------
network_id : str
The UUID of the network in NDEx.
Returns
-------
kernel_id : str
The identifier of the heat kernel calculated for the given network.
"""
url = ndex_relevance + '/%s/generate_ndex_heat_kernel' % network_id
res = ndex_client.send_request(url, {}, is_json=True, use_get=True)
kernel_id = res.get('kernel_id')
if kernel_id is None:
logging.error('Could not get heat kernel for network.')
return None
return kernel_id
def get_relevant_nodes(network_id, query_nodes):
"""Return a set of network nodes relevant to a given query set.
A heat diffusion algorithm is used on a pre-computed heat kernel for the
given network which starts from the given query nodes. The nodes
in the network are ranked according to heat score which is a measure
of relevance with respect to the query nodes.
Parameters
----------
network_id : str
The UUID of the network in NDEx.
query_nodes : list[str]
A list of node names with respect to which relevance is queried.
Returns
-------
ranked_entities : list[(str, float)]
A list containing pairs of node names and their relevance scores.
"""
url = ndex_relevance + '/rank_entities'
kernel_id = get_heat_kernel(network_id)
if isinstance(query_nodes, basestring):
query_nodes = [query_nodes]
params = {'identifier_set': query_nodes,
'kernel_id': kernel_id}
res = ndex_client.send_request(url, params, is_json=True)
ranked_entities = res.get('ranked_entities')
if ranked_entities is None:
logging.error('Could not get ranked entities.')
return None
return ranked_entities
|
<commit_before><commit_msg>Add relevance client based on NDEx service<commit_after>import logging
from indra.databases import ndex_client
logging = logging.getLogger('relevance')
ndex_relevance = 'http://general.bigmech.ndexbio.org:8080'
def get_heat_kernel(network_id):
"""Return the identifier of a heat kernel calculated for a given network.
Parameters
----------
network_id : str
The UUID of the network in NDEx.
Returns
-------
kernel_id : str
The identifier of the heat kernel calculated for the given network.
"""
url = ndex_relevance + '/%s/generate_ndex_heat_kernel' % network_id
res = ndex_client.send_request(url, {}, is_json=True, use_get=True)
kernel_id = res.get('kernel_id')
if kernel_id is None:
logging.error('Could not get heat kernel for network.')
return None
return kernel_id
def get_relevant_nodes(network_id, query_nodes):
"""Return a set of network nodes relevant to a given query set.
A heat diffusion algorithm is used on a pre-computed heat kernel for the
given network which starts from the given query nodes. The nodes
in the network are ranked according to heat score which is a measure
of relevance with respect to the query nodes.
Parameters
----------
network_id : str
The UUID of the network in NDEx.
query_nodes : list[str]
A list of node names with respect to which relevance is queried.
Returns
-------
ranked_entities : list[(str, float)]
A list containing pairs of node names and their relevance scores.
"""
url = ndex_relevance + '/rank_entities'
kernel_id = get_heat_kernel(network_id)
if isinstance(query_nodes, basestring):
query_nodes = [query_nodes]
params = {'identifier_set': query_nodes,
'kernel_id': kernel_id}
res = ndex_client.send_request(url, params, is_json=True)
ranked_entities = res.get('ranked_entities')
if ranked_entities is None:
logging.error('Could not get ranked entities.')
return None
return ranked_entities
|
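Editor's note: a hypothetical usage sketch for the client above; the module path follows the file name in the record, and the network UUID and node names are placeholders, not values taken from the source.
from indra.databases import relevance_client
network_id = '00000000-0000-0000-0000-000000000000'  # placeholder NDEx UUID
ranked = relevance_client.get_relevant_nodes(network_id, ['MAPK1', 'TP53'])
if ranked is not None:
    # per the docstring, ranked is a list of (node name, relevance score) pairs
    for name, score in ranked[:10]:
        print(name, score)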
|
a0fc417f9d5abbf19ff516590b6c5cdff849cb1e
|
get_pose.py
|
get_pose.py
|
import csv
def get_backup_pose(filename):
p = []
r = csv.DictReader(open(filename,'r'),delimiter = ';')
for row in r:
p.append(row)
a = {}
a['CSVPath'] = p[0]["CSVPath"]
a['cursor'] = int(p[0]["cursor"])
return a
def set_backup_pose(filename,dict):
f = open(filename,'w')
r = csv.DictWriter(f,delimiter = ';',lineterminator = '\n',fieldnames=["CSVPath","cursor"])
r.writeheader()
r.writerow(dict)
f.close()
if __name__ == '__main__':
a = {"CSVPath":"toto","cursor":5}
set_backup_pose("test.csv",a)
b = get_backup_pose("test.csv")
print b
|
Test file for backup pose
|
Test file for backup pose
|
Python
|
mit
|
ThotAlion/FIRE,ThotAlion/FIRE,ThotAlion/FIRE
|
Test file for backup pose
|
import csv
def get_backup_pose(filename):
p = []
r = csv.DictReader(open(filename,'r'),delimiter = ';')
for row in r:
p.append(row)
a = {}
a['CSVPath'] = p[0]["CSVPath"]
a['cursor'] = int(p[0]["cursor"])
return a
def set_backup_pose(filename,dict):
f = open(filename,'w')
r = csv.DictWriter(f,delimiter = ';',lineterminator = '\n',fieldnames=["CSVPath","cursor"])
r.writeheader()
r.writerow(dict)
f.close()
if __name__ == '__main__':
a = {"CSVPath":"toto","cursor":5}
set_backup_pose("test.csv",a)
b = get_backup_pose("test.csv")
print b
|
<commit_before><commit_msg>Test file for backup pose<commit_after>
|
import csv
def get_backup_pose(filename):
p = []
r = csv.DictReader(open(filename,'r'),delimiter = ';')
for row in r:
p.append(row)
a = {}
a['CSVPath'] = p[0]["CSVPath"]
a['cursor'] = int(p[0]["cursor"])
return a
def set_backup_pose(filename,dict):
f = open(filename,'w')
r = csv.DictWriter(f,delimiter = ';',lineterminator = '\n',fieldnames=["CSVPath","cursor"])
r.writeheader()
r.writerow(dict)
f.close()
if __name__ == '__main__':
a = {"CSVPath":"toto","cursor":5}
set_backup_pose("test.csv",a)
b = get_backup_pose("test.csv")
print b
|
Test file for backup poseimport csv
def get_backup_pose(filename):
p = []
r = csv.DictReader(open(filename,'r'),delimiter = ';')
for row in r:
p.append(row)
a = {}
a['CSVPath'] = p[0]["CSVPath"]
a['cursor'] = int(p[0]["cursor"])
return a
def set_backup_pose(filename,dict):
f = open(filename,'w')
r = csv.DictWriter(f,delimiter = ';',lineterminator = '\n',fieldnames=["CSVPath","cursor"])
r.writeheader()
r.writerow(dict)
f.close()
if __name__ == '__main__':
a = {"CSVPath":"toto","cursor":5}
set_backup_pose("test.csv",a)
b = get_backup_pose("test.csv")
print b
|
<commit_before><commit_msg>Test file for backup pose<commit_after>import csv
def get_backup_pose(filename):
p = []
r = csv.DictReader(open(filename,'r'),delimiter = ';')
for row in r:
p.append(row)
a = {}
a['CSVPath'] = p[0]["CSVPath"]
a['cursor'] = int(p[0]["cursor"])
return a
def set_backup_pose(filename,dict):
f = open(filename,'w')
r = csv.DictWriter(f,delimiter = ';',lineterminator = '\n',fieldnames=["CSVPath","cursor"])
r.writeheader()
r.writerow(dict)
f.close()
if __name__ == '__main__':
a = {"CSVPath":"toto","cursor":5}
set_backup_pose("test.csv",a)
b = get_backup_pose("test.csv")
print b
|
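Editor's note: the record above is Python 2 (`print b`). A sketch of the same CSV round-trip in Python 3 syntax, assuming the same two-column layout (CSVPath; cursor):
import csv
def set_backup_pose(filename, pose):
    with open(filename, 'w', newline='') as f:
        w = csv.DictWriter(f, delimiter=';', fieldnames=['CSVPath', 'cursor'])
        w.writeheader()
        w.writerow(pose)
def get_backup_pose(filename):
    with open(filename, newline='') as f:
        row = next(csv.DictReader(f, delimiter=';'))
    return {'CSVPath': row['CSVPath'], 'cursor': int(row['cursor'])}
set_backup_pose('test.csv', {'CSVPath': 'toto', 'cursor': 5})
print(get_backup_pose('test.csv'))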
|
e7e4aaa2788edcb01f3e681bc557d4c7f5ae26e1
|
calm-ingest/validate_data.py
|
calm-ingest/validate_data.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Checks for the validity of Calm data.
This script runs a series of checks over the Calm data, looking for problems
that should be corrected in the source data.
"""
import json
from utils import read_records
WARNINGS = {}
def check_record(records):
try:
for record in records:
_check_exactly_one_field(record, 'AltRefNo')
_check_exactly_one_field(record, 'RefNo')
except KeyboardInterrupt:
pass
json.dump(WARNINGS, open('calm_warnings.json', 'w'), indent=2)
print('\nWritten warnings to calm_warnings.json')
def _warn(record, message, context_data=None):
"""Drop a message about an inconsistent record.
:param record: A record returned by read_records()
:param message: A string explaining the problem
:param context_data: Any record-specific contextual data
"""
record_id = record.find('RecordID').text
if message not in WARNINGS:
if context_data is None:
WARNINGS[message] = []
else:
WARNINGS[message] = {}
if context_data is None:
WARNINGS[message].append(record_id)
else:
WARNINGS[message][record_id] = context_data
def _check_exactly_one_field(record, field):
"""Checks that a record has exactly one, non-empty instance of
a given field.
:param record: A record returned by read_records()
:param field: Name of the Calm field to inspect
"""
fields = record.getchildren()
field_names = [c.tag for c in fields]
if fields.count(field) == 0:
_warn(record, 'Missing %r field' % field)
elif fields.count(field) != 1:
_warn(record, 'Too many instances of %r field' % field,
[f.text for f in fields if f.tag == field])
elif fields.find(field).text is None:
_warn(record, 'Empty %r field' % field)
if __name__ == '__main__':
import sys
check_record(read_records(sys.argv[1]))
|
Add a rough script for checking validity of Calm data
|
Add a rough script for checking validity of Calm data
|
Python
|
mit
|
wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api
|
Add a rough script for checking validity of Calm data
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Checks for the validity of Calm data.
This script runs a series of checks over the Calm data, looking for problems
that should be corrected in the source data.
"""
import json
from utils import read_records
WARNINGS = {}
def check_record(records):
try:
for record in records:
_check_exactly_one_field(record, 'AltRefNo')
_check_exactly_one_field(record, 'RefNo')
except KeyboardInterrupt:
pass
json.dump(WARNINGS, open('calm_warnings.json', 'w'), indent=2)
print('\nWritten warnings to calm_warnings.json')
def _warn(record, message, context_data=None):
"""Drop a message about an inconsistent record.
:param record: A record returned by read_records()
:param message: A string explaining the problem
:param context_data: Any record-specific contextual data
"""
record_id = record.find('RecordID').text
if message not in WARNINGS:
if context_data is None:
WARNINGS[message] = []
else:
WARNINGS[message] = {}
if context_data is None:
WARNINGS[message].append(record_id)
else:
WARNINGS[message][record_id] = context_data
def _check_exactly_one_field(record, field):
"""Checks that a record has exactly one, non-empty instance of
a given field.
:param record: A record returned by read_records()
:param field: Name of the Calm field to inspect
"""
fields = record.getchildren()
field_names = [c.tag for c in fields]
if fields.count(field) == 0:
_warn(record, 'Missing %r field' % field)
elif fields.count(field) != 1:
_warn(record, 'Too many instances of %r field' % field,
[f.text for f in fields if f.tag == field])
elif fields.find(field).text is None:
_warn(record, 'Empty %r field' % field)
if __name__ == '__main__':
import sys
check_record(read_records(sys.argv[1]))
|
<commit_before><commit_msg>Add a rough script for checking validity of Calm data<commit_after>
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Checks for the validity of Calm data.
This script runs a series of checks over the Calm data, looking for problems
that should be corrected in the source data.
"""
import json
from utils import read_records
WARNINGS = {}
def check_record(records):
try:
for record in records:
_check_exactly_one_field(record, 'AltRefNo')
_check_exactly_one_field(record, 'RefNo')
except KeyboardInterrupt:
pass
json.dump(WARNINGS, open('calm_warnings.json', 'w'), indent=2)
print('\nWritten warnings to calm_warnings.json')
def _warn(record, message, context_data=None):
"""Drop a message about an inconsistent record.
:param record: A record returned by read_records()
:param message: A string explaining the problem
:param context_data: Any record-specific contextual data
"""
record_id = record.find('RecordID').text
if message not in WARNINGS:
if context_data is None:
WARNINGS[message] = []
else:
WARNINGS[message] = {}
if context_data is None:
WARNINGS[message].append(record_id)
else:
WARNINGS[message][record_id] = context_data
def _check_exactly_one_field(record, field):
"""Checks that a record has exactly one, non-empty instance of
a given field.
:param record: A record returned by read_records()
:param field: Name of the Calm field to inspect
"""
fields = record.getchildren()
field_names = [c.tag for c in fields]
if fields.count(field) == 0:
_warn(record, 'Missing %r field' % field)
elif fields.count(field) != 1:
_warn(record, 'Too many instances of %r field' % field,
[f.text for f in fields if f.tag == field])
elif fields.find(field).text is None:
_warn(record, 'Empty %r field' % field)
if __name__ == '__main__':
import sys
check_record(read_records(sys.argv[1]))
|
Add a rough script for checking validity of Calm data#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Checks for the validity of Calm data.
This script runs a series of checks over the Calm data, looking for problems
that should be corrected in the source data.
"""
import json
from utils import read_records
WARNINGS = {}
def check_record(records):
try:
for record in records:
_check_exactly_one_field(record, 'AltRefNo')
_check_exactly_one_field(record, 'RefNo')
except KeyboardInterrupt:
pass
json.dump(WARNINGS, open('calm_warnings.json', 'w'), indent=2)
print('\nWritten warnings to calm_warnings.json')
def _warn(record, message, context_data=None):
"""Drop a message about an inconsistent record.
:param record: A record returned by read_records()
:param message: A string explaining the problem
:param context_data: Any record-specific contextual data
"""
record_id = record.find('RecordID').text
if message not in WARNINGS:
if context_data is None:
WARNINGS[message] = []
else:
WARNINGS[message] = {}
if context_data is None:
WARNINGS[message].append(record_id)
else:
WARNINGS[message][record_id] = context_data
def _check_exactly_one_field(record, field):
"""Checks that a record has exactly one, non-empty instance of
a given field.
:param record: A record returned by read_records()
:param field: Name of the Calm field to inspect
"""
fields = record.getchildren()
field_names = [c.tag for c in fields]
if fields.count(field) == 0:
_warn(record, 'Missing %r field' % field)
elif fields.count(field) != 1:
_warn(record, 'Too many instances of %r field' % field,
[f.text for f in fields if f.tag == field])
elif fields.find(field).text is None:
_warn(record, 'Empty %r field' % field)
if __name__ == '__main__':
import sys
check_record(read_records(sys.argv[1]))
|
<commit_before><commit_msg>Add a rough script for checking validity of Calm data<commit_after>#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Checks for the validity of Calm data.
This script runs a series of checks over the Calm data, looking for problems
that should be corrected in the source data.
"""
import json
from utils import read_records
WARNINGS = {}
def check_record(records):
try:
for record in records:
_check_exactly_one_field(record, 'AltRefNo')
_check_exactly_one_field(record, 'RefNo')
except KeyboardInterrupt:
pass
json.dump(WARNINGS, open('calm_warnings.json', 'w'), indent=2)
print('\nWritten warnings to calm_warnings.json')
def _warn(record, message, context_data=None):
"""Drop a message about an inconsistent record.
:param record: A record returned by read_records()
:param message: A string explaining the problem
:param context_data: Any record-specific contextual data
"""
record_id = record.find('RecordID').text
if message not in WARNINGS:
if context_data is None:
WARNINGS[message] = []
else:
WARNINGS[message] = {}
if context_data is None:
WARNINGS[message].append(record_id)
else:
WARNINGS[message][record_id] = context_data
def _check_exactly_one_field(record, field):
"""Checks that a record has exactly one, non-empty instance of
a given field.
:param record: A record returned by read_records()
:param field: Name of the Calm field to inspect
"""
fields = record.getchildren()
field_names = [c.tag for c in fields]
if fields.count(field) == 0:
_warn(record, 'Missing %r field' % field)
elif fields.count(field) != 1:
_warn(record, 'Too many instances of %r field' % field,
[f.text for f in fields if f.tag == field])
elif fields.find(field).text is None:
_warn(record, 'Empty %r field' % field)
if __name__ == '__main__':
import sys
check_record(read_records(sys.argv[1]))
|
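Editor's note: as written, `_check_exactly_one_field` calls `.count(field)` and `.find(field)` on the list of Element objects returned by `getchildren()`, so the string name never matches and a plain list has no `.find` method; the unused `field_names` list suggests the intent. A corrected sketch of the helper, keeping the record's warning conventions:
def _check_exactly_one_field(record, field):
    # collect the child elements whose tag matches the requested field name
    matches = [c for c in record.getchildren() if c.tag == field]
    if not matches:
        _warn(record, 'Missing %r field' % field)
    elif len(matches) > 1:
        _warn(record, 'Too many instances of %r field' % field,
              [c.text for c in matches])
    elif matches[0].text is None:
        _warn(record, 'Empty %r field' % field)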
|
4886f21febbc6385e6aafb990163e69e31a5b99e
|
scripts/generate-guidebook-sponsor-logs.py
|
scripts/generate-guidebook-sponsor-logs.py
|
#!/usr/bin/env python3
import shlex
import subprocess
import sys
import zipfile
from os import makedirs
import frontmatter
import pandas as pd
# read in the custom list template
df_sponsors = pd.read_csv('Guidebook_CL_Template.csv')
rows = []
# create the directory that will store the resized sponsor logos
outdir = 'guidebook-sponsor-logos'
makedirs(outdir, exist_ok=True)
# create the zip file that will contain the resized logos
thumbnail_zipfh = zipfile.ZipFile('sponsor-thumbs.zip', 'w')
# read in the frontmatter from the sponsors page so that we can get
# the logos by sponsor level
content = frontmatter.load('../_pages/sponsors/overview.md')
for level in ['platinum', 'gold', 'silver', 'bronze', 'supporter']:
print(level)
logo_info = [(obj['title'], obj['image_path']) for obj in content[level] if 'title' in obj]
for title, image_name in logo_info:
print(' {}'.format(image_name))
# resize the image to 240x240 since that's what guidebook requires
try:
cmd = 'convert ../images/{} -resize 240x240 {}/{}'.format(image_name, outdir, image_name)
subprocess.check_call(shlex.split(cmd))
except subprocess.CalledProcessError:
print("Something went wrong when resizing {}".format(image_name))
sys.exit(1)
# add the resized image to the zip file but don't store the full path
thumbnail_zipfh.write('{}/{}'.format(outdir, image_name), arcname=image_name)
# now create the row for the custom list
d = {'Name': title,
"Sub-Title (i.e. Location, Table/Booth, or Title/Sponsorship Level)": '{}'.format(level.title()),
'Description (Optional)': '',
'Location/Room': '',
'Image (Optional)': '',
'Thumbnail (Optional)': image_name}
rows.append(d)
# close the zip file
thumbnail_zipfh.close()
# write the custom list rows out to a CSV file
df_sponsors = df_sponsors.append(rows)
df_sponsors.to_csv('acl2017-sponsors.csv', index=False)
|
Add script to generate sponsor info for guidebook.
|
Add script to generate sponsor info for guidebook.
|
Python
|
mit
|
acl2017/acl2017.github.io,acl2017/acl2017.github.io,acl2017/acl2017.github.io,acl2017/acl2017.github.io
|
Add script to generate sponsor info for guidebook.
|
#!/usr/bin/env python3
import shlex
import subprocess
import sys
import zipfile
from os import makedirs
import frontmatter
import pandas as pd
# read in the custom list template
df_sponsors = pd.read_csv('Guidebook_CL_Template.csv')
rows = []
# create the directory that will store the resized sponsor logos
outdir = 'guidebook-sponsor-logos'
makedirs(outdir, exist_ok=True)
# create the zip file that will contain the resized logos
thumbnail_zipfh = zipfile.ZipFile('sponsor-thumbs.zip', 'w')
# read in the frontmatter from the sponsors page so that we can get
# the logos by sponsor level
content = frontmatter.load('../_pages/sponsors/overview.md')
for level in ['platinum', 'gold', 'silver', 'bronze', 'supporter']:
print(level)
logo_info = [(obj['title'], obj['image_path']) for obj in content[level] if 'title' in obj]
for title, image_name in logo_info:
print(' {}'.format(image_name))
# resize the image to 240x240 since that's what guidebook requires
try:
cmd = 'convert ../images/{} -resize 240x240 {}/{}'.format(image_name, outdir, image_name)
subprocess.check_call(shlex.split(cmd))
except subprocess.CalledProcessError:
print("Something went wrong when resizing {}".format(image_name))
sys.exit(1)
# add the resized image to the zip file but don't store the full path
thumbnail_zipfh.write('{}/{}'.format(outdir, image_name), arcname=image_name)
# now create the row for the custom list
d = {'Name': title,
"Sub-Title (i.e. Location, Table/Booth, or Title/Sponsorship Level)": '{}'.format(level.title()),
'Description (Optional)': '',
'Location/Room': '',
'Image (Optional)': '',
'Thumbnail (Optional)': image_name}
rows.append(d)
# close the zip file
thumbnail_zipfh.close()
# write the custom list rows out to a CSV file
df_sponsors = df_sponsors.append(rows)
df_sponsors.to_csv('acl2017-sponsors.csv', index=False)
|
<commit_before><commit_msg>Add script to generate sponsor info for guidebook.<commit_after>
|
#!/usr/bin/env python3
import shlex
import subprocess
import sys
import zipfile
from os import makedirs
import frontmatter
import pandas as pd
# read in the custom list template
df_sponsors = pd.read_csv('Guidebook_CL_Template.csv')
rows = []
# create the directory that will store the resized sponsor logos
outdir = 'guidebook-sponsor-logos'
makedirs(outdir, exist_ok=True)
# create the zip file that will contain the resized logos
thumbnail_zipfh = zipfile.ZipFile('sponsor-thumbs.zip', 'w')
# read in the frontmatter from the sponsors page so that we can get
# the logos by sponsor level
content = frontmatter.load('../_pages/sponsors/overview.md')
for level in ['platinum', 'gold', 'silver', 'bronze', 'supporter']:
print(level)
logo_info = [(obj['title'], obj['image_path']) for obj in content[level] if 'title' in obj]
for title, image_name in logo_info:
print(' {}'.format(image_name))
# resize the image to 240x240 since that's what guidebook requires
try:
cmd = 'convert ../images/{} -resize 240x240 {}/{}'.format(image_name, outdir, image_name)
subprocess.check_call(shlex.split(cmd))
except subprocess.CalledProcessError:
print("Something went wrong when resizing {}".format(image_name))
sys.exit(1)
# add the resized image to the zip file but don't store the full path
thumbnail_zipfh.write('{}/{}'.format(outdir, image_name), arcname=image_name)
# now create the row for the custom list
d = {'Name': title,
"Sub-Title (i.e. Location, Table/Booth, or Title/Sponsorship Level)": '{}'.format(level.title()),
'Description (Optional)': '',
'Location/Room': '',
'Image (Optional)': '',
'Thumbnail (Optional)': image_name}
rows.append(d)
# close the zip file
thumbnail_zipfh.close()
# write the custom list rows out to a CSV file
df_sponsors = df_sponsors.append(rows)
df_sponsors.to_csv('acl2017-sponsors.csv', index=False)
|
Add script to generate sponsor info for guidebook.#!/usr/bin/env python3
import shlex
import subprocess
import sys
import zipfile
from os import makedirs
import frontmatter
import pandas as pd
# read in the custom list template
df_sponsors = pd.read_csv('Guidebook_CL_Template.csv')
rows = []
# create the directory that will store the resized sponsor logos
outdir = 'guidebook-sponsor-logos'
makedirs(outdir, exist_ok=True)
# create the zip file that will contain the resized logos
thumbnail_zipfh = zipfile.ZipFile('sponsor-thumbs.zip', 'w')
# read in the frontmatter from the sponsors page so that we can get
# the logos by sponsor level
content = frontmatter.load('../_pages/sponsors/overview.md')
for level in ['platinum', 'gold', 'silver', 'bronze', 'supporter']:
print(level)
logo_info = [(obj['title'], obj['image_path']) for obj in content[level] if 'title' in obj]
for title, image_name in logo_info:
print(' {}'.format(image_name))
# resize the image to 240x240 since that's what guidebook requires
try:
cmd = 'convert ../images/{} -resize 240x240 {}/{}'.format(image_name, outdir, image_name)
subprocess.check_call(shlex.split(cmd))
except subprocess.CalledProcessError:
print("Something went wrong when resizing {}".format(image_name))
sys.exit(1)
# add the resized image to the zip file but don't store the full path
thumbnail_zipfh.write('{}/{}'.format(outdir, image_name), arcname=image_name)
# now create the row for the custom list
d = {'Name': title,
"Sub-Title (i.e. Location, Table/Booth, or Title/Sponsorship Level)": '{}'.format(level.title()),
'Description (Optional)': '',
'Location/Room': '',
'Image (Optional)': '',
'Thumbnail (Optional)': image_name}
rows.append(d)
# close the zip file
thumbnail_zipfh.close()
# write the custom list rows out to a CSV file
df_sponsors = df_sponsors.append(rows)
df_sponsors.to_csv('acl2017-sponsors.csv', index=False)
|
<commit_before><commit_msg>Add script to generate sponsor info for guidebook.<commit_after>#!/usr/bin/env python3
import shlex
import subprocess
import sys
import zipfile
from os import makedirs
import frontmatter
import pandas as pd
# read in the custom list template
df_sponsors = pd.read_csv('Guidebook_CL_Template.csv')
rows = []
# create the directory that will store the resized sponsor logos
outdir = 'guidebook-sponsor-logos'
makedirs(outdir, exist_ok=True)
# create the zip file that will contain the resized logos
thumbnail_zipfh = zipfile.ZipFile('sponsor-thumbs.zip', 'w')
# read in the frontmatter from the sponsors page so that we can get
# the logos by sponsor level
content = frontmatter.load('../_pages/sponsors/overview.md')
for level in ['platinum', 'gold', 'silver', 'bronze', 'supporter']:
print(level)
logo_info = [(obj['title'], obj['image_path']) for obj in content[level] if 'title' in obj]
for title, image_name in logo_info:
print(' {}'.format(image_name))
# resize the image to 240x240 since that's what guidebook requires
try:
cmd = 'convert ../images/{} -resize 240x240 {}/{}'.format(image_name, outdir, image_name)
subprocess.check_call(shlex.split(cmd))
except subprocess.CalledProcessError:
print("Something went wrong when resizing {}".format(image_name))
sys.exit(1)
# add the resized image to the zip file but don't store the full path
thumbnail_zipfh.write('{}/{}'.format(outdir, image_name), arcname=image_name)
# now create the row for the custom list
d = {'Name': title,
"Sub-Title (i.e. Location, Table/Booth, or Title/Sponsorship Level)": '{}'.format(level.title()),
'Description (Optional)': '',
'Location/Room': '',
'Image (Optional)': '',
'Thumbnail (Optional)': image_name}
rows.append(d)
# close the zip file
thumbnail_zipfh.close()
# write the custom list rows out to a CSV file
df_sponsors = df_sponsors.append(rows)
df_sponsors.to_csv('acl2017-sponsors.csv', index=False)
|
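Editor's note: `DataFrame.append` was deprecated in pandas 1.4 and removed in pandas 2.0; with a current pandas the final step could be written, for example, as:
# build a DataFrame from the collected rows and concatenate instead of append
df_sponsors = pd.concat([df_sponsors, pd.DataFrame(rows)], ignore_index=True)
df_sponsors.to_csv('acl2017-sponsors.csv', index=False)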
|
167e7095c0a19d8139564280b3dc3e74791bf878
|
cabarchive/cli.py
|
cabarchive/cli.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Richard Hughes <richard@hughsie.com>
#
# SPDX-License-Identifier: LGPL-2.1+
#
# pylint: disable=wrong-import-position
import sys
import os
import argparse
sys.path.append(os.path.realpath("."))
from cabarchive import CabArchive
def main():
parser = argparse.ArgumentParser(description="Process cabinet archives.")
parser.add_argument(
"--decompress", type=bool, help="decompress the archives", default=False
)
parser.add_argument(
"--info", type=bool, help="Show the files inside the archive", default=True
)
parser.add_argument(
"--outdir", type=str, help="Specify the output directory", default="."
)
if len(sys.argv) == 1:
print("No input files given")
return 1
args, argv = parser.parse_known_args()
for arg in argv:
arc = CabArchive()
print("Parsing {}:".format(arg))
with open(arg, "rb") as f:
arc.parse(f.read())
if args.info:
for fn in arc:
print(fn)
if args.decompress:
for fn in arc:
with open(os.path.join(args.outdir, fn), "wb") as f:
print("Writing {}:".format(fn))
f.write(arc[fn].buf)
return 0
if __name__ == "__main__":
main()
|
Add a CLI test program
|
trivial: Add a CLI test program
|
Python
|
lgpl-2.1
|
hughsie/python-cabarchive
|
trivial: Add a CLI test program
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Richard Hughes <richard@hughsie.com>
#
# SPDX-License-Identifier: LGPL-2.1+
#
# pylint: disable=wrong-import-position
import sys
import os
import argparse
sys.path.append(os.path.realpath("."))
from cabarchive import CabArchive
def main():
parser = argparse.ArgumentParser(description="Process cabinet archives.")
parser.add_argument(
"--decompress", type=bool, help="decompress the archives", default=False
)
parser.add_argument(
"--info", type=bool, help="Show the files inside the archive", default=True
)
parser.add_argument(
"--outdir", type=str, help="Specify the output directory", default="."
)
if len(sys.argv) == 1:
print("No input files given")
return 1
args, argv = parser.parse_known_args()
for arg in argv:
arc = CabArchive()
print("Parsing {}:".format(arg))
with open(arg, "rb") as f:
arc.parse(f.read())
if args.info:
for fn in arc:
print(fn)
if args.decompress:
for fn in arc:
with open(os.path.join(args.outdir, fn), "wb") as f:
print("Writing {}:".format(fn))
f.write(arc[fn].buf)
return 0
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>trivial: Add a CLI test program<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Richard Hughes <richard@hughsie.com>
#
# SPDX-License-Identifier: LGPL-2.1+
#
# pylint: disable=wrong-import-position
import sys
import os
import argparse
sys.path.append(os.path.realpath("."))
from cabarchive import CabArchive
def main():
parser = argparse.ArgumentParser(description="Process cabinet archives.")
parser.add_argument(
"--decompress", type=bool, help="decompress the archives", default=False
)
parser.add_argument(
"--info", type=bool, help="Show the files inside the archive", default=True
)
parser.add_argument(
"--outdir", type=str, help="Specify the output directory", default="."
)
if len(sys.argv) == 1:
print("No input files given")
return 1
args, argv = parser.parse_known_args()
for arg in argv:
arc = CabArchive()
print("Parsing {}:".format(arg))
with open(arg, "rb") as f:
arc.parse(f.read())
if args.info:
for fn in arc:
print(fn)
if args.decompress:
for fn in arc:
with open(os.path.join(args.outdir, fn), "wb") as f:
print("Writing {}:".format(fn))
f.write(arc[fn].buf)
return 0
if __name__ == "__main__":
main()
|
trivial: Add a CLI test program#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Richard Hughes <richard@hughsie.com>
#
# SPDX-License-Identifier: LGPL-2.1+
#
# pylint: disable=wrong-import-position
import sys
import os
import argparse
sys.path.append(os.path.realpath("."))
from cabarchive import CabArchive
def main():
parser = argparse.ArgumentParser(description="Process cabinet archives.")
parser.add_argument(
"--decompress", type=bool, help="decompress the archives", default=False
)
parser.add_argument(
"--info", type=bool, help="Show the files inside the archive", default=True
)
parser.add_argument(
"--outdir", type=str, help="Specify the output directory", default="."
)
if len(sys.argv) == 1:
print("No input files given")
return 1
args, argv = parser.parse_known_args()
for arg in argv:
arc = CabArchive()
print("Parsing {}:".format(arg))
with open(arg, "rb") as f:
arc.parse(f.read())
if args.info:
for fn in arc:
print(fn)
if args.decompress:
for fn in arc:
with open(os.path.join(args.outdir, fn), "wb") as f:
print("Writing {}:".format(fn))
f.write(arc[fn].buf)
return 0
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>trivial: Add a CLI test program<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Richard Hughes <richard@hughsie.com>
#
# SPDX-License-Identifier: LGPL-2.1+
#
# pylint: disable=wrong-import-position
import sys
import os
import argparse
sys.path.append(os.path.realpath("."))
from cabarchive import CabArchive
def main():
parser = argparse.ArgumentParser(description="Process cabinet archives.")
parser.add_argument(
"--decompress", type=bool, help="decompress the archives", default=False
)
parser.add_argument(
"--info", type=bool, help="Show the files inside the archive", default=True
)
parser.add_argument(
"--outdir", type=str, help="Specify the output directory", default="."
)
if len(sys.argv) == 1:
print("No input files given")
return 1
args, argv = parser.parse_known_args()
for arg in argv:
arc = CabArchive()
print("Parsing {}:".format(arg))
with open(arg, "rb") as f:
arc.parse(f.read())
if args.info:
for fn in arc:
print(fn)
if args.decompress:
for fn in arc:
with open(os.path.join(args.outdir, fn), "wb") as f:
print("Writing {}:".format(fn))
f.write(arc[fn].buf)
return 0
if __name__ == "__main__":
main()
|
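Editor's note: with argparse, `type=bool` converts the supplied string, so any non-empty value (including "False") is parsed as true and the options above cannot really be toggled from the command line. A sketch using flag actions instead:
parser.add_argument("--decompress", action="store_true",
                    help="decompress the archives")
parser.add_argument("--no-info", dest="info", action="store_false",
                    help="do not list the files inside the archive")
parser.set_defaults(info=True)  # listing stays on unless --no-info is given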
|
5d0c7b7d209b0487c0a12e995a11efa9d695a50e
|
pq/management/commands/pqcreate.py
|
pq/management/commands/pqcreate.py
|
from django.core.management.base import BaseCommand
from optparse import make_option
from django.conf import settings
from pq.queue import PQ_DEFAULT_JOB_TIMEOUT
class Command(BaseCommand):
help = "Create a queue"
args = "<queue queue ...>"
option_list = BaseCommand.option_list + (
make_option('--queue', '-q', dest='queue', default='',
help='Specify the queue [default]'),
make_option('--conn', '-c', dest='conn', default='default',
help='Specify a connection [default]'),
make_option('--scheduled', action="store_true", default=False,
dest="scheduled", help="Schedule jobs in the future"),
make_option('--timeout', '-t', type="int", dest='timeout',
help="Default timeout in seconds"),
make_option('--serial', action="store_true", default=False, dest='serial',
help="A timeout in seconds"),
)
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
from pq.queue import Queue, SerialQueue
verbosity = int(options.get('verbosity', 1))
timeout = options.get('timeout')
for queue in args:
if options['serial']:
q = SerialQueue.create(queue)
else:
q = Queue.create(queue)
q.connection = options.get('conn')
q.scheduled = options.get('scheduled')
if timeout:
q.default_timeout = timeout
q.save()
|
Add a command to pre-create queues.
|
Add a command to pre-create queues.
|
Python
|
bsd-2-clause
|
bretth/django-pq
|
Add a command to pre-create queues.
|
from django.core.management.base import BaseCommand
from optparse import make_option
from django.conf import settings
from pq.queue import PQ_DEFAULT_JOB_TIMEOUT
class Command(BaseCommand):
help = "Create a queue"
args = "<queue queue ...>"
option_list = BaseCommand.option_list + (
make_option('--queue', '-q', dest='queue', default='',
help='Specify the queue [default]'),
make_option('--conn', '-c', dest='conn', default='default',
help='Specify a connection [default]'),
make_option('--scheduled', action="store_true", default=False,
dest="scheduled", help="Schedule jobs in the future"),
make_option('--timeout', '-t', type="int", dest='timeout',
help="Default timeout in seconds"),
make_option('--serial', action="store_true", default=False, dest='serial',
help="A timeout in seconds"),
)
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
from pq.queue import Queue, SerialQueue
verbosity = int(options.get('verbosity', 1))
timeout = options.get('timeout')
for queue in args:
if options['serial']:
q = SerialQueue.create(queue)
else:
q = Queue.create(queue)
q.connection = options.get('conn')
q.scheduled = options.get('scheduled')
if timeout:
q.default_timeout = timeout
q.save()
|
<commit_before><commit_msg>Add a command to pre-create queues.<commit_after>
|
from django.core.management.base import BaseCommand
from optparse import make_option
from django.conf import settings
from pq.queue import PQ_DEFAULT_JOB_TIMEOUT
class Command(BaseCommand):
help = "Create a queue"
args = "<queue queue ...>"
option_list = BaseCommand.option_list + (
make_option('--queue', '-q', dest='queue', default='',
help='Specify the queue [default]'),
make_option('--conn', '-c', dest='conn', default='default',
help='Specify a connection [default]'),
make_option('--scheduled', action="store_true", default=False,
dest="scheduled", help="Schedule jobs in the future"),
make_option('--timeout', '-t', type="int", dest='timeout',
help="Default timeout in seconds"),
make_option('--serial', action="store_true", default=False, dest='serial',
help="A timeout in seconds"),
)
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
from pq.queue import Queue, SerialQueue
verbosity = int(options.get('verbosity', 1))
timeout = options.get('timeout')
for queue in args:
if options['serial']:
q = SerialQueue.create(queue)
else:
q = Queue.create(queue)
q.connection = options.get('conn')
q.scheduled = options.get('scheduled')
if timeout:
q.default_timeout = timeout
q.save()
|
Add a command to pre-create queues.from django.core.management.base import BaseCommand
from optparse import make_option
from django.conf import settings
from pq.queue import PQ_DEFAULT_JOB_TIMEOUT
class Command(BaseCommand):
help = "Create a queue"
args = "<queue queue ...>"
option_list = BaseCommand.option_list + (
make_option('--queue', '-q', dest='queue', default='',
help='Specify the queue [default]'),
make_option('--conn', '-c', dest='conn', default='default',
help='Specify a connection [default]'),
make_option('--scheduled', action="store_true", default=False,
dest="scheduled", help="Schedule jobs in the future"),
make_option('--timeout', '-t', type="int", dest='timeout',
help="Default timeout in seconds"),
make_option('--serial', action="store_true", default=False, dest='serial',
help="A timeout in seconds"),
)
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
from pq.queue import Queue, SerialQueue
verbosity = int(options.get('verbosity', 1))
timeout = options.get('timeout')
for queue in args:
if options['serial']:
q = SerialQueue.create(queue)
else:
q = Queue.create(queue)
q.connection = options.get('conn')
q.scheduled = options.get('scheduled')
if timeout:
q.default_timeout = timeout
q.save()
|
<commit_before><commit_msg>Add a command to pre-create queues.<commit_after>from django.core.management.base import BaseCommand
from optparse import make_option
from django.conf import settings
from pq.queue import PQ_DEFAULT_JOB_TIMEOUT
class Command(BaseCommand):
help = "Create a queue"
args = "<queue queue ...>"
option_list = BaseCommand.option_list + (
make_option('--queue', '-q', dest='queue', default='',
help='Specify the queue [default]'),
make_option('--conn', '-c', dest='conn', default='default',
help='Specify a connection [default]'),
make_option('--scheduled', action="store_true", default=False,
dest="scheduled", help="Schedule jobs in the future"),
make_option('--timeout', '-t', type="int", dest='timeout',
help="Default timeout in seconds"),
make_option('--serial', action="store_true", default=False, dest='serial',
help="A timeout in seconds"),
)
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
from pq.queue import Queue, SerialQueue
verbosity = int(options.get('verbosity', 1))
timeout = options.get('timeout')
for queue in args:
if options['serial']:
q = SerialQueue.create(queue)
else:
q = Queue.create(queue)
q.connection = options.get('conn')
q.scheduled = options.get('scheduled')
if timeout:
q.default_timeout = timeout
q.save()
|
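Editor's note: a hypothetical invocation of this management command (queue names are placeholders) would look like `python manage.py pqcreate default emails --timeout 300`, with `--serial` selecting `SerialQueue.create`; note that the help text for `--serial` ("A timeout in seconds") appears to be copied from the `--timeout` option.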
|
8a0ee09eec5f24152babfff3882a38ca749b3cc9
|
solutions/uri/1029/1029.py
|
solutions/uri/1029/1029.py
|
from functools import lru_cache
fibs = [0] * 40
calls = [0] * 40
def fibonacci(n):
global fibs, calls
if n <= 1:
fibs[n] = n
calls[n] = 0
else:
if fibs[n] != 0:
return fibs[n]
fibs[n] = fibonacci(n - 1) + fibonacci(n - 2)
calls[n] = calls[n - 1] + calls[n - 2] + 2
return fibs[n]
i = int(input())
for _ in range(i):
n = int(input())
fibonacci_result = fibonacci(n)
calls_count = calls[n]
print(f"fib({n}) = {calls_count} calls = {fibonacci_result}")
|
Solve Fibonacci, How Many Calls? in python
|
Solve Fibonacci, How Many Calls? in python
|
Python
|
mit
|
deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground
|
Solve Fibonacci, How Many Calls? in python
|
from functools import lru_cache
fibs = [0] * 40
calls = [0] * 40
def fibonacci(n):
global fibs, calls
if n <= 1:
fibs[n] = n
calls[n] = 0
else:
if fibs[n] != 0:
return fibs[n]
fibs[n] = fibonacci(n - 1) + fibonacci(n - 2)
calls[n] = calls[n - 1] + calls[n - 2] + 2
return fibs[n]
i = int(input())
for _ in range(i):
n = int(input())
fibonacci_result = fibonacci(n)
calls_count = calls[n]
print(f"fib({n}) = {calls_count} calls = {fibonacci_result}")
|
<commit_before><commit_msg>Solve Fibonacci, How Many Calls? in python<commit_after>
|
from functools import lru_cache
fibs = [0] * 40
calls = [0] * 40
def fibonacci(n):
global fibs, calls
if n <= 1:
fibs[n] = n
calls[n] = 0
else:
if fibs[n] != 0:
return fibs[n]
fibs[n] = fibonacci(n - 1) + fibonacci(n - 2)
calls[n] = calls[n - 1] + calls[n - 2] + 2
return fibs[n]
i = int(input())
for _ in range(i):
n = int(input())
fibonacci_result = fibonacci(n)
calls_count = calls[n]
print(f"fib({n}) = {calls_count} calls = {fibonacci_result}")
|
Solve Fibonacci, How Many Calls? in pythonfrom functools import lru_cache
fibs = [0] * 40
calls = [0] * 40
def fibonacci(n):
global fibs, calls
if n <= 1:
fibs[n] = n
calls[n] = 0
else:
if fibs[n] != 0:
return fibs[n]
fibs[n] = fibonacci(n - 1) + fibonacci(n - 2)
calls[n] = calls[n - 1] + calls[n - 2] + 2
return fibs[n]
i = int(input())
for _ in range(i):
n = int(input())
fibonacci_result = fibonacci(n)
calls_count = calls[n]
print(f"fib({n}) = {calls_count} calls = {fibonacci_result}")
|
<commit_before><commit_msg>Solve Fibonacci, How Many Calls? in python<commit_after>from functools import lru_cache
fibs = [0] * 40
calls = [0] * 40
def fibonacci(n):
global fibs, calls
if n <= 1:
fibs[n] = n
calls[n] = 0
else:
if fibs[n] != 0:
return fibs[n]
fibs[n] = fibonacci(n - 1) + fibonacci(n - 2)
calls[n] = calls[n - 1] + calls[n - 2] + 2
return fibs[n]
i = int(input())
for _ in range(i):
n = int(input())
fibonacci_result = fibonacci(n)
calls_count = calls[n]
print(f"fib({n}) = {calls_count} calls = {fibonacci_result}")
|
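Editor's note: `lru_cache` is imported above but never used; the memoisation is done by hand through the `fibs` array, and `calls[n] = calls[n-1] + calls[n-2] + 2` reproduces the call count of a naive (non-memoised) recursive Fibonacci. A small sanity check by direct counting, independent of the record:
def naive_calls(n):
    # recursive invocations made below the top-level call of a naive fib(n)
    if n <= 1:
        return 0
    return naive_calls(n - 1) + naive_calls(n - 2) + 2
assert [naive_calls(k) for k in range(6)] == [0, 0, 2, 4, 8, 14]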
|
03afe1cc090bd48e07fd71616774dc4f05247bed
|
migrations/usernames.py
|
migrations/usernames.py
|
import re
import pymongo
from bhs_api import create_app
if __name__ == "__main__":
app, conf = create_app(testing=True)
app.testing = True
mongo = pymongo.MongoClient(app.config['MONGODB_HOST'])
user_db = mongo[conf.user_db_name]
is_english = re.compile('^[a-zA-Z]')
for i in user_db['user'].find({'name': {'$exists': True}}):
name = i['name']
if is_english.match(name):
new_name = {'en': name}
else:
new_name = {'he': name}
user_db['user'].update_one({'_id': i['_id']},
{'$set': {'name': new_name}})
|
Add migration script for users with names
|
Add migration script for users with names
If existing users have a name we need to change the name in the user db so it
becomes a dict like: {'en': 'Yossi Levy'} if the user name was `Yossi Levy`.
If the user name is in Hebrew it will add it with the 'he' key.
|
Python
|
agpl-3.0
|
Beit-Hatfutsot/dbs-back,Beit-Hatfutsot/dbs-back,Beit-Hatfutsot/dbs-back,Beit-Hatfutsot/dbs-back
|
Add migration script for users with names
If existing users have a name we need to change the name in the user db so it
becomes a dict like: {'en': 'Yossi Levy'} if the user name was `Yossi Levy`.
If the user name is in Hebrew it will add it with the 'he' key.
|
import re
import pymongo
from bhs_api import create_app
if __name__ == "__main__":
app, conf = create_app(testing=True)
app.testing = True
mongo = pymongo.MongoClient(app.config['MONGODB_HOST'])
user_db = mongo[conf.user_db_name]
is_english = re.compile('^[a-zA-Z]')
for i in user_db['user'].find({'name': {'$exists': True}}):
name = i['name']
if is_english.match(name):
new_name = {'en': name}
else:
new_name = {'he': name}
user_db['user'].update_one({'_id': i['_id']},
{'$set': {'name': new_name}})
|
<commit_before><commit_msg>Add migration script for users with names
If existing users have a name we need to change the name in the user db so it
becomes a dict like: {'en': 'Yossi Levy'} if the user name was `Yossi Levy`.
If the user name is in Hebrew it will add it with the 'he' key.<commit_after>
|
import re
import pymongo
from bhs_api import create_app
if __name__ == "__main__":
app, conf = create_app(testing=True)
app.testing = True
mongo = pymongo.MongoClient(app.config['MONGODB_HOST'])
user_db = mongo[conf.user_db_name]
is_english = re.compile('^[a-zA-Z]')
for i in user_db['user'].find({'name': {'$exists': True}}):
name = i['name']
if is_english.match(name):
new_name = {'en': name}
else:
new_name = {'he': name}
user_db['user'].update_one({'_id': i['_id']},
{'$set': {'name': new_name}})
|
Add migration script for users with names
If existing users have a name we need to change the name in the user db so it
becomes a dict like: {'en': 'Yossi Levy'} if the user name was `Yossi Levy`.
If the user name is in Hebrew it will add it with the 'he' key.import re
import pymongo
from bhs_api import create_app
if __name__ == "__main__":
app, conf = create_app(testing=True)
app.testing = True
mongo = pymongo.MongoClient(app.config['MONGODB_HOST'])
user_db = mongo[conf.user_db_name]
is_english = re.compile('^[a-zA-Z]')
for i in user_db['user'].find({'name': {'$exists': True}}):
name = i['name']
if is_english.match(name):
new_name = {'en': name}
else:
new_name = {'he': name}
user_db['user'].update_one({'_id': i['_id']},
{'$set': {'name': new_name}})
|
<commit_before><commit_msg>Add migration script for users with names
If existing users have a name we need to change the name in the user db so it
becomes a dict like: {'en': 'Yossi Levy'} if the user name was `Yossi Levy`.
If the user name is in Hebrew it will add it with the 'he' key.<commit_after>import re
import pymongo
from bhs_api import create_app
if __name__ == "__main__":
app, conf = create_app(testing=True)
app.testing = True
mongo = pymongo.MongoClient(app.config['MONGODB_HOST'])
user_db = mongo[conf.user_db_name]
is_english = re.compile('^[a-zA-Z]')
for i in user_db['user'].find({'name': {'$exists': True}}):
name = i['name']
if is_english.match(name):
new_name = {'en': name}
else:
new_name = {'he': name}
user_db['user'].update_one({'_id': i['_id']},
{'$set': {'name': new_name}})
|
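Editor's note: `is_english` only tests whether the first character is an ASCII letter, so names beginning with a digit, punctuation or whitespace are also filed under 'he' by this migration. The classification step in isolation, as a sketch with placeholder names:
import re
is_english = re.compile(r'^[a-zA-Z]')
def classify(name):
    # mirrors the migration: leading ASCII letter -> 'en', anything else -> 'he'
    return {'en': name} if is_english.match(name) else {'he': name}
assert classify('Yossi Levy') == {'en': 'Yossi Levy'}
assert classify('יוסי לוי') == {'he': 'יוסי לוי'}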
|
4754771eb50cb29722b3c7b5f9bca6dc30ef3157
|
noodles/sources/eiti.py
|
noodles/sources/eiti.py
|
import logging
import urllib2
import csv
import os
import traceback
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import PDFPageInterpreter
from noodles.util import stage_path
from noodles.sources.common import Source
class EITISource(Source):
LABEL = 'EITI Country Reports'
URL = 'https://eiti.org/countries/reports'
source_url = 'http://eiti.org/countries/reports/compare/download/csv'
def _iterate_reports(self):
csvfile = csv.DictReader(urllib2.urlopen(self.source_url), delimiter = ';')
for line in csvfile:
fn_to_store = 'eiti_%s_%s.pdf' % (line['Country Name'].strip(), line['Years Covered'])
path = os.path.join(stage_path(self.name, 'download'), fn_to_store)
url = 'https://eiti.org/files/%s' % line['EITI Report'].strip()
if not os.path.exists(path):
try:
os.makedirs(os.path.dirname(path))
except:
pass
with open(path, 'wb') as fh:
try:
fh.write(urllib2.urlopen(url).read())
except urllib2.URLError:
#traceback.print_exc()
continue
text = self._txt_from_pdf(path)
if text:
yield text
def _txt_from_pdf(self, pdf_fn):
print(pdf_fn)
return os.popen("pdftotext '%s' -" % pdf_fn).read()
def extract(self):
for report in self._iterate_reports():
pass
|
Add EITI country reports as sources
|
Add EITI country reports as sources
NB: you need to have pdftotext installed to run this
|
Python
|
mit
|
uf6/noodles,uf6/noodles
|
Add EITI country reports as sources
NB: you need to have pdftotext installed to run this
|
import logging
import urllib2
import csv
import os
import traceback
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import PDFPageInterpreter
from noodles.util import stage_path
from noodles.sources.common import Source
class EITISource(Source):
LABEL = 'EITI Country Reports'
URL = 'https://eiti.org/countries/reports'
source_url = 'http://eiti.org/countries/reports/compare/download/csv'
def _iterate_reports(self):
csvfile = csv.DictReader(urllib2.urlopen(self.source_url), delimiter = ';')
for line in csvfile:
fn_to_store = 'eiti_%s_%s.pdf' % (line['Country Name'].strip(), line['Years Covered'])
path = os.path.join(stage_path(self.name, 'download'), fn_to_store)
url = 'https://eiti.org/files/%s' % line['EITI Report'].strip()
if not os.path.exists(path):
try:
os.makedirs(os.path.dirname(path))
except:
pass
with open(path, 'wb') as fh:
try:
fh.write(urllib2.urlopen(url).read())
except urllib2.URLError:
#traceback.print_exc()
continue
text = self._txt_from_pdf(path)
if text:
yield text
def _txt_from_pdf(self, pdf_fn):
print(pdf_fn)
return os.popen("pdftotext '%s' -" % pdf_fn).read()
def extract(self):
for report in self._iterate_reports():
pass
|
<commit_before><commit_msg>Add EITI country reports as sources
NB: you need to have pdftotext installed to run this<commit_after>
|
import logging
import urllib2
import csv
import os
import traceback
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import PDFPageInterpreter
from noodles.util import stage_path
from noodles.sources.common import Source
class EITISource(Source):
LABEL = 'EITI Country Reports'
URL = 'https://eiti.org/countries/reports'
source_url = 'http://eiti.org/countries/reports/compare/download/csv'
def _iterate_reports(self):
csvfile = csv.DictReader(urllib2.urlopen(self.source_url), delimiter = ';')
for line in csvfile:
fn_to_store = 'eiti_%s_%s.pdf' % (line['Country Name'].strip(), line['Years Covered'])
path = os.path.join(stage_path(self.name, 'download'), fn_to_store)
url = 'https://eiti.org/files/%s' % line['EITI Report'].strip()
if not os.path.exists(path):
try:
os.makedirs(os.path.dirname(path))
except:
pass
with open(path, 'wb') as fh:
try:
fh.write(urllib2.urlopen(url).read())
except urllib2.URLError:
#traceback.print_exc()
continue
text = self._txt_from_pdf(path)
if text:
yield text
def _txt_from_pdf(self, pdf_fn):
print(pdf_fn)
return os.popen("pdftotext '%s' -" % pdf_fn).read()
def extract(self):
for report in self._iterate_reports():
pass
|
Add EITI country reports as sources
NB: you need to have pdftotext installed to run thisimport logging
import urllib2
import csv
import os
import traceback
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import PDFPageInterpreter
from noodles.util import stage_path
from noodles.sources.common import Source
class EITISource(Source):
LABEL = 'EITI Country Reports'
URL = 'https://eiti.org/countries/reports'
source_url = 'http://eiti.org/countries/reports/compare/download/csv'
def _iterate_reports(self):
csvfile = csv.DictReader(urllib2.urlopen(self.source_url), delimiter = ';')
for line in csvfile:
fn_to_store = 'eiti_%s_%s.pdf' % (line['Country Name'].strip(), line['Years Covered'])
path = os.path.join(stage_path(self.name, 'download'), fn_to_store)
url = 'https://eiti.org/files/%s' % line['EITI Report'].strip()
if not os.path.exists(path):
try:
os.makedirs(os.path.dirname(path))
except:
pass
with open(path, 'wb') as fh:
try:
fh.write(urllib2.urlopen(url).read())
except urllib2.URLError:
#traceback.print_exc()
continue
text = self._txt_from_pdf(path)
if text:
yield text
def _txt_from_pdf(self, pdf_fn):
print(pdf_fn)
return os.popen("pdftotext '%s' -" % pdf_fn).read()
def extract(self):
for report in self._iterate_reports():
pass
|
<commit_before><commit_msg>Add EITI country reports as sources
NB: you need to have pdftotext installed to run this<commit_after>import logging
import urllib2
import csv
import os
import traceback
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import PDFPageInterpreter
from noodles.util import stage_path
from noodles.sources.common import Source
class EITISource(Source):
LABEL = 'EITI Country Reports'
URL = 'https://eiti.org/countries/reports'
source_url = 'http://eiti.org/countries/reports/compare/download/csv'
def _iterate_reports(self):
csvfile = csv.DictReader(urllib2.urlopen(self.source_url), delimiter = ';')
for line in csvfile:
fn_to_store = 'eiti_%s_%s.pdf' % (line['Country Name'].strip(), line['Years Covered'])
path = os.path.join(stage_path(self.name, 'download'), fn_to_store)
url = 'https://eiti.org/files/%s' % line['EITI Report'].strip()
if not os.path.exists(path):
try:
os.makedirs(os.path.dirname(path))
except:
pass
with open(path, 'wb') as fh:
try:
fh.write(urllib2.urlopen(url).read())
except urllib2.URLError:
#traceback.print_exc()
continue
text = self._txt_from_pdf(path)
if text:
yield text
def _txt_from_pdf(self, pdf_fn):
print(pdf_fn)
return os.popen("pdftotext '%s' -" % pdf_fn).read()
def extract(self):
for report in self._iterate_reports():
pass
|
|
f7fb296395c4193dc2f2607d3be50cca280e86dc
|
tests/test_player.py
|
tests/test_player.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from db.player import Player
def test_find_by_id():
plr = Player.find_by_id(8459469)
assert plr.name == "Rory Fitzpatrick"
def test_name_property():
plr = Player.find_by_id(8459457)
assert plr.name == " ".join((plr.first_name, plr.last_name))
|
Add initial test script for player objects
|
Add initial test script for player objects
|
Python
|
mit
|
leaffan/pynhldb
|
Add initial test script for player objects
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from db.player import Player
def test_find_by_id():
plr = Player.find_by_id(8459469)
assert plr.name == "Rory Fitzpatrick"
def test_name_property():
plr = Player.find_by_id(8459457)
assert plr.name == " ".join((plr.first_name, plr.last_name))
|
<commit_before><commit_msg>Add initial test script for player objects<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from db.player import Player
def test_find_by_id():
plr = Player.find_by_id(8459469)
assert plr.name == "Rory Fitzpatrick"
def test_name_property():
plr = Player.find_by_id(8459457)
assert plr.name == " ".join((plr.first_name, plr.last_name))
|
Add initial test script for player objects#!/usr/bin/env python
# -*- coding: utf-8 -*-
from db.player import Player
def test_find_by_id():
plr = Player.find_by_id(8459469)
assert plr.name == "Rory Fitzpatrick"
def test_name_property():
plr = Player.find_by_id(8459457)
assert plr.name == " ".join((plr.first_name, plr.last_name))
|
<commit_before><commit_msg>Add initial test script for player objects<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from db.player import Player
def test_find_by_id():
plr = Player.find_by_id(8459469)
assert plr.name == "Rory Fitzpatrick"
def test_name_property():
plr = Player.find_by_id(8459457)
assert plr.name == " ".join((plr.first_name, plr.last_name))
|
|
2afdec4acc893628cba7179e52ca0bbb0207c49a
|
jsontest.py
|
jsontest.py
|
import json
from collections import OrderedDict
import netCDF4
import readNetCDF as rnc
import numpy as np
import util
from netCDF4 import Dataset
json_path = 'json_output_test.json'
nc_file = 'foam_2011-01-01.nc'
domain_type = "Grid" # Default
dset = rnc.load_netcdf(nc_file)
print(rnc.get_var_names(dset))
################ Temporary###############
###################################################
# ReadNetCDF data
rnc.extract_var_data(rnc.get_var_names(dset))
shape = rnc.get_shape(dset, 'ICEC')
def load_json(path):
with open(path, 'r') as fp:
return json.load(fp, object_pairs_hook=OrderedDict)
json_template = load_json(json_path)
# print(json.dumps(json_template,indent=4))
def update_json(json_template, data, domain_type ):
main_var = 'ICEC'
json_template['domain']['domainType'] = domain_type
# Coordinate data
json_template['domain']['axes']['x']['values'] = data['lat'].tolist()
json_template['domain']['axes']['y']['values'] = data['lon'].tolist()
json_template['ranges'][main_var]['shape'] = rnc.get_shape(dset,main_var)
json_template['ranges'][main_var]['values'] = (data[main_var].flatten().tolist())
print(json.dumps(json_template,indent=4))
# for x in data['lat']:
#
#
# for y in data['lon']:
# json_template['domain']['axes']['y']['values'].join(float(y))
json_template['domain']['axes']['z']['values'] = [5]
update_json(json_template,rnc.extract_var_data(rnc.get_var_names(dset)),"Grid")
|
Test nc -> covJSON v0.1
|
Test nc -> covJSON v0.1
|
Python
|
bsd-3-clause
|
Reading-eScience-Centre/pycovjson
|
Test nc -> covJSON v0.1
|
import json
from collections import OrderedDict
import netCDF4
import readNetCDF as rnc
import numpy as np
import util
from netCDF4 import Dataset
json_path = 'json_output_test.json'
nc_file = 'foam_2011-01-01.nc'
domain_type = "Grid" # Default
dset = rnc.load_netcdf(nc_file)
print(rnc.get_var_names(dset))
################ Temporary###############
###################################################
# ReadNetCDF data
rnc.extract_var_data(rnc.get_var_names(dset))
shape = rnc.get_shape(dset, 'ICEC')
def load_json(path):
with open(path, 'r') as fp:
return json.load(fp, object_pairs_hook=OrderedDict)
json_template = load_json(json_path)
# print(json.dumps(json_template,indent=4))
def update_json(json_template, data, domain_type ):
main_var = 'ICEC'
json_template['domain']['domainType'] = domain_type
# Coordinate data
json_template['domain']['axes']['x']['values'] = data['lat'].tolist()
json_template['domain']['axes']['y']['values'] = data['lon'].tolist()
json_template['ranges'][main_var]['shape'] = rnc.get_shape(dset,main_var)
json_template['ranges'][main_var]['values'] = (data[main_var].flatten().tolist())
print(json.dumps(json_template,indent=4))
# for x in data['lat']:
#
#
# for y in data['lon']:
# json_template['domain']['axes']['y']['values'].join(float(y))
json_template['domain']['axes']['z']['values'] = [5]
update_json(json_template,rnc.extract_var_data(rnc.get_var_names(dset)),"Grid")
|
<commit_before><commit_msg>Test nc -> covJSON v0.1<commit_after>
|
import json
from collections import OrderedDict
import netCDF4
import readNetCDF as rnc
import numpy as np
import util
from netCDF4 import Dataset
json_path = 'json_output_test.json'
nc_file = 'foam_2011-01-01.nc'
domain_type = "Grid" # Default
dset = rnc.load_netcdf(nc_file)
print(rnc.get_var_names(dset))
################ Temporary###############
###################################################
# ReadNetCDF data
rnc.extract_var_data(rnc.get_var_names(dset))
shape = rnc.get_shape(dset, 'ICEC')
def load_json(path):
with open(path, 'r') as fp:
return json.load(fp, object_pairs_hook=OrderedDict)
json_template = load_json(json_path)
# print(json.dumps(json_template,indent=4))
def update_json(json_template, data, domain_type ):
main_var = 'ICEC'
json_template['domain']['domainType'] = domain_type
# Coordinate data
json_template['domain']['axes']['x']['values'] = data['lat'].tolist()
json_template['domain']['axes']['y']['values'] = data['lon'].tolist()
json_template['ranges'][main_var]['shape'] = rnc.get_shape(dset,main_var)
json_template['ranges'][main_var]['values'] = (data[main_var].flatten().tolist())
print(json.dumps(json_template,indent=4))
# for x in data['lat']:
#
#
# for y in data['lon']:
# json_template['domain']['axes']['y']['values'].join(float(y))
json_template['domain']['axes']['z']['values'] = [5]
update_json(json_template,rnc.extract_var_data(rnc.get_var_names(dset)),"Grid")
|
Test nc -> covJSON v0.1import json
from collections import OrderedDict
import netCDF4
import readNetCDF as rnc
import numpy as np
import util
from netCDF4 import Dataset
json_path = 'json_output_test.json'
nc_file = 'foam_2011-01-01.nc'
domain_type = "Grid" # Default
dset = rnc.load_netcdf(nc_file)
print(rnc.get_var_names(dset))
################ Temporary###############
###################################################
# ReadNetCDF data
rnc.extract_var_data(rnc.get_var_names(dset))
shape = rnc.get_shape(dset, 'ICEC')
def load_json(path):
with open(path, 'r') as fp:
return json.load(fp, object_pairs_hook=OrderedDict)
json_template = load_json(json_path)
# print(json.dumps(json_template,indent=4))
def update_json(json_template, data, domain_type ):
main_var = 'ICEC'
json_template['domain']['domainType'] = domain_type
# Coordinate data
json_template['domain']['axes']['x']['values'] = data['lat'].tolist()
json_template['domain']['axes']['y']['values'] = data['lon'].tolist()
json_template['ranges'][main_var]['shape'] = rnc.get_shape(dset,main_var)
json_template['ranges'][main_var]['values'] = (data[main_var].flatten().tolist())
print(json.dumps(json_template,indent=4))
# for x in data['lat']:
#
#
# for y in data['lon']:
# json_template['domain']['axes']['y']['values'].join(float(y))
json_template['domain']['axes']['z']['values'] = [5]
update_json(json_template,rnc.extract_var_data(rnc.get_var_names(dset)),"Grid")
|
<commit_before><commit_msg>Test nc -> covJSON v0.1<commit_after>import json
from collections import OrderedDict
import netCDF4
import readNetCDF as rnc
import numpy as np
import util
from netCDF4 import Dataset
json_path = 'json_output_test.json'
nc_file = 'foam_2011-01-01.nc'
domain_type = "Grid" # Default
dset = rnc.load_netcdf(nc_file)
print(rnc.get_var_names(dset))
################ Temporary###############
###################################################
# ReadNetCDF data
rnc.extract_var_data(rnc.get_var_names(dset))
shape = rnc.get_shape(dset, 'ICEC')
def load_json(path):
with open(path, 'r') as fp:
return json.load(fp, object_pairs_hook=OrderedDict)
json_template = load_json(json_path)
# print(json.dumps(json_template,indent=4))
def update_json(json_template, data, domain_type ):
main_var = 'ICEC'
json_template['domain']['domainType'] = domain_type
# Coordinate data
json_template['domain']['axes']['x']['values'] = data['lat'].tolist()
json_template['domain']['axes']['y']['values'] = data['lon'].tolist()
json_template['ranges'][main_var]['shape'] = rnc.get_shape(dset,main_var)
json_template['ranges'][main_var]['values'] = (data[main_var].flatten().tolist())
print(json.dumps(json_template,indent=4))
# for x in data['lat']:
#
#
# for y in data['lon']:
# json_template['domain']['axes']['y']['values'].join(float(y))
json_template['domain']['axes']['z']['values'] = [5]
update_json(json_template,rnc.extract_var_data(rnc.get_var_names(dset)),"Grid")
|
|
22ae8b04443c13a1c5e9a25a93c2c52faa996b95
|
CodeFights/lateRide.py
|
CodeFights/lateRide.py
|
#!/usr/local/bin/python
# Code Fights Late Ride Problem
def lateRide(n):
h, m = divmod(n, 60)
return sum(map(int, str(h) + str(m)))
def main():
tests = [
[240, 4],
[808, 14],
[1439, 19],
[0, 0],
[23, 5],
[8, 8]
]
for t in tests:
res = lateRide(t[0])
if t[1] == res:
print("PASSED: lateRide({}) returned {}"
.format(t[0], res))
else:
print("FAILED: lateRide({}) returned {}, answer: {}"
                  .format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
Solve Code Fights late ride problem
|
Solve Code Fights late ride problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights late ride problem
|
#!/usr/local/bin/python
# Code Fights Late Ride Problem
def lateRide(n):
h, m = divmod(n, 60)
return sum(map(int, str(h) + str(m)))
def main():
tests = [
[240, 4],
[808, 14],
[1439, 19],
[0, 0],
[23, 5],
[8, 8]
]
for t in tests:
res = lateRide(t[0])
if t[1] == res:
print("PASSED: lateRide({}) returned {}"
.format(t[0], res))
else:
print("FAILED: lateRide({}) returned {}, answer: {}"
                  .format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights late ride problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Late Ride Problem
def lateRide(n):
h, m = divmod(n, 60)
return sum(map(int, str(h) + str(m)))
def main():
tests = [
[240, 4],
[808, 14],
[1439, 19],
[0, 0],
[23, 5],
[8, 8]
]
for t in tests:
res = lateRide(t[0])
if t[1] == res:
print("PASSED: lateRide({}) returned {}"
.format(t[0], res))
else:
print("FAILED: lateRide({}) returned {}, answer: {}"
                  .format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
Solve Code Fights late ride problem#!/usr/local/bin/python
# Code Fights Late Ride Problem
def lateRide(n):
h, m = divmod(n, 60)
return sum(map(int, str(h) + str(m)))
def main():
tests = [
[240, 4],
[808, 14],
[1439, 19],
[0, 0],
[23, 5],
[8, 8]
]
for t in tests:
res = lateRide(t[0])
if t[1] == res:
print("PASSED: lateRide({}) returned {}"
.format(t[0], res))
else:
print("FAILED: lateRide({}) returned {}, answer: {}"
                  .format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights late ride problem<commit_after>#!/usr/local/bin/python
# Code Fights Late Ride Problem
def lateRide(n):
h, m = divmod(n, 60)
return sum(map(int, str(h) + str(m)))
def main():
tests = [
[240, 4],
[808, 14],
[1439, 19],
[0, 0],
[23, 5],
[8, 8]
]
for t in tests:
res = lateRide(t[0])
if t[1] == res:
print("PASSED: lateRide({}) returned {}"
.format(t[0], res))
else:
print("FAILED: lateRide({}) returned {}, answer: {}"
                  .format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
|
3fabbc491a106175d61ffb6a2694501972c47141
|
tools/pip-extract.py
|
tools/pip-extract.py
|
#!/usr/bin/env python
from distutils.version import LooseVersion
import os
import re
import sys
import yaml
if __name__ == '__main__':
fn = sys.argv[1]
with open(sys.argv[1], 'r') as fh:
lines = fh.readlines()
entries = set()
for line in lines:
line = line.strip()
if not line or line.startswith("#"):
continue
if re.match(r"\w(.*)", line):
entries.add(line)
versions = dict()
split_on = set(['==', '>=', '<='])
for entry in entries:
matched = False
for s in split_on:
if entry.find(s) != -1:
name, sep, version = entry.partition(s)
if name and version.strip():
versions[name] = version.strip()
matched = True
break
if not matched:
versions[entry] = None
cleaned_versions = dict()
for (k, v) in versions.items():
if not k:
continue
if not v or not v.strip():
cleaned_versions[k] = None
else:
cleaned_versions[k] = LooseVersion(v)
pips = []
for (k, v) in cleaned_versions.items():
if v:
pips.append({
'name': k,
'version': str(v),
})
else:
pips.append({'name': k})
out = dict()
out['pips'] = pips
print(yaml.dump(out, default_flow_style=False, indent=4,
line_break="\n", explicit_start=True, explicit_end=True))
|
Add little tool to help extract yaml names+versions from pip-require files
|
Add little tool to help extract yaml names+versions from pip-require files
|
Python
|
apache-2.0
|
stackforge/anvil,mc2014/anvil,mc2014/anvil,stackforge/anvil
|
Add little tool to help extract yaml names+versions from pip-require files
|
#!/usr/bin/env python
from distutils.version import LooseVersion
import os
import re
import sys
import yaml
if __name__ == '__main__':
fn = sys.argv[1]
with open(sys.argv[1], 'r') as fh:
lines = fh.readlines()
entries = set()
for line in lines:
line = line.strip()
if not line or line.startswith("#"):
continue
if re.match(r"\w(.*)", line):
entries.add(line)
versions = dict()
split_on = set(['==', '>=', '<='])
for entry in entries:
matched = False
for s in split_on:
if entry.find(s) != -1:
name, sep, version = entry.partition(s)
if name and version.strip():
versions[name] = version.strip()
matched = True
break
if not matched:
versions[entry] = None
cleaned_versions = dict()
for (k, v) in versions.items():
if not k:
continue
if not v or not v.strip():
cleaned_versions[k] = None
else:
cleaned_versions[k] = LooseVersion(v)
pips = []
for (k, v) in cleaned_versions.items():
if v:
pips.append({
'name': k,
'version': str(v),
})
else:
pips.append({'name': k})
out = dict()
out['pips'] = pips
print(yaml.dump(out, default_flow_style=False, indent=4,
line_break="\n", explicit_start=True, explicit_end=True))
|
<commit_before><commit_msg>Add little tool to help extract yaml names+versions from pip-require files<commit_after>
|
#!/usr/bin/env python
from distutils.version import LooseVersion
import os
import re
import sys
import yaml
if __name__ == '__main__':
fn = sys.argv[1]
with open(sys.argv[1], 'r') as fh:
lines = fh.readlines()
entries = set()
for line in lines:
line = line.strip()
if not line or line.startswith("#"):
continue
if re.match(r"\w(.*)", line):
entries.add(line)
versions = dict()
split_on = set(['==', '>=', '<='])
for entry in entries:
matched = False
for s in split_on:
if entry.find(s) != -1:
name, sep, version = entry.partition(s)
if name and version.strip():
versions[name] = version.strip()
matched = True
break
if not matched:
versions[entry] = None
cleaned_versions = dict()
for (k, v) in versions.items():
if not k:
continue
if not v or not v.strip():
cleaned_versions[k] = None
else:
cleaned_versions[k] = LooseVersion(v)
pips = []
for (k, v) in cleaned_versions.items():
if v:
pips.append({
'name': k,
'version': str(v),
})
else:
pips.append({'name': k})
out = dict()
out['pips'] = pips
print(yaml.dump(out, default_flow_style=False, indent=4,
line_break="\n", explicit_start=True, explicit_end=True))
|
Add little tool to help extract yaml names+versions from pip-require files#!/usr/bin/env python
from distutils.version import LooseVersion
import os
import re
import sys
import yaml
if __name__ == '__main__':
fn = sys.argv[1]
with open(sys.argv[1], 'r') as fh:
lines = fh.readlines()
entries = set()
for line in lines:
line = line.strip()
if not line or line.startswith("#"):
continue
if re.match(r"\w(.*)", line):
entries.add(line)
versions = dict()
split_on = set(['==', '>=', '<='])
for entry in entries:
matched = False
for s in split_on:
if entry.find(s) != -1:
name, sep, version = entry.partition(s)
if name and version.strip():
versions[name] = version.strip()
matched = True
break
if not matched:
versions[entry] = None
cleaned_versions = dict()
for (k, v) in versions.items():
if not k:
continue
if not v or not v.strip():
cleaned_versions[k] = None
else:
cleaned_versions[k] = LooseVersion(v)
pips = []
for (k, v) in cleaned_versions.items():
if v:
pips.append({
'name': k,
'version': str(v),
})
else:
pips.append({'name': k})
out = dict()
out['pips'] = pips
print(yaml.dump(out, default_flow_style=False, indent=4,
line_break="\n", explicit_start=True, explicit_end=True))
|
<commit_before><commit_msg>Add little tool to help extract yaml names+versions from pip-require files<commit_after>#!/usr/bin/env python
from distutils.version import LooseVersion
import os
import re
import sys
import yaml
if __name__ == '__main__':
fn = sys.argv[1]
with open(sys.argv[1], 'r') as fh:
lines = fh.readlines()
entries = set()
for line in lines:
line = line.strip()
if not line or line.startswith("#"):
continue
if re.match(r"\w(.*)", line):
entries.add(line)
versions = dict()
split_on = set(['==', '>=', '<='])
for entry in entries:
matched = False
for s in split_on:
if entry.find(s) != -1:
name, sep, version = entry.partition(s)
if name and version.strip():
versions[name] = version.strip()
matched = True
break
if not matched:
versions[entry] = None
cleaned_versions = dict()
for (k, v) in versions.items():
if not k:
continue
if not v or not v.strip():
cleaned_versions[k] = None
else:
cleaned_versions[k] = LooseVersion(v)
pips = []
for (k, v) in cleaned_versions.items():
if v:
pips.append({
'name': k,
'version': str(v),
})
else:
pips.append({'name': k})
out = dict()
out['pips'] = pips
print(yaml.dump(out, default_flow_style=False, indent=4,
line_break="\n", explicit_start=True, explicit_end=True))
|
|
797ebb71721e067140a2437810cd4923b03af3fa
|
homework3/game.py
|
homework3/game.py
|
from collections import OrderedDict
from random import shuffle
MAX_CONFLICTS = [-1, {
'row': -1,
'column': -1,
'conflicts': [],
}]
def print_board(board):
"""Print board."""
b = ['_'] * len(board)
for queen, position in board.items():
b[position['row']] = ['_'] * len(board)
b[position['row']][position['column']] = '*'
b = ['|'.join(r) for r in b]
for row in b:
print(row)
def move_queen(board):
"""Move the queen with maximum number of conflicts."""
global MAX_CONFLICTS
calculate_conflicts(board)
print('MAX_CONFLICTS: ', MAX_CONFLICTS)
MAX_CONFLICTS = None
def calculate_conflicts(board):
"""Calculates all conflicts for each queen and gets the one with maximum conflicts."""
print('calculate')
print('BEFORE calculation', board)
global MAX_CONFLICTS
positions = board.values()
for queen, position in board.items():
print('\n CURRENT POSITION', queen, position)
if len(list(filter(lambda pos: pos['row'] == board[queen]['row'], positions))) > 1:
position['conflicts'].extend(list(filter(lambda pos: pos['row'] == board[queen]['row'], positions))[1:])
if len(list(filter(lambda pos: pos['column'] == board[queen]['column'], positions))) > 1:
position['conflicts'].extend(list(filter(lambda pos: pos['column'] == board[queen]['column'], positions))[1:])
if len(list(filter(lambda pos: abs(board[queen]['column'] - pos['column']) == abs(board[queen]['row'] - pos['row']), positions))) > 1:
position['conflicts'].extend(list(filter(lambda pos: abs(board[queen]['column'] - pos['column']) == abs(board[queen]['row'] - pos['row']), positions))[1:])
# update MAX_CONFLICTS
if len(position['conflicts']) > len(MAX_CONFLICTS[1]['conflicts']):
MAX_CONFLICTS = [queen, position]
print('AFTER calculation', board)
def main():
# number = int(input('Enter the count of queens: '))
number = 4
row_indexes = list(range(number))
shuffle(row_indexes)
# generate board with queens one per row and one per column
board = OrderedDict([
(i, {
'row': row_indexes[i],
'column': i,
'conflicts': []
})
for i in range(number)])
print_board(board)
while MAX_CONFLICTS:
move_queen(board)
# print(board)
if __name__ == '__main__':
main()
|
Create board and find conflicts of the whole board
|
Create board and find conflicts of the whole board
|
Python
|
mit
|
mileto94/Data-Mining
|
Create board and find conflicts of the whole board
|
from collections import OrderedDict
from random import shuffle
MAX_CONFLICTS = [-1, {
'row': -1,
'column': -1,
'conflicts': [],
}]
def print_board(board):
"""Print board."""
b = ['_'] * len(board)
for queen, position in board.items():
b[position['row']] = ['_'] * len(board)
b[position['row']][position['column']] = '*'
b = ['|'.join(r) for r in b]
for row in b:
print(row)
def move_queen(board):
"""Move the queen with maximum number of conflicts."""
global MAX_CONFLICTS
calculate_conflicts(board)
print('MAX_CONFLICTS: ', MAX_CONFLICTS)
MAX_CONFLICTS = None
def calculate_conflicts(board):
"""Calculates all conflicts for each queen and gets the one with maximum conflicts."""
print('calculate')
print('BEFORE calculation', board)
global MAX_CONFLICTS
positions = board.values()
for queen, position in board.items():
print('\n CURRENT POSITION', queen, position)
if len(list(filter(lambda pos: pos['row'] == board[queen]['row'], positions))) > 1:
position['conflicts'].extend(list(filter(lambda pos: pos['row'] == board[queen]['row'], positions))[1:])
if len(list(filter(lambda pos: pos['column'] == board[queen]['column'], positions))) > 1:
position['conflicts'].extend(list(filter(lambda pos: pos['column'] == board[queen]['column'], positions))[1:])
if len(list(filter(lambda pos: abs(board[queen]['column'] - pos['column']) == abs(board[queen]['row'] - pos['row']), positions))) > 1:
position['conflicts'].extend(list(filter(lambda pos: abs(board[queen]['column'] - pos['column']) == abs(board[queen]['row'] - pos['row']), positions))[1:])
# update MAX_CONFLICTS
if len(position['conflicts']) > len(MAX_CONFLICTS[1]['conflicts']):
MAX_CONFLICTS = [queen, position]
print('AFTER calculation', board)
def main():
# number = int(input('Enter the count of queens: '))
number = 4
row_indexes = list(range(number))
shuffle(row_indexes)
# generate board with queens one per row and one per column
board = OrderedDict([
(i, {
'row': row_indexes[i],
'column': i,
'conflicts': []
})
for i in range(number)])
print_board(board)
while MAX_CONFLICTS:
move_queen(board)
# print(board)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Create board and find conflicts of the whole board<commit_after>
|
from collections import OrderedDict
from random import shuffle
MAX_CONFLICTS = [-1, {
'row': -1,
'column': -1,
'conflicts': [],
}]
def print_board(board):
"""Print board."""
b = ['_'] * len(board)
for queen, position in board.items():
b[position['row']] = ['_'] * len(board)
b[position['row']][position['column']] = '*'
b = ['|'.join(r) for r in b]
for row in b:
print(row)
def move_queen(board):
"""Move the queen with maximum number of conflicts."""
global MAX_CONFLICTS
calculate_conflicts(board)
print('MAX_CONFLICTS: ', MAX_CONFLICTS)
MAX_CONFLICTS = None
def calculate_conflicts(board):
"""Calculates all conflicts for each queen and gets the one with maximum conflicts."""
print('calculate')
print('BEFORE calculation', board)
global MAX_CONFLICTS
positions = board.values()
for queen, position in board.items():
print('\n CURRENT POSITION', queen, position)
if len(list(filter(lambda pos: pos['row'] == board[queen]['row'], positions))) > 1:
position['conflicts'].extend(list(filter(lambda pos: pos['row'] == board[queen]['row'], positions))[1:])
if len(list(filter(lambda pos: pos['column'] == board[queen]['column'], positions))) > 1:
position['conflicts'].extend(list(filter(lambda pos: pos['column'] == board[queen]['column'], positions))[1:])
if len(list(filter(lambda pos: abs(board[queen]['column'] - pos['column']) == abs(board[queen]['row'] - pos['row']), positions))) > 1:
position['conflicts'].extend(list(filter(lambda pos: abs(board[queen]['column'] - pos['column']) == abs(board[queen]['row'] - pos['row']), positions))[1:])
# update MAX_CONFLICTS
if len(position['conflicts']) > len(MAX_CONFLICTS[1]['conflicts']):
MAX_CONFLICTS = [queen, position]
print('AFTER calculation', board)
def main():
# number = int(input('Enter the count of queens: '))
number = 4
row_indexes = list(range(number))
shuffle(row_indexes)
# generate board with queens one per row and one per column
board = OrderedDict([
(i, {
'row': row_indexes[i],
'column': i,
'conflicts': []
})
for i in range(number)])
print_board(board)
while MAX_CONFLICTS:
move_queen(board)
# print(board)
if __name__ == '__main__':
main()
|
Create board and find conflicts of the whole boardfrom collections import OrderedDict
from random import shuffle
MAX_CONFLICTS = [-1, {
'row': -1,
'column': -1,
'conflicts': [],
}]
def print_board(board):
"""Print board."""
b = ['_'] * len(board)
for queen, position in board.items():
b[position['row']] = ['_'] * len(board)
b[position['row']][position['column']] = '*'
b = ['|'.join(r) for r in b]
for row in b:
print(row)
def move_queen(board):
"""Move the queen with maximum number of conflicts."""
global MAX_CONFLICTS
calculate_conflicts(board)
print('MAX_CONFLICTS: ', MAX_CONFLICTS)
MAX_CONFLICTS = None
def calculate_conflicts(board):
"""Calculates all conflicts for each queen and gets the one with maximum conflicts."""
print('calculate')
print('BEFORE calculation', board)
global MAX_CONFLICTS
positions = board.values()
for queen, position in board.items():
print('\n CURRENT POSITION', queen, position)
if len(list(filter(lambda pos: pos['row'] == board[queen]['row'], positions))) > 1:
position['conflicts'].extend(list(filter(lambda pos: pos['row'] == board[queen]['row'], positions))[1:])
if len(list(filter(lambda pos: pos['column'] == board[queen]['column'], positions))) > 1:
position['conflicts'].extend(list(filter(lambda pos: pos['column'] == board[queen]['column'], positions))[1:])
if len(list(filter(lambda pos: abs(board[queen]['column'] - pos['column']) == abs(board[queen]['row'] - pos['row']), positions))) > 1:
position['conflicts'].extend(list(filter(lambda pos: abs(board[queen]['column'] - pos['column']) == abs(board[queen]['row'] - pos['row']), positions))[1:])
# update MAX_CONFLICTS
if len(position['conflicts']) > len(MAX_CONFLICTS[1]['conflicts']):
MAX_CONFLICTS = [queen, position]
print('AFTER calculation', board)
def main():
# number = int(input('Enter the count of queens: '))
number = 4
row_indexes = list(range(number))
shuffle(row_indexes)
# generate board with queens one per row and one per column
board = OrderedDict([
(i, {
'row': row_indexes[i],
'column': i,
'conflicts': []
})
for i in range(number)])
print_board(board)
while MAX_CONFLICTS:
move_queen(board)
# print(board)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Create board and find conflicts of the whole board<commit_after>from collections import OrderedDict
from random import shuffle
MAX_CONFLICTS = [-1, {
'row': -1,
'column': -1,
'conflicts': [],
}]
def print_board(board):
"""Print board."""
b = ['_'] * len(board)
for queen, position in board.items():
b[position['row']] = ['_'] * len(board)
b[position['row']][position['column']] = '*'
b = ['|'.join(r) for r in b]
for row in b:
print(row)
def move_queen(board):
"""Move the queen with maximum number of conflicts."""
global MAX_CONFLICTS
calculate_conflicts(board)
print('MAX_CONFLICTS: ', MAX_CONFLICTS)
MAX_CONFLICTS = None
def calculate_conflicts(board):
"""Calculates all conflicts for each queen and gets the one with maximum conflicts."""
print('calculate')
print('BEFORE calculation', board)
global MAX_CONFLICTS
positions = board.values()
for queen, position in board.items():
print('\n CURRENT POSITION', queen, position)
if len(list(filter(lambda pos: pos['row'] == board[queen]['row'], positions))) > 1:
position['conflicts'].extend(list(filter(lambda pos: pos['row'] == board[queen]['row'], positions))[1:])
if len(list(filter(lambda pos: pos['column'] == board[queen]['column'], positions))) > 1:
position['conflicts'].extend(list(filter(lambda pos: pos['column'] == board[queen]['column'], positions))[1:])
if len(list(filter(lambda pos: abs(board[queen]['column'] - pos['column']) == abs(board[queen]['row'] - pos['row']), positions))) > 1:
position['conflicts'].extend(list(filter(lambda pos: abs(board[queen]['column'] - pos['column']) == abs(board[queen]['row'] - pos['row']), positions))[1:])
# update MAX_CONFLICTS
if len(position['conflicts']) > len(MAX_CONFLICTS[1]['conflicts']):
MAX_CONFLICTS = [queen, position]
print('AFTER calculation', board)
def main():
# number = int(input('Enter the count of queens: '))
number = 4
row_indexes = list(range(number))
shuffle(row_indexes)
# generate board with queens one per row and one per column
board = OrderedDict([
(i, {
'row': row_indexes[i],
'column': i,
'conflicts': []
})
for i in range(number)])
print_board(board)
while MAX_CONFLICTS:
move_queen(board)
# print(board)
if __name__ == '__main__':
main()
|
|
c1fcf18feaafab22f32bb755cae7920d0f09cbb9
|
tests/grid_2d_canvas_test.py
|
tests/grid_2d_canvas_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test case for 2D grid canvas.
"""
import unittest
from matplotlib.collections import PathCollection
from catplot.grid_components.grid_canvas import Grid2DCanvas
class Grid2DCanvasTest(unittest.TestCase):
def setUp(self):
self.maxDiff = True
def test_construction_and_query(self):
""" Test the 2D grid canvas can be constructed corretly.
"""
canvas = Grid2DCanvas()
self.assertListEqual(canvas.nodes, [])
self.assertListEqual(canvas.edges, [])
self.assertTrue(isinstance(canvas.collection, PathCollection))
if "__main__" == __name__:
suite = unittest.TestLoader().loadTestsFromTestCase(Grid2DCanvasTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
Add 2d grid canvas construction test.
|
Add 2d grid canvas construction test.
|
Python
|
mit
|
PytLab/catplot
|
Add 2d grid canvas construction test.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test case for 2D grid canvas.
"""
import unittest
from matplotlib.collections import PathCollection
from catplot.grid_components.grid_canvas import Grid2DCanvas
class Grid2DCanvasTest(unittest.TestCase):
def setUp(self):
self.maxDiff = True
def test_construction_and_query(self):
""" Test the 2D grid canvas can be constructed corretly.
"""
canvas = Grid2DCanvas()
self.assertListEqual(canvas.nodes, [])
self.assertListEqual(canvas.edges, [])
self.assertTrue(isinstance(canvas.collection, PathCollection))
if "__main__" == __name__:
suite = unittest.TestLoader().loadTestsFromTestCase(Grid2DCanvasTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
<commit_before><commit_msg>Add 2d grid canvas construction test.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test case for 2D grid canvas.
"""
import unittest
from matplotlib.collections import PathCollection
from catplot.grid_components.grid_canvas import Grid2DCanvas
class Grid2DCanvasTest(unittest.TestCase):
def setUp(self):
self.maxDiff = True
def test_construction_and_query(self):
""" Test the 2D grid canvas can be constructed corretly.
"""
canvas = Grid2DCanvas()
self.assertListEqual(canvas.nodes, [])
self.assertListEqual(canvas.edges, [])
self.assertTrue(isinstance(canvas.collection, PathCollection))
if "__main__" == __name__:
suite = unittest.TestLoader().loadTestsFromTestCase(Grid2DCanvasTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
Add 2d grid canvas construction test.#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test case for 2D grid canvas.
"""
import unittest
from matplotlib.collections import PathCollection
from catplot.grid_components.grid_canvas import Grid2DCanvas
class Grid2DCanvasTest(unittest.TestCase):
def setUp(self):
self.maxDiff = True
def test_construction_and_query(self):
""" Test the 2D grid canvas can be constructed corretly.
"""
canvas = Grid2DCanvas()
self.assertListEqual(canvas.nodes, [])
self.assertListEqual(canvas.edges, [])
self.assertTrue(isinstance(canvas.collection, PathCollection))
if "__main__" == __name__:
suite = unittest.TestLoader().loadTestsFromTestCase(Grid2DCanvasTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
<commit_before><commit_msg>Add 2d grid canvas construction test.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test case for 2D grid canvas.
"""
import unittest
from matplotlib.collections import PathCollection
from catplot.grid_components.grid_canvas import Grid2DCanvas
class Grid2DCanvasTest(unittest.TestCase):
def setUp(self):
self.maxDiff = True
def test_construction_and_query(self):
""" Test the 2D grid canvas can be constructed corretly.
"""
canvas = Grid2DCanvas()
self.assertListEqual(canvas.nodes, [])
self.assertListEqual(canvas.edges, [])
self.assertTrue(isinstance(canvas.collection, PathCollection))
if "__main__" == __name__:
suite = unittest.TestLoader().loadTestsFromTestCase(Grid2DCanvasTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
76817a379aff0dd6968df6a6163c635f7e7b53e3
|
dynd/tests/test_ndobject_basics.py
|
dynd/tests/test_ndobject_basics.py
|
import sys
import unittest
from dynd import nd, ndt
class TestBasics(unittest.TestCase):
def test_index(self):
# Test that the __index__ method/nb_index slot
# in ndobject is working
a = [1, 2, 3, 4, 5, 6]
self.assertEqual(a[nd.ndobject(0)], 1)
self.assertEqual(a[nd.ndobject(1):nd.ndobject(3)], [2, 3])
self.assertEqual(a[nd.ndobject(-1, ndt.int8)], 6)
def test_index_errors(self):
a = [1, 2, 3, 4, 5, 6]
self.assertRaises(TypeError, lambda x : a[x], nd.ndobject(True))
self.assertRaises(TypeError, lambda x : a[x], nd.ndobject(3.5))
self.assertRaises(IndexError, lambda x : a[x], nd.ndobject(10))
if __name__ == '__main__':
unittest.main()
|
Add some tests for the __index__/nb_index slot in ndobject
|
Add some tests for the __index__/nb_index slot in ndobject
|
Python
|
bsd-2-clause
|
izaid/dynd-python,ContinuumIO/dynd-python,cpcloud/dynd-python,mwiebe/dynd-python,mwiebe/dynd-python,insertinterestingnamehere/dynd-python,aterrel/dynd-python,michaelpacer/dynd-python,pombredanne/dynd-python,insertinterestingnamehere/dynd-python,pombredanne/dynd-python,pombredanne/dynd-python,insertinterestingnamehere/dynd-python,cpcloud/dynd-python,aterrel/dynd-python,ContinuumIO/dynd-python,michaelpacer/dynd-python,cpcloud/dynd-python,mwiebe/dynd-python,aterrel/dynd-python,pombredanne/dynd-python,insertinterestingnamehere/dynd-python,michaelpacer/dynd-python,michaelpacer/dynd-python,aterrel/dynd-python,mwiebe/dynd-python,izaid/dynd-python,izaid/dynd-python,ContinuumIO/dynd-python,ContinuumIO/dynd-python,cpcloud/dynd-python,izaid/dynd-python
|
Add some tests for the __index__/nb_index slot in ndobject
|
import sys
import unittest
from dynd import nd, ndt
class TestBasics(unittest.TestCase):
def test_index(self):
# Test that the __index__ method/nb_index slot
# in ndobject is working
a = [1, 2, 3, 4, 5, 6]
self.assertEqual(a[nd.ndobject(0)], 1)
self.assertEqual(a[nd.ndobject(1):nd.ndobject(3)], [2, 3])
self.assertEqual(a[nd.ndobject(-1, ndt.int8)], 6)
def test_index_errors(self):
a = [1, 2, 3, 4, 5, 6]
self.assertRaises(TypeError, lambda x : a[x], nd.ndobject(True))
self.assertRaises(TypeError, lambda x : a[x], nd.ndobject(3.5))
self.assertRaises(IndexError, lambda x : a[x], nd.ndobject(10))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add some tests for the __index__/nb_index slot in ndobject<commit_after>
|
import sys
import unittest
from dynd import nd, ndt
class TestBasics(unittest.TestCase):
def test_index(self):
# Test that the __index__ method/nb_index slot
# in ndobject is working
a = [1, 2, 3, 4, 5, 6]
self.assertEqual(a[nd.ndobject(0)], 1)
self.assertEqual(a[nd.ndobject(1):nd.ndobject(3)], [2, 3])
self.assertEqual(a[nd.ndobject(-1, ndt.int8)], 6)
def test_index_errors(self):
a = [1, 2, 3, 4, 5, 6]
self.assertRaises(TypeError, lambda x : a[x], nd.ndobject(True))
self.assertRaises(TypeError, lambda x : a[x], nd.ndobject(3.5))
self.assertRaises(IndexError, lambda x : a[x], nd.ndobject(10))
if __name__ == '__main__':
unittest.main()
|
Add some tests for the __index__/nb_index slot in ndobjectimport sys
import unittest
from dynd import nd, ndt
class TestBasics(unittest.TestCase):
def test_index(self):
# Test that the __index__ method/nb_index slot
# in ndobject is working
a = [1, 2, 3, 4, 5, 6]
self.assertEqual(a[nd.ndobject(0)], 1)
self.assertEqual(a[nd.ndobject(1):nd.ndobject(3)], [2, 3])
self.assertEqual(a[nd.ndobject(-1, ndt.int8)], 6)
def test_index_errors(self):
a = [1, 2, 3, 4, 5, 6]
self.assertRaises(TypeError, lambda x : a[x], nd.ndobject(True))
self.assertRaises(TypeError, lambda x : a[x], nd.ndobject(3.5))
self.assertRaises(IndexError, lambda x : a[x], nd.ndobject(10))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add some tests for the __index__/nb_index slot in ndobject<commit_after>import sys
import unittest
from dynd import nd, ndt
class TestBasics(unittest.TestCase):
def test_index(self):
# Test that the __index__ method/nb_index slot
# in ndobject is working
a = [1, 2, 3, 4, 5, 6]
self.assertEqual(a[nd.ndobject(0)], 1)
self.assertEqual(a[nd.ndobject(1):nd.ndobject(3)], [2, 3])
self.assertEqual(a[nd.ndobject(-1, ndt.int8)], 6)
def test_index_errors(self):
a = [1, 2, 3, 4, 5, 6]
self.assertRaises(TypeError, lambda x : a[x], nd.ndobject(True))
self.assertRaises(TypeError, lambda x : a[x], nd.ndobject(3.5))
self.assertRaises(IndexError, lambda x : a[x], nd.ndobject(10))
if __name__ == '__main__':
unittest.main()
|
|
e1ffb7b75ff4c1b4124b9af5bdd1b00230589129
|
tests/test_connect.py
|
tests/test_connect.py
|
import asyncpg
from asyncpg import _testbase as tb
class TestConnect(tb.ConnectedTestCase):
async def test_connect_1(self):
with self.assertRaisesRegex(
Exception, 'role "__does_not_exist__" does not exist'):
await asyncpg.connect(user="__does_not_exist__", loop=self.loop)
|
Add user, password, dbname to connect(); support UNIX sockets
|
Add user, password, dbname to connect(); support UNIX sockets
|
Python
|
apache-2.0
|
MagicStack/asyncpg,MagicStack/asyncpg
|
Add user, password, dbname to connect(); support UNIX sockets
|
import asyncpg
from asyncpg import _testbase as tb
class TestConnect(tb.ConnectedTestCase):
async def test_connect_1(self):
with self.assertRaisesRegex(
Exception, 'role "__does_not_exist__" does not exist'):
await asyncpg.connect(user="__does_not_exist__", loop=self.loop)
|
<commit_before><commit_msg>Add user, password, dbname to connect(); support UNIX sockets<commit_after>
|
import asyncpg
from asyncpg import _testbase as tb
class TestConnect(tb.ConnectedTestCase):
async def test_connect_1(self):
with self.assertRaisesRegex(
Exception, 'role "__does_not_exist__" does not exist'):
await asyncpg.connect(user="__does_not_exist__", loop=self.loop)
|
Add user, password, dbname to connect(); support UNIX socketsimport asyncpg
from asyncpg import _testbase as tb
class TestConnect(tb.ConnectedTestCase):
async def test_connect_1(self):
with self.assertRaisesRegex(
Exception, 'role "__does_not_exist__" does not exist'):
await asyncpg.connect(user="__does_not_exist__", loop=self.loop)
|
<commit_before><commit_msg>Add user, password, dbname to connect(); support UNIX sockets<commit_after>import asyncpg
from asyncpg import _testbase as tb
class TestConnect(tb.ConnectedTestCase):
async def test_connect_1(self):
with self.assertRaisesRegex(
Exception, 'role "__does_not_exist__" does not exist'):
await asyncpg.connect(user="__does_not_exist__", loop=self.loop)
|
|
75b3357617ebd8152ebf25abcb776a252ea7389d
|
tests/test_leonard.py
|
tests/test_leonard.py
|
"""Test garner.dates."""
from __future__ import absolute_import
from __future__ import print_function
from .check import Check
from proselint.checks.typography import exclamation
class TestCheck(Check):
"""Test class for leonard.exclamation."""
__test__ = True
def test_capitalization_and_no_exclamation(self):
"""Don't throw error when phrase has capitalization."""
text = """
The QUICK BROWN fox juMPED over the lazy cat.
"""
errors = exclamation.check_repeated_exclamations(text)
assert len(errors) == 0
def test_exclamation(self):
"""Test leonard.exclamation. with exclamation marks."""
text = """Sally sells seashells and they were too expensive!!!!"""
errors = exclamation.check_repeated_exclamations(text)
print(errors)
assert len(errors) == 1
|
Add new test for exclamation.leonard
|
Add new test for exclamation.leonard
|
Python
|
bsd-3-clause
|
amperser/proselint,amperser/proselint,amperser/proselint,amperser/proselint,amperser/proselint
|
Add new test for exclamation.leonard
|
"""Test garner.dates."""
from __future__ import absolute_import
from __future__ import print_function
from .check import Check
from proselint.checks.typography import exclamation
class TestCheck(Check):
"""Test class for leonard.exclamation."""
__test__ = True
def test_capitalization_and_no_exclamation(self):
"""Don't throw error when phrase has capitalization."""
text = """
The QUICK BROWN fox juMPED over the lazy cat.
"""
errors = exclamation.check_repeated_exclamations(text)
assert len(errors) == 0
def test_exclamation(self):
"""Test leonard.exclamation. with exclamation marks."""
text = """Sally sells seashells and they were too expensive!!!!"""
errors = exclamation.check_repeated_exclamations(text)
print(errors)
assert len(errors) == 1
|
<commit_before><commit_msg>Add new test for exclamation.leonard<commit_after>
|
"""Test garner.dates."""
from __future__ import absolute_import
from __future__ import print_function
from .check import Check
from proselint.checks.typography import exclamation
class TestCheck(Check):
"""Test class for leonard.exclamation."""
__test__ = True
def test_capitalization_and_no_exclamation(self):
"""Don't throw error when phrase has capitalization."""
text = """
The QUICK BROWN fox juMPED over the lazy cat.
"""
errors = exclamation.check_repeated_exclamations(text)
assert len(errors) == 0
def test_exclamation(self):
"""Test leonard.exclamation. with exclamation marks."""
text = """Sally sells seashells and they were too expensive!!!!"""
errors = exclamation.check_repeated_exclamations(text)
print(errors)
assert len(errors) == 1
|
Add new test for exclamation.leonard"""Test garner.dates."""
from __future__ import absolute_import
from __future__ import print_function
from .check import Check
from proselint.checks.typography import exclamation
class TestCheck(Check):
"""Test class for leonard.exclamation."""
__test__ = True
def test_capitalization_and_no_exclamation(self):
"""Don't throw error when phrase has capitalization."""
text = """
The QUICK BROWN fox juMPED over the lazy cat.
"""
errors = exclamation.check_repeated_exclamations(text)
assert len(errors) == 0
def test_exclamation(self):
"""Test leonard.exclamation. with exclamation marks."""
text = """Sally sells seashells and they were too expensive!!!!"""
errors = exclamation.check_repeated_exclamations(text)
print(errors)
assert len(errors) == 1
|
<commit_before><commit_msg>Add new test for exclamation.leonard<commit_after>"""Test garner.dates."""
from __future__ import absolute_import
from __future__ import print_function
from .check import Check
from proselint.checks.typography import exclamation
class TestCheck(Check):
"""Test class for leonard.exclamation."""
__test__ = True
def test_capitalization_and_no_exclamation(self):
"""Don't throw error when phrase has capitalization."""
text = """
The QUICK BROWN fox juMPED over the lazy cat.
"""
errors = exclamation.check_repeated_exclamations(text)
assert len(errors) == 0
def test_exclamation(self):
"""Test leonard.exclamation. with exclamation marks."""
text = """Sally sells seashells and they were too expensive!!!!"""
errors = exclamation.check_repeated_exclamations(text)
print(errors)
assert len(errors) == 1
|
|
9fb10d8d30fbdb08e46bea3996e75098dce904e5
|
fabfile/testbeds/testbed_nodec3.py
|
fabfile/testbeds/testbed_nodec3.py
|
from fabric.api import env
host1 = 'root@10.204.216.60'
ext_routers = [('mx1', '10.204.216.253')]
router_asn = 64510
public_vn_rtgt = 10003
public_vn_subnet = "10.204.219.24/29"
host_build = 'vjoshi@10.204.216.56'
env.roledefs = {
'all': [host1],
'cfgm': [host1],
'openstack': [host1],
'control': [host1],
'compute': [host1],
'collector': [host1],
'webui': [host1],
'database': [host1],
'build': [host_build],
}
env.hostnames = {
'all': ['nodec3']
}
env.passwords = {
host1: 'c0ntrail123',
host_build: 'c0ntrail123',
}
env.test_repo_dir='/root/contrail-test'
env.mail_from='vjoshi@juniper.net'
env.mail_to='vjoshi@juniper.net'
env.log_scenario='Vedus single node'
multi_tenancy=True
env.interface_rename = False
env.encap_priority = "'MPLSoUDP','MPLSoGRE','VXLAN'"
|
Add testbed file for tempest node
|
Add testbed file for tempest node
Change-Id: Idabe51a777aa73ff7a3fa94fc0f219ebd80686fc
|
Python
|
apache-2.0
|
Juniper/contrail-fabric-utils,Juniper/contrail-fabric-utils
|
Add testbed file for tempest node
Change-Id: Idabe51a777aa73ff7a3fa94fc0f219ebd80686fc
|
from fabric.api import env
host1 = 'root@10.204.216.60'
ext_routers = [('mx1', '10.204.216.253')]
router_asn = 64510
public_vn_rtgt = 10003
public_vn_subnet = "10.204.219.24/29"
host_build = 'vjoshi@10.204.216.56'
env.roledefs = {
'all': [host1],
'cfgm': [host1],
'openstack': [host1],
'control': [host1],
'compute': [host1],
'collector': [host1],
'webui': [host1],
'database': [host1],
'build': [host_build],
}
env.hostnames = {
'all': ['nodec3']
}
env.passwords = {
host1: 'c0ntrail123',
host_build: 'c0ntrail123',
}
env.test_repo_dir='/root/contrail-test'
env.mail_from='vjoshi@juniper.net'
env.mail_to='vjoshi@juniper.net'
env.log_scenario='Vedus single node'
multi_tenancy=True
env.interface_rename = False
env.encap_priority = "'MPLSoUDP','MPLSoGRE','VXLAN'"
|
<commit_before><commit_msg>Add testbed file for tempest node
Change-Id: Idabe51a777aa73ff7a3fa94fc0f219ebd80686fc<commit_after>
|
from fabric.api import env
host1 = 'root@10.204.216.60'
ext_routers = [('mx1', '10.204.216.253')]
router_asn = 64510
public_vn_rtgt = 10003
public_vn_subnet = "10.204.219.24/29"
host_build = 'vjoshi@10.204.216.56'
env.roledefs = {
'all': [host1],
'cfgm': [host1],
'openstack': [host1],
'control': [host1],
'compute': [host1],
'collector': [host1],
'webui': [host1],
'database': [host1],
'build': [host_build],
}
env.hostnames = {
'all': ['nodec3']
}
env.passwords = {
host1: 'c0ntrail123',
host_build: 'c0ntrail123',
}
env.test_repo_dir='/root/contrail-test'
env.mail_from='vjoshi@juniper.net'
env.mail_to='vjoshi@juniper.net'
env.log_scenario='Vedus single node'
multi_tenancy=True
env.interface_rename = False
env.encap_priority = "'MPLSoUDP','MPLSoGRE','VXLAN'"
|
Add testbed file for tempest node
Change-Id: Idabe51a777aa73ff7a3fa94fc0f219ebd80686fcfrom fabric.api import env
host1 = 'root@10.204.216.60'
ext_routers = [('mx1', '10.204.216.253')]
router_asn = 64510
public_vn_rtgt = 10003
public_vn_subnet = "10.204.219.24/29"
host_build = 'vjoshi@10.204.216.56'
env.roledefs = {
'all': [host1],
'cfgm': [host1],
'openstack': [host1],
'control': [host1],
'compute': [host1],
'collector': [host1],
'webui': [host1],
'database': [host1],
'build': [host_build],
}
env.hostnames = {
'all': ['nodec3']
}
env.passwords = {
host1: 'c0ntrail123',
host_build: 'c0ntrail123',
}
env.test_repo_dir='/root/contrail-test'
env.mail_from='vjoshi@juniper.net'
env.mail_to='vjoshi@juniper.net'
env.log_scenario='Vedus single node'
multi_tenancy=True
env.interface_rename = False
env.encap_priority = "'MPLSoUDP','MPLSoGRE','VXLAN'"
|
<commit_before><commit_msg>Add testbed file for tempest node
Change-Id: Idabe51a777aa73ff7a3fa94fc0f219ebd80686fc<commit_after>from fabric.api import env
host1 = 'root@10.204.216.60'
ext_routers = [('mx1', '10.204.216.253')]
router_asn = 64510
public_vn_rtgt = 10003
public_vn_subnet = "10.204.219.24/29"
host_build = 'vjoshi@10.204.216.56'
env.roledefs = {
'all': [host1],
'cfgm': [host1],
'openstack': [host1],
'control': [host1],
'compute': [host1],
'collector': [host1],
'webui': [host1],
'database': [host1],
'build': [host_build],
}
env.hostnames = {
'all': ['nodec3']
}
env.passwords = {
host1: 'c0ntrail123',
host_build: 'c0ntrail123',
}
env.test_repo_dir='/root/contrail-test'
env.mail_from='vjoshi@juniper.net'
env.mail_to='vjoshi@juniper.net'
env.log_scenario='Vedus single node'
multi_tenancy=True
env.interface_rename = False
env.encap_priority = "'MPLSoUDP','MPLSoGRE','VXLAN'"
|
|
ec28288b924a6378b70de3192f962bf26cf5362b
|
fil_finder/tests/_testing_data.py
|
fil_finder/tests/_testing_data.py
|
'''
Load in the testing data
'''
import os
from astropy.io import fits
from astropy.table import Table
dir_path = os.path.dirname(__file__)
path1 = os.path.join(dir_path, "testing_data/test1")
path2 = os.path.join(dir_path, "testing_data/test2")
img_path = os.path.join(dir_path, "testing_data")
# Load in the fits file
img, hdr = \
fits.getdata(os.path.join(img_path, "filaments_updatedhdr.fits"),
header=True)
# Load in each dataset
model1 = fits.getdata(os.path.join(path1, "test1_filament_model.fits"))
mask1 = fits.getdata(os.path.join(path1, "test1_mask.fits"))
skeletons1 = \
fits.getdata(os.path.join(path1, "test1_skeletons.fits"))
model2 = fits.getdata(os.path.join(path2, "test2_filament_model.fits"))
mask2 = fits.getdata(os.path.join(path2, "test2_mask.fits"))
skeletons2 = \
fits.getdata(os.path.join(path2, "test2_skeletons.fits"))
table1 = Table.read(os.path.join(path1, "test1_table.hdf5"), path="data")
table2 = Table.read(os.path.join(path1, "test1_table.hdf5"), path="data")
branch_tables1 = \
[Table.read(os.path.join(path1, "test1_table_branch.hdf5"), path="branch_"+str(i))
for i in range(7)]
branch_tables2 = \
[Table.read(os.path.join(path2, "test2_table_branch.hdf5"), path="branch_"+str(i))
for i in range(7)]
|
Load in the saved testing data
|
Load in the saved testing data
|
Python
|
mit
|
dcolombo/FilFinder,e-koch/FilFinder,keflavich/fil_finder
|
Load in the saved testing data
|
'''
Load in the testing data
'''
import os
from astropy.io import fits
from astropy.table import Table
dir_path = os.path.dirname(__file__)
path1 = os.path.join(dir_path, "testing_data/test1")
path2 = os.path.join(dir_path, "testing_data/test2")
img_path = os.path.join(dir_path, "testing_data")
# Load in the fits file
img, hdr = \
fits.getdata(os.path.join(img_path, "filaments_updatedhdr.fits"),
header=True)
# Load in each dataset
model1 = fits.getdata(os.path.join(path1, "test1_filament_model.fits"))
mask1 = fits.getdata(os.path.join(path1, "test1_mask.fits"))
skeletons1 = \
fits.getdata(os.path.join(path1, "test1_skeletons.fits"))
model2 = fits.getdata(os.path.join(path2, "test2_filament_model.fits"))
mask2 = fits.getdata(os.path.join(path2, "test2_mask.fits"))
skeletons2 = \
fits.getdata(os.path.join(path2, "test2_skeletons.fits"))
table1 = Table.read(os.path.join(path1, "test1_table.hdf5"), path="data")
table2 = Table.read(os.path.join(path1, "test1_table.hdf5"), path="data")
branch_tables1 = \
[Table.read(os.path.join(path1, "test1_table_branch.hdf5"), path="branch_"+str(i))
for i in range(7)]
branch_tables2 = \
[Table.read(os.path.join(path2, "test2_table_branch.hdf5"), path="branch_"+str(i))
for i in range(7)]
|
<commit_before><commit_msg>Load in the saved testing data<commit_after>
|
'''
Load in the testing data
'''
import os
from astropy.io import fits
from astropy.table import Table
dir_path = os.path.dirname(__file__)
path1 = os.path.join(dir_path, "testing_data/test1")
path2 = os.path.join(dir_path, "testing_data/test2")
img_path = os.path.join(dir_path, "testing_data")
# Load in the fits file
img, hdr = \
fits.getdata(os.path.join(img_path, "filaments_updatedhdr.fits"),
header=True)
# Load in each dataset
model1 = fits.getdata(os.path.join(path1, "test1_filament_model.fits"))
mask1 = fits.getdata(os.path.join(path1, "test1_mask.fits"))
skeletons1 = \
fits.getdata(os.path.join(path1, "test1_skeletons.fits"))
model2 = fits.getdata(os.path.join(path2, "test2_filament_model.fits"))
mask2 = fits.getdata(os.path.join(path2, "test2_mask.fits"))
skeletons2 = \
fits.getdata(os.path.join(path2, "test2_skeletons.fits"))
table1 = Table.read(os.path.join(path1, "test1_table.hdf5"), path="data")
table2 = Table.read(os.path.join(path1, "test1_table.hdf5"), path="data")
branch_tables1 = \
[Table.read(os.path.join(path1, "test1_table_branch.hdf5"), path="branch_"+str(i))
for i in range(7)]
branch_tables2 = \
[Table.read(os.path.join(path2, "test2_table_branch.hdf5"), path="branch_"+str(i))
for i in range(7)]
|
Load in the saved testing data
'''
Load in the testing data
'''
import os
from astropy.io import fits
from astropy.table import Table
dir_path = os.path.dirname(__file__)
path1 = os.path.join(dir_path, "testing_data/test1")
path2 = os.path.join(dir_path, "testing_data/test2")
img_path = os.path.join(dir_path, "testing_data")
# Load in the fits file
img, hdr = \
fits.getdata(os.path.join(img_path, "filaments_updatedhdr.fits"),
header=True)
# Load in each dataset
model1 = fits.getdata(os.path.join(path1, "test1_filament_model.fits"))
mask1 = fits.getdata(os.path.join(path1, "test1_mask.fits"))
skeletons1 = \
fits.getdata(os.path.join(path1, "test1_skeletons.fits"))
model2 = fits.getdata(os.path.join(path2, "test2_filament_model.fits"))
mask2 = fits.getdata(os.path.join(path2, "test2_mask.fits"))
skeletons2 = \
fits.getdata(os.path.join(path2, "test2_skeletons.fits"))
table1 = Table.read(os.path.join(path1, "test1_table.hdf5"), path="data")
table2 = Table.read(os.path.join(path1, "test1_table.hdf5"), path="data")
branch_tables1 = \
[Table.read(os.path.join(path1, "test1_table_branch.hdf5"), path="branch_"+str(i))
for i in range(7)]
branch_tables2 = \
[Table.read(os.path.join(path2, "test2_table_branch.hdf5"), path="branch_"+str(i))
for i in range(7)]
|
<commit_before><commit_msg>Load in the saved testing data<commit_after>
'''
Load in the testing data
'''
import os
from astropy.io import fits
from astropy.table import Table
dir_path = os.path.dirname(__file__)
path1 = os.path.join(dir_path, "testing_data/test1")
path2 = os.path.join(dir_path, "testing_data/test2")
img_path = os.path.join(dir_path, "testing_data")
# Load in the fits file
img, hdr = \
fits.getdata(os.path.join(img_path, "filaments_updatedhdr.fits"),
header=True)
# Load in each dataset
model1 = fits.getdata(os.path.join(path1, "test1_filament_model.fits"))
mask1 = fits.getdata(os.path.join(path1, "test1_mask.fits"))
skeletons1 = \
fits.getdata(os.path.join(path1, "test1_skeletons.fits"))
model2 = fits.getdata(os.path.join(path2, "test2_filament_model.fits"))
mask2 = fits.getdata(os.path.join(path2, "test2_mask.fits"))
skeletons2 = \
fits.getdata(os.path.join(path2, "test2_skeletons.fits"))
table1 = Table.read(os.path.join(path1, "test1_table.hdf5"), path="data")
table2 = Table.read(os.path.join(path1, "test1_table.hdf5"), path="data")
branch_tables1 = \
[Table.read(os.path.join(path1, "test1_table_branch.hdf5"), path="branch_"+str(i))
for i in range(7)]
branch_tables2 = \
[Table.read(os.path.join(path2, "test2_table_branch.hdf5"), path="branch_"+str(i))
for i in range(7)]
|
|
845b4fe3bf708d0434cb64d37a212bc0fd6b5ac6
|
ci/testsettings.py
|
ci/testsettings.py
|
# minimal django settings required to run tests
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "test.db",
}
}
# SECRET_KEY = ''
|
# minimal django settings required to run tests
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "test.db",
}
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django_cas_ng',
'pucas',
)
# SECRET_KEY = ''
|
Add installed apps configuration to test settings
|
Add installed apps configuration to test settings
|
Python
|
apache-2.0
|
Princeton-CDH/django-pucas,Princeton-CDH/django-pucas
|
# minimal django settings required to run tests
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "test.db",
}
}
# SECRET_KEY = ''
Add installed apps configuration to test settings
|
# minimal django settings required to run tests
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "test.db",
}
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django_cas_ng',
'pucas',
)
# SECRET_KEY = ''
|
<commit_before># minimal django settings required to run tests
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "test.db",
}
}
# SECRET_KEY = ''
<commit_msg>Add installed apps configuration to test settings<commit_after>
|
# minimal django settings required to run tests
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "test.db",
}
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django_cas_ng',
'pucas',
)
# SECRET_KEY = ''
|
# minimal django settings required to run tests
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "test.db",
}
}
# SECRET_KEY = ''
Add installed apps configuration to test settings# minimal django settings required to run tests
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "test.db",
}
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django_cas_ng',
'pucas',
)
# SECRET_KEY = ''
|
<commit_before># minimal django settings required to run tests
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "test.db",
}
}
# SECRET_KEY = ''
<commit_msg>Add installed apps configuration to test settings<commit_after># minimal django settings required to run tests
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "test.db",
}
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django_cas_ng',
'pucas',
)
# SECRET_KEY = ''
|
0dd70c2e6e991163852e8889e0a4802e702ae68d
|
mea_module.py
|
mea_module.py
|
import sys
from jnius import autoclass, cast
from TripsModule import trips_module
KQMLPerformative = autoclass('TRIPS.KQML.KQMLPerformative')
KQMLList = autoclass('TRIPS.KQML.KQMLList')
KQMLObject = autoclass('TRIPS.KQML.KQMLObject')
from bioagents.mea import MEA
class MEA_Module(trips_module.TripsModule):
def __init__(self, argv):
super(MEA_Module, self).__init__(argv)
self.tasks = {'ONT::PERFORM': ['ONT::SIMULATE-MODEL']}
def init(self):
'''
Initialize TRIPS module
'''
super(MEA_Module, self).init()
# Send subscribe messages
for task, subtasks in self.tasks.iteritems():
for subtask in subtasks:
msg_txt = '(subscribe :content (request &key :content ' +\
'(%s &key :content (%s . *))))' % (task, subtask)
self.send(KQMLPerformative.fromString(msg_txt))
# Instantiate a singleton MEA agent
self.mea = MEA()
self.ready()
def receive_request(self, msg, content):
'''
If a "request" message is received, decode the task and the content
and call the appropriate function to prepare the response. A reply
"tell" message is then sent back.
'''
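        # Illustrative only (not part of the original commit): given the
        # subscription registered in init(), the request content handled here
        # is assumed to look roughly like
        #   (ONT::PERFORM :content (ONT::SIMULATE-MODEL ...))
        # and the reply is sent back as a (reply :content ...) performative.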
content_list = cast(KQMLList, content)
task_str = content_list.get(0).toString().upper()
if task_str == 'ONT::PERFORM':
subtask = cast(KQMLList,content_list.getKeywordArg(':content'))
subtask_str = subtask.get(0).toString().upper()
if subtask_str == 'ONT::SIMULATE-MODEL':
reply_content = self.respond_simulate_model(content_list)
else:
self.error_reply(msg, 'unknown request subtask ' + subtask_str)
return
else:
self.error_reply(msg, 'unknown request task ' + task_str)
return
reply_msg = KQMLPerformative('reply')
reply_msg.setParameter(':content', cast(KQMLObject, reply_content))
self.reply(msg, reply_msg)
def respond_simulate_model(self, content_list):
'''
Response content to simulate-model request
'''
# TODO: implement
reply_content = KQMLList()
reply_content.add('()')
return reply_content
if __name__ == "__main__":
MEA_Module(['-name', 'MEA'] + sys.argv[1:]).run()
|
Add TRIPS module for MEA
|
Add TRIPS module for MEA
|
Python
|
bsd-2-clause
|
bgyori/bioagents,sorgerlab/bioagents
|
Add TRIPS module for MEA
|
import sys
from jnius import autoclass, cast
from TripsModule import trips_module
KQMLPerformative = autoclass('TRIPS.KQML.KQMLPerformative')
KQMLList = autoclass('TRIPS.KQML.KQMLList')
KQMLObject = autoclass('TRIPS.KQML.KQMLObject')
from bioagents.mea import MEA
class MEA_Module(trips_module.TripsModule):
def __init__(self, argv):
super(MEA_Module, self).__init__(argv)
self.tasks = {'ONT::PERFORM': ['ONT::SIMULATE-MODEL']}
def init(self):
'''
Initialize TRIPS module
'''
super(MEA_Module, self).init()
# Send subscribe messages
for task, subtasks in self.tasks.iteritems():
for subtask in subtasks:
msg_txt = '(subscribe :content (request &key :content ' +\
'(%s &key :content (%s . *))))' % (task, subtask)
self.send(KQMLPerformative.fromString(msg_txt))
# Instantiate a singleton MEA agent
self.mea = MEA()
self.ready()
def receive_request(self, msg, content):
'''
If a "request" message is received, decode the task and the content
and call the appropriate function to prepare the response. A reply
"tell" message is then sent back.
'''
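        # Illustrative only (not part of the original commit): given the
        # subscription registered in init(), the request content handled here
        # is assumed to look roughly like
        #   (ONT::PERFORM :content (ONT::SIMULATE-MODEL ...))
        # and the reply is sent back as a (reply :content ...) performative.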
content_list = cast(KQMLList, content)
task_str = content_list.get(0).toString().upper()
if task_str == 'ONT::PERFORM':
subtask = cast(KQMLList,content_list.getKeywordArg(':content'))
subtask_str = subtask.get(0).toString().upper()
if subtask_str == 'ONT::SIMULATE-MODEL':
reply_content = self.respond_simulate_model(content_list)
else:
self.error_reply(msg, 'unknown request subtask ' + subtask_str)
return
else:
self.error_reply(msg, 'unknown request task ' + task_str)
return
reply_msg = KQMLPerformative('reply')
reply_msg.setParameter(':content', cast(KQMLObject, reply_content))
self.reply(msg, reply_msg)
def respond_simulate_model(self, content_list):
'''
Response content to simulate-model request
'''
# TODO: implement
reply_content = KQMLList()
reply_content.add('()')
return reply_content
if __name__ == "__main__":
MEA_Module(['-name', 'MEA'] + sys.argv[1:]).run()
|
<commit_before><commit_msg>Add TRIPS module for MEA<commit_after>
|
import sys
from jnius import autoclass, cast
from TripsModule import trips_module
KQMLPerformative = autoclass('TRIPS.KQML.KQMLPerformative')
KQMLList = autoclass('TRIPS.KQML.KQMLList')
KQMLObject = autoclass('TRIPS.KQML.KQMLObject')
from bioagents.mea import MEA
class MEA_Module(trips_module.TripsModule):
def __init__(self, argv):
super(MEA_Module, self).__init__(argv)
self.tasks = {'ONT::PERFORM': ['ONT::SIMULATE-MODEL']}
def init(self):
'''
Initialize TRIPS module
'''
super(MEA_Module, self).init()
# Send subscribe messages
for task, subtasks in self.tasks.iteritems():
for subtask in subtasks:
msg_txt = '(subscribe :content (request &key :content ' +\
'(%s &key :content (%s . *))))' % (task, subtask)
self.send(KQMLPerformative.fromString(msg_txt))
# Instantiate a singleton MEA agent
self.mea = MEA()
self.ready()
def receive_request(self, msg, content):
'''
If a "request" message is received, decode the task and the content
and call the appropriate function to prepare the response. A reply
"tell" message is then sent back.
'''
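        # Illustrative only (not part of the original commit): given the
        # subscription registered in init(), the request content handled here
        # is assumed to look roughly like
        #   (ONT::PERFORM :content (ONT::SIMULATE-MODEL ...))
        # and the reply is sent back as a (reply :content ...) performative.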
content_list = cast(KQMLList, content)
task_str = content_list.get(0).toString().upper()
if task_str == 'ONT::PERFORM':
subtask = cast(KQMLList,content_list.getKeywordArg(':content'))
subtask_str = subtask.get(0).toString().upper()
if subtask_str == 'ONT::SIMULATE-MODEL':
reply_content = self.respond_simulate_model(content_list)
else:
self.error_reply(msg, 'unknown request subtask ' + subtask_str)
return
else:
self.error_reply(msg, 'unknown request task ' + task_str)
return
reply_msg = KQMLPerformative('reply')
reply_msg.setParameter(':content', cast(KQMLObject, reply_content))
self.reply(msg, reply_msg)
def respond_simulate_model(self, content_list):
'''
Response content to simulate-model request
'''
# TODO: implement
reply_content = KQMLList()
reply_content.add('()')
return reply_content
if __name__ == "__main__":
MEA_Module(['-name', 'MEA'] + sys.argv[1:]).run()
|
Add TRIPS module for MEAimport sys
from jnius import autoclass, cast
from TripsModule import trips_module
KQMLPerformative = autoclass('TRIPS.KQML.KQMLPerformative')
KQMLList = autoclass('TRIPS.KQML.KQMLList')
KQMLObject = autoclass('TRIPS.KQML.KQMLObject')
from bioagents.mea import MEA
class MEA_Module(trips_module.TripsModule):
def __init__(self, argv):
super(MEA_Module, self).__init__(argv)
self.tasks = {'ONT::PERFORM': ['ONT::SIMULATE-MODEL']}
def init(self):
'''
Initialize TRIPS module
'''
super(MEA_Module, self).init()
# Send subscribe messages
for task, subtasks in self.tasks.iteritems():
for subtask in subtasks:
msg_txt = '(subscribe :content (request &key :content ' +\
'(%s &key :content (%s . *))))' % (task, subtask)
self.send(KQMLPerformative.fromString(msg_txt))
# Instantiate a singleton MEA agent
self.mea = MEA()
self.ready()
def receive_request(self, msg, content):
'''
If a "request" message is received, decode the task and the content
and call the appropriate function to prepare the response. A reply
"tell" message is then sent back.
'''
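        # Illustrative only (not part of the original commit): given the
        # subscription registered in init(), the request content handled here
        # is assumed to look roughly like
        #   (ONT::PERFORM :content (ONT::SIMULATE-MODEL ...))
        # and the reply is sent back as a (reply :content ...) performative.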
content_list = cast(KQMLList, content)
task_str = content_list.get(0).toString().upper()
if task_str == 'ONT::PERFORM':
subtask = cast(KQMLList,content_list.getKeywordArg(':content'))
subtask_str = subtask.get(0).toString().upper()
if subtask_str == 'ONT::SIMULATE-MODEL':
reply_content = self.respond_simulate_model(content_list)
else:
self.error_reply(msg, 'unknown request subtask ' + subtask_str)
return
else:
self.error_reply(msg, 'unknown request task ' + task_str)
return
reply_msg = KQMLPerformative('reply')
reply_msg.setParameter(':content', cast(KQMLObject, reply_content))
self.reply(msg, reply_msg)
def respond_simulate_model(self, content_list):
'''
Response content to simulate-model request
'''
# TODO: implement
reply_content = KQMLList()
reply_content.add('()')
return reply_content
if __name__ == "__main__":
MEA_Module(['-name', 'MEA'] + sys.argv[1:]).run()
|
<commit_before><commit_msg>Add TRIPS module for MEA<commit_after>import sys
from jnius import autoclass, cast
from TripsModule import trips_module
KQMLPerformative = autoclass('TRIPS.KQML.KQMLPerformative')
KQMLList = autoclass('TRIPS.KQML.KQMLList')
KQMLObject = autoclass('TRIPS.KQML.KQMLObject')
from bioagents.mea import MEA
class MEA_Module(trips_module.TripsModule):
def __init__(self, argv):
super(MEA_Module, self).__init__(argv)
self.tasks = {'ONT::PERFORM': ['ONT::SIMULATE-MODEL']}
def init(self):
'''
Initialize TRIPS module
'''
super(MEA_Module, self).init()
# Send subscribe messages
for task, subtasks in self.tasks.iteritems():
for subtask in subtasks:
msg_txt = '(subscribe :content (request &key :content ' +\
'(%s &key :content (%s . *))))' % (task, subtask)
self.send(KQMLPerformative.fromString(msg_txt))
# Instantiate a singleton MEA agent
self.mea = MEA()
self.ready()
def receive_request(self, msg, content):
'''
If a "request" message is received, decode the task and the content
and call the appropriate function to prepare the response. A reply
"tell" message is then sent back.
'''
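        # Illustrative only (not part of the original commit): given the
        # subscription registered in init(), the request content handled here
        # is assumed to look roughly like
        #   (ONT::PERFORM :content (ONT::SIMULATE-MODEL ...))
        # and the reply is sent back as a (reply :content ...) performative.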
content_list = cast(KQMLList, content)
task_str = content_list.get(0).toString().upper()
if task_str == 'ONT::PERFORM':
subtask = cast(KQMLList,content_list.getKeywordArg(':content'))
subtask_str = subtask.get(0).toString().upper()
if subtask_str == 'ONT::SIMULATE-MODEL':
reply_content = self.respond_simulate_model(content_list)
else:
self.error_reply(msg, 'unknown request subtask ' + subtask_str)
return
else:
self.error_reply(msg, 'unknown request task ' + task_str)
return
reply_msg = KQMLPerformative('reply')
reply_msg.setParameter(':content', cast(KQMLObject, reply_content))
self.reply(msg, reply_msg)
def respond_simulate_model(self, content_list):
'''
Response content to simulate-model request
'''
# TODO: implement
reply_content = KQMLList()
reply_content.add('()')
return reply_content
if __name__ == "__main__":
MEA_Module(['-name', 'MEA'] + sys.argv[1:]).run()
|
|
dfa6d3b3fe148895599360d01195200b06e7583d
|
receiver-dump.py
|
receiver-dump.py
|
#******************************************************************************
#
# receiver-dump.py
#
#******************************************************************************
#
# Author: Werner Lane
# E-mail: laneboysrc@gmail.com
#
#******************************************************************************
import serial
import sys
import time
jitterString0x07 = [
'|* .. |',
'| * .. |',
'| *.. |',
'| *. |',
'| .* |',
'| ..* |',
'| .. * |',
'| .. *|'
]
jitterString0x0f = [
'|* .. |',
'| * .. |',
'| * .. |',
'| * .. |',
'| * .. |',
'| * .. |',
'| *.. |',
'| *. |',
'| .* |',
'| ..* |',
'| .. * |',
'| .. * |',
'| .. * |',
'| .. * |',
'| .. * |',
'| .. *|'
]
def int2bin(n, count=32):
"""returns the binary of integer n, using count number of digits"""
return "".join([str((n >> y) & 1) for y in range(count-1, -1, -1)])
def dump(port):
try:
s = serial.Serial(port, 38400)
except serial.SerialException, e:
print("Unable to open port %s.\nError message: %s" % (port, e))
sys.exit(0)
oldValue = 0
oldData = 0
startTime = time.time()
bigDiff = False
while True:
c = s.read(1)
numString = ""
while c != "\n":
numString = numString + c
c = s.read(1)
try:
value = int(numString, 10)
except ValueError:
value = 0
if abs(oldValue - value) > 4:
#print abs(oldValue - value)
if bigDiff or abs(oldValue - value) > 32:
oldValue = value
bigDiff = False
else:
bigDiff = True
#print "GLITCH", abs(oldValue - value)
else:
oldValue = value
#print "%s %d" % (int2bin(value >> 3, 7), value & 0x7)
jitter = value & 0x7
print jitterString0x07[jitter],
data = value >> 3
if oldData != data:
now = time.time()
print "%3d %s" % (int((now - startTime) * 1000), int2bin(data, 6)),
startTime = now
oldData = data
print
if __name__ == '__main__':
try:
port = sys.argv[1]
except IndexError:
port = '/dev/ttyUSB0'
try:
dump(port)
except KeyboardInterrupt:
print ""
sys.exit(0)
|
Add receiver dump program, output of firmware
|
Add receiver dump program, output of firmware
|
Python
|
unlicense
|
laneboysrc/hk310-expansion,laneboysrc/hk310-expansion,laneboysrc/hk310-expansion
|
Add receiver dump program, output of firmware
|
#******************************************************************************
#
# receiver-dump.py
#
#******************************************************************************
#
# Author: Werner Lane
# E-mail: laneboysrc@gmail.com
#
#******************************************************************************
import serial
import sys
import time
jitterString0x07 = [
'|* .. |',
'| * .. |',
'| *.. |',
'| *. |',
'| .* |',
'| ..* |',
'| .. * |',
'| .. *|'
]
jitterString0x0f = [
'|* .. |',
'| * .. |',
'| * .. |',
'| * .. |',
'| * .. |',
'| * .. |',
'| *.. |',
'| *. |',
'| .* |',
'| ..* |',
'| .. * |',
'| .. * |',
'| .. * |',
'| .. * |',
'| .. * |',
'| .. *|'
]
def int2bin(n, count=32):
"""returns the binary of integer n, using count number of digits"""
return "".join([str((n >> y) & 1) for y in range(count-1, -1, -1)])
def dump(port):
try:
s = serial.Serial(port, 38400)
except serial.SerialException, e:
print("Unable to open port %s.\nError message: %s" % (port, e))
sys.exit(0)
oldValue = 0
oldData = 0
startTime = time.time()
bigDiff = False
while True:
c = s.read(1)
numString = ""
while c != "\n":
numString = numString + c
c = s.read(1)
try:
value = int(numString, 10)
except ValueError:
value = 0
if abs(oldValue - value) > 4:
#print abs(oldValue - value)
if bigDiff or abs(oldValue - value) > 32:
oldValue = value
bigDiff = False
else:
bigDiff = True
#print "GLITCH", abs(oldValue - value)
else:
oldValue = value
#print "%s %d" % (int2bin(value >> 3, 7), value & 0x7)
jitter = value & 0x7
print jitterString0x07[jitter],
data = value >> 3
if oldData != data:
now = time.time()
print "%3d %s" % (int((now - startTime) * 1000), int2bin(data, 6)),
startTime = now
oldData = data
print
if __name__ == '__main__':
try:
port = sys.argv[1]
except IndexError:
port = '/dev/ttyUSB0'
try:
dump(port)
except KeyboardInterrupt:
print ""
sys.exit(0)
|
<commit_before><commit_msg>Add receiver dump program, output of firmware<commit_after>
|
#******************************************************************************
#
# receiver-dump.py
#
#******************************************************************************
#
# Author: Werner Lane
# E-mail: laneboysrc@gmail.com
#
#******************************************************************************
import serial
import sys
import time
jitterString0x07 = [
'|* .. |',
'| * .. |',
'| *.. |',
'| *. |',
'| .* |',
'| ..* |',
'| .. * |',
'| .. *|'
]
jitterString0x0f = [
'|* .. |',
'| * .. |',
'| * .. |',
'| * .. |',
'| * .. |',
'| * .. |',
'| *.. |',
'| *. |',
'| .* |',
'| ..* |',
'| .. * |',
'| .. * |',
'| .. * |',
'| .. * |',
'| .. * |',
'| .. *|'
]
def int2bin(n, count=32):
"""returns the binary of integer n, using count number of digits"""
return "".join([str((n >> y) & 1) for y in range(count-1, -1, -1)])
def dump(port):
try:
s = serial.Serial(port, 38400)
except serial.SerialException, e:
print("Unable to open port %s.\nError message: %s" % (port, e))
sys.exit(0)
oldValue = 0
oldData = 0
startTime = time.time()
bigDiff = False
while True:
c = s.read(1)
numString = ""
while c != "\n":
numString = numString + c
c = s.read(1)
try:
value = int(numString, 10)
except ValueError:
value = 0
if abs(oldValue - value) > 4:
#print abs(oldValue - value)
if bigDiff or abs(oldValue - value) > 32:
oldValue = value
bigDiff = False
else:
bigDiff = True
#print "GLITCH", abs(oldValue - value)
else:
oldValue = value
#print "%s %d" % (int2bin(value >> 3, 7), value & 0x7)
jitter = value & 0x7
print jitterString0x07[jitter],
data = value >> 3
if oldData != data:
now = time.time()
print "%3d %s" % (int((now - startTime) * 1000), int2bin(data, 6)),
startTime = now
oldData = data
print
if __name__ == '__main__':
try:
port = sys.argv[1]
except IndexError:
port = '/dev/ttyUSB0'
try:
dump(port)
except KeyboardInterrupt:
print ""
sys.exit(0)
|
Add receiver dump program, output of firmware#******************************************************************************
#
# receiver-dump.py
#
#******************************************************************************
#
# Author: Werner Lane
# E-mail: laneboysrc@gmail.com
#
#******************************************************************************
import serial
import sys
import time
jitterString0x07 = [
'|* .. |',
'| * .. |',
'| *.. |',
'| *. |',
'| .* |',
'| ..* |',
'| .. * |',
'| .. *|'
]
jitterString0x0f = [
'|* .. |',
'| * .. |',
'| * .. |',
'| * .. |',
'| * .. |',
'| * .. |',
'| *.. |',
'| *. |',
'| .* |',
'| ..* |',
'| .. * |',
'| .. * |',
'| .. * |',
'| .. * |',
'| .. * |',
'| .. *|'
]
def int2bin(n, count=32):
"""returns the binary of integer n, using count number of digits"""
return "".join([str((n >> y) & 1) for y in range(count-1, -1, -1)])
def dump(port):
try:
s = serial.Serial(port, 38400)
except serial.SerialException, e:
print("Unable to open port %s.\nError message: %s" % (port, e))
sys.exit(0)
oldValue = 0
oldData = 0
startTime = time.time()
bigDiff = False
while True:
c = s.read(1)
numString = ""
while c != "\n":
numString = numString + c
c = s.read(1)
try:
value = int(numString, 10)
except ValueError:
value = 0
if abs(oldValue - value) > 4:
#print abs(oldValue - value)
if bigDiff or abs(oldValue - value) > 32:
oldValue = value
bigDiff = False
else:
bigDiff = True
#print "GLITCH", abs(oldValue - value)
else:
oldValue = value
#print "%s %d" % (int2bin(value >> 3, 7), value & 0x7)
jitter = value & 0x7
print jitterString0x07[jitter],
data = value >> 3
if oldData != data:
now = time.time()
print "%3d %s" % (int((now - startTime) * 1000), int2bin(data, 6)),
startTime = now
oldData = data
print
if __name__ == '__main__':
try:
port = sys.argv[1]
except IndexError:
port = '/dev/ttyUSB0'
try:
dump(port)
except KeyboardInterrupt:
print ""
sys.exit(0)
|
<commit_before><commit_msg>Add receiver dump program, output of firmware<commit_after>#******************************************************************************
#
# receiver-dump.py
#
#******************************************************************************
#
# Author: Werner Lane
# E-mail: laneboysrc@gmail.com
#
#******************************************************************************
import serial
import sys
import time
jitterString0x07 = [
'|* .. |',
'| * .. |',
'| *.. |',
'| *. |',
'| .* |',
'| ..* |',
'| .. * |',
'| .. *|'
]
jitterString0x0f = [
'|* .. |',
'| * .. |',
'| * .. |',
'| * .. |',
'| * .. |',
'| * .. |',
'| *.. |',
'| *. |',
'| .* |',
'| ..* |',
'| .. * |',
'| .. * |',
'| .. * |',
'| .. * |',
'| .. * |',
'| .. *|'
]
def int2bin(n, count=32):
"""returns the binary of integer n, using count number of digits"""
return "".join([str((n >> y) & 1) for y in range(count-1, -1, -1)])
def dump(port):
try:
s = serial.Serial(port, 38400)
except serial.SerialException, e:
print("Unable to open port %s.\nError message: %s" % (port, e))
sys.exit(0)
oldValue = 0
oldData = 0
startTime = time.time()
bigDiff = False
while True:
c = s.read(1)
numString = ""
while c != "\n":
numString = numString + c
c = s.read(1)
try:
value = int(numString, 10)
except ValueError:
value = 0
if abs(oldValue - value) > 4:
#print abs(oldValue - value)
if bigDiff or abs(oldValue - value) > 32:
oldValue = value
bigDiff = False
else:
bigDiff = True
#print "GLITCH", abs(oldValue - value)
else:
oldValue = value
#print "%s %d" % (int2bin(value >> 3, 7), value & 0x7)
jitter = value & 0x7
print jitterString0x07[jitter],
data = value >> 3
if oldData != data:
now = time.time()
print "%3d %s" % (int((now - startTime) * 1000), int2bin(data, 6)),
startTime = now
oldData = data
print
if __name__ == '__main__':
try:
port = sys.argv[1]
except IndexError:
port = '/dev/ttyUSB0'
try:
dump(port)
except KeyboardInterrupt:
print ""
sys.exit(0)
|
|
678ba5e332b7c54cdb39d117dc443bf88da7d111
|
jsontosql.py
|
jsontosql.py
|
import os
import os.path
from json import loads
import click
from vendcrawler.scripts.vendcrawlerdb import VendCrawlerDB
class JSONToSQL(object):
def __init__(self, json, user, password, database):
self.data = loads(json.read())
self.db = VendCrawlerDB(user, password, database)
table = 'items'
columns = ['item_id', 'item_name', 'vendor_id', 'shop_name',
'amount', 'price', 'map', 'datetime']
values = []
        for items in self.data:
for item in items:
value = [int(item['id']),
item['name'],
int(item['vendor_id']),
item['shop'],
int(item['amount'].replace(',', '')),
int(item['price'].replace(',', '')),
item['map'],
item['datetime']]
values.append(value)
        self.db.insert(table, columns, values)
@click.command()
@click.argument('json', type=click.File('r'))
@click.argument('user')
@click.argument('password')
@click.argument('database')
def cli(json, user, password, database):
JSONToSQL(json, user, password, database)
if __name__ == '__main__':
cli()
|
Add json to sql conversion script.
|
Add json to sql conversion script.
|
Python
|
mit
|
josetaas/vendcrawler,josetaas/vendcrawler,josetaas/vendcrawler
|
Add json to sql conversion script.
|
import os
import os.path
from json import loads
import click
from vendcrawler.scripts.vendcrawlerdb import VendCrawlerDB
class JSONToSQL(object):
def __init__(self, json, user, password, database):
self.data = loads(json.read())
self.db = VendCrawlerDB(user, password, database)
table = 'items'
columns = ['item_id', 'item_name', 'vendor_id', 'shop_name',
'amount', 'price', 'map', 'datetime']
values = []
        for items in self.data:
for item in items:
value = [int(item['id']),
item['name'],
int(item['vendor_id']),
item['shop'],
int(item['amount'].replace(',', '')),
int(item['price'].replace(',', '')),
item['map'],
item['datetime']]
values.append(value)
        self.db.insert(table, columns, values)
@click.command()
@click.argument('json', type=click.File('r'))
@click.argument('user')
@click.argument('password')
@click.argument('database')
def cli(json, user, password, database):
JSONToSQL(json, user, password, database)
if __name__ == '__main__':
cli()
|
<commit_before><commit_msg>Add json to sql conversion script.<commit_after>
|
import os
import os.path
from json import loads
import click
from vendcrawler.scripts.vendcrawlerdb import VendCrawlerDB
class JSONToSQL(object):
def __init__(self, json, user, password, database):
self.data = loads(json.read())
self.db = VendCrawlerDB(user, password, database)
table = 'items'
columns = ['item_id', 'item_name', 'vendor_id', 'shop_name',
'amount', 'price', 'map', 'datetime']
values = []
        for items in self.data:
for item in items:
value = [int(item['id']),
item['name'],
int(item['vendor_id']),
item['shop'],
int(item['amount'].replace(',', '')),
int(item['price'].replace(',', '')),
item['map'],
item['datetime']]
values.append(value)
        self.db.insert(table, columns, values)
@click.command()
@click.argument('json', type=click.File('r'))
@click.argument('user')
@click.argument('password')
@click.argument('database')
def cli(json, user, password, database):
JSONToSQL(json, user, password, database)
if __name__ == '__main__':
cli()
|
Add json to sql conversion script.import os
import os.path
from json import loads
import click
from vendcrawler.scripts.vendcrawlerdb import VendCrawlerDB
class JSONToSQL(object):
def __init__(self, json, user, password, database):
self.data = loads(json.read())
self.db = VendCrawlerDB(user, password, database)
table = 'items'
columns = ['item_id', 'item_name', 'vendor_id', 'shop_name',
'amount', 'price', 'map', 'datetime']
values = []
        for items in self.data:
for item in items:
value = [int(item['id']),
item['name'],
int(item['vendor_id']),
item['shop'],
int(item['amount'].replace(',', '')),
int(item['price'].replace(',', '')),
item['map'],
item['datetime']]
values.append(value)
        self.db.insert(table, columns, values)
@click.command()
@click.argument('json', type=click.File('r'))
@click.argument('user')
@click.argument('password')
@click.argument('database')
def cli(json, user, password, database):
JSONToSQL(json, user, password, database)
if __name__ == '__main__':
cli()
|
<commit_before><commit_msg>Add json to sql conversion script.<commit_after>import os
import os.path
from json import loads
import click
from vendcrawler.scripts.vendcrawlerdb import VendCrawlerDB
class JSONToSQL(object):
def __init__(self, json, user, password, database):
self.data = loads(json.read())
self.db = VendCrawlerDB(user, password, database)
table = 'items'
columns = ['item_id', 'item_name', 'vendor_id', 'shop_name',
'amount', 'price', 'map', 'datetime']
values = []
        for items in self.data:
for item in items:
value = [int(item['id']),
item['name'],
int(item['vendor_id']),
item['shop'],
int(item['amount'].replace(',', '')),
int(item['price'].replace(',', '')),
item['map'],
item['datetime']]
values.append(value)
        self.db.insert(table, columns, values)
@click.command()
@click.argument('json', type=click.File('r'))
@click.argument('user')
@click.argument('password')
@click.argument('database')
def cli(json, user, password, database):
JSONToSQL(json, user, password, database)
if __name__ == '__main__':
cli()
|
|
b0db73209cbe29bf070581fd81afc6fbf86af718
|
skan/draw.py
|
skan/draw.py
|
import numpy as np
import matplotlib.pyplot as plt
from skimage import img_as_float
from skimage.color import gray2rgb
from .csr import summarise
def _normalise_image(image, *, image_cmap=None):
image = img_as_float(image)
if image.ndim == 2:
if image_cmap is None:
image = gray2rgb(image)
else:
image = plt.get_cmap(image_cmap)(image)[..., :3]
return image
def overlay_skeleton_2d(image, skeleton, *,
image_cmap=None, color=(1, 0, 0), alpha=1, axes=None):
image = _normalise_image(image, image_cmap=image_cmap)
skeleton = skeleton.astype(bool)
if axes is None:
fig, axes = plt.subplots()
image[skeleton] = alpha * np.array(color) + (1 - alpha) * image[skeleton]
axes.imshow(image)
axes.axis('off')
return axes
def overlay_euclidean_skeleton_2d(image, skeleton, *,
image_cmap=None,
skeleton_color_source='branch-type',
skeleton_colormap='viridis',
axes=None):
image = _normalise_image(image, image_cmap=image_cmap)
summary = summarise(skeleton)
coords_cols = (['img-coord-0-%i' % i for i in range(2)] +
['img-coord-1-%i' % i for i in range(2)])
coords = summary[coords_cols]
if axes is None:
fig, axes = plt.subplots()
axes.imshow(image)
axes.axis('off')
color_values = summary[skeleton_color_source]
cmap = plt.get_cmap(skeleton_colormap,
min(len(np.unique(color_values)), 256))
colormapped = cmap(color_values)
    for (_, (r0, c0, r1, c1)), color in zip(coords.iterrows(),
                                            colormapped):
axes.plot([c0, c1], [r0, r1], color=color, marker=None)
return axes
|
Add first draft of plotting functions
|
Add first draft of plotting functions
|
Python
|
bsd-3-clause
|
jni/skan
|
Add first draft of plotting functions
|
import numpy as np
import matplotlib.pyplot as plt
from skimage import img_as_float
from skimage.color import gray2rgb
from .csr import summarise
def _normalise_image(image, *, image_cmap=None):
image = img_as_float(image)
if image.ndim == 2:
if image_cmap is None:
image = gray2rgb(image)
else:
image = plt.get_cmap(image_cmap)(image)[..., :3]
return image
def overlay_skeleton_2d(image, skeleton, *,
image_cmap=None, color=(1, 0, 0), alpha=1, axes=None):
image = _normalise_image(image, image_cmap=image_cmap)
skeleton = skeleton.astype(bool)
if axes is None:
fig, axes = plt.subplots()
image[skeleton] = alpha * np.array(color) + (1 - alpha) * image[skeleton]
axes.imshow(image)
axes.axis('off')
return axes
def overlay_euclidean_skeleton_2d(image, skeleton, *,
image_cmap=None,
skeleton_color_source='branch-type',
skeleton_colormap='viridis',
axes=None):
image = _normalise_image(image, image_cmap=image_cmap)
summary = summarise(skeleton)
coords_cols = (['img-coord-0-%i' % i for i in range(2)] +
['img-coord-1-%i' % i for i in range(2)])
coords = summary[coords_cols]
if axes is None:
fig, axes = plt.subplots()
axes.imshow(image)
axes.axis('off')
color_values = summary[skeleton_color_source]
cmap = plt.get_cmap(skeleton_colormap,
min(len(np.unique(color_values)), 256))
colormapped = cmap(color_values)
    for (_, (r0, c0, r1, c1)), color in zip(coords.iterrows(),
                                            colormapped):
axes.plot([c0, c1], [r0, r1], color=color, marker=None)
return axes
|
<commit_before><commit_msg>Add first draft of plotting functions<commit_after>
|
import numpy as np
import matplotlib.pyplot as plt
from skimage import img_as_float
from skimage.color import gray2rgb
from .csr import summarise
def _normalise_image(image, *, image_cmap=None):
image = img_as_float(image)
if image.ndim == 2:
if image_cmap is None:
image = gray2rgb(image)
else:
image = plt.get_cmap(image_cmap)(image)[..., :3]
return image
def overlay_skeleton_2d(image, skeleton, *,
image_cmap=None, color=(1, 0, 0), alpha=1, axes=None):
image = _normalise_image(image, image_cmap=image_cmap)
skeleton = skeleton.astype(bool)
if axes is None:
fig, axes = plt.subplots()
image[skeleton] = alpha * np.array(color) + (1 - alpha) * image[skeleton]
axes.imshow(image)
axes.axis('off')
return axes
def overlay_euclidean_skeleton_2d(image, skeleton, *,
image_cmap=None,
skeleton_color_source='branch-type',
skeleton_colormap='viridis',
axes=None):
image = _normalise_image(image, image_cmap=image_cmap)
summary = summarise(skeleton)
coords_cols = (['img-coord-0-%i' % i for i in range(2)] +
['img-coord-1-%i' % i for i in range(2)])
coords = summary[coords_cols]
if axes is None:
fig, axes = plt.subplots()
axes.imshow(image)
axes.axis('off')
color_values = summary[skeleton_color_source]
cmap = plt.get_cmap(skeleton_colormap,
min(len(np.unique(color_values)), 256))
colormapped = cmap(color_values)
    for (_, (r0, c0, r1, c1)), color in zip(coords.iterrows(),
                                            colormapped):
axes.plot([c0, c1], [r0, r1], color=color, marker=None)
return axes
|
Add first draft of plotting functionsimport numpy as np
import matplotlib.pyplot as plt
from skimage import img_as_float
from skimage.color import gray2rgb
from .csr import summarise
def _normalise_image(image, *, image_cmap=None):
image = img_as_float(image)
if image.ndim == 2:
if image_cmap is None:
image = gray2rgb(image)
else:
image = plt.get_cmap(image_cmap)(image)[..., :3]
return image
def overlay_skeleton_2d(image, skeleton, *,
image_cmap=None, color=(1, 0, 0), alpha=1, axes=None):
image = _normalise_image(image, image_cmap=image_cmap)
skeleton = skeleton.astype(bool)
if axes is None:
fig, axes = plt.subplots()
image[skeleton] = alpha * np.array(color) + (1 - alpha) * image[skeleton]
axes.imshow(image)
axes.axis('off')
return axes
def overlay_euclidean_skeleton_2d(image, skeleton, *,
image_cmap=None,
skeleton_color_source='branch-type',
skeleton_colormap='viridis',
axes=None):
image = _normalise_image(image, image_cmap=image_cmap)
summary = summarise(skeleton)
coords_cols = (['img-coord-0-%i' % i for i in range(2)] +
['img-coord-1-%i' % i for i in range(2)])
coords = summary[coords_cols]
if axes is None:
fig, axes = plt.subplots()
axes.imshow(image)
axes.axis('off')
color_values = summary[skeleton_color_source]
cmap = plt.get_cmap(skeleton_colormap,
min(len(np.unique(color_values)), 256))
colormapped = cmap(color_values)
    for (_, (r0, c0, r1, c1)), color in zip(coords.iterrows(),
                                            colormapped):
axes.plot([c0, c1], [r0, r1], color=color, marker=None)
return axes
|
<commit_before><commit_msg>Add first draft of plotting functions<commit_after>import numpy as np
import matplotlib.pyplot as plt
from skimage import img_as_float
from skimage.color import gray2rgb
from .csr import summarise
def _normalise_image(image, *, image_cmap=None):
image = img_as_float(image)
if image.ndim == 2:
if image_cmap is None:
image = gray2rgb(image)
else:
image = plt.get_cmap(image_cmap)(image)[..., :3]
return image
def overlay_skeleton_2d(image, skeleton, *,
image_cmap=None, color=(1, 0, 0), alpha=1, axes=None):
image = _normalise_image(image, image_cmap=image_cmap)
skeleton = skeleton.astype(bool)
if axes is None:
fig, axes = plt.subplots()
image[skeleton] = alpha * np.array(color) + (1 - alpha) * image[skeleton]
axes.imshow(image)
axes.axis('off')
return axes
def overlay_euclidean_skeleton_2d(image, skeleton, *,
image_cmap=None,
skeleton_color_source='branch-type',
skeleton_colormap='viridis',
axes=None):
image = _normalise_image(image, image_cmap=image_cmap)
summary = summarise(skeleton)
coords_cols = (['img-coord-0-%i' % i for i in range(2)] +
['img-coord-1-%i' % i for i in range(2)])
coords = summary[coords_cols]
if axes is None:
fig, axes = plt.subplots()
axes.imshow(image)
axes.axis('off')
color_values = summary[skeleton_color_source]
cmap = plt.get_cmap(skeleton_colormap,
min(len(np.unique(color_values)), 256))
colormapped = cmap(color_values)
    for (_, (r0, c0, r1, c1)), color in zip(coords.iterrows(),
                                            colormapped):
axes.plot([c0, c1], [r0, r1], color=color, marker=None)
return axes
|
|
7174061ac5ae2453c3369c7bb7dd01e45ee36092
|
scripts/pseudo_users_and_groups_operations.py
|
scripts/pseudo_users_and_groups_operations.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
import os
import sys
def main(argv=None):
"""
То, с чего начинается программа
"""
if argv == None:
argv=sys.argv
parser = argparse.ArgumentParser(
description=\
"""
Данный скрипт создаёт пользователей и группы,
а так же добавляет пользователей в группы. При этом
пользователи и группы берутся из специальных файлов,
которые предоставляется утилитами разбора статистики.
Например утилитой parse_slurm_db.py
""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
epilog="Например можно запустить так:\n "+argv[0]+" --prefix /tmp/cluster_name_"
)
parser.add_argument(
'--prefix',
dest='prefix',
required=True,
default="./",
help="префикс, по которому находятся файлы с отображениями пользователей"
)
parser.add_argument(
'--homes-prefix',
dest='homes_prefix',
required=False,
default="/home/pseudo_cluster_users/",
help="префикс, по которому находятся каталоги пользователей псевдокластера"
)
args=parser.parse_args()
user_group_map=dict()
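    # Assumed input format (illustrative, not part of the original commit):
    # each line of "user_in_groups_map" looks like
    #   username: group1, group2
    # and each line of "users_map" starts with "username:".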
    file_descr=open(args.prefix+"user_in_groups_map","r")
for line in file_descr:
tupl=line.split(':')
user=tupl[0].strip()
groups_line=tupl[1].strip()
groups_tupl=groups_line.split(',')
groups=list()
for group in groups_tupl:
groups.append(group.strip(' \t\n\r'))
user_group_map[user]=groups
file_descr.close()
    file_descr=open(args.prefix+"users_map","r")
for line in file_descr:
tupl=line.split(':')
user=tupl[0].strip()
command_line="useradd "
command_line+="--create-home --home \'%s/%s\' " % (args.homes_prefix, user)
command_line+="--gid '%s' " % user_group_map[user][0]
#os.system()
print command_line
|
Create pseudo_users and pseudo_groups. In future it will remove pseudo users and pseudo groups too.
|
Create pseudo_users and pseudo_groups.
In future it will remove pseudo users and pseudo groups too.
|
Python
|
lgpl-2.1
|
pseudo-cluster/pseudo-cluster,pseudo-cluster/pseudo-cluster
|
Create pseudo_users and pseudo_groups.
In future it will remove pseudo users and pseudo groups too.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
import os
import sys
def main(argv=None):
"""
То, с чего начинается программа
"""
if argv == None:
argv=sys.argv
parser = argparse.ArgumentParser(
description=\
"""
Данный скрипт создаёт пользователей и группы,
а так же добавляет пользователей в группы. При этом
пользователи и группы берутся из специальных файлов,
которые предоставляется утилитами разбора статистики.
Например утилитой parse_slurm_db.py
""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
epilog="Например можно запустить так:\n "+argv[0]+" --prefix /tmp/cluster_name_"
)
parser.add_argument(
'--prefix',
dest='prefix',
required=True,
default="./",
help="префикс, по которому находятся файлы с отображениями пользователей"
)
parser.add_argument(
'--homes-prefix',
dest='homes_prefix',
required=False,
default="/home/pseudo_cluster_users/",
help="префикс, по которому находятся каталоги пользователей псевдокластера"
)
args=parser.parse_args()
user_group_map=dict()
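    # Assumed input format (illustrative, not part of the original commit):
    # each line of "user_in_groups_map" looks like
    #   username: group1, group2
    # and each line of "users_map" starts with "username:".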
    file_descr=open(args.prefix+"user_in_groups_map","r")
for line in file_descr:
tupl=line.split(':')
user=tupl[0].strip()
groups_line=tupl[1].strip()
groups_tupl=groups_line.split(',')
groups=list()
for group in groups_tupl:
groups.append(group.strip(' \t\n\r'))
user_group_map[user]=groups
file_descr.close()
    file_descr=open(args.prefix+"users_map","r")
for line in file_descr:
tupl=line.split(':')
user=tupl[0].strip()
command_line="useradd "
command_line+="--create-home --home \'%s/%s\' " % (args.homes_prefix, user)
command_line+="--gid '%s' " % user_group_map[user][0]
#os.system()
print command_line
|
<commit_before><commit_msg>Create pseudo_users and pseudo_groups.
In future it will remove pseudo users and pseudo groups too.<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
import os
import sys
def main(argv=None):
"""
То, с чего начинается программа
"""
if argv == None:
argv=sys.argv
parser = argparse.ArgumentParser(
description=\
"""
Данный скрипт создаёт пользователей и группы,
а так же добавляет пользователей в группы. При этом
пользователи и группы берутся из специальных файлов,
которые предоставляется утилитами разбора статистики.
Например утилитой parse_slurm_db.py
""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
epilog="Например можно запустить так:\n "+argv[0]+" --prefix /tmp/cluster_name_"
)
parser.add_argument(
'--prefix',
dest='prefix',
required=True,
default="./",
help="префикс, по которому находятся файлы с отображениями пользователей"
)
parser.add_argument(
'--homes-prefix',
dest='homes_prefix',
required=False,
default="/home/pseudo_cluster_users/",
help="префикс, по которому находятся каталоги пользователей псевдокластера"
)
args=parser.parse_args()
user_group_map=dict()
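    # Assumed input format (illustrative, not part of the original commit):
    # each line of "user_in_groups_map" looks like
    #   username: group1, group2
    # and each line of "users_map" starts with "username:".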
    file_descr=open(args.prefix+"user_in_groups_map","r")
for line in file_descr:
tupl=line.split(':')
user=tupl[0].strip()
groups_line=tupl[1].strip()
groups_tupl=groups_line.split(',')
groups=list()
for group in groups_tupl:
groups.append(group.strip(' \t\n\r'))
user_group_map[user]=groups
file_descr.close()
    file_descr=open(args.prefix+"users_map","r")
for line in file_descr:
tupl=line.split(':')
user=tupl[0].strip()
command_line="useradd "
command_line+="--create-home --home \'%s/%s\' " % (args.homes_prefix, user)
command_line+="--gid '%s' " % user_group_map[user][0]
#os.system()
print command_line
|
Create pseudo_users and pseudo_groups.
In future it will remove pseudo users and pseudo groups too.#!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
import os
import sys
def main(argv=None):
"""
То, с чего начинается программа
"""
if argv == None:
argv=sys.argv
parser = argparse.ArgumentParser(
description=\
"""
Данный скрипт создаёт пользователей и группы,
а так же добавляет пользователей в группы. При этом
пользователи и группы берутся из специальных файлов,
которые предоставляется утилитами разбора статистики.
Например утилитой parse_slurm_db.py
""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
epilog="Например можно запустить так:\n "+argv[0]+" --prefix /tmp/cluster_name_"
)
parser.add_argument(
'--prefix',
dest='prefix',
required=True,
default="./",
help="префикс, по которому находятся файлы с отображениями пользователей"
)
parser.add_argument(
'--homes-prefix',
dest='homes_prefix',
required=False,
default="/home/pseudo_cluster_users/",
help="префикс, по которому находятся каталоги пользователей псевдокластера"
)
args=parser.parse_args()
user_group_map=dict()
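    # Assumed input format (illustrative, not part of the original commit):
    # each line of "user_in_groups_map" looks like
    #   username: group1, group2
    # and each line of "users_map" starts with "username:".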
    file_descr=open(args.prefix+"user_in_groups_map","r")
for line in file_descr:
tupl=line.split(':')
user=tupl[0].strip()
groups_line=tupl[1].strip()
groups_tupl=groups_line.split(',')
groups=list()
for group in groups_tupl:
groups.append(group.strip(' \t\n\r'))
user_group_map[user]=groups
file_descr.close()
    file_descr=open(args.prefix+"users_map","r")
for line in file_descr:
tupl=line.split(':')
user=tupl[0].strip()
command_line="useradd "
command_line+="--create-home --home \'%s/%s\' " % (args.homes_prefix, user)
command_line+="--gid '%s' " % user_group_map[user][0]
#os.system()
print command_line
|
<commit_before><commit_msg>Create pseudo_users and pseudo_groups.
In future it will remove pseudo users and pseudo groups too.<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
import os
import sys
def main(argv=None):
"""
То, с чего начинается программа
"""
if argv == None:
argv=sys.argv
parser = argparse.ArgumentParser(
description=\
"""
Данный скрипт создаёт пользователей и группы,
а так же добавляет пользователей в группы. При этом
пользователи и группы берутся из специальных файлов,
которые предоставляется утилитами разбора статистики.
Например утилитой parse_slurm_db.py
""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
epilog="Например можно запустить так:\n "+argv[0]+" --prefix /tmp/cluster_name_"
)
parser.add_argument(
'--prefix',
dest='prefix',
required=True,
default="./",
help="префикс, по которому находятся файлы с отображениями пользователей"
)
parser.add_argument(
'--homes-prefix',
dest='homes_prefix',
required=False,
default="/home/pseudo_cluster_users/",
help="префикс, по которому находятся каталоги пользователей псевдокластера"
)
args=parser.parse_args()
user_group_map=dict()
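    # Assumed input format (illustrative, not part of the original commit):
    # each line of "user_in_groups_map" looks like
    #   username: group1, group2
    # and each line of "users_map" starts with "username:".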
    file_descr=open(args.prefix+"user_in_groups_map","r")
for line in file_descr:
tupl=line.split(':')
user=tupl[0].strip()
groups_line=tupl[1].strip()
groups_tupl=groups_line.split(',')
groups=list()
for group in groups_tupl:
groups.append(group.strip(' \t\n\r'))
user_group_map[user]=groups
file_descr.close()
    file_descr=open(args.prefix+"users_map","r")
for line in file_descr:
tupl=line.split(':')
user=tupl[0].strip()
command_line="useradd "
command_line+="--create-home --home \'%s/%s\' " % (args.homes_prefix, user)
command_line+="--gid '%s' " % user_group_map[user][0]
#os.system()
print command_line
|
|
54c1fe598d4a8d76ae7ec0ce867f45af056c12e6
|
Lab/10/Template_10_A.py
|
Lab/10/Template_10_A.py
|
palindrom (kata) :
#TODO : implementasi fungsi palindrom untuk menyelesaikan soal palindrom
checkActiveAmoeba (hariKe, amoebaAktif) :
#TODO : implementasi fungsi ini untuk menyelesaikan soal Amoeba
kataPalindrom = input("Masukkan kata : ")
#TODO : cetak keluaran sesuai permintaan soal
hari = int(input("Cek amoeba aktif pada hari ke : "))
#TODO : cetak keluaran sesuai permintaan soal
"""
Soal AMOEBA
Test Case 1 :
Hari ke : 3
>> 32
Hari ke : 5
>> 512
Untuk hariKe <= 0 :
cetak "ERROR"
"""
|
Add template for lab 10 class A
|
Add template for lab 10 class A
|
Python
|
mit
|
laymonage/TarungLab,giovanism/TarungLab
|
Add template for lab 10 class A
|
palindrom (kata) :
#TODO : implementasi fungsi palindrom untuk menyelesaikan soal palindrom
checkActiveAmoeba (hariKe, amoebaAktif) :
#TODO : implementasi fungsi ini untuk menyelesaikan soal Amoeba
kataPalindrom = input("Masukkan kata : ")
#TODO : cetak keluaran sesuai permintaan soal
hari = int(input("Cek amoeba aktif pada hari ke : "))
#TODO : cetak keluaran sesuai permintaan soal
"""
Soal AMOEBA
Test Case 1 :
Hari ke : 3
>> 32
Hari ke : 5
>> 512
Untuk hariKe <= 0 :
cetak "ERROR"
"""
|
<commit_before><commit_msg>Add template for lab 10 class A<commit_after>
|
palindrom (kata) :
#TODO : implementasi fungsi palindrom untuk menyelesaikan soal palindrom
checkActiveAmoeba (hariKe, amoebaAktif) :
#TODO : implementasi fungsi ini untuk menyelesaikan soal Amoeba
kataPalindrom = input("Masukkan kata : ")
#TODO : cetak keluaran sesuai permintaan soal
hari = int(input("Cek amoeba aktif pada hari ke : "))
#TODO : cetak keluaran sesuai permintaan soal
"""
Soal AMOEBA
Test Case 1 :
Hari ke : 3
>> 32
Hari ke : 5
>> 512
Untuk hariKe <= 0 :
cetak "ERROR"
"""
|
Add template for lab 10 class Apalindrom (kata) :
#TODO : implementasi fungsi palindrom untuk menyelesaikan soal palindrom
checkActiveAmoeba (hariKe, amoebaAktif) :
#TODO : implementasi fungsi ini untuk menyelesaikan soal Amoeba
kataPalindrom = input("Masukkan kata : ")
#TODO : cetak keluaran sesuai permintaan soal
hari = int(input("Cek amoeba aktif pada hari ke : "))
#TODO : cetak keluaran sesuai permintaan soal
"""
Soal AMOEBA
Test Case 1 :
Hari ke : 3
>> 32
Hari ke : 5
>> 512
Untuk hariKe <= 0 :
cetak "ERROR"
"""
|
<commit_before><commit_msg>Add template for lab 10 class A<commit_after>palindrom (kata) :
#TODO : implementasi fungsi palindrom untuk menyelesaikan soal palindrom
checkActiveAmoeba (hariKe, amoebaAktif) :
#TODO : implementasi fungsi ini untuk menyelesaikan soal Amoeba
kataPalindrom = input("Masukkan kata : ")
#TODO : cetak keluaran sesuai permintaan soal
hari = int(input("Cek amoeba aktif pada hari ke : "))
#TODO : cetak keluaran sesuai permintaan soal
"""
Soal AMOEBA
Test Case 1 :
Hari ke : 3
>> 32
Hari ke : 5
>> 512
Untuk hariKe <= 0 :
cetak "ERROR"
"""
|
|
254930d317ab61bc85169d8c64e2c6eab2d48f5f
|
tests/unit/cloudant_t/design_doc_test.py
|
tests/unit/cloudant_t/design_doc_test.py
|
#!/usr/bin/env python
"""
_design_doc_test_
"""
import mock
import unittest
from cloudant.design_document import DesignDocument
from cloudant.document import Document
class DesignDocTests(unittest.TestCase):
"""
tests for design doc object
"""
@mock.patch.object(Document, 'fetch')
def test_design_doc(self, mock_fetch):
"""test overridden methods work as expected"""
mock_database = mock.Mock()
ddoc = DesignDocument(mock_database, '_design/unittest')
ddoc['views'] = {
'view1' : {'map': "MAP", 'reduce': 'REDUCE'}
}
ddoc.fetch()
self.failUnless(mock_fetch.called)
views = [ x for _,x in ddoc.iterviews() ]
self.assertEqual(len(views), 1)
view = views[0]
self.failUnless('view1' in view)
self.assertEqual(view['view1']['map'], 'MAP')
self.assertEqual(view['view1']['reduce'], 'REDUCE')
self.failUnless('view1' in ddoc.views)
def test_ddoc_add_view(self):
mock_database = mock.Mock()
with mock.patch('cloudant.design_document.DesignDocument.save') as mock_save:
ddoc = DesignDocument(mock_database, '_design/unittest')
ddoc['views'] = {
'view1': {'map': "MAP", 'reduce': 'REDUCE'}
}
ddoc.add_view('view2', "MAP2")
self.failUnless('view2' in ddoc['views'])
self.assertEqual(ddoc['views']['view2'].map, 'MAP2')
self.assertEqual(ddoc['views']['view2'].reduce, None)
self.failUnless(mock_save.called)
def test_list_views(self):
mock_database = mock.Mock()
ddoc = DesignDocument(mock_database, '_design/unittest')
ddoc['views'] = {
'view1': {'map': "MAP", 'reduce': 'REDUCE'},
'view2': {'map': "MAP", 'reduce': 'REDUCE'},
}
self.assertEqual(ddoc.list_views(), ['view1', 'view2'])
if __name__ == '__main__':
unittest.main()
|
Copy DesignDocument tests to their own module
|
Copy DesignDocument tests to their own module
|
Python
|
apache-2.0
|
cloudant/python-cloudant
|
Copy DesignDocument tests to their own module
|
#!/usr/bin/env python
"""
_design_doc_test_
"""
import mock
import unittest
from cloudant.design_document import DesignDocument
from cloudant.document import Document
class DesignDocTests(unittest.TestCase):
"""
tests for design doc object
"""
@mock.patch.object(Document, 'fetch')
def test_design_doc(self, mock_fetch):
"""test overridden methods work as expected"""
mock_database = mock.Mock()
ddoc = DesignDocument(mock_database, '_design/unittest')
ddoc['views'] = {
'view1' : {'map': "MAP", 'reduce': 'REDUCE'}
}
ddoc.fetch()
self.failUnless(mock_fetch.called)
views = [ x for _,x in ddoc.iterviews() ]
self.assertEqual(len(views), 1)
view = views[0]
self.failUnless('view1' in view)
self.assertEqual(view['view1']['map'], 'MAP')
self.assertEqual(view['view1']['reduce'], 'REDUCE')
self.failUnless('view1' in ddoc.views)
def test_ddoc_add_view(self):
mock_database = mock.Mock()
with mock.patch('cloudant.design_document.DesignDocument.save') as mock_save:
ddoc = DesignDocument(mock_database, '_design/unittest')
ddoc['views'] = {
'view1': {'map': "MAP", 'reduce': 'REDUCE'}
}
ddoc.add_view('view2', "MAP2")
self.failUnless('view2' in ddoc['views'])
self.assertEqual(ddoc['views']['view2'].map, 'MAP2')
self.assertEqual(ddoc['views']['view2'].reduce, None)
self.failUnless(mock_save.called)
def test_list_views(self):
mock_database = mock.Mock()
ddoc = DesignDocument(mock_database, '_design/unittest')
ddoc['views'] = {
'view1': {'map': "MAP", 'reduce': 'REDUCE'},
'view2': {'map': "MAP", 'reduce': 'REDUCE'},
}
self.assertEqual(ddoc.list_views(), ['view1', 'view2'])
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Copy DesignDocument tests to their own module<commit_after>
|
#!/usr/bin/env python
"""
_design_doc_test_
"""
import mock
import unittest
from cloudant.design_document import DesignDocument
from cloudant.document import Document
class DesignDocTests(unittest.TestCase):
"""
tests for design doc object
"""
@mock.patch.object(Document, 'fetch')
def test_design_doc(self, mock_fetch):
"""test overridden methods work as expected"""
mock_database = mock.Mock()
ddoc = DesignDocument(mock_database, '_design/unittest')
ddoc['views'] = {
'view1' : {'map': "MAP", 'reduce': 'REDUCE'}
}
ddoc.fetch()
self.failUnless(mock_fetch.called)
views = [ x for _,x in ddoc.iterviews() ]
self.assertEqual(len(views), 1)
view = views[0]
self.failUnless('view1' in view)
self.assertEqual(view['view1']['map'], 'MAP')
self.assertEqual(view['view1']['reduce'], 'REDUCE')
self.failUnless('view1' in ddoc.views)
def test_ddoc_add_view(self):
mock_database = mock.Mock()
with mock.patch('cloudant.design_document.DesignDocument.save') as mock_save:
ddoc = DesignDocument(mock_database, '_design/unittest')
ddoc['views'] = {
'view1': {'map': "MAP", 'reduce': 'REDUCE'}
}
ddoc.add_view('view2', "MAP2")
self.failUnless('view2' in ddoc['views'])
self.assertEqual(ddoc['views']['view2'].map, 'MAP2')
self.assertEqual(ddoc['views']['view2'].reduce, None)
self.failUnless(mock_save.called)
def test_list_views(self):
mock_database = mock.Mock()
ddoc = DesignDocument(mock_database, '_design/unittest')
ddoc['views'] = {
'view1': {'map': "MAP", 'reduce': 'REDUCE'},
'view2': {'map': "MAP", 'reduce': 'REDUCE'},
}
self.assertEqual(ddoc.list_views(), ['view1', 'view2'])
if __name__ == '__main__':
unittest.main()
|
Copy DesignDocument tests to their own module#!/usr/bin/env python
"""
_design_doc_test_
"""
import mock
import unittest
from cloudant.design_document import DesignDocument
from cloudant.document import Document
class DesignDocTests(unittest.TestCase):
"""
tests for design doc object
"""
@mock.patch.object(Document, 'fetch')
def test_design_doc(self, mock_fetch):
"""test overridden methods work as expected"""
mock_database = mock.Mock()
ddoc = DesignDocument(mock_database, '_design/unittest')
ddoc['views'] = {
'view1' : {'map': "MAP", 'reduce': 'REDUCE'}
}
ddoc.fetch()
self.failUnless(mock_fetch.called)
views = [ x for _,x in ddoc.iterviews() ]
self.assertEqual(len(views), 1)
view = views[0]
self.failUnless('view1' in view)
self.assertEqual(view['view1']['map'], 'MAP')
self.assertEqual(view['view1']['reduce'], 'REDUCE')
self.failUnless('view1' in ddoc.views)
def test_ddoc_add_view(self):
mock_database = mock.Mock()
with mock.patch('cloudant.design_document.DesignDocument.save') as mock_save:
ddoc = DesignDocument(mock_database, '_design/unittest')
ddoc['views'] = {
'view1': {'map': "MAP", 'reduce': 'REDUCE'}
}
ddoc.add_view('view2', "MAP2")
self.failUnless('view2' in ddoc['views'])
self.assertEqual(ddoc['views']['view2'].map, 'MAP2')
self.assertEqual(ddoc['views']['view2'].reduce, None)
self.failUnless(mock_save.called)
def test_list_views(self):
mock_database = mock.Mock()
ddoc = DesignDocument(mock_database, '_design/unittest')
ddoc['views'] = {
'view1': {'map': "MAP", 'reduce': 'REDUCE'},
'view2': {'map': "MAP", 'reduce': 'REDUCE'},
}
self.assertEqual(ddoc.list_views(), ['view1', 'view2'])
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Copy DesignDocument tests to their own module<commit_after>#!/usr/bin/env python
"""
_design_doc_test_
"""
import mock
import unittest
from cloudant.design_document import DesignDocument
from cloudant.document import Document
class DesignDocTests(unittest.TestCase):
"""
tests for design doc object
"""
@mock.patch.object(Document, 'fetch')
def test_design_doc(self, mock_fetch):
"""test overridden methods work as expected"""
mock_database = mock.Mock()
ddoc = DesignDocument(mock_database, '_design/unittest')
ddoc['views'] = {
'view1' : {'map': "MAP", 'reduce': 'REDUCE'}
}
ddoc.fetch()
self.failUnless(mock_fetch.called)
views = [ x for _,x in ddoc.iterviews() ]
self.assertEqual(len(views), 1)
view = views[0]
self.failUnless('view1' in view)
self.assertEqual(view['view1']['map'], 'MAP')
self.assertEqual(view['view1']['reduce'], 'REDUCE')
self.failUnless('view1' in ddoc.views)
def test_ddoc_add_view(self):
mock_database = mock.Mock()
with mock.patch('cloudant.design_document.DesignDocument.save') as mock_save:
ddoc = DesignDocument(mock_database, '_design/unittest')
ddoc['views'] = {
'view1': {'map': "MAP", 'reduce': 'REDUCE'}
}
ddoc.add_view('view2', "MAP2")
self.failUnless('view2' in ddoc['views'])
self.assertEqual(ddoc['views']['view2'].map, 'MAP2')
self.assertEqual(ddoc['views']['view2'].reduce, None)
self.failUnless(mock_save.called)
def test_list_views(self):
mock_database = mock.Mock()
ddoc = DesignDocument(mock_database, '_design/unittest')
ddoc['views'] = {
'view1': {'map': "MAP", 'reduce': 'REDUCE'},
'view2': {'map': "MAP", 'reduce': 'REDUCE'},
}
self.assertEqual(ddoc.list_views(), ['view1', 'view2'])
if __name__ == '__main__':
unittest.main()
|
|
37dd523cfb5a26cb33493ba29f99d8a97a8401e4
|
inst/py/phyloxml_from_msa.py
|
inst/py/phyloxml_from_msa.py
|
"""Generate a phylo.xml from a MUSCLE MSA.fasta"""
import argparse
##### PARSE ARGUMENTS #####
argparser = argparse.ArgumentParser()
argparser.add_argument("msa", help="path to the MUSCLE MSA.fasta")
argparser.add_argument("phyloxml", help="path to an output phylo.xml")
args = argparser.parse_args()
args = vars(args) # Give access to arguments using a dict: e.g. args["msa"]
##### END PARSE ARGUMENTS #####
def phyloxml_from_msa(msa, phyloxml):
from Bio import AlignIO
from Bio.Phylo.TreeConstruction import DistanceCalculator
from Bio.Phylo.TreeConstruction import DistanceTreeConstructor
from Bio import Phylo
ms_alignment = AlignIO.read(msa, "fasta")
calculator = DistanceCalculator("ident")
dist_matrix = calculator.get_distance(ms_alignment)
constructor = DistanceTreeConstructor()
tree = constructor.upgma(dist_matrix)
Phylo.write(tree, phyloxml, "phyloxml")
if __name__ == "__main__":
msa = args["msa"]
phyloxml = args["phyloxml"]
phyloxml_from_msa(msa, phyloxml)
|
Add Python function to build phylo.xml from MSA with Biopython
|
Add Python function to build phylo.xml from MSA with Biopython
|
Python
|
bsd-2-clause
|
daniel0128/receptormarker,nsh87/receptormarker,nsh87/receptormarker,daniel0128/receptormarker,daniel0128/receptormarker,nsh87/receptormarker,nsh87/receptormarker,catterbu/receptormarker,daniel0128/receptormarker,daniel0128/receptormarker,catterbu/receptormarker,daniel0128/receptormarker,catterbu/receptormarker,catterbu/receptormarker,daniel0128/receptormarker,nsh87/receptormarker,catterbu/receptormarker
|
Add Python function to build phylo.xml from MSA with Biopython
|
"""Generate a phylo.xml from a MUSCLE MSA.fasta"""
import argparse
##### PARSE ARGUMENTS #####
argparser = argparse.ArgumentParser()
argparser.add_argument("msa", help="path to the MUSCLE MSA.fasta")
argparser.add_argument("phyloxml", help="path to an output phylo.xml")
args = argparser.parse_args()
args = vars(args) # Give access to arguments using a dict: e.g. args["msa"]
##### END PARSE ARGUMENTS #####
def phyloxml_from_msa(msa, phyloxml):
from Bio import AlignIO
from Bio.Phylo.TreeConstruction import DistanceCalculator
from Bio.Phylo.TreeConstruction import DistanceTreeConstructor
from Bio import Phylo
ms_alignment = AlignIO.read(msa, "fasta")
calculator = DistanceCalculator("ident")
dist_matrix = calculator.get_distance(ms_alignment)
constructor = DistanceTreeConstructor()
tree = constructor.upgma(dist_matrix)
Phylo.write(tree, phyloxml, "phyloxml")
if __name__ == "__main__":
msa = args["msa"]
phyloxml = args["phyloxml"]
phyloxml_from_msa(msa, phyloxml)
|
<commit_before><commit_msg>Add Python function to build phylo.xml from MSA with Biopython<commit_after>
|
"""Generate a phylo.xml from a MUSCLE MSA.fasta"""
import argparse
##### PARSE ARGUMENTS #####
argparser = argparse.ArgumentParser()
argparser.add_argument("msa", help="path to the MUSCLE MSA.fasta")
argparser.add_argument("phyloxml", help="path to an output phylo.xml")
args = argparser.parse_args()
args = vars(args) # Give access to arguments using a dict: e.g. args["msa"]
##### END PARSE ARGUMENTS #####
def phyloxml_from_msa(msa, phyloxml):
from Bio import AlignIO
from Bio.Phylo.TreeConstruction import DistanceCalculator
from Bio.Phylo.TreeConstruction import DistanceTreeConstructor
from Bio import Phylo
ms_alignment = AlignIO.read(msa, "fasta")
calculator = DistanceCalculator("ident")
dist_matrix = calculator.get_distance(ms_alignment)
constructor = DistanceTreeConstructor()
tree = constructor.upgma(dist_matrix)
Phylo.write(tree, phyloxml, "phyloxml")
if __name__ == "__main__":
msa = args["msa"]
phyloxml = args["phyloxml"]
phyloxml_from_msa(msa, phyloxml)
|
Add Python function to build phylo.xml from MSA with Biopython"""Generate a phylo.xml from a MUSCLE MSA.fasta"""
import argparse
##### PARSE ARGUMENTS #####
argparser = argparse.ArgumentParser()
argparser.add_argument("msa", help="path to the MUSCLE MSA.fasta")
argparser.add_argument("phyloxml", help="path to an output phylo.xml")
args = argparser.parse_args()
args = vars(args) # Give access to arguments using a dict: e.g. args["msa"]
##### END PARSE ARGUMENTS #####
def phyloxml_from_msa(msa, phyloxml):
from Bio import AlignIO
from Bio.Phylo.TreeConstruction import DistanceCalculator
from Bio.Phylo.TreeConstruction import DistanceTreeConstructor
from Bio import Phylo
ms_alignment = AlignIO.read(msa, "fasta")
calculator = DistanceCalculator("ident")
dist_matrix = calculator.get_distance(ms_alignment)
constructor = DistanceTreeConstructor()
tree = constructor.upgma(dist_matrix)
Phylo.write(tree, phyloxml, "phyloxml")
if __name__ == "__main__":
msa = args["msa"]
phyloxml = args["phyloxml"]
phyloxml_from_msa(msa, phyloxml)
|
<commit_before><commit_msg>Add Python function to build phylo.xml from MSA with Biopython<commit_after>"""Generate a phylo.xml from a MUSCLE MSA.fasta"""
import argparse
##### PARSE ARGUMENTS #####
argparser = argparse.ArgumentParser()
argparser.add_argument("msa", help="path to the MUSCLE MSA.fasta")
argparser.add_argument("phyloxml", help="path to an output phylo.xml")
args = argparser.parse_args()
args = vars(args) # Give access to arguments using a dict: e.g. args["msa"]
##### END PARSE ARGUMENTS #####
def phyloxml_from_msa(msa, phyloxml):
from Bio import AlignIO
from Bio.Phylo.TreeConstruction import DistanceCalculator
from Bio.Phylo.TreeConstruction import DistanceTreeConstructor
from Bio import Phylo
ms_alignment = AlignIO.read(msa, "fasta")
calculator = DistanceCalculator("ident")
dist_matrix = calculator.get_distance(ms_alignment)
constructor = DistanceTreeConstructor()
tree = constructor.upgma(dist_matrix)
Phylo.write(tree, phyloxml, "phyloxml")
if __name__ == "__main__":
msa = args["msa"]
phyloxml = args["phyloxml"]
phyloxml_from_msa(msa, phyloxml)
|
|
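The record above builds a UPGMA tree from a MUSCLE alignment and writes it as phyloXML. Because the script parses sys.argv at import time, the same pipeline is easier to drive interactively by repeating its calls; a minimal sketch, assuming Biopython is installed and an alignment exists at the made-up path msa.fasta:

from Bio import AlignIO, Phylo
from Bio.Phylo.TreeConstruction import DistanceCalculator, DistanceTreeConstructor

aln = AlignIO.read("msa.fasta", "fasta")               # hypothetical input path
dm = DistanceCalculator("identity").get_distance(aln)  # pairwise identity distances
tree = DistanceTreeConstructor().upgma(dm)
Phylo.write(tree, "tree.xml", "phyloxml")              # hypothetical output path
Phylo.draw_ascii(tree)                                 # quick terminal rendering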
d83be93d707ead0563f2eefc71b1f3d90e20df4a
|
glowing-lines.py
|
glowing-lines.py
|
from PIL import Image, ImageDraw
import random
W = 500
im = Image.new('RGB', (W, W))
NCOLORS = 19
COLORS = []
def get_origin_point():
return [random.randint(0, W-1), random.randint(0, W-1)]
def get_vector():
return [random.randint(0.3*W, 0.6*W), random.randint(0.3*W, 0.6*W)]
def draw_one_line(draw):
op = get_origin_point()
vec = get_vector()
tu = tuple(op + vec)
for i in range(NCOLORS):
draw.line(tu, fill=COLORS[i], width=NCOLORS-i)
def draw_lines(draw):
for i in range(30):
draw_one_line(draw)
def init_colors(ncolors):
v = 255.0
for i in range(ncolors):
COLORS.append((0, int(v), 0))
v *= 0.80
COLORS.reverse()
init_colors(NCOLORS)
draw = ImageDraw.Draw(im)
red = (255, 0, 0)
draw_lines(draw)
im.save('f.png')
|
Add crude script to draw glowing lines; currently the dark part hides the bright pixels of other lines, making them look physical; should be additive
|
Add crude script to draw glowing lines; currently the dark part hides the bright pixels of other lines, making them look physical; should be additive
|
Python
|
mit
|
redpig2/pilhacks
|
Add crude script to draw glowing lines; currently the dark part hides the bright pixels of other lines, making them look physical; should be additive
|
from PIL import Image, ImageDraw
import random
W = 500
im = Image.new('RGB', (W, W))
NCOLORS = 19
COLORS = []
def get_origin_point():
return [random.randint(0, W-1), random.randint(0, W-1)]
def get_vector():
return [random.randint(0.3*W, 0.6*W), random.randint(0.3*W, 0.6*W)]
def draw_one_line(draw):
op = get_origin_point()
vec = get_vector()
tu = tuple(op + vec)
for i in range(NCOLORS):
draw.line(tu, fill=COLORS[i], width=NCOLORS-i)
def draw_lines(draw):
for i in range(30):
draw_one_line(draw)
def init_colors(ncolors):
v = 255.0
for i in range(ncolors):
COLORS.append((0, int(v), 0))
v *= 0.80
COLORS.reverse()
init_colors(NCOLORS)
draw = ImageDraw.Draw(im)
red = (255, 0, 0)
draw_lines(draw)
im.save('f.png')
|
<commit_before><commit_msg>Add crude script to draw glowing lines; currently the dark part hides the bright pixels of other lines, making them look physical; should be additive<commit_after>
|
from PIL import Image, ImageDraw
import random
W = 500
im = Image.new('RGB', (W, W))
NCOLORS = 19
COLORS = []
def get_origin_point():
return [random.randint(0, W-1), random.randint(0, W-1)]
def get_vector():
return [random.randint(0.3*W, 0.6*W), random.randint(0.3*W, 0.6*W)]
def draw_one_line(draw):
op = get_origin_point()
vec = get_vector()
tu = tuple(op + vec)
for i in range(NCOLORS):
draw.line(tu, fill=COLORS[i], width=NCOLORS-i)
def draw_lines(draw):
for i in range(30):
draw_one_line(draw)
def init_colors(ncolors):
v = 255.0
for i in range(ncolors):
COLORS.append((0, int(v), 0))
v *= 0.80
COLORS.reverse()
init_colors(NCOLORS)
draw = ImageDraw.Draw(im)
red = (255, 0, 0)
draw_lines(draw)
im.save('f.png')
|
Add crude script to draw glowing lines; currently the dark part hides the bright pixels of other lines, making them look physical; should be additivefrom PIL import Image, ImageDraw
import random
W = 500
im = Image.new('RGB', (W, W))
NCOLORS = 19
COLORS = []
def get_origin_point():
return [random.randint(0, W-1), random.randint(0, W-1)]
def get_vector():
return [random.randint(0.3*W, 0.6*W), random.randint(0.3*W, 0.6*W)]
def draw_one_line(draw):
op = get_origin_point()
vec = get_vector()
tu = tuple(op + vec)
for i in range(NCOLORS):
draw.line(tu, fill=COLORS[i], width=NCOLORS-i)
def draw_lines(draw):
for i in range(30):
draw_one_line(draw)
def init_colors(ncolors):
v = 255.0
for i in range(ncolors):
COLORS.append((0, int(v), 0))
v *= 0.80
COLORS.reverse()
init_colors(NCOLORS)
draw = ImageDraw.Draw(im)
red = (255, 0, 0)
draw_lines(draw)
im.save('f.png')
|
<commit_before><commit_msg>Add crude script to draw glowing lines; currently the dark part hides the bright pixels of other lines, making them look physical; should be additive<commit_after>from PIL import Image, ImageDraw
import random
W = 500
im = Image.new('RGB', (W, W))
NCOLORS = 19
COLORS = []
def get_origin_point():
return [random.randint(0, W-1), random.randint(0, W-1)]
def get_vector():
return [random.randint(0.3*W, 0.6*W), random.randint(0.3*W, 0.6*W)]
def draw_one_line(draw):
op = get_origin_point()
vec = get_vector()
tu = tuple(op + vec)
for i in range(NCOLORS):
draw.line(tu, fill=COLORS[i], width=NCOLORS-i)
def draw_lines(draw):
for i in range(30):
draw_one_line(draw)
def init_colors(ncolors):
v = 255.0
for i in range(ncolors):
COLORS.append((0, int(v), 0))
v *= 0.80
COLORS.reverse()
init_colors(NCOLORS)
draw = ImageDraw.Draw(im)
red = (255, 0, 0)
draw_lines(draw)
im.save('f.png')
|
|
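The commit message above notes that the dark outer strokes of a newly drawn line hide the bright cores of earlier lines, and that the compositing should be additive. As an illustration only (not part of the commit), Pillow's ImageChops.add gives that behaviour if each glowing line is rendered on its own black layer and the layers are summed:

from PIL import Image, ImageDraw, ImageChops

W = 500
canvas = Image.new('RGB', (W, W))                        # running additive sum, starts black
for coords in [(50, 60, 400, 300), (80, 400, 420, 90)]:  # made-up line endpoints
    layer = Image.new('RGB', (W, W))
    draw = ImageDraw.Draw(layer)
    for i, width in enumerate(range(19, 0, -1)):         # widest stroke dimmest, core brightest
        draw.line(coords, fill=(0, min(255, 40 + 12 * i), 0), width=width)
    canvas = ImageChops.add(canvas, layer)               # per-channel saturating addition
canvas.save('additive.png')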
d1de958daac3991bf673c4e5b10a0564a9a610d8
|
var/spack/packages/libmonitor/package.py
|
var/spack/packages/libmonitor/package.py
|
##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://scalability-llnl.github.io/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libmonitor(Package):
homepage = "http://hpctoolkit.org"
url = "file:///g/g0/legendre/tools/oss/openspeedshop-release-2.1/SOURCES/libmonitor-20130218.tar.gz"
version('20130218', 'aa85c2c580e2dafb823cc47b09374279')
def install(self, spec, prefix):
configure("--prefix=" + prefix)
make()
make("install")
|
Add libmonitor to spack. Still needs svn support for checkout
|
Add libmonitor to spack. Still needs svn support for checkout
|
Python
|
lgpl-2.1
|
matthiasdiener/spack,mfherbst/spack,TheTimmy/spack,EmreAtes/spack,TheTimmy/spack,mfherbst/spack,EmreAtes/spack,tmerrick1/spack,skosukhin/spack,lgarren/spack,lgarren/spack,TheTimmy/spack,LLNL/spack,mfherbst/spack,skosukhin/spack,mfherbst/spack,TheTimmy/spack,tmerrick1/spack,mfherbst/spack,krafczyk/spack,LLNL/spack,tmerrick1/spack,skosukhin/spack,iulian787/spack,iulian787/spack,LLNL/spack,matthiasdiener/spack,EmreAtes/spack,iulian787/spack,matthiasdiener/spack,LLNL/spack,tmerrick1/spack,EmreAtes/spack,skosukhin/spack,iulian787/spack,matthiasdiener/spack,EmreAtes/spack,LLNL/spack,lgarren/spack,krafczyk/spack,matthiasdiener/spack,TheTimmy/spack,tmerrick1/spack,krafczyk/spack,lgarren/spack,krafczyk/spack,lgarren/spack,iulian787/spack,skosukhin/spack,krafczyk/spack
|
Add libmonitor to spack. Still needs svn support for checkout
|
##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://scalability-llnl.github.io/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libmonitor(Package):
homepage = "http://hpctoolkit.org"
url = "file:///g/g0/legendre/tools/oss/openspeedshop-release-2.1/SOURCES/libmonitor-20130218.tar.gz"
version('20130218', 'aa85c2c580e2dafb823cc47b09374279')
def install(self, spec, prefix):
configure("--prefix=" + prefix)
make()
make("install")
|
<commit_before><commit_msg>Add libmonitor to spack. Still needs svn support for checkout<commit_after>
|
##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://scalability-llnl.github.io/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libmonitor(Package):
homepage = "http://hpctoolkit.org"
url = "file:///g/g0/legendre/tools/oss/openspeedshop-release-2.1/SOURCES/libmonitor-20130218.tar.gz"
version('20130218', 'aa85c2c580e2dafb823cc47b09374279')
def install(self, spec, prefix):
configure("--prefix=" + prefix)
make()
make("install")
|
Add libmonitor to spack. Still needs svn support for checkout##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://scalability-llnl.github.io/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libmonitor(Package):
homepage = "http://hpctoolkit.org"
url = "file:///g/g0/legendre/tools/oss/openspeedshop-release-2.1/SOURCES/libmonitor-20130218.tar.gz"
version('20130218', 'aa85c2c580e2dafb823cc47b09374279')
def install(self, spec, prefix):
configure("--prefix=" + prefix)
make()
make("install")
|
<commit_before><commit_msg>Add libmonitor to spack. Still needs svn support for checkout<commit_after>##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://scalability-llnl.github.io/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libmonitor(Package):
homepage = "http://hpctoolkit.org"
url = "file:///g/g0/legendre/tools/oss/openspeedshop-release-2.1/SOURCES/libmonitor-20130218.tar.gz"
version('20130218', 'aa85c2c580e2dafb823cc47b09374279')
def install(self, spec, prefix):
configure("--prefix=" + prefix)
make()
make("install")
|
|
329438288f9bf031e806020b37e3b9260e9dc07b
|
scripts/create_nom_prenom_unifier.py
|
scripts/create_nom_prenom_unifier.py
|
# -*- coding: utf-8 -*-
import pandas as pd
from unidecode import unidecode
df = pd.DataFrame()
from utils import Fingerprinter
def nomprenomtwice(nom):
if not pd.isnull(nom):
anom = nom.split(' ')
if anom[len(anom)/2] != anom[0]:
return nom.upper()
return ' '.join(anom[0:len(anom)/2]).upper()
return nom
def get_fingerprint(string):
if not pd.isnull(string):
return Fingerprinter(string).get_fingerprint()
return string
for filename in ["dentistes.refined.csv",
"infirmiers.refined.csv",
"medecins_exploitables.refined.csv",
"medecins_inexploitables.refined.csv",
"pharmaciens.refined.csv",
"sagefemmes.refined.csv"]:
newdf = pd.read_csv("data/refined/%s" % filename, dtype=object, encoding='utf-8', usecols=["BENEF_PS_QUALITE_NOM_PRENOM"])
if df is None:
df = newdf
else:
df = df.append(newdf)
df["fingerprint"] = df.BENEF_PS_QUALITE_NOM_PRENOM.apply(nomprenomtwice).apply(get_fingerprint)
gp = df.groupby(["fingerprint"])
unifier = []
for group_name, rows in gp:
names = sorted(rows.BENEF_PS_QUALITE_NOM_PRENOM.unique(), lambda x,y: cmp(len(x), len(y)))
if len(names) > 1:
reference = names[0]
for name in names[1:]:
fixed_name = nomprenomtwice(unidecode(reference).strip().upper())
if len(fixed_name) > 0:
unifier.append(",".join([name.encode('utf-8'), fixed_name]))
print "\n".join(unifier)
|
Add script to generate a nom prenom unifier
|
Add script to generate a nom prenom unifier
|
Python
|
agpl-3.0
|
regardscitoyens/sunshine-data,regardscitoyens/sunshine-data,regardscitoyens/sunshine-data,regardscitoyens/sunshine-data,regardscitoyens/sunshine-data,regardscitoyens/sunshine-data
|
Add script to generate a nom prenom unifier
|
# -*- coding: utf-8 -*-
import pandas as pd
from unidecode import unidecode
df = pd.DataFrame()
from utils import Fingerprinter
def nomprenomtwice(nom):
if not pd.isnull(nom):
anom = nom.split(' ')
if anom[len(anom)/2] != anom[0]:
return nom.upper()
return ' '.join(anom[0:len(anom)/2]).upper()
return nom
def get_fingerprint(string):
if not pd.isnull(string):
return Fingerprinter(string).get_fingerprint()
return string
for filename in ["dentistes.refined.csv",
"infirmiers.refined.csv",
"medecins_exploitables.refined.csv",
"medecins_inexploitables.refined.csv",
"pharmaciens.refined.csv",
"sagefemmes.refined.csv"]:
newdf = pd.read_csv("data/refined/%s" % filename, dtype=object, encoding='utf-8', usecols=["BENEF_PS_QUALITE_NOM_PRENOM"])
if df is None:
df = newdf
else:
df = df.append(newdf)
df["fingerprint"] = df.BENEF_PS_QUALITE_NOM_PRENOM.apply(nomprenomtwice).apply(get_fingerprint)
gp = df.groupby(["fingerprint"])
unifier = []
for group_name, rows in gp:
names = sorted(rows.BENEF_PS_QUALITE_NOM_PRENOM.unique(), lambda x,y: cmp(len(x), len(y)))
if len(names) > 1:
reference = names[0]
for name in names[1:]:
fixed_name = nomprenomtwice(unidecode(reference).strip().upper())
if len(fixed_name) > 0:
unifier.append(",".join([name.encode('utf-8'), fixed_name]))
print "\n".join(unifier)
|
<commit_before><commit_msg>Add script to generate a nom prenom unifier<commit_after>
|
# -*- coding: utf-8 -*-
import pandas as pd
from unidecode import unidecode
df = pd.DataFrame()
from utils import Fingerprinter
def nomprenomtwice(nom):
if not pd.isnull(nom):
anom = nom.split(' ')
if anom[len(anom)/2] != anom[0]:
return nom.upper()
return ' '.join(anom[0:len(anom)/2]).upper()
return nom
def get_fingerprint(string):
if not pd.isnull(string):
return Fingerprinter(string).get_fingerprint()
return string
for filename in ["dentistes.refined.csv",
"infirmiers.refined.csv",
"medecins_exploitables.refined.csv",
"medecins_inexploitables.refined.csv",
"pharmaciens.refined.csv",
"sagefemmes.refined.csv"]:
newdf = pd.read_csv("data/refined/%s" % filename, dtype=object, encoding='utf-8', usecols=["BENEF_PS_QUALITE_NOM_PRENOM"])
if df is None:
df = newdf
else:
df = df.append(newdf)
df["fingerprint"] = df.BENEF_PS_QUALITE_NOM_PRENOM.apply(nomprenomtwice).apply(get_fingerprint)
gp = df.groupby(["fingerprint"])
unifier = []
for group_name, rows in gp:
names = sorted(rows.BENEF_PS_QUALITE_NOM_PRENOM.unique(), lambda x,y: cmp(len(x), len(y)))
if len(names) > 1:
reference = names[0]
for name in names[1:]:
fixed_name = nomprenomtwice(unidecode(reference).strip().upper())
if len(fixed_name) > 0:
unifier.append(",".join([name.encode('utf-8'), fixed_name]))
print "\n".join(unifier)
|
Add script to generate a nom prenom unifier# -*- coding: utf-8 -*-
import pandas as pd
from unidecode import unidecode
df = pd.DataFrame()
from utils import Fingerprinter
def nomprenomtwice(nom):
if not pd.isnull(nom):
anom = nom.split(' ')
if anom[len(anom)/2] != anom[0]:
return nom.upper()
return ' '.join(anom[0:len(anom)/2]).upper()
return nom
def get_fingerprint(string):
if not pd.isnull(string):
return Fingerprinter(string).get_fingerprint()
return string
for filename in ["dentistes.refined.csv",
"infirmiers.refined.csv",
"medecins_exploitables.refined.csv",
"medecins_inexploitables.refined.csv",
"pharmaciens.refined.csv",
"sagefemmes.refined.csv"]:
newdf = pd.read_csv("data/refined/%s" % filename, dtype=object, encoding='utf-8', usecols=["BENEF_PS_QUALITE_NOM_PRENOM"])
if df is None:
df = newdf
else:
df = df.append(newdf)
df["fingerprint"] = df.BENEF_PS_QUALITE_NOM_PRENOM.apply(nomprenomtwice).apply(get_fingerprint)
gp = df.groupby(["fingerprint"])
unifier = []
for group_name, rows in gp:
names = sorted(rows.BENEF_PS_QUALITE_NOM_PRENOM.unique(), lambda x,y: cmp(len(x), len(y)))
if len(names) > 1:
reference = names[0]
for name in names[1:]:
fixed_name = nomprenomtwice(unidecode(reference).strip().upper())
if len(fixed_name) > 0:
unifier.append(",".join([name.encode('utf-8'), fixed_name]))
print "\n".join(unifier)
|
<commit_before><commit_msg>Add script to generate a nom prenom unifier<commit_after># -*- coding: utf-8 -*-
import pandas as pd
from unidecode import unidecode
df = pd.DataFrame()
from utils import Fingerprinter
def nomprenomtwice(nom):
if not pd.isnull(nom):
anom = nom.split(' ')
if anom[len(anom)/2] != anom[0]:
return nom.upper()
return ' '.join(anom[0:len(anom)/2]).upper()
return nom
def get_fingerprint(string):
if not pd.isnull(string):
return Fingerprinter(string).get_fingerprint()
return string
for filename in ["dentistes.refined.csv",
"infirmiers.refined.csv",
"medecins_exploitables.refined.csv",
"medecins_inexploitables.refined.csv",
"pharmaciens.refined.csv",
"sagefemmes.refined.csv"]:
newdf = pd.read_csv("data/refined/%s" % filename, dtype=object, encoding='utf-8', usecols=["BENEF_PS_QUALITE_NOM_PRENOM"])
if df is None:
df = newdf
else:
df = df.append(newdf)
df["fingerprint"] = df.BENEF_PS_QUALITE_NOM_PRENOM.apply(nomprenomtwice).apply(get_fingerprint)
gp = df.groupby(["fingerprint"])
unifier = []
for group_name, rows in gp:
names = sorted(rows.BENEF_PS_QUALITE_NOM_PRENOM.unique(), lambda x,y: cmp(len(x), len(y)))
if len(names) > 1:
reference = names[0]
for name in names[1:]:
fixed_name = nomprenomtwice(unidecode(reference).strip().upper())
if len(fixed_name) > 0:
unifier.append(",".join([name.encode('utf-8'), fixed_name]))
print "\n".join(unifier)
|
|
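In the record above, nomprenomtwice collapses the 'NOM PRENOM NOM PRENOM' duplication that occurs in the source CSVs before fingerprinting. With that definition in scope (it relies on pandas imported as pd), a couple of made-up names behave like this; the strings are illustrative and not taken from the data:

import pandas as pd  # needed by nomprenomtwice for pd.isnull

print nomprenomtwice(u'DUPONT MARIE DUPONT MARIE')  # -> DUPONT MARIE (duplicated half dropped)
print nomprenomtwice(u'DUPONT MARIE')               # -> DUPONT MARIE (kept whole, upper-cased)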
9f30d2dd142116f46dd0257bcf093e8e8dc2c11a
|
wagtail/admin/tests/test_dismissibles.py
|
wagtail/admin/tests/test_dismissibles.py
|
from django.test import TestCase
from django.urls import reverse
from wagtail.test.utils import WagtailTestUtils
from wagtail.users.models import UserProfile
class TestDismissiblesView(TestCase, WagtailTestUtils):
def setUp(self):
self.user = self.login()
self.profile = UserProfile.get_for_user(self.user)
self.url = reverse("wagtailadmin_dismissibles")
def test_get_initial(self):
response = self.client.get(self.url)
self.profile.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {})
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {})
def test_patch_valid(self):
response = self.client.patch(
self.url, data={"foo": "bar"}, content_type="application/json"
)
self.profile.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {"foo": "bar"})
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {"foo": "bar"})
def test_patch_invalid(self):
response = self.client.patch(
self.url, data="invalid", content_type="application/json"
)
self.profile.refresh_from_db()
self.assertEqual(response.status_code, 400)
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {})
def test_post(self):
# The view only accepts GET and PATCH
response = self.client.post(self.url, data={"foo": "bar"})
self.profile.refresh_from_db()
self.assertEqual(response.status_code, 405)
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {})
def test_get_without_userprofile(self):
# GET should work even if the user doesn't have a UserProfile,
# but it shouldn't create one
self.profile.delete()
response = self.client.get(self.url)
self.user.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {})
self.assertIsNone(getattr(self.user, "wagtail_userprofile", None))
def test_patch_without_userprofile(self):
# PATCH should work even if the user doesn't have a UserProfile,
# in which case it should create one
self.profile.delete()
response = self.client.patch(
self.url, data={"foo": "bar"}, content_type="application/json"
)
self.user.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {"foo": "bar"})
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {"foo": "bar"})
|
Add tests for Dismissibles view
|
Add tests for Dismissibles view
|
Python
|
bsd-3-clause
|
rsalmaso/wagtail,thenewguy/wagtail,rsalmaso/wagtail,thenewguy/wagtail,rsalmaso/wagtail,wagtail/wagtail,wagtail/wagtail,rsalmaso/wagtail,zerolab/wagtail,wagtail/wagtail,zerolab/wagtail,zerolab/wagtail,thenewguy/wagtail,zerolab/wagtail,wagtail/wagtail,rsalmaso/wagtail,wagtail/wagtail,thenewguy/wagtail,zerolab/wagtail,thenewguy/wagtail
|
Add tests for Dismissibles view
|
from django.test import TestCase
from django.urls import reverse
from wagtail.test.utils import WagtailTestUtils
from wagtail.users.models import UserProfile
class TestDismissiblesView(TestCase, WagtailTestUtils):
def setUp(self):
self.user = self.login()
self.profile = UserProfile.get_for_user(self.user)
self.url = reverse("wagtailadmin_dismissibles")
def test_get_initial(self):
response = self.client.get(self.url)
self.profile.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {})
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {})
def test_patch_valid(self):
response = self.client.patch(
self.url, data={"foo": "bar"}, content_type="application/json"
)
self.profile.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {"foo": "bar"})
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {"foo": "bar"})
def test_patch_invalid(self):
response = self.client.patch(
self.url, data="invalid", content_type="application/json"
)
self.profile.refresh_from_db()
self.assertEqual(response.status_code, 400)
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {})
def test_post(self):
# The view only accepts GET and PATCH
response = self.client.post(self.url, data={"foo": "bar"})
self.profile.refresh_from_db()
self.assertEqual(response.status_code, 405)
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {})
def test_get_without_userprofile(self):
# GET should work even if the user doesn't have a UserProfile,
# but it shouldn't create one
self.profile.delete()
response = self.client.get(self.url)
self.user.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {})
self.assertIsNone(getattr(self.user, "wagtail_userprofile", None))
def test_patch_without_userprofile(self):
# PATCH should work even if the user doesn't have a UserProfile,
# in which case it should create one
self.profile.delete()
response = self.client.patch(
self.url, data={"foo": "bar"}, content_type="application/json"
)
self.user.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {"foo": "bar"})
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {"foo": "bar"})
|
<commit_before><commit_msg>Add tests for Dismissibles view<commit_after>
|
from django.test import TestCase
from django.urls import reverse
from wagtail.test.utils import WagtailTestUtils
from wagtail.users.models import UserProfile
class TestDismissiblesView(TestCase, WagtailTestUtils):
def setUp(self):
self.user = self.login()
self.profile = UserProfile.get_for_user(self.user)
self.url = reverse("wagtailadmin_dismissibles")
def test_get_initial(self):
response = self.client.get(self.url)
self.profile.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {})
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {})
def test_patch_valid(self):
response = self.client.patch(
self.url, data={"foo": "bar"}, content_type="application/json"
)
self.profile.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {"foo": "bar"})
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {"foo": "bar"})
def test_patch_invalid(self):
response = self.client.patch(
self.url, data="invalid", content_type="application/json"
)
self.profile.refresh_from_db()
self.assertEqual(response.status_code, 400)
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {})
def test_post(self):
# The view only accepts GET and PATCH
response = self.client.post(self.url, data={"foo": "bar"})
self.profile.refresh_from_db()
self.assertEqual(response.status_code, 405)
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {})
def test_get_without_userprofile(self):
# GET should work even if the user doesn't have a UserProfile,
# but it shouldn't create one
self.profile.delete()
response = self.client.get(self.url)
self.user.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {})
self.assertIsNone(getattr(self.user, "wagtail_userprofile", None))
def test_patch_without_userprofile(self):
# PATCH should work even if the user doesn't have a UserProfile,
# in which case it should create one
self.profile.delete()
response = self.client.patch(
self.url, data={"foo": "bar"}, content_type="application/json"
)
self.user.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {"foo": "bar"})
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {"foo": "bar"})
|
Add tests for Dismissibles viewfrom django.test import TestCase
from django.urls import reverse
from wagtail.test.utils import WagtailTestUtils
from wagtail.users.models import UserProfile
class TestDismissiblesView(TestCase, WagtailTestUtils):
def setUp(self):
self.user = self.login()
self.profile = UserProfile.get_for_user(self.user)
self.url = reverse("wagtailadmin_dismissibles")
def test_get_initial(self):
response = self.client.get(self.url)
self.profile.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {})
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {})
def test_patch_valid(self):
response = self.client.patch(
self.url, data={"foo": "bar"}, content_type="application/json"
)
self.profile.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {"foo": "bar"})
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {"foo": "bar"})
def test_patch_invalid(self):
response = self.client.patch(
self.url, data="invalid", content_type="application/json"
)
self.profile.refresh_from_db()
self.assertEqual(response.status_code, 400)
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {})
def test_post(self):
# The view only accepts GET and PATCH
response = self.client.post(self.url, data={"foo": "bar"})
self.profile.refresh_from_db()
self.assertEqual(response.status_code, 405)
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {})
def test_get_without_userprofile(self):
# GET should work even if the user doesn't have a UserProfile,
# but it shouldn't create one
self.profile.delete()
response = self.client.get(self.url)
self.user.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {})
self.assertIsNone(getattr(self.user, "wagtail_userprofile", None))
def test_patch_without_userprofile(self):
# PATCH should work even if the user doesn't have a UserProfile,
# in which case it should create one
self.profile.delete()
response = self.client.patch(
self.url, data={"foo": "bar"}, content_type="application/json"
)
self.user.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {"foo": "bar"})
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {"foo": "bar"})
|
<commit_before><commit_msg>Add tests for Dismissibles view<commit_after>from django.test import TestCase
from django.urls import reverse
from wagtail.test.utils import WagtailTestUtils
from wagtail.users.models import UserProfile
class TestDismissiblesView(TestCase, WagtailTestUtils):
def setUp(self):
self.user = self.login()
self.profile = UserProfile.get_for_user(self.user)
self.url = reverse("wagtailadmin_dismissibles")
def test_get_initial(self):
response = self.client.get(self.url)
self.profile.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {})
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {})
def test_patch_valid(self):
response = self.client.patch(
self.url, data={"foo": "bar"}, content_type="application/json"
)
self.profile.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {"foo": "bar"})
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {"foo": "bar"})
def test_patch_invalid(self):
response = self.client.patch(
self.url, data="invalid", content_type="application/json"
)
self.profile.refresh_from_db()
self.assertEqual(response.status_code, 400)
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {})
def test_post(self):
# The view only accepts GET and PATCH
response = self.client.post(self.url, data={"foo": "bar"})
self.profile.refresh_from_db()
self.assertEqual(response.status_code, 405)
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {})
def test_get_without_userprofile(self):
# GET should work even if the user doesn't have a UserProfile,
# but it shouldn't create one
self.profile.delete()
response = self.client.get(self.url)
self.user.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {})
self.assertIsNone(getattr(self.user, "wagtail_userprofile", None))
def test_patch_without_userprofile(self):
# PATCH should work even if the user doesn't have a UserProfile,
# in which case it should create one
self.profile.delete()
response = self.client.patch(
self.url, data={"foo": "bar"}, content_type="application/json"
)
self.user.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {"foo": "bar"})
self.assertEqual(self.user.wagtail_userprofile.dismissibles, {"foo": "bar"})
|
|
6e2b09d478d61c2ae66959b87cb58689b57436b0
|
rf_train.py
|
rf_train.py
|
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
print "Grabbing data..."
training_text_collection_f = open("training_text_collection.pkl", "rb")
training_text_collection = joblib.load(training_text_collection_f)
training_text_collection_f.close()
training_targets_f = open("training_targets.pkl", "rb")
training_targets = joblib.load(training_targets_f)
training_targets_f.close()
print("Vectorizing data...")
vectorizer = TfidfVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = "english", \
max_features = 2500,
min_df = 5,
max_df = 0.25)
train_tfidf = vectorizer.fit_transform(training_text_collection)
save_vect = open("rf_tfidf_vect.pkl", "wb")
joblib.dump(vectorizer, save_vect)
save_vect.close()
clf = RandomForestClassifier(n_estimators = 250).fit(train_tfidf, training_targets)
save_clf = open("rf_clf.pkl", "wb")
joblib.dump(clf, save_clf)
save_clf.close()
|
Create Random Forest vectorizer and classifier pickles
|
Create Random Forest vectorizer and classifier pickles
|
Python
|
mit
|
npentella/CuriousCorpus,npentella/CuriousCorpus,npentella/CuriousCorpus
|
Create Random Forest vectorizer and classifier pickles
|
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
print "Grabbing data..."
training_text_collection_f = open("training_text_collection.pkl", "rb")
training_text_collection = joblib.load(training_text_collection_f)
training_text_collection_f.close()
training_targets_f = open("training_targets.pkl", "rb")
training_targets = joblib.load(training_targets_f)
training_targets_f.close()
print("Vectorizing data...")
vectorizer = TfidfVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = "english", \
max_features = 2500,
min_df = 5,
max_df = 0.25)
train_tfidf = vectorizer.fit_transform(training_text_collection)
save_vect = open("rf_tfidf_vect.pkl", "wb")
joblib.dump(vectorizer, save_vect)
save_vect.close()
clf = RandomForestClassifier(n_estimators = 250).fit(train_tfidf, training_targets)
save_clf = open("rf_clf.pkl", "wb")
joblib.dump(clf, save_clf)
save_clf.close()
|
<commit_before><commit_msg>Create Random Forest vectorizer and classifier pickles<commit_after>
|
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
print "Grabbing data..."
training_text_collection_f = open("training_text_collection.pkl", "rb")
training_text_collection = joblib.load(training_text_collection_f)
training_text_collection_f.close()
training_targets_f = open("training_targets.pkl", "rb")
training_targets = joblib.load(training_targets_f)
training_targets_f.close()
print("Vectorizing data...")
vectorizer = TfidfVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = "english", \
max_features = 2500,
min_df = 5,
max_df = 0.25)
train_tfidf = vectorizer.fit_transform(training_text_collection)
save_vect = open("rf_tfidf_vect.pkl", "wb")
joblib.dump(vectorizer, save_vect)
save_vect.close()
clf = RandomForestClassifier(n_estimators = 250).fit(train_tfidf, training_targets)
save_clf = open("rf_clf.pkl", "wb")
joblib.dump(clf, save_clf)
save_clf.close()
|
Create Random Forest vectorizer and classifier picklesfrom sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
print "Grabbing data..."
training_text_collection_f = open("training_text_collection.pkl", "rb")
training_text_collection = joblib.load(training_text_collection_f)
training_text_collection_f.close()
training_targets_f = open("training_targets.pkl", "rb")
training_targets = joblib.load(training_targets_f)
training_targets_f.close()
print("Vectorizing data...")
vectorizer = TfidfVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = "english", \
max_features = 2500,
min_df = 5,
max_df = 0.25)
train_tfidf = vectorizer.fit_transform(training_text_collection)
save_vect = open("rf_tfidf_vect.pkl", "wb")
joblib.dump(vectorizer, save_vect)
save_vect.close()
clf = RandomForestClassifier(n_estimators = 250).fit(train_tfidf, training_targets)
save_clf = open("rf_clf.pkl", "wb")
joblib.dump(clf, save_clf)
save_clf.close()
|
<commit_before><commit_msg>Create Random Forest vectorizer and classifier pickles<commit_after>from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
print "Grabbing data..."
training_text_collection_f = open("training_text_collection.pkl", "rb")
training_text_collection = joblib.load(training_text_collection_f)
training_text_collection_f.close()
training_targets_f = open("training_targets.pkl", "rb")
training_targets = joblib.load(training_targets_f)
training_targets_f.close()
print("Vectorizing data...")
vectorizer = TfidfVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = "english", \
max_features = 2500,
min_df = 5,
max_df = 0.25)
train_tfidf = vectorizer.fit_transform(training_text_collection)
save_vect = open("rf_tfidf_vect.pkl", "wb")
joblib.dump(vectorizer, save_vect)
save_vect.close()
clf = RandomForestClassifier(n_estimators = 250).fit(train_tfidf, training_targets)
save_clf = open("rf_clf.pkl", "wb")
joblib.dump(clf, save_clf)
save_clf.close()
|
|
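The training script above persists both the fitted TF-IDF vectorizer and the random forest. A matching inference sketch, reloading those pickles and scoring a new document, could look like the following; the file names follow the script, and the input text is made up:

from sklearn.externals import joblib

vectorizer = joblib.load("rf_tfidf_vect.pkl")
clf = joblib.load("rf_clf.pkl")
features = vectorizer.transform(["an unseen document to classify"])
print clf.predict(features)        # predicted class label(s)
print clf.predict_proba(features)  # per-class probabilities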
7cd00a28a0cbb152cd61c3ebbd4b72d3ab31baca
|
locators/tests/functional/test_latest.py
|
locators/tests/functional/test_latest.py
|
from locators import SimpleLocator
def test_latest_source():
loc = SimpleLocator()
proj = 'pip'
latest = max(loc.versions(proj))
# We want the first item in the following list
assert len(loc.get(proj, latest)['sdist']) != 0
def test_has_wheels():
loc = SimpleLocator()
proj = 'pip'
latest = max(loc.versions(proj))
# This will fail if there is no 'wheel' key.
# Maybe require all keys to be present?
assert len(loc.get(proj, latest)['wheel']) != 0
def test_has_old_wheels():
loc = SimpleLocator()
proj = 'pip'
versions = sorted(loc.versions(proj))
older = any(len(loc.get(proj, v)['wheel']) > 0 for v in versions[:-1])
current = len(loc.get(proj, versions[-1])['wheel']) != 0
def test_no_released_versions():
loc = SimpleLocator()
[d for d in loc.distributions() if not loc.versions(d)]
|
Add the basis for some functional tests
|
Add the basis for some functional tests
|
Python
|
mit
|
pfmoore/sandbox
|
Add the basis for some functional tests
|
from locators import SimpleLocator
def test_latest_source():
loc = SimpleLocator()
proj = 'pip'
latest = max(loc.versions(proj))
# We want the first item in the following list
assert len(loc.get(proj, latest)['sdist']) != 0
def test_has_wheels():
loc = SimpleLocator()
proj = 'pip'
latest = max(loc.versions(proj))
# This will fail if there is no 'wheel' key.
# Maybe require all keys to be present?
assert len(loc.get(proj, latest)['wheel']) != 0
def test_has_old_wheels():
loc = SimpleLocator()
proj = 'pip'
versions = sorted(loc.versions(proj))
older = any(len(loc.get(proj, v)['wheel']) > 0 for v in versions[:-1])
current = len(loc.get(proj, versions[-1])['wheel']) != 0
def test_no_released_versions():
loc = SimpleLocator()
[d for d in loc.distributions() if not loc.versions(d)]
|
<commit_before><commit_msg>Add the basis for some functional tests<commit_after>
|
from locators import SimpleLocator
def test_latest_source():
loc = SimpleLocator()
proj = 'pip'
latest = max(loc.versions(proj))
# We want the first item in the following list
assert len(loc.get(proj, latest)['sdist']) != 0
def test_has_wheels():
loc = SimpleLocator()
proj = 'pip'
latest = max(loc.versions(proj))
# This will fail if there is no 'wheel' key.
# Maybe require all keys to be present?
assert len(loc.get(proj, latest)['wheel']) != 0
def test_has_old_wheels():
loc = SimpleLocator()
proj = 'pip'
versions = sorted(loc.versions(proj))
older = any(len(loc.get(proj, v)['wheel']) > 0 for v in versions[:-1])
current = len(loc.get(proj, versions[-1])['wheel']) != 0
def test_no_released_versions():
loc = SimpleLocator()
[d for d in loc.distributions() if not loc.versions(d)]
|
Add the basis for some functional testsfrom locators import SimpleLocator
def test_latest_source():
loc = SimpleLocator()
proj = 'pip'
latest = max(loc.versions(proj))
# We want the first item in the following list
assert len(loc.get(proj, latest)['sdist']) != 0
def test_has_wheels():
loc = SimpleLocator()
proj = 'pip'
latest = max(loc.versions(proj))
# This will fail if there is no 'wheel' key.
# Maybe require all keys to be present?
assert len(loc.get(proj, latest)['wheel']) != 0
def test_has_old_wheels():
loc = SimpleLocator()
proj = 'pip'
versions = sorted(loc.versions(proj))
older = any(len(loc.get(proj, v)['wheel']) > 0 for v in versions[:-1])
current = len(loc.get(proj, versions[-1])['wheel']) != 0
def test_no_released_versions():
loc = SimpleLocator()
[d for d in loc.distributions() if not loc.versions(d)]
|
<commit_before><commit_msg>Add the basis for some functional tests<commit_after>from locators import SimpleLocator
def test_latest_source():
loc = SimpleLocator()
proj = 'pip'
latest = max(loc.versions(proj))
# We want the first item in the following list
assert len(loc.get(proj, latest)['sdist']) != 0
def test_has_wheels():
loc = SimpleLocator()
proj = 'pip'
latest = max(loc.versions(proj))
# This will fail if there is no 'wheel' key.
# Maybe require all keys to be present?
assert len(loc.get(proj, latest)['wheel']) != 0
def test_has_old_wheels():
loc = SimpleLocator()
proj = 'pip'
versions = sorted(loc.versions(proj))
older = any(len(loc.get(proj, v)['wheel']) > 0 for v in versions[:-1])
current = len(loc.get(proj, versions[-1])['wheel']) != 0
def test_no_released_versions():
loc = SimpleLocator()
[d for d in loc.distributions() if not loc.versions(d)]
|
|
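The functional tests above outline the locator API: versions(project) yields release versions, get(project, version) returns a dict of file lists keyed by type, and distributions() lists known projects. Used interactively it would look roughly like this, assuming the locators package from this sandbox is importable:

from locators import SimpleLocator

loc = SimpleLocator()
latest = max(loc.versions('pip'))
files = loc.get('pip', latest)
print('pip', latest, len(files['sdist']), len(files['wheel']))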
921d95a432316b2b76aadabcc3fc90fbf90376aa
|
simphony/testing/abc_container_check.py
|
simphony/testing/abc_container_check.py
|
import abc
from functools import partial
from simphony.testing.utils import (
create_data_container, compare_data_containers)
from simphony.core.data_container import DataContainer
from simphony.core.cuba import CUBA
class ContainerCheck(object):
__metaclass__ = abc.ABCMeta
def setUp(self):
self.addTypeEqualityFunc(
DataContainer, partial(compare_data_containers, testcase=self))
self.container = self.container_factory(u'foo')
self.data = create_data_container()
self.container.data = DataContainer(self.data)
@abc.abstractmethod
def container_factory(self, name):
""" Create and return the container object
"""
def test_name(self):
self.assertEqual(self.container.name, u'foo')
def test_rename(self):
container = self.container
container.name = u'bar'
self.assertEqual(container.name, u'bar')
def test_data(self):
self.assertEqual(self.container.data, self.data)
self.assertIsNot(self.container.data, self.data)
def test_update_data(self):
container = self.container
data = container.data
data[CUBA.TEMPERATURE] = 23.4
self.assertNotEqual(container.data, data)
self.assertEqual(container.data, self.data)
container.data = data
self.assertEqual(container.data, data)
self.assertIsNot(container.data, data)
|
Add a basic test template for high level CUDS containers
|
Add a basic test template for high level CUDS containers
|
Python
|
bsd-2-clause
|
simphony/simphony-common
|
Add a basic test template for high level CUDS containers
|
import abc
from functools import partial
from simphony.testing.utils import (
create_data_container, compare_data_containers)
from simphony.core.data_container import DataContainer
from simphony.core.cuba import CUBA
class ContainerCheck(object):
__metaclass__ = abc.ABCMeta
def setUp(self):
self.addTypeEqualityFunc(
DataContainer, partial(compare_data_containers, testcase=self))
self.container = self.container_factory(u'foo')
self.data = create_data_container()
self.container.data = DataContainer(self.data)
@abc.abstractmethod
def container_factory(self, name):
""" Create and return the container object
"""
def test_name(self):
self.assertEqual(self.container.name, u'foo')
def test_rename(self):
container = self.container
container.name = u'bar'
self.assertEqual(container.name, u'bar')
def test_data(self):
self.assertEqual(self.container.data, self.data)
self.assertIsNot(self.container.data, self.data)
def test_update_data(self):
container = self.container
data = container.data
data[CUBA.TEMPERATURE] = 23.4
self.assertNotEqual(container.data, data)
self.assertEqual(container.data, self.data)
container.data = data
self.assertEqual(container.data, data)
self.assertIsNot(container.data, data)
|
<commit_before><commit_msg>Add a basic test template for high level CUDS containers<commit_after>
|
import abc
from functools import partial
from simphony.testing.utils import (
create_data_container, compare_data_containers)
from simphony.core.data_container import DataContainer
from simphony.core.cuba import CUBA
class ContainerCheck(object):
__metaclass__ = abc.ABCMeta
def setUp(self):
self.addTypeEqualityFunc(
DataContainer, partial(compare_data_containers, testcase=self))
self.container = self.container_factory(u'foo')
self.data = create_data_container()
self.container.data = DataContainer(self.data)
@abc.abstractmethod
def container_factory(self, name):
""" Create and return the container object
"""
def test_name(self):
self.assertEqual(self.container.name, u'foo')
def test_rename(self):
container = self.container
container.name = u'bar'
self.assertEqual(container.name, u'bar')
def test_data(self):
self.assertEqual(self.container.data, self.data)
self.assertIsNot(self.container.data, self.data)
def test_update_data(self):
container = self.container
data = container.data
data[CUBA.TEMPERATURE] = 23.4
self.assertNotEqual(container.data, data)
self.assertEqual(container.data, self.data)
container.data = data
self.assertEqual(container.data, data)
self.assertIsNot(container.data, data)
|
Add a basic test template for high level CUDS containersimport abc
from functools import partial
from simphony.testing.utils import (
create_data_container, compare_data_containers)
from simphony.core.data_container import DataContainer
from simphony.core.cuba import CUBA
class ContainerCheck(object):
__metaclass__ = abc.ABCMeta
def setUp(self):
self.addTypeEqualityFunc(
DataContainer, partial(compare_data_containers, testcase=self))
self.container = self.container_factory(u'foo')
self.data = create_data_container()
self.container.data = DataContainer(self.data)
@abc.abstractmethod
def container_factory(self, name):
""" Create and return the container object
"""
def test_name(self):
self.assertEqual(self.container.name, u'foo')
def test_rename(self):
container = self.container
container.name = u'bar'
self.assertEqual(container.name, u'bar')
def test_data(self):
self.assertEqual(self.container.data, self.data)
self.assertIsNot(self.container.data, self.data)
def test_update_data(self):
container = self.container
data = container.data
data[CUBA.TEMPERATURE] = 23.4
self.assertNotEqual(container.data, data)
self.assertEqual(container.data, self.data)
container.data = data
self.assertEqual(container.data, data)
self.assertIsNot(container.data, data)
|
<commit_before><commit_msg>Add a basic test template for high level CUDS containers<commit_after>import abc
from functools import partial
from simphony.testing.utils import (
create_data_container, compare_data_containers)
from simphony.core.data_container import DataContainer
from simphony.core.cuba import CUBA
class ContainerCheck(object):
__metaclass__ = abc.ABCMeta
def setUp(self):
self.addTypeEqualityFunc(
DataContainer, partial(compare_data_containers, testcase=self))
self.container = self.container_factory(u'foo')
self.data = create_data_container()
self.container.data = DataContainer(self.data)
@abc.abstractmethod
def container_factory(self, name):
""" Create and return the container object
"""
def test_name(self):
self.assertEqual(self.container.name, u'foo')
def test_rename(self):
container = self.container
container.name = u'bar'
self.assertEqual(container.name, u'bar')
def test_data(self):
self.assertEqual(self.container.data, self.data)
self.assertIsNot(self.container.data, self.data)
def test_update_data(self):
container = self.container
data = container.data
data[CUBA.TEMPERATURE] = 23.4
self.assertNotEqual(container.data, data)
self.assertEqual(container.data, self.data)
container.data = data
self.assertEqual(container.data, data)
self.assertIsNot(container.data, data)
|
|
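ContainerCheck above is an abstract mixin: a concrete test case is expected to inherit from both it and unittest.TestCase and to supply container_factory. A hypothetical subclass, where SomeMesh stands in for a real CUDS container class and is not part of the commit, would look like:

import unittest

class SomeMeshContainerTestCase(ContainerCheck, unittest.TestCase):
    def container_factory(self, name):
        # return the project's container under test; SomeMesh is a placeholder
        return SomeMesh(name=name)

if __name__ == '__main__':
    unittest.main()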
62ece88485b335086f76335095ccf348a123ee69
|
tests/simplemeshtest.py
|
tests/simplemeshtest.py
|
#!/usr/bin/env python
from twisted.internet import reactor
from mesh import Mesh
import sys
NUMNODES = 5
NUMPACKETS = 10
nodes = []
# We're optimists
success = True
class TestMesh(Mesh):
expected = {}
done = 0
def gotOutput(self, node, data):
if self.expected.get(node) == None:
self.expected[node] = int(data)
if (self.expected.get(node, int(data)) != int(data)):
global success
print "Got " + data.rstrip() + " instead of " + \
str(self.expected[node]) + " from " + node.name
success = False
reactor.crash()
self.expected[node] = int(data) + 1
if self.expected[node] == 10:
self.done += 1
if self.done == NUMNODES - 1:
reactor.stop()
m = TestMesh()
for x in xrange(0, NUMNODES):
nodes.append(m.addNode("node" + str(x)))
# Connect all nodes to all others. 1024 bytes/s bandwidth, 50ms delay and 50%
# packet loss.. (bandwidth and delay aren't implemented just yet)
m.connect_full(1024, 50, 0.50)
firstnode = nodes[0]
for x in xrange(0, NUMPACKETS):
firstnode.pushInput(str(x) + "\n")
reactor.run()
if not success:
print "FAILED"
sys.exit(-1)
print "SUCCESS"
|
Add a very basic test (just test if messages are output in the right order by all nodes)
|
Add a very basic test (just test if messages are output in the right order by all nodes)
20070605063752-93b9a-171c986a65ac8fcdb791ea4ab34f870f087478dc.gz
|
Python
|
lgpl-2.1
|
freedesktop-unofficial-mirror/telepathy__telepathy-salut,freedesktop-unofficial-mirror/telepathy__telepathy-salut,freedesktop-unofficial-mirror/telepathy__telepathy-salut,freedesktop-unofficial-mirror/telepathy__telepathy-salut
|
Add a very basic test (just test if messages are output in the right order by all nodes)
20070605063752-93b9a-171c986a65ac8fcdb791ea4ab34f870f087478dc.gz
|
#!/usr/bin/env python
from twisted.internet import reactor
from mesh import Mesh
import sys
NUMNODES = 5
NUMPACKETS = 10
nodes = []
# We're optimists
success = True
class TestMesh(Mesh):
expected = {}
done = 0
def gotOutput(self, node, data):
if self.expected.get(node) == None:
self.expected[node] = int(data)
if (self.expected.get(node, int(data)) != int(data)):
global success
print "Got " + data.rstrip() + " instead of " + \
str(self.expected[node]) + " from " + node.name
success = False
reactor.crash()
self.expected[node] = int(data) + 1
if self.expected[node] == 10:
self.done += 1
if self.done == NUMNODES - 1:
reactor.stop()
m = TestMesh()
for x in xrange(0, NUMNODES):
nodes.append(m.addNode("node" + str(x)))
# Connect all nodes to all others. 1024 bytes/s bandwidth, 50ms delay and 50%
# packet loss.. (bandwidth and delay aren't implemented just yet)
m.connect_full(1024, 50, 0.50)
firstnode = nodes[0]
for x in xrange(0, NUMPACKETS):
firstnode.pushInput(str(x) + "\n")
reactor.run()
if not success:
print "FAILED"
sys.exit(-1)
print "SUCCESS"
|
<commit_before><commit_msg>Add a very basic test (just tests if messages are output in the right order by all nodes)
20070605063752-93b9a-171c986a65ac8fcdb791ea4ab34f870f087478dc.gz<commit_after>
|
#!/usr/bin/env python
from twisted.internet import reactor
from mesh import Mesh
import sys
NUMNODES = 5
NUMPACKETS = 10
nodes = []
# We're optimists
success = True
class TestMesh(Mesh):
expected = {}
done = 0
def gotOutput(self, node, data):
if self.expected.get(node) == None:
self.expected[node] = int(data)
if (self.expected.get(node, int(data)) != int(data)):
global success
print "Got " + data.rstrip() + " instead of " + \
str(self.expected[node]) + " from " + node.name
success = False
reactor.crash()
self.expected[node] = int(data) + 1
if self.expected[node] == 10:
self.done += 1
if self.done == NUMNODES - 1:
reactor.stop()
m = TestMesh()
for x in xrange(0, NUMNODES):
nodes.append(m.addNode("node" + str(x)))
# Connect all nodes to all others. 1024 bytes/s bandwidth, 50ms delay and 50%
# packet loss.. (bandwidth and delay aren't implemented just yet)
m.connect_full(1024, 50, 0.50)
firstnode = nodes[0]
for x in xrange(0, NUMPACKETS):
firstnode.pushInput(str(x) + "\n")
reactor.run()
if not success:
print "FAILED"
sys.exit(-1)
print "SUCCESS"
|
Add a very basic test (just tests if messages are output in the right order by all nodes)
20070605063752-93b9a-171c986a65ac8fcdb791ea4ab34f870f087478dc.gz#!/usr/bin/env python
from twisted.internet import reactor
from mesh import Mesh
import sys
NUMNODES = 5
NUMPACKETS = 10
nodes = []
# We're optimists
success = True
class TestMesh(Mesh):
expected = {}
done = 0
def gotOutput(self, node, data):
if self.expected.get(node) == None:
self.expected[node] = int(data)
if (self.expected.get(node, int(data)) != int(data)):
global success
print "Got " + data.rstrip() + " instead of " + \
str(self.expected[node]) + " from " + node.name
success = False
reactor.crash()
self.expected[node] = int(data) + 1
if self.expected[node] == 10:
self.done += 1
if self.done == NUMNODES - 1:
reactor.stop()
m = TestMesh()
for x in xrange(0, NUMNODES):
nodes.append(m.addNode("node" + str(x)))
# Connect all nodes to all others. 1024 bytes/s bandwidth, 50ms delay and 50%
# packet loss.. (bandwidth and delay aren't implemented just yet)
m.connect_full(1024, 50, 0.50)
firstnode = nodes[0]
for x in xrange(0, NUMPACKETS):
firstnode.pushInput(str(x) + "\n")
reactor.run()
if not success:
print "FAILED"
sys.exit(-1)
print "SUCCESS"
|
<commit_before><commit_msg>Add a very basic test (just tests if messages are output in the right order by all nodes)
20070605063752-93b9a-171c986a65ac8fcdb791ea4ab34f870f087478dc.gz<commit_after>#!/usr/bin/env python
from twisted.internet import reactor
from mesh import Mesh
import sys
NUMNODES = 5
NUMPACKETS = 10
nodes = []
# We're optimists
success = True
class TestMesh(Mesh):
expected = {}
done = 0
def gotOutput(self, node, data):
if self.expected.get(node) == None:
self.expected[node] = int(data)
if (self.expected.get(node, int(data)) != int(data)):
global success
print "Got " + data.rstrip() + " instead of " + \
str(self.expected[node]) + " from " + node.name
success = False
reactor.crash()
self.expected[node] = int(data) + 1
if self.expected[node] == 10:
self.done += 1
if self.done == NUMNODES - 1:
reactor.stop()
m = TestMesh()
for x in xrange(0, NUMNODES):
nodes.append(m.addNode("node" + str(x)))
# Connect all nodes to all others. 1024 bytes/s bandwidth, 50ms delay and 50%
# packet loss.. (bandwidth and delay aren't implemented just yet)
m.connect_full(1024, 50, 0.50)
firstnode = nodes[0]
for x in xrange(0, NUMPACKETS):
firstnode.pushInput(str(x) + "\n")
reactor.run()
if not success:
print "FAILED"
sys.exit(-1)
print "SUCCESS"
|
|
dfa257f02d14ba7757ee65d1910bf581c280607e
|
tests/unit/test_repr.py
|
tests/unit/test_repr.py
|
from butter.eventfd import Eventfd
from butter.fanotify import Fanotify
from butter.inotify import Inotify
from butter.signalfd import Signalfd
from butter.timerfd import Timerfd
import pytest
@pytest.fixture(params=[Eventfd, Fanotify, Inotify, Signalfd, Timerfd])
def obj(request):
Obj = request.param
o = Obj.__new__(Obj)
return o
@pytest.mark.repr
@pytest.mark.unit
def test_repr_name(obj):
obj._fd = 1
assert obj.__class__.__name__ in repr(obj), "Instance's representation does not contain its own name"
@pytest.mark.repr
@pytest.mark.unit
def test_repr_fd(obj):
obj._fd = 1
assert 'fd=1' in repr(obj), "Instance does not list its own fd (used for easy identification)"
@pytest.mark.repr
@pytest.mark.unit
def test_repr_fd_closed(obj):
obj._fd = None
assert 'fd=closed' in repr(obj), "Instance does not indicate it is closed"
|
Test the repr() of object
|
Test the repr() of object
|
Python
|
bsd-3-clause
|
arkaitzj/python-butter
|
Test the repr() of object
|
from butter.eventfd import Eventfd
from butter.fanotify import Fanotify
from butter.inotify import Inotify
from butter.signalfd import Signalfd
from butter.timerfd import Timerfd
import pytest
@pytest.fixture(params=[Eventfd, Fanotify, Inotify, Signalfd, Timerfd])
def obj(request):
Obj = request.param
o = Obj.__new__(Obj)
return o
@pytest.mark.repr
@pytest.mark.unit
def test_repr_name(obj):
obj._fd = 1
assert obj.__class__.__name__ in repr(obj), "Instance's representation does not contain its own name"
@pytest.mark.repr
@pytest.mark.unit
def test_repr_fd(obj):
obj._fd = 1
assert 'fd=1' in repr(obj), "Instance does not list its own fd (used for easy identification)"
@pytest.mark.repr
@pytest.mark.unit
def test_repr_fd_closed(obj):
obj._fd = None
assert 'fd=closed' in repr(obj), "Instance does not indicate it is closed"
|
<commit_before><commit_msg>Test the repr() of object<commit_after>
|
from butter.eventfd import Eventfd
from butter.fanotify import Fanotify
from butter.inotify import Inotify
from butter.signalfd import Signalfd
from butter.timerfd import Timerfd
import pytest
@pytest.fixture(params=[Eventfd, Fanotify, Inotify, Signalfd, Timerfd])
def obj(request):
Obj = request.param
o = Obj.__new__(Obj)
return o
@pytest.mark.repr
@pytest.mark.unit
def test_repr_name(obj):
obj._fd = 1
assert obj.__class__.__name__ in repr(obj), "Instance's representation does not contain its own name"
@pytest.mark.repr
@pytest.mark.unit
def test_repr_fd(obj):
obj._fd = 1
assert 'fd=1' in repr(obj), "Instance does not list its own fd (used for easy identification)"
@pytest.mark.repr
@pytest.mark.unit
def test_repr_fd_closed(obj):
obj._fd = None
assert 'fd=closed' in repr(obj), "Instance does not indicate it is closed"
|
Test the repr() of objectfrom butter.eventfd import Eventfd
from butter.fanotify import Fanotify
from butter.inotify import Inotify
from butter.signalfd import Signalfd
from butter.timerfd import Timerfd
import pytest
@pytest.fixture(params=[Eventfd, Fanotify, Inotify, Signalfd, Timerfd])
def obj(request):
Obj = request.param
o = Obj.__new__(Obj)
return o
@pytest.mark.repr
@pytest.mark.unit
def test_repr_name(obj):
obj._fd = 1
assert obj.__class__.__name__ in repr(obj), "Instance's representation does not contain its own name"
@pytest.mark.repr
@pytest.mark.unit
def test_repr_fd(obj):
obj._fd = 1
assert 'fd=1' in repr(obj), "Instance does not list its own fd (used for easy identification)"
@pytest.mark.repr
@pytest.mark.unit
def test_repr_fd_closed(obj):
obj._fd = None
assert 'fd=closed' in repr(obj), "Instance does not indicate it is closed"
|
<commit_before><commit_msg>Test the repr() of object<commit_after>from butter.eventfd import Eventfd
from butter.fanotify import Fanotify
from butter.inotify import Inotify
from butter.signalfd import Signalfd
from butter.timerfd import Timerfd
import pytest
@pytest.fixture(params=[Eventfd, Fanotify, Inotify, Signalfd, Timerfd])
def obj(request):
Obj = request.param
o = Obj.__new__(Obj)
return o
@pytest.mark.repr
@pytest.mark.unit
def test_repr_name(obj):
obj._fd = 1
assert obj.__class__.__name__ in repr(obj), "Instance's representation does not contain its own name"
@pytest.mark.repr
@pytest.mark.unit
def test_repr_fd(obj):
obj._fd = 1
assert 'fd=1' in repr(obj), "Instance does not list its own fd (used for easy identification)"
@pytest.mark.repr
@pytest.mark.unit
def test_repr_fd_closed(obj):
obj._fd = None
assert 'fd=closed' in repr(obj), "Instance does not indicate it is closed"
|
|
6ee4d6a13a3fb6564e12ef8c8645a96015d39fed
|
seq_spot.py
|
seq_spot.py
|
from matplotlib import pyplot as plt
s=["AATTCGCGCTTACGCTAGGCTAGCTAGGCTCGATTCGAGGCTCGGGATCGATCGGTACGAGGTACGTACTGACTGACT",
"ACTGGGCTATGCGGGATATATATATCGCGACTGACATGCATGCTAGGCGCGCTATAATCGGCGCATATAGCTAGCTAG",
"ACTGACGTACGTAGCTAGCTAGGCTATATAGCGCGCATATCGCGAGTATACGTAGCTAGCTGACTGGCGATATATCGA",
"ACGTGAGCTGATGTGTGAGTACTATATGCGATAGCTACGTAGCTGATCAGCTAGCGATTAGCGCTATAGCTAGCTATG",
"ACTGACTGATATCGATCGGCGCGCGTATAGCGCTATAGCGATCGATGTGACTGATCGATATATATCGGCTATAGCGAT",
"TTGCTAGCTAGATCGTGACTGACTGTGACTGACTGACTGTACGACTGACTGTGACTATCGATACGCTAGATCGACTAT",
"GGCTACGTACGATGCTAGCTAGCTGGGGGGTACGATCGTGACTGACTAAATCGATATATATATAGCTGACTGACTGAT",
"CCCCGCTAGCTATACGTACGCTAGCTAGCTAGCTGCGCGCGATGCGATCGATCGACTGTGACTGACTGACGTGACTGC"]
dic = {"C": (255, 0, 0),
"G": (0 , 255 , 0),
"A": (0, 0, 255),
"T": (255, 255, 0)}
##### ^^^ Not used #####
dic2 = {"A": 1, "C": 2, "G": 3, "T": 4}
tot = 10000
fasta = "psyfer_seq.fasta"
L = []
count = 0
with open(fasta, "rb") as fa:
for line in fa:
if count > 10000:
break
if line[0] == ">":
continue
else:
K = []
for z in line.rstrip():
K.append(dic2[z])
L.append(K + [-1] * (tot - len(K)))
#L.append(K)
count += 1
#print L
im_filename = "test8.png"
fig = plt.figure(1)
plt.imshow(L, interpolation='nearest')
plt.grid(True)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
#plt.show()
plt.savefig(im_filename,format="png", bbox_inches='tight', pad_inches=0)
|
Create dodgy vis of reads sequenced
|
Create dodgy vis of reads sequenced
|
Python
|
mit
|
Psy-Fer/seq_spot
|
Create dodgy vis of reads sequenced
|
from matplotlib import pyplot as plt
s=["AATTCGCGCTTACGCTAGGCTAGCTAGGCTCGATTCGAGGCTCGGGATCGATCGGTACGAGGTACGTACTGACTGACT",
"ACTGGGCTATGCGGGATATATATATCGCGACTGACATGCATGCTAGGCGCGCTATAATCGGCGCATATAGCTAGCTAG",
"ACTGACGTACGTAGCTAGCTAGGCTATATAGCGCGCATATCGCGAGTATACGTAGCTAGCTGACTGGCGATATATCGA",
"ACGTGAGCTGATGTGTGAGTACTATATGCGATAGCTACGTAGCTGATCAGCTAGCGATTAGCGCTATAGCTAGCTATG",
"ACTGACTGATATCGATCGGCGCGCGTATAGCGCTATAGCGATCGATGTGACTGATCGATATATATCGGCTATAGCGAT",
"TTGCTAGCTAGATCGTGACTGACTGTGACTGACTGACTGTACGACTGACTGTGACTATCGATACGCTAGATCGACTAT",
"GGCTACGTACGATGCTAGCTAGCTGGGGGGTACGATCGTGACTGACTAAATCGATATATATATAGCTGACTGACTGAT",
"CCCCGCTAGCTATACGTACGCTAGCTAGCTAGCTGCGCGCGATGCGATCGATCGACTGTGACTGACTGACGTGACTGC"]
dic = {"C": (255, 0, 0),
"G": (0 , 255 , 0),
"A": (0, 0, 255),
"T": (255, 255, 0)}
##### ^^^ Not used #####
dic2 = {"A": 1, "C": 2, "G": 3, "T": 4}
tot = 10000
fasta = "psyfer_seq.fasta"
L = []
count = 0
with open(fasta, "rb") as fa:
for line in fa:
if count > 10000:
break
if line[0] == ">":
continue
else:
K = []
for z in line.rstrip():
K.append(dic2[z])
L.append(K + [-1] * (tot - len(K)))
#L.append(K)
count += 1
#print L
im_filename = "test8.png"
fig = plt.figure(1)
plt.imshow(L, interpolation='nearest')
plt.grid(True)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
#plt.show()
plt.savefig(im_filename,format="png", bbox_inches='tight', pad_inches=0)
|
<commit_before><commit_msg>Create dodgy vis of reads sequenced<commit_after>
|
from matplotlib import pyplot as plt
s=["AATTCGCGCTTACGCTAGGCTAGCTAGGCTCGATTCGAGGCTCGGGATCGATCGGTACGAGGTACGTACTGACTGACT",
"ACTGGGCTATGCGGGATATATATATCGCGACTGACATGCATGCTAGGCGCGCTATAATCGGCGCATATAGCTAGCTAG",
"ACTGACGTACGTAGCTAGCTAGGCTATATAGCGCGCATATCGCGAGTATACGTAGCTAGCTGACTGGCGATATATCGA",
"ACGTGAGCTGATGTGTGAGTACTATATGCGATAGCTACGTAGCTGATCAGCTAGCGATTAGCGCTATAGCTAGCTATG",
"ACTGACTGATATCGATCGGCGCGCGTATAGCGCTATAGCGATCGATGTGACTGATCGATATATATCGGCTATAGCGAT",
"TTGCTAGCTAGATCGTGACTGACTGTGACTGACTGACTGTACGACTGACTGTGACTATCGATACGCTAGATCGACTAT",
"GGCTACGTACGATGCTAGCTAGCTGGGGGGTACGATCGTGACTGACTAAATCGATATATATATAGCTGACTGACTGAT",
"CCCCGCTAGCTATACGTACGCTAGCTAGCTAGCTGCGCGCGATGCGATCGATCGACTGTGACTGACTGACGTGACTGC"]
dic = {"C": (255, 0, 0),
"G": (0 , 255 , 0),
"A": (0, 0, 255),
"T": (255, 255, 0)}
##### ^^^ Not used #####
dic2 = {"A": 1, "C": 2, "G": 3, "T": 4}
tot = 10000
fasta = "psyfer_seq.fasta"
L = []
count = 0
with open(fasta, "rb") as fa:
for line in fa:
if count > 10000:
break
if line[0] == ">":
continue
else:
K = []
for z in line.rstrip():
K.append(dic2[z])
L.append(K + [-1] * (tot - len(K)))
#L.append(K)
count += 1
#print L
im_filename = "test8.png"
fig = plt.figure(1)
plt.imshow(L, interpolation='nearest')
plt.grid(True)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
#plt.show()
plt.savefig(im_filename,format="png", bbox_inches='tight', pad_inches=0)
|
Create dodgy vis of reads sequencedfrom matplotlib import pyplot as plt
s=["AATTCGCGCTTACGCTAGGCTAGCTAGGCTCGATTCGAGGCTCGGGATCGATCGGTACGAGGTACGTACTGACTGACT",
"ACTGGGCTATGCGGGATATATATATCGCGACTGACATGCATGCTAGGCGCGCTATAATCGGCGCATATAGCTAGCTAG",
"ACTGACGTACGTAGCTAGCTAGGCTATATAGCGCGCATATCGCGAGTATACGTAGCTAGCTGACTGGCGATATATCGA",
"ACGTGAGCTGATGTGTGAGTACTATATGCGATAGCTACGTAGCTGATCAGCTAGCGATTAGCGCTATAGCTAGCTATG",
"ACTGACTGATATCGATCGGCGCGCGTATAGCGCTATAGCGATCGATGTGACTGATCGATATATATCGGCTATAGCGAT",
"TTGCTAGCTAGATCGTGACTGACTGTGACTGACTGACTGTACGACTGACTGTGACTATCGATACGCTAGATCGACTAT",
"GGCTACGTACGATGCTAGCTAGCTGGGGGGTACGATCGTGACTGACTAAATCGATATATATATAGCTGACTGACTGAT",
"CCCCGCTAGCTATACGTACGCTAGCTAGCTAGCTGCGCGCGATGCGATCGATCGACTGTGACTGACTGACGTGACTGC"]
dic = {"C": (255, 0, 0),
"G": (0 , 255 , 0),
"A": (0, 0, 255),
"T": (255, 255, 0)}
##### ^^^ Not used #####
dic2 = {"A": 1, "C": 2, "G": 3, "T": 4}
tot = 10000
fasta = "psyfer_seq.fasta"
L = []
count = 0
with open(fasta, "rb") as fa:
for line in fa:
if count > 10000:
break
if line[0] == ">":
continue
else:
K = []
for z in line.rstrip():
K.append(dic2[z])
L.append(K + [-1] * (tot - len(K)))
#L.append(K)
count += 1
#print L
im_filename = "test8.png"
fig = plt.figure(1)
plt.imshow(L, interpolation='nearest')
plt.grid(True)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
#plt.show()
plt.savefig(im_filename,format="png", bbox_inches='tight', pad_inches=0)
|
<commit_before><commit_msg>Create dodgy vis of reads sequenced<commit_after>from matplotlib import pyplot as plt
s=["AATTCGCGCTTACGCTAGGCTAGCTAGGCTCGATTCGAGGCTCGGGATCGATCGGTACGAGGTACGTACTGACTGACT",
"ACTGGGCTATGCGGGATATATATATCGCGACTGACATGCATGCTAGGCGCGCTATAATCGGCGCATATAGCTAGCTAG",
"ACTGACGTACGTAGCTAGCTAGGCTATATAGCGCGCATATCGCGAGTATACGTAGCTAGCTGACTGGCGATATATCGA",
"ACGTGAGCTGATGTGTGAGTACTATATGCGATAGCTACGTAGCTGATCAGCTAGCGATTAGCGCTATAGCTAGCTATG",
"ACTGACTGATATCGATCGGCGCGCGTATAGCGCTATAGCGATCGATGTGACTGATCGATATATATCGGCTATAGCGAT",
"TTGCTAGCTAGATCGTGACTGACTGTGACTGACTGACTGTACGACTGACTGTGACTATCGATACGCTAGATCGACTAT",
"GGCTACGTACGATGCTAGCTAGCTGGGGGGTACGATCGTGACTGACTAAATCGATATATATATAGCTGACTGACTGAT",
"CCCCGCTAGCTATACGTACGCTAGCTAGCTAGCTGCGCGCGATGCGATCGATCGACTGTGACTGACTGACGTGACTGC"]
dic = {"C": (255, 0, 0),
"G": (0 , 255 , 0),
"A": (0, 0, 255),
"T": (255, 255, 0)}
##### ^^^ Not used #####
dic2 = {"A": 1, "C": 2, "G": 3, "T": 4}
tot = 10000
fasta = "psyfer_seq.fasta"
L = []
count = 0
with open(fasta, "rb") as fa:
for line in fa:
if count > 10000:
break
if line[0] == ">":
continue
else:
K = []
for z in line.rstrip():
K.append(dic2[z])
L.append(K + [-1] * (tot - len(K)))
#L.append(K)
count += 1
#print L
im_filename = "test8.png"
fig = plt.figure(1)
plt.imshow(L, interpolation='nearest')
plt.grid(True)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
#plt.show()
plt.savefig(im_filename,format="png", bbox_inches='tight', pad_inches=0)
|
|
caaabeb103879baf3179955b36e845aa878f15f6
|
tools/build_tfmd_docs.py
|
tools/build_tfmd_docs.py
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Generate api reference docs for `tfmd`.
This requires a local installation of `tfmd` and `tensorflow_docs`
```
$ pip install tensorflow_metadata git+https://github.com/tensorflow/docs
```
```
python build_tfmd_docs.py --output_dir=/tmp/tfmd-api
```
"""
from absl import app
from absl import flags
from tensorflow_docs.api_generator import generate_lib
import tensorflow_metadata as tfmd
# `.proto` (which contains all the classes) is not imported by default
import tensorflow_metadata.proto # pylint: disable=unused-import
_OUTPUT_DIR = flags.DEFINE_string('output_dir', '/tmp/tfmd_api/',
'The path to output the files to')
_CODE_URL_PREFIX = flags.DEFINE_string(
'code_url_prefix',
'https://github.com/tensorflow/metadata/tree/master/tensorflow_metadata/proto',
'The url prefix for links to code.')
_SEARCH_HINTS = flags.DEFINE_bool(
'search_hints', True,
'Include metadata search hints in the generated files')
_SITE_PATH = flags.DEFINE_string(
'site_path',
'tfx/tensorflow_metadata/api_docs/python',
'Path prefix in the _toc.yaml')
def main(args):
if args[1:]:
raise ValueError('Unrecognized Command line args', args[1:])
doc_generator = generate_lib.DocGenerator(
root_title='TF-Metadata',
py_modules=[('tfmd.proto', tfmd.proto)],
code_url_prefix=_CODE_URL_PREFIX.value,
search_hints=_SEARCH_HINTS.value,
site_path=_SITE_PATH.value,
callbacks=[])
doc_generator.build(_OUTPUT_DIR.value)
if __name__ == '__main__':
app.run(main)
|
Add api-reference generation script for TFMD.
|
Add api-reference generation script for TFMD.
Output listing: https://paste.googleplex.com/4777420088410112
PiperOrigin-RevId: 431509556
|
Python
|
apache-2.0
|
tensorflow/metadata,tensorflow/metadata
|
Add api-reference generation script for TFMD.
Output listing: https://paste.googleplex.com/4777420088410112
PiperOrigin-RevId: 431509556
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Generate api reference docs for `tfmd`.
This requires a local installation of `tfmd` and `tensorflow_docs`
```
$ pip install tensorflow_metadata git+https://github.com/tensorflow/docs
```
```
python build_tfmd_docs.py --output_dir=/tmp/tfmd-api
```
"""
from absl import app
from absl import flags
from tensorflow_docs.api_generator import generate_lib
import tensorflow_metadata as tfmd
# `.proto` (which contains all the classes) is not imported by default
import tensorflow_metadata.proto # pylint: disable=unused-import
_OUTPUT_DIR = flags.DEFINE_string('output_dir', '/tmp/tfmd_api/',
'The path to output the files to')
_CODE_URL_PREFIX = flags.DEFINE_string(
'code_url_prefix',
'https://github.com/tensorflow/metadata/tree/master/tensorflow_metadata/proto',
'The url prefix for links to code.')
_SEARCH_HINTS = flags.DEFINE_bool(
'search_hints', True,
'Include metadata search hints in the generated files')
_SITE_PATH = flags.DEFINE_string(
'site_path',
'tfx/tensorflow_metadata/api_docs/python',
'Path prefix in the _toc.yaml')
def main(args):
if args[1:]:
raise ValueError('Unrecognized Command line args', args[1:])
doc_generator = generate_lib.DocGenerator(
root_title='TF-Metadata',
py_modules=[('tfmd.proto', tfmd.proto)],
code_url_prefix=_CODE_URL_PREFIX.value,
search_hints=_SEARCH_HINTS.value,
site_path=_SITE_PATH.value,
callbacks=[])
doc_generator.build(_OUTPUT_DIR.value)
if __name__ == '__main__':
app.run(main)
|
<commit_before><commit_msg>Add api-reference generation script for TFMD.
Output listing: https://paste.googleplex.com/4777420088410112
PiperOrigin-RevId: 431509556<commit_after>
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Generate api reference docs for `tfmd`.
This requires a local installation of `tfmd` and `tensorflow_docs`
```
$ pip install tensorflow_metadata git+https://github.com/tensorflow/docs
```
```
python build_tfmd_docs.py --output_dir=/tmp/tfmd-api
```
"""
from absl import app
from absl import flags
from tensorflow_docs.api_generator import generate_lib
import tensorflow_metadata as tfmd
# `.proto` (which contains all the classes) is not imported by default
import tensorflow_metadata.proto # pylint: disable=unused-import
_OUTPUT_DIR = flags.DEFINE_string('output_dir', '/tmp/tfmd_api/',
'The path to output the files to')
_CODE_URL_PREFIX = flags.DEFINE_string(
'code_url_prefix',
'https://github.com/tensorflow/metadata/tree/master/tensorflow_metadata/proto',
'The url prefix for links to code.')
_SEARCH_HINTS = flags.DEFINE_bool(
'search_hints', True,
'Include metadata search hints in the generated files')
_SITE_PATH = flags.DEFINE_string(
'site_path',
'tfx/tensorflow_metadata/api_docs/python',
'Path prefix in the _toc.yaml')
def main(args):
if args[1:]:
raise ValueError('Unrecognized Command line args', args[1:])
doc_generator = generate_lib.DocGenerator(
root_title='TF-Metadata',
py_modules=[('tfmd.proto', tfmd.proto)],
code_url_prefix=_CODE_URL_PREFIX.value,
search_hints=_SEARCH_HINTS.value,
site_path=_SITE_PATH.value,
callbacks=[])
doc_generator.build(_OUTPUT_DIR.value)
if __name__ == '__main__':
app.run(main)
|
Add api-reference generation script for TFMD.
Output listing: https://paste.googleplex.com/4777420088410112
PiperOrigin-RevId: 431509556# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Generate api reference docs for `tfmd`.
This requires a local installation of `tfmd` and `tensorflow_docs`
```
$ pip install tensorflow_metadata git+https://github.com/tensorflow/docs
```
```
python build_tfmd_docs.py --output_dir=/tmp/tfmd-api
```
"""
from absl import app
from absl import flags
from tensorflow_docs.api_generator import generate_lib
import tensorflow_metadata as tfmd
# `.proto` (which contains all the classes) is not imported by default
import tensorflow_metadata.proto # pylint: disable=unused-import
_OUTPUT_DIR = flags.DEFINE_string('output_dir', '/tmp/tfmd_api/',
'The path to output the files to')
_CODE_URL_PREFIX = flags.DEFINE_string(
'code_url_prefix',
'https://github.com/tensorflow/metadata/tree/master/tensorflow_metadata/proto',
'The url prefix for links to code.')
_SEARCH_HINTS = flags.DEFINE_bool(
'search_hints', True,
'Include metadata search hints in the generated files')
_SITE_PATH = flags.DEFINE_string(
'site_path',
'tfx/tensorflow_metadata/api_docs/python',
'Path prefix in the _toc.yaml')
def main(args):
if args[1:]:
raise ValueError('Unrecognized Command line args', args[1:])
doc_generator = generate_lib.DocGenerator(
root_title='TF-Metadata',
py_modules=[('tfmd.proto', tfmd.proto)],
code_url_prefix=_CODE_URL_PREFIX.value,
search_hints=_SEARCH_HINTS.value,
site_path=_SITE_PATH.value,
callbacks=[])
doc_generator.build(_OUTPUT_DIR.value)
if __name__ == '__main__':
app.run(main)
|
<commit_before><commit_msg>Add api-reference generation script for TFMD.
Output listing: https://paste.googleplex.com/4777420088410112
PiperOrigin-RevId: 431509556<commit_after># Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Generate api reference docs for `tfmd`.
This requires a local installation of `tfmd` and `tensorflow_docs`
```
$ pip install tensorflow_metadata git+https://github.com/tensorflow/docs
```
```
python build_tfmd_docs.py --output_dir=/tmp/tfmd-api
```
"""
from absl import app
from absl import flags
from tensorflow_docs.api_generator import generate_lib
import tensorflow_metadata as tfmd
# `.proto` (which contains all the classes) is not imported by default
import tensorflow_metadata.proto # pylint: disable=unused-import
_OUTPUT_DIR = flags.DEFINE_string('output_dir', '/tmp/tfmd_api/',
'The path to output the files to')
_CODE_URL_PREFIX = flags.DEFINE_string(
'code_url_prefix',
'https://github.com/tensorflow/metadata/tree/master/tensorflow_metadata/proto',
'The url prefix for links to code.')
_SEARCH_HINTS = flags.DEFINE_bool(
'search_hints', True,
'Include metadata search hints in the generated files')
_SITE_PATH = flags.DEFINE_string(
'site_path',
'tfx/tensorflow_metadata/api_docs/python',
'Path prefix in the _toc.yaml')
def main(args):
if args[1:]:
raise ValueError('Unrecognized Command line args', args[1:])
doc_generator = generate_lib.DocGenerator(
root_title='TF-Metadata',
py_modules=[('tfmd.proto', tfmd.proto)],
code_url_prefix=_CODE_URL_PREFIX.value,
search_hints=_SEARCH_HINTS.value,
site_path=_SITE_PATH.value,
callbacks=[])
doc_generator.build(_OUTPUT_DIR.value)
if __name__ == '__main__':
app.run(main)
|
|
c123c8ce1094885e3a4d96573f70ebc1130627f6
|
set2/2-2.py
|
set2/2-2.py
|
# This is a hack to make sure Python 2.x and 3.x behave the same
# You can ignore this bit
try:
input = raw_input
except NameError:
pass
# Real program starts here
def hash(string):
char_sum = 0
for c in string:
char_sum += ord(c)
return char_sum % 23
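# Note: this toy hash is just the sum of character codes modulo 23, so many
# different inputs collide with the stored target value (17) below.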
password_hash = 17
print("Please enter the password")
password_input = input()
if(hash(password_input) == password_hash):
print("Success, huzzah")
else:
print("Not success, boo")
print("")
print("Press enter to continue")
input()
|
Add hashing on string input game
|
Set2: Add hashing on string input game
|
Python
|
mit
|
SingingTree/WarGames
|
Set2: Add hashing on string input game
|
# This is a hack to make sure Python 2.x and 3.x behave the same
# You can ignore this bit
try:
input = raw_input
except NameError:
pass
# Real program starts here
def hash(string):
char_sum = 0
for c in string:
char_sum += ord(c)
return char_sum % 23
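# Note: this toy hash is just the sum of character codes modulo 23, so many
# different inputs collide with the stored target value (17) below.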
password_hash = 17
print("Please enter the password")
password_input = input()
if(hash(password_input) == password_hash):
print("Success, huzzah")
else:
print("Not success, boo")
print("")
print("Press enter to continue")
input()
|
<commit_before><commit_msg>Set2: Add hashing on string input game<commit_after>
|
# This is a hack to make sure Python 2.x and 3.x behave the same
# You can ignore this bit
try:
input = raw_input
except NameError:
pass
# Real program starts here
def hash(string):
char_sum = 0
for c in string:
char_sum += ord(c)
return char_sum % 23
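# Note: this toy hash is just the sum of character codes modulo 23, so many
# different inputs collide with the stored target value (17) below.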
password_hash = 17
print("Please enter the password")
password_input = input()
if(hash(password_input) == password_hash):
print("Success, huzzah")
else:
print("Not success, boo")
print("")
print("Press enter to continue")
input()
|
Set2: Add hashing on string input game# This is a hack to make sure Python 2.x and 3.x behave the same
# You can ignore this bit
try:
input = raw_input
except NameError:
pass
# Real program starts here
def hash(string):
char_sum = 0
for c in string:
char_sum += ord(c)
return char_sum % 23
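# Note: this toy hash is just the sum of character codes modulo 23, so many
# different inputs collide with the stored target value (17) below.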
password_hash = 17
print("Please enter the password")
password_input = input()
if(hash(password_input) == password_hash):
print("Success, huzzah")
else:
print("Not success, boo")
print("")
print("Press enter to continue")
input()
|
<commit_before><commit_msg>Set2: Add hashing on string input game<commit_after># This is a hack to make sure Python 2.x and 3.x behave the same
# You can ignore this bit
try:
input = raw_input
except NameError:
pass
# Real program starts here
def hash(string):
char_sum = 0
for c in string:
char_sum += ord(c)
return char_sum % 23
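# Note: this toy hash is just the sum of character codes modulo 23, so many
# different inputs collide with the stored target value (17) below.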
password_hash = 17
print("Please enter the password")
password_input = input()
if(hash(password_input) == password_hash):
print("Success, huzzah")
else:
print("Not success, boo")
print("")
print("Press enter to continue")
input()
|
|
ef4e5cd5c45735a5a82b849b7191fdb8a2a27a59
|
qa/patch_profile.py
|
qa/patch_profile.py
|
import requests
import json
import time
from collections import OrderedDict
from test_framework.test_framework import OpenBazaarTestFramework, TestFailure
class PatchProfileTest(OpenBazaarTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
def run_test(self):
alice = self.nodes[0]
api_url = alice["gateway_url"] + "ob/profile"
not_found = TestFailure("PatchProfileTest - FAIL: Profile post endpoint not found")
# create profile
pro = {"name": "Alice", "nsfw": True, "email": "alice@example.com"}
r = requests.post(api_url, data=json.dumps(pro, indent=4))
if r.status_code == 404:
raise not_found
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("PatchProfileTest - FAIL: Profile POST failed. Reason: %s", r["reason"])
time.sleep(4)
# patch profile
pro_patch = {"nsfw": False, "email": "alice777@example.com"}
r = requests.patch(api_url, data=json.dumps(pro_patch, indent=4))
if r.status_code == 404:
raise not_found
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("PatchProfileTest - FAIL: Profile PATCH failed. Reason: %s", r["reason"])
# check profile
r = requests.get(api_url)
if r.status_code == 404:
raise not_found
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("PatchProfileTest - FAIL: Profile GET failed. Reason: %s", r["reason"])
else:
resp = json.loads(r.text)
if resp["name"] != "Alice" or resp["nsfw"] != False or resp["email"] != "alice777@example.com":
raise TestFailure("PatchProfileTest - FAIL: Incorrect result of profile PATCH")
print("PatchProfileTest - PASS")
if __name__ == '__main__':
print("Running PatchProfileTest")
PatchProfileTest().main(["--regtest", "--disableexchangerates"])
|
Add test for Profile PATCH method
|
Add test for Profile PATCH method
|
Python
|
mit
|
cpacia/openbazaar-go,jackkleeman/openbazaar-go,gubatron/openbazaar-go,yurizhykin/openbazaar-go,OpenBazaar/openbazaar-go,JustinDrake/openbazaar-go,jackkleeman/openbazaar-go,cpacia/openbazaar-go,gubatron/openbazaar-go,gubatron/openbazaar-go,duomarket/openbazaar-test-nodes,yurizhykin/openbazaar-go,hoffmabc/openbazaar-go,OpenBazaar/openbazaar-go,OpenBazaar/openbazaar-go,duomarket/openbazaar-test-nodes,JustinDrake/openbazaar-go,yurizhykin/openbazaar-go,cpacia/openbazaar-go,hoffmabc/openbazaar-go,JustinDrake/openbazaar-go,hoffmabc/openbazaar-go,duomarket/openbazaar-test-nodes,jackkleeman/openbazaar-go
|
Add test for Profile PATCH method
|
import requests
import json
import time
from collections import OrderedDict
from test_framework.test_framework import OpenBazaarTestFramework, TestFailure
class PatchProfileTest(OpenBazaarTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
def run_test(self):
alice = self.nodes[0]
api_url = alice["gateway_url"] + "ob/profile"
not_found = TestFailure("PatchProfileTest - FAIL: Profile post endpoint not found")
# create profile
pro = {"name": "Alice", "nsfw": True, "email": "alice@example.com"}
r = requests.post(api_url, data=json.dumps(pro, indent=4))
if r.status_code == 404:
raise not_found
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("PatchProfileTest - FAIL: Profile POST failed. Reason: %s", r["reason"])
time.sleep(4)
# patch profile
pro_patch = {"nsfw": False, "email": "alice777@example.com"}
r = requests.patch(api_url, data=json.dumps(pro_patch, indent=4))
if r.status_code == 404:
raise not_found
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("PatchProfileTest - FAIL: Profile PATCH failed. Reason: %s", r["reason"])
# check profile
r = requests.get(api_url)
if r.status_code == 404:
raise not_found
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("PatchProfileTest - FAIL: Profile GET failed. Reason: %s", r["reason"])
else:
resp = json.loads(r.text)
if resp["name"] != "Alice" or resp["nsfw"] != False or resp["email"] != "alice777@example.com":
raise TestFailure("PatchProfileTest - FAIL: Incorrect result of profile PATCH")
print("PatchProfileTest - PASS")
if __name__ == '__main__':
print("Running PatchProfileTest")
PatchProfileTest().main(["--regtest", "--disableexchangerates"])
|
<commit_before><commit_msg>Add test for Profile PATCH method<commit_after>
|
import requests
import json
import time
from collections import OrderedDict
from test_framework.test_framework import OpenBazaarTestFramework, TestFailure
class PatchProfileTest(OpenBazaarTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
def run_test(self):
alice = self.nodes[0]
api_url = alice["gateway_url"] + "ob/profile"
not_found = TestFailure("PatchProfileTest - FAIL: Profile post endpoint not found")
# create profile
pro = {"name": "Alice", "nsfw": True, "email": "alice@example.com"}
r = requests.post(api_url, data=json.dumps(pro, indent=4))
if r.status_code == 404:
raise not_found
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("PatchProfileTest - FAIL: Profile POST failed. Reason: %s", r["reason"])
time.sleep(4)
# patch profile
pro_patch = {"nsfw": False, "email": "alice777@example.com"}
r = requests.patch(api_url, data=json.dumps(pro_patch, indent=4))
if r.status_code == 404:
raise not_found
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("PatchProfileTest - FAIL: Profile PATCH failed. Reason: %s", r["reason"])
# check profile
r = requests.get(api_url)
if r.status_code == 404:
raise not_found
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("PatchProfileTest - FAIL: Profile GET failed. Reason: %s", r["reason"])
else:
resp = json.loads(r.text)
if resp["name"] != "Alice" or resp["nsfw"] != False or resp["email"] != "alice777@example.com":
raise TestFailure("PatchProfileTest - FAIL: Incorrect result of profile PATCH")
print("PatchProfileTest - PASS")
if __name__ == '__main__':
print("Running PatchProfileTest")
PatchProfileTest().main(["--regtest", "--disableexchangerates"])
|
Add test for Profile PATCH methodimport requests
import json
import time
from collections import OrderedDict
from test_framework.test_framework import OpenBazaarTestFramework, TestFailure
class PatchProfileTest(OpenBazaarTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
def run_test(self):
alice = self.nodes[0]
api_url = alice["gateway_url"] + "ob/profile"
not_found = TestFailure("PatchProfileTest - FAIL: Profile post endpoint not found")
# create profile
pro = {"name": "Alice", "nsfw": True, "email": "alice@example.com"}
r = requests.post(api_url, data=json.dumps(pro, indent=4))
if r.status_code == 404:
raise not_found
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("PatchProfileTest - FAIL: Profile POST failed. Reason: %s", r["reason"])
time.sleep(4)
# patch profile
pro_patch = {"nsfw": False, "email": "alice777@example.com"}
r = requests.patch(api_url, data=json.dumps(pro_patch, indent=4))
if r.status_code == 404:
raise not_found
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("PatchProfileTest - FAIL: Profile PATCH failed. Reason: %s", r["reason"])
# check profile
r = requests.get(api_url)
if r.status_code == 404:
raise not_found
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("PatchProfileTest - FAIL: Profile GET failed. Reason: %s", r["reason"])
else:
resp = json.loads(r.text)
if resp["name"] != "Alice" or resp["nsfw"] != False or resp["email"] != "alice777@example.com":
raise TestFailure("PatchProfileTest - FAIL: Incorrect result of profile PATCH")
print("PatchProfileTest - PASS")
if __name__ == '__main__':
print("Running PatchProfileTest")
PatchProfileTest().main(["--regtest", "--disableexchangerates"])
|
<commit_before><commit_msg>Add test for Profile PATCH method<commit_after>import requests
import json
import time
from collections import OrderedDict
from test_framework.test_framework import OpenBazaarTestFramework, TestFailure
class PatchProfileTest(OpenBazaarTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
def run_test(self):
alice = self.nodes[0]
api_url = alice["gateway_url"] + "ob/profile"
not_found = TestFailure("PatchProfileTest - FAIL: Profile post endpoint not found")
# create profile
pro = {"name": "Alice", "nsfw": True, "email": "alice@example.com"}
r = requests.post(api_url, data=json.dumps(pro, indent=4))
if r.status_code == 404:
raise not_found
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("PatchProfileTest - FAIL: Profile POST failed. Reason: %s", r["reason"])
time.sleep(4)
# patch profile
pro_patch = {"nsfw": False, "email": "alice777@example.com"}
r = requests.patch(api_url, data=json.dumps(pro_patch, indent=4))
if r.status_code == 404:
raise not_found
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("PatchProfileTest - FAIL: Profile PATCH failed. Reason: %s", r["reason"])
# check profile
r = requests.get(api_url)
if r.status_code == 404:
raise not_found
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("PatchProfileTest - FAIL: Profile GET failed. Reason: %s", r["reason"])
else:
resp = json.loads(r.text)
if resp["name"] != "Alice" or resp["nsfw"] != False or resp["email"] != "alice777@example.com":
raise TestFailure("PatchProfileTest - FAIL: Incorrect result of profile PATCH")
print("PatchProfileTest - PASS")
if __name__ == '__main__':
print("Running PatchProfileTest")
PatchProfileTest().main(["--regtest", "--disableexchangerates"])
|
|
1af0151c960e084aa3d970e32af396c855e2f90a
|
mccurse/cli.py
|
mccurse/cli.py
|
"""Command line interface to the package."""
import curses
from logging import ERROR, INFO
import click
from . import _, log
from .curse import Game
from .util import default_data_dir
@click.group()
@click.version_option()
@click.option('--refresh', is_flag=True, default=False,
help=_('Force refresh of existing mods list.'))
@click.option('--quiet', '-q', is_flag=True, default=False,
help=_('Silence the process reporting.'))
@click.pass_context
def cli(ctx, quiet, refresh):
"""Unofficial CLI client for Minecraft Curse Forge."""
# Context for the subcommands
ctx.obj = {
'default_game': Game.find('Minecraft'), # Default game to query and use
'token_path': default_data_dir() / 'token.yaml', # Authorization token location
}
# Common setup
# Setup terminal for querying (number of colors, etc.)
curses.setupterm()
# Setup appropriate logging level
log.setLevel(INFO if not quiet else ERROR)
# Refresh game data if necessary
if refresh or not ctx.obj['default_game'].have_fresh_data():
log.info(_('Refreshing game data, please wait.'))
ctx.obj['default_game'].refresh_data()
|
Implement common group setup for CLI
|
Implement common group setup for CLI
|
Python
|
agpl-3.0
|
khardix/mccurse
|
Implement common group setup for CLI
|
"""Command line interface to the package."""
import curses
from logging import ERROR, INFO
import click
from . import _, log
from .curse import Game
from .util import default_data_dir
@click.group()
@click.version_option()
@click.option('--refresh', is_flag=True, default=False,
help=_('Force refresh of existing mods list.'))
@click.option('--quiet', '-q', is_flag=True, default=False,
help=_('Silence the process reporting.'))
@click.pass_context
def cli(ctx, quiet, refresh):
"""Unofficial CLI client for Minecraft Curse Forge."""
# Context for the subcommands
ctx.obj = {
'default_game': Game.find('Minecraft'), # Default game to query and use
'token_path': default_data_dir() / 'token.yaml', # Authorization token location
}
# Common setup
# Setup terminal for querying (number of colors, etc.)
curses.setupterm()
# Setup appropriate logging level
log.setLevel(INFO if not quiet else ERROR)
# Refresh game data if necessary
if refresh or not ctx.obj['default_game'].have_fresh_data():
log.info(_('Refreshing game data, please wait.'))
ctx.obj['default_game'].refresh_data()
|
<commit_before><commit_msg>Implement common group setup for CLI<commit_after>
|
"""Command line interface to the package."""
import curses
from logging import ERROR, INFO
import click
from . import _, log
from .curse import Game
from .util import default_data_dir
@click.group()
@click.version_option()
@click.option('--refresh', is_flag=True, default=False,
help=_('Force refresh of existing mods list.'))
@click.option('--quiet', '-q', is_flag=True, default=False,
help=_('Silence the process reporting.'))
@click.pass_context
def cli(ctx, quiet, refresh):
"""Unofficial CLI client for Minecraft Curse Forge."""
# Context for the subcommands
ctx.obj = {
'default_game': Game.find('Minecraft'), # Default game to query and use
'token_path': default_data_dir() / 'token.yaml', # Authorization token location
}
# Common setup
# Setup terminal for querying (number of colors, etc.)
curses.setupterm()
# Setup appropriate logging level
log.setLevel(INFO if not quiet else ERROR)
# Refresh game data if necessary
if refresh or not ctx.obj['default_game'].have_fresh_data():
log.info(_('Refreshing game data, please wait.'))
ctx.obj['default_game'].refresh_data()
|
Implement common group setup for CLI"""Command line interface to the package."""
import curses
from logging import ERROR, INFO
import click
from . import _, log
from .curse import Game
from .util import default_data_dir
@click.group()
@click.version_option()
@click.option('--refresh', is_flag=True, default=False,
help=_('Force refresh of existing mods list.'))
@click.option('--quiet', '-q', is_flag=True, default=False,
help=_('Silence the process reporting.'))
@click.pass_context
def cli(ctx, quiet, refresh):
"""Unofficial CLI client for Minecraft Curse Forge."""
# Context for the subcommands
ctx.obj = {
'default_game': Game.find('Minecraft'), # Default game to query and use
'token_path': default_data_dir() / 'token.yaml', # Authorization token location
}
# Common setup
# Setup terminal for querying (number of colors, etc.)
curses.setupterm()
# Setup appropriate logging level
log.setLevel(INFO if not quiet else ERROR)
# Refresh game data if necessary
if refresh or not ctx.obj['default_game'].have_fresh_data():
log.info(_('Refreshing game data, please wait.'))
ctx.obj['default_game'].refresh_data()
|
<commit_before><commit_msg>Implement common group setup for CLI<commit_after>"""Command line interface to the package."""
import curses
from logging import ERROR, INFO
import click
from . import _, log
from .curse import Game
from .util import default_data_dir
@click.group()
@click.version_option()
@click.option('--refresh', is_flag=True, default=False,
help=_('Force refresh of existing mods list.'))
@click.option('--quiet', '-q', is_flag=True, default=False,
help=_('Silence the process reporting.'))
@click.pass_context
def cli(ctx, quiet, refresh):
"""Unofficial CLI client for Minecraft Curse Forge."""
# Context for the subcommands
ctx.obj = {
'default_game': Game.find('Minecraft'), # Default game to query and use
'token_path': default_data_dir() / 'token.yaml', # Authorization token location
}
# Common setup
# Setup terminal for querying (number of colors, etc.)
curses.setupterm()
# Setup appropriate logging level
log.setLevel(INFO if not quiet else ERROR)
# Refresh game data if necessary
if refresh or not ctx.obj['default_game'].have_fresh_data():
log.info(_('Refreshing game data, please wait.'))
ctx.obj['default_game'].refresh_data()
|
|
ae0ee6834feaefb77d8f6776b62c0d1b4621c84b
|
byceps/util/sentry.py
|
byceps/util/sentry.py
|
"""
byceps.util.sentry
~~~~~~~~~~~~~~~~~~
Sentry_ integration
.. _Sentry: https://sentry.io/
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from flask import Flask
def configure_sentry_for_webapp(dsn: str, environment: str, app: Flask) -> None:
"""Initialize and configure the Sentry SDK for the Flask-based web
application (both in 'admin' and 'site' modes).
"""
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[FlaskIntegration()],
)
sentry_sdk.set_tag('app_mode', app.config.get('APP_MODE'))
sentry_sdk.set_tag('site_id', app.config.get('SITE_ID'))
def configure_sentry_for_worker(dsn: str, environment: str) -> None:
"""Initialize and configure the Sentry SDK for the RQ worker."""
import sentry_sdk
from sentry_sdk.integrations.rq import RqIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[RqIntegration()],
)
sentry_sdk.set_tag('app_mode', 'worker')
|
Add utilities to integrate with Sentry
|
Add utilities to integrate with Sentry
|
Python
|
bsd-3-clause
|
homeworkprod/byceps,homeworkprod/byceps,homeworkprod/byceps
|
Add utilities to integrate with Sentry
|
"""
byceps.util.sentry
~~~~~~~~~~~~~~~~~~
Sentry_ integration
.. _Sentry: https://sentry.io/
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from flask import Flask
def configure_sentry_for_webapp(dsn: str, environment: str, app: Flask) -> None:
"""Initialize and configure the Sentry SDK for the Flask-based web
application (both in 'admin' and 'site' modes).
"""
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[FlaskIntegration()],
)
sentry_sdk.set_tag('app_mode', app.config.get('APP_MODE'))
sentry_sdk.set_tag('site_id', app.config.get('SITE_ID'))
def configure_sentry_for_worker(dsn: str, environment: str) -> None:
"""Initialize and configure the Sentry SDK for the RQ worker."""
import sentry_sdk
from sentry_sdk.integrations.rq import RqIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[RqIntegration()],
)
sentry_sdk.set_tag('app_mode', 'worker')
|
<commit_before><commit_msg>Add utilities to integrate with Sentry<commit_after>
|
"""
byceps.util.sentry
~~~~~~~~~~~~~~~~~~
Sentry_ integration
.. _Sentry: https://sentry.io/
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from flask import Flask
def configure_sentry_for_webapp(dsn: str, environment: str, app: Flask) -> None:
"""Initialize and configure the Sentry SDK for the Flask-based web
application (both in 'admin' and 'site' modes).
"""
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[FlaskIntegration()],
)
sentry_sdk.set_tag('app_mode', app.config.get('APP_MODE'))
sentry_sdk.set_tag('site_id', app.config.get('SITE_ID'))
def configure_sentry_for_worker(dsn: str, environment: str) -> None:
"""Initialize and configure the Sentry SDK for the RQ worker."""
import sentry_sdk
from sentry_sdk.integrations.rq import RqIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[RqIntegration()],
)
sentry_sdk.set_tag('app_mode', 'worker')
|
Add utilities to integrate with Sentry"""
byceps.util.sentry
~~~~~~~~~~~~~~~~~~
Sentry_ integration
.. _Sentry: https://sentry.io/
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from flask import Flask
def configure_sentry_for_webapp(dsn: str, environment: str, app: Flask) -> None:
"""Initialize and configure the Sentry SDK for the Flask-based web
application (both in 'admin' and 'site' modes).
"""
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[FlaskIntegration()],
)
sentry_sdk.set_tag('app_mode', app.config.get('APP_MODE'))
sentry_sdk.set_tag('site_id', app.config.get('SITE_ID'))
def configure_sentry_for_worker(dsn: str, environment: str) -> None:
"""Initialize and configure the Sentry SDK for the RQ worker."""
import sentry_sdk
from sentry_sdk.integrations.rq import RqIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[RqIntegration()],
)
sentry_sdk.set_tag('app_mode', 'worker')
|
<commit_before><commit_msg>Add utilities to integrate with Sentry<commit_after>"""
byceps.util.sentry
~~~~~~~~~~~~~~~~~~
Sentry_ integration
.. _Sentry: https://sentry.io/
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from flask import Flask
def configure_sentry_for_webapp(dsn: str, environment: str, app: Flask) -> None:
"""Initialize and configure the Sentry SDK for the Flask-based web
application (both in 'admin' and 'site' modes).
"""
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[FlaskIntegration()],
)
sentry_sdk.set_tag('app_mode', app.config.get('APP_MODE'))
sentry_sdk.set_tag('site_id', app.config.get('SITE_ID'))
def configure_sentry_for_worker(dsn: str, environment: str) -> None:
"""Initialize and configure the Sentry SDK for the RQ worker."""
import sentry_sdk
from sentry_sdk.integrations.rq import RqIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[RqIntegration()],
)
sentry_sdk.set_tag('app_mode', 'worker')
|
|
43d5c85a1b91719cabbc4a92bfd65feceb6cedfc
|
py/random-pick-index.py
|
py/random-pick-index.py
|
from random import random
class Solution(object):
def __init__(self, nums):
"""
:type nums: List[int]
:type numsSize: int
"""
self.nums = nums
def pick(self, target):
"""
:type target: int
:rtype: int
"""
meet = 0
choice = None
for i, n in enumerate(self.nums):
if n == target:
meet += 1
if random() * meet < 1:
choice = i
return choice
# Your Solution object will be instantiated and called as such:
# obj = Solution(nums)
# param_1 = obj.pick(target)
|
Add py solution for 398. Random Pick Index
|
Add py solution for 398. Random Pick Index
398. Random Pick Index: https://leetcode.com/problems/random-pick-index/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 398. Random Pick Index
398. Random Pick Index: https://leetcode.com/problems/random-pick-index/
|
from random import random
class Solution(object):
def __init__(self, nums):
"""
:type nums: List[int]
:type numsSize: int
"""
self.nums = nums
def pick(self, target):
"""
:type target: int
:rtype: int
"""
meet = 0
choice = None
for i, n in enumerate(self.nums):
if n == target:
meet += 1
if random() * meet < 1:
choice = i
return choice
# Your Solution object will be instantiated and called as such:
# obj = Solution(nums)
# param_1 = obj.pick(target)
|
<commit_before><commit_msg>Add py solution for 398. Random Pick Index
398. Random Pick Index: https://leetcode.com/problems/random-pick-index/<commit_after>
|
from random import random
class Solution(object):
def __init__(self, nums):
"""
:type nums: List[int]
:type numsSize: int
"""
self.nums = nums
def pick(self, target):
"""
:type target: int
:rtype: int
"""
meet = 0
choice = None
for i, n in enumerate(self.nums):
if n == target:
meet += 1
if random() * meet < 1:
choice = i
return choice
# Your Solution object will be instantiated and called as such:
# obj = Solution(nums)
# param_1 = obj.pick(target)
|
Add py solution for 398. Random Pick Index
398. Random Pick Index: https://leetcode.com/problems/random-pick-index/from random import random
class Solution(object):
def __init__(self, nums):
"""
:type nums: List[int]
:type numsSize: int
"""
self.nums = nums
def pick(self, target):
"""
:type target: int
:rtype: int
"""
meet = 0
choice = None
for i, n in enumerate(self.nums):
if n == target:
meet += 1
if random() * meet < 1:
choice = i
return choice
# Your Solution object will be instantiated and called as such:
# obj = Solution(nums)
# param_1 = obj.pick(target)
|
<commit_before><commit_msg>Add py solution for 398. Random Pick Index
398. Random Pick Index: https://leetcode.com/problems/random-pick-index/<commit_after>from random import random
class Solution(object):
def __init__(self, nums):
"""
:type nums: List[int]
:type numsSize: int
"""
self.nums = nums
def pick(self, target):
"""
:type target: int
:rtype: int
"""
meet = 0
choice = None
for i, n in enumerate(self.nums):
if n == target:
meet += 1
if random() * meet < 1:
choice = i
return choice
# Your Solution object will be instantiated and called as such:
# obj = Solution(nums)
# param_1 = obj.pick(target)
|
|
fbc96cdd95a007bf0c1f6c7439113e8f41cef876
|
py/range-addition-ii.py
|
py/range-addition-ii.py
|
from operator import itemgetter
class Solution(object):
def maxCount(self, m, n, ops):
"""
:type m: int
:type n: int
:type ops: List[List[int]]
:rtype: int
"""
if not ops:
return m * n
return min(map(itemgetter(0), ops)) * min(map(itemgetter(1), ops))
|
Add py solution for 598. Range Addition II
|
Add py solution for 598. Range Addition II
598. Range Addition II: https://leetcode.com/problems/range-addition-ii/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 598. Range Addition II
598. Range Addition II: https://leetcode.com/problems/range-addition-ii/
|
from operator import itemgetter
class Solution(object):
def maxCount(self, m, n, ops):
"""
:type m: int
:type n: int
:type ops: List[List[int]]
:rtype: int
"""
if not ops:
return m * n
return min(map(itemgetter(0), ops)) * min(map(itemgetter(1), ops))
|
<commit_before><commit_msg>Add py solution for 598. Range Addition II
598. Range Addition II: https://leetcode.com/problems/range-addition-ii/<commit_after>
|
from operator import itemgetter
class Solution(object):
def maxCount(self, m, n, ops):
"""
:type m: int
:type n: int
:type ops: List[List[int]]
:rtype: int
"""
if not ops:
return m * n
return min(map(itemgetter(0), ops)) * min(map(itemgetter(1), ops))
|
Add py solution for 598. Range Addition II
598. Range Addition II: https://leetcode.com/problems/range-addition-ii/from operator import itemgetter
class Solution(object):
def maxCount(self, m, n, ops):
"""
:type m: int
:type n: int
:type ops: List[List[int]]
:rtype: int
"""
if not ops:
return m * n
return min(map(itemgetter(0), ops)) * min(map(itemgetter(1), ops))
|
<commit_before><commit_msg>Add py solution for 598. Range Addition II
598. Range Addition II: https://leetcode.com/problems/range-addition-ii/<commit_after>from operator import itemgetter
class Solution(object):
def maxCount(self, m, n, ops):
"""
:type m: int
:type n: int
:type ops: List[List[int]]
:rtype: int
"""
if not ops:
return m * n
return min(map(itemgetter(0), ops)) * min(map(itemgetter(1), ops))
|
|
a4ae7acabf37de8ef6142c7c402cd663629c4aa9
|
bo5.py
|
bo5.py
|
from collections import namedtuple
import random
W = 800
H = 600
RED = 200, 0, 0
BLUE = 0, 0, 200
GREEN = 0, 200, 0
ball = Rect((W/2, H/2), (30, 30))
Direction = namedtuple('Direction', 'x y')
ball_dir = Direction(5, 5)
bat = Rect((W/2, 0.96 * H), (150, 15))
N_BLOCKS = 8
BLOCK_W = W / N_BLOCKS
BLOCK_H = BLOCK_W / 4
blocks = []
for n_block in range(N_BLOCKS):
block = Rect((n_block * BLOCK_W, 0), (BLOCK_W, BLOCK_H))
blocks.append((block, random.choice([RED, GREEN, BLUE])))
def draw_blocks():
for block, colour in blocks:
print(block)
screen.draw.filled_rect(block, colour)
def draw():
screen.clear()
screen.draw.filled_rect(ball, RED)
screen.draw.filled_rect(bat, RED)
draw_blocks()
def on_key_down():
import sys
sys.exit()
def on_mouse_move(pos):
x, y = pos
bat.center = (x, bat.center[1])
def move(ball):
"""returns a new ball at a new position
"""
global ball_dir
ball.move_ip(ball_dir)
def boubce():
if ball.x > W or ball.x <= 0:
ball_dir = Direction(-1 * ball_dir.x, ball_dir.y)
if ball.y > H or ball.y <= 0:
ball_dir = Direction(ball_dir.x, ball_dir.y * -1)
def update():
move(ball)
|
Add start of top blocks
|
Add start of top blocks
|
Python
|
mit
|
tomviner/breakout
|
Add start of top blocks
|
from collections import namedtuple
import random
W = 800
H = 600
RED = 200, 0, 0
BLUE = 0, 0, 200
GREEN = 0, 200, 0
ball = Rect((W/2, H/2), (30, 30))
Direction = namedtuple('Direction', 'x y')
ball_dir = Direction(5, 5)
bat = Rect((W/2, 0.96 * H), (150, 15))
N_BLOCKS = 8
BLOCK_W = W / N_BLOCKS
BLOCK_H = BLOCK_W / 4
blocks = []
for n_block in range(N_BLOCKS):
block = Rect((n_block * BLOCK_W, 0), (BLOCK_W, BLOCK_H))
blocks.append((block, random.choice([RED, GREEN, BLUE])))
def draw_blocks():
for block, colour in blocks:
print(block)
screen.draw.filled_rect(block, colour)
def draw():
screen.clear()
screen.draw.filled_rect(ball, RED)
screen.draw.filled_rect(bat, RED)
draw_blocks()
def on_key_down():
import sys
sys.exit()
def on_mouse_move(pos):
x, y = pos
bat.center = (x, bat.center[1])
def move(ball):
"""returns a new ball at a new position
"""
global ball_dir
ball.move_ip(ball_dir)
def boubce():
if ball.x > W or ball.x <= 0:
ball_dir = Direction(-1 * ball_dir.x, ball_dir.y)
if ball.y > H or ball.y <= 0:
ball_dir = Direction(ball_dir.x, ball_dir.y * -1)
def update():
move(ball)
|
<commit_before><commit_msg>Add start of top blocks<commit_after>
|
from collections import namedtuple
import random
W = 800
H = 600
RED = 200, 0, 0
BLUE = 0, 0, 200
GREEN = 0, 200, 0
ball = Rect((W/2, H/2), (30, 30))
Direction = namedtuple('Direction', 'x y')
ball_dir = Direction(5, 5)
bat = Rect((W/2, 0.96 * H), (150, 15))
N_BLOCKS = 8
BLOCK_W = W / N_BLOCKS
BLOCK_H = BLOCK_W / 4
blocks = []
for n_block in range(N_BLOCKS):
block = Rect((n_block * BLOCK_W, 0), (BLOCK_W, BLOCK_H))
blocks.append((block, random.choice([RED, GREEN, BLUE])))
def draw_blocks():
for block, colour in blocks:
print(block)
screen.draw.filled_rect(block, colour)
def draw():
screen.clear()
screen.draw.filled_rect(ball, RED)
screen.draw.filled_rect(bat, RED)
draw_blocks()
def on_key_down():
import sys
sys.exit()
def on_mouse_move(pos):
x, y = pos
bat.center = (x, bat.center[1])
def move(ball):
"""returns a new ball at a new position
"""
global ball_dir
ball.move_ip(ball_dir)
def boubce():
if ball.x > W or ball.x <= 0:
ball_dir = Direction(-1 * ball_dir.x, ball_dir.y)
if ball.y > H or ball.y <= 0:
ball_dir = Direction(ball_dir.x, ball_dir.y * -1)
def update():
move(ball)
|
Add start of top blocksfrom collections import namedtuple
import random
W = 800
H = 600
RED = 200, 0, 0
BLUE = 0, 0, 200
GREEN = 0, 200, 0
ball = Rect((W/2, H/2), (30, 30))
Direction = namedtuple('Direction', 'x y')
ball_dir = Direction(5, 5)
bat = Rect((W/2, 0.96 * H), (150, 15))
N_BLOCKS = 8
BLOCK_W = W / N_BLOCKS
BLOCK_H = BLOCK_W / 4
blocks = []
for n_block in range(N_BLOCKS):
block = Rect((n_block * BLOCK_W, 0), (BLOCK_W, BLOCK_H))
blocks.append((block, random.choice([RED, GREEN, BLUE])))
def draw_blocks():
for block, colour in blocks:
print(block)
screen.draw.filled_rect(block, colour)
def draw():
screen.clear()
screen.draw.filled_rect(ball, RED)
screen.draw.filled_rect(bat, RED)
draw_blocks()
def on_key_down():
import sys
sys.exit()
def on_mouse_move(pos):
x, y = pos
bat.center = (x, bat.center[1])
def move(ball):
"""returns a new ball at a new position
"""
global ball_dir
ball.move_ip(ball_dir)
def boubce():
if ball.x > W or ball.x <= 0:
ball_dir = Direction(-1 * ball_dir.x, ball_dir.y)
if ball.y > H or ball.y <= 0:
ball_dir = Direction(ball_dir.x, ball_dir.y * -1)
def update():
move(ball)
|
<commit_before><commit_msg>Add start of top blocks<commit_after>from collections import namedtuple
import random
W = 800
H = 600
RED = 200, 0, 0
BLUE = 0, 0, 200
GREEN = 0, 200, 0
ball = Rect((W/2, H/2), (30, 30))
Direction = namedtuple('Direction', 'x y')
ball_dir = Direction(5, 5)
bat = Rect((W/2, 0.96 * H), (150, 15))
N_BLOCKS = 8
BLOCK_W = W / N_BLOCKS
BLOCK_H = BLOCK_W / 4
blocks = []
for n_block in range(N_BLOCKS):
block = Rect((n_block * BLOCK_W, 0), (BLOCK_W, BLOCK_H))
blocks.append((block, random.choice([RED, GREEN, BLUE])))
def draw_blocks():
for block, colour in blocks:
print(block)
screen.draw.filled_rect(block, colour)
def draw():
screen.clear()
screen.draw.filled_rect(ball, RED)
screen.draw.filled_rect(bat, RED)
draw_blocks()
def on_key_down():
import sys
sys.exit()
def on_mouse_move(pos):
x, y = pos
bat.center = (x, bat.center[1])
def move(ball):
"""returns a new ball at a new position
"""
global ball_dir
ball.move_ip(ball_dir)
def boubce():
if ball.x > W or ball.x <= 0:
ball_dir = Direction(-1 * ball_dir.x, ball_dir.y)
if ball.y > H or ball.y <= 0:
ball_dir = Direction(ball_dir.x, ball_dir.y * -1)
def update():
move(ball)
|
|
bab5a974c78a2b7042de449c6d3b01f9297809f1
|
src/python/utexas/tools/generate_ddl.py
|
src/python/utexas/tools/generate_ddl.py
|
"""
utexas/tools/generate_ddl.py
Print or apply the research data schema.
@author: Bryan Silverthorn <bcs@cargo-cult.org>
"""
if __name__ == "__main__":
from utexas.tools.generate_ddl import main
raise SystemExit(main())
from cargo.flags import (
Flag,
Flags,
)
module_flags = \
Flags(
"Research Data Storage",
Flag(
"-a",
"--apply",
action = "store_true",
help = "create the generated schema",
),
Flag(
"-r",
"--reflect",
action = "store_true",
help = "load the reflected schema",
),
Flag(
"-t",
"--topological",
action = "store_true",
help = "print topologically sorted by dependency",
),
)
def main():
"""
Create core database metadata.
"""
# connect to the database
from cargo.flags import parse_given
from utexas.data import research_connect
parse_given(usage = "usage: %prog [options]")
engine = research_connect()
# load the appropriate schema
if module_flags.given.reflect:
# use the database's schema
from sqlalchemy.schema import MetaData
metadata = MetaData()
metadata.reflect(bind = engine)
else:
# use the project-defined schema
from utexas.data import DatumBase
metadata = DatumBase.metadata
# then do something with it
if module_flags.given.apply:
# apply the DDL to the database
metadata.create_all(engine)
else:
# print the DDL
from sqlalchemy.schema import CreateTable
if module_flags.given.topological:
sorted_tables = metadata.sorted_tables
else:
sorted_tables = sorted(metadata.sorted_tables, key = lambda t: t.name)
for table in sorted_tables:
print CreateTable(table).compile(engine)
|
Add a standalone DDL print/reflection tool.
|
Add a standalone DDL print/reflection tool.
|
Python
|
mit
|
borg-project/borg
|
Add a standalone DDL print/reflection tool.
|
"""
utexas/tools/generate_ddl.py
Print or apply the research data schema.
@author: Bryan Silverthorn <bcs@cargo-cult.org>
"""
if __name__ == "__main__":
from utexas.tools.generate_ddl import main
raise SystemExit(main())
from cargo.flags import (
Flag,
Flags,
)
module_flags = \
Flags(
"Research Data Storage",
Flag(
"-a",
"--apply",
action = "store_true",
help = "create the generated schema",
),
Flag(
"-r",
"--reflect",
action = "store_true",
help = "load the reflected schema",
),
Flag(
"-t",
"--topological",
action = "store_true",
help = "print topologically sorted by dependency",
),
)
def main():
"""
Create core database metadata.
"""
# connect to the database
from cargo.flags import parse_given
from utexas.data import research_connect
parse_given(usage = "usage: %prog [options]")
engine = research_connect()
# load the appropriate schema
if module_flags.given.reflect:
# use the database's schema
from sqlalchemy.schema import MetaData
metadata = MetaData()
metadata.reflect(bind = engine)
else:
# use the project-defined schema
from utexas.data import DatumBase
metadata = DatumBase.metadata
# then do something with it
if module_flags.given.apply:
# apply the DDL to the database
metadata.create_all(engine)
else:
# print the DDL
from sqlalchemy.schema import CreateTable
if module_flags.given.topological:
sorted_tables = metadata.sorted_tables
else:
sorted_tables = sorted(metadata.sorted_tables, key = lambda t: t.name)
for table in sorted_tables:
print CreateTable(table).compile(engine)
|
<commit_before><commit_msg>Add a standalone DDL print/reflection tool.<commit_after>
|
"""
utexas/tools/generate_ddl.py
Print or apply the research data schema.
@author: Bryan Silverthorn <bcs@cargo-cult.org>
"""
if __name__ == "__main__":
from utexas.tools.generate_ddl import main
raise SystemExit(main())
from cargo.flags import (
Flag,
Flags,
)
module_flags = \
Flags(
"Research Data Storage",
Flag(
"-a",
"--apply",
action = "store_true",
help = "create the generated schema",
),
Flag(
"-r",
"--reflect",
action = "store_true",
help = "load the reflected schema",
),
Flag(
"-t",
"--topological",
action = "store_true",
help = "print topologically sorted by dependency",
),
)
def main():
"""
Create core database metadata.
"""
# connect to the database
from cargo.flags import parse_given
from utexas.data import research_connect
parse_given(usage = "usage: %prog [options]")
engine = research_connect()
# load the appropriate schema
if module_flags.given.reflect:
# use the database's schema
from sqlalchemy.schema import MetaData
metadata = MetaData()
metadata.reflect(bind = engine)
else:
# use the project-defined schema
from utexas.data import DatumBase
metadata = DatumBase.metadata
# then do something with it
if module_flags.given.apply:
# apply the DDL to the database
metadata.create_all(engine)
else:
# print the DDL
from sqlalchemy.schema import CreateTable
if module_flags.given.topological:
sorted_tables = metadata.sorted_tables
else:
sorted_tables = sorted(metadata.sorted_tables, key = lambda t: t.name)
for table in sorted_tables:
print CreateTable(table).compile(engine)
|
Add a standalone DDL print/reflection tool."""
utexas/tools/generate_ddl.py
Print or apply the research data schema.
@author: Bryan Silverthorn <bcs@cargo-cult.org>
"""
if __name__ == "__main__":
from utexas.tools.generate_ddl import main
raise SystemExit(main())
from cargo.flags import (
Flag,
Flags,
)
module_flags = \
Flags(
"Research Data Storage",
Flag(
"-a",
"--apply",
action = "store_true",
help = "create the generated schema",
),
Flag(
"-r",
"--reflect",
action = "store_true",
help = "load the reflected schema",
),
Flag(
"-t",
"--topological",
action = "store_true",
help = "print topologically sorted by dependency",
),
)
def main():
"""
Create core database metadata.
"""
# connect to the database
from cargo.flags import parse_given
from utexas.data import research_connect
parse_given(usage = "usage: %prog [options]")
engine = research_connect()
# load the appropriate schema
if module_flags.given.reflect:
# use the database's schema
from sqlalchemy.schema import MetaData
metadata = MetaData()
metadata.reflect(bind = engine)
else:
# use the project-defined schema
from utexas.data import DatumBase
metadata = DatumBase.metadata
# then do something with it
if module_flags.given.apply:
# apply the DDL to the database
metadata.create_all(engine)
else:
# print the DDL
from sqlalchemy.schema import CreateTable
if module_flags.given.topological:
sorted_tables = metadata.sorted_tables
else:
sorted_tables = sorted(metadata.sorted_tables, key = lambda t: t.name)
for table in sorted_tables:
print CreateTable(table).compile(engine)
|
<commit_before><commit_msg>Add a standalone DDL print/reflection tool.<commit_after>"""
utexas/tools/generate_ddl.py
Print or apply the research data schema.
@author: Bryan Silverthorn <bcs@cargo-cult.org>
"""
if __name__ == "__main__":
from utexas.tools.generate_ddl import main
raise SystemExit(main())
from cargo.flags import (
Flag,
Flags,
)
module_flags = \
Flags(
"Research Data Storage",
Flag(
"-a",
"--apply",
action = "store_true",
help = "create the generated schema",
),
Flag(
"-r",
"--reflect",
action = "store_true",
help = "load the reflected schema",
),
Flag(
"-t",
"--topological",
action = "store_true",
help = "print topologically sorted by dependency",
),
)
def main():
"""
Create core database metadata.
"""
# connect to the database
from cargo.flags import parse_given
from utexas.data import research_connect
parse_given(usage = "usage: %prog [options]")
engine = research_connect()
# load the appropriate schema
if module_flags.given.reflect:
# use the database's schema
from sqlalchemy.schema import MetaData
metadata = MetaData()
metadata.reflect(bind = engine)
else:
# use the project-defined schema
from utexas.data import DatumBase
metadata = DatumBase.metadata
# then do something with it
if module_flags.given.apply:
# apply the DDL to the database
metadata.create_all(engine)
else:
# print the DDL
from sqlalchemy.schema import CreateTable
if module_flags.given.topological:
sorted_tables = metadata.sorted_tables
else:
sorted_tables = sorted(metadata.sorted_tables, key = lambda t: t.name)
for table in sorted_tables:
print CreateTable(table).compile(engine)
|
|
f7be1ee85cc3a65c5884773995a20e12037dba4d
|
enqueue-repo.py
|
enqueue-repo.py
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
enqueues respos from stdin or from command line arguments
"""
import argparse
import re
import sys
from typing import Iterator
from sensibility.miner.connection import redis_client
from sensibility.miner.names import DOWNLOAD_QUEUE
from sensibility.miner.rqueue import Queue
def repository(item: str) -> str:
match = re.match(r'''^
[\w\-.]+
/
[\w\-.]+
$''', item, re.VERBOSE)
if match is None:
raise ValueError(item)
return item
def from_stdin() -> Iterator[str]:
yield from sys.stdin.readlines()
parser = argparse.ArgumentParser()
parser.add_argument('repositories', nargs='*', type=repository,
metavar='owner/name')
if __name__ == '__main__':
args = parser.parse_args()
if len(args.repositories) > 0:
repos = args.repositories
else:
repos = from_stdin()
queue = Queue(DOWNLOAD_QUEUE, redis_client)
for name in repos:
queue << name
|
Make an easy thing to enqueue repos.
|
Make an easy thing to enqueue repos.
|
Python
|
apache-2.0
|
eddieantonio/ad-hoc-miner,eddieantonio/ad-hoc-miner,naturalness/sensibility,naturalness/sensibility,naturalness/sensibility,eddieantonio/ad-hoc-miner,naturalness/sensibility,eddieantonio/ad-hoc-miner
|
Make an easy thing to enqueue repos.
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
enqueues respos from stdin or from command line arguments
"""
import argparse
import re
import sys
from typing import Iterator
from sensibility.miner.connection import redis_client
from sensibility.miner.names import DOWNLOAD_QUEUE
from sensibility.miner.rqueue import Queue
def repository(item: str) -> str:
match = re.match(r'''^
[\w\-.]+
/
[\w\-.]+
$''', item, re.VERBOSE)
if match is None:
raise ValueError(item)
return item
def from_stdin() -> Iterator[str]:
yield from sys.stdin.readlines()
parser = argparse.ArgumentParser()
parser.add_argument('repositories', nargs='*', type=repository,
metavar='owner/name')
if __name__ == '__main__':
args = parser.parse_args()
if len(args.repositories) > 0:
repos = args.repositories
else:
repos = from_stdin()
queue = Queue(DOWNLOAD_QUEUE, redis_client)
for name in repos:
queue << name
|
<commit_before><commit_msg>Make an easy thing to enqueue repos.<commit_after>
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
enqueues respos from stdin or from command line arguments
"""
import argparse
import re
import sys
from typing import Iterator
from sensibility.miner.connection import redis_client
from sensibility.miner.names import DOWNLOAD_QUEUE
from sensibility.miner.rqueue import Queue
def repository(item: str) -> str:
match = re.match(r'''^
[\w\-.]+
/
[\w\-.]+
$''', item, re.VERBOSE)
if match is None:
raise ValueError(item)
return item
def from_stdin() -> Iterator[str]:
yield from sys.stdin.readlines()
parser = argparse.ArgumentParser()
parser.add_argument('repositories', nargs='*', type=repository,
metavar='owner/name')
if __name__ == '__main__':
args = parser.parse_args()
if len(args.repositories) > 0:
repos = args.repositories
else:
repos = from_stdin()
queue = Queue(DOWNLOAD_QUEUE, redis_client)
for name in repos:
queue << name
|
Make an easy thing to enqueue repos.#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
enqueues respos from stdin or from command line arguments
"""
import argparse
import re
import sys
from typing import Iterator
from sensibility.miner.connection import redis_client
from sensibility.miner.names import DOWNLOAD_QUEUE
from sensibility.miner.rqueue import Queue
def repository(item: str) -> str:
match = re.match(r'''^
[\w\-.]+
/
[\w\-.]+
$''', item, re.VERBOSE)
if match is None:
raise ValueError(item)
return item
def from_stdin() -> Iterator[str]:
yield from sys.stdin.readlines()
parser = argparse.ArgumentParser()
parser.add_argument('repositories', nargs='*', type=repository,
metavar='owner/name')
if __name__ == '__main__':
args = parser.parse_args()
if len(args.repositories) > 0:
repos = args.repositories
else:
repos = from_stdin()
queue = Queue(DOWNLOAD_QUEUE, redis_client)
for name in repos:
queue << name
|
<commit_before><commit_msg>Make an easy thing to enqueue repos.<commit_after>#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
enqueues respos from stdin or from command line arguments
"""
import argparse
import re
import sys
from typing import Iterator
from sensibility.miner.connection import redis_client
from sensibility.miner.names import DOWNLOAD_QUEUE
from sensibility.miner.rqueue import Queue
def repository(item: str) -> str:
match = re.match(r'''^
[\w\-.]+
/
[\w\-.]+
$''', item, re.VERBOSE)
if match is None:
raise ValueError(item)
return item
def from_stdin() -> Iterator[str]:
yield from sys.stdin.readlines()
parser = argparse.ArgumentParser()
parser.add_argument('repositories', nargs='*', type=repository,
metavar='owner/name')
if __name__ == '__main__':
args = parser.parse_args()
if len(args.repositories) > 0:
repos = args.repositories
else:
repos = from_stdin()
queue = Queue(DOWNLOAD_QUEUE, redis_client)
for name in repos:
queue << name
|
|
7f0c6c94b9cfddd28f2ab124fffdb1850e966930
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
setup(
name='django-simple-history',
version='1.2.2.post1',
description='Store model history and view/revert changes from admin site.',
long_description='\n'.join((
open('README.rst').read(),
open('CHANGES.rst').read(),
)),
author='Corey Bertram',
author_email='corey@qr7.com',
mantainer='Trey Hunner',
url='https://github.com/treyhunner/django-simple-history',
packages=find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Django",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: BSD License",
],
tests_require=["Django>=1.3", "webtest", "django-webtest"],
test_suite='runtests.main',
)
|
from setuptools import setup, find_packages
setup(
name='django-simple-history',
version='1.2.2.post1',
description='Store model history and view/revert changes from admin site.',
long_description='\n'.join((
open('README.rst').read(),
open('CHANGES.rst').read(),
)),
author='Corey Bertram',
author_email='corey@qr7.com',
mantainer='Trey Hunner',
url='https://github.com/treyhunner/django-simple-history',
packages=find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Django",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: BSD License",
],
tests_require=["Django>=1.3", "webtest", "django-webtest"],
include_package_data=True,
test_suite='runtests.main',
)
|
Include package files when installing from PyPI
|
Include package files when installing from PyPI
|
Python
|
bsd-3-clause
|
treyhunner/django-simple-history,emergence/django-simple-history,pombredanne/django-simple-history,pombredanne/django-simple-history,treyhunner/django-simple-history,luzfcb/django-simple-history,luzfcb/django-simple-history,emergence/django-simple-history
|
from setuptools import setup, find_packages
setup(
name='django-simple-history',
version='1.2.2.post1',
description='Store model history and view/revert changes from admin site.',
long_description='\n'.join((
open('README.rst').read(),
open('CHANGES.rst').read(),
)),
author='Corey Bertram',
author_email='corey@qr7.com',
mantainer='Trey Hunner',
url='https://github.com/treyhunner/django-simple-history',
packages=find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Django",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: BSD License",
],
tests_require=["Django>=1.3", "webtest", "django-webtest"],
test_suite='runtests.main',
)
Include package files when installing from PyPI
|
from setuptools import setup, find_packages
setup(
name='django-simple-history',
version='1.2.2.post1',
description='Store model history and view/revert changes from admin site.',
long_description='\n'.join((
open('README.rst').read(),
open('CHANGES.rst').read(),
)),
author='Corey Bertram',
author_email='corey@qr7.com',
mantainer='Trey Hunner',
url='https://github.com/treyhunner/django-simple-history',
packages=find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Django",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: BSD License",
],
tests_require=["Django>=1.3", "webtest", "django-webtest"],
include_package_data=True,
test_suite='runtests.main',
)
|
<commit_before>from setuptools import setup, find_packages
setup(
name='django-simple-history',
version='1.2.2.post1',
description='Store model history and view/revert changes from admin site.',
long_description='\n'.join((
open('README.rst').read(),
open('CHANGES.rst').read(),
)),
author='Corey Bertram',
author_email='corey@qr7.com',
mantainer='Trey Hunner',
url='https://github.com/treyhunner/django-simple-history',
packages=find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Django",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: BSD License",
],
tests_require=["Django>=1.3", "webtest", "django-webtest"],
test_suite='runtests.main',
)
<commit_msg>Include package files when installing from PyPI<commit_after>
|
from setuptools import setup, find_packages
setup(
name='django-simple-history',
version='1.2.2.post1',
description='Store model history and view/revert changes from admin site.',
long_description='\n'.join((
open('README.rst').read(),
open('CHANGES.rst').read(),
)),
author='Corey Bertram',
author_email='corey@qr7.com',
mantainer='Trey Hunner',
url='https://github.com/treyhunner/django-simple-history',
packages=find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Django",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: BSD License",
],
tests_require=["Django>=1.3", "webtest", "django-webtest"],
include_package_data=True,
test_suite='runtests.main',
)
|
from setuptools import setup, find_packages
setup(
name='django-simple-history',
version='1.2.2.post1',
description='Store model history and view/revert changes from admin site.',
long_description='\n'.join((
open('README.rst').read(),
open('CHANGES.rst').read(),
)),
author='Corey Bertram',
author_email='corey@qr7.com',
mantainer='Trey Hunner',
url='https://github.com/treyhunner/django-simple-history',
packages=find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Django",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: BSD License",
],
tests_require=["Django>=1.3", "webtest", "django-webtest"],
test_suite='runtests.main',
)
Include package files when installing from PyPIfrom setuptools import setup, find_packages
setup(
name='django-simple-history',
version='1.2.2.post1',
description='Store model history and view/revert changes from admin site.',
long_description='\n'.join((
open('README.rst').read(),
open('CHANGES.rst').read(),
)),
author='Corey Bertram',
author_email='corey@qr7.com',
mantainer='Trey Hunner',
url='https://github.com/treyhunner/django-simple-history',
packages=find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Django",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: BSD License",
],
tests_require=["Django>=1.3", "webtest", "django-webtest"],
include_package_data=True,
test_suite='runtests.main',
)
|
<commit_before>from setuptools import setup, find_packages
setup(
name='django-simple-history',
version='1.2.2.post1',
description='Store model history and view/revert changes from admin site.',
long_description='\n'.join((
open('README.rst').read(),
open('CHANGES.rst').read(),
)),
author='Corey Bertram',
author_email='corey@qr7.com',
mantainer='Trey Hunner',
url='https://github.com/treyhunner/django-simple-history',
packages=find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Django",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: BSD License",
],
tests_require=["Django>=1.3", "webtest", "django-webtest"],
test_suite='runtests.main',
)
<commit_msg>Include package files when installing from PyPI<commit_after>from setuptools import setup, find_packages
setup(
name='django-simple-history',
version='1.2.2.post1',
description='Store model history and view/revert changes from admin site.',
long_description='\n'.join((
open('README.rst').read(),
open('CHANGES.rst').read(),
)),
author='Corey Bertram',
author_email='corey@qr7.com',
mantainer='Trey Hunner',
url='https://github.com/treyhunner/django-simple-history',
packages=find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Django",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: BSD License",
],
tests_require=["Django>=1.3", "webtest", "django-webtest"],
include_package_data=True,
test_suite='runtests.main',
)
|
b423a34696d8e3ef7587bed3eb61876abc923bbe
|
python/hardway/function_testing.py
|
python/hardway/function_testing.py
|
#!/usr/bin/python3
# some experiments with functions
def my_values(value1):
print ("Value of value1 in function my_values: %s" % value1)
print ("Value of value2 in function my_values: %s" % value2)
print ("==========================================================")
def my_values2(newvalue):
print ("Value of newvalue in function my_values2: %s" % newvalue) # newvalue will be lost when we return to the main part
print ("Value of value1 in function my_values2: %s" % value1)
print ("Value of value2 in function my_values2: %s" % value2)
print ("==========================================================")
def add10(x):
x = x + 10
return(x)
def add100():
print ("I am in add100 part and value1 is: %s" % value1)
print ("I am in add100 part and value2 is: %s" % value2)
print ("==========================================================")
# value1 = value1 + 100 # This generates an error message
value1 = 10
value2 = 20
print ("This is a script for testing functions in Python3.")
print ("I am in the main part and value1 is: %s" % value1)
print ("I am in the main part and value2 is: %s" % value2)
print ("==========================================================")
my_values(value1)
my_values2(value1)
# This generates an error message
# print ("Value of newvalue in main part: %s" % newvalue)
print ("I am in the main part and value1 is: %s" % value1)
print ("I am in the main part and value2 is: %s" % value2)
print ("==========================================================")
newvalue2 = add10(value1)
print ("I am in the main part and newvalue2 is: %s (it should be value1 + 10) " % newvalue2)
print ("==========================================================")
print ("Now adding 10 to both value1 and value2")
value1 = value1 + 10
value2 = value2 + 10
print ("I am in the main part and value1 is: %s" % value1)
print ("I am in the main part and value2 is: %s" % value2)
print ("==========================================================")
add100()
print ("I am in the main part and value1 is: %s" % value1)
print ("I am in the main part and value2 is: %s" % value2)
print ("==========================================================")
|
Test script with Python Functions
|
Test script with Python Functions
|
Python
|
apache-2.0
|
petervdb/eLearning,petervdb/eLearning,petervdb/eLearning,petervdb/eLearning,petervdb/eLearning
|
Test script with Python Functions
|
#!/usr/bin/python3
# some experiments with functions
def my_values(value1):
print ("Value of value1 in function my_values: %s" % value1)
print ("Value of value2 in function my_values: %s" % value2)
print ("==========================================================")
def my_values2(newvalue):
print ("Value of newvalue in function my_values2: %s" % newvalue) # newvalue will be lost when we return to the main part
print ("Value of value1 in function my_values2: %s" % value1)
print ("Value of value2 in function my_values2: %s" % value2)
print ("==========================================================")
def add10(x):
x = x + 10
return(x)
def add100():
print ("I am in add100 part and value1 is: %s" % value1)
print ("I am in add100 part and value2 is: %s" % value2)
print ("==========================================================")
# value1 = value1 + 100 # This generates an error message
value1 = 10
value2 = 20
print ("This is a script for testing functions in Python3.")
print ("I am in the main part and value1 is: %s" % value1)
print ("I am in the main part and value2 is: %s" % value2)
print ("==========================================================")
my_values(value1)
my_values2(value1)
# This generates an error message
# print ("Value of newvalue in main part: %s" % newvalue)
print ("I am in the main part and value1 is: %s" % value1)
print ("I am in the main part and value2 is: %s" % value2)
print ("==========================================================")
newvalue2 = add10(value1)
print ("I am in the main part and newvalue2 is: %s (it should be value1 + 10) " % newvalue2)
print ("==========================================================")
print ("Now adding 10 to both value1 and value2")
value1 = value1 + 10
value2 = value2 + 10
print ("I am in the main part and value1 is: %s" % value1)
print ("I am in the main part and value2 is: %s" % value2)
print ("==========================================================")
add100()
print ("I am in the main part and value1 is: %s" % value1)
print ("I am in the main part and value2 is: %s" % value2)
print ("==========================================================")
|
<commit_before><commit_msg>Test script with Python Functions<commit_after>
|
#!/usr/bin/python3
# some experiments with functions
def my_values(value1):
print ("Value of value1 in function my_values: %s" % value1)
print ("Value of value2 in function my_values: %s" % value2)
print ("==========================================================")
def my_values2(newvalue):
print ("Value of newvalue in function my_values2: %s" % newvalue) # newvalue will be lost when we return to the main part
print ("Value of value1 in function my_values2: %s" % value1)
print ("Value of value2 in function my_values2: %s" % value2)
print ("==========================================================")
def add10(x):
x = x + 10
return(x)
def add100():
print ("I am in add100 part and value1 is: %s" % value1)
print ("I am in add100 part and value2 is: %s" % value2)
print ("==========================================================")
# value1 = value1 + 100 # This generates an error message
value1 = 10
value2 = 20
print ("This is a script for testing functions in Python3.")
print ("I am in the main part and value1 is: %s" % value1)
print ("I am in the main part and value2 is: %s" % value2)
print ("==========================================================")
my_values(value1)
my_values2(value1)
# This generates an error message
# print ("Value of newvalue in main part: %s" % newvalue)
print ("I am in the main part and value1 is: %s" % value1)
print ("I am in the main part and value2 is: %s" % value2)
print ("==========================================================")
newvalue2 = add10(value1)
print ("I am in the main part and newvalue2 is: %s (it should be value1 + 10) " % newvalue2)
print ("==========================================================")
print ("Now adding 10 to both value1 and value2")
value1 = value1 + 10
value2 = value2 + 10
print ("I am in the main part and value1 is: %s" % value1)
print ("I am in the main part and value2 is: %s" % value2)
print ("==========================================================")
add100()
print ("I am in the main part and value1 is: %s" % value1)
print ("I am in the main part and value2 is: %s" % value2)
print ("==========================================================")
|
Test script with Python Functions#!/usr/bin/python3
# some experiments with functions
def my_values(value1):
print ("Value of value1 in function my_values: %s" % value1)
print ("Value of value2 in function my_values: %s" % value2)
print ("==========================================================")
def my_values2(newvalue):
print ("Value of newvalue in function my_values2: %s" % newvalue) # newvalue will be lost when we return to the main part
print ("Value of value1 in function my_values2: %s" % value1)
print ("Value of value2 in function my_values2: %s" % value2)
print ("==========================================================")
def add10(x):
x = x + 10
return(x)
def add100():
print ("I am in add100 part and value1 is: %s" % value1)
print ("I am in add100 part and value2 is: %s" % value2)
print ("==========================================================")
# value1 = value1 + 100 # This generates an error message
value1 = 10
value2 = 20
print ("This is a script for testing functions in Python3.")
print ("I am in the main part and value1 is: %s" % value1)
print ("I am in the main part and value2 is: %s" % value2)
print ("==========================================================")
my_values(value1)
my_values2(value1)
# This generates an error message
# print ("Value of newvalue in main part: %s" % newvalue)
print ("I am in the main part and value1 is: %s" % value1)
print ("I am in the main part and value2 is: %s" % value2)
print ("==========================================================")
newvalue2 = add10(value1)
print ("I am in the main part and newvalue2 is: %s (it should be value1 + 10) " % newvalue2)
print ("==========================================================")
print ("Now adding 10 to both value1 and value2")
value1 = value1 + 10
value2 = value2 + 10
print ("I am in the main part and value1 is: %s" % value1)
print ("I am in the main part and value2 is: %s" % value2)
print ("==========================================================")
add100()
print ("I am in the main part and value1 is: %s" % value1)
print ("I am in the main part and value2 is: %s" % value2)
print ("==========================================================")
|
<commit_before><commit_msg>Test script with Python Functions<commit_after>#!/usr/bin/python3
# some experiments with functions
def my_values(value1):
print ("Value of value1 in function my_values: %s" % value1)
print ("Value of value2 in function my_values: %s" % value2)
print ("==========================================================")
def my_values2(newvalue):
print ("Value of newvalue in function my_values2: %s" % newvalue) # newvalue will be lost when we return to the main part
print ("Value of value1 in function my_values2: %s" % value1)
print ("Value of value2 in function my_values2: %s" % value2)
print ("==========================================================")
def add10(x):
x = x + 10
return(x)
def add100():
print ("I am in add100 part and value1 is: %s" % value1)
print ("I am in add100 part and value2 is: %s" % value2)
print ("==========================================================")
# value1 = value1 + 100 # This generates an error message
value1 = 10
value2 = 20
print ("This is a script for testing functions in Python3.")
print ("I am in the main part and value1 is: %s" % value1)
print ("I am in the main part and value2 is: %s" % value2)
print ("==========================================================")
my_values(value1)
my_values2(value1)
# This generates an error message
# print ("Value of newvalue in main part: %s" % newvalue)
print ("I am in the main part and value1 is: %s" % value1)
print ("I am in the main part and value2 is: %s" % value2)
print ("==========================================================")
newvalue2 = add10(value1)
print ("I am in the main part and newvalue2 is: %s (it should be value1 + 10) " % newvalue2)
print ("==========================================================")
print ("Now adding 10 to both value1 and value2")
value1 = value1 + 10
value2 = value2 + 10
print ("I am in the main part and value1 is: %s" % value1)
print ("I am in the main part and value2 is: %s" % value2)
print ("==========================================================")
add100()
print ("I am in the main part and value1 is: %s" % value1)
print ("I am in the main part and value2 is: %s" % value2)
print ("==========================================================")
|
|
0f6913f09b61acef4e464f81558f19982f98697e
|
getPortfolio.py
|
getPortfolio.py
|
#!/usr/bin/env python
"""
Retrieves current list of ticker symbols that are in portfolio.
All symbols are in lower-case.
"""
#TODO: Get portfolio list from a text file.
#TODO: Handle command line parms.
import sys
def get_portfolio():
"""Return a list of ticker symbols for entire portfolio"""
# Currently this list is hardcoded to match my spreadsheet layout
portfolio = [
'BTC'
,'LTC'
,'ETH'
,''
,''
,'ZRC'
,'NMC'
,'MSC'
,'ANC'
,'NXT'
,'XCP'
,''
,''
,'PTS'
,'BTSX'
,''
,''
,'XPM'
,'PPC'
,'FTC'
,'SWARMPRE'
,'DRK'
,'MAID'
,'TOR'
,''
,''
,'DOGE'
,'MEC'
,'QRK'
,'XRP'
]
# Convert to lowercase
for index, ticker in enumerate(portfolio):
portfolio[index] = ticker.lower()
return portfolio
def main():
"""Parse command line options (TODO)"""
print get_portfolio()
if __name__ == "__main__":
main()
|
Make ticker symbols lower case
|
Make ticker symbols lower case
|
Python
|
mit
|
SimonJester/portfolio
|
Make ticker symbols lower case
|
#!/usr/bin/env python
"""
Retrieves current list of ticker symbols that are in portfolio.
All symbols are in lower-case.
"""
#TODO: Get portfolio list from a text file.
#TODO: Handle command line parms.
import sys
def get_portfolio():
"""Return a list of ticker symbols for entire portfolio"""
# Currently this list is hardcoded to match my spreadsheet layout
portfolio = [
'BTC'
,'LTC'
,'ETH'
,''
,''
,'ZRC'
,'NMC'
,'MSC'
,'ANC'
,'NXT'
,'XCP'
,''
,''
,'PTS'
,'BTSX'
,''
,''
,'XPM'
,'PPC'
,'FTC'
,'SWARMPRE'
,'DRK'
,'MAID'
,'TOR'
,''
,''
,'DOGE'
,'MEC'
,'QRK'
,'XRP'
]
# Convert to lowercase
for index, ticker in enumerate(portfolio):
portfolio[index] = ticker.lower()
return portfolio
def main():
"""Parse command line options (TODO)"""
print get_portfolio()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Make ticker symbols lower case<commit_after>
|
#!/usr/bin/env python
"""
Retrieves current list of ticker symbols that are in portfolio.
All symbols are in lower-case.
"""
#TODO: Get portfolio list from a text file.
#TODO: Handle command line parms.
import sys
def get_portfolio():
"""Return a list of ticker symbols for entire portfolio"""
# Currently this list is hardcoded to match my spreadsheet layout
portfolio = [
'BTC'
,'LTC'
,'ETH'
,''
,''
,'ZRC'
,'NMC'
,'MSC'
,'ANC'
,'NXT'
,'XCP'
,''
,''
,'PTS'
,'BTSX'
,''
,''
,'XPM'
,'PPC'
,'FTC'
,'SWARMPRE'
,'DRK'
,'MAID'
,'TOR'
,''
,''
,'DOGE'
,'MEC'
,'QRK'
,'XRP'
]
# Convert to lowercase
for index, ticker in enumerate(portfolio):
portfolio[index] = ticker.lower()
return portfolio
def main():
"""Parse command line options (TODO)"""
print get_portfolio()
if __name__ == "__main__":
main()
|
Make ticker symbols lower case#!/usr/bin/env python
"""
Retrieves current list of ticker symbols that are in portfolio.
All symbols are in lower-case.
"""
#TODO: Get portfolio list from a text file.
#TODO: Handle command line parms.
import sys
def get_portfolio():
"""Return a list of ticker symbols for entire portfolio"""
# Currently this list is hardcoded to match my spreadsheet layout
portfolio = [
'BTC'
,'LTC'
,'ETH'
,''
,''
,'ZRC'
,'NMC'
,'MSC'
,'ANC'
,'NXT'
,'XCP'
,''
,''
,'PTS'
,'BTSX'
,''
,''
,'XPM'
,'PPC'
,'FTC'
,'SWARMPRE'
,'DRK'
,'MAID'
,'TOR'
,''
,''
,'DOGE'
,'MEC'
,'QRK'
,'XRP'
]
# Convert to lowercase
for index, ticker in enumerate(portfolio):
portfolio[index] = ticker.lower()
return portfolio
def main():
"""Parse command line options (TODO)"""
print get_portfolio()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Make ticker symbols lower case<commit_after>#!/usr/bin/env python
"""
Retrieves current list of ticker symbols that are in portfolio.
All symbols are in lower-case.
"""
#TODO: Get portfolio list from a text file.
#TODO: Handle command line parms.
import sys
def get_portfolio():
"""Return a list of ticker symbols for entire portfolio"""
# Currently this list is hardcoded to match my spreadsheet layout
portfolio = [
'BTC'
,'LTC'
,'ETH'
,''
,''
,'ZRC'
,'NMC'
,'MSC'
,'ANC'
,'NXT'
,'XCP'
,''
,''
,'PTS'
,'BTSX'
,''
,''
,'XPM'
,'PPC'
,'FTC'
,'SWARMPRE'
,'DRK'
,'MAID'
,'TOR'
,''
,''
,'DOGE'
,'MEC'
,'QRK'
,'XRP'
]
# Convert to lowercase
for index, ticker in enumerate(portfolio):
portfolio[index] = ticker.lower()
return portfolio
def main():
"""Parse command line options (TODO)"""
print get_portfolio()
if __name__ == "__main__":
main()
|