| column | type |
|---|---|
| commit | stringlengths 40-40 |
| old_file | stringlengths 4-118 |
| new_file | stringlengths 4-118 |
| old_contents | stringlengths 0-2.94k |
| new_contents | stringlengths 1-4.43k |
| subject | stringlengths 15-444 |
| message | stringlengths 16-3.45k |
| lang | stringclasses 1 value |
| license | stringclasses 13 values |
| repos | stringlengths 5-43.2k |
| prompt | stringlengths 17-4.58k |
| response | stringlengths 1-4.43k |
| prompt_tagged | stringlengths 58-4.62k |
| response_tagged | stringlengths 1-4.43k |
| text | stringlengths 132-7.29k |
| text_tagged | stringlengths 173-7.33k |
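The rows below follow this schema. As a minimal sketch of how such a dataset could be loaded and inspected (assuming it is published via the Hugging Face `datasets` library; the repository name `org/commit-code-pairs` is a placeholder, not the actual hub path):

```python
from datasets import load_dataset

# Placeholder dataset path -- substitute the real repository name.
ds = load_dataset("org/commit-code-pairs", split="train")

row = ds[0]
print(row["commit"])          # 40-character commit SHA
print(row["subject"])         # one-line commit subject
print(row["new_file"])        # path of the file touched by the commit
print(row["response"][:200])  # beginning of the new file contents
```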
822ef2f1d182409d51da16c2b589603f1ef9e27f
|
tests/test_utils.py
|
tests/test_utils.py
|
from django_logutils.utils import add_items_to_message
def test_add_items_to_message():
msg = "log message"
items = {'user': 'benny', 'email': 'benny@example.com'}
msg = add_items_to_message(msg, items)
assert msg == 'log message user=benny email=benny@example.com'
|
Add test fro add_items_to_message function.
|
Add test fro add_items_to_message function.
|
Python
|
bsd-3-clause
|
jsmits/django-logutils,jsmits/django-logutils
|
Add test fro add_items_to_message function.
|
from django_logutils.utils import add_items_to_message
def test_add_items_to_message():
msg = "log message"
items = {'user': 'benny', 'email': 'benny@example.com'}
msg = add_items_to_message(msg, items)
assert msg == 'log message user=benny email=benny@example.com'
|
<commit_before><commit_msg>Add test fro add_items_to_message function.<commit_after>
|
from django_logutils.utils import add_items_to_message
def test_add_items_to_message():
msg = "log message"
items = {'user': 'benny', 'email': 'benny@example.com'}
msg = add_items_to_message(msg, items)
assert msg == 'log message user=benny email=benny@example.com'
|
Add test fro add_items_to_message function.from django_logutils.utils import add_items_to_message
def test_add_items_to_message():
msg = "log message"
items = {'user': 'benny', 'email': 'benny@example.com'}
msg = add_items_to_message(msg, items)
assert msg == 'log message user=benny email=benny@example.com'
|
<commit_before><commit_msg>Add test fro add_items_to_message function.<commit_after>from django_logutils.utils import add_items_to_message
def test_add_items_to_message():
msg = "log message"
items = {'user': 'benny', 'email': 'benny@example.com'}
msg = add_items_to_message(msg, items)
assert msg == 'log message user=benny email=benny@example.com'
|
|
0d9519527986eb2255d185ec833f7c415ad5dbd3
|
migrations/versions/168_update_easuid_schema.py
|
migrations/versions/168_update_easuid_schema.py
|
"""update eas schema
Revision ID: 281b07fa75bb
Revises:576f5310e8fc
Create Date: 2015-05-19 01:08:57.101681
"""
# revision identifiers, used by Alembic.
revision = '281b07fa75bb'
down_revision = '576f5310e8fc'
from alembic import op
def upgrade():
    from inbox.ignition import main_engine
    engine = main_engine(pool_size=1, max_overflow=0)
    if not engine.has_table('easuid'):
        return
    conn = op.get_bind()
    conn.execute('''ALTER TABLE easuid
        ADD COLUMN server_id VARCHAR(64) DEFAULT NULL,
        ADD COLUMN easfoldersyncstatus_id INT(11) DEFAULT NULL,
        ADD INDEX easfoldersyncstatus_id (easfoldersyncstatus_id),
        ADD CONSTRAINT easuid_ibfk_4 FOREIGN KEY (easfoldersyncstatus_id)
        REFERENCES easfoldersyncstatus (id) ON DELETE CASCADE,
        ADD INDEX ix_easuid_server_id (server_id)
    ''')
def downgrade():
    pass
|
Add migrations for EASUid storage format (part 1 of 3).
|
Add migrations for EASUid storage format (part 1 of 3).
|
Python
|
agpl-3.0
|
PriviPK/privipk-sync-engine,nylas/sync-engine,jobscore/sync-engine,jobscore/sync-engine,ErinCall/sync-engine,jobscore/sync-engine,nylas/sync-engine,gale320/sync-engine,PriviPK/privipk-sync-engine,ErinCall/sync-engine,Eagles2F/sync-engine,ErinCall/sync-engine,Eagles2F/sync-engine,nylas/sync-engine,gale320/sync-engine,Eagles2F/sync-engine,Eagles2F/sync-engine,gale320/sync-engine,wakermahmud/sync-engine,PriviPK/privipk-sync-engine,wakermahmud/sync-engine,wakermahmud/sync-engine,closeio/nylas,closeio/nylas,nylas/sync-engine,gale320/sync-engine,ErinCall/sync-engine,PriviPK/privipk-sync-engine,ErinCall/sync-engine,Eagles2F/sync-engine,wakermahmud/sync-engine,wakermahmud/sync-engine,jobscore/sync-engine,gale320/sync-engine,closeio/nylas,closeio/nylas,PriviPK/privipk-sync-engine
|
Add migrations for EASUid storage format (part 1 of 3).
|
"""update eas schema
Revision ID: 281b07fa75bb
Revises:576f5310e8fc
Create Date: 2015-05-19 01:08:57.101681
"""
# revision identifiers, used by Alembic.
revision = '281b07fa75bb'
down_revision = '576f5310e8fc'
from alembic import op
def upgrade():
    from inbox.ignition import main_engine
    engine = main_engine(pool_size=1, max_overflow=0)
    if not engine.has_table('easuid'):
        return
    conn = op.get_bind()
    conn.execute('''ALTER TABLE easuid
        ADD COLUMN server_id VARCHAR(64) DEFAULT NULL,
        ADD COLUMN easfoldersyncstatus_id INT(11) DEFAULT NULL,
        ADD INDEX easfoldersyncstatus_id (easfoldersyncstatus_id),
        ADD CONSTRAINT easuid_ibfk_4 FOREIGN KEY (easfoldersyncstatus_id)
        REFERENCES easfoldersyncstatus (id) ON DELETE CASCADE,
        ADD INDEX ix_easuid_server_id (server_id)
    ''')
def downgrade():
    pass
|
<commit_before><commit_msg>Add migrations for EASUid storage format (part 1 of 3).<commit_after>
|
"""update eas schema
Revision ID: 281b07fa75bb
Revises:576f5310e8fc
Create Date: 2015-05-19 01:08:57.101681
"""
# revision identifiers, used by Alembic.
revision = '281b07fa75bb'
down_revision = '576f5310e8fc'
from alembic import op
def upgrade():
    from inbox.ignition import main_engine
    engine = main_engine(pool_size=1, max_overflow=0)
    if not engine.has_table('easuid'):
        return
    conn = op.get_bind()
    conn.execute('''ALTER TABLE easuid
        ADD COLUMN server_id VARCHAR(64) DEFAULT NULL,
        ADD COLUMN easfoldersyncstatus_id INT(11) DEFAULT NULL,
        ADD INDEX easfoldersyncstatus_id (easfoldersyncstatus_id),
        ADD CONSTRAINT easuid_ibfk_4 FOREIGN KEY (easfoldersyncstatus_id)
        REFERENCES easfoldersyncstatus (id) ON DELETE CASCADE,
        ADD INDEX ix_easuid_server_id (server_id)
    ''')
def downgrade():
    pass
|
Add migrations for EASUid storage format (part 1 of 3)."""update eas schema
Revision ID: 281b07fa75bb
Revises:576f5310e8fc
Create Date: 2015-05-19 01:08:57.101681
"""
# revision identifiers, used by Alembic.
revision = '281b07fa75bb'
down_revision = '576f5310e8fc'
from alembic import op
def upgrade():
    from inbox.ignition import main_engine
    engine = main_engine(pool_size=1, max_overflow=0)
    if not engine.has_table('easuid'):
        return
    conn = op.get_bind()
    conn.execute('''ALTER TABLE easuid
        ADD COLUMN server_id VARCHAR(64) DEFAULT NULL,
        ADD COLUMN easfoldersyncstatus_id INT(11) DEFAULT NULL,
        ADD INDEX easfoldersyncstatus_id (easfoldersyncstatus_id),
        ADD CONSTRAINT easuid_ibfk_4 FOREIGN KEY (easfoldersyncstatus_id)
        REFERENCES easfoldersyncstatus (id) ON DELETE CASCADE,
        ADD INDEX ix_easuid_server_id (server_id)
    ''')
def downgrade():
    pass
|
<commit_before><commit_msg>Add migrations for EASUid storage format (part 1 of 3).<commit_after>"""update eas schema
Revision ID: 281b07fa75bb
Revises:576f5310e8fc
Create Date: 2015-05-19 01:08:57.101681
"""
# revision identifiers, used by Alembic.
revision = '281b07fa75bb'
down_revision = '576f5310e8fc'
from alembic import op
def upgrade():
    from inbox.ignition import main_engine
    engine = main_engine(pool_size=1, max_overflow=0)
    if not engine.has_table('easuid'):
        return
    conn = op.get_bind()
    conn.execute('''ALTER TABLE easuid
        ADD COLUMN server_id VARCHAR(64) DEFAULT NULL,
        ADD COLUMN easfoldersyncstatus_id INT(11) DEFAULT NULL,
        ADD INDEX easfoldersyncstatus_id (easfoldersyncstatus_id),
        ADD CONSTRAINT easuid_ibfk_4 FOREIGN KEY (easfoldersyncstatus_id)
        REFERENCES easfoldersyncstatus (id) ON DELETE CASCADE,
        ADD INDEX ix_easuid_server_id (server_id)
    ''')
def downgrade():
    pass
|
|
c28127941ed88fdedc084c6227da3b921a5e15ab
|
jsk_apc2015_common/scripts/test_bof_object_recognition.py
|
jsk_apc2015_common/scripts/test_bof_object_recognition.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import cPickle as pickle
import gzip
import sys
import cv2
from imagesift import get_sift_keypoints
import numpy as np
from sklearn.datasets import load_files
from sklearn.metrics import accuracy_score, classification_report
from sklearn.preprocessing import normalize
parser = argparse.ArgumentParser()
parser.add_argument('container_path')
parser.add_argument('bof_path')
parser.add_argument('clf_path')
args = parser.parse_args()
container_path = args.container_path
bof_path = args.bof_path
clf_path = args.clf_path
bunch_files = load_files(container_path=container_path,
                         description='images',
                         shuffle=False,
                         load_content=False)
with gzip.open(bof_path, 'rb') as f:
    bof = pickle.load(f)
with gzip.open(clf_path, 'rb') as f:
    clf = pickle.load(f)
descs = []
for fname in bunch_files.filenames:
    img = cv2.imread(fname, 0)
    _, desc = get_sift_keypoints(img)
    descs.append(desc)
X = bof.transform(descs)
normalize(X, copy=False)
y_pred = clf.predict(X)
y = bunch_files.target
print accuracy_score(y, y_pred)
print classification_report(y, y_pred, target_names=clf.target_names_)
|
Add bof object recognition test script
|
[jsk_2015_apc_common] Add bof object recognition test script
|
Python
|
bsd-3-clause
|
pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc
|
[jsk_2015_apc_common] Add bof object recognition test script
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import cPickle as pickle
import gzip
import sys
import cv2
from imagesift import get_sift_keypoints
import numpy as np
from sklearn.datasets import load_files
from sklearn.metrics import accuracy_score, classification_report
from sklearn.preprocessing import normalize
parser = argparse.ArgumentParser()
parser.add_argument('container_path')
parser.add_argument('bof_path')
parser.add_argument('clf_path')
args = parser.parse_args()
container_path = args.container_path
bof_path = args.bof_path
clf_path = args.clf_path
bunch_files = load_files(container_path=container_path,
                         description='images',
                         shuffle=False,
                         load_content=False)
with gzip.open(bof_path, 'rb') as f:
    bof = pickle.load(f)
with gzip.open(clf_path, 'rb') as f:
    clf = pickle.load(f)
descs = []
for fname in bunch_files.filenames:
    img = cv2.imread(fname, 0)
    _, desc = get_sift_keypoints(img)
    descs.append(desc)
X = bof.transform(descs)
normalize(X, copy=False)
y_pred = clf.predict(X)
y = bunch_files.target
print accuracy_score(y, y_pred)
print classification_report(y, y_pred, target_names=clf.target_names_)
|
<commit_before><commit_msg>[jsk_2015_apc_common] Add bof object recognition test script<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import cPickle as pickle
import gzip
import sys
import cv2
from imagesift import get_sift_keypoints
import numpy as np
from sklearn.datasets import load_files
from sklearn.metrics import accuracy_score, classification_report
from sklearn.preprocessing import normalize
parser = argparse.ArgumentParser()
parser.add_argument('container_path')
parser.add_argument('bof_path')
parser.add_argument('clf_path')
args = parser.parse_args()
container_path = args.container_path
bof_path = args.bof_path
clf_path = args.clf_path
bunch_files = load_files(container_path=container_path,
                         description='images',
                         shuffle=False,
                         load_content=False)
with gzip.open(bof_path, 'rb') as f:
    bof = pickle.load(f)
with gzip.open(clf_path, 'rb') as f:
    clf = pickle.load(f)
descs = []
for fname in bunch_files.filenames:
    img = cv2.imread(fname, 0)
    _, desc = get_sift_keypoints(img)
    descs.append(desc)
X = bof.transform(descs)
normalize(X, copy=False)
y_pred = clf.predict(X)
y = bunch_files.target
print accuracy_score(y, y_pred)
print classification_report(y, y_pred, target_names=clf.target_names_)
|
[jsk_2015_apc_common] Add bof object recognition test script#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import cPickle as pickle
import gzip
import sys
import cv2
from imagesift import get_sift_keypoints
import numpy as np
from sklearn.datasets import load_files
from sklearn.metrics import accuracy_score, classification_report
from sklearn.preprocessing import normalize
parser = argparse.ArgumentParser()
parser.add_argument('container_path')
parser.add_argument('bof_path')
parser.add_argument('clf_path')
args = parser.parse_args()
container_path = args.container_path
bof_path = args.bof_path
clf_path = args.clf_path
bunch_files = load_files(container_path=container_path,
                         description='images',
                         shuffle=False,
                         load_content=False)
with gzip.open(bof_path, 'rb') as f:
    bof = pickle.load(f)
with gzip.open(clf_path, 'rb') as f:
    clf = pickle.load(f)
descs = []
for fname in bunch_files.filenames:
    img = cv2.imread(fname, 0)
    _, desc = get_sift_keypoints(img)
    descs.append(desc)
X = bof.transform(descs)
normalize(X, copy=False)
y_pred = clf.predict(X)
y = bunch_files.target
print accuracy_score(y, y_pred)
print classification_report(y, y_pred, target_names=clf.target_names_)
|
<commit_before><commit_msg>[jsk_2015_apc_common] Add bof object recognition test script<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import cPickle as pickle
import gzip
import sys
import cv2
from imagesift import get_sift_keypoints
import numpy as np
from sklearn.datasets import load_files
from sklearn.metrics import accuracy_score, classification_report
from sklearn.preprocessing import normalize
parser = argparse.ArgumentParser()
parser.add_argument('container_path')
parser.add_argument('bof_path')
parser.add_argument('clf_path')
args = parser.parse_args()
container_path = args.container_path
bof_path = args.bof_path
clf_path = args.clf_path
bunch_files = load_files(container_path=container_path,
                         description='images',
                         shuffle=False,
                         load_content=False)
with gzip.open(bof_path, 'rb') as f:
    bof = pickle.load(f)
with gzip.open(clf_path, 'rb') as f:
    clf = pickle.load(f)
descs = []
for fname in bunch_files.filenames:
    img = cv2.imread(fname, 0)
    _, desc = get_sift_keypoints(img)
    descs.append(desc)
X = bof.transform(descs)
normalize(X, copy=False)
y_pred = clf.predict(X)
y = bunch_files.target
print accuracy_score(y, y_pred)
print classification_report(y, y_pred, target_names=clf.target_names_)
|
|
8ce76a8b2e0310b0adeceef60478ad7baeceba8c
|
IPython/utils/tests/test_importstring.py
|
IPython/utils/tests/test_importstring.py
|
"""Tests for IPython.utils.importstring."""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import nose.tools as nt
from IPython.utils.importstring import import_item
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_import_plain():
"Test simple imports"
import os
os2 = import_item('os')
nt.assert_is(os, os2)
def test_import_nested():
"Test nested imports from the stdlib"
from os import path
path2 = import_item('os.path')
nt.assert_is(path, path2)
def test_import_raises():
"Test that failing imports raise the right exception"
nt.assert_raises(ImportError, import_item, 'IPython.foobar')
|
Add proper tests to importstring.
|
Add proper tests to importstring.
|
Python
|
bsd-3-clause
|
ipython/ipython,ipython/ipython
|
Add proper tests to importstring.
|
"""Tests for IPython.utils.importstring."""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import nose.tools as nt
from IPython.utils.importstring import import_item
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_import_plain():
"Test simple imports"
import os
os2 = import_item('os')
nt.assert_is(os, os2)
def test_import_nested():
"Test nested imports from the stdlib"
from os import path
path2 = import_item('os.path')
nt.assert_is(path, path2)
def test_import_raises():
"Test that failing imports raise the right exception"
nt.assert_raises(ImportError, import_item, 'IPython.foobar')
|
<commit_before><commit_msg>Add proper tests to importstring.<commit_after>
|
"""Tests for IPython.utils.importstring."""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import nose.tools as nt
from IPython.utils.importstring import import_item
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_import_plain():
"Test simple imports"
import os
os2 = import_item('os')
nt.assert_is(os, os2)
def test_import_nested():
"Test nested imports from the stdlib"
from os import path
path2 = import_item('os.path')
nt.assert_is(path, path2)
def test_import_raises():
"Test that failing imports raise the right exception"
nt.assert_raises(ImportError, import_item, 'IPython.foobar')
|
Add proper tests to importstring."""Tests for IPython.utils.importstring."""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import nose.tools as nt
from IPython.utils.importstring import import_item
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_import_plain():
"Test simple imports"
import os
os2 = import_item('os')
nt.assert_is(os, os2)
def test_import_nested():
"Test nested imports from the stdlib"
from os import path
path2 = import_item('os.path')
nt.assert_is(path, path2)
def test_import_raises():
"Test that failing imports raise the right exception"
nt.assert_raises(ImportError, import_item, 'IPython.foobar')
|
<commit_before><commit_msg>Add proper tests to importstring.<commit_after>"""Tests for IPython.utils.importstring."""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import nose.tools as nt
from IPython.utils.importstring import import_item
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_import_plain():
"Test simple imports"
import os
os2 = import_item('os')
nt.assert_is(os, os2)
def test_import_nested():
"Test nested imports from the stdlib"
from os import path
path2 = import_item('os.path')
nt.assert_is(path, path2)
def test_import_raises():
"Test that failing imports raise the right exception"
nt.assert_raises(ImportError, import_item, 'IPython.foobar')
|
|
dfbff1c8e1e9acdf66da4caf7ace35fd0b2ce161
|
RPxVideoConverter.py
|
RPxVideoConverter.py
|
#!/usr/bin/env python
import time
import os
import glob
import sys
import shutil
def RPxLog(severity, message):
    print time.time(), severity, message
def RPxErrLog(message):
    RPxLog("E", message)
def RPxInfoLog(message):
    RPxLog("I", message)
def RPxDevLog(message):
    RPxLog("D", message)
scriptDir = os.path.abspath(os.path.dirname(__file__))
folderWithVideos = sys.argv[1] if (len(sys.argv) > 1) else scriptDir
someErrors = False
filesInOriginalFormat = glob.glob(folderWithVideos + "/*.h264")
if len(filesInOriginalFormat) == 0:
    RPxInfoLog("Noting to convert, no h264 RAW files")
    sys.exit(0)
RPxInfoLog("Start Converting")
for fileToConvert in filesInOriginalFormat:
    destinationFilePath = fileToConvert + ".mp4"
    RPxDevLog("Converting " + fileToConvert)
    result = os.system("avconv -i \"" + fileToConvert + "\" -c:v copy \"" + destinationFilePath + "\"");
    if result != 0:
        RPxErrLog("Failed to convert file: " + fileToConvert)
        someErrors = True
    else:
        shutil.copystat(fileToConvert, destinationFilePath)
        RPxDevLog("Converting Complete. New file: " + destinationFilePath)
        os.remove(fileToConvert);
if someErrors:
    RPxInfoLog("Done all, WITH ERRORS")
else:
    RPxInfoLog("Done all, no errors")
|
Convert video files captured with raspivid
|
Convert video files captured with raspivid
|
Python
|
apache-2.0
|
RPxDrones/RPxCamera,RPxCopter/RPxCamera,RPxCopter/RPxCamera,RPxDrones/RPxCamera
|
Convert video files captured with raspivid
|
#!/usr/bin/env python
import time
import os
import glob
import sys
import shutil
def RPxLog(severity, message):
    print time.time(), severity, message
def RPxErrLog(message):
    RPxLog("E", message)
def RPxInfoLog(message):
    RPxLog("I", message)
def RPxDevLog(message):
    RPxLog("D", message)
scriptDir = os.path.abspath(os.path.dirname(__file__))
folderWithVideos = sys.argv[1] if (len(sys.argv) > 1) else scriptDir
someErrors = False
filesInOriginalFormat = glob.glob(folderWithVideos + "/*.h264")
if len(filesInOriginalFormat) == 0:
    RPxInfoLog("Noting to convert, no h264 RAW files")
    sys.exit(0)
RPxInfoLog("Start Converting")
for fileToConvert in filesInOriginalFormat:
    destinationFilePath = fileToConvert + ".mp4"
    RPxDevLog("Converting " + fileToConvert)
    result = os.system("avconv -i \"" + fileToConvert + "\" -c:v copy \"" + destinationFilePath + "\"");
    if result != 0:
        RPxErrLog("Failed to convert file: " + fileToConvert)
        someErrors = True
    else:
        shutil.copystat(fileToConvert, destinationFilePath)
        RPxDevLog("Converting Complete. New file: " + destinationFilePath)
        os.remove(fileToConvert);
if someErrors:
    RPxInfoLog("Done all, WITH ERRORS")
else:
    RPxInfoLog("Done all, no errors")
|
<commit_before><commit_msg>Convert video files captured with raspivid<commit_after>
|
#!/usr/bin/env python
import time
import os
import glob
import sys
import shutil
def RPxLog(severity, message):
    print time.time(), severity, message
def RPxErrLog(message):
    RPxLog("E", message)
def RPxInfoLog(message):
    RPxLog("I", message)
def RPxDevLog(message):
    RPxLog("D", message)
scriptDir = os.path.abspath(os.path.dirname(__file__))
folderWithVideos = sys.argv[1] if (len(sys.argv) > 1) else scriptDir
someErrors = False
filesInOriginalFormat = glob.glob(folderWithVideos + "/*.h264")
if len(filesInOriginalFormat) == 0:
    RPxInfoLog("Noting to convert, no h264 RAW files")
    sys.exit(0)
RPxInfoLog("Start Converting")
for fileToConvert in filesInOriginalFormat:
    destinationFilePath = fileToConvert + ".mp4"
    RPxDevLog("Converting " + fileToConvert)
    result = os.system("avconv -i \"" + fileToConvert + "\" -c:v copy \"" + destinationFilePath + "\"");
    if result != 0:
        RPxErrLog("Failed to convert file: " + fileToConvert)
        someErrors = True
    else:
        shutil.copystat(fileToConvert, destinationFilePath)
        RPxDevLog("Converting Complete. New file: " + destinationFilePath)
        os.remove(fileToConvert);
if someErrors:
    RPxInfoLog("Done all, WITH ERRORS")
else:
    RPxInfoLog("Done all, no errors")
|
Convert video files captured with raspivid#!/usr/bin/env python
import time
import os
import glob
import sys
import shutil
def RPxLog(severity, message):
    print time.time(), severity, message
def RPxErrLog(message):
    RPxLog("E", message)
def RPxInfoLog(message):
    RPxLog("I", message)
def RPxDevLog(message):
    RPxLog("D", message)
scriptDir = os.path.abspath(os.path.dirname(__file__))
folderWithVideos = sys.argv[1] if (len(sys.argv) > 1) else scriptDir
someErrors = False
filesInOriginalFormat = glob.glob(folderWithVideos + "/*.h264")
if len(filesInOriginalFormat) == 0:
    RPxInfoLog("Noting to convert, no h264 RAW files")
    sys.exit(0)
RPxInfoLog("Start Converting")
for fileToConvert in filesInOriginalFormat:
    destinationFilePath = fileToConvert + ".mp4"
    RPxDevLog("Converting " + fileToConvert)
    result = os.system("avconv -i \"" + fileToConvert + "\" -c:v copy \"" + destinationFilePath + "\"");
    if result != 0:
        RPxErrLog("Failed to convert file: " + fileToConvert)
        someErrors = True
    else:
        shutil.copystat(fileToConvert, destinationFilePath)
        RPxDevLog("Converting Complete. New file: " + destinationFilePath)
        os.remove(fileToConvert);
if someErrors:
    RPxInfoLog("Done all, WITH ERRORS")
else:
    RPxInfoLog("Done all, no errors")
|
<commit_before><commit_msg>Convert video files captured with raspivid<commit_after>#!/usr/bin/env python
import time
import os
import glob
import sys
import shutil
def RPxLog(severity, message):
    print time.time(), severity, message
def RPxErrLog(message):
    RPxLog("E", message)
def RPxInfoLog(message):
    RPxLog("I", message)
def RPxDevLog(message):
    RPxLog("D", message)
scriptDir = os.path.abspath(os.path.dirname(__file__))
folderWithVideos = sys.argv[1] if (len(sys.argv) > 1) else scriptDir
someErrors = False
filesInOriginalFormat = glob.glob(folderWithVideos + "/*.h264")
if len(filesInOriginalFormat) == 0:
    RPxInfoLog("Noting to convert, no h264 RAW files")
    sys.exit(0)
RPxInfoLog("Start Converting")
for fileToConvert in filesInOriginalFormat:
    destinationFilePath = fileToConvert + ".mp4"
    RPxDevLog("Converting " + fileToConvert)
    result = os.system("avconv -i \"" + fileToConvert + "\" -c:v copy \"" + destinationFilePath + "\"");
    if result != 0:
        RPxErrLog("Failed to convert file: " + fileToConvert)
        someErrors = True
    else:
        shutil.copystat(fileToConvert, destinationFilePath)
        RPxDevLog("Converting Complete. New file: " + destinationFilePath)
        os.remove(fileToConvert);
if someErrors:
    RPxInfoLog("Done all, WITH ERRORS")
else:
    RPxInfoLog("Done all, no errors")
|
|
35c4696b87bd167dc9766a58391faf9b776305ad
|
tests/services/authorization/test_permission_to_role_assignment.py
|
tests/services/authorization/test_permission_to_role_assignment.py
|
"""
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from byceps.services.authorization import service
from tests.base import AbstractAppTestCase
class PermissionToRoleAssignmentTestCase(AbstractAppTestCase):
    def setUp(self):
        super().setUp()
        self.permission_id = 'board_topic_hide'
        self.permission = service.create_permission(self.permission_id,
                                                    self.permission_id)
        self.role_id = 'board_moderator'
        self.role = service.create_role(self.role_id, self.role_id)
    def test_assign_permission_to_role(self):
        permission_id = self.permission.id
        role_id = self.role.id
        role_permission_ids_before = self.get_permission_ids_for_role(role_id)
        self.assertNotIn(self.permission_id, role_permission_ids_before)
        service.assign_permission_to_role(permission_id, role_id)
        role_permission_ids_after = self.get_permission_ids_for_role(role_id)
        self.assertIn(self.permission_id, role_permission_ids_after)
    # -------------------------------------------------------------------- #
    # helpers
    def get_permission_ids_for_role(self, role_id):
        return {p.id for p in self.role.permissions}
|
Test assignment of permissions to roles
|
Test assignment of permissions to roles
|
Python
|
bsd-3-clause
|
homeworkprod/byceps,m-ober/byceps,m-ober/byceps,homeworkprod/byceps,m-ober/byceps,homeworkprod/byceps
|
Test assignment of permissions to roles
|
"""
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from byceps.services.authorization import service
from tests.base import AbstractAppTestCase
class PermissionToRoleAssignmentTestCase(AbstractAppTestCase):
    def setUp(self):
        super().setUp()
        self.permission_id = 'board_topic_hide'
        self.permission = service.create_permission(self.permission_id,
                                                    self.permission_id)
        self.role_id = 'board_moderator'
        self.role = service.create_role(self.role_id, self.role_id)
    def test_assign_permission_to_role(self):
        permission_id = self.permission.id
        role_id = self.role.id
        role_permission_ids_before = self.get_permission_ids_for_role(role_id)
        self.assertNotIn(self.permission_id, role_permission_ids_before)
        service.assign_permission_to_role(permission_id, role_id)
        role_permission_ids_after = self.get_permission_ids_for_role(role_id)
        self.assertIn(self.permission_id, role_permission_ids_after)
    # -------------------------------------------------------------------- #
    # helpers
    def get_permission_ids_for_role(self, role_id):
        return {p.id for p in self.role.permissions}
|
<commit_before><commit_msg>Test assignment of permissions to roles<commit_after>
|
"""
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from byceps.services.authorization import service
from tests.base import AbstractAppTestCase
class PermissionToRoleAssignmentTestCase(AbstractAppTestCase):
    def setUp(self):
        super().setUp()
        self.permission_id = 'board_topic_hide'
        self.permission = service.create_permission(self.permission_id,
                                                    self.permission_id)
        self.role_id = 'board_moderator'
        self.role = service.create_role(self.role_id, self.role_id)
    def test_assign_permission_to_role(self):
        permission_id = self.permission.id
        role_id = self.role.id
        role_permission_ids_before = self.get_permission_ids_for_role(role_id)
        self.assertNotIn(self.permission_id, role_permission_ids_before)
        service.assign_permission_to_role(permission_id, role_id)
        role_permission_ids_after = self.get_permission_ids_for_role(role_id)
        self.assertIn(self.permission_id, role_permission_ids_after)
    # -------------------------------------------------------------------- #
    # helpers
    def get_permission_ids_for_role(self, role_id):
        return {p.id for p in self.role.permissions}
|
Test assignment of permissions to roles"""
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from byceps.services.authorization import service
from tests.base import AbstractAppTestCase
class PermissionToRoleAssignmentTestCase(AbstractAppTestCase):
    def setUp(self):
        super().setUp()
        self.permission_id = 'board_topic_hide'
        self.permission = service.create_permission(self.permission_id,
                                                    self.permission_id)
        self.role_id = 'board_moderator'
        self.role = service.create_role(self.role_id, self.role_id)
    def test_assign_permission_to_role(self):
        permission_id = self.permission.id
        role_id = self.role.id
        role_permission_ids_before = self.get_permission_ids_for_role(role_id)
        self.assertNotIn(self.permission_id, role_permission_ids_before)
        service.assign_permission_to_role(permission_id, role_id)
        role_permission_ids_after = self.get_permission_ids_for_role(role_id)
        self.assertIn(self.permission_id, role_permission_ids_after)
    # -------------------------------------------------------------------- #
    # helpers
    def get_permission_ids_for_role(self, role_id):
        return {p.id for p in self.role.permissions}
|
<commit_before><commit_msg>Test assignment of permissions to roles<commit_after>"""
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from byceps.services.authorization import service
from tests.base import AbstractAppTestCase
class PermissionToRoleAssignmentTestCase(AbstractAppTestCase):
    def setUp(self):
        super().setUp()
        self.permission_id = 'board_topic_hide'
        self.permission = service.create_permission(self.permission_id,
                                                    self.permission_id)
        self.role_id = 'board_moderator'
        self.role = service.create_role(self.role_id, self.role_id)
    def test_assign_permission_to_role(self):
        permission_id = self.permission.id
        role_id = self.role.id
        role_permission_ids_before = self.get_permission_ids_for_role(role_id)
        self.assertNotIn(self.permission_id, role_permission_ids_before)
        service.assign_permission_to_role(permission_id, role_id)
        role_permission_ids_after = self.get_permission_ids_for_role(role_id)
        self.assertIn(self.permission_id, role_permission_ids_after)
    # -------------------------------------------------------------------- #
    # helpers
    def get_permission_ids_for_role(self, role_id):
        return {p.id for p in self.role.permissions}
|
|
2459ddc77429b1a77de9e32ab551649a2abc2517
|
src/plone.server/plone/server/tests/test_transactions.py
|
src/plone.server/plone/server/tests/test_transactions.py
|
# -*- coding: utf-8 -*-
from aiohttp.test_utils import make_mocked_request
from plone.server.browser import View
from plone.server.transactions import RequestAwareDB
from plone.server.transactions import RequestAwareTransactionManager
from plone.server.transactions import TransactionProxy
import pytest
import ZODB.DemoStorage
@pytest.yield_fixture(scope='module')
def conn():
    storage = ZODB.DemoStorage.DemoStorage()
    ZODB.DB(storage).close()  # init storage with root
    db = RequestAwareDB(storage)
    tm = RequestAwareTransactionManager()
    yield db.open(transaction_manager=tm)
@pytest.yield_fixture(scope='module')
def view(conn):
    request = make_mocked_request('POST', '/')
    yield View(conn.root(), request)
def test_transaction_proxy(view):
    tm = view.context._p_jar.transaction_manager
    request1 = view.request
    t1 = tm.begin(request1)
    assert hasattr(request1, '_txn')
    assert hasattr(request1, '_txn_time')
    request2 = TransactionProxy(view.request)
    t2 = tm.begin(request2)
    assert hasattr(request2, '_txn')
    assert hasattr(request2, '_txn_time')
    assert t1 is not t2
    assert t1 is request1._txn
    assert t2 is request2._txn
    assert request1._txn_time < request2._txn_time
|
Add tests for transaction proxy
|
Add tests for transaction proxy
|
Python
|
bsd-2-clause
|
plone/plone.server,plone/plone.server
|
Add tests for transaction proxy
|
# -*- coding: utf-8 -*-
from aiohttp.test_utils import make_mocked_request
from plone.server.browser import View
from plone.server.transactions import RequestAwareDB
from plone.server.transactions import RequestAwareTransactionManager
from plone.server.transactions import TransactionProxy
import pytest
import ZODB.DemoStorage
@pytest.yield_fixture(scope='module')
def conn():
    storage = ZODB.DemoStorage.DemoStorage()
    ZODB.DB(storage).close()  # init storage with root
    db = RequestAwareDB(storage)
    tm = RequestAwareTransactionManager()
    yield db.open(transaction_manager=tm)
@pytest.yield_fixture(scope='module')
def view(conn):
    request = make_mocked_request('POST', '/')
    yield View(conn.root(), request)
def test_transaction_proxy(view):
    tm = view.context._p_jar.transaction_manager
    request1 = view.request
    t1 = tm.begin(request1)
    assert hasattr(request1, '_txn')
    assert hasattr(request1, '_txn_time')
    request2 = TransactionProxy(view.request)
    t2 = tm.begin(request2)
    assert hasattr(request2, '_txn')
    assert hasattr(request2, '_txn_time')
    assert t1 is not t2
    assert t1 is request1._txn
    assert t2 is request2._txn
    assert request1._txn_time < request2._txn_time
|
<commit_before><commit_msg>Add tests for transaction proxy<commit_after>
|
# -*- coding: utf-8 -*-
from aiohttp.test_utils import make_mocked_request
from plone.server.browser import View
from plone.server.transactions import RequestAwareDB
from plone.server.transactions import RequestAwareTransactionManager
from plone.server.transactions import TransactionProxy
import pytest
import ZODB.DemoStorage
@pytest.yield_fixture(scope='module')
def conn():
    storage = ZODB.DemoStorage.DemoStorage()
    ZODB.DB(storage).close()  # init storage with root
    db = RequestAwareDB(storage)
    tm = RequestAwareTransactionManager()
    yield db.open(transaction_manager=tm)
@pytest.yield_fixture(scope='module')
def view(conn):
    request = make_mocked_request('POST', '/')
    yield View(conn.root(), request)
def test_transaction_proxy(view):
    tm = view.context._p_jar.transaction_manager
    request1 = view.request
    t1 = tm.begin(request1)
    assert hasattr(request1, '_txn')
    assert hasattr(request1, '_txn_time')
    request2 = TransactionProxy(view.request)
    t2 = tm.begin(request2)
    assert hasattr(request2, '_txn')
    assert hasattr(request2, '_txn_time')
    assert t1 is not t2
    assert t1 is request1._txn
    assert t2 is request2._txn
    assert request1._txn_time < request2._txn_time
|
Add tests for transaction proxy# -*- coding: utf-8 -*-
from aiohttp.test_utils import make_mocked_request
from plone.server.browser import View
from plone.server.transactions import RequestAwareDB
from plone.server.transactions import RequestAwareTransactionManager
from plone.server.transactions import TransactionProxy
import pytest
import ZODB.DemoStorage
@pytest.yield_fixture(scope='module')
def conn():
    storage = ZODB.DemoStorage.DemoStorage()
    ZODB.DB(storage).close()  # init storage with root
    db = RequestAwareDB(storage)
    tm = RequestAwareTransactionManager()
    yield db.open(transaction_manager=tm)
@pytest.yield_fixture(scope='module')
def view(conn):
    request = make_mocked_request('POST', '/')
    yield View(conn.root(), request)
def test_transaction_proxy(view):
    tm = view.context._p_jar.transaction_manager
    request1 = view.request
    t1 = tm.begin(request1)
    assert hasattr(request1, '_txn')
    assert hasattr(request1, '_txn_time')
    request2 = TransactionProxy(view.request)
    t2 = tm.begin(request2)
    assert hasattr(request2, '_txn')
    assert hasattr(request2, '_txn_time')
    assert t1 is not t2
    assert t1 is request1._txn
    assert t2 is request2._txn
    assert request1._txn_time < request2._txn_time
|
<commit_before><commit_msg>Add tests for transaction proxy<commit_after># -*- coding: utf-8 -*-
from aiohttp.test_utils import make_mocked_request
from plone.server.browser import View
from plone.server.transactions import RequestAwareDB
from plone.server.transactions import RequestAwareTransactionManager
from plone.server.transactions import TransactionProxy
import pytest
import ZODB.DemoStorage
@pytest.yield_fixture(scope='module')
def conn():
    storage = ZODB.DemoStorage.DemoStorage()
    ZODB.DB(storage).close()  # init storage with root
    db = RequestAwareDB(storage)
    tm = RequestAwareTransactionManager()
    yield db.open(transaction_manager=tm)
@pytest.yield_fixture(scope='module')
def view(conn):
    request = make_mocked_request('POST', '/')
    yield View(conn.root(), request)
def test_transaction_proxy(view):
    tm = view.context._p_jar.transaction_manager
    request1 = view.request
    t1 = tm.begin(request1)
    assert hasattr(request1, '_txn')
    assert hasattr(request1, '_txn_time')
    request2 = TransactionProxy(view.request)
    t2 = tm.begin(request2)
    assert hasattr(request2, '_txn')
    assert hasattr(request2, '_txn_time')
    assert t1 is not t2
    assert t1 is request1._txn
    assert t2 is request2._txn
    assert request1._txn_time < request2._txn_time
|
|
639aac964e1c4bc311f4b3e8dd9a0c015f3c81c3
|
zerver/management/commands/populate-stream-tokens.py
|
zerver/management/commands/populate-stream-tokens.py
|
#!/usr/bin/python
from django.core.management.base import BaseCommand
from zerver.lib.utils import generate_random_token
from zerver.models import Stream
class Command(BaseCommand):
help = """Set a token for all streams that don't have one."""
def handle(self, **options):
streams_needing_tokens = Stream.objects.filter(email_token=None)
for stream in streams_needing_tokens:
stream.email_token = generate_random_token(32)
stream.save()
|
Add a management command to generate hashes for all streams that need them.
|
Add a management command to generate hashes for all streams that need them.
(imported from commit b3a05e47dfe69b44f984185e360d79bf04f7885b)
|
Python
|
apache-2.0
|
luyifan/zulip,TigorC/zulip,Vallher/zulip,jphilipsen05/zulip,xuxiao/zulip,tiansiyuan/zulip,ufosky-server/zulip,tommyip/zulip,armooo/zulip,hj3938/zulip,praveenaki/zulip,kaiyuanheshang/zulip,Qgap/zulip,vabs22/zulip,babbage/zulip,arpitpanwar/zulip,suxinde2009/zulip,johnny9/zulip,Cheppers/zulip,littledogboy/zulip,hackerkid/zulip,zacps/zulip,so0k/zulip,brockwhittaker/zulip,moria/zulip,ericzhou2008/zulip,tiansiyuan/zulip,stamhe/zulip,grave-w-grave/zulip,mansilladev/zulip,armooo/zulip,Suninus/zulip,shubhamdhama/zulip,KJin99/zulip,bssrdf/zulip,DazWorrall/zulip,SmartPeople/zulip,xuanhan863/zulip,Juanvulcano/zulip,shrikrishnaholla/zulip,zwily/zulip,noroot/zulip,aliceriot/zulip,tbutter/zulip,wavelets/zulip,krtkmj/zulip,dwrpayne/zulip,shaunstanislaus/zulip,dattatreya303/zulip,esander91/zulip,Frouk/zulip,developerfm/zulip,mahim97/zulip,jphilipsen05/zulip,johnnygaddarr/zulip,JPJPJPOPOP/zulip,pradiptad/zulip,jeffcao/zulip,ikasumiwt/zulip,moria/zulip,vikas-parashar/zulip,amanharitsh123/zulip,Cheppers/zulip,Cheppers/zulip,technicalpickles/zulip,aliceriot/zulip,kokoar/zulip,jessedhillon/zulip,bowlofstew/zulip,DazWorrall/zulip,zorojean/zulip,xuanhan863/zulip,KingxBanana/zulip,calvinleenyc/zulip,LeeRisk/zulip,bowlofstew/zulip,sup95/zulip,wangdeshui/zulip,proliming/zulip,kou/zulip,amallia/zulip,andersk/zulip,themass/zulip,hafeez3000/zulip,grave-w-grave/zulip,eastlhu/zulip,bluesea/zulip,wangdeshui/zulip,grave-w-grave/zulip,ikasumiwt/zulip,noroot/zulip,sharmaeklavya2/zulip,KingxBanana/zulip,LAndreas/zulip,peiwei/zulip,avastu/zulip,rht/zulip,m1ssou/zulip,brockwhittaker/zulip,krtkmj/zulip,babbage/zulip,cosmicAsymmetry/zulip,dwrpayne/zulip,shaunstanislaus/zulip,lfranchi/zulip,bluesea/zulip,dotcool/zulip,hafeez3000/zulip,akuseru/zulip,jackrzhang/zulip,shubhamdhama/zulip,zacps/zulip,RobotCaleb/zulip,eeshangarg/zulip,eeshangarg/zulip,proliming/zulip,christi3k/zulip,Vallher/zulip,kou/zulip,wweiradio/zulip,vakila/zulip,ashwinirudrappa/zulip,TigorC/zulip,calvinleenyc/zulip,shaunstanislaus/zulip,hengqujushi/zulip,tbutter/zulip,alliejones/zulip,voidException/zulip,adnanh/zulip,KingxBanana/zulip,jerryge/zulip,amanharitsh123/zulip,mdavid/zulip,wweiradio/zulip,amallia/zulip,shubhamdhama/zulip,showell/zulip,umkay/zulip,Batterfii/zulip,xuxiao/zulip,susansls/zulip,pradiptad/zulip,tiansiyuan/zulip,saitodisse/zulip,voidException/zulip,dotcool/zulip,RobotCaleb/zulip,itnihao/zulip,kou/zulip,arpitpanwar/zulip,udxxabp/zulip,bssrdf/zulip,umkay/zulip,Drooids/zulip,andersk/zulip,ryanbackman/zulip,rht/zulip,susansls/zulip,johnny9/zulip,hafeez3000/zulip,aakash-cr7/zulip,Batterfii/zulip,qq1012803704/zulip,deer-hope/zulip,m1ssou/zulip,bssrdf/zulip,gkotian/zulip,KingxBanana/zulip,MayB/zulip,nicholasbs/zulip,dhcrzf/zulip,ipernet/zulip,atomic-labs/zulip,karamcnair/zulip,dxq-git/zulip,ryansnowboarder/zulip,LAndreas/zulip,arpitpanwar/zulip,brainwane/zulip,bastianh/zulip,jonesgithub/zulip,glovebx/zulip,Drooids/zulip,yocome/zulip,nicholasbs/zulip,sup95/zulip,MariaFaBella85/zulip,fw1121/zulip,zulip/zulip,calvinleenyc/zulip,armooo/zulip,aliceriot/zulip,paxapy/zulip,jackrzhang/zulip,j831/zulip,developerfm/zulip,yocome/zulip,so0k/zulip,paxapy/zulip,proliming/zulip,zhaoweigg/zulip,umkay/zulip,grave-w-grave/zulip,suxinde2009/zulip,samatdav/zulip,AZtheAsian/zulip,LeeRisk/zulip,hayderimran7/zulip,tdr130/zulip,timabbott/zulip,jessedhillon/zulip,vikas-parashar/zulip,karamcnair/zulip,adnanh/zulip,amyliu345/zulip,blaze225/zulip,gigawhitlocks/zulip,arpith/zulip,technicalpickles/zulip,noroot/zulip,Drooids/zulip,umkay/zulip,ahmadassaf/zulip,ahmadassaf/zulip,andersk
/zulip,noroot/zulip,shaunstanislaus/zulip,showell/zulip,zofuthan/zulip,Diptanshu8/zulip,johnny9/zulip,deer-hope/zulip,umkay/zulip,reyha/zulip,samatdav/zulip,Frouk/zulip,peiwei/zulip,pradiptad/zulip,udxxabp/zulip,mohsenSy/zulip,vaidap/zulip,lfranchi/zulip,jerryge/zulip,esander91/zulip,hayderimran7/zulip,dotcool/zulip,jonesgithub/zulip,timabbott/zulip,aakash-cr7/zulip,zachallaun/zulip,jrowan/zulip,johnnygaddarr/zulip,jerryge/zulip,PhilSk/zulip,bssrdf/zulip,guiquanz/zulip,grave-w-grave/zulip,dattatreya303/zulip,kaiyuanheshang/zulip,hayderimran7/zulip,vaidap/zulip,ericzhou2008/zulip,firstblade/zulip,dwrpayne/zulip,synicalsyntax/zulip,mdavid/zulip,ApsOps/zulip,Gabriel0402/zulip,zwily/zulip,xuxiao/zulip,stamhe/zulip,amallia/zulip,bluesea/zulip,PaulPetring/zulip,eeshangarg/zulip,xuxiao/zulip,PaulPetring/zulip,luyifan/zulip,kou/zulip,atomic-labs/zulip,jphilipsen05/zulip,wdaher/zulip,karamcnair/zulip,wdaher/zulip,dxq-git/zulip,Cheppers/zulip,natanovia/zulip,dhcrzf/zulip,aps-sids/zulip,ufosky-server/zulip,sup95/zulip,Cheppers/zulip,littledogboy/zulip,jimmy54/zulip,yuvipanda/zulip,kokoar/zulip,joyhchen/zulip,seapasulli/zulip,technicalpickles/zulip,andersk/zulip,showell/zulip,hengqujushi/zulip,vikas-parashar/zulip,ericzhou2008/zulip,moria/zulip,amyliu345/zulip,swinghu/zulip,isht3/zulip,avastu/zulip,joshisa/zulip,eeshangarg/zulip,ashwinirudrappa/zulip,christi3k/zulip,EasonYi/zulip,saitodisse/zulip,johnnygaddarr/zulip,mahim97/zulip,hustlzp/zulip,praveenaki/zulip,wavelets/zulip,mansilladev/zulip,so0k/zulip,Qgap/zulip,KingxBanana/zulip,blaze225/zulip,qq1012803704/zulip,technicalpickles/zulip,ryanbackman/zulip,zulip/zulip,sonali0901/zulip,schatt/zulip,jackrzhang/zulip,LeeRisk/zulip,ashwinirudrappa/zulip,noroot/zulip,SmartPeople/zulip,dnmfarrell/zulip,m1ssou/zulip,zhaoweigg/zulip,shubhamdhama/zulip,gigawhitlocks/zulip,mohsenSy/zulip,Galexrt/zulip,hafeez3000/zulip,gigawhitlocks/zulip,mansilladev/zulip,mahim97/zulip,shrikrishnaholla/zulip,jainayush975/zulip,synicalsyntax/zulip,MariaFaBella85/zulip,Juanvulcano/zulip,ufosky-server/zulip,punchagan/zulip,eeshangarg/zulip,zachallaun/zulip,firstblade/zulip,susansls/zulip,yocome/zulip,ericzhou2008/zulip,johnny9/zulip,niftynei/zulip,rishig/zulip,levixie/zulip,tbutter/zulip,KingxBanana/zulip,so0k/zulip,swinghu/zulip,MariaFaBella85/zulip,xuanhan863/zulip,Galexrt/zulip,Juanvulcano/zulip,Gabriel0402/zulip,swinghu/zulip,nicholasbs/zulip,adnanh/zulip,littledogboy/zulip,isht3/zulip,udxxabp/zulip,ericzhou2008/zulip,atomic-labs/zulip,zwily/zulip,gigawhitlocks/zulip,souravbadami/zulip,stamhe/zulip,shubhamdhama/zulip,punchagan/zulip,dnmfarrell/zulip,samatdav/zulip,Batterfii/zulip,Diptanshu8/zulip,hayderimran7/zulip,cosmicAsymmetry/zulip,mahim97/zulip,JanzTam/zulip,glovebx/zulip,m1ssou/zulip,ryansnowboarder/zulip,xuanhan863/zulip,adnanh/zulip,armooo/zulip,wdaher/zulip,souravbadami/zulip,aps-sids/zulip,firstblade/zulip,zhaoweigg/zulip,mdavid/zulip,xuxiao/zulip,PhilSk/zulip,firstblade/zulip,jessedhillon/zulip,JanzTam/zulip,gkotian/zulip,hustlzp/zulip,willingc/zulip,littledogboy/zulip,ericzhou2008/zulip,armooo/zulip,ryansnowboarder/zulip,AZtheAsian/zulip,zulip/zulip,tommyip/zulip,Vallher/zulip,itnihao/zulip,arpith/zulip,gigawhitlocks/zulip,tbutter/zulip,verma-varsha/zulip,themass/zulip,bluesea/zulip,vakila/zulip,moria/zulip,hengqujushi/zulip,alliejones/zulip,eastlhu/zulip,akuseru/zulip,johnny9/zulip,luyifan/zulip,huangkebo/zulip,jimmy54/zulip,zhaoweigg/zulip,showell/zulip,zulip/zulip,yuvipanda/zulip,shubhamdhama/zulip,JPJPJPOPOP/zulip,schatt/zulip,ipernet/zulip,amyliu345/zulip,gkoti
an/zulip,dattatreya303/zulip,developerfm/zulip,MayB/zulip,schatt/zulip,mohsenSy/zulip,susansls/zulip,willingc/zulip,Cheppers/zulip,so0k/zulip,Juanvulcano/zulip,susansls/zulip,themass/zulip,krtkmj/zulip,deer-hope/zulip,armooo/zulip,wweiradio/zulip,synicalsyntax/zulip,RobotCaleb/zulip,jackrzhang/zulip,JanzTam/zulip,hj3938/zulip,hustlzp/zulip,souravbadami/zulip,easyfmxu/zulip,jainayush975/zulip,deer-hope/zulip,Jianchun1/zulip,littledogboy/zulip,Frouk/zulip,Galexrt/zulip,ahmadassaf/zulip,LAndreas/zulip,ipernet/zulip,PaulPetring/zulip,joshisa/zulip,AZtheAsian/zulip,luyifan/zulip,AZtheAsian/zulip,Qgap/zulip,souravbadami/zulip,verma-varsha/zulip,voidException/zulip,aliceriot/zulip,sonali0901/zulip,hj3938/zulip,DazWorrall/zulip,codeKonami/zulip,samatdav/zulip,zulip/zulip,showell/zulip,dhcrzf/zulip,paxapy/zulip,jimmy54/zulip,paxapy/zulip,synicalsyntax/zulip,peguin40/zulip,swinghu/zulip,nicholasbs/zulip,armooo/zulip,seapasulli/zulip,pradiptad/zulip,hayderimran7/zulip,atomic-labs/zulip,adnanh/zulip,natanovia/zulip,wweiradio/zulip,yocome/zulip,MayB/zulip,zorojean/zulip,suxinde2009/zulip,itnihao/zulip,levixie/zulip,JanzTam/zulip,christi3k/zulip,hackerkid/zulip,ryansnowboarder/zulip,themass/zulip,dnmfarrell/zulip,voidException/zulip,noroot/zulip,shrikrishnaholla/zulip,j831/zulip,SmartPeople/zulip,reyha/zulip,Suninus/zulip,kaiyuanheshang/zulip,Jianchun1/zulip,Vallher/zulip,qq1012803704/zulip,huangkebo/zulip,kaiyuanheshang/zulip,wangdeshui/zulip,ApsOps/zulip,peguin40/zulip,wweiradio/zulip,zulip/zulip,zulip/zulip,luyifan/zulip,Vallher/zulip,avastu/zulip,aakash-cr7/zulip,m1ssou/zulip,tommyip/zulip,peiwei/zulip,udxxabp/zulip,timabbott/zulip,dotcool/zulip,blaze225/zulip,LeeRisk/zulip,jerryge/zulip,SmartPeople/zulip,suxinde2009/zulip,esander91/zulip,sonali0901/zulip,jonesgithub/zulip,thomasboyt/zulip,pradiptad/zulip,Suninus/zulip,littledogboy/zulip,voidException/zulip,tommyip/zulip,showell/zulip,andersk/zulip,brainwane/zulip,mdavid/zulip,KJin99/zulip,isht3/zulip,Galexrt/zulip,hackerkid/zulip,PaulPetring/zulip,grave-w-grave/zulip,thomasboyt/zulip,gkotian/zulip,moria/zulip,PhilSk/zulip,alliejones/zulip,adnanh/zulip,atomic-labs/zulip,ufosky-server/zulip,synicalsyntax/zulip,RobotCaleb/zulip,huangkebo/zulip,vakila/zulip,hustlzp/zulip,littledogboy/zulip,glovebx/zulip,vabs22/zulip,Suninus/zulip,MariaFaBella85/zulip,vakila/zulip,bowlofstew/zulip,MariaFaBella85/zulip,bluesea/zulip,aliceriot/zulip,RobotCaleb/zulip,Suninus/zulip,KJin99/zulip,johnny9/zulip,qq1012803704/zulip,tdr130/zulip,jerryge/zulip,JanzTam/zulip,kokoar/zulip,calvinleenyc/zulip,hustlzp/zulip,wangdeshui/zulip,aps-sids/zulip,themass/zulip,shrikrishnaholla/zulip,ericzhou2008/zulip,bitemyapp/zulip,hengqujushi/zulip,alliejones/zulip,brockwhittaker/zulip,developerfm/zulip,EasonYi/zulip,wangdeshui/zulip,JPJPJPOPOP/zulip,hj3938/zulip,qq1012803704/zulip,tiansiyuan/zulip,peiwei/zulip,rishig/zulip,ashwinirudrappa/zulip,jrowan/zulip,codeKonami/zulip,udxxabp/zulip,shaunstanislaus/zulip,rishig/zulip,he15his/zulip,joyhchen/zulip,Gabriel0402/zulip,willingc/zulip,huangkebo/zulip,yuvipanda/zulip,brainwane/zulip,akuseru/zulip,dawran6/zulip,jimmy54/zulip,shrikrishnaholla/zulip,mdavid/zulip,peiwei/zulip,synicalsyntax/zulip,deer-hope/zulip,he15his/zulip,dwrpayne/zulip,JPJPJPOPOP/zulip,natanovia/zulip,Suninus/zulip,j831/zulip,mohsenSy/zulip,aps-sids/zulip,stamhe/zulip,peiwei/zulip,atomic-labs/zulip,esander91/zulip,yocome/zulip,suxinde2009/zulip,j831/zulip,Cheppers/zulip,sonali0901/zulip,yuvipanda/zulip,adnanh/zulip,tiansiyuan/zulip,ApsOps/zulip,proliming/zulip,xuanhan863/zulip
,peguin40/zulip,zofuthan/zulip,sup95/zulip,peguin40/zulip,Drooids/zulip,itnihao/zulip,hackerkid/zulip,seapasulli/zulip,he15his/zulip,Frouk/zulip,reyha/zulip,ryansnowboarder/zulip,zorojean/zulip,kokoar/zulip,zachallaun/zulip,calvinleenyc/zulip,swinghu/zulip,jeffcao/zulip,dotcool/zulip,voidException/zulip,shrikrishnaholla/zulip,willingc/zulip,babbage/zulip,Batterfii/zulip,peguin40/zulip,jessedhillon/zulip,dxq-git/zulip,amanharitsh123/zulip,tdr130/zulip,themass/zulip,fw1121/zulip,DazWorrall/zulip,reyha/zulip,zorojean/zulip,vakila/zulip,developerfm/zulip,vakila/zulip,shubhamdhama/zulip,jessedhillon/zulip,jrowan/zulip,amallia/zulip,souravbadami/zulip,avastu/zulip,PaulPetring/zulip,rht/zulip,MayB/zulip,dwrpayne/zulip,kokoar/zulip,karamcnair/zulip,jessedhillon/zulip,reyha/zulip,aliceriot/zulip,babbage/zulip,arpith/zulip,ryanbackman/zulip,susansls/zulip,aps-sids/zulip,KJin99/zulip,mdavid/zulip,babbage/zulip,joshisa/zulip,Galexrt/zulip,guiquanz/zulip,amallia/zulip,KJin99/zulip,Drooids/zulip,fw1121/zulip,rht/zulip,bowlofstew/zulip,krtkmj/zulip,wweiradio/zulip,noroot/zulip,ufosky-server/zulip,bluesea/zulip,esander91/zulip,j831/zulip,Suninus/zulip,thomasboyt/zulip,punchagan/zulip,hengqujushi/zulip,Jianchun1/zulip,kou/zulip,zachallaun/zulip,Gabriel0402/zulip,dxq-git/zulip,DazWorrall/zulip,kou/zulip,praveenaki/zulip,Batterfii/zulip,he15his/zulip,joshisa/zulip,peiwei/zulip,hafeez3000/zulip,zwily/zulip,isht3/zulip,zofuthan/zulip,dnmfarrell/zulip,bowlofstew/zulip,willingc/zulip,luyifan/zulip,jerryge/zulip,seapasulli/zulip,bitemyapp/zulip,vakila/zulip,codeKonami/zulip,krtkmj/zulip,EasonYi/zulip,zorojean/zulip,mohsenSy/zulip,seapasulli/zulip,brockwhittaker/zulip,jonesgithub/zulip,tbutter/zulip,EasonYi/zulip,technicalpickles/zulip,zacps/zulip,ipernet/zulip,dnmfarrell/zulip,seapasulli/zulip,udxxabp/zulip,Galexrt/zulip,jimmy54/zulip,tdr130/zulip,Jianchun1/zulip,lfranchi/zulip,jackrzhang/zulip,lfranchi/zulip,johnnygaddarr/zulip,eastlhu/zulip,saitodisse/zulip,verma-varsha/zulip,tdr130/zulip,stamhe/zulip,bastianh/zulip,Vallher/zulip,TigorC/zulip,tommyip/zulip,vaidap/zulip,brainwane/zulip,punchagan/zulip,ikasumiwt/zulip,j831/zulip,firstblade/zulip,Diptanshu8/zulip,guiquanz/zulip,bssrdf/zulip,vabs22/zulip,tiansiyuan/zulip,moria/zulip,Jianchun1/zulip,dxq-git/zulip,amyliu345/zulip,itnihao/zulip,MayB/zulip,JanzTam/zulip,joyhchen/zulip,jimmy54/zulip,ipernet/zulip,jphilipsen05/zulip,Batterfii/zulip,ryanbackman/zulip,karamcnair/zulip,ApsOps/zulip,eastlhu/zulip,wavelets/zulip,alliejones/zulip,krtkmj/zulip,rishig/zulip,vaidap/zulip,KJin99/zulip,levixie/zulip,karamcnair/zulip,zwily/zulip,thomasboyt/zulip,JPJPJPOPOP/zulip,ipernet/zulip,aps-sids/zulip,Drooids/zulip,ufosky-server/zulip,zhaoweigg/zulip,alliejones/zulip,mahim97/zulip,johnnygaddarr/zulip,rishig/zulip,bastianh/zulip,blaze225/zulip,schatt/zulip,jainayush975/zulip,bssrdf/zulip,saitodisse/zulip,brockwhittaker/zulip,timabbott/zulip,gigawhitlocks/zulip,wavelets/zulip,amallia/zulip,bitemyapp/zulip,kaiyuanheshang/zulip,shaunstanislaus/zulip,schatt/zulip,levixie/zulip,Gabriel0402/zulip,zorojean/zulip,EasonYi/zulip,verma-varsha/zulip,Frouk/zulip,akuseru/zulip,jeffcao/zulip,Frouk/zulip,ryanbackman/zulip,bitemyapp/zulip,guiquanz/zulip,sharmaeklavya2/zulip,ahmadassaf/zulip,proliming/zulip,akuseru/zulip,moria/zulip,deer-hope/zulip,Jianchun1/zulip,stamhe/zulip,zhaoweigg/zulip,timabbott/zulip,niftynei/zulip,jeffcao/zulip,joshisa/zulip,so0k/zulip,avastu/zulip,dwrpayne/zulip,samatdav/zulip,codeKonami/zulip,hackerkid/zulip,mansilladev/zulip,verma-varsha/zulip,MariaFaBella85/zulip,rya
nbackman/zulip,KJin99/zulip,sharmaeklavya2/zulip,timabbott/zulip,brainwane/zulip,suxinde2009/zulip,tommyip/zulip,EasonYi/zulip,aakash-cr7/zulip,hustlzp/zulip,krtkmj/zulip,MayB/zulip,RobotCaleb/zulip,dnmfarrell/zulip,bitemyapp/zulip,m1ssou/zulip,codeKonami/zulip,vikas-parashar/zulip,niftynei/zulip,easyfmxu/zulip,paxapy/zulip,niftynei/zulip,themass/zulip,jrowan/zulip,gigawhitlocks/zulip,saitodisse/zulip,zwily/zulip,ikasumiwt/zulip,xuxiao/zulip,arpitpanwar/zulip,sharmaeklavya2/zulip,vabs22/zulip,akuseru/zulip,xuanhan863/zulip,arpitpanwar/zulip,joshisa/zulip,vikas-parashar/zulip,ApsOps/zulip,rishig/zulip,zachallaun/zulip,jonesgithub/zulip,Vallher/zulip,hustlzp/zulip,brainwane/zulip,luyifan/zulip,praveenaki/zulip,ApsOps/zulip,deer-hope/zulip,amallia/zulip,jeffcao/zulip,tbutter/zulip,easyfmxu/zulip,xuxiao/zulip,ahmadassaf/zulip,joyhchen/zulip,christi3k/zulip,avastu/zulip,wdaher/zulip,dhcrzf/zulip,thomasboyt/zulip,PhilSk/zulip,aps-sids/zulip,bitemyapp/zulip,andersk/zulip,ikasumiwt/zulip,dwrpayne/zulip,rht/zulip,aakash-cr7/zulip,lfranchi/zulip,mansilladev/zulip,sonali0901/zulip,bastianh/zulip,seapasulli/zulip,itnihao/zulip,saitodisse/zulip,AZtheAsian/zulip,dattatreya303/zulip,huangkebo/zulip,TigorC/zulip,karamcnair/zulip,arpith/zulip,kokoar/zulip,alliejones/zulip,kou/zulip,Frouk/zulip,tdr130/zulip,shrikrishnaholla/zulip,dawran6/zulip,zachallaun/zulip,stamhe/zulip,nicholasbs/zulip,eastlhu/zulip,swinghu/zulip,proliming/zulip,AZtheAsian/zulip,ahmadassaf/zulip,willingc/zulip,PaulPetring/zulip,saitodisse/zulip,easyfmxu/zulip,fw1121/zulip,eeshangarg/zulip,wavelets/zulip,dattatreya303/zulip,Drooids/zulip,PaulPetring/zulip,reyha/zulip,peguin40/zulip,amanharitsh123/zulip,LAndreas/zulip,nicholasbs/zulip,johnnygaddarr/zulip,umkay/zulip,esander91/zulip,punchagan/zulip,jonesgithub/zulip,yuvipanda/zulip,zofuthan/zulip,dhcrzf/zulip,wdaher/zulip,zhaoweigg/zulip,vikas-parashar/zulip,LAndreas/zulip,dawran6/zulip,technicalpickles/zulip,zofuthan/zulip,qq1012803704/zulip,paxapy/zulip,rht/zulip,eeshangarg/zulip,thomasboyt/zulip,timabbott/zulip,brainwane/zulip,he15his/zulip,wavelets/zulip,shaunstanislaus/zulip,easyfmxu/zulip,technicalpickles/zulip,Gabriel0402/zulip,m1ssou/zulip,LeeRisk/zulip,umkay/zulip,DazWorrall/zulip,bitemyapp/zulip,sup95/zulip,yocome/zulip,Galexrt/zulip,MariaFaBella85/zulip,Qgap/zulip,bastianh/zulip,guiquanz/zulip,eastlhu/zulip,wweiradio/zulip,glovebx/zulip,tommyip/zulip,arpith/zulip,joshisa/zulip,yuvipanda/zulip,tiansiyuan/zulip,jerryge/zulip,hayderimran7/zulip,ikasumiwt/zulip,dawran6/zulip,hackerkid/zulip,dhcrzf/zulip,akuseru/zulip,sharmaeklavya2/zulip,huangkebo/zulip,swinghu/zulip,cosmicAsymmetry/zulip,arpitpanwar/zulip,kaiyuanheshang/zulip,Diptanshu8/zulip,amyliu345/zulip,babbage/zulip,cosmicAsymmetry/zulip,dxq-git/zulip,samatdav/zulip,sonali0901/zulip,schatt/zulip,suxinde2009/zulip,jeffcao/zulip,isht3/zulip,natanovia/zulip,jphilipsen05/zulip,avastu/zulip,bowlofstew/zulip,kaiyuanheshang/zulip,tbutter/zulip,jeffcao/zulip,ashwinirudrappa/zulip,itnihao/zulip,praveenaki/zulip,pradiptad/zulip,nicholasbs/zulip,jonesgithub/zulip,blaze225/zulip,codeKonami/zulip,johnnygaddarr/zulip,wangdeshui/zulip,punchagan/zulip,atomic-labs/zulip,jainayush975/zulip,joyhchen/zulip,jainayush975/zulip,gkotian/zulip,hengqujushi/zulip,zorojean/zulip,TigorC/zulip,RobotCaleb/zulip,isht3/zulip,arpith/zulip,PhilSk/zulip,zofuthan/zulip,zacps/zulip,easyfmxu/zulip,cosmicAsymmetry/zulip,verma-varsha/zulip,ApsOps/zulip,synicalsyntax/zulip,dhcrzf/zulip,wdaher/zulip,jphilipsen05/zulip,sharmaeklavya2/zulip,blaze225/zulip,niftynei/zulip,
praveenaki/zulip,qq1012803704/zulip,aakash-cr7/zulip,voidException/zulip,jainayush975/zulip,Qgap/zulip,ikasumiwt/zulip,tdr130/zulip,ryansnowboarder/zulip,LAndreas/zulip,natanovia/zulip,levixie/zulip,dawran6/zulip,pradiptad/zulip,arpitpanwar/zulip,bluesea/zulip,mdavid/zulip,dotcool/zulip,gkotian/zulip,jrowan/zulip,hj3938/zulip,hackerkid/zulip,brockwhittaker/zulip,vaidap/zulip,bowlofstew/zulip,mahim97/zulip,andersk/zulip,sup95/zulip,bastianh/zulip,Juanvulcano/zulip,amanharitsh123/zulip,christi3k/zulip,esander91/zulip,MayB/zulip,TigorC/zulip,ufosky-server/zulip,firstblade/zulip,ashwinirudrappa/zulip,dawran6/zulip,dattatreya303/zulip,levixie/zulip,hengqujushi/zulip,glovebx/zulip,bssrdf/zulip,amanharitsh123/zulip,LAndreas/zulip,dotcool/zulip,zwily/zulip,LeeRisk/zulip,hafeez3000/zulip,vabs22/zulip,xuanhan863/zulip,natanovia/zulip,guiquanz/zulip,udxxabp/zulip,hayderimran7/zulip,mansilladev/zulip,praveenaki/zulip,punchagan/zulip,gkotian/zulip,glovebx/zulip,amyliu345/zulip,johnny9/zulip,Juanvulcano/zulip,Batterfii/zulip,Diptanshu8/zulip,jessedhillon/zulip,SmartPeople/zulip,developerfm/zulip,fw1121/zulip,ashwinirudrappa/zulip,showell/zulip,schatt/zulip,wavelets/zulip,easyfmxu/zulip,zofuthan/zulip,Qgap/zulip,aliceriot/zulip,jackrzhang/zulip,lfranchi/zulip,christi3k/zulip,fw1121/zulip,calvinleenyc/zulip,wangdeshui/zulip,huangkebo/zulip,codeKonami/zulip,cosmicAsymmetry/zulip,glovebx/zulip,jrowan/zulip,EasonYi/zulip,developerfm/zulip,jackrzhang/zulip,eastlhu/zulip,he15his/zulip,hj3938/zulip,wdaher/zulip,kokoar/zulip,so0k/zulip,zacps/zulip,ipernet/zulip,JPJPJPOPOP/zulip,bastianh/zulip,he15his/zulip,proliming/zulip,rishig/zulip,vabs22/zulip,levixie/zulip,fw1121/zulip,Qgap/zulip,yuvipanda/zulip,dnmfarrell/zulip,babbage/zulip,LeeRisk/zulip,souravbadami/zulip,niftynei/zulip,hj3938/zulip,zacps/zulip,ahmadassaf/zulip,dxq-git/zulip,joyhchen/zulip,SmartPeople/zulip,Diptanshu8/zulip,mohsenSy/zulip,jimmy54/zulip,thomasboyt/zulip,yocome/zulip,natanovia/zulip,JanzTam/zulip,guiquanz/zulip,rht/zulip,mansilladev/zulip,willingc/zulip,zachallaun/zulip,DazWorrall/zulip,hafeez3000/zulip,Gabriel0402/zulip,PhilSk/zulip,ryansnowboarder/zulip,firstblade/zulip,lfranchi/zulip,vaidap/zulip
|
Add a management command to generate hashes for all streams that need them.
(imported from commit b3a05e47dfe69b44f984185e360d79bf04f7885b)
|
#!/usr/bin/python
from django.core.management.base import BaseCommand
from zerver.lib.utils import generate_random_token
from zerver.models import Stream
class Command(BaseCommand):
help = """Set a token for all streams that don't have one."""
def handle(self, **options):
streams_needing_tokens = Stream.objects.filter(email_token=None)
for stream in streams_needing_tokens:
stream.email_token = generate_random_token(32)
stream.save()
|
<commit_before><commit_msg>Add a management command to generate hashes for all streams that need them.
(imported from commit b3a05e47dfe69b44f984185e360d79bf04f7885b)<commit_after>
|
#!/usr/bin/python
from django.core.management.base import BaseCommand
from zerver.lib.utils import generate_random_token
from zerver.models import Stream
class Command(BaseCommand):
help = """Set a token for all streams that don't have one."""
def handle(self, **options):
streams_needing_tokens = Stream.objects.filter(email_token=None)
for stream in streams_needing_tokens:
stream.email_token = generate_random_token(32)
stream.save()
|
Add a management command to generate hashes for all streams that need them.
(imported from commit b3a05e47dfe69b44f984185e360d79bf04f7885b)#!/usr/bin/python
from django.core.management.base import BaseCommand
from zerver.lib.utils import generate_random_token
from zerver.models import Stream
class Command(BaseCommand):
help = """Set a token for all streams that don't have one."""
def handle(self, **options):
streams_needing_tokens = Stream.objects.filter(email_token=None)
for stream in streams_needing_tokens:
stream.email_token = generate_random_token(32)
stream.save()
|
<commit_before><commit_msg>Add a management command to generate hashes for all streams that need them.
(imported from commit b3a05e47dfe69b44f984185e360d79bf04f7885b)<commit_after>#!/usr/bin/python
from django.core.management.base import BaseCommand
from zerver.lib.utils import generate_random_token
from zerver.models import Stream
class Command(BaseCommand):
help = """Set a token for all streams that don't have one."""
def handle(self, **options):
streams_needing_tokens = Stream.objects.filter(email_token=None)
for stream in streams_needing_tokens:
stream.email_token = generate_random_token(32)
stream.save()
|
|
7409b02842708c693695b473cca14546180a4484
|
tests/unit/python/foglamp/tasks/purge/test_purge_main.py
|
tests/unit/python/foglamp/tasks/purge/test_purge_main.py
|
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
""" Test tasks/purge/__main__.py entry point
"""
import pytest
from unittest.mock import patch, MagicMock
from foglamp.tasks import purge
from foglamp.common import logger
from foglamp.common.storage_client.storage_client import StorageClient
from foglamp.tasks.purge.purge import Purge
from foglamp.common.process import FoglampProcess
from foglamp.common.audit_logger import AuditLogger
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
@pytest.fixture
async def _purge_instance():
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, "__init__"):
with patch.object(logger, "setup"):
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge()
return p
@pytest.allure.feature("unit")
@pytest.allure.story("tasks", "purge")
def test_main(_purge_instance):
with patch.object(purge, "__name__", "__main__"):
purge.purge_process = _purge_instance
with patch.object(Purge, 'run', return_value=None):
purge.purge_process.run()
purge.purge_process.run.assert_called_once_with()
|
Test for purge main entry point
|
Test for purge main entry point
|
Python
|
apache-2.0
|
foglamp/FogLAMP,foglamp/FogLAMP,foglamp/FogLAMP,foglamp/FogLAMP
|
Test for purge main entry point
|
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
""" Test tasks/purge/__main__.py entry point
"""
import pytest
from unittest.mock import patch, MagicMock
from foglamp.tasks import purge
from foglamp.common import logger
from foglamp.common.storage_client.storage_client import StorageClient
from foglamp.tasks.purge.purge import Purge
from foglamp.common.process import FoglampProcess
from foglamp.common.audit_logger import AuditLogger
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
@pytest.fixture
async def _purge_instance():
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, "__init__"):
with patch.object(logger, "setup"):
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge()
return p
@pytest.allure.feature("unit")
@pytest.allure.story("tasks", "purge")
def test_main(_purge_instance):
with patch.object(purge, "__name__", "__main__"):
purge.purge_process = _purge_instance
with patch.object(Purge, 'run', return_value=None):
purge.purge_process.run()
purge.purge_process.run.assert_called_once_with()
|
<commit_before><commit_msg>Test for purge main entry point<commit_after>
|
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
""" Test tasks/purge/__main__.py entry point
"""
import pytest
from unittest.mock import patch, MagicMock
from foglamp.tasks import purge
from foglamp.common import logger
from foglamp.common.storage_client.storage_client import StorageClient
from foglamp.tasks.purge.purge import Purge
from foglamp.common.process import FoglampProcess
from foglamp.common.audit_logger import AuditLogger
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
@pytest.fixture
async def _purge_instance():
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, "__init__"):
with patch.object(logger, "setup"):
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge()
return p
@pytest.allure.feature("unit")
@pytest.allure.story("tasks", "purge")
def test_main(_purge_instance):
with patch.object(purge, "__name__", "__main__"):
purge.purge_process = _purge_instance
with patch.object(Purge, 'run', return_value=None):
purge.purge_process.run()
purge.purge_process.run.assert_called_once_with()
|
Test for purge main entry point# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
""" Test tasks/purge/__main__.py entry point
"""
import pytest
from unittest.mock import patch, MagicMock
from foglamp.tasks import purge
from foglamp.common import logger
from foglamp.common.storage_client.storage_client import StorageClient
from foglamp.tasks.purge.purge import Purge
from foglamp.common.process import FoglampProcess
from foglamp.common.audit_logger import AuditLogger
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
@pytest.fixture
async def _purge_instance():
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, "__init__"):
with patch.object(logger, "setup"):
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge()
return p
@pytest.allure.feature("unit")
@pytest.allure.story("tasks", "purge")
def test_main(_purge_instance):
with patch.object(purge, "__name__", "__main__"):
purge.purge_process = _purge_instance
with patch.object(Purge, 'run', return_value=None):
purge.purge_process.run()
purge.purge_process.run.assert_called_once_with()
|
<commit_before><commit_msg>Test for purge main entry point<commit_after># -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
""" Test tasks/purge/__main__.py entry point
"""
import pytest
from unittest.mock import patch, MagicMock
from foglamp.tasks import purge
from foglamp.common import logger
from foglamp.common.storage_client.storage_client import StorageClient
from foglamp.tasks.purge.purge import Purge
from foglamp.common.process import FoglampProcess
from foglamp.common.audit_logger import AuditLogger
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
@pytest.fixture
async def _purge_instance():
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, "__init__"):
with patch.object(logger, "setup"):
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge()
return p
@pytest.allure.feature("unit")
@pytest.allure.story("tasks", "purge")
def test_main(_purge_instance):
with patch.object(purge, "__name__", "__main__"):
purge.purge_process = _purge_instance
with patch.object(Purge, 'run', return_value=None):
purge.purge_process.run()
purge.purge_process.run.assert_called_once_with()
|
|
1b39f2427a1ba777968908344836fd597bcea428
|
clic/web/fixtures.py
|
clic/web/fixtures.py
|
import os
from models import db, Subset
# importing from admin rather than db, because it needs app context
from admin import app
if __name__ == '__main__':
# http://stackoverflow.com/a/19008403/2115409
db.init_app(app)
with app.app_context():
# Extensions like Flask-SQLAlchemy now know what the "current" app
# is while within this block. Therefore, you can now run........
# db.create_all()
# change to the right folder
os.chdir('/Users/johan/projects/annotation/data/annotation/output/DNov/textract/')
os.listdir(os.curdir)
for folder in os.listdir(os.curdir):
for input_file in os.listdir(folder):
# parse the file names, fi. BH_non_suspended_quotes.txt
name = input_file.split('_')[0] # BH
# BH_non_suspended_quotes.txt -> # BH_non_suspended_quotes
# -> # non suspended quotes
kind = input_file.split('.')[0].split('_')[1:]
kind = '-'.join(kind)
with open(folder + '/' + input_file) as input:
contents = input.readlines()
print name, kind, len(contents)
for itm in contents:
db.session.add(Subset(book=name, abbr=name, kind=kind, text=itm))
db.session.commit()
|
Add script to populate the Subset table
|
Add script to populate the Subset table
|
Python
|
mit
|
CentreForCorpusResearch/clic,CentreForResearchInAppliedLinguistics/clic,CentreForResearchInAppliedLinguistics/clic,CentreForCorpusResearch/clic,CentreForResearchInAppliedLinguistics/clic,CentreForCorpusResearch/clic
|
Add script to populate the Subset table
|
import os
from models import db, Subset
# importing from admin rather than db, because it needs app context
from admin import app
if __name__ == '__main__':
# http://stackoverflow.com/a/19008403/2115409
db.init_app(app)
with app.app_context():
# Extensions like Flask-SQLAlchemy now know what the "current" app
# is while within this block. Therefore, you can now run........
# db.create_all()
# change to the right folder
os.chdir('/Users/johan/projects/annotation/data/annotation/output/DNov/textract/')
os.listdir(os.curdir)
for folder in os.listdir(os.curdir):
for input_file in os.listdir(folder):
# parse the file names, fi. BH_non_suspended_quotes.txt
name = input_file.split('_')[0] # BH
# BH_non_suspended_quotes.txt -> # BH_non_suspended_quotes
# -> # non suspended quotes
kind = input_file.split('.')[0].split('_')[1:]
kind = '-'.join(kind)
with open(folder + '/' + input_file) as input:
contents = input.readlines()
print name, kind, len(contents)
for itm in contents:
db.session.add(Subset(book=name, abbr=name, kind=kind, text=itm))
db.session.commit()
|
<commit_before><commit_msg>Add script to populate the Subset table<commit_after>
|
import os
from models import db, Subset
# importing from admin rather than db, because it needs app context
from admin import app
if __name__ == '__main__':
# http://stackoverflow.com/a/19008403/2115409
db.init_app(app)
with app.app_context():
# Extensions like Flask-SQLAlchemy now know what the "current" app
# is while within this block. Therefore, you can now run........
# db.create_all()
# change to the right folder
os.chdir('/Users/johan/projects/annotation/data/annotation/output/DNov/textract/')
os.listdir(os.curdir)
for folder in os.listdir(os.curdir):
for input_file in os.listdir(folder):
# parse the file names, fi. BH_non_suspended_quotes.txt
name = input_file.split('_')[0] # BH
# BH_non_suspended_quotes.txt -> # BH_non_suspended_quotes
# -> # non suspended quotes
kind = input_file.split('.')[0].split('_')[1:]
kind = '-'.join(kind)
with open(folder + '/' + input_file) as input:
contents = input.readlines()
print name, kind, len(contents)
for itm in contents:
db.session.add(Subset(book=name, abbr=name, kind=kind, text=itm))
db.session.commit()
|
Add script to populate the Subset tableimport os
from models import db, Subset
# importing from admin rather than db, because it needs app context
from admin import app
if __name__ == '__main__':
# http://stackoverflow.com/a/19008403/2115409
db.init_app(app)
with app.app_context():
# Extensions like Flask-SQLAlchemy now know what the "current" app
# is while within this block. Therefore, you can now run........
# db.create_all()
# change to the right folder
os.chdir('/Users/johan/projects/annotation/data/annotation/output/DNov/textract/')
os.listdir(os.curdir)
for folder in os.listdir(os.curdir):
for input_file in os.listdir(folder):
# parse the file names, fi. BH_non_suspended_quotes.txt
name = input_file.split('_')[0] # BH
# BH_non_suspended_quotes.txt -> # BH_non_suspended_quotes
# -> # non suspended quotes
kind = input_file.split('.')[0].split('_')[1:]
kind = '-'.join(kind)
with open(folder + '/' + input_file) as input:
contents = input.readlines()
print name, kind, len(contents)
for itm in contents:
db.session.add(Subset(book=name, abbr=name, kind=kind, text=itm))
db.session.commit()
|
<commit_before><commit_msg>Add script to populate the Subset table<commit_after>import os
from models import db, Subset
# importing from admin rather than db, because it needs app context
from admin import app
if __name__ == '__main__':
# http://stackoverflow.com/a/19008403/2115409
db.init_app(app)
with app.app_context():
# Extensions like Flask-SQLAlchemy now know what the "current" app
# is while within this block. Therefore, you can now run........
# db.create_all()
# change to the right folder
os.chdir('/Users/johan/projects/annotation/data/annotation/output/DNov/textract/')
os.listdir(os.curdir)
for folder in os.listdir(os.curdir):
for input_file in os.listdir(folder):
# parse the file names, fi. BH_non_suspended_quotes.txt
name = input_file.split('_')[0] # BH
# BH_non_suspended_quotes.txt -> # BH_non_suspended_quotes
# -> # non suspended quotes
kind = input_file.split('.')[0].split('_')[1:]
kind = '-'.join(kind)
with open(folder + '/' + input_file) as input:
contents = input.readlines()
print name, kind, len(contents)
for itm in contents:
db.session.add(Subset(book=name, abbr=name, kind=kind, text=itm))
db.session.commit()
|
|
1536dbee308527e2b8f7a3c734e1020b20e962f0
|
sx.py
|
sx.py
|
#!/usr/bin/python3
##
# Copyright (c) 2010 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
import os
import sys
import semantix.shell
import semantix.utils.shell
sys.path.insert(0, os.path.dirname(__file__))
def run(argv):
return semantix.utils.shell.main(argv)
if __name__ == '__main__':
sys.exit(run(sys.argv))
|
Replace the clumsy Makefile with proper command infrastructure
|
Replace the clumsy Makefile with proper command infrastructure
Semantix now has a proper infrastructure for shell commands, complete
with argument passing and complex sub-commands. Every semantix-based
project (including semantix itself) will now have an executable script
'sx.py' in the root of that project that will serve as an entry point
for semantix commands.
Additionally, there is a new convenience tool `sx' (in semantix/tools)
that automatically finds and executes sx.py when run from within a
project directory hierarchy. Place it into your PATH.
So instead of `make test' one now writes `sx test' or
`./sx.py test'
|
Python
|
mit
|
sprymix/importkit
|
Replace the clumsy Makefile with proper command infrastructure
Semantix now has a proper infrastructure for shell commands, complete
with argument passing and complex sub-commands. Every semantix-based
project (including semantix itself) will now have an executable script
'sx.py' in the root of that project that will serve as an entry point
for semantix commands.
Additionally, there is a new convenience tool `sx' (in semantix/tools)
that automatically finds and executes sx.py when run from within a
project directory hierarchy. Place it into your PATH.
So instead of `make test' one now writes `sx test' or
`./sx.py test'
|
#!/usr/bin/python3
##
# Copyright (c) 2010 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
import os
import sys
import semantix.shell
import semantix.utils.shell
sys.path.insert(0, os.path.dirname(__file__))
def run(argv):
return semantix.utils.shell.main(argv)
if __name__ == '__main__':
sys.exit(run(sys.argv))
|
<commit_before><commit_msg>Replace the clumsy Makefile with proper command infrastructure
Semantix now has a proper infrastructure for shell commands, complete
with argument passing and complex sub-commands. Every semantix-based
project (including semantix itself) will now have an executable script
'sx.py' in the root of that project that will serve as an entry point
for semantix commands.
Additionally, there is a new convenience tool `sx' (in semantix/tools)
that automatically finds and executes sx.py when run from within a
project directory hierarchy. Place it into your PATH.
So instead of `make test' one now writes `sx test' or
`./sx.py test'<commit_after>
|
#!/usr/bin/python3
##
# Copyright (c) 2010 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
import os
import sys
import semantix.shell
import semantix.utils.shell
sys.path.insert(0, os.path.dirname(__file__))
def run(argv):
return semantix.utils.shell.main(argv)
if __name__ == '__main__':
sys.exit(run(sys.argv))
|
Replace the clumsy Makefile with proper command infrastructure
Semantix now has a proper infrastructure for shell commands, complete
with argument passing and complex sub-commands. Every semantix-based
project (including semantix itself) will now have an executable script
'sx.py' in the root of that project that will serve as an entry point
for semantix commands.
Additionally, there is a new convenience tool `sx' (in semantix/tools)
that automatically finds and executes sx.py when run from within a
project directory hierarchy. Place it into your PATH.
So instead of `make test' one now writes `sx test' or
`./sx.py test'#!/usr/bin/python3
##
# Copyright (c) 2010 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
import os
import sys
import semantix.shell
import semantix.utils.shell
sys.path.insert(0, os.path.dirname(__file__))
def run(argv):
return semantix.utils.shell.main(argv)
if __name__ == '__main__':
sys.exit(run(sys.argv))
|
<commit_before><commit_msg>Replace the clumsy Makefile with proper command infrastructure
Semantix now has a proper infrastructure for shell commands, complete
with argument passing and complex sub-commands. Every semantix-based
project (including semantix itself) will now have an executable script
'sx.py' in the root of that project that will serve as an entry point
for semantix commands.
Additionally, there is a new convenience tool `sx' (in semantix/tools)
that automatically finds and executes sx.py when run from within a
project directory hierarchy. Place it into your PATH.
So instead of `make test' one now writes `sx test' or
`./sx.py test'<commit_after>#!/usr/bin/python3
##
# Copyright (c) 2010 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
import os
import sys
import semantix.shell
import semantix.utils.shell
sys.path.insert(0, os.path.dirname(__file__))
def run(argv):
return semantix.utils.shell.main(argv)
if __name__ == '__main__':
sys.exit(run(sys.argv))
|
|
36924c92d91c7e29743a4a7b799c3e87a92cf38e
|
django_yadt/management/commands/yadt_invalidate_variant.py
|
django_yadt/management/commands/yadt_invalidate_variant.py
|
from django.core.management.base import BaseCommand, CommandError
from ...utils import get_variant
class Command(BaseCommand):
USAGE = "<app_label> <model> <field> <variant>"
def handle(self, *args, **options):
try:
app_label, model_name, field_name, variant_name = args
except ValueError:
raise CommandError(self.USAGE)
variant = get_variant(app_label, model_name, field_name, variant_name)
variant.invalidate_all()
|
Add command to invalidate all of a particular variant.
|
Add command to invalidate all of a particular variant.
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@thread.com>
|
Python
|
bsd-3-clause
|
lamby/django-yadt,thread/django-yadt
|
Add command to invalidate all of a particular variant.
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@thread.com>
|
from django.core.management.base import BaseCommand, CommandError
from ...utils import get_variant
class Command(BaseCommand):
USAGE = "<app_label> <model> <field> <variant>"
def handle(self, *args, **options):
try:
app_label, model_name, field_name, variant_name = args
except ValueError:
raise CommandError(self.USAGE)
variant = get_variant(app_label, model_name, field_name, variant_name)
variant.invalidate_all()
|
<commit_before><commit_msg>Add command to invalidate all of a particular variant.
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@thread.com><commit_after>
|
from django.core.management.base import BaseCommand, CommandError
from ...utils import get_variant
class Command(BaseCommand):
USAGE = "<app_label> <model> <field> <variant>"
def handle(self, *args, **options):
try:
app_label, model_name, field_name, variant_name = args
except ValueError:
raise CommandError(self.USAGE)
variant = get_variant(app_label, model_name, field_name, variant_name)
variant.invalidate_all()
|
Add command to invalidate all of a particular variant.
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@thread.com>from django.core.management.base import BaseCommand, CommandError
from ...utils import get_variant
class Command(BaseCommand):
USAGE = "<app_label> <model> <field> <variant>"
def handle(self, *args, **options):
try:
app_label, model_name, field_name, variant_name = args
except ValueError:
raise CommandError(self.USAGE)
variant = get_variant(app_label, model_name, field_name, variant_name)
variant.invalidate_all()
|
<commit_before><commit_msg>Add command to invalidate all of a particular variant.
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@thread.com><commit_after>from django.core.management.base import BaseCommand, CommandError
from ...utils import get_variant
class Command(BaseCommand):
USAGE = "<app_label> <model> <field> <variant>"
def handle(self, *args, **options):
try:
app_label, model_name, field_name, variant_name = args
except ValueError:
raise CommandError(self.USAGE)
variant = get_variant(app_label, model_name, field_name, variant_name)
variant.invalidate_all()
|
|
4011063de1cc14fd758dd8537ebf10c8c1491149
|
migrations/versions/1d8e0b3b949_add_description_to_list.py
|
migrations/versions/1d8e0b3b949_add_description_to_list.py
|
"""Add description to list
Revision ID: 1d8e0b3b949
Revises: 594033d136f
Create Date: 2014-07-20 13:37:41.064387
"""
# revision identifiers, used by Alembic.
revision = '1d8e0b3b949'
down_revision = '594033d136f'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('list', sa.Column('description', sa.String, nullable=True))
def downgrade():
op.drop_column('list', 'description')
|
Add a description to word lists.
|
Add a description to word lists.
|
Python
|
mit
|
gmwils/cihui
|
Add a description to word lists.
|
"""Add description to list
Revision ID: 1d8e0b3b949
Revises: 594033d136f
Create Date: 2014-07-20 13:37:41.064387
"""
# revision identifiers, used by Alembic.
revision = '1d8e0b3b949'
down_revision = '594033d136f'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('list', sa.Column('description', sa.String, nullable=True))
def downgrade():
op.drop_column('list', 'description')
|
<commit_before><commit_msg>Add a description to word lists.<commit_after>
|
"""Add description to list
Revision ID: 1d8e0b3b949
Revises: 594033d136f
Create Date: 2014-07-20 13:37:41.064387
"""
# revision identifiers, used by Alembic.
revision = '1d8e0b3b949'
down_revision = '594033d136f'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('list', sa.Column('description', sa.String, nullable=True))
def downgrade():
op.drop_column('list', 'description')
|
Add a description to word lists."""Add description to list
Revision ID: 1d8e0b3b949
Revises: 594033d136f
Create Date: 2014-07-20 13:37:41.064387
"""
# revision identifiers, used by Alembic.
revision = '1d8e0b3b949'
down_revision = '594033d136f'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('list', sa.Column('description', sa.String, nullable=True))
def downgrade():
op.drop_column('list', 'description')
|
<commit_before><commit_msg>Add a description to word lists.<commit_after>"""Add description to list
Revision ID: 1d8e0b3b949
Revises: 594033d136f
Create Date: 2014-07-20 13:37:41.064387
"""
# revision identifiers, used by Alembic.
revision = '1d8e0b3b949'
down_revision = '594033d136f'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('list', sa.Column('description', sa.String, nullable=True))
def downgrade():
op.drop_column('list', 'description')
|
|
92f14f351c3674531c011923443f8b3b528543d9
|
binstar_client/tests/test_copy.py
|
binstar_client/tests/test_copy.py
|
from __future__ import unicode_literals
# Standard library imports
import json
import unittest
# Local imports
from binstar_client.scripts.cli import main
from binstar_client.tests.fixture import CLITestCase
from binstar_client.tests.urlmock import urlpatch
class Test(CLITestCase):
@urlpatch
def test_copy_label(self, urls):
urls.register(method='GET', path='/channels/u1', content='["dev"]')
copy = urls.register(method='POST', path='/copy/package/u1/p1/1.0', content='[{"basename": "copied-file_1.0.tgz"}]')
main(['--show-traceback', 'copy', '--from-label', 'dev', '--to-label', 'release/xyz', 'u1/p1/1.0'], False)
urls.assertAllCalled()
req = json.loads(copy.req.body)
self.assertEqual(req['from_channel'], 'dev')
self.assertEqual(req['to_channel'], 'release/xyz')
@urlpatch
def test_copy_channel(self, urls):
urls.register(method='GET', path='/channels/u1', content='["dev"]')
copy = urls.register(method='POST', path='/copy/package/u1/p1/1.0', content='[{"basename": "copied-file_1.0.tgz"}]')
main(['--show-traceback', 'copy', '--from-channel', 'dev', '--to-channel', 'release/xyz', 'u1/p1/1.0'], False)
urls.assertAllCalled()
req = json.loads(copy.req.body)
self.assertEqual(req['from_channel'], 'dev')
self.assertEqual(req['to_channel'], 'release/xyz')
if __name__ == '__main__':
unittest.main()
|
Add test for `anaconda copy`
|
Add test for `anaconda copy`
|
Python
|
bsd-3-clause
|
Anaconda-Platform/anaconda-client,Anaconda-Platform/anaconda-client,Anaconda-Platform/anaconda-client
|
Add test for `anaconda copy`
|
from __future__ import unicode_literals
# Standard library imports
import json
import unittest
# Local imports
from binstar_client.scripts.cli import main
from binstar_client.tests.fixture import CLITestCase
from binstar_client.tests.urlmock import urlpatch
class Test(CLITestCase):
@urlpatch
def test_copy_label(self, urls):
urls.register(method='GET', path='/channels/u1', content='["dev"]')
copy = urls.register(method='POST', path='/copy/package/u1/p1/1.0', content='[{"basename": "copied-file_1.0.tgz"}]')
main(['--show-traceback', 'copy', '--from-label', 'dev', '--to-label', 'release/xyz', 'u1/p1/1.0'], False)
urls.assertAllCalled()
req = json.loads(copy.req.body)
self.assertEqual(req['from_channel'], 'dev')
self.assertEqual(req['to_channel'], 'release/xyz')
@urlpatch
def test_copy_channel(self, urls):
urls.register(method='GET', path='/channels/u1', content='["dev"]')
copy = urls.register(method='POST', path='/copy/package/u1/p1/1.0', content='[{"basename": "copied-file_1.0.tgz"}]')
main(['--show-traceback', 'copy', '--from-channel', 'dev', '--to-channel', 'release/xyz', 'u1/p1/1.0'], False)
urls.assertAllCalled()
req = json.loads(copy.req.body)
self.assertEqual(req['from_channel'], 'dev')
self.assertEqual(req['to_channel'], 'release/xyz')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for `anaconda copy`<commit_after>
|
from __future__ import unicode_literals
# Standard library imports
import json
import unittest
# Local imports
from binstar_client.scripts.cli import main
from binstar_client.tests.fixture import CLITestCase
from binstar_client.tests.urlmock import urlpatch
class Test(CLITestCase):
@urlpatch
def test_copy_label(self, urls):
urls.register(method='GET', path='/channels/u1', content='["dev"]')
copy = urls.register(method='POST', path='/copy/package/u1/p1/1.0', content='[{"basename": "copied-file_1.0.tgz"}]')
main(['--show-traceback', 'copy', '--from-label', 'dev', '--to-label', 'release/xyz', 'u1/p1/1.0'], False)
urls.assertAllCalled()
req = json.loads(copy.req.body)
self.assertEqual(req['from_channel'], 'dev')
self.assertEqual(req['to_channel'], 'release/xyz')
@urlpatch
def test_copy_channel(self, urls):
urls.register(method='GET', path='/channels/u1', content='["dev"]')
copy = urls.register(method='POST', path='/copy/package/u1/p1/1.0', content='[{"basename": "copied-file_1.0.tgz"}]')
main(['--show-traceback', 'copy', '--from-channel', 'dev', '--to-channel', 'release/xyz', 'u1/p1/1.0'], False)
urls.assertAllCalled()
req = json.loads(copy.req.body)
self.assertEqual(req['from_channel'], 'dev')
self.assertEqual(req['to_channel'], 'release/xyz')
if __name__ == '__main__':
unittest.main()
|
Add test for `anaconda copy`from __future__ import unicode_literals
# Standard library imports
import json
import unittest
# Local imports
from binstar_client.scripts.cli import main
from binstar_client.tests.fixture import CLITestCase
from binstar_client.tests.urlmock import urlpatch
class Test(CLITestCase):
@urlpatch
def test_copy_label(self, urls):
urls.register(method='GET', path='/channels/u1', content='["dev"]')
copy = urls.register(method='POST', path='/copy/package/u1/p1/1.0', content='[{"basename": "copied-file_1.0.tgz"}]')
main(['--show-traceback', 'copy', '--from-label', 'dev', '--to-label', 'release/xyz', 'u1/p1/1.0'], False)
urls.assertAllCalled()
req = json.loads(copy.req.body)
self.assertEqual(req['from_channel'], 'dev')
self.assertEqual(req['to_channel'], 'release/xyz')
@urlpatch
def test_copy_channel(self, urls):
urls.register(method='GET', path='/channels/u1', content='["dev"]')
copy = urls.register(method='POST', path='/copy/package/u1/p1/1.0', content='[{"basename": "copied-file_1.0.tgz"}]')
main(['--show-traceback', 'copy', '--from-channel', 'dev', '--to-channel', 'release/xyz', 'u1/p1/1.0'], False)
urls.assertAllCalled()
req = json.loads(copy.req.body)
self.assertEqual(req['from_channel'], 'dev')
self.assertEqual(req['to_channel'], 'release/xyz')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for `anaconda copy`<commit_after>from __future__ import unicode_literals
# Standard library imports
import json
import unittest
# Local imports
from binstar_client.scripts.cli import main
from binstar_client.tests.fixture import CLITestCase
from binstar_client.tests.urlmock import urlpatch
class Test(CLITestCase):
@urlpatch
def test_copy_label(self, urls):
urls.register(method='GET', path='/channels/u1', content='["dev"]')
copy = urls.register(method='POST', path='/copy/package/u1/p1/1.0', content='[{"basename": "copied-file_1.0.tgz"}]')
main(['--show-traceback', 'copy', '--from-label', 'dev', '--to-label', 'release/xyz', 'u1/p1/1.0'], False)
urls.assertAllCalled()
req = json.loads(copy.req.body)
self.assertEqual(req['from_channel'], 'dev')
self.assertEqual(req['to_channel'], 'release/xyz')
@urlpatch
def test_copy_channel(self, urls):
urls.register(method='GET', path='/channels/u1', content='["dev"]')
copy = urls.register(method='POST', path='/copy/package/u1/p1/1.0', content='[{"basename": "copied-file_1.0.tgz"}]')
main(['--show-traceback', 'copy', '--from-channel', 'dev', '--to-channel', 'release/xyz', 'u1/p1/1.0'], False)
urls.assertAllCalled()
req = json.loads(copy.req.body)
self.assertEqual(req['from_channel'], 'dev')
self.assertEqual(req['to_channel'], 'release/xyz')
if __name__ == '__main__':
unittest.main()
|
|
a9b8b3e059bdd5024fccb2efe134a3f98a89d830
|
static/extension/uma_rpt_policy/UmaClientAuthzRptPolicy.py
|
static/extension/uma_rpt_policy/UmaClientAuthzRptPolicy.py
|
# oxAuth is available under the MIT License (2008). See http://opensource.org/licenses/MIT for full text.
# Copyright (c) 2017, Gluu
#
# Author: Jose Gonzalez
# Adapted from previous 3.0.1 script of Yuriy Movchan
#
# oxConfigurationProperty required:
# allowed_clients - comma separated list of dns of allowed clients
# (i.e. the SCIM RP client)
from org.xdi.model.custom.script.type.uma import UmaRptPolicyType
from org.xdi.model.uma import ClaimDefinitionBuilder
from org.xdi.util import StringHelper, ArrayHelper
from java.util import Arrays, ArrayList, HashSet
from java.lang import String
class UmaRptPolicy(UmaRptPolicyType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def init(self, configurationAttributes):
print "RPT Policy. Initializing ..."
self.clientsSet = self.prepareClientsSet(configurationAttributes)
print "RPT Policy. Initialized successfully"
return True
def destroy(self, configurationAttributes):
print "RPT Policy. Destroyed successfully"
return True
def getApiVersion(self):
return 1
def getRequiredClaims(self, context):
json = """[
]"""
return ClaimDefinitionBuilder.build(json)
def authorize(self, context): # context is reference of org.xdi.oxauth.uma.authorization.UmaAuthorizationContext
print "RPT Policy. Authorizing ..."
client_id = context.getHttpRequest().getParameter("client_id")
print "UmaRptPolicy. client_id=", client_id
if (StringHelper.isEmpty(client_id)):
return False
if (self.clientsSet.contains(client_id)):
print "UmaRptPolicy. Authorizing client"
return True
else:
print "UmaRptPolicy. Client isn't authorized"
return False
def prepareClientsSet(self, configurationAttributes):
clientsSet = HashSet()
if (not configurationAttributes.containsKey("allowed_clients")):
return clientsSet
allowedClientsList = configurationAttributes.get("allowed_clients").getValue2()
if (StringHelper.isEmpty(allowedClientsList)):
print "UmaRptPolicy. The property allowed_clients is empty"
return clientsSet
allowedClientsListArray = StringHelper.split(allowedClientsList, ",")
if (ArrayHelper.isEmpty(allowedClientsListArray)):
print "UmaRptPolicy. No clients specified in allowed_clients property"
return clientsSet
# Convert to HashSet to quick search
i = 0
count = len(allowedClientsListArray)
while (i < count):
client = allowedClientsListArray[i]
clientsSet.add(client)
i = i + 1
return clientsSet
|
Add UMA 2.0 SCIM/Passport script
|
Add UMA 2.0 SCIM/Passport script
|
Python
|
mit
|
GluuFederation/community-edition-setup,GluuFederation/community-edition-setup,GluuFederation/community-edition-setup
|
Add UMA 2.0 SCIM/Passport script
|
# oxAuth is available under the MIT License (2008). See http://opensource.org/licenses/MIT for full text.
# Copyright (c) 2017, Gluu
#
# Author: Jose Gonzalez
# Adapted from previous 3.0.1 script of Yuriy Movchan
#
# oxConfigurationProperty required:
# allowed_clients - comma separated list of dns of allowed clients
# (i.e. the SCIM RP client)
from org.xdi.model.custom.script.type.uma import UmaRptPolicyType
from org.xdi.model.uma import ClaimDefinitionBuilder
from org.xdi.util import StringHelper, ArrayHelper
from java.util import Arrays, ArrayList, HashSet
from java.lang import String
class UmaRptPolicy(UmaRptPolicyType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def init(self, configurationAttributes):
print "RPT Policy. Initializing ..."
self.clientsSet = self.prepareClientsSet(configurationAttributes)
print "RPT Policy. Initialized successfully"
return True
def destroy(self, configurationAttributes):
print "RPT Policy. Destroyed successfully"
return True
def getApiVersion(self):
return 1
def getRequiredClaims(self, context):
json = """[
]"""
return ClaimDefinitionBuilder.build(json)
def authorize(self, context): # context is reference of org.xdi.oxauth.uma.authorization.UmaAuthorizationContext
print "RPT Policy. Authorizing ..."
client_id = context.getHttpRequest().getParameter("client_id")
print "UmaRptPolicy. client_id=", client_id
if (StringHelper.isEmpty(client_id)):
return False
if (self.clientsSet.contains(client_id)):
print "UmaRptPolicy. Authorizing client"
return True
else:
print "UmaRptPolicy. Client isn't authorized"
return False
def prepareClientsSet(self, configurationAttributes):
clientsSet = HashSet()
if (not configurationAttributes.containsKey("allowed_clients")):
return clientsSet
allowedClientsList = configurationAttributes.get("allowed_clients").getValue2()
if (StringHelper.isEmpty(allowedClientsList)):
print "UmaRptPolicy. The property allowed_clients is empty"
return clientsSet
allowedClientsListArray = StringHelper.split(allowedClientsList, ",")
if (ArrayHelper.isEmpty(allowedClientsListArray)):
print "UmaRptPolicy. No clients specified in allowed_clients property"
return clientsSet
# Convert to HashSet to quick search
i = 0
count = len(allowedClientsListArray)
while (i < count):
client = allowedClientsListArray[i]
clientsSet.add(client)
i = i + 1
return clientsSet
|
<commit_before><commit_msg>Add UMA 2.0 SCIM/Passport script<commit_after>
|
# oxAuth is available under the MIT License (2008). See http://opensource.org/licenses/MIT for full text.
# Copyright (c) 2017, Gluu
#
# Author: Jose Gonzalez
# Adapted from previous 3.0.1 script of Yuriy Movchan
#
# oxConfigurationProperty required:
# allowed_clients - comma separated list of dns of allowed clients
# (i.e. the SCIM RP client)
from org.xdi.model.custom.script.type.uma import UmaRptPolicyType
from org.xdi.model.uma import ClaimDefinitionBuilder
from org.xdi.util import StringHelper, ArrayHelper
from java.util import Arrays, ArrayList, HashSet
from java.lang import String
class UmaRptPolicy(UmaRptPolicyType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def init(self, configurationAttributes):
print "RPT Policy. Initializing ..."
self.clientsSet = self.prepareClientsSet(configurationAttributes)
print "RPT Policy. Initialized successfully"
return True
def destroy(self, configurationAttributes):
print "RPT Policy. Destroyed successfully"
return True
def getApiVersion(self):
return 1
def getRequiredClaims(self, context):
json = """[
]"""
return ClaimDefinitionBuilder.build(json)
def authorize(self, context): # context is reference of org.xdi.oxauth.uma.authorization.UmaAuthorizationContext
print "RPT Policy. Authorizing ..."
client_id = context.getHttpRequest().getParameter("client_id")
print "UmaRptPolicy. client_id=", client_id
if (StringHelper.isEmpty(client_id)):
return False
if (self.clientsSet.contains(client_id)):
print "UmaRptPolicy. Authorizing client"
return True
else:
print "UmaRptPolicy. Client isn't authorized"
return False
def prepareClientsSet(self, configurationAttributes):
clientsSet = HashSet()
if (not configurationAttributes.containsKey("allowed_clients")):
return clientsSet
allowedClientsList = configurationAttributes.get("allowed_clients").getValue2()
if (StringHelper.isEmpty(allowedClientsList)):
print "UmaRptPolicy. The property allowed_clients is empty"
return clientsSet
allowedClientsListArray = StringHelper.split(allowedClientsList, ",")
if (ArrayHelper.isEmpty(allowedClientsListArray)):
print "UmaRptPolicy. No clients specified in allowed_clients property"
return clientsSet
# Convert to HashSet to quick search
i = 0
count = len(allowedClientsListArray)
while (i < count):
client = allowedClientsListArray[i]
clientsSet.add(client)
i = i + 1
return clientsSet
|
Add UMA 2.0 SCIM/Passport script# oxAuth is available under the MIT License (2008). See http://opensource.org/licenses/MIT for full text.
# Copyright (c) 2017, Gluu
#
# Author: Jose Gonzalez
# Adapted from previous 3.0.1 script of Yuriy Movchan
#
# oxConfigurationProperty required:
# allowed_clients - comma separated list of dns of allowed clients
# (i.e. the SCIM RP client)
from org.xdi.model.custom.script.type.uma import UmaRptPolicyType
from org.xdi.model.uma import ClaimDefinitionBuilder
from org.xdi.util import StringHelper, ArrayHelper
from java.util import Arrays, ArrayList, HashSet
from java.lang import String
class UmaRptPolicy(UmaRptPolicyType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def init(self, configurationAttributes):
print "RPT Policy. Initializing ..."
self.clientsSet = self.prepareClientsSet(configurationAttributes)
print "RPT Policy. Initialized successfully"
return True
def destroy(self, configurationAttributes):
print "RPT Policy. Destroyed successfully"
return True
def getApiVersion(self):
return 1
def getRequiredClaims(self, context):
json = """[
]"""
return ClaimDefinitionBuilder.build(json)
def authorize(self, context): # context is reference of org.xdi.oxauth.uma.authorization.UmaAuthorizationContext
print "RPT Policy. Authorizing ..."
client_id = context.getHttpRequest().getParameter("client_id")
print "UmaRptPolicy. client_id=", client_id
if (StringHelper.isEmpty(client_id)):
return False
if (self.clientsSet.contains(client_id)):
print "UmaRptPolicy. Authorizing client"
return True
else:
print "UmaRptPolicy. Client isn't authorized"
return False
def prepareClientsSet(self, configurationAttributes):
clientsSet = HashSet()
if (not configurationAttributes.containsKey("allowed_clients")):
return clientsSet
allowedClientsList = configurationAttributes.get("allowed_clients").getValue2()
if (StringHelper.isEmpty(allowedClientsList)):
print "UmaRptPolicy. The property allowed_clients is empty"
return clientsSet
allowedClientsListArray = StringHelper.split(allowedClientsList, ",")
if (ArrayHelper.isEmpty(allowedClientsListArray)):
print "UmaRptPolicy. No clients specified in allowed_clients property"
return clientsSet
# Convert to HashSet to quick search
i = 0
count = len(allowedClientsListArray)
while (i < count):
client = allowedClientsListArray[i]
clientsSet.add(client)
i = i + 1
return clientsSet
|
<commit_before><commit_msg>Add UMA 2.0 SCIM/Passport script<commit_after># oxAuth is available under the MIT License (2008). See http://opensource.org/licenses/MIT for full text.
# Copyright (c) 2017, Gluu
#
# Author: Jose Gonzalez
# Adapted from previous 3.0.1 script of Yuriy Movchan
#
# oxConfigurationProperty required:
# allowed_clients - comma separated list of dns of allowed clients
# (i.e. the SCIM RP client)
from org.xdi.model.custom.script.type.uma import UmaRptPolicyType
from org.xdi.model.uma import ClaimDefinitionBuilder
from org.xdi.util import StringHelper, ArrayHelper
from java.util import Arrays, ArrayList, HashSet
from java.lang import String
class UmaRptPolicy(UmaRptPolicyType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def init(self, configurationAttributes):
print "RPT Policy. Initializing ..."
self.clientsSet = self.prepareClientsSet(configurationAttributes)
print "RPT Policy. Initialized successfully"
return True
def destroy(self, configurationAttributes):
print "RPT Policy. Destroyed successfully"
return True
def getApiVersion(self):
return 1
def getRequiredClaims(self, context):
json = """[
]"""
return ClaimDefinitionBuilder.build(json)
def authorize(self, context): # context is reference of org.xdi.oxauth.uma.authorization.UmaAuthorizationContext
print "RPT Policy. Authorizing ..."
client_id = context.getHttpRequest().getParameter("client_id")
print "UmaRptPolicy. client_id=", client_id
if (StringHelper.isEmpty(client_id)):
return False
if (self.clientsSet.contains(client_id)):
print "UmaRptPolicy. Authorizing client"
return True
else:
print "UmaRptPolicy. Client isn't authorized"
return False
def prepareClientsSet(self, configurationAttributes):
clientsSet = HashSet()
if (not configurationAttributes.containsKey("allowed_clients")):
return clientsSet
allowedClientsList = configurationAttributes.get("allowed_clients").getValue2()
if (StringHelper.isEmpty(allowedClientsList)):
print "UmaRptPolicy. The property allowed_clients is empty"
return clientsSet
allowedClientsListArray = StringHelper.split(allowedClientsList, ",")
if (ArrayHelper.isEmpty(allowedClientsListArray)):
print "UmaRptPolicy. No clients specified in allowed_clients property"
return clientsSet
# Convert to HashSet to quick search
i = 0
count = len(allowedClientsListArray)
while (i < count):
client = allowedClientsListArray[i]
clientsSet.add(client)
i = i + 1
return clientsSet
|
|
fa9f9a0f76c86bfabd2e63db2add2829a749f639
|
tensorflow/contrib/hadoop/python/ops/hadoop_dataset_ops.py
|
tensorflow/contrib/hadoop/python/ops/hadoop_dataset_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SequenceFile Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.hadoop.python.ops import hadoop_op_loader # pylint: disable=unused-import
from tensorflow.contrib.hadoop.python.ops import gen_dataset_ops
from tensorflow.python.data.ops.dataset_ops import Dataset
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
class SequenceFileDataset(Dataset):
"""A Sequence File Dataset that reads the sequence file."""
def __init__(self, filenames, output_types=(dtypes.string, dtypes.string)):
"""Create a `SequenceFileDataset`.
`SequenceFileDataset` allows a user to read data from a hadoop sequence
file. A sequence file consists of (key value) pairs sequentially. At
the moment, `org.apache.hadoop.io.Text` is the only serialization type
being supported, and there is no compression support.
For example:
```python
dataset = tf.contrib.hadoop.SequenceFileDataset(
"/foo/bar.seq", (tf.string, tf.string))
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
# Prints the (key, value) pairs inside a hadoop sequence file.
while True:
try:
print(sess.run(next_element))
except tf.errors.OutOfRangeError:
break
```
Args:
filenames: A `tf.string` tensor containing one or more filenames.
output_types: A tuple of `tf.DType` objects representing the types of the
key-value pairs returned. Only `(tf.string, tf.string)` is supported
at the moment.
"""
super(SequenceFileDataset, self).__init__()
self._filenames = ops.convert_to_tensor(
filenames, dtype=dtypes.string, name="filenames")
self._output_types = output_types
def _as_variant_tensor(self):
return gen_dataset_ops.sequence_file_dataset(
self._filenames, nest.flatten(self.output_types))
@property
def output_classes(self):
return nest.map_structure(lambda _: ops.Tensor, self._output_types)
@property
def output_shapes(self):
return (tensor_shape.TensorShape([]), tensor_shape.TensorShape([]))
@property
def output_types(self):
return self._output_types
|
Add python wrapper for tf.contrib.hadoop.SequenceFileDataset
|
Add python wrapper for tf.contrib.hadoop.SequenceFileDataset
Signed-off-by: Yong Tang <765086fe2e0c1f980161f127fec596800f327f62@outlook.com>
|
Python
|
apache-2.0
|
hehongliang/tensorflow,dancingdan/tensorflow,aam-at/tensorflow,arborh/tensorflow,frreiss/tensorflow-fred,alsrgv/tensorflow,alsrgv/tensorflow,tensorflow/tensorflow,Intel-Corporation/tensorflow,ageron/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,seanli9jan/tensorflow,kobejean/tensorflow,ppwwyyxx/tensorflow,davidzchen/tensorflow,jhseu/tensorflow,ppwwyyxx/tensorflow,alshedivat/tensorflow,theflofly/tensorflow,jendap/tensorflow,yongtang/tensorflow,petewarden/tensorflow,karllessard/tensorflow,hfp/tensorflow-xsmm,ppwwyyxx/tensorflow,ageron/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,girving/tensorflow,xzturn/tensorflow,snnn/tensorflow,jendap/tensorflow,snnn/tensorflow,annarev/tensorflow,ageron/tensorflow,ageron/tensorflow,renyi533/tensorflow,ppwwyyxx/tensorflow,dongjoon-hyun/tensorflow,aam-at/tensorflow,karllessard/tensorflow,alsrgv/tensorflow,Bismarrck/tensorflow,theflofly/tensorflow,DavidNorman/tensorflow,chemelnucfin/tensorflow,asimshankar/tensorflow,apark263/tensorflow,apark263/tensorflow,annarev/tensorflow,Bismarrck/tensorflow,jhseu/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,davidzchen/tensorflow,ageron/tensorflow,ZhangXinNan/tensorflow,cxxgtxy/tensorflow,brchiu/tensorflow,tensorflow/tensorflow-pywrap_saved_model,manipopopo/tensorflow,DavidNorman/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,snnn/tensorflow,hehongliang/tensorflow,cxxgtxy/tensorflow,kevin-coder/tensorflow-fork,aam-at/tensorflow,brchiu/tensorflow,annarev/tensorflow,DavidNorman/tensorflow,ghchinoy/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,gautam1858/tensorflow,ZhangXinNan/tensorflow,Intel-tensorflow/tensorflow,asimshankar/tensorflow,alshedivat/tensorflow,girving/tensorflow,gautam1858/tensorflow,adit-chandra/tensorflow,xzturn/tensorflow,ppwwyyxx/tensorflow,chemelnucfin/tensorflow,alshedivat/tensorflow,tensorflow/tensorflow,freedomtan/tensorflow,brchiu/tensorflow,annarev/tensorflow,petewarden/tensorflow,aldian/tensorflow,ZhangXinNan/tensorflow,tensorflow/tensorflow-pywrap_saved_model,dongjoon-hyun/tensorflow,cxxgtxy/tensorflow,gautam1858/tensorflow,aldian/tensorflow,xodus7/tensorflow,snnn/tensorflow,adit-chandra/tensorflow,xodus7/tensorflow,gunan/tensorflow,freedomtan/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,aam-at/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,seanli9jan/tensorflow,AnishShah/tensorflow,ageron/tensorflow,tensorflow/tensorflow,aam-at/tensorflow,apark263/tensorflow,manipopopo/tensorflow,jendap/tensorflow,ageron/tensorflow,Intel-tensorflow/tensorflow,hehongliang/tensorflow,yongtang/tensorflow,AnishShah/tensorflow,jbedorf/tensorflow,frreiss/tensorflow-fred,ZhangXinNan/tensorflow,adit-chandra/tensorflow,girving/tensorflow,jhseu/tensorflow,arborh/tensorflow,ppwwyyxx/tensorflow,xzturn/tensorflow,chemelnucfin/tensorflow,frreiss/tensorflow-fred,adit-chandra/tensorflow,freedomtan/tensorflow,kevin-coder/tensorflow-fork,DavidNorman/tensorflow,manipopopo/tensorflow,kobejean/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,alsrgv/tensorflow,davidzchen/tensorflow,davidzchen/tensorflow,Bismarrck/tensorflow,aam-at/tensorflow,Intel-Corporation/tensorflow,jbedorf/tensorflow,tensorflow/tensorflow,jendap/tensorflow,alshedivat/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,aldian/tensorflow,seanli9jan/tensorflow,sarvex/tensorflow,theflofly/tensorflow,jhseu/tensorflow,dancingdan/tensorflow,kevin-coder/tensorflow-fork,alshedivat/tensorflow,kobejean/tensorflo
w,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,dancingdan/tensorflow,renyi533/tensorflow,renyi533/tensorflow,aam-at/tensorflow,jbedorf/tensorflow,kevin-coder/tensorflow-fork,dongjoon-hyun/tensorflow,xodus7/tensorflow,jhseu/tensorflow,dongjoon-hyun/tensorflow,gunan/tensorflow,karllessard/tensorflow,petewarden/tensorflow,brchiu/tensorflow,xzturn/tensorflow,seanli9jan/tensorflow,renyi533/tensorflow,frreiss/tensorflow-fred,ppwwyyxx/tensorflow,xzturn/tensorflow,arborh/tensorflow,xodus7/tensorflow,cxxgtxy/tensorflow,petewarden/tensorflow,chemelnucfin/tensorflow,theflofly/tensorflow,apark263/tensorflow,manipopopo/tensorflow,chemelnucfin/tensorflow,hfp/tensorflow-xsmm,Intel-Corporation/tensorflow,hehongliang/tensorflow,seanli9jan/tensorflow,alshedivat/tensorflow,DavidNorman/tensorflow,Bismarrck/tensorflow,manipopopo/tensorflow,annarev/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,AnishShah/tensorflow,arborh/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,frreiss/tensorflow-fred,chemelnucfin/tensorflow,aam-at/tensorflow,brchiu/tensorflow,alshedivat/tensorflow,tensorflow/tensorflow-pywrap_saved_model,brchiu/tensorflow,snnn/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,Intel-tensorflow/tensorflow,adit-chandra/tensorflow,girving/tensorflow,renyi533/tensorflow,kevin-coder/tensorflow-fork,AnishShah/tensorflow,tensorflow/tensorflow-pywrap_saved_model,dongjoon-hyun/tensorflow,petewarden/tensorflow,jhseu/tensorflow,hfp/tensorflow-xsmm,aldian/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,dancingdan/tensorflow,apark263/tensorflow,karllessard/tensorflow,aam-at/tensorflow,manipopopo/tensorflow,tensorflow/tensorflow,xodus7/tensorflow,jbedorf/tensorflow,gunan/tensorflow,DavidNorman/tensorflow,jbedorf/tensorflow,arborh/tensorflow,xzturn/tensorflow,cxxgtxy/tensorflow,kevin-coder/tensorflow-fork,tensorflow/tensorflow-pywrap_tf_optimizer,annarev/tensorflow,annarev/tensorflow,xzturn/tensorflow,aam-at/tensorflow,xzturn/tensorflow,Bismarrck/tensorflow,girving/tensorflow,AnishShah/tensorflow,alsrgv/tensorflow,hfp/tensorflow-xsmm,ZhangXinNan/tensorflow,renyi533/tensorflow,kobejean/tensorflow,theflofly/tensorflow,yongtang/tensorflow,freedomtan/tensorflow,kobejean/tensorflow,ZhangXinNan/tensorflow,ageron/tensorflow,gunan/tensorflow,apark263/tensorflow,sarvex/tensorflow,theflofly/tensorflow,karllessard/tensorflow,chemelnucfin/tensorflow,hfp/tensorflow-xsmm,manipopopo/tensorflow,paolodedios/tensorflow,jbedorf/tensorflow,seanli9jan/tensorflow,jendap/tensorflow,gunan/tensorflow,gautam1858/tensorflow,davidzchen/tensorflow,davidzchen/tensorflow,gunan/tensorflow,dancingdan/tensorflow,seanli9jan/tensorflow,ghchinoy/tensorflow,gunan/tensorflow,karllessard/tensorflow,sarvex/tensorflow,jhseu/tensorflow,snnn/tensorflow,ghchinoy/tensorflow,snnn/tensorflow,xodus7/tensorflow,alsrgv/tensorflow,paolodedios/tensorflow,ghchinoy/tensorflow,annarev/tensorflow,tensorflow/tensorflow-pywrap_saved_model,arborh/tensorflow,DavidNorman/tensorflow,gautam1858/tensorflow,alsrgv/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,snnn/tensorflow,renyi533/tensorflow,apark263/tensorflow,chemelnucfin/tensorflow,ageron/tensorflow,gautam1858/tensorflow,kobejean/tensorflow,AnishShah/tensorflow,ZhangXinNan/tensorflow,freedomtan/tensorflow,ppwwyyxx/tensorflow,kobejean/tensorflow,Bismarrck/tensorflow,freedomtan/tensorflow,arborh/tensorflow,theflofly/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,kobejean/tensorflow,B
ismarrck/tensorflow,kevin-coder/tensorflow-fork,jhseu/tensorflow,cxxgtxy/tensorflow,annarev/tensorflow,apark263/tensorflow,asimshankar/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow,jbedorf/tensorflow,ZhangXinNan/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,renyi533/tensorflow,renyi533/tensorflow,jbedorf/tensorflow,Intel-tensorflow/tensorflow,seanli9jan/tensorflow,Intel-Corporation/tensorflow,asimshankar/tensorflow,sarvex/tensorflow,Bismarrck/tensorflow,brchiu/tensorflow,cxxgtxy/tensorflow,xodus7/tensorflow,gautam1858/tensorflow,petewarden/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,snnn/tensorflow,paolodedios/tensorflow,hehongliang/tensorflow,alsrgv/tensorflow,manipopopo/tensorflow,davidzchen/tensorflow,xodus7/tensorflow,dongjoon-hyun/tensorflow,ppwwyyxx/tensorflow,chemelnucfin/tensorflow,asimshankar/tensorflow,jendap/tensorflow,tensorflow/tensorflow-pywrap_saved_model,arborh/tensorflow,tensorflow/tensorflow-pywrap_saved_model,theflofly/tensorflow,ZhangXinNan/tensorflow,karllessard/tensorflow,jbedorf/tensorflow,davidzchen/tensorflow,seanli9jan/tensorflow,frreiss/tensorflow-fred,alshedivat/tensorflow,jhseu/tensorflow,jendap/tensorflow,xzturn/tensorflow,DavidNorman/tensorflow,adit-chandra/tensorflow,hehongliang/tensorflow,hfp/tensorflow-xsmm,DavidNorman/tensorflow,annarev/tensorflow,hfp/tensorflow-xsmm,adit-chandra/tensorflow,ghchinoy/tensorflow,petewarden/tensorflow,gunan/tensorflow,ppwwyyxx/tensorflow,frreiss/tensorflow-fred,ppwwyyxx/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,sarvex/tensorflow,asimshankar/tensorflow,frreiss/tensorflow-fred,cxxgtxy/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,DavidNorman/tensorflow,xodus7/tensorflow,dancingdan/tensorflow,gunan/tensorflow,girving/tensorflow,kobejean/tensorflow,sarvex/tensorflow,alsrgv/tensorflow,tensorflow/tensorflow,Bismarrck/tensorflow,jhseu/tensorflow,kevin-coder/tensorflow-fork,hfp/tensorflow-xsmm,AnishShah/tensorflow,hehongliang/tensorflow,alshedivat/tensorflow,paolodedios/tensorflow,freedomtan/tensorflow,xodus7/tensorflow,hfp/tensorflow-xsmm,theflofly/tensorflow,hfp/tensorflow-xsmm,adit-chandra/tensorflow,ppwwyyxx/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,apark263/tensorflow,arborh/tensorflow,jbedorf/tensorflow,asimshankar/tensorflow,DavidNorman/tensorflow,ghchinoy/tensorflow,seanli9jan/tensorflow,davidzchen/tensorflow,girving/tensorflow,AnishShah/tensorflow,theflofly/tensorflow,alsrgv/tensorflow,DavidNorman/tensorflow,ageron/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,petewarden/tensorflow,aldian/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,dongjoon-hyun/tensorflow,asimshankar/tensorflow,renyi533/tensorflow,apark263/tensorflow,Intel-tensorflow/tensorflow,jbedorf/tensorflow,dancingdan/tensorflow,adit-chandra/tensorflow,ghchinoy/tensorflow,gautam1858/tensorflow,xzturn/tensorflow,ageron/tensorflow,seanli9jan/tensorflow,dongjoon-hyun/tensorflow,dancingdan/tensorflow,frreiss/tensorflow-fred,alsrgv/tensorflow,arborh/tensorflow,alshedivat/tensorflow,manipopopo/tensorflow,ghchinoy/tensorflow,jhseu/tensorflow,ghchinoy/tensorflow,ghchinoy/tensorflow,asimshankar/tensorflow,theflofly/tensorflow,alsrgv/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,freedomtan/tensorflow,manipopopo/tensorflow,renyi533/tensorflow,freedomtan/tensorflow,ZhangXinNan/tensorflow,
aam-at/tensorflow,aldian/tensorflow,brchiu/tensorflow,gautam1858/tensorflow,arborh/tensorflow,aam-at/tensorflow,jendap/tensorflow,kevin-coder/tensorflow-fork,annarev/tensorflow,manipopopo/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,alshedivat/tensorflow,gautam1858/tensorflow,brchiu/tensorflow,dancingdan/tensorflow,Bismarrck/tensorflow,sarvex/tensorflow,Intel-tensorflow/tensorflow,arborh/tensorflow,chemelnucfin/tensorflow,kobejean/tensorflow,freedomtan/tensorflow,yongtang/tensorflow,jhseu/tensorflow,frreiss/tensorflow-fred,Intel-Corporation/tensorflow,AnishShah/tensorflow,dongjoon-hyun/tensorflow,gunan/tensorflow,yongtang/tensorflow,petewarden/tensorflow,girving/tensorflow,aldian/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,freedomtan/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gunan/tensorflow,ghchinoy/tensorflow,adit-chandra/tensorflow,aldian/tensorflow,snnn/tensorflow,AnishShah/tensorflow,adit-chandra/tensorflow,sarvex/tensorflow,Intel-Corporation/tensorflow,Intel-Corporation/tensorflow,hfp/tensorflow-xsmm,renyi533/tensorflow,Bismarrck/tensorflow,asimshankar/tensorflow,yongtang/tensorflow,snnn/tensorflow,asimshankar/tensorflow,ghchinoy/tensorflow,dongjoon-hyun/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,kevin-coder/tensorflow-fork,adit-chandra/tensorflow,AnishShah/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,girving/tensorflow,xzturn/tensorflow,ZhangXinNan/tensorflow,ageron/tensorflow,xodus7/tensorflow,jendap/tensorflow,Intel-Corporation/tensorflow,jendap/tensorflow,xzturn/tensorflow,karllessard/tensorflow,apark263/tensorflow,brchiu/tensorflow,girving/tensorflow,jbedorf/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gunan/tensorflow,petewarden/tensorflow,brchiu/tensorflow,theflofly/tensorflow,dongjoon-hyun/tensorflow,dancingdan/tensorflow,frreiss/tensorflow-fred,jendap/tensorflow,freedomtan/tensorflow,chemelnucfin/tensorflow,chemelnucfin/tensorflow,dancingdan/tensorflow,kobejean/tensorflow,yongtang/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,girving/tensorflow,paolodedios/tensorflow,kevin-coder/tensorflow-fork,davidzchen/tensorflow,paolodedios/tensorflow
|
Add python wrapper for tf.contrib.hadoop.SequenceFileDataset
Signed-off-by: Yong Tang <765086fe2e0c1f980161f127fec596800f327f62@outlook.com>
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SequenceFile Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.hadoop.python.ops import hadoop_op_loader # pylint: disable=unused-import
from tensorflow.contrib.hadoop.python.ops import gen_dataset_ops
from tensorflow.python.data.ops.dataset_ops import Dataset
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
class SequenceFileDataset(Dataset):
"""A Sequence File Dataset that reads the sequence file."""
def __init__(self, filenames, output_types=(dtypes.string, dtypes.string)):
"""Create a `SequenceFileDataset`.
`SequenceFileDataset` allows a user to read data from a Hadoop sequence
file, which consists of (key, value) pairs stored sequentially. At the
moment, `org.apache.hadoop.io.Text` is the only supported serialization
type, and there is no compression support.
For example:
```python
dataset = tf.contrib.hadoop.SequenceFileDataset(
"/foo/bar.seq", (tf.string, tf.string))
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
# Prints the (key, value) pairs inside a hadoop sequence file.
while True:
try:
print(sess.run(next_element))
except tf.errors.OutOfRangeError:
break
```
Args:
filenames: A `tf.string` tensor containing one or more filenames.
output_types: A tuple of `tf.DType` objects representing the types of the
key-value pairs returned. Only `(tf.string, tf.string)` is supported
at the moment.
"""
super(SequenceFileDataset, self).__init__()
self._filenames = ops.convert_to_tensor(
filenames, dtype=dtypes.string, name="filenames")
self._output_types = output_types
def _as_variant_tensor(self):
return gen_dataset_ops.sequence_file_dataset(
self._filenames, nest.flatten(self.output_types))
@property
def output_classes(self):
return nest.map_structure(lambda _: ops.Tensor, self._output_types)
@property
def output_shapes(self):
return (tensor_shape.TensorShape([]), tensor_shape.TensorShape([]))
@property
def output_types(self):
return self._output_types
|
<commit_before><commit_msg>Add python wrapper for tf.contrib.hadoop.SequenceFileDataset
Signed-off-by: Yong Tang <765086fe2e0c1f980161f127fec596800f327f62@outlook.com><commit_after>
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SequenceFile Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.hadoop.python.ops import hadoop_op_loader # pylint: disable=unused-import
from tensorflow.contrib.hadoop.python.ops import gen_dataset_ops
from tensorflow.python.data.ops.dataset_ops import Dataset
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
class SequenceFileDataset(Dataset):
"""A Sequence File Dataset that reads the sequence file."""
def __init__(self, filenames, output_types=(dtypes.string, dtypes.string)):
"""Create a `SequenceFileDataset`.
`SequenceFileDataset` allows a user to read data from a Hadoop sequence
file, which consists of (key, value) pairs stored sequentially. At the
moment, `org.apache.hadoop.io.Text` is the only supported serialization
type, and there is no compression support.
For example:
```python
dataset = tf.contrib.hadoop.SequenceFileDataset(
"/foo/bar.seq", (tf.string, tf.string))
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
# Prints the (key, value) pairs inside a hadoop sequence file.
while True:
try:
print(sess.run(next_element))
except tf.errors.OutOfRangeError:
break
```
Args:
filenames: A `tf.string` tensor containing one or more filenames.
output_types: A tuple of `tf.DType` objects representing the types of the
key-value pairs returned. Only `(tf.string, tf.string)` is supported
at the moment.
"""
super(SequenceFileDataset, self).__init__()
self._filenames = ops.convert_to_tensor(
filenames, dtype=dtypes.string, name="filenames")
self._output_types = output_types
def _as_variant_tensor(self):
return gen_dataset_ops.sequence_file_dataset(
self._filenames, nest.flatten(self.output_types))
@property
def output_classes(self):
return nest.map_structure(lambda _: ops.Tensor, self._output_types)
@property
def output_shapes(self):
return (tensor_shape.TensorShape([]), tensor_shape.TensorShape([]))
@property
def output_types(self):
return self._output_types
|
Add python wrapper for tf.contrib.hadoop.SequenceFileDataset
Signed-off-by: Yong Tang <765086fe2e0c1f980161f127fec596800f327f62@outlook.com># Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SequenceFile Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.hadoop.python.ops import hadoop_op_loader # pylint: disable=unused-import
from tensorflow.contrib.hadoop.python.ops import gen_dataset_ops
from tensorflow.python.data.ops.dataset_ops import Dataset
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
class SequenceFileDataset(Dataset):
"""A Sequence File Dataset that reads the sequence file."""
def __init__(self, filenames, output_types=(dtypes.string, dtypes.string)):
"""Create a `SequenceFileDataset`.
`SequenceFileDataset` allows a user to read data from a Hadoop sequence
file, which consists of (key, value) pairs stored sequentially. At the
moment, `org.apache.hadoop.io.Text` is the only supported serialization
type, and there is no compression support.
For example:
```python
dataset = tf.contrib.hadoop.SequenceFileDataset(
"/foo/bar.seq", (tf.string, tf.string))
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
# Prints the (key, value) pairs inside a hadoop sequence file.
while True:
try:
print(sess.run(next_element))
except tf.errors.OutOfRangeError:
break
```
Args:
filenames: A `tf.string` tensor containing one or more filenames.
output_types: A tuple of `tf.DType` objects representing the types of the
key-value pairs returned. Only `(tf.string, tf.string)` is supported
at the moment.
"""
super(SequenceFileDataset, self).__init__()
self._filenames = ops.convert_to_tensor(
filenames, dtype=dtypes.string, name="filenames")
self._output_types = output_types
def _as_variant_tensor(self):
return gen_dataset_ops.sequence_file_dataset(
self._filenames, nest.flatten(self.output_types))
@property
def output_classes(self):
return nest.map_structure(lambda _: ops.Tensor, self._output_types)
@property
def output_shapes(self):
return (tensor_shape.TensorShape([]), tensor_shape.TensorShape([]))
@property
def output_types(self):
return self._output_types
|
<commit_before><commit_msg>Add python wrapper for tf.contrib.hadoop.SequenceFileDataset
Signed-off-by: Yong Tang <765086fe2e0c1f980161f127fec596800f327f62@outlook.com><commit_after># Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SequenceFile Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.hadoop.python.ops import hadoop_op_loader # pylint: disable=unused-import
from tensorflow.contrib.hadoop.python.ops import gen_dataset_ops
from tensorflow.python.data.ops.dataset_ops import Dataset
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
class SequenceFileDataset(Dataset):
"""A Sequence File Dataset that reads the sequence file."""
def __init__(self, filenames, output_types=(dtypes.string, dtypes.string)):
"""Create a `SequenceFileDataset`.
`SequenceFileDataset` allows a user to read data from a Hadoop sequence
file, which consists of (key, value) pairs stored sequentially. At the
moment, `org.apache.hadoop.io.Text` is the only supported serialization
type, and there is no compression support.
For example:
```python
dataset = tf.contrib.hadoop.SequenceFileDataset(
"/foo/bar.seq", (tf.string, tf.string))
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
# Prints the (key, value) pairs inside a hadoop sequence file.
while True:
try:
print(sess.run(next_element))
except tf.errors.OutOfRangeError:
break
```
Args:
filenames: A `tf.string` tensor containing one or more filenames.
output_types: A tuple of `tf.DType` objects representing the types of the
key-value pairs returned. Only `(tf.string, tf.string)` is supported
at the moment.
"""
super(SequenceFileDataset, self).__init__()
self._filenames = ops.convert_to_tensor(
filenames, dtype=dtypes.string, name="filenames")
self._output_types = output_types
def _as_variant_tensor(self):
return gen_dataset_ops.sequence_file_dataset(
self._filenames, nest.flatten(self.output_types))
@property
def output_classes(self):
return nest.map_structure(lambda _: ops.Tensor, self._output_types)
@property
def output_shapes(self):
return (tensor_shape.TensorShape([]), tensor_shape.TensorShape([]))
@property
def output_types(self):
return self._output_types
|
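As a side note on the wrapper above: the docstring example leaves the session setup implicit (`sess` is undefined there). A slightly fuller sketch, assuming a TensorFlow 1.x build that ships the contrib hadoop ops and using a placeholder file path, would look like this:
```python
import tensorflow as tf

# Placeholder path; a real Hadoop SequenceFile with org.apache.hadoop.io.Text
# keys and values is assumed here.
dataset = tf.contrib.hadoop.SequenceFileDataset("/tmp/example.seq")
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()

with tf.Session() as sess:
    while True:
        try:
            key, value = sess.run(next_element)  # both come back as byte strings
            print(key, value)
        except tf.errors.OutOfRangeError:
            break
```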
|
e3aa9d95f9b2ded1c36820c3049156fb3913a183
|
src/dashboard/src/main/management/commands/update_elasticsearch_mappings.py
|
src/dashboard/src/main/management/commands/update_elasticsearch_mappings.py
|
# -*- coding: utf-8 -*-
"""Update Elasticsearch mappings for Archivematica 1.12
This command updates the Elasticsearch mappings for the aips and
aipfiles indices for Archivematica 1.12 to enable sorting on all fields
displayed in the new Archival Storage DataTable and populates the new
filePath.raw subfield in the aipfiles index using an Update By Query.
Execution example:
./manage.py update_elasticsearch_mappings
"""
from __future__ import absolute_import, print_function
import sys
from django.conf import settings
from elasticsearch import ElasticsearchException
from main.management.commands import DashboardCommand
import elasticSearchFunctions as es
class Command(DashboardCommand):
help = __doc__
def handle(self, *args, **options):
# Check that the AIPs index is enabled before proceeding.
if es.AIPS_INDEX not in settings.SEARCH_ENABLED:
self.error(
"The AIPs indexes are not enabled. Please, make sure to "
"set the *_SEARCH_ENABLED environment variables to `true` "
"to enable the AIPs and Transfers indexes, or to `aips` "
"to only enable the AIPs indexes."
)
sys.exit(1)
try:
es.setup_reading_from_conf(settings)
es_client = es.get_client()
except ElasticsearchException:
self.error("Error: Elasticsearch may not be running.")
sys.exit(1)
# Update the AIPs index mappings.
es_client.indices.put_mapping(
index=es.AIPS_INDEX,
doc_type=es.DOC_TYPE,
body={
"properties": {
"accessionids": {"type": "keyword"},
"status": {"type": "keyword"},
"file_count": {"type": "integer"},
}
},
)
# Update the AIP files index mapping.
es_client.indices.put_mapping(
index=es.AIP_FILES_INDEX,
doc_type=es.DOC_TYPE,
body={
"properties": {
"accessionid": {"type": "keyword"},
"status": {"type": "keyword"},
"filePath": {
"type": "text",
"analyzer": "file_path_and_name",
"fields": {"raw": {"type": "keyword"}},
},
}
},
)
# Perform an update by query on the aipfiles index to populate
# the filePath.raw subfield from existing text values. We do
# not specify a query to ensure that all documents are updated.
es_client.update_by_query(es.AIP_FILES_INDEX)
|
Add management command to update ES mappings
|
Add management command to update ES mappings
This management command uses Put Mapping to update the mappings for the
aips and aipfiles indices, to ensure proper sorting in the Archival
Storage tab DataTable, and runs an Update By Query to populate the new
filePath.raw subfield in the aipfiles index.
This command must be run immediately following the upgrade to
Archivematica 1.12, prior to new AIPs being ingested, as Put Mapping is
unable to change the data type of existing ES fields. New string fields
will be automatically set to "text" rather than "keyword" if not
explicitly defined in the mapping, which will prevent the Put Mapping
command from working.
|
Python
|
agpl-3.0
|
artefactual/archivematica,artefactual/archivematica,artefactual/archivematica,artefactual/archivematica
|
Add management command to update ES mappings
This management command uses Put Mapping to update the mappings for the
aips and aipfiles indices, to ensure proper sorting in the Archival
Storage tab DataTable, and runs an Update By Query to populate the new
filePath.raw subfield in the aipfiles index.
This command must be run immediately following the upgrade to
Archivematica 1.12, prior to new AIPs being ingested, as Put Mapping is
unable to change the data type of existing ES fields. New string fields
will be automatically set to "text" rather than "keyword" if not
explicitly defined in the mapping, which will prevent the Put Mapping
command from working.
|
# -*- coding: utf-8 -*-
"""Update Elasticsearch mappings for Archivematica 1.12
This command updates the Elasticsearch mappings for the aips and
aipfiles indices for Archivematica 1.12 to enable sorting on all fields
displayed in the new Archival Storage DataTable and populates the new
filePath.raw subfield in the aipfiles index using an Update By Query.
Execution example:
./manage.py update_elasticsearch_mappings
"""
from __future__ import absolute_import, print_function
import sys
from django.conf import settings
from elasticsearch import ElasticsearchException
from main.management.commands import DashboardCommand
import elasticSearchFunctions as es
class Command(DashboardCommand):
help = __doc__
def handle(self, *args, **options):
# Check that the AIPs index is enabled before proceeding.
if es.AIPS_INDEX not in settings.SEARCH_ENABLED:
self.error(
"The AIPs indexes are not enabled. Please, make sure to "
"set the *_SEARCH_ENABLED environment variables to `true` "
"to enable the AIPs and Transfers indexes, or to `aips` "
"to only enable the AIPs indexes."
)
sys.exit(1)
try:
es.setup_reading_from_conf(settings)
es_client = es.get_client()
except ElasticsearchException:
self.error("Error: Elasticsearch may not be running.")
sys.exit(1)
# Update the AIPs index mappings.
es_client.indices.put_mapping(
index=es.AIPS_INDEX,
doc_type=es.DOC_TYPE,
body={
"properties": {
"accessionids": {"type": "keyword"},
"status": {"type": "keyword"},
"file_count": {"type": "integer"},
}
},
)
# Update the AIP files index mapping.
es_client.indices.put_mapping(
index=es.AIP_FILES_INDEX,
doc_type=es.DOC_TYPE,
body={
"properties": {
"accessionid": {"type": "keyword"},
"status": {"type": "keyword"},
"filePath": {
"type": "text",
"analyzer": "file_path_and_name",
"fields": {"raw": {"type": "keyword"}},
},
}
},
)
# Perform an update by query on the aipfiles index to populate
# the filePath.raw subfield from existing text values. We do
# not specify a query to ensure that all documents are updated.
es_client.update_by_query(es.AIP_FILES_INDEX)
|
<commit_before><commit_msg>Add management command to update ES mappings
This management command uses Put Mapping to update the mappings for the
aips and aipfiles indices, to ensure proper sorting in the Archival
Storage tab DataTable, and runs an Update By Query to populate the new
filePath.raw subfield in the aipfiles index.
This command must be run immediately following the upgrade to
Archivematica 1.12, prior to new AIPs being ingested, as Put Mapping is
unable to change the data type of existing ES fields. New string fields
will be automatically set to "text" rather than "keyword" if not
explicitly defined in the mapping, which will prevent the Put Mapping
command from working.<commit_after>
|
# -*- coding: utf-8 -*-
"""Update Elasticsearch mappings for Archivematica 1.12
This command updates the Elasticsearch mappings for the aips and
aipfiles indices for Archivematica 1.12 to enable sorting on all fields
displayed in the new Archival Storage DataTable and populates the new
filePath.raw subfield in the aipfiles index using an Update By Query.
Execution example:
./manage.py update_elasticsearch_mappings
"""
from __future__ import absolute_import, print_function
import sys
from django.conf import settings
from elasticsearch import ElasticsearchException
from main.management.commands import DashboardCommand
import elasticSearchFunctions as es
class Command(DashboardCommand):
help = __doc__
def handle(self, *args, **options):
# Check that the AIPs index is enabled before proceeding.
if es.AIPS_INDEX not in settings.SEARCH_ENABLED:
self.error(
"The AIPs indexes are not enabled. Please, make sure to "
"set the *_SEARCH_ENABLED environment variables to `true` "
"to enable the AIPs and Transfers indexes, or to `aips` "
"to only enable the AIPs indexes."
)
sys.exit(1)
try:
es.setup_reading_from_conf(settings)
es_client = es.get_client()
except ElasticsearchException:
self.error("Error: Elasticsearch may not be running.")
sys.exit(1)
# Update the AIPs index mappings.
es_client.indices.put_mapping(
index=es.AIPS_INDEX,
doc_type=es.DOC_TYPE,
body={
"properties": {
"accessionids": {"type": "keyword"},
"status": {"type": "keyword"},
"file_count": {"type": "integer"},
}
},
)
# Update the AIP files index mapping.
es_client.indices.put_mapping(
index=es.AIP_FILES_INDEX,
doc_type=es.DOC_TYPE,
body={
"properties": {
"accessionid": {"type": "keyword"},
"status": {"type": "keyword"},
"filePath": {
"type": "text",
"analyzer": "file_path_and_name",
"fields": {"raw": {"type": "keyword"}},
},
}
},
)
# Perform an update by query on the aipfiles index to populate
# the filePath.raw subfield from existing text values. We do
# not specify a query to ensure that all documents are updated.
es_client.update_by_query(es.AIP_FILES_INDEX)
|
Add management command to update ES mappings
This management command uses Put Mapping to update the mappings for the
aips and aipfiles indices, to ensure proper sorting in the Archival
Storage tab DataTable, and runs an Update By Query to populate the new
filePath.raw subfield in the aipfiles index.
This command must be run immediately following the upgrade to
Archivematica 1.12, prior to new AIPs being ingested, as Put Mapping is
unable to change the data type of existing ES fields. New string fields
will be automatically set to "text" rather than "keyword" if not
explicitly defined in the mapping, which will prevent the Put Mapping
command from working.# -*- coding: utf-8 -*-
"""Update Elasticsearch mappings for Archivematica 1.12
This command updates the Elasticsearch mappings for the aips and
aipfiles indices for Archivematica 1.12 to enable sorting on all fields
displayed in the new Archival Storage DataTable and populates the new
filePath.raw subfield in the aipfiles index using an Update By Query.
Execution example:
./manage.py update_elasticsearch_mappings
"""
from __future__ import absolute_import, print_function
import sys
from django.conf import settings
from elasticsearch import ElasticsearchException
from main.management.commands import DashboardCommand
import elasticSearchFunctions as es
class Command(DashboardCommand):
help = __doc__
def handle(self, *args, **options):
# Check that the AIPs index is enabled before proceeding.
if es.AIPS_INDEX not in settings.SEARCH_ENABLED:
self.error(
"The AIPs indexes are not enabled. Please, make sure to "
"set the *_SEARCH_ENABLED environment variables to `true` "
"to enable the AIPs and Transfers indexes, or to `aips` "
"to only enable the AIPs indexes."
)
sys.exit(1)
try:
es.setup_reading_from_conf(settings)
es_client = es.get_client()
except ElasticsearchException:
self.error("Error: Elasticsearch may not be running.")
sys.exit(1)
# Update the AIPs index mappings.
es_client.indices.put_mapping(
index=es.AIPS_INDEX,
doc_type=es.DOC_TYPE,
body={
"properties": {
"accessionids": {"type": "keyword"},
"status": {"type": "keyword"},
"file_count": {"type": "integer"},
}
},
)
# Update the AIP files index mapping.
es_client.indices.put_mapping(
index=es.AIP_FILES_INDEX,
doc_type=es.DOC_TYPE,
body={
"properties": {
"accessionid": {"type": "keyword"},
"status": {"type": "keyword"},
"filePath": {
"type": "text",
"analyzer": "file_path_and_name",
"fields": {"raw": {"type": "keyword"}},
},
}
},
)
# Perform an update by query on the aipfiles index to populate
# the filePath.raw subfield from existing text values. We do
# not specify a query to ensure that all documents are updated.
es_client.update_by_query(es.AIP_FILES_INDEX)
|
<commit_before><commit_msg>Add management command to update ES mappings
This management command uses Put Mapping to update the mappings for the
aips and aipfiles indices, to ensure proper sorting in the Archival
Storage tab DataTable, and runs an Update By Query to populate the new
filePath.raw subfield in the aipfiles index.
This command must be run immediately following the upgrade to
Archivematica 1.12, prior to new AIPs being ingested, as Put Mapping is
unable to change the data type of existing ES fields. New string fields
will be automatically set to "text" rather than "keyword" if not
explicitly defined in the mapping, which will prevent the Put Mapping
command from working.<commit_after># -*- coding: utf-8 -*-
"""Update Elasticsearch mappings for Archivematica 1.12
This command updates the Elasticsearch mappings for the aips and
aipfiles indices for Archivematica 1.12 to enable sorting on all fields
displayed in the new Archival Storage DataTable and populates the new
filePath.raw subfield in the aipfiles index using an Update By Query.
Execution example:
./manage.py update_elasticsearch_mappings
"""
from __future__ import absolute_import, print_function
import sys
from django.conf import settings
from elasticsearch import ElasticsearchException
from main.management.commands import DashboardCommand
import elasticSearchFunctions as es
class Command(DashboardCommand):
help = __doc__
def handle(self, *args, **options):
# Check that the AIPs index is enabled before proceeding.
if es.AIPS_INDEX not in settings.SEARCH_ENABLED:
self.error(
"The AIPs indexes are not enabled. Please, make sure to "
"set the *_SEARCH_ENABLED environment variables to `true` "
"to enable the AIPs and Transfers indexes, or to `aips` "
"to only enable the AIPs indexes."
)
sys.exit(1)
try:
es.setup_reading_from_conf(settings)
es_client = es.get_client()
except ElasticsearchException:
self.error("Error: Elasticsearch may not be running.")
sys.exit(1)
# Update the AIPs index mappings.
es_client.indices.put_mapping(
index=es.AIPS_INDEX,
doc_type=es.DOC_TYPE,
body={
"properties": {
"accessionids": {"type": "keyword"},
"status": {"type": "keyword"},
"file_count": {"type": "integer"},
}
},
)
# Update the AIP files index mapping.
es_client.indices.put_mapping(
index=es.AIP_FILES_INDEX,
doc_type=es.DOC_TYPE,
body={
"properties": {
"accessionid": {"type": "keyword"},
"status": {"type": "keyword"},
"filePath": {
"type": "text",
"analyzer": "file_path_and_name",
"fields": {"raw": {"type": "keyword"}},
},
}
},
)
# Perform an update by query on the aipfiles index to populate
# the filePath.raw subfield from existing text values. We do
# not specify a query to ensure that all documents are updated.
es_client.update_by_query(es.AIP_FILES_INDEX)
|
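A hypothetical follow-up check, not part of the commit: after running `./manage.py update_elasticsearch_mappings` and before ingesting new AIPs, the updated mapping can be inspected with the same client helpers the command uses, to confirm that the `filePath.raw` keyword subfield is present. Index names come from `elasticSearchFunctions`, and the response layout assumes Elasticsearch 6.x.
```python
from django.conf import settings

import elasticSearchFunctions as es

es.setup_reading_from_conf(settings)
client = es.get_client()

# Fetch the aipfiles mapping and print it; the filePath property should now
# carry a "raw" keyword subfield alongside the analyzed text field.
mapping = client.indices.get_mapping(index=es.AIP_FILES_INDEX)
print(mapping)
```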
|
269439513e2f9f84e89592565b20d9ff193fe210
|
pyes/scriptfields.py
|
pyes/scriptfields.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Armando Guereca'
class ScriptFields:
_internal_name = "script_fields"
"""
This object creates the script_fields definition
"""
def __init__(self, field_name, script, params = None):
self.fields={}
self.add_field(field_name, script, params or {})
def add_field(self, field_name, script, params = None):
"""
Add a field to script_fields
"""
data = {}
if script:
data['script'] = script
else:
raise ScriptFieldsError("Script is required for script_fields definition")
if params:
if isinstance(params, dict):
if len(params):
data['params'] = params
else:
raise ScriptFieldsError("Parameters should be a valid dictionary")
self.fields[field_name] = data
def add_parameter(self, field_name, param_name, param_value):
"""
Add a parameter to a field into script_fields
The ScriptFields object will be returned, so calls to this can be chained.
"""
try:
self.fields[field_name]['params'][param_name] = param_value
except Exception as ex:
raise ScriptFieldsError("Error adding parameter %s with value %s :%s" % (param_name,param_value,ex))
return self
def serialize(self):
return self.fields
|
Add ScriptFields object used as parameter script_fields of Search object
|
Add ScriptFields object used as parameter script_fields of Search object
|
Python
|
bsd-3-clause
|
rookdev/pyes,jayzeng/pyes,mouadino/pyes,jayzeng/pyes,mouadino/pyes,aparo/pyes,HackLinux/pyes,aparo/pyes,haiwen/pyes,Fiedzia/pyes,rookdev/pyes,mavarick/pyes,aparo/pyes,HackLinux/pyes,haiwen/pyes,HackLinux/pyes,Fiedzia/pyes,jayzeng/pyes,mavarick/pyes,haiwen/pyes,mavarick/pyes,Fiedzia/pyes
|
Add ScriptFields object used as parameter script_fields of Search object
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Armando Guereca'
class ScriptFields:
_internal_name = "script_fields"
"""
This object creates the script_fields definition
"""
def __init__(self, field_name, script, params = None):
self.fields={}
self.add_field(field_name, script, params or {})
def add_field(self, field_name, script, params = None):
"""
Add a field to script_fields
"""
data = {}
if script:
data['script'] = script
else:
raise ScriptFieldsError("Script is required for script_fields definition")
if params:
if isinstance(params, dict):
if len(params):
data['params'] = params
else:
raise ScriptFieldsError("Parameters should be a valid dictionary")
self.fields[field_name] = data
def add_parameter(self, field_name, param_name, param_value):
"""
Add a parameter to a field into script_fields
The ScriptFields object will be returned, so calls to this can be chained.
"""
try:
self.fields[field_name]['params'][param_name] = param_value
except Exception as ex:
raise ScriptFieldsError("Error adding parameter %s with value %s :%s" % (param_name,param_value,ex))
return self
def serialize(self):
return self.fields
|
<commit_before><commit_msg>Add ScriptFields object used as parameter script_fields of Search object<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Armando Guereca'
class ScriptFields:
_internal_name = "script_fields"
"""
This object creates the script_fields definition
"""
def __init__(self, field_name, script, params = None):
self.fields={}
self.add_field(field_name, script, params or {})
def add_field(self, field_name, script, params = None):
"""
Add a field to script_fields
"""
data = {}
if script:
data['script'] = script
else:
raise ScriptFieldsError("Script is required for script_fields definition")
if params:
if isinstance(params, dict):
if len(params):
data['params'] = params
else:
raise ScriptFieldsError("Parameters should be a valid dictionary")
self.fields[field_name] = data
def add_parameter(self, field_name, param_name, param_value):
"""
Add a parameter to a field into script_fields
The ScriptFields object will be returned, so calls to this can be chained.
"""
try:
self.fields[field_name]['params'][param_name] = param_value
except Exception as ex:
raise ScriptFieldsError("Error adding parameter %s with value %s :%s" % (param_name,param_value,ex))
return self
def serialize(self):
return self.fields
|
Add ScriptFields object used as parameter script_fields of Search object#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Armando Guereca'
class ScriptFields:
_internal_name = "script_fields"
"""
This object creates the script_fields definition
"""
def __init__(self, field_name, script, params = None):
self.fields={}
self.add_field(field_name, script, params or {})
def add_field(self, field_name, script, params = None):
"""
Add a field to script_fields
"""
data = {}
if script:
data['script'] = script
else:
raise ScriptFieldsError("Script is required for script_fields definition")
if params:
if isinstance(params, dict):
if len(params):
data['params'] = params
else:
raise ScriptFieldsError("Parameters should be a valid dictionary")
self.fields[field_name] = data
def add_parameter(self, field_name, param_name, param_value):
"""
Add a parameter to a field into script_fields
The ScriptFields object will be returned, so calls to this can be chained.
"""
try:
self.fields[field_name]['params'][param_name] = param_value
except Exception as ex:
raise ScriptFieldsError("Error adding parameter %s with value %s :%s" % (param_name,param_value,ex))
return self
def serialize(self):
return self.fields
|
<commit_before><commit_msg>Add ScriptFields object used as parameter script_fields of Search object<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Armando Guereca'
class ScriptFields:
_internal_name = "script_fields"
"""
This object creates the script_fields definition
"""
def __init__(self, field_name, script, params = None):
self.fields={}
self.add_field(field_name, script, params or {})
def add_field(self, field_name, script, params = None):
"""
Add a field to script_fields
"""
data = {}
if script:
data['script'] = script
else:
raise ScriptFieldsError("Script is required for script_fields definition")
if params:
if isinstance(params, dict):
if len(params):
data['params'] = params
else:
raise ScriptFieldsError("Parameters should be a valid dictionary")
self.fields[field_name] = data
def add_parameter(self, field_name, param_name, param_value):
"""
Add a parameter to a field into script_fields
The ScriptFields object will be returned, so calls to this can be chained.
"""
try:
self.fields[field_name]['params'][param_name] = param_value
except Exception as ex:
raise ScriptFieldsError("Error adding parameter %s with value %s :%s" % (param_name,param_value,ex))
return self
def serialize(self):
return self.fields
|
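An illustrative usage sketch for the class above, based on the commit subject. The construction, `add_field`, `add_parameter`, and `serialize` calls mirror the code as committed; wiring the object into a pyes `Search` is only hinted at, since the exact `Search` signature varies between pyes versions. Note also that `ScriptFieldsError` is raised but never defined or imported in this module as committed, so invalid input would currently surface as a `NameError`.
```python
# Scripts and field names here are made up for illustration.
script_fields = ScriptFields(
    "price_with_tax",
    script="doc['price'].value * factor",
    params={"factor": 1.2},
)
script_fields.add_field("name_length", script="doc['name'].value.length()")
script_fields.add_parameter("price_with_tax", "factor", 1.25)

print(script_fields.serialize())
# {'price_with_tax': {'script': "doc['price'].value * factor",
#                     'params': {'factor': 1.25}},
#  'name_length': {'script': "doc['name'].value.length()"}}

# Hypothetical wiring into a search request, per the commit subject:
# search = Search(query=..., script_fields=script_fields)
```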
|
e17ef12deb0d80f89f9791225f128bbad91667b2
|
polyaxon/pipelines/celery_task.py
|
polyaxon/pipelines/celery_task.py
|
from pipelines.models import Operation
from polyaxon.celery_api import CeleryTask
class OperationTask(CeleryTask):
"""Base operation celery task with basic logging."""
_operation = None
def run(self, *args, **kwargs):
self._operation = Operation.objects.get(id=kwargs['query_id'])
super(OperationTask, self).run(*args, **kwargs)
def on_failure(self, exc, task_id, args, kwargs, einfo):
"""Update query status and send email notification to a user"""
super(OperationTask, self).on_failure(exc, task_id, args, kwargs, einfo)
self._operation.on_failure()
def on_retry(self, exc, task_id, args, kwargs, einfo):
super(OperationTask, self).on_retry(exc, task_id, args, kwargs, einfo)
self._operation.on_retry()
def on_success(self, retval, task_id, args, kwargs):
"""Send email notification and a file, if requested to do so by a user"""
super(OperationTask, self).on_success(retval, task_id, args, kwargs)
self._operation.on_success()
|
Add base celery operation task
|
Add base celery operation task
|
Python
|
apache-2.0
|
polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon
|
Add base celery operation task
|
from pipelines.models import Operation
from polyaxon.celery_api import CeleryTask
class OperationTask(CeleryTask):
"""Base operation celery task with basic logging."""
_operation = None
def run(self, *args, **kwargs):
self._operation = Operation.objects.get(id=kwargs['query_id'])
super(OperationTask, self).run(*args, **kwargs)
def on_failure(self, exc, task_id, args, kwargs, einfo):
"""Update query status and send email notification to a user"""
super(OperationTask, self).on_failure(exc, task_id, args, kwargs, einfo)
self._operation.on_failure()
def on_retry(self, exc, task_id, args, kwargs, einfo):
super(OperationTask, self).on_retry(exc, task_id, args, kwargs, einfo)
self._operation.on_retry()
def on_success(self, retval, task_id, args, kwargs):
"""Send email notification and a file, if requested to do so by a user"""
super(OperationTask, self).on_success(retval, task_id, args, kwargs)
self._operation.on_success()
|
<commit_before><commit_msg>Add base celery operation task<commit_after>
|
from pipelines.models import Operation
from polyaxon.celery_api import CeleryTask
class OperationTask(CeleryTask):
"""Base operation celery task with basic logging."""
_operation = None
def run(self, *args, **kwargs):
self._operation = Operation.objects.get(id=kwargs['query_id'])
super(OperationTask, self).run(*args, **kwargs)
def on_failure(self, exc, task_id, args, kwargs, einfo):
"""Update query status and send email notification to a user"""
super(OperationTask, self).on_failure(exc, task_id, args, kwargs, einfo)
self._operation.on_failure()
def on_retry(self, exc, task_id, args, kwargs, einfo):
super(OperationTask, self).on_retry(exc, task_id, args, kwargs, einfo)
self._operation.on_retry()
def on_success(self, retval, task_id, args, kwargs):
"""Send email notification and a file, if requested to do so by a user"""
super(OperationTask, self).on_success(retval, task_id, args, kwargs)
self._operation.on_success()
|
Add base celery operation taskfrom pipelines.models import Operation
from polyaxon.celery_api import CeleryTask
class OperationTask(CeleryTask):
"""Base operation celery task with basic logging."""
_operation = None
def run(self, *args, **kwargs):
self._operation = Operation.objects.get(id=kwargs['query_id'])
super(OperationTask, self).run(*args, **kwargs)
def on_failure(self, exc, task_id, args, kwargs, einfo):
"""Update query status and send email notification to a user"""
super(OperationTask, self).on_failure(exc, task_id, args, kwargs, einfo)
self._operation.on_failure()
def on_retry(self, exc, task_id, args, kwargs, einfo):
super(OperationTask, self).on_retry(exc, task_id, args, kwargs, einfo)
self._operation.on_retry()
def on_success(self, retval, task_id, args, kwargs):
"""Send email notification and a file, if requested to do so by a user"""
super(OperationTask, self).on_success(retval, task_id, args, kwargs)
self._operation.on_success()
|
<commit_before><commit_msg>Add base celery operation task<commit_after>from pipelines.models import Operation
from polyaxon.celery_api import CeleryTask
class OperationTask(CeleryTask):
"""Base operation celery task with basic logging."""
_operation = None
def run(self, *args, **kwargs):
self._operation = Operation.objects.get(id=kwargs['query_id'])
super(OperationTask, self).run(*args, **kwargs)
def on_failure(self, exc, task_id, args, kwargs, einfo):
"""Update query status and send email notification to a user"""
super(OperationTask, self).on_failure(exc, task_id, args, kwargs, einfo)
self._operation.on_failure()
def on_retry(self, exc, task_id, args, kwargs, einfo):
super(OperationTask, self).on_retry(exc, task_id, args, kwargs, einfo)
self._operation.on_retry()
def on_success(self, retval, task_id, args, kwargs):
"""Send email notification and a file, if requested to do so by a user"""
super(OperationTask, self).on_success(retval, task_id, args, kwargs)
self._operation.on_success()
|
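A sketch of how a concrete task might build on the base class above. The subclass name, the work it performs, and the Celery registration details are assumptions for illustration; the point is that subclasses reuse the status bookkeeping (`on_failure` / `on_retry` / `on_success`) from `OperationTask` and only add their own `run` logic.
```python
class ExecutePipelineStep(OperationTask):
    """Hypothetical task running the work attached to a single operation."""

    def run(self, *args, **kwargs):
        # The parent loads self._operation from kwargs['query_id'] before
        # delegating to CeleryTask.run.
        super(ExecutePipelineStep, self).run(*args, **kwargs)
        # ... perform the actual work for self._operation here ...
```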
|
d13c21f2b0d6a0b72a3ba2f3e24d198d644737ec
|
sdks/python/apache_beam/runners/portability/expansion_service_main.py
|
sdks/python/apache_beam/runners/portability/expansion_service_main.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
import argparse
import logging
import signal
import sys
import grpc
from apache_beam.pipeline import PipelineOptions
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_expansion_api_pb2_grpc
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import expansion_service
from apache_beam.transforms import fully_qualified_named_transform
from apache_beam.utils import thread_pool_executor
_LOGGER = logging.getLogger(__name__)
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
'-p', '--port', type=int, help='port on which to serve the job api')
parser.add_argument('--fully_qualified_name_glob', default=None)
known_args, pipeline_args = parser.parse_known_args(argv)
pipeline_options = PipelineOptions(
pipeline_args + ["--experiments=beam_fn_api", "--sdk_location=container"])
with fully_qualified_named_transform.FullyQualifiedNamedTransform.with_filter(
known_args.fully_qualified_name_glob):
server = grpc.server(thread_pool_executor.shared_unbounded_instance())
beam_expansion_api_pb2_grpc.add_ExpansionServiceServicer_to_server(
expansion_service.ExpansionServiceServicer(pipeline_options), server)
beam_artifact_api_pb2_grpc.add_ArtifactRetrievalServiceServicer_to_server(
artifact_service.ArtifactRetrievalService(
artifact_service.BeamFilesystemHandler(None).file_reader),
server)
server.add_insecure_port('localhost:{}'.format(known_args.port))
server.start()
_LOGGER.info('Listening for expansion requests at %d', known_args.port)
def cleanup(unused_signum, unused_frame):
_LOGGER.info('Shutting down expansion service.')
server.stop(None)
signal.signal(signal.SIGTERM, cleanup)
signal.signal(signal.SIGINT, cleanup)
# blocking main thread forever.
signal.pause()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
main(sys.argv)
|
Add Python expansion service entry point.
|
Add Python expansion service entry point.
|
Python
|
apache-2.0
|
robertwb/incubator-beam,chamikaramj/beam,chamikaramj/beam,lukecwik/incubator-beam,apache/beam,lukecwik/incubator-beam,robertwb/incubator-beam,robertwb/incubator-beam,chamikaramj/beam,apache/beam,lukecwik/incubator-beam,apache/beam,chamikaramj/beam,chamikaramj/beam,apache/beam,lukecwik/incubator-beam,robertwb/incubator-beam,robertwb/incubator-beam,chamikaramj/beam,apache/beam,apache/beam,apache/beam,chamikaramj/beam,lukecwik/incubator-beam,robertwb/incubator-beam,lukecwik/incubator-beam,apache/beam,apache/beam,chamikaramj/beam,apache/beam,robertwb/incubator-beam,chamikaramj/beam,apache/beam,chamikaramj/beam,lukecwik/incubator-beam,robertwb/incubator-beam,robertwb/incubator-beam,lukecwik/incubator-beam,lukecwik/incubator-beam,robertwb/incubator-beam,lukecwik/incubator-beam
|
Add Python expansion service entry point.
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
import argparse
import logging
import signal
import sys
import grpc
from apache_beam.pipeline import PipelineOptions
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_expansion_api_pb2_grpc
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import expansion_service
from apache_beam.transforms import fully_qualified_named_transform
from apache_beam.utils import thread_pool_executor
_LOGGER = logging.getLogger(__name__)
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
'-p', '--port', type=int, help='port on which to serve the job api')
parser.add_argument('--fully_qualified_name_glob', default=None)
known_args, pipeline_args = parser.parse_known_args(argv)
pipeline_options = PipelineOptions(
pipeline_args + ["--experiments=beam_fn_api", "--sdk_location=container"])
with fully_qualified_named_transform.FullyQualifiedNamedTransform.with_filter(
known_args.fully_qualified_name_glob):
server = grpc.server(thread_pool_executor.shared_unbounded_instance())
beam_expansion_api_pb2_grpc.add_ExpansionServiceServicer_to_server(
expansion_service.ExpansionServiceServicer(pipeline_options), server)
beam_artifact_api_pb2_grpc.add_ArtifactRetrievalServiceServicer_to_server(
artifact_service.ArtifactRetrievalService(
artifact_service.BeamFilesystemHandler(None).file_reader),
server)
server.add_insecure_port('localhost:{}'.format(known_args.port))
server.start()
_LOGGER.info('Listening for expansion requests at %d', known_args.port)
def cleanup(unused_signum, unused_frame):
_LOGGER.info('Shutting down expansion service.')
server.stop(None)
signal.signal(signal.SIGTERM, cleanup)
signal.signal(signal.SIGINT, cleanup)
# blocking main thread forever.
signal.pause()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
main(sys.argv)
|
<commit_before><commit_msg>Add Python expansion service entry point.<commit_after>
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
import argparse
import logging
import signal
import sys
import grpc
from apache_beam.pipeline import PipelineOptions
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_expansion_api_pb2_grpc
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import expansion_service
from apache_beam.transforms import fully_qualified_named_transform
from apache_beam.utils import thread_pool_executor
_LOGGER = logging.getLogger(__name__)
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
'-p', '--port', type=int, help='port on which to serve the job api')
parser.add_argument('--fully_qualified_name_glob', default=None)
known_args, pipeline_args = parser.parse_known_args(argv)
pipeline_options = PipelineOptions(
pipeline_args + ["--experiments=beam_fn_api", "--sdk_location=container"])
with fully_qualified_named_transform.FullyQualifiedNamedTransform.with_filter(
known_args.fully_qualified_name_glob):
server = grpc.server(thread_pool_executor.shared_unbounded_instance())
beam_expansion_api_pb2_grpc.add_ExpansionServiceServicer_to_server(
expansion_service.ExpansionServiceServicer(pipeline_options), server)
beam_artifact_api_pb2_grpc.add_ArtifactRetrievalServiceServicer_to_server(
artifact_service.ArtifactRetrievalService(
artifact_service.BeamFilesystemHandler(None).file_reader),
server)
server.add_insecure_port('localhost:{}'.format(known_args.port))
server.start()
_LOGGER.info('Listening for expansion requests at %d', known_args.port)
def cleanup(unused_signum, unused_frame):
_LOGGER.info('Shutting down expansion service.')
server.stop(None)
signal.signal(signal.SIGTERM, cleanup)
signal.signal(signal.SIGINT, cleanup)
# blocking main thread forever.
signal.pause()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
main(sys.argv)
|
Add Python expansion service entry point.#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
import argparse
import logging
import signal
import sys
import grpc
from apache_beam.pipeline import PipelineOptions
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_expansion_api_pb2_grpc
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import expansion_service
from apache_beam.transforms import fully_qualified_named_transform
from apache_beam.utils import thread_pool_executor
_LOGGER = logging.getLogger(__name__)
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
'-p', '--port', type=int, help='port on which to serve the job api')
parser.add_argument('--fully_qualified_name_glob', default=None)
known_args, pipeline_args = parser.parse_known_args(argv)
pipeline_options = PipelineOptions(
pipeline_args + ["--experiments=beam_fn_api", "--sdk_location=container"])
with fully_qualified_named_transform.FullyQualifiedNamedTransform.with_filter(
known_args.fully_qualified_name_glob):
server = grpc.server(thread_pool_executor.shared_unbounded_instance())
beam_expansion_api_pb2_grpc.add_ExpansionServiceServicer_to_server(
expansion_service.ExpansionServiceServicer(pipeline_options), server)
beam_artifact_api_pb2_grpc.add_ArtifactRetrievalServiceServicer_to_server(
artifact_service.ArtifactRetrievalService(
artifact_service.BeamFilesystemHandler(None).file_reader),
server)
server.add_insecure_port('localhost:{}'.format(known_args.port))
server.start()
_LOGGER.info('Listening for expansion requests at %d', known_args.port)
def cleanup(unused_signum, unused_frame):
_LOGGER.info('Shutting down expansion service.')
server.stop(None)
signal.signal(signal.SIGTERM, cleanup)
signal.signal(signal.SIGINT, cleanup)
# blocking main thread forever.
signal.pause()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
main(sys.argv)
|
<commit_before><commit_msg>Add Python expansion service entry point.<commit_after>#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
import argparse
import logging
import signal
import sys
import grpc
from apache_beam.pipeline import PipelineOptions
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_expansion_api_pb2_grpc
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import expansion_service
from apache_beam.transforms import fully_qualified_named_transform
from apache_beam.utils import thread_pool_executor
_LOGGER = logging.getLogger(__name__)
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
'-p', '--port', type=int, help='port on which to serve the job api')
parser.add_argument('--fully_qualified_name_glob', default=None)
known_args, pipeline_args = parser.parse_known_args(argv)
pipeline_options = PipelineOptions(
pipeline_args + ["--experiments=beam_fn_api", "--sdk_location=container"])
with fully_qualified_named_transform.FullyQualifiedNamedTransform.with_filter(
known_args.fully_qualified_name_glob):
server = grpc.server(thread_pool_executor.shared_unbounded_instance())
beam_expansion_api_pb2_grpc.add_ExpansionServiceServicer_to_server(
expansion_service.ExpansionServiceServicer(pipeline_options), server)
beam_artifact_api_pb2_grpc.add_ArtifactRetrievalServiceServicer_to_server(
artifact_service.ArtifactRetrievalService(
artifact_service.BeamFilesystemHandler(None).file_reader),
server)
server.add_insecure_port('localhost:{}'.format(known_args.port))
server.start()
_LOGGER.info('Listening for expansion requests at %d', known_args.port)
def cleanup(unused_signum, unused_frame):
_LOGGER.info('Shutting down expansion service.')
server.stop(None)
signal.signal(signal.SIGTERM, cleanup)
signal.signal(signal.SIGINT, cleanup)
# blocking main thread forever.
signal.pause()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
main(sys.argv)
|
|
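For reference, assuming the entry point above is saved as expansion_service_main.py (the filename is a guess, not taken from the record), it could be started with something like:
python expansion_service_main.py --port 8097 --fully_qualified_name_glob '*'
Only the -p/--port and --fully_qualified_name_glob flags come from the argparse definition in the record; the port value is arbitrary. Once running, pipelines in other SDKs can point their cross-language transforms at localhost on that port.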
c13307bb4dd28b06bc1c4bb0c8182bed41a2e608
|
corehq/apps/sms/management/commands/set_backend_ids.py
|
corehq/apps/sms/management/commands/set_backend_ids.py
|
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from corehq.apps.sms.mixin import VerifiedNumber
class Command(BaseCommand):
args = 'domain [backend_id] [--test]'
help = ('Updates the backend_id on all VerifiedNumber entries for the '
'given domain. If backend_id is not specified, it is set to None. '
'VerifiedNumbers belonging to cases are not processed as the '
'contact_backend_id case property must be updated to properly '
'reflect that.')
option_list = BaseCommand.option_list + (
make_option('--test',
action='store_true',
dest='test',
default=False,
help=('Include this option to only print the backend_id '
'discrepancies and not update them.')),
)
def handle(self, *args, **options):
if len(args) == 0:
raise CommandError('Usage: python manage.py set_backend_ids domain [backend_id] [--test]')
domain = args[0]
if len(args) > 1:
backend_id = args[1]
else:
backend_id = None
test_only = options['test']
for vn in VerifiedNumber.by_domain(domain):
if (not vn.backend_id) and (not backend_id):
pass
elif vn.backend_id == backend_id:
pass
elif test_only:
print '%s %s, number %s has backend %s instead of %s' % \
(vn.owner_doc_type, vn.owner_id, vn.phone_number,
'None' if vn.backend_id is None else "'%s'" % vn.backend_id, backend_id)
else:
if vn.owner_doc_type == "CommCareCase":
print 'Cannot update backend_id for %s because it is a case' % vn.owner_id
else:
print 'Updating backend_id from %s to %s for %s %s, number %s' % \
(vn.backend_id, backend_id, vn.owner_doc_type, vn.owner_id, vn.phone_number)
vn.backend_id = backend_id
vn.save()
|
Add script for setting contact-level backend ids in bulk
|
Add script for setting contact-level backend ids in bulk
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq
|
Add script for setting contact-level backend ids in bulk
|
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from corehq.apps.sms.mixin import VerifiedNumber
class Command(BaseCommand):
args = 'domain [backend_id] [--test]'
help = ('Updates the backend_id on all VerifiedNumber entries for the '
'given domain. If backend_id is not specified, it is set to None. '
'VerifiedNumbers belonging to cases are not processed as the '
'contact_backend_id case property must be updated to properly '
'reflect that.')
option_list = BaseCommand.option_list + (
make_option('--test',
action='store_true',
dest='test',
default=False,
help=('Include this option to only print the backend_id '
'discrepancies and not update them.')),
)
def handle(self, *args, **options):
if len(args) == 0:
raise CommandError('Usage: python manage.py set_backend_ids domain [backend_id] [--test]')
domain = args[0]
if len(args) > 1:
backend_id = args[1]
else:
backend_id = None
test_only = options['test']
for vn in VerifiedNumber.by_domain(domain):
if (not vn.backend_id) and (not backend_id):
pass
elif vn.backend_id == backend_id:
pass
elif test_only:
print '%s %s, number %s has backend %s instead of %s' % \
(vn.owner_doc_type, vn.owner_id, vn.phone_number,
'None' if vn.backend_id is None else "'%s'" % vn.backend_id, backend_id)
else:
if vn.owner_doc_type == "CommCareCase":
print 'Cannot update backend_id for %s because it is a case' % vn.owner_id
else:
print 'Updating backend_id from %s to %s for %s %s, number %s' % \
(vn.backend_id, backend_id, vn.owner_doc_type, vn.owner_id, vn.phone_number)
vn.backend_id = backend_id
vn.save()
|
<commit_before><commit_msg>Add script for setting contact-level backend ids in bulk<commit_after>
|
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from corehq.apps.sms.mixin import VerifiedNumber
class Command(BaseCommand):
args = 'domain [backend_id] [--test]'
help = ('Updates the backend_id on all VerifiedNumber entries for the '
'given domain. If backend_id is not specified, it is set to None. '
'VerifiedNumbers belonging to cases are not processed as the '
'contact_backend_id case property must be updated to properly '
'reflect that.')
option_list = BaseCommand.option_list + (
make_option('--test',
action='store_true',
dest='test',
default=False,
help=('Include this option to only print the backend_id '
'discrepancies and not update them.')),
)
def handle(self, *args, **options):
if len(args) == 0:
raise CommandError('Usage: python manage.py set_backend_ids domain [backend_id] [--test]')
domain = args[0]
if len(args) > 1:
backend_id = args[1]
else:
backend_id = None
test_only = options['test']
for vn in VerifiedNumber.by_domain(domain):
if (not vn.backend_id) and (not backend_id):
pass
elif vn.backend_id == backend_id:
pass
elif test_only:
print '%s %s, number %s has backend %s instead of %s' % \
(vn.owner_doc_type, vn.owner_id, vn.phone_number,
'None' if vn.backend_id is None else "'%s'" % vn.backend_id, backend_id)
else:
if vn.owner_doc_type == "CommCareCase":
print 'Cannot update backend_id for %s because it is a case' % vn.owner_id
else:
print 'Updating backend_id from %s to %s for %s %s, number %s' % \
(vn.backend_id, backend_id, vn.owner_doc_type, vn.owner_id, vn.phone_number)
vn.backend_id = backend_id
vn.save()
|
Add script for setting contact-level backend ids in bulkfrom django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from corehq.apps.sms.mixin import VerifiedNumber
class Command(BaseCommand):
args = 'domain [backend_id] [--test]'
help = ('Updates the backend_id on all VerifiedNumber entries for the '
'given domain. If backend_id is not specified, it is set to None. '
'VerifiedNumbers belonging to cases are not processed as the '
'contact_backend_id case property must be updated to properly '
'reflect that.')
option_list = BaseCommand.option_list + (
make_option('--test',
action='store_true',
dest='test',
default=False,
help=('Include this option to only print the backend_id '
'discrepancies and not update them.')),
)
def handle(self, *args, **options):
if len(args) == 0:
raise CommandError('Usage: python manage.py set_backend_ids domain [backend_id] [--test]')
domain = args[0]
if len(args) > 1:
backend_id = args[1]
else:
backend_id = None
test_only = options['test']
for vn in VerifiedNumber.by_domain(domain):
if (not vn.backend_id) and (not backend_id):
pass
elif vn.backend_id == backend_id:
pass
elif test_only:
print '%s %s, number %s has backend %s instead of %s' % \
(vn.owner_doc_type, vn.owner_id, vn.phone_number,
'None' if vn.backend_id is None else "'%s'" % vn.backend_id, backend_id)
else:
if vn.owner_doc_type == "CommCareCase":
print 'Cannot update backend_id for %s because it is a case' % vn.owner_id
else:
print 'Updating backend_id from %s to %s for %s %s, number %s' % \
(vn.backend_id, backend_id, vn.owner_doc_type, vn.owner_id, vn.phone_number)
vn.backend_id = backend_id
vn.save()
|
<commit_before><commit_msg>Add script for setting contact-level backend ids in bulk<commit_after>from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from corehq.apps.sms.mixin import VerifiedNumber
class Command(BaseCommand):
args = 'domain [backend_id] [--test]'
help = ('Updates the backend_id on all VerifiedNumber entries for the '
'given domain. If backend_id is not specified, it is set to None. '
'VerifiedNumbers belonging to cases are not processed as the '
'contact_backend_id case property must be updated to properly '
'reflect that.')
option_list = BaseCommand.option_list + (
make_option('--test',
action='store_true',
dest='test',
default=False,
help=('Include this option to only print the backend_id '
'discrepancies and not update them.')),
)
def handle(self, *args, **options):
if len(args) == 0:
raise CommandError('Usage: python manage.py set_backend_ids domain [backend_id] [--test]')
domain = args[0]
if len(args) > 1:
backend_id = args[1]
else:
backend_id = None
test_only = options['test']
for vn in VerifiedNumber.by_domain(domain):
if (not vn.backend_id) and (not backend_id):
pass
elif vn.backend_id == backend_id:
pass
elif test_only:
print '%s %s, number %s has backend %s instead of %s' % \
(vn.owner_doc_type, vn.owner_id, vn.phone_number,
'None' if vn.backend_id is None else "'%s'" % vn.backend_id, backend_id)
else:
if vn.owner_doc_type == "CommCareCase":
print 'Cannot update backend_id for %s because it is a case' % vn.owner_id
else:
print 'Updating backend_id from %s to %s for %s %s, number %s' % \
(vn.backend_id, backend_id, vn.owner_doc_type, vn.owner_id, vn.phone_number)
vn.backend_id = backend_id
vn.save()
|
|
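Going by the usage string embedded in the command above, a dry run against a hypothetical domain and backend could look like:
python manage.py set_backend_ids example-domain MOBILE_BACKEND --test
which only prints discrepancies; dropping --test applies the update, and omitting the backend id resets backend_id to None for the domain's verified numbers (case-owned numbers are still skipped). The domain and backend names here are placeholders, not values from the record.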
0f106ca30805a3541000f5129b203882ba7cfbb9
|
packages/adminrouter/extra/src/test-harness/tests/test_security.py
|
packages/adminrouter/extra/src/test-harness/tests/test_security.py
|
import pytest
import requests
class TestRedirect:
@pytest.mark.parametrize(
'path', ('/mesos_dns', '/net', '/exhibitor', '/mesos')
)
def test_redirect(self, master_ar_process, valid_user_header, path):
"""
URL's with no slash on end may redirect to the same URL with a
slash appended. If this redirection uses the Host header to write
the redirection, then it is susceptible to a client being tricked
into setting the Host header to a bad host, and then redirecting
the request (including an Authorization header) to the bad host.
"""
url = master_ar_process.make_url_from_path(path)
headers = valid_user_header.copy()
headers['Host'] = 'bad.host'
resp = requests.get(
url,
allow_redirects=False,
headers=headers
)
resp.raise_for_status()
if resp.status_code in (301, 302, 303, 307):
assert 'bad.host' not in resp.headers['Location']
assert resp.headers['Location'] == '//'
else:
assert resp.status_code == 200
|
Add tests to demonstrate bad Host header problem
|
Add tests to demonstrate bad Host header problem
|
Python
|
apache-2.0
|
dcos/dcos,GoelDeepak/dcos,kensipe/dcos,mesosphere-mergebot/dcos,kensipe/dcos,kensipe/dcos,dcos/dcos,dcos/dcos,mesosphere-mergebot/mergebot-test-dcos,dcos/dcos,GoelDeepak/dcos,mesosphere-mergebot/dcos,mesosphere-mergebot/mergebot-test-dcos,mesosphere-mergebot/mergebot-test-dcos,mesosphere-mergebot/dcos,GoelDeepak/dcos,dcos/dcos,kensipe/dcos,GoelDeepak/dcos,mesosphere-mergebot/mergebot-test-dcos,mesosphere-mergebot/dcos
|
Add tests to demonstrate bad Host header problem
|
import pytest
import requests
class TestRedirect:
@pytest.mark.parametrize(
'path', ('/mesos_dns', '/net', '/exhibitor', '/mesos')
)
def test_redirect(self, master_ar_process, valid_user_header, path):
"""
URL's with no slash on end may redirect to the same URL with a
slash appended. If this redirection uses the Host header to write
the redirection, then it is susceptible to a client being tricked
into setting the Host header to a bad host, and then redirecting
the request (including an Authorization header) to the bad host.
"""
url = master_ar_process.make_url_from_path(path)
headers = valid_user_header.copy()
headers['Host'] = 'bad.host'
resp = requests.get(
url,
allow_redirects=False,
headers=headers
)
resp.raise_for_status()
if resp.status_code in (301, 302, 303, 307):
assert 'bad.host' not in resp.headers['Location']
assert resp.headers['Location'] == '//'
else:
assert resp.status_code == 200
|
<commit_before><commit_msg>Add tests to demonstrate bad Host header problem<commit_after>
|
import pytest
import requests
class TestRedirect:
@pytest.mark.parametrize(
'path', ('/mesos_dns', '/net', '/exhibitor', '/mesos')
)
def test_redirect(self, master_ar_process, valid_user_header, path):
"""
URL's with no slash on end may redirect to the same URL with a
slash appended. If this redirection uses the Host header to write
the redirection, then it is susceptible to a client being tricked
into setting the Host header to a bad host, and then redirecting
the request (including an Authorization header) to the bad host.
"""
url = master_ar_process.make_url_from_path(path)
headers = valid_user_header.copy()
headers['Host'] = 'bad.host'
resp = requests.get(
url,
allow_redirects=False,
headers=headers
)
resp.raise_for_status()
if resp.status_code in (301, 302, 303, 307):
assert 'bad.host' not in resp.headers['Location']
assert resp.headers['Location'] == '//'
else:
assert resp.status_code == 200
|
Add tests to demonstrate bad Host header problemimport pytest
import requests
class TestRedirect:
@pytest.mark.parametrize(
'path', ('/mesos_dns', '/net', '/exhibitor', '/mesos')
)
def test_redirect(self, master_ar_process, valid_user_header, path):
"""
URL's with no slash on end may redirect to the same URL with a
slash appended. If this redirection uses the Host header to write
the redirection, then it is susceptible to a client being tricked
into setting the Host header to a bad host, and then redirecting
the request (including an Authorization header) to the bad host.
"""
url = master_ar_process.make_url_from_path(path)
headers = valid_user_header.copy()
headers['Host'] = 'bad.host'
resp = requests.get(
url,
allow_redirects=False,
headers=headers
)
resp.raise_for_status()
if resp.status_code in (301, 302, 303, 307):
assert 'bad.host' not in resp.headers['Location']
assert resp.headers['Location'] == '//'
else:
assert resp.status_code == 200
|
<commit_before><commit_msg>Add tests to demonstrate bad Host header problem<commit_after>import pytest
import requests
class TestRedirect:
@pytest.mark.parametrize(
'path', ('/mesos_dns', '/net', '/exhibitor', '/mesos')
)
def test_redirect(self, master_ar_process, valid_user_header, path):
"""
URL's with no slash on end may redirect to the same URL with a
slash appended. If this redirection uses the Host header to write
the redirection, then it is susceptible to a client being tricked
into setting the Host header to a bad host, and then redirecting
the request (including an Authorization header) to the bad host.
"""
url = master_ar_process.make_url_from_path(path)
headers = valid_user_header.copy()
headers['Host'] = 'bad.host'
resp = requests.get(
url,
allow_redirects=False,
headers=headers
)
resp.raise_for_status()
if resp.status_code in (301, 302, 303, 307):
assert 'bad.host' not in resp.headers['Location']
assert resp.headers['Location'] == '//'
else:
assert resp.status_code == 200
|
|
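The same probe can be run by hand against a live master. The sketch below is only an illustration: MASTER_URL and AR_AUTH_HEADER are placeholder environment variables standing in for the master address and for whatever Authorization value the test fixture's valid_user_header supplies.
import os
import requests
# Forge the Host header and disable redirect following, as the test does.
resp = requests.get(
    os.environ.get('MASTER_URL', 'http://127.0.0.1') + '/exhibitor',
    headers={'Host': 'bad.host',
             'Authorization': os.environ.get('AR_AUTH_HEADER', '')},
    allow_redirects=False,
)
# A safe response either serves the resource (200) or redirects with a
# Location that does not echo the forged Host header.
print(resp.status_code, resp.headers.get('Location'))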
f5a6b3f36e676bddadd0c9f04678989c8fca019a
|
test/unit/test_easy_click_lots.py
|
test/unit/test_easy_click_lots.py
|
import unittest
import sys
sys.path.append('../..') # TODO any better way?
from ebroker.EasyClickLots import EasyClickLots
class TestConfig(unittest.TestCase):
def setUp(self):
self.lots = EasyClickLots()
def test_cez_lots(self):
cez_lot = self.lots.get_ec_lot(EasyClickLots.CEZ)
self.assertEqual(cez_lot.lot, 50)
def test_rounding(self):
cez_lot = self.lots.get_ec_lot(EasyClickLots.CEZ)
self.assertEqual(cez_lot.round_lots(80), 50)
self.assertEqual(cez_lot.round_lots(120), 100)
if __name__ == '__main__':
unittest.main()
|
Add test for EC lots
|
Add test for EC lots
|
Python
|
mit
|
vjuranek/e-broker-client
|
Add test for EC lots
|
import unittest
import sys
sys.path.append('../..') # TODO any better way?
from ebroker.EasyClickLots import EasyClickLots
class TestConfig(unittest.TestCase):
def setUp(self):
self.lots = EasyClickLots()
def test_cez_lots(self):
cez_lot = self.lots.get_ec_lot(EasyClickLots.CEZ)
self.assertEqual(cez_lot.lot, 50)
def test_rounding(self):
cez_lot = self.lots.get_ec_lot(EasyClickLots.CEZ)
self.assertEqual(cez_lot.round_lots(80), 50)
self.assertEqual(cez_lot.round_lots(120), 100)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for EC lots<commit_after>
|
import unittest
import sys
sys.path.append('../..') # TODO any better way?
from ebroker.EasyClickLots import EasyClickLots
class TestConfig(unittest.TestCase):
def setUp(self):
self.lots = EasyClickLots()
def test_cez_lots(self):
cez_lot = self.lots.get_ec_lot(EasyClickLots.CEZ)
self.assertEqual(cez_lot.lot, 50)
def test_rounding(self):
cez_lot = self.lots.get_ec_lot(EasyClickLots.CEZ)
self.assertEqual(cez_lot.round_lots(80), 50)
self.assertEqual(cez_lot.round_lots(120), 100)
if __name__ == '__main__':
unittest.main()
|
Add test for EC lotsimport unittest
import sys
sys.path.append('../..') # TODO any better way?
from ebroker.EasyClickLots import EasyClickLots
class TestConfig(unittest.TestCase):
def setUp(self):
self.lots = EasyClickLots()
def test_cez_lots(self):
cez_lot = self.lots.get_ec_lot(EasyClickLots.CEZ)
self.assertEqual(cez_lot.lot, 50)
def test_rounding(self):
cez_lot = self.lots.get_ec_lot(EasyClickLots.CEZ)
self.assertEqual(cez_lot.round_lots(80), 50)
self.assertEqual(cez_lot.round_lots(120), 100)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for EC lots<commit_after>import unittest
import sys
sys.path.append('../..') # TODO any better way?
from ebroker.EasyClickLots import EasyClickLots
class TestConfig(unittest.TestCase):
def setUp(self):
self.lots = EasyClickLots()
def test_cez_lots(self):
cez_lot = self.lots.get_ec_lot(EasyClickLots.CEZ)
self.assertEqual(cez_lot.lot, 50)
def test_rounding(self):
cez_lot = self.lots.get_ec_lot(EasyClickLots.CEZ)
self.assertEqual(cez_lot.round_lots(80), 50)
self.assertEqual(cez_lot.round_lots(120), 100)
if __name__ == '__main__':
unittest.main()
|
|
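The assertions above only pin down the rounding behaviour indirectly; a minimal sketch of what they imply (not the actual EasyClickLots implementation, which is not shown in the record) is flooring to a whole multiple of the lot size:
def round_lots(amount, lot=50):
    # Floor the requested amount to a whole multiple of the lot size.
    return (amount // lot) * lot
assert round_lots(80) == 50    # mirrors round_lots(80) -> 50 with the CEZ lot of 50
assert round_lots(120) == 100  # mirrors round_lots(120) -> 100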
a9a3d77c3969889dafaac5b6dd1c133f9510496f
|
eulathingy/thingys/migrations/0004_remove_thingysection_section_name.py
|
eulathingy/thingys/migrations/0004_remove_thingysection_section_name.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('thingys', '0003_thingysentence'),
]
operations = [
migrations.RemoveField(
model_name='thingysection',
name='section_name',
),
]
|
Remove section name - migrate
|
Remove section name - migrate
|
Python
|
mit
|
DigitalMockingbird/EULAThingy,DigitalMockingbird/EULAThingy
|
Remove section name - migrate
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('thingys', '0003_thingysentence'),
]
operations = [
migrations.RemoveField(
model_name='thingysection',
name='section_name',
),
]
|
<commit_before><commit_msg>Remove section name - migrate<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('thingys', '0003_thingysentence'),
]
operations = [
migrations.RemoveField(
model_name='thingysection',
name='section_name',
),
]
|
Remove section name - migrate# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('thingys', '0003_thingysentence'),
]
operations = [
migrations.RemoveField(
model_name='thingysection',
name='section_name',
),
]
|
<commit_before><commit_msg>Remove section name - migrate<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('thingys', '0003_thingysentence'),
]
operations = [
migrations.RemoveField(
model_name='thingysection',
name='section_name',
),
]
|
|
3fdd2de650c24933ae32eb07b85ede34165702fc
|
py/maximum-length-of-repeated-subarray.py
|
py/maximum-length-of-repeated-subarray.py
|
class Solution(object):
def findLength(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: int
"""
lA, lB = len(A), len(B)
if lA > lB:
lA, lB = lB, lA
A, B = B, A
ans = 0
prev = [0] * (lA + 1)
for b in B:
ary = [0] * (lA + 1)
for j, a in enumerate(A, 1):
if a == b:
ary[j] = prev[j - 1] + 1
ans = max(ans, ary[j])
prev = ary
return ans
|
Add py solution for 718. Maximum Length of Repeated Subarray
|
Add py solution for 718. Maximum Length of Repeated Subarray
718. Maximum Length of Repeated Subarray: https://leetcode.com/problems/maximum-length-of-repeated-subarray/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 718. Maximum Length of Repeated Subarray
718. Maximum Length of Repeated Subarray: https://leetcode.com/problems/maximum-length-of-repeated-subarray/
|
class Solution(object):
def findLength(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: int
"""
lA, lB = len(A), len(B)
if lA > lB:
lA, lB = lB, lA
A, B = B, A
ans = 0
prev = [0] * (lA + 1)
for b in B:
ary = [0] * (lA + 1)
for j, a in enumerate(A, 1):
if a == b:
ary[j] = prev[j - 1] + 1
ans = max(ans, ary[j])
prev = ary
return ans
|
<commit_before><commit_msg>Add py solution for 718. Maximum Length of Repeated Subarray
718. Maximum Length of Repeated Subarray: https://leetcode.com/problems/maximum-length-of-repeated-subarray/<commit_after>
|
class Solution(object):
def findLength(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: int
"""
lA, lB = len(A), len(B)
if lA > lB:
lA, lB = lB, lA
A, B = B, A
ans = 0
prev = [0] * (lA + 1)
for b in B:
ary = [0] * (lA + 1)
for j, a in enumerate(A, 1):
if a == b:
ary[j] = prev[j - 1] + 1
ans = max(ans, ary[j])
prev = ary
return ans
|
Add py solution for 718. Maximum Length of Repeated Subarray
718. Maximum Length of Repeated Subarray: https://leetcode.com/problems/maximum-length-of-repeated-subarray/class Solution(object):
def findLength(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: int
"""
lA, lB = len(A), len(B)
if lA > lB:
lA, lB = lB, lA
A, B = B, A
ans = 0
prev = [0] * (lA + 1)
for b in B:
ary = [0] * (lA + 1)
for j, a in enumerate(A, 1):
if a == b:
ary[j] = prev[j - 1] + 1
ans = max(ans, ary[j])
prev = ary
return ans
|
<commit_before><commit_msg>Add py solution for 718. Maximum Length of Repeated Subarray
718. Maximum Length of Repeated Subarray: https://leetcode.com/problems/maximum-length-of-repeated-subarray/<commit_after>class Solution(object):
def findLength(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: int
"""
lA, lB = len(A), len(B)
if lA > lB:
lA, lB = lB, lA
A, B = B, A
ans = 0
prev = [0] * (lA + 1)
for b in B:
ary = [0] * (lA + 1)
for j, a in enumerate(A, 1):
if a == b:
ary[j] = prev[j - 1] + 1
ans = max(ans, ary[j])
prev = ary
return ans
|
|
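The solution keeps a single rolling DP row: after swapping so that A is the shorter array, ary[j] holds the length of the common suffix ending at A[j-1] and at the current element b of B, extending prev[j-1] by one on a match. A quick sanity check with the example from the problem statement (longest common subarray [3, 2, 1]):
assert Solution().findLength([1, 2, 3, 2, 1], [3, 2, 1, 4, 7]) == 3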
94b7a913ca0d9ce5d4c58539954afef55d21c3a7
|
components/dash-core-components/tests/integration/dropdown/test_remove_option.py
|
components/dash-core-components/tests/integration/dropdown/test_remove_option.py
|
import json
from dash import Dash, html, dcc, Output, Input
from dash.exceptions import PreventUpdate
sample_dropdown_options = [
{"label": "New York City", "value": "NYC"},
{"label": "Montreal", "value": "MTL"},
{"label": "San Francisco", "value": "SF"},
]
def test_ddro001_remove_option_single(dash_dcc):
dropdown_options = sample_dropdown_options
app = Dash(__name__)
value = 'SF'
app.layout = html.Div([
dcc.Dropdown(
options=dropdown_options,
value=value,
id='dropdown',
),
html.Button('Remove option', id='remove'),
html.Div(id='value-output')
])
@app.callback(
Output('dropdown', 'options'),
[Input('remove', 'n_clicks')]
)
def on_click(n_clicks):
if not n_clicks:
raise PreventUpdate
return sample_dropdown_options[:-1]
@app.callback(
Output('value-output', 'children'),
[Input('dropdown', 'value')]
)
def on_change(val):
if not val:
raise PreventUpdate
return val or 'None'
dash_dcc.start_server(app)
btn = dash_dcc.wait_for_element('#remove')
btn.click()
dash_dcc.wait_for_text_to_equal('#value-output', 'None')
def test_ddro002_remove_option_multi(dash_dcc):
dropdown_options = sample_dropdown_options
app = Dash(__name__)
value = ['MTL', 'SF']
app.layout = html.Div([
dcc.Dropdown(
options=dropdown_options,
value=value,
multi=True,
id='dropdown',
),
html.Button('Remove option', id='remove'),
html.Div(id='value-output')
])
@app.callback(
Output('dropdown', 'options'),
[Input('remove', 'n_clicks')]
)
def on_click(n_clicks):
if not n_clicks:
raise PreventUpdate
return sample_dropdown_options[:-1]
@app.callback(
Output('value-output', 'children'),
[Input('dropdown', 'value')]
)
def on_change(val):
return json.dumps(val)
dash_dcc.start_server(app)
btn = dash_dcc.wait_for_element('#remove')
btn.click()
dash_dcc.wait_for_text_to_equal('#value-output', '["MTL"]')
|
Add test dropdown remove options.
|
Add test dropdown remove options.
|
Python
|
mit
|
plotly/dash,plotly/dash,plotly/dash,plotly/dash,plotly/dash
|
Add test dropdown remove options.
|
import json
from dash import Dash, html, dcc, Output, Input
from dash.exceptions import PreventUpdate
sample_dropdown_options = [
{"label": "New York City", "value": "NYC"},
{"label": "Montreal", "value": "MTL"},
{"label": "San Francisco", "value": "SF"},
]
def test_ddro001_remove_option_single(dash_dcc):
dropdown_options = sample_dropdown_options
app = Dash(__name__)
value = 'SF'
app.layout = html.Div([
dcc.Dropdown(
options=dropdown_options,
value=value,
id='dropdown',
),
html.Button('Remove option', id='remove'),
html.Div(id='value-output')
])
@app.callback(
Output('dropdown', 'options'),
[Input('remove', 'n_clicks')]
)
def on_click(n_clicks):
if not n_clicks:
raise PreventUpdate
return sample_dropdown_options[:-1]
@app.callback(
Output('value-output', 'children'),
[Input('dropdown', 'value')]
)
def on_change(val):
if not val:
raise PreventUpdate
return val or 'None'
dash_dcc.start_server(app)
btn = dash_dcc.wait_for_element('#remove')
btn.click()
dash_dcc.wait_for_text_to_equal('#value-output', 'None')
def test_ddro002_remove_option_multi(dash_dcc):
dropdown_options = sample_dropdown_options
app = Dash(__name__)
value = ['MTL', 'SF']
app.layout = html.Div([
dcc.Dropdown(
options=dropdown_options,
value=value,
multi=True,
id='dropdown',
),
html.Button('Remove option', id='remove'),
html.Div(id='value-output')
])
@app.callback(
Output('dropdown', 'options'),
[Input('remove', 'n_clicks')]
)
def on_click(n_clicks):
if not n_clicks:
raise PreventUpdate
return sample_dropdown_options[:-1]
@app.callback(
Output('value-output', 'children'),
[Input('dropdown', 'value')]
)
def on_change(val):
return json.dumps(val)
dash_dcc.start_server(app)
btn = dash_dcc.wait_for_element('#remove')
btn.click()
dash_dcc.wait_for_text_to_equal('#value-output', '["MTL"]')
|
<commit_before><commit_msg>Add test dropdown remove options.<commit_after>
|
import json
from dash import Dash, html, dcc, Output, Input
from dash.exceptions import PreventUpdate
sample_dropdown_options = [
{"label": "New York City", "value": "NYC"},
{"label": "Montreal", "value": "MTL"},
{"label": "San Francisco", "value": "SF"},
]
def test_ddro001_remove_option_single(dash_dcc):
dropdown_options = sample_dropdown_options
app = Dash(__name__)
value = 'SF'
app.layout = html.Div([
dcc.Dropdown(
options=dropdown_options,
value=value,
id='dropdown',
),
html.Button('Remove option', id='remove'),
html.Div(id='value-output')
])
@app.callback(
Output('dropdown', 'options'),
[Input('remove', 'n_clicks')]
)
def on_click(n_clicks):
if not n_clicks:
raise PreventUpdate
return sample_dropdown_options[:-1]
@app.callback(
Output('value-output', 'children'),
[Input('dropdown', 'value')]
)
def on_change(val):
if not val:
raise PreventUpdate
return val or 'None'
dash_dcc.start_server(app)
btn = dash_dcc.wait_for_element('#remove')
btn.click()
dash_dcc.wait_for_text_to_equal('#value-output', 'None')
def test_ddro002_remove_option_multi(dash_dcc):
dropdown_options = sample_dropdown_options
app = Dash(__name__)
value = ['MTL', 'SF']
app.layout = html.Div([
dcc.Dropdown(
options=dropdown_options,
value=value,
multi=True,
id='dropdown',
),
html.Button('Remove option', id='remove'),
html.Div(id='value-output')
])
@app.callback(
Output('dropdown', 'options'),
[Input('remove', 'n_clicks')]
)
def on_click(n_clicks):
if not n_clicks:
raise PreventUpdate
return sample_dropdown_options[:-1]
@app.callback(
Output('value-output', 'children'),
[Input('dropdown', 'value')]
)
def on_change(val):
return json.dumps(val)
dash_dcc.start_server(app)
btn = dash_dcc.wait_for_element('#remove')
btn.click()
dash_dcc.wait_for_text_to_equal('#value-output', '["MTL"]')
|
Add test dropdown remove options.import json
from dash import Dash, html, dcc, Output, Input
from dash.exceptions import PreventUpdate
sample_dropdown_options = [
{"label": "New York City", "value": "NYC"},
{"label": "Montreal", "value": "MTL"},
{"label": "San Francisco", "value": "SF"},
]
def test_ddro001_remove_option_single(dash_dcc):
dropdown_options = sample_dropdown_options
app = Dash(__name__)
value = 'SF'
app.layout = html.Div([
dcc.Dropdown(
options=dropdown_options,
value=value,
id='dropdown',
),
html.Button('Remove option', id='remove'),
html.Div(id='value-output')
])
@app.callback(
Output('dropdown', 'options'),
[Input('remove', 'n_clicks')]
)
def on_click(n_clicks):
if not n_clicks:
raise PreventUpdate
return sample_dropdown_options[:-1]
@app.callback(
Output('value-output', 'children'),
[Input('dropdown', 'value')]
)
def on_change(val):
if not val:
raise PreventUpdate
return val or 'None'
dash_dcc.start_server(app)
btn = dash_dcc.wait_for_element('#remove')
btn.click()
dash_dcc.wait_for_text_to_equal('#value-output', 'None')
def test_ddro002_remove_option_multi(dash_dcc):
dropdown_options = sample_dropdown_options
app = Dash(__name__)
value = ['MTL', 'SF']
app.layout = html.Div([
dcc.Dropdown(
options=dropdown_options,
value=value,
multi=True,
id='dropdown',
),
html.Button('Remove option', id='remove'),
html.Div(id='value-output')
])
@app.callback(
Output('dropdown', 'options'),
[Input('remove', 'n_clicks')]
)
def on_click(n_clicks):
if not n_clicks:
raise PreventUpdate
return sample_dropdown_options[:-1]
@app.callback(
Output('value-output', 'children'),
[Input('dropdown', 'value')]
)
def on_change(val):
return json.dumps(val)
dash_dcc.start_server(app)
btn = dash_dcc.wait_for_element('#remove')
btn.click()
dash_dcc.wait_for_text_to_equal('#value-output', '["MTL"]')
|
<commit_before><commit_msg>Add test dropdown remove options.<commit_after>import json
from dash import Dash, html, dcc, Output, Input
from dash.exceptions import PreventUpdate
sample_dropdown_options = [
{"label": "New York City", "value": "NYC"},
{"label": "Montreal", "value": "MTL"},
{"label": "San Francisco", "value": "SF"},
]
def test_ddro001_remove_option_single(dash_dcc):
dropdown_options = sample_dropdown_options
app = Dash(__name__)
value = 'SF'
app.layout = html.Div([
dcc.Dropdown(
options=dropdown_options,
value=value,
id='dropdown',
),
html.Button('Remove option', id='remove'),
html.Div(id='value-output')
])
@app.callback(
Output('dropdown', 'options'),
[Input('remove', 'n_clicks')]
)
def on_click(n_clicks):
if not n_clicks:
raise PreventUpdate
return sample_dropdown_options[:-1]
@app.callback(
Output('value-output', 'children'),
[Input('dropdown', 'value')]
)
def on_change(val):
if not val:
raise PreventUpdate
return val or 'None'
dash_dcc.start_server(app)
btn = dash_dcc.wait_for_element('#remove')
btn.click()
dash_dcc.wait_for_text_to_equal('#value-output', 'None')
def test_ddro002_remove_option_multi(dash_dcc):
dropdown_options = sample_dropdown_options
app = Dash(__name__)
value = ['MTL', 'SF']
app.layout = html.Div([
dcc.Dropdown(
options=dropdown_options,
value=value,
multi=True,
id='dropdown',
),
html.Button('Remove option', id='remove'),
html.Div(id='value-output')
])
@app.callback(
Output('dropdown', 'options'),
[Input('remove', 'n_clicks')]
)
def on_click(n_clicks):
if not n_clicks:
raise PreventUpdate
return sample_dropdown_options[:-1]
@app.callback(
Output('value-output', 'children'),
[Input('dropdown', 'value')]
)
def on_change(val):
return json.dumps(val)
dash_dcc.start_server(app)
btn = dash_dcc.wait_for_element('#remove')
btn.click()
dash_dcc.wait_for_text_to_equal('#value-output', '["MTL"]')
|
|
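Both cases depend on the dash.testing fixtures, so running them in isolation assumes the testing extras (pip install dash[testing]) and a WebDriver such as ChromeDriver are available; something like the following would select just these two tests:
pytest -k ddro components/dash-core-components/tests/integration/dropdown/test_remove_option.py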
8a72eff36b66492e17cfeb0383164a34dbf75ce0
|
addons/purchase/report/__init__.py
|
addons/purchase/report/__init__.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import order
import request_quotation
import purchase_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Fix useless import following the removal of rml purchase reports
|
[FIX] Fix useless import following the removal of rml purchase reports
bzr revid: openerp-sle@openerp-sle.home-20140214150700-2zuukk4ahs4q1zhs
|
Python
|
agpl-3.0
|
odoousers2014/odoo,nuncjo/odoo,markeTIC/OCB,oasiswork/odoo,andreparames/odoo,nexiles/odoo,Endika/odoo,Danisan/odoo-1,spadae22/odoo,0k/OpenUpgrade,grap/OpenUpgrade,bealdav/OpenUpgrade,abstract-open-solutions/OCB,mustafat/odoo-1,papouso/odoo,provaleks/o8,OpusVL/odoo,odooindia/odoo,sinbazhou/odoo,slevenhagen/odoo-npg,rdeheele/odoo,rahuldhote/odoo,zchking/odoo,MarcosCommunity/odoo,draugiskisprendimai/odoo,rubencabrera/odoo,alhashash/odoo,ramitalat/odoo,matrixise/odoo,slevenhagen/odoo,lsinfo/odoo,xujb/odoo,rubencabrera/odoo,eino-makitalo/odoo,bplancher/odoo,dkubiak789/odoo,blaggacao/OpenUpgrade,patmcb/odoo,TRESCLOUD/odoopub,gavin-feng/odoo,windedge/odoo,javierTerry/odoo,numerigraphe/odoo,ihsanudin/odoo,goliveirab/odoo,havt/odoo,lgscofield/odoo,gorjuce/odoo,kifcaliph/odoo,lsinfo/odoo,nuuuboo/odoo,syci/OCB,Ernesto99/odoo,bakhtout/odoo-educ,n0m4dz/odoo,sebalix/OpenUpgrade,Gitlab11/odoo,Endika/odoo,pplatek/odoo,alqfahad/odoo,hifly/OpenUpgrade,TRESCLOUD/odoopub,jaxkodex/odoo,joshuajan/odoo,datenbetrieb/odoo,bealdav/OpenUpgrade,dgzurita/odoo,havt/odoo,salaria/odoo,cysnake4713/odoo,juanalfonsopr/odoo,NL66278/OCB,BT-ojossen/odoo,guerrerocarlos/odoo,mkieszek/odoo,hip-odoo/odoo,mustafat/odoo-1,VielSoft/odoo,jolevq/odoopub,sysadminmatmoz/OCB,synconics/odoo,xujb/odoo,erkrishna9/odoo,BT-ojossen/odoo,windedge/odoo,tvibliani/odoo,mustafat/odoo-1,glovebx/odoo,luiseduardohdbackup/odoo,javierTerry/odoo,x111ong/odoo,TRESCLOUD/odoopub,elmerdpadilla/iv,fuhongliang/odoo,Eric-Zhong/odoo,ChanduERP/odoo,christophlsa/odoo,gvb/odoo,abenzbiria/clients_odoo,incaser/odoo-odoo,nexiles/odoo,x111ong/odoo,sergio-incaser/odoo,AuyaJackie/odoo,OpenUpgrade-dev/OpenUpgrade,rubencabrera/odoo,nhomar/odoo,RafaelTorrealba/odoo,addition-it-solutions/project-all,waytai/odoo,janocat/odoo,jiangzhixiao/odoo,ThinkOpen-Solutions/odoo,ApuliaSoftware/odoo,bkirui/odoo,bakhtout/odoo-educ,pedrobaeza/odoo,ygol/odoo,doomsterinc/odoo,kifcaliph/odoo,tvtsoft/odoo8,guerrerocarlos/odoo,goliveirab/odoo,lsinfo/odoo,fjbatresv/odoo,thanhacun/odoo,rowemoore/odoo,luiseduardohdbackup/odoo,pedrobaeza/OpenUpgrade,fjbatresv/odoo,ApuliaSoftware/odoo,cedk/odoo,QianBIG/odoo,tinkhaven-organization/odoo,klunwebale/odoo,luiseduardohdbackup/odoo,joshuajan/odoo,factorlibre/OCB,BT-rmartin/odoo,tarzan0820/odoo,abenzbiria/clients_odoo,makinacorpus/odoo,pplatek/odoo,tarzan0820/odoo,microcom/odoo,hip-odoo/odoo,naousse/odoo,alhashash/odoo,Noviat/odoo,inspyration/odoo,apanju/odoo,damdam-s/OpenUpgrade,fgesora/odoo,abdellatifkarroum/odoo,fgesora/odoo,Endika/odoo,windedge/odoo,dalegregory/odoo,GauravSahu/odoo,lsinfo/odoo,sysadminmatmoz/OCB,apanju/GMIO_Odoo,fuselock/odoo,bwrsandman/OpenUpgrade,JCA-Developpement/Odoo,xzYue/odoo,savoirfairelinux/odoo,Maspear/odoo,leorochael/odoo,MarcosCommunity/odoo,JGarcia-Panach/odoo,guewen/OpenUpgrade,factorlibre/OCB,PongPi/isl-odoo,shingonoide/odoo,abstract-open-solutions/OCB,abenzbiria/clients_odoo,Ernesto99/odoo,dgzurita/odoo,osvalr/odoo,rdeheele/odoo,pplatek/odoo,aviciimaxwell/odoo,alqfahad/odoo,cloud9UG/odoo,ingadhoc/odoo,fdvarela/odoo8,slevenhagen/odoo-npg,bwrsandman/OpenUpgrade,Antiun/odoo,apanju/GMIO_Odoo,Eric-Zhong/odoo,xzYue/odoo,n0m4dz/odoo,oihane/odoo,lightcn/odoo,javierTerry/odoo,NeovaHealth/odoo,luiseduardohdbackup/odoo,microcom/odoo,odoo-turkiye/odoo,storm-computers/odoo,colinnewell/odoo,shaufi/odoo,sadleader/odoo,dfang/odoo,kybriainfotech/iSocioCRM,nexiles/odoo,agrista/odoo-saas,ovnicraft/odoo,virgree/odoo,kittiu/odoo,csrocha/OpenUpgrade,andreparames/odoo,jaxkodex/odoo,oasiswork/odoo,fuselock/odoo,blaggacao/OpenUpgrade,syci/OCB,gsmart
way/odoo,florentx/OpenUpgrade,Ernesto99/odoo,dkubiak789/odoo,charbeljc/OCB,xujb/odoo,gvb/odoo,Nowheresly/odoo,BT-rmartin/odoo,apocalypsebg/odoo,kybriainfotech/iSocioCRM,kirca/OpenUpgrade,gorjuce/odoo,tangyiyong/odoo,erkrishna9/odoo,apocalypsebg/odoo,waytai/odoo,inspyration/odoo,makinacorpus/odoo,bguillot/OpenUpgrade,xzYue/odoo,alqfahad/odoo,arthru/OpenUpgrade,leoliujie/odoo,leorochael/odoo,bobisme/odoo,kirca/OpenUpgrade,incaser/odoo-odoo,draugiskisprendimai/odoo,grap/OpenUpgrade,rgeleta/odoo,jfpla/odoo,makinacorpus/odoo,mlaitinen/odoo,Bachaco-ve/odoo,gavin-feng/odoo,gorjuce/odoo,ramadhane/odoo,brijeshkesariya/odoo,bakhtout/odoo-educ,spadae22/odoo,luistorresm/odoo,goliveirab/odoo,wangjun/odoo,ubic135/odoo-design,JCA-Developpement/Odoo,realsaiko/odoo,tangyiyong/odoo,ShineFan/odoo,x111ong/odoo,bobisme/odoo,ThinkOpen-Solutions/odoo,dsfsdgsbngfggb/odoo,joariasl/odoo,datenbetrieb/odoo,VielSoft/odoo,luistorresm/odoo,n0m4dz/odoo,highco-groupe/odoo,gorjuce/odoo,ShineFan/odoo,synconics/odoo,thanhacun/odoo,nhomar/odoo-mirror,agrista/odoo-saas,sergio-incaser/odoo,nhomar/odoo-mirror,Danisan/odoo-1,leoliujie/odoo,nhomar/odoo,guerrerocarlos/odoo,colinnewell/odoo,addition-it-solutions/project-all,erkrishna9/odoo,hanicker/odoo,tvibliani/odoo,Gitlab11/odoo,sve-odoo/odoo,nhomar/odoo,JonathanStein/odoo,ecosoft-odoo/odoo,dkubiak789/odoo,Noviat/odoo,doomsterinc/odoo,juanalfonsopr/odoo,jusdng/odoo,ygol/odoo,tangyiyong/odoo,guewen/OpenUpgrade,fevxie/odoo,diagramsoftware/odoo,n0m4dz/odoo,laslabs/odoo,sebalix/OpenUpgrade,osvalr/odoo,colinnewell/odoo,Noviat/odoo,addition-it-solutions/project-all,numerigraphe/odoo,sysadminmatmoz/OCB,factorlibre/OCB,acshan/odoo,rubencabrera/odoo,pedrobaeza/OpenUpgrade,bobisme/odoo,pedrobaeza/OpenUpgrade,cdrooom/odoo,shivam1111/odoo,JGarcia-Panach/odoo,grap/OpenUpgrade,bguillot/OpenUpgrade,sve-odoo/odoo,dllsf/odootest,prospwro/odoo,havt/odoo,collex100/odoo,Maspear/odoo,mustafat/odoo-1,ThinkOpen-Solutions/odoo,oasiswork/odoo,bkirui/odoo,xzYue/odoo,dezynetechnologies/odoo,dfang/odoo,fgesora/odoo,brijeshkesariya/odoo,tinkhaven-organization/odoo,florentx/OpenUpgrade,Antiun/odoo,ovnicraft/odoo,sv-dev1/odoo,lombritz/odoo,alexteodor/odoo,poljeff/odoo,ShineFan/odoo,tarzan0820/odoo,havt/odoo,makinacorpus/odoo,damdam-s/OpenUpgrade,dsfsdgsbngfggb/odoo,zchking/odoo,cloud9UG/odoo,xujb/odoo,mmbtba/odoo,eino-makitalo/odoo,oliverhr/odoo,poljeff/odoo,dkubiak789/odoo,funkring/fdoo,osvalr/odoo,oliverhr/odoo,guewen/OpenUpgrade,mlaitinen/odoo,joariasl/odoo,microcom/odoo,ihsanudin/odoo,javierTerry/odoo,0k/OpenUpgrade,abenzbiria/clients_odoo,kybriainfotech/iSocioCRM,dariemp/odoo,fuselock/odoo,bplancher/odoo,hanicker/odoo,BT-ojossen/odoo,SAM-IT-SA/odoo,Elico-Corp/odoo_OCB,ovnicraft/odoo,nexiles/odoo,rahuldhote/odoo,shivam1111/odoo,mszewczy/odoo,colinnewell/odoo,ccomb/OpenUpgrade,ramadhane/odoo,ojengwa/odoo,cysnake4713/odoo,ingadhoc/odoo,QianBIG/odoo,KontorConsulting/odoo,collex100/odoo,slevenhagen/odoo,realsaiko/odoo,pedrobaeza/OpenUpgrade,odootr/odoo,lombritz/odoo,joshuajan/odoo,dezynetechnologies/odoo,hoatle/odoo,nitinitprof/odoo,windedge/odoo,gvb/odoo,srsman/odoo,bwrsandman/OpenUpgrade,NeovaHealth/odoo,OpenUpgrade/OpenUpgrade,shivam1111/odoo,lsinfo/odoo,ApuliaSoftware/odoo,lgscofield/odoo,stonegithubs/odoo,shaufi10/odoo,javierTerry/odoo,florentx/OpenUpgrade,tangyiyong/odoo,stephen144/odoo,Bachaco-ve/odoo,Nick-OpusVL/odoo,elmerdpadilla/iv,PongPi/isl-odoo,alqfahad/odoo,ehirt/odoo,apanju/odoo,guerrerocarlos/odoo,shaufi/odoo,guewen/OpenUpgrade,CopeX/odoo,dllsf/odootest,highco-groupe/odoo,dsfsdgsbngfggb/odoo,Mas
pear/odoo,spadae22/odoo,damdam-s/OpenUpgrade,FlorianLudwig/odoo,gsmartway/odoo,NeovaHealth/odoo,incaser/odoo-odoo,dezynetechnologies/odoo,JCA-Developpement/Odoo,srsman/odoo,bealdav/OpenUpgrade,SerpentCS/odoo,papouso/odoo,elmerdpadilla/iv,naousse/odoo,eino-makitalo/odoo,srsman/odoo,Maspear/odoo,Noviat/odoo,Adel-Magebinary/odoo,abdellatifkarroum/odoo,Daniel-CA/odoo,draugiskisprendimai/odoo,eino-makitalo/odoo,jaxkodex/odoo,chiragjogi/odoo,FlorianLudwig/odoo,markeTIC/OCB,rubencabrera/odoo,0k/OpenUpgrade,dsfsdgsbngfggb/odoo,deKupini/erp,makinacorpus/odoo,CopeX/odoo,stonegithubs/odoo,kirca/OpenUpgrade,zchking/odoo,jiachenning/odoo,ujjwalwahi/odoo,stephen144/odoo,tvibliani/odoo,oliverhr/odoo,mszewczy/odoo,gavin-feng/odoo,diagramsoftware/odoo,omprakasha/odoo,Gitlab11/odoo,jusdng/odoo,fdvarela/odoo8,doomsterinc/odoo,AuyaJackie/odoo,n0m4dz/odoo,abstract-open-solutions/OCB,ThinkOpen-Solutions/odoo,takis/odoo,Codefans-fan/odoo,sv-dev1/odoo,prospwro/odoo,ChanduERP/odoo,shingonoide/odoo,chiragjogi/odoo,SerpentCS/odoo,xujb/odoo,agrista/odoo-saas,simongoffin/website_version,mkieszek/odoo,klunwebale/odoo,tangyiyong/odoo,cedk/odoo,fjbatresv/odoo,Maspear/odoo,odoousers2014/odoo,abenzbiria/clients_odoo,CopeX/odoo,QianBIG/odoo,realsaiko/odoo,massot/odoo,mustafat/odoo-1,Codefans-fan/odoo,glovebx/odoo,jfpla/odoo,klunwebale/odoo,sergio-incaser/odoo,doomsterinc/odoo,elmerdpadilla/iv,oasiswork/odoo,Danisan/odoo-1,BT-rmartin/odoo,alexteodor/odoo,sv-dev1/odoo,stephen144/odoo,pedrobaeza/odoo,shaufi/odoo,mlaitinen/odoo,rgeleta/odoo,ingadhoc/odoo,idncom/odoo,virgree/odoo,incaser/odoo-odoo,Adel-Magebinary/odoo,nuuuboo/odoo,stonegithubs/odoo,savoirfairelinux/odoo,joshuajan/odoo,bealdav/OpenUpgrade,tvibliani/odoo,shivam1111/odoo,Daniel-CA/odoo,papouso/odoo,fossoult/odoo,blaggacao/OpenUpgrade,jiangzhixiao/odoo,avoinsystems/odoo,fuselock/odoo,funkring/fdoo,stephen144/odoo,demon-ru/iml-crm,odooindia/odoo,jpshort/odoo,odootr/odoo,fuhongliang/odoo,OpusVL/odoo,gavin-feng/odoo,kybriainfotech/iSocioCRM,sadleader/odoo,vnsofthe/odoo,provaleks/o8,JGarcia-Panach/odoo,BT-ojossen/odoo,optima-ict/odoo,Kilhog/odoo,jeasoft/odoo,ubic135/odoo-design,fjbatresv/odoo,Adel-Magebinary/odoo,FlorianLudwig/odoo,Ichag/odoo,Ernesto99/odoo,dllsf/odootest,hbrunn/OpenUpgrade,provaleks/o8,erkrishna9/odoo,leorochael/odoo,OpenUpgrade-dev/OpenUpgrade,lgscofield/odoo,bobisme/odoo,Drooids/odoo,jiangzhixiao/odoo,GauravSahu/odoo,OpenUpgrade-dev/OpenUpgrade,KontorConsulting/odoo,aviciimaxwell/odoo,OpenUpgrade/OpenUpgrade,xzYue/odoo,cpyou/odoo,rahuldhote/odoo,inspyration/odoo,hifly/OpenUpgrade,jaxkodex/odoo,csrocha/OpenUpgrade,mustafat/odoo-1,xzYue/odoo,patmcb/odoo,0k/odoo,Drooids/odoo,alqfahad/odoo,shaufi/odoo,goliveirab/odoo,Bachaco-ve/odoo,alexteodor/odoo,synconics/odoo,guewen/OpenUpgrade,Kilhog/odoo,ThinkOpen-Solutions/odoo,luistorresm/odoo,eino-makitalo/odoo,tarzan0820/odoo,sinbazhou/odoo,OpenUpgrade-dev/OpenUpgrade,draugiskisprendimai/odoo,markeTIC/OCB,ojengwa/odoo,odooindia/odoo,CubicERP/odoo,dgzurita/odoo,patmcb/odoo,odoo-turkiye/odoo,ecosoft-odoo/odoo,ApuliaSoftware/odoo,minhtuancn/odoo,sinbazhou/odoo,PongPi/isl-odoo,takis/odoo,stephen144/odoo,bealdav/OpenUpgrade,hanicker/odoo,KontorConsulting/odoo,mvaled/OpenUpgrade,fgesora/odoo,SAM-IT-SA/odoo,shingonoide/odoo,fjbatresv/odoo,vnsofthe/odoo,brijeshkesariya/odoo,bkirui/odoo,feroda/odoo,ujjwalwahi/odoo,stonegithubs/odoo,mvaled/OpenUpgrade,SerpentCS/odoo,hopeall/odoo,klunwebale/odoo,hoatle/odoo,JonathanStein/odoo,gvb/odoo,Gitlab11/odoo,savoirfairelinux/odoo,dalegregory/odoo,funkring/fdoo,fevxie/odoo,hopeall/od
oo,OpenUpgrade/OpenUpgrade,tarzan0820/odoo,fevxie/odoo,alhashash/odoo,odoousers2014/odoo,provaleks/o8,jiangzhixiao/odoo,BT-rmartin/odoo,apanju/GMIO_Odoo,tvtsoft/odoo8,kittiu/odoo,colinnewell/odoo,oliverhr/odoo,Codefans-fan/odoo,CubicERP/odoo,charbeljc/OCB,Nowheresly/odoo,dgzurita/odoo,jaxkodex/odoo,shingonoide/odoo,lgscofield/odoo,apanju/GMIO_Odoo,cpyou/odoo,hanicker/odoo,havt/odoo,omprakasha/odoo,kybriainfotech/iSocioCRM,spadae22/odoo,takis/odoo,hoatle/odoo,chiragjogi/odoo,funkring/fdoo,bwrsandman/OpenUpgrade,QianBIG/odoo,damdam-s/OpenUpgrade,RafaelTorrealba/odoo,feroda/odoo,nitinitprof/odoo,ramitalat/odoo,oihane/odoo,glovebx/odoo,hassoon3/odoo,demon-ru/iml-crm,Daniel-CA/odoo,SAM-IT-SA/odoo,bakhtout/odoo-educ,takis/odoo,odooindia/odoo,QianBIG/odoo,jaxkodex/odoo,avoinsystems/odoo,mustafat/odoo-1,JonathanStein/odoo,sebalix/OpenUpgrade,nhomar/odoo,CatsAndDogsbvba/odoo,virgree/odoo,gorjuce/odoo,hoatle/odoo,Ichag/odoo,microcom/odoo,OpenUpgrade-dev/OpenUpgrade,csrocha/OpenUpgrade,florian-dacosta/OpenUpgrade,jeasoft/odoo,AuyaJackie/odoo,BT-fgarbely/odoo,bplancher/odoo,hopeall/odoo,naousse/odoo,factorlibre/OCB,mvaled/OpenUpgrade,charbeljc/OCB,cedk/odoo,pedrobaeza/odoo,cysnake4713/odoo,Nick-OpusVL/odoo,sinbazhou/odoo,fossoult/odoo,jesramirez/odoo,shaufi10/odoo,savoirfairelinux/odoo,KontorConsulting/odoo,rahuldhote/odoo,dsfsdgsbngfggb/odoo,ApuliaSoftware/odoo,ingadhoc/odoo,JCA-Developpement/Odoo,brijeshkesariya/odoo,JGarcia-Panach/odoo,Endika/OpenUpgrade,joariasl/odoo,datenbetrieb/odoo,Endika/OpenUpgrade,apocalypsebg/odoo,goliveirab/odoo,fuselock/odoo,bobisme/odoo,tinkerthaler/odoo,SAM-IT-SA/odoo,eino-makitalo/odoo,optima-ict/odoo,tvtsoft/odoo8,lombritz/odoo,Endika/odoo,laslabs/odoo,joshuajan/odoo,rgeleta/odoo,windedge/odoo,hoatle/odoo,cloud9UG/odoo,JGarcia-Panach/odoo,charbeljc/OCB,shivam1111/odoo,spadae22/odoo,bplancher/odoo,chiragjogi/odoo,vnsofthe/odoo,wangjun/odoo,jusdng/odoo,idncom/odoo,RafaelTorrealba/odoo,apanju/odoo,mszewczy/odoo,synconics/odoo,osvalr/odoo,Codefans-fan/odoo,datenbetrieb/odoo,Noviat/odoo,bkirui/odoo,BT-fgarbely/odoo,hubsaysnuaa/odoo,AuyaJackie/odoo,fossoult/odoo,gavin-feng/odoo,tinkerthaler/odoo,fuhongliang/odoo,rowemoore/odoo,pedrobaeza/odoo,hbrunn/OpenUpgrade,nuncjo/odoo,nuncjo/odoo,nagyistoce/odoo-dev-odoo,oliverhr/odoo,christophlsa/odoo,acshan/odoo,nitinitprof/odoo,matrixise/odoo,Elico-Corp/odoo_OCB,bguillot/OpenUpgrade,ThinkOpen-Solutions/odoo,bwrsandman/OpenUpgrade,ccomb/OpenUpgrade,addition-it-solutions/project-all,RafaelTorrealba/odoo,jiachenning/odoo,ramadhane/odoo,Drooids/odoo,ygol/odoo,jiangzhixiao/odoo,andreparames/odoo,GauravSahu/odoo,Kilhog/odoo,cysnake4713/odoo,hopeall/odoo,tinkerthaler/odoo,alqfahad/odoo,nagyistoce/odoo-dev-odoo,prospwro/odoo,JCA-Developpement/Odoo,laslabs/odoo,alhashash/odoo,stephen144/odoo,hubsaysnuaa/odoo,pplatek/odoo,leoliujie/odoo,blaggacao/OpenUpgrade,ramitalat/odoo,cedk/odoo,gsmartway/odoo,Danisan/odoo-1,factorlibre/OCB,KontorConsulting/odoo,hoatle/odoo,shivam1111/odoo,Endika/odoo,prospwro/odoo,eino-makitalo/odoo,Bachaco-ve/odoo,simongoffin/website_version,JonathanStein/odoo,Danisan/odoo-1,Eric-Zhong/odoo,funkring/fdoo,mmbtba/odoo,odoousers2014/odoo,hassoon3/odoo,hip-odoo/odoo,abstract-open-solutions/OCB,nexiles/odoo,bkirui/odoo,slevenhagen/odoo,nitinitprof/odoo,hbrunn/OpenUpgrade,arthru/OpenUpgrade,bobisme/odoo,cdrooom/odoo,ShineFan/odoo,CopeX/odoo,oasiswork/odoo,jesramirez/odoo,tvibliani/odoo,incaser/odoo-odoo,ehirt/odoo,fgesora/odoo,alexcuellar/odoo,nhomar/odoo,apanju/odoo,BT-astauder/odoo,kifcaliph/odoo,laslabs/odoo,optima-ict/odoo
,tvibliani/odoo,brijeshkesariya/odoo,luistorresm/odoo,csrocha/OpenUpgrade,dalegregory/odoo,numerigraphe/odoo,rahuldhote/odoo,rgeleta/odoo,oihane/odoo,Daniel-CA/odoo,n0m4dz/odoo,papouso/odoo,ojengwa/odoo,slevenhagen/odoo-npg,chiragjogi/odoo,florentx/OpenUpgrade,BT-fgarbely/odoo,steedos/odoo,colinnewell/odoo,waytai/odoo,sv-dev1/odoo,nexiles/odoo,mlaitinen/odoo,sergio-incaser/odoo,SerpentCS/odoo,realsaiko/odoo,feroda/odoo,hip-odoo/odoo,waytai/odoo,Gitlab11/odoo,odootr/odoo,havt/odoo,Nowheresly/odoo,odootr/odoo,nuuuboo/odoo,shaufi10/odoo,ramitalat/odoo,jusdng/odoo,hubsaysnuaa/odoo,Bachaco-ve/odoo,mszewczy/odoo,avoinsystems/odoo,gavin-feng/odoo,ramitalat/odoo,hanicker/odoo,acshan/odoo,abstract-open-solutions/OCB,mlaitinen/odoo,gvb/odoo,arthru/OpenUpgrade,janocat/odoo,ChanduERP/odoo,rowemoore/odoo,ClearCorp-dev/odoo,fdvarela/odoo8,BT-ojossen/odoo,fossoult/odoo,OpusVL/odoo,fgesora/odoo,jusdng/odoo,charbeljc/OCB,nagyistoce/odoo-dev-odoo,JGarcia-Panach/odoo,slevenhagen/odoo,jiachenning/odoo,shingonoide/odoo,alexcuellar/odoo,diagramsoftware/odoo,idncom/odoo,vnsofthe/odoo,sysadminmatmoz/OCB,steedos/odoo,odoo-turkiye/odoo,Grirrane/odoo,poljeff/odoo,hassoon3/odoo,poljeff/odoo,mkieszek/odoo,rowemoore/odoo,hassoon3/odoo,cedk/odoo,sv-dev1/odoo,ClearCorp-dev/odoo,javierTerry/odoo,slevenhagen/odoo-npg,aviciimaxwell/odoo,odoo-turkiye/odoo,juanalfonsopr/odoo,odoo-turkiye/odoo,windedge/odoo,hassoon3/odoo,fuhongliang/odoo,ecosoft-odoo/odoo,leoliujie/odoo,datenbetrieb/odoo,odootr/odoo,mszewczy/odoo,sinbazhou/odoo,patmcb/odoo,vnsofthe/odoo,ecosoft-odoo/odoo,ClearCorp-dev/odoo,naousse/odoo,savoirfairelinux/OpenUpgrade,hifly/OpenUpgrade,VielSoft/odoo,srimai/odoo,savoirfairelinux/OpenUpgrade,mlaitinen/odoo,collex100/odoo,collex100/odoo,chiragjogi/odoo,hoatle/odoo,osvalr/odoo,christophlsa/odoo,hubsaysnuaa/odoo,minhtuancn/odoo,diagramsoftware/odoo,Grirrane/odoo,dezynetechnologies/odoo,leoliujie/odoo,leorochael/odoo,minhtuancn/odoo,papouso/odoo,CatsAndDogsbvba/odoo,makinacorpus/odoo,salaria/odoo,Codefans-fan/odoo,tinkerthaler/odoo,andreparames/odoo,pedrobaeza/odoo,GauravSahu/odoo,mszewczy/odoo,ThinkOpen-Solutions/odoo,ClearCorp-dev/odoo,jesramirez/odoo,deKupini/erp,alexcuellar/odoo,MarcosCommunity/odoo,sysadminmatmoz/OCB,draugiskisprendimai/odoo,OpenUpgrade/OpenUpgrade,OpenUpgrade/OpenUpgrade,ccomb/OpenUpgrade,ihsanudin/odoo,x111ong/odoo,factorlibre/OCB,chiragjogi/odoo,dgzurita/odoo,Elico-Corp/odoo_OCB,VielSoft/odoo,NL66278/OCB,mvaled/OpenUpgrade,srimai/odoo,avoinsystems/odoo,Noviat/odoo,syci/OCB,wangjun/odoo,fossoult/odoo,fevxie/odoo,ojengwa/odoo,christophlsa/odoo,VielSoft/odoo,abstract-open-solutions/OCB,Ichag/odoo,inspyration/odoo,leoliujie/odoo,Grirrane/odoo,osvalr/odoo,slevenhagen/odoo,dalegregory/odoo,Elico-Corp/odoo_OCB,Nowheresly/odoo,ihsanudin/odoo,ygol/odoo,nhomar/odoo,MarcosCommunity/odoo,jiachenning/odoo,synconics/odoo,simongoffin/website_version,x111ong/odoo,janocat/odoo,abdellatifkarroum/odoo,jeasoft/odoo,oihane/odoo,fevxie/odoo,glovebx/odoo,grap/OpenUpgrade,tvtsoft/odoo8,jeasoft/odoo,0k/odoo,minhtuancn/odoo,jpshort/odoo,feroda/odoo,CatsAndDogsbvba/odoo,hassoon3/odoo,nuncjo/odoo,guerrerocarlos/odoo,avoinsystems/odoo,osvalr/odoo,jeasoft/odoo,deKupini/erp,wangjun/odoo,0k/odoo,takis/odoo,collex100/odoo,nuuuboo/odoo,dgzurita/odoo,omprakasha/odoo,jeasoft/odoo,feroda/odoo,Bachaco-ve/odoo,jusdng/odoo,tarzan0820/odoo,provaleks/o8,apanju/odoo,joariasl/odoo,ChanduERP/odoo,leorochael/odoo,jaxkodex/odoo,cpyou/odoo,acshan/odoo,sadleader/odoo,dariemp/odoo,BT-fgarbely/odoo,Drooids/odoo,lombritz/odoo,simongoffin/website_v
ersion,gavin-feng/odoo,Eric-Zhong/odoo,brijeshkesariya/odoo,ShineFan/odoo,dalegregory/odoo,Danisan/odoo-1,bwrsandman/OpenUpgrade,andreparames/odoo,jusdng/odoo,hmen89/odoo,rgeleta/odoo,vnsofthe/odoo,blaggacao/OpenUpgrade,fuhongliang/odoo,apanju/GMIO_Odoo,tinkerthaler/odoo,juanalfonsopr/odoo,fuselock/odoo,spadae22/odoo,hifly/OpenUpgrade,glovebx/odoo,abdellatifkarroum/odoo,storm-computers/odoo,avoinsystems/odoo,wangjun/odoo,elmerdpadilla/iv,0k/odoo,jfpla/odoo,ramadhane/odoo,tinkhaven-organization/odoo,lombritz/odoo,hbrunn/OpenUpgrade,charbeljc/OCB,sadleader/odoo,omprakasha/odoo,savoirfairelinux/OpenUpgrade,shaufi10/odoo,steedos/odoo,lsinfo/odoo,ygol/odoo,rubencabrera/odoo,Antiun/odoo,BT-fgarbely/odoo,matrixise/odoo,pedrobaeza/OpenUpgrade,lightcn/odoo,SAM-IT-SA/odoo,microcom/odoo,sebalix/OpenUpgrade,KontorConsulting/odoo,cloud9UG/odoo,Nick-OpusVL/odoo,kittiu/odoo,sysadminmatmoz/OCB,massot/odoo,kybriainfotech/iSocioCRM,numerigraphe/odoo,syci/OCB,mkieszek/odoo,cdrooom/odoo,luiseduardohdbackup/odoo,CopeX/odoo,GauravSahu/odoo,apanju/GMIO_Odoo,salaria/odoo,sve-odoo/odoo,hubsaysnuaa/odoo,ihsanudin/odoo,laslabs/odoo,ojengwa/odoo,deKupini/erp,idncom/odoo,srimai/odoo,lgscofield/odoo,srimai/odoo,OpenUpgrade/OpenUpgrade,QianBIG/odoo,GauravSahu/odoo,klunwebale/odoo,tinkhaven-organization/odoo,fuselock/odoo,Bachaco-ve/odoo,n0m4dz/odoo,BT-fgarbely/odoo,tangyiyong/odoo,pedrobaeza/OpenUpgrade,Daniel-CA/odoo,acshan/odoo,laslabs/odoo,nagyistoce/odoo-dev-odoo,AuyaJackie/odoo,alexteodor/odoo,apanju/odoo,odooindia/odoo,xujb/odoo,zchking/odoo,dezynetechnologies/odoo,naousse/odoo,aviciimaxwell/odoo,steedos/odoo,ehirt/odoo,NL66278/OCB,nhomar/odoo-mirror,kittiu/odoo,cpyou/odoo,glovebx/odoo,virgree/odoo,ojengwa/odoo,ecosoft-odoo/odoo,Kilhog/odoo,OpenUpgrade/OpenUpgrade,fgesora/odoo,joariasl/odoo,goliveirab/odoo,salaria/odoo,Adel-Magebinary/odoo,AuyaJackie/odoo,ovnicraft/odoo,ramadhane/odoo,thanhacun/odoo,RafaelTorrealba/odoo,JGarcia-Panach/odoo,patmcb/odoo,thanhacun/odoo,prospwro/odoo,dariemp/odoo,oliverhr/odoo,kifcaliph/odoo,erkrishna9/odoo,Nick-OpusVL/odoo,cloud9UG/odoo,jiachenning/odoo,BT-astauder/odoo,collex100/odoo,matrixise/odoo,abdellatifkarroum/odoo,blaggacao/OpenUpgrade,virgree/odoo,BT-ojossen/odoo,CubicERP/odoo,Daniel-CA/odoo,leorochael/odoo,Ichag/odoo,sve-odoo/odoo,christophlsa/odoo,gsmartway/odoo,andreparames/odoo,optima-ict/odoo,ApuliaSoftware/odoo,savoirfairelinux/odoo,TRESCLOUD/odoopub,ingadhoc/odoo,gsmartway/odoo,rdeheele/odoo,kifcaliph/odoo,dezynetechnologies/odoo,provaleks/o8,AuyaJackie/odoo,storm-computers/odoo,thanhacun/odoo,shingonoide/odoo,nuuuboo/odoo,janocat/odoo,lgscofield/odoo,optima-ict/odoo,Nowheresly/odoo,jpshort/odoo,gsmartway/odoo,Drooids/odoo,patmcb/odoo,Endika/OpenUpgrade,shingonoide/odoo,guewen/OpenUpgrade,hip-odoo/odoo,jolevq/odoopub,collex100/odoo,hubsaysnuaa/odoo,MarcosCommunity/odoo,ramadhane/odoo,oihane/odoo,gorjuce/odoo,cedk/odoo,janocat/odoo,spadae22/odoo,VielSoft/odoo,alqfahad/odoo,dfang/odoo,joshuajan/odoo,lightcn/odoo,poljeff/odoo,bkirui/odoo,naousse/odoo,tinkhaven-organization/odoo,jesramirez/odoo,hbrunn/OpenUpgrade,glovebx/odoo,numerigraphe/odoo,massot/odoo,CatsAndDogsbvba/odoo,Endika/OpenUpgrade,odoo-turkiye/odoo,NeovaHealth/odoo,odootr/odoo,fossoult/odoo,florentx/OpenUpgrade,shaufi10/odoo,Ichag/odoo,jiangzhixiao/odoo,BT-rmartin/odoo,zchking/odoo,slevenhagen/odoo-npg,oihane/odoo,dezynetechnologies/odoo,storm-computers/odoo,fjbatresv/odoo,simongoffin/website_version,nitinitprof/odoo,CubicERP/odoo,kittiu/odoo,csrocha/OpenUpgrade,guewen/OpenUpgrade,Nowheresly/odoo,mszewczy/odo
o,savoirfairelinux/OpenUpgrade,ingadhoc/odoo,ihsanudin/odoo,draugiskisprendimai/odoo,apocalypsebg/odoo,avoinsystems/odoo,Ernesto99/odoo,markeTIC/OCB,Maspear/odoo,andreparames/odoo,hifly/OpenUpgrade,bplancher/odoo,sebalix/OpenUpgrade,sinbazhou/odoo,bguillot/OpenUpgrade,doomsterinc/odoo,agrista/odoo-saas,dkubiak789/odoo,bobisme/odoo,BT-ojossen/odoo,florian-dacosta/OpenUpgrade,prospwro/odoo,gvb/odoo,tinkhaven-organization/odoo,MarcosCommunity/odoo,GauravSahu/odoo,CatsAndDogsbvba/odoo,makinacorpus/odoo,shaufi/odoo,apanju/odoo,joariasl/odoo,damdam-s/OpenUpgrade,BT-astauder/odoo,papouso/odoo,nagyistoce/odoo-dev-odoo,hmen89/odoo,christophlsa/odoo,CopeX/odoo,ubic135/odoo-design,Nick-OpusVL/odoo,abdellatifkarroum/odoo,leorochael/odoo,ubic135/odoo-design,demon-ru/iml-crm,salaria/odoo,ChanduERP/odoo,NeovaHealth/odoo,tarzan0820/odoo,rowemoore/odoo,odoousers2014/odoo,abstract-open-solutions/OCB,CopeX/odoo,steedos/odoo,ccomb/OpenUpgrade,janocat/odoo,JonathanStein/odoo,sv-dev1/odoo,markeTIC/OCB,odoo-turkiye/odoo,jfpla/odoo,srsman/odoo,alexcuellar/odoo,juanalfonsopr/odoo,PongPi/isl-odoo,alexcuellar/odoo,ehirt/odoo,csrocha/OpenUpgrade,ojengwa/odoo,0k/OpenUpgrade,microcom/odoo,sysadminmatmoz/OCB,funkring/fdoo,BT-astauder/odoo,dsfsdgsbngfggb/odoo,steedos/odoo,demon-ru/iml-crm,addition-it-solutions/project-all,OpenUpgrade-dev/OpenUpgrade,Gitlab11/odoo,alexcuellar/odoo,luistorresm/odoo,gvb/odoo,fdvarela/odoo8,sebalix/OpenUpgrade,fevxie/odoo,srsman/odoo,Grirrane/odoo,Endika/OpenUpgrade,wangjun/odoo,virgree/odoo,mkieszek/odoo,ubic135/odoo-design,Danisan/odoo-1,odootr/odoo,ramitalat/odoo,bwrsandman/OpenUpgrade,FlorianLudwig/odoo,ovnicraft/odoo,rdeheele/odoo,blaggacao/OpenUpgrade,brijeshkesariya/odoo,PongPi/isl-odoo,x111ong/odoo,Kilhog/odoo,rgeleta/odoo,guerrerocarlos/odoo,gorjuce/odoo,JonathanStein/odoo,Eric-Zhong/odoo,luistorresm/odoo,mvaled/OpenUpgrade,bakhtout/odoo-educ,realsaiko/odoo,mmbtba/odoo,hmen89/odoo,pplatek/odoo,Endika/OpenUpgrade,dariemp/odoo,fdvarela/odoo8,cloud9UG/odoo,oihane/odoo,nagyistoce/odoo-dev-odoo,Antiun/odoo,guerrerocarlos/odoo,charbeljc/OCB,BT-rmartin/odoo,Nowheresly/odoo,Endika/odoo,FlorianLudwig/odoo,nexiles/odoo,tangyiyong/odoo,jesramirez/odoo,nuuuboo/odoo,ihsanudin/odoo,florentx/OpenUpgrade,NeovaHealth/odoo,zchking/odoo,dfang/odoo,joariasl/odoo,CatsAndDogsbvba/odoo,ehirt/odoo,ujjwalwahi/odoo,shivam1111/odoo,nhomar/odoo-mirror,factorlibre/OCB,lombritz/odoo,tvtsoft/odoo8,kittiu/odoo,aviciimaxwell/odoo,CatsAndDogsbvba/odoo,Ichag/odoo,fuhongliang/odoo,arthru/OpenUpgrade,sadleader/odoo,ujjwalwahi/odoo,christophlsa/odoo,jpshort/odoo,kittiu/odoo,TRESCLOUD/odoopub,shaufi/odoo,incaser/odoo-odoo,waytai/odoo,x111ong/odoo,storm-computers/odoo,kirca/OpenUpgrade,ClearCorp-dev/odoo,ygol/odoo,mvaled/OpenUpgrade,Endika/odoo,luiseduardohdbackup/odoo,cpyou/odoo,bakhtout/odoo-educ,Ernesto99/odoo,incaser/odoo-odoo,abdellatifkarroum/odoo,dllsf/odootest,nhomar/odoo-mirror,stonegithubs/odoo,apocalypsebg/odoo,Codefans-fan/odoo,tinkhaven-organization/odoo,jpshort/odoo,pedrobaeza/odoo,rubencabrera/odoo,sinbazhou/odoo,tvtsoft/odoo8,shaufi10/odoo,idncom/odoo,numerigraphe/odoo,OpusVL/odoo,VielSoft/odoo,ygol/odoo,PongPi/isl-odoo,thanhacun/odoo,jfpla/odoo,omprakasha/odoo,srimai/odoo,grap/OpenUpgrade,Antiun/odoo,dariemp/odoo,Kilhog/odoo,sve-odoo/odoo,cedk/odoo,Eric-Zhong/odoo,lombritz/odoo,ccomb/OpenUpgrade,Daniel-CA/odoo,synconics/odoo,stonegithubs/odoo,apocalypsebg/odoo,takis/odoo,JonathanStein/odoo,mmbtba/odoo,highco-groupe/odoo,bakhtout/odoo-educ,massot/odoo,gsmartway/odoo,virgree/odoo,pedrobaeza/OpenUpgrade,Cubic
ERP/odoo,hifly/OpenUpgrade,damdam-s/OpenUpgrade,Ernesto99/odoo,janocat/odoo,juanalfonsopr/odoo,waytai/odoo,lightcn/odoo,salaria/odoo,omprakasha/odoo,tinkerthaler/odoo,storm-computers/odoo,dalegregory/odoo,SAM-IT-SA/odoo,vnsofthe/odoo,savoirfairelinux/OpenUpgrade,datenbetrieb/odoo,dfang/odoo,RafaelTorrealba/odoo,FlorianLudwig/odoo,apocalypsebg/odoo,acshan/odoo,ovnicraft/odoo,cysnake4713/odoo,rahuldhote/odoo,jfpla/odoo,alexteodor/odoo,Grirrane/odoo,0k/odoo,ehirt/odoo,bealdav/OpenUpgrade,patmcb/odoo,florian-dacosta/OpenUpgrade,dgzurita/odoo,grap/OpenUpgrade,idncom/odoo,csrocha/OpenUpgrade,bguillot/OpenUpgrade,syci/OCB,grap/OpenUpgrade,xujb/odoo,demon-ru/iml-crm,srimai/odoo,Endika/OpenUpgrade,nitinitprof/odoo,bplancher/odoo,florian-dacosta/OpenUpgrade,mmbtba/odoo,matrixise/odoo,deKupini/erp,zchking/odoo,luiseduardohdbackup/odoo,florian-dacosta/OpenUpgrade,RafaelTorrealba/odoo,windedge/odoo,ChanduERP/odoo,mlaitinen/odoo,nuncjo/odoo,optima-ict/odoo,oasiswork/odoo,Adel-Magebinary/odoo,ApuliaSoftware/odoo,funkring/fdoo,alhashash/odoo,cdrooom/odoo,0k/OpenUpgrade,diagramsoftware/odoo,jolevq/odoopub,salaria/odoo,cloud9UG/odoo,sergio-incaser/odoo,hmen89/odoo,feroda/odoo,poljeff/odoo,SerpentCS/odoo,colinnewell/odoo,hopeall/odoo,markeTIC/OCB,dsfsdgsbngfggb/odoo,Gitlab11/odoo,diagramsoftware/odoo,havt/odoo,dariemp/odoo,Elico-Corp/odoo_OCB,bkirui/odoo,prospwro/odoo,SAM-IT-SA/odoo,nuncjo/odoo,hmen89/odoo,hanicker/odoo,NL66278/OCB,mmbtba/odoo,bguillot/OpenUpgrade,jeasoft/odoo,ovnicraft/odoo,omprakasha/odoo,MarcosCommunity/odoo,klunwebale/odoo,ehirt/odoo,pplatek/odoo,alhashash/odoo,massot/odoo,fuhongliang/odoo,idncom/odoo,dalegregory/odoo,ujjwalwahi/odoo,Adel-Magebinary/odoo,nuncjo/odoo,FlorianLudwig/odoo,feroda/odoo,dkubiak789/odoo,lightcn/odoo,ShineFan/odoo,Ichag/odoo,savoirfairelinux/odoo,SerpentCS/odoo,shaufi/odoo,naousse/odoo,markeTIC/OCB,fjbatresv/odoo,jolevq/odoopub,juanalfonsopr/odoo,agrista/odoo-saas,hbrunn/OpenUpgrade,datenbetrieb/odoo,poljeff/odoo,synconics/odoo,stonegithubs/odoo,pplatek/odoo,mmbtba/odoo,thanhacun/odoo,dariemp/odoo,0k/OpenUpgrade,Nick-OpusVL/odoo,kirca/OpenUpgrade,oasiswork/odoo,ramadhane/odoo,jpshort/odoo,hubsaysnuaa/odoo,ccomb/OpenUpgrade,mkieszek/odoo,takis/odoo,slevenhagen/odoo-npg,ccomb/OpenUpgrade,syci/OCB,provaleks/o8,KontorConsulting/odoo,kybriainfotech/iSocioCRM,hanicker/odoo,Drooids/odoo,jfpla/odoo,Grirrane/odoo,PongPi/isl-odoo,SerpentCS/odoo,arthru/OpenUpgrade,Elico-Corp/odoo_OCB,slevenhagen/odoo,Antiun/odoo,dkubiak789/odoo,ecosoft-odoo/odoo,Antiun/odoo,doomsterinc/odoo,sv-dev1/odoo,Adel-Magebinary/odoo,rgeleta/odoo,ingadhoc/odoo,NeovaHealth/odoo,hip-odoo/odoo,Eric-Zhong/odoo,kirca/OpenUpgrade,damdam-s/OpenUpgrade,NL66278/OCB,kirca/OpenUpgrade,highco-groupe/odoo,jiangzhixiao/odoo,nitinitprof/odoo,srsman/odoo,savoirfairelinux/OpenUpgrade,lsinfo/odoo,Drooids/odoo,acshan/odoo,dllsf/odootest,minhtuancn/odoo,mvaled/OpenUpgrade,Noviat/odoo,ChanduERP/odoo,jeasoft/odoo,Nick-OpusVL/odoo,Kilhog/odoo,arthru/OpenUpgrade,hifly/OpenUpgrade,papouso/odoo,tinkerthaler/odoo,ecosoft-odoo/odoo,aviciimaxwell/odoo,javierTerry/odoo,Codefans-fan/odoo,odoousers2014/odoo,BT-astauder/odoo,wangjun/odoo,oliverhr/odoo,addition-it-solutions/project-all,jpshort/odoo,fevxie/odoo,Maspear/odoo,ujjwalwahi/odoo,dfang/odoo,draugiskisprendimai/odoo,jolevq/odoopub,sebalix/OpenUpgrade,tvibliani/odoo,MarcosCommunity/odoo,leoliujie/odoo,rdeheele/odoo,ujjwalwahi/odoo,steedos/odoo,ShineFan/odoo,alexcuellar/odoo,nagyistoce/odoo-dev-odoo,BT-rmartin/odoo,sergio-incaser/odoo,rahuldhote/odoo,shaufi10/odoo,bguillot/OpenU
pgrade,lgscofield/odoo,apanju/GMIO_Odoo,goliveirab/odoo,florian-dacosta/OpenUpgrade,aviciimaxwell/odoo,hopeall/odoo,minhtuancn/odoo,xzYue/odoo,CubicERP/odoo,rowemoore/odoo,minhtuancn/odoo,BT-fgarbely/odoo,diagramsoftware/odoo,lightcn/odoo,fossoult/odoo,slevenhagen/odoo-npg,rowemoore/odoo,doomsterinc/odoo,luistorresm/odoo,hopeall/odoo,srsman/odoo,slevenhagen/odoo,highco-groupe/odoo,srimai/odoo,CubicERP/odoo,lightcn/odoo,nuuuboo/odoo,numerigraphe/odoo,klunwebale/odoo,jiachenning/odoo,waytai/odoo
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import order
import request_quotation
import purchase_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
[FIX] Fix useless import following the removal of rml purchase reports
bzr revid: openerp-sle@openerp-sle.home-20140214150700-2zuukk4ahs4q1zhs
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
<commit_before># -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import order
import request_quotation
import purchase_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
<commit_msg>[FIX] Fix useless import following the removal of rml purchase reports
bzr revid: openerp-sle@openerp-sle.home-20140214150700-2zuukk4ahs4q1zhs<commit_after>
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import order
import request_quotation
import purchase_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
[FIX] Fix useless import following the removal of rml purchase reports
bzr revid: openerp-sle@openerp-sle.home-20140214150700-2zuukk4ahs4q1zhs# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
<commit_before># -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import order
import request_quotation
import purchase_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
<commit_msg>[FIX] Fix useless import following the removal of rml purchase reports
bzr revid: openerp-sle@openerp-sle.home-20140214150700-2zuukk4ahs4q1zhs<commit_after># -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
067ccc7256d258a5b7b7a57beb26ed70488d1be2
|
mezzanine_slideshows/migrations/0003_auto__del_slideshow.py
|
mezzanine_slideshows/migrations/0003_auto__del_slideshow.py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Slideshow'
db.delete_table('mezzanine_slideshows_slideshow')
# Removing M2M table for field galleries on 'Slideshow'
db.delete_table(db.shorten_name('mezzanine_slideshows_slideshow_galleries'))
def backwards(self, orm):
# Adding model 'Slideshow'
db.create_table('mezzanine_slideshows_slideshow', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('page', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['pages.RichTextPage'], unique=True)),
))
db.send_create_signal('mezzanine_slideshows', ['Slideshow'])
# Adding M2M table for field galleries on 'Slideshow'
m2m_table_name = db.shorten_name('mezzanine_slideshows_slideshow_galleries')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('slideshow', models.ForeignKey(orm['mezzanine_slideshows.slideshow'], null=False)),
('gallery', models.ForeignKey(orm['galleries.gallery'], null=False))
))
db.create_unique(m2m_table_name, ['slideshow_id', 'gallery_id'])
models = {
}
complete_apps = ['mezzanine_slideshows']
|
Add migration for model app_name change in preceding commit
|
Add migration for model app_name change in preceding commit
|
Python
|
bsd-2-clause
|
philipsouthwell/mezzanine-slideshows,philipsouthwell/mezzanine-slideshows,philipsouthwell/mezzanine-slideshows,adam494m/mezzanine-slideshows,adam494m/mezzanine-slideshows,adam494m/mezzanine-slideshows
|
Add migration for model app_name change in preceding commit
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Slideshow'
db.delete_table('mezzanine_slideshows_slideshow')
# Removing M2M table for field galleries on 'Slideshow'
db.delete_table(db.shorten_name('mezzanine_slideshows_slideshow_galleries'))
def backwards(self, orm):
# Adding model 'Slideshow'
db.create_table('mezzanine_slideshows_slideshow', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('page', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['pages.RichTextPage'], unique=True)),
))
db.send_create_signal('mezzanine_slideshows', ['Slideshow'])
# Adding M2M table for field galleries on 'Slideshow'
m2m_table_name = db.shorten_name('mezzanine_slideshows_slideshow_galleries')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('slideshow', models.ForeignKey(orm['mezzanine_slideshows.slideshow'], null=False)),
('gallery', models.ForeignKey(orm['galleries.gallery'], null=False))
))
db.create_unique(m2m_table_name, ['slideshow_id', 'gallery_id'])
models = {
}
complete_apps = ['mezzanine_slideshows']
|
<commit_before><commit_msg>Add migration for model app_name change in preceding commit<commit_after>
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Slideshow'
db.delete_table('mezzanine_slideshows_slideshow')
# Removing M2M table for field galleries on 'Slideshow'
db.delete_table(db.shorten_name('mezzanine_slideshows_slideshow_galleries'))
def backwards(self, orm):
# Adding model 'Slideshow'
db.create_table('mezzanine_slideshows_slideshow', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('page', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['pages.RichTextPage'], unique=True)),
))
db.send_create_signal('mezzanine_slideshows', ['Slideshow'])
# Adding M2M table for field galleries on 'Slideshow'
m2m_table_name = db.shorten_name('mezzanine_slideshows_slideshow_galleries')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('slideshow', models.ForeignKey(orm['mezzanine_slideshows.slideshow'], null=False)),
('gallery', models.ForeignKey(orm['galleries.gallery'], null=False))
))
db.create_unique(m2m_table_name, ['slideshow_id', 'gallery_id'])
models = {
}
complete_apps = ['mezzanine_slideshows']
|
Add migration for model app_name change in preceding commit# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Slideshow'
db.delete_table('mezzanine_slideshows_slideshow')
# Removing M2M table for field galleries on 'Slideshow'
db.delete_table(db.shorten_name('mezzanine_slideshows_slideshow_galleries'))
def backwards(self, orm):
# Adding model 'Slideshow'
db.create_table('mezzanine_slideshows_slideshow', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('page', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['pages.RichTextPage'], unique=True)),
))
db.send_create_signal('mezzanine_slideshows', ['Slideshow'])
# Adding M2M table for field galleries on 'Slideshow'
m2m_table_name = db.shorten_name('mezzanine_slideshows_slideshow_galleries')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('slideshow', models.ForeignKey(orm['mezzanine_slideshows.slideshow'], null=False)),
('gallery', models.ForeignKey(orm['galleries.gallery'], null=False))
))
db.create_unique(m2m_table_name, ['slideshow_id', 'gallery_id'])
models = {
}
complete_apps = ['mezzanine_slideshows']
|
<commit_before><commit_msg>Add migration for model app_name change in preceding commit<commit_after># -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Slideshow'
db.delete_table('mezzanine_slideshows_slideshow')
# Removing M2M table for field galleries on 'Slideshow'
db.delete_table(db.shorten_name('mezzanine_slideshows_slideshow_galleries'))
def backwards(self, orm):
# Adding model 'Slideshow'
db.create_table('mezzanine_slideshows_slideshow', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('page', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['pages.RichTextPage'], unique=True)),
))
db.send_create_signal('mezzanine_slideshows', ['Slideshow'])
# Adding M2M table for field galleries on 'Slideshow'
m2m_table_name = db.shorten_name('mezzanine_slideshows_slideshow_galleries')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('slideshow', models.ForeignKey(orm['mezzanine_slideshows.slideshow'], null=False)),
('gallery', models.ForeignKey(orm['galleries.gallery'], null=False))
))
db.create_unique(m2m_table_name, ['slideshow_id', 'gallery_id'])
models = {
}
complete_apps = ['mezzanine_slideshows']
|
|
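A minimal usage sketch for the South migration in the record above, not part of the original record; it assumes a Django project with South installed and mezzanine_slideshows in INSTALLED_APPS, with settings already configured.
# Hedged sketch: apply 0003_auto__del_slideshow through Django's management API.
from django.core.management import call_command
# South's "migrate" command runs forwards() for any unapplied migrations in the app.
call_command('migrate', 'mezzanine_slideshows')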
bb8db2ef08b0431b173205f53a4431fcec3dc535
|
custom/enikshay/management/commands/get_locations_for_bets.py
|
custom/enikshay/management/commands/get_locations_for_bets.py
|
import csv
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import SQLLocation
from custom.enikshay.integrations.bets.repeaters import BETSLocationRepeater
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('domain')
def handle(self, domain, **options):
self.domain = domain
filename = 'eNikshay_locations.csv'
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow([
'name',
'site_code',
'location_id',
'doc_type',
'domain',
'external_id',
'is_archived',
'last_modified',
'latitude',
'longitude',
# TODO exactly which metadata fields do they want? How do we
# serialize the headers, metadata.is_test?
# 'metadata',
'location_type',
'location_type_code',
'parent_location_id',
'parent_site_code',
# They may also want ancestors_by_type, TBD
])
loc_types = BETSLocationRepeater.location_types_to_forward
for loc in (SQLLocation.active_objects
.filter(domain=domain, location_type__name__in=loc_types)
.prefetch_related('parent', 'location_type')):
self.add_loc(loc, writer)
print "Wrote to {}".format(filename)
def add_loc(self, location, writer):
if location.metadata.get('is_test') != "yes":
return
writer.writerow([
location.name,
location.site_code,
location.location_id,
'Location',
location.domain,
location.external_id,
location.is_archived,
location.last_modified.isoformat(),
float(location.latitude) if location.latitude else None,
float(location.longitude) if location.longitude else None,
# location.metadata,
location.location_type.name,
location.location_type.code,
location.parent_location_id,
location.parent.site_code,
# They may also want ancestors_by_type, if so, use
# custom.enikshay.integrations.bets.utils.get_bets_location_json
])
|
Add first pass at BETS location dump
|
Add first pass at BETS location dump
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add first pass at BETS location dump
|
import csv
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import SQLLocation
from custom.enikshay.integrations.bets.repeaters import BETSLocationRepeater
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('domain')
def handle(self, domain, **options):
self.domain = domain
filename = 'eNikshay_locations.csv'
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow([
'name',
'site_code',
'location_id',
'doc_type',
'domain',
'external_id',
'is_archived',
'last_modified',
'latitude',
'longitude',
# TODO exactly which metadata fields do they want? How do we
# serialize the headers, metadata.is_test?
# 'metadata',
'location_type',
'location_type_code',
'parent_location_id',
'parent_site_code',
# They may also want ancestors_by_type, TBD
])
loc_types = BETSLocationRepeater.location_types_to_forward
for loc in (SQLLocation.active_objects
.filter(domain=domain, location_type__name__in=loc_types)
.prefetch_related('parent', 'location_type')):
self.add_loc(loc, writer)
print "Wrote to {}".format(filename)
def add_loc(self, location, writer):
if location.metadata.get('is_test') != "yes":
return
writer.writerow([
location.name,
location.site_code,
location.location_id,
'Location',
location.domain,
location.external_id,
location.is_archived,
location.last_modified.isoformat(),
float(location.latitude) if location.latitude else None,
float(location.longitude) if location.longitude else None,
# location.metadata,
location.location_type.name,
location.location_type.code,
location.parent_location_id,
location.parent.site_code,
# They may also want ancestors_by_type, if so, use
# custom.enikshay.integrations.bets.utils.get_bets_location_json
])
|
<commit_before><commit_msg>Add first pass at BETS location dump<commit_after>
|
import csv
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import SQLLocation
from custom.enikshay.integrations.bets.repeaters import BETSLocationRepeater
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('domain')
def handle(self, domain, **options):
self.domain = domain
filename = 'eNikshay_locations.csv'
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow([
'name',
'site_code',
'location_id',
'doc_type',
'domain',
'external_id',
'is_archived',
'last_modified',
'latitude',
'longitude',
# TODO exactly which metadata fields do they want? How do we
# serialize the headers, metadata.is_test?
# 'metadata',
'location_type',
'location_type_code',
'parent_location_id',
'parent_site_code',
# They may also want ancestors_by_type, TBD
])
loc_types = BETSLocationRepeater.location_types_to_forward
for loc in (SQLLocation.active_objects
.filter(domain=domain, location_type__name__in=loc_types)
.prefetch_related('parent', 'location_type')):
self.add_loc(loc, writer)
print "Wrote to {}".format(filename)
def add_loc(self, location, writer):
if location.metadata.get('is_test') != "yes":
return
writer.writerow([
location.name,
location.site_code,
location.location_id,
'Location',
location.domain,
location.external_id,
location.is_archived,
location.last_modified.isoformat(),
float(location.latitude) if location.latitude else None,
float(location.longitude) if location.longitude else None,
# location.metadata,
location.location_type.name,
location.location_type.code,
location.parent_location_id,
location.parent.site_code,
# They may also want ancestors_by_type, if so, use
# custom.enikshay.integrations.bets.utils.get_bets_location_json
])
|
Add first pass at BETS location dumpimport csv
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import SQLLocation
from custom.enikshay.integrations.bets.repeaters import BETSLocationRepeater
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('domain')
def handle(self, domain, **options):
self.domain = domain
filename = 'eNikshay_locations.csv'
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow([
'name',
'site_code',
'location_id',
'doc_type',
'domain',
'external_id',
'is_archived',
'last_modified',
'latitude',
'longitude',
# TODO exactly which metadata fields do they want? How do we
# serialize the headers, metadata.is_test?
# 'metadata',
'location_type',
'location_type_code',
'parent_location_id',
'parent_site_code',
# They may also want ancestors_by_type, TBD
])
loc_types = BETSLocationRepeater.location_types_to_forward
for loc in (SQLLocation.active_objects
.filter(domain=domain, location_type__name__in=loc_types)
.prefetch_related('parent', 'location_type')):
self.add_loc(loc, writer)
print "Wrote to {}".format(filename)
def add_loc(self, location, writer):
if location.metadata.get('is_test') != "yes":
return
writer.writerow([
location.name,
location.site_code,
location.location_id,
'Location',
location.domain,
location.external_id,
location.is_archived,
location.last_modified.isoformat(),
float(location.latitude) if location.latitude else None,
float(location.longitude) if location.longitude else None,
# location.metadata,
location.location_type.name,
location.location_type.code,
location.parent_location_id,
location.parent.site_code,
# They may also want ancestors_by_type, if so, use
# custom.enikshay.integrations.bets.utils.get_bets_location_json
])
|
<commit_before><commit_msg>Add first pass at BETS location dump<commit_after>import csv
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import SQLLocation
from custom.enikshay.integrations.bets.repeaters import BETSLocationRepeater
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('domain')
def handle(self, domain, **options):
self.domain = domain
filename = 'eNikshay_locations.csv'
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow([
'name',
'site_code',
'location_id',
'doc_type',
'domain',
'external_id',
'is_archived',
'last_modified',
'latitude',
'longitude',
# TODO exactly which metadata fields do they want? How do we
# serialize the headers, metadata.is_test?
# 'metadata',
'location_type',
'location_type_code',
'parent_location_id',
'parent_site_code',
# They may also want ancestors_by_type, TBD
])
loc_types = BETSLocationRepeater.location_types_to_forward
for loc in (SQLLocation.active_objects
.filter(domain=domain, location_type__name__in=loc_types)
.prefetch_related('parent', 'location_type')):
self.add_loc(loc, writer)
print "Wrote to {}".format(filename)
def add_loc(self, location, writer):
if location.metadata.get('is_test') != "yes":
return
writer.writerow([
location.name,
location.site_code,
location.location_id,
'Location',
location.domain,
location.external_id,
location.is_archived,
location.last_modified.isoformat(),
float(location.latitude) if location.latitude else None,
float(location.longitude) if location.longitude else None,
# location.metadata,
location.location_type.name,
location.location_type.code,
location.parent_location_id,
location.parent.site_code,
# They may also want ancestors_by_type, if so, use
# custom.enikshay.integrations.bets.utils.get_bets_location_json
])
|
|
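A hedged usage sketch for the management command in the record above; the domain name "enikshay" is an illustrative assumption, and a fully configured CommCare HQ environment is assumed.
# Runs custom/enikshay/management/commands/get_locations_for_bets.py and writes
# eNikshay_locations.csv into the current working directory.
from django.core.management import call_command
call_command('get_locations_for_bets', 'enikshay')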
a2f991625bc6de9895efb443175b59dfc12d1236
|
src/objective-c/tests/analyze_link_map.py
|
src/objective-c/tests/analyze_link_map.py
|
#!/usr/bin/python
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script analyzes the link map file generated by Xcode. It calculates and
# prints out the sizes of each dependent library and the total sizes of the
# symbols.
# The script takes one parameter, which is the path to the link map file.
import sys
import re
table_tag = {}
state = "start"
table_stats_symbol = {}
table_stats_dead = {}
section_total_size = 0
symbol_total_size = 0
file_import = sys.argv[1]
lines = list(open(file_import))
for line in lines:
line_stripped = line[:-1]
if "# Object files:" == line_stripped:
state = "object"
continue
elif "# Sections:" == line_stripped:
state = "section"
continue
elif "# Symbols:" == line_stripped:
state = "symbol"
continue
elif "# Dead Stripped Symbols:" == line_stripped:
state = "dead"
continue
if state == "object":
segs = re.search('(\[ *[0-9]*\]) (.*)', line_stripped)
table_tag[segs.group(1)] = segs.group(2)
if state == "section":
if len(line_stripped) == 0 or line_stripped[0] == '#':
continue
segs = re.search('^(.+?)\s+(.+?)\s+.*', line_stripped)
section_total_size += int(segs.group(2), 16)
if state == "symbol":
if len(line_stripped) == 0 or line_stripped[0] == '#':
continue
segs = re.search('^.+?\s+(.+?)\s+(\[.+?\]).*', line_stripped)
target = table_tag[segs.group(2)]
target_stripped = re.search('^(.*?)(\(.+?\))?$', target).group(1)
size = int(segs.group(1), 16)
if not target_stripped in table_stats_symbol:
table_stats_symbol[target_stripped] = 0
table_stats_symbol[target_stripped] += size
print("Sections total size: %d" % section_total_size)
for target in table_stats_symbol:
print(target)
print(table_stats_symbol[target])
symbol_total_size += table_stats_symbol[target]
print("Symbols total size: %d" % symbol_total_size)
|
Add Xcode link map analyzer
|
Add Xcode link map analyzer
|
Python
|
apache-2.0
|
mehrdada/grpc,sreecha/grpc,muxi/grpc,vjpai/grpc,grpc/grpc,pszemus/grpc,sreecha/grpc,firebase/grpc,murgatroid99/grpc,chrisdunelm/grpc,ncteisen/grpc,ctiller/grpc,firebase/grpc,jboeuf/grpc,ejona86/grpc,jboeuf/grpc,Vizerai/grpc,nicolasnoble/grpc,grpc/grpc,stanley-cheung/grpc,grpc/grpc,pszemus/grpc,stanley-cheung/grpc,jtattermusch/grpc,mehrdada/grpc,Vizerai/grpc,nicolasnoble/grpc,thinkerou/grpc,ncteisen/grpc,mehrdada/grpc,firebase/grpc,kpayson64/grpc,muxi/grpc,chrisdunelm/grpc,grpc/grpc,nicolasnoble/grpc,thinkerou/grpc,ctiller/grpc,vjpai/grpc,donnadionne/grpc,ctiller/grpc,donnadionne/grpc,Vizerai/grpc,firebase/grpc,simonkuang/grpc,stanley-cheung/grpc,Vizerai/grpc,vjpai/grpc,murgatroid99/grpc,sreecha/grpc,firebase/grpc,sreecha/grpc,jtattermusch/grpc,kpayson64/grpc,dgquintas/grpc,muxi/grpc,mehrdada/grpc,Vizerai/grpc,jboeuf/grpc,thinkerou/grpc,ejona86/grpc,chrisdunelm/grpc,murgatroid99/grpc,jtattermusch/grpc,kpayson64/grpc,nicolasnoble/grpc,pszemus/grpc,ejona86/grpc,vjpai/grpc,simonkuang/grpc,mehrdada/grpc,chrisdunelm/grpc,Vizerai/grpc,firebase/grpc,grpc/grpc,pszemus/grpc,muxi/grpc,nicolasnoble/grpc,jtattermusch/grpc,firebase/grpc,Vizerai/grpc,carl-mastrangelo/grpc,ctiller/grpc,ejona86/grpc,jtattermusch/grpc,grpc/grpc,dgquintas/grpc,ejona86/grpc,muxi/grpc,jboeuf/grpc,ejona86/grpc,vjpai/grpc,muxi/grpc,ncteisen/grpc,dgquintas/grpc,carl-mastrangelo/grpc,jtattermusch/grpc,ctiller/grpc,thinkerou/grpc,carl-mastrangelo/grpc,pszemus/grpc,grpc/grpc,ejona86/grpc,grpc/grpc,sreecha/grpc,ctiller/grpc,firebase/grpc,chrisdunelm/grpc,stanley-cheung/grpc,firebase/grpc,mehrdada/grpc,carl-mastrangelo/grpc,stanley-cheung/grpc,stanley-cheung/grpc,dgquintas/grpc,grpc/grpc,muxi/grpc,donnadionne/grpc,chrisdunelm/grpc,grpc/grpc,mehrdada/grpc,muxi/grpc,ncteisen/grpc,thinkerou/grpc,ncteisen/grpc,jtattermusch/grpc,stanley-cheung/grpc,jboeuf/grpc,firebase/grpc,donnadionne/grpc,vjpai/grpc,grpc/grpc,jtattermusch/grpc,dgquintas/grpc,jtattermusch/grpc,mehrdada/grpc,donnadionne/grpc,ncteisen/grpc,muxi/grpc,thinkerou/grpc,pszemus/grpc,pszemus/grpc,mehrdada/grpc,dgquintas/grpc,sreecha/grpc,nicolasnoble/grpc,murgatroid99/grpc,kpayson64/grpc,grpc/grpc,simonkuang/grpc,pszemus/grpc,vjpai/grpc,sreecha/grpc,sreecha/grpc,simonkuang/grpc,carl-mastrangelo/grpc,nicolasnoble/grpc,ejona86/grpc,vjpai/grpc,muxi/grpc,murgatroid99/grpc,dgquintas/grpc,jboeuf/grpc,chrisdunelm/grpc,jtattermusch/grpc,stanley-cheung/grpc,vjpai/grpc,jboeuf/grpc,ctiller/grpc,kpayson64/grpc,nicolasnoble/grpc,ejona86/grpc,chrisdunelm/grpc,pszemus/grpc,nicolasnoble/grpc,stanley-cheung/grpc,murgatroid99/grpc,simonkuang/grpc,sreecha/grpc,jboeuf/grpc,thinkerou/grpc,ctiller/grpc,simonkuang/grpc,dgquintas/grpc,thinkerou/grpc,chrisdunelm/grpc,dgquintas/grpc,nicolasnoble/grpc,ctiller/grpc,muxi/grpc,kpayson64/grpc,kpayson64/grpc,Vizerai/grpc,nicolasnoble/grpc,pszemus/grpc,muxi/grpc,vjpai/grpc,jtattermusch/grpc,chrisdunelm/grpc,ncteisen/grpc,ncteisen/grpc,jboeuf/grpc,murgatroid99/grpc,sreecha/grpc,ctiller/grpc,jtattermusch/grpc,nicolasnoble/grpc,carl-mastrangelo/grpc,jboeuf/grpc,carl-mastrangelo/grpc,stanley-cheung/grpc,ejona86/grpc,sreecha/grpc,stanley-cheung/grpc,simonkuang/grpc,chrisdunelm/grpc,thinkerou/grpc,donnadionne/grpc,thinkerou/grpc,ejona86/grpc,ncteisen/grpc,Vizerai/grpc,stanley-cheung/grpc,carl-mastrangelo/grpc,dgquintas/grpc,simonkuang/grpc,pszemus/grpc,ctiller/grpc,dgquintas/grpc,kpayson64/grpc,ncteisen/grpc,ncteisen/grpc,thinkerou/grpc,murgatroid99/grpc,simonkuang/grpc,vjpai/grpc,Vizerai/grpc,carl-mastrangelo/grpc,murgatroid99/grpc,carl-mastrangelo/grpc,kpayson64/grpc,
ctiller/grpc,sreecha/grpc,mehrdada/grpc,mehrdada/grpc,kpayson64/grpc,donnadionne/grpc,donnadionne/grpc,vjpai/grpc,mehrdada/grpc,kpayson64/grpc,ejona86/grpc,donnadionne/grpc,ncteisen/grpc,Vizerai/grpc,murgatroid99/grpc,pszemus/grpc,firebase/grpc,donnadionne/grpc,donnadionne/grpc,carl-mastrangelo/grpc,firebase/grpc,donnadionne/grpc,thinkerou/grpc,carl-mastrangelo/grpc,jboeuf/grpc,jboeuf/grpc
|
Add Xcode link map analyzer
|
#!/usr/bin/python
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script analyzes the link map file generated by Xcode. It calculates and
# prints out the sizes of each dependent library and the total sizes of the
# symbols.
# The script takes one parameter, which is the path to the link map file.
import sys
import re
table_tag = {}
state = "start"
table_stats_symbol = {}
table_stats_dead = {}
section_total_size = 0
symbol_total_size = 0
file_import = sys.argv[1]
lines = list(open(file_import))
for line in lines:
line_stripped = line[:-1]
if "# Object files:" == line_stripped:
state = "object"
continue
elif "# Sections:" == line_stripped:
state = "section"
continue
elif "# Symbols:" == line_stripped:
state = "symbol"
continue
elif "# Dead Stripped Symbols:" == line_stripped:
state = "dead"
continue
if state == "object":
segs = re.search('(\[ *[0-9]*\]) (.*)', line_stripped)
table_tag[segs.group(1)] = segs.group(2)
if state == "section":
if len(line_stripped) == 0 or line_stripped[0] == '#':
continue
segs = re.search('^(.+?)\s+(.+?)\s+.*', line_stripped)
section_total_size += int(segs.group(2), 16)
if state == "symbol":
if len(line_stripped) == 0 or line_stripped[0] == '#':
continue
segs = re.search('^.+?\s+(.+?)\s+(\[.+?\]).*', line_stripped)
target = table_tag[segs.group(2)]
target_stripped = re.search('^(.*?)(\(.+?\))?$', target).group(1)
size = int(segs.group(1), 16)
if not target_stripped in table_stats_symbol:
table_stats_symbol[target_stripped] = 0
table_stats_symbol[target_stripped] += size
print("Sections total size: %d" % section_total_size)
for target in table_stats_symbol:
print(target)
print(table_stats_symbol[target])
symbol_total_size += table_stats_symbol[target]
print("Symbols total size: %d" % symbol_total_size)
|
<commit_before><commit_msg>Add Xcode link map analyzer<commit_after>
|
#!/usr/bin/python
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script analyzes the link map file generated by Xcode. It calculates and
# prints out the sizes of each dependent library and the total sizes of the
# symbols.
# The script takes one parameter, which is the path to the link map file.
import sys
import re
table_tag = {}
state = "start"
table_stats_symbol = {}
table_stats_dead = {}
section_total_size = 0
symbol_total_size = 0
file_import = sys.argv[1]
lines = list(open(file_import))
for line in lines:
line_stripped = line[:-1]
if "# Object files:" == line_stripped:
state = "object"
continue
elif "# Sections:" == line_stripped:
state = "section"
continue
elif "# Symbols:" == line_stripped:
state = "symbol"
continue
elif "# Dead Stripped Symbols:" == line_stripped:
state = "dead"
continue
if state == "object":
segs = re.search('(\[ *[0-9]*\]) (.*)', line_stripped)
table_tag[segs.group(1)] = segs.group(2)
if state == "section":
if len(line_stripped) == 0 or line_stripped[0] == '#':
continue
segs = re.search('^(.+?)\s+(.+?)\s+.*', line_stripped)
section_total_size += int(segs.group(2), 16)
if state == "symbol":
if len(line_stripped) == 0 or line_stripped[0] == '#':
continue
segs = re.search('^.+?\s+(.+?)\s+(\[.+?\]).*', line_stripped)
target = table_tag[segs.group(2)]
target_stripped = re.search('^(.*?)(\(.+?\))?$', target).group(1)
size = int(segs.group(1), 16)
if not target_stripped in table_stats_symbol:
table_stats_symbol[target_stripped] = 0
table_stats_symbol[target_stripped] += size
print("Sections total size: %d" % section_total_size)
for target in table_stats_symbol:
print(target)
print(table_stats_symbol[target])
symbol_total_size += table_stats_symbol[target]
print("Symbols total size: %d" % symbol_total_size)
|
Add Xcode link map analyzer#!/usr/bin/python
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script analyzes the link map file generated by Xcode. It calculates and
# prints out the sizes of each dependent library and the total sizes of the
# symbols.
# The script takes one parameter, which is the path to the link map file.
import sys
import re
table_tag = {}
state = "start"
table_stats_symbol = {}
table_stats_dead = {}
section_total_size = 0
symbol_total_size = 0
file_import = sys.argv[1]
lines = list(open(file_import))
for line in lines:
line_stripped = line[:-1]
if "# Object files:" == line_stripped:
state = "object"
continue
elif "# Sections:" == line_stripped:
state = "section"
continue
elif "# Symbols:" == line_stripped:
state = "symbol"
continue
elif "# Dead Stripped Symbols:" == line_stripped:
state = "dead"
continue
if state == "object":
segs = re.search('(\[ *[0-9]*\]) (.*)', line_stripped)
table_tag[segs.group(1)] = segs.group(2)
if state == "section":
if len(line_stripped) == 0 or line_stripped[0] == '#':
continue
segs = re.search('^(.+?)\s+(.+?)\s+.*', line_stripped)
section_total_size += int(segs.group(2), 16)
if state == "symbol":
if len(line_stripped) == 0 or line_stripped[0] == '#':
continue
segs = re.search('^.+?\s+(.+?)\s+(\[.+?\]).*', line_stripped)
target = table_tag[segs.group(2)]
target_stripped = re.search('^(.*?)(\(.+?\))?$', target).group(1)
size = int(segs.group(1), 16)
if not target_stripped in table_stats_symbol:
table_stats_symbol[target_stripped] = 0
table_stats_symbol[target_stripped] += size
print("Sections total size: %d" % section_total_size)
for target in table_stats_symbol:
print(target)
print(table_stats_symbol[target])
symbol_total_size += table_stats_symbol[target]
print("Symbols total size: %d" % symbol_total_size)
|
<commit_before><commit_msg>Add Xcode link map analyzer<commit_after>#!/usr/bin/python
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script analyzes the link map file generated by Xcode. It calculates and
# prints out the sizes of each dependent library and the total sizes of the
# symbols.
# The script takes one parameter, which is the path to the link map file.
import sys
import re
table_tag = {}
state = "start"
table_stats_symbol = {}
table_stats_dead = {}
section_total_size = 0
symbol_total_size = 0
file_import = sys.argv[1]
lines = list(open(file_import))
for line in lines:
line_stripped = line[:-1]
if "# Object files:" == line_stripped:
state = "object"
continue
elif "# Sections:" == line_stripped:
state = "section"
continue
elif "# Symbols:" == line_stripped:
state = "symbol"
continue
elif "# Dead Stripped Symbols:" == line_stripped:
state = "dead"
continue
if state == "object":
segs = re.search('(\[ *[0-9]*\]) (.*)', line_stripped)
table_tag[segs.group(1)] = segs.group(2)
if state == "section":
if len(line_stripped) == 0 or line_stripped[0] == '#':
continue
segs = re.search('^(.+?)\s+(.+?)\s+.*', line_stripped)
section_total_size += int(segs.group(2), 16)
if state == "symbol":
if len(line_stripped) == 0 or line_stripped[0] == '#':
continue
segs = re.search('^.+?\s+(.+?)\s+(\[.+?\]).*', line_stripped)
target = table_tag[segs.group(2)]
target_stripped = re.search('^(.*?)(\(.+?\))?$', target).group(1)
size = int(segs.group(1), 16)
if not target_stripped in table_stats_symbol:
table_stats_symbol[target_stripped] = 0
table_stats_symbol[target_stripped] += size
print("Sections total size: %d" % section_total_size)
for target in table_stats_symbol:
print(target)
print(table_stats_symbol[target])
symbol_total_size += table_stats_symbol[target]
print("Symbols total size: %d" % symbol_total_size)
|
|
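A hedged invocation sketch for the analyzer in the record above; the link map path is an assumption. Xcode writes such a file when the target's "Write Link Map File" build setting is enabled.
# Hypothetical paths; the script prints per-library symbol sizes and totals to stdout.
import subprocess
subprocess.check_call([
    'python', 'src/objective-c/tests/analyze_link_map.py',
    'build/Sample-LinkMap-normal-arm64.txt',  # assumed link map location
])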
e89997b998766a257ca9ce58b0528bd0b7504ed1
|
tools/workspace_status_release.py
|
tools/workspace_status_release.py
|
import os
import sys
# As this plugin is typically only sym-linked into a gerrit checkout and both os.getcwd and
# os.path.abspath follow symbolic links, they would not allow us to find the gerrit root
# directory. So we have to resort to the PWD environment variable to find the place we're
# symlinked to.
#
# We append __file__ to avoid requiring the script to be run from a well-known directory.
ABS_FILE_PARTS = os.path.join(os.getenv('PWD'), __file__).split(os.sep)
PLUGIN_NAME = ABS_FILE_PARTS[-3]
GERRIT_ROOT = os.sep.join(ABS_FILE_PARTS[:-4])
sys.path = [os.sep.join([GERRIT_ROOT, 'tools'])] + sys.path
from workspace_status_release import revision
def get_plugin_revision(name):
os.chdir(os.path.join(GERRIT_ROOT, 'plugins', name))
ret=revision(GERRIT_VERSION)
return ret
os.chdir(GERRIT_ROOT)
GERRIT_VERSION=revision()
ITS_BASE_VERSION=get_plugin_revision('its-base')
PLUGIN_RAW_VERSION=get_plugin_revision(PLUGIN_NAME)
PLUGIN_FULL_VERSION="%s(its-base:%s)" % (PLUGIN_RAW_VERSION, ITS_BASE_VERSION)
print("STABLE_BUILD_%s_LABEL %s" % (PLUGIN_NAME.upper(), PLUGIN_FULL_VERSION))
|
Add custom stamp to add its-base revision to version number
|
Add custom stamp to add its-base revision to version number
Change-Id: I87bacf25e25e58ee4c648963912056726cd297dc
|
Python
|
apache-2.0
|
GerritCodeReview/plugins_its-phabricator,GerritCodeReview/plugins_its-phabricator
|
Add custom stamp to add its-base revision to version number
Change-Id: I87bacf25e25e58ee4c648963912056726cd297dc
|
import os
import sys
# As this plugin is typically only sym-linked into a gerrit checkout and both os.getcwd and
# os.path.abspath follow symbolic links, they would not allow us to find the gerrit root
# directory. So we have to resort to the PWD environment variable to find the place we're
# symlinked to.
#
# We append __file__ to avoid requiring the script to be run from a well-known directory.
ABS_FILE_PARTS = os.path.join(os.getenv('PWD'), __file__).split(os.sep)
PLUGIN_NAME = ABS_FILE_PARTS[-3]
GERRIT_ROOT = os.sep.join(ABS_FILE_PARTS[:-4])
sys.path = [os.sep.join([GERRIT_ROOT, 'tools'])] + sys.path
from workspace_status_release import revision
def get_plugin_revision(name):
os.chdir(os.path.join(GERRIT_ROOT, 'plugins', name))
ret=revision(GERRIT_VERSION)
return ret
os.chdir(GERRIT_ROOT)
GERRIT_VERSION=revision()
ITS_BASE_VERSION=get_plugin_revision('its-base')
PLUGIN_RAW_VERSION=get_plugin_revision(PLUGIN_NAME)
PLUGIN_FULL_VERSION="%s(its-base:%s)" % (PLUGIN_RAW_VERSION, ITS_BASE_VERSION)
print("STABLE_BUILD_%s_LABEL %s" % (PLUGIN_NAME.upper(), PLUGIN_FULL_VERSION))
|
<commit_before><commit_msg>Add custom stamp to add its-base revision to version number
Change-Id: I87bacf25e25e58ee4c648963912056726cd297dc<commit_after>
|
import os
import sys
# As this plugin is typically only sym-linked into a gerrit checkout and both os.getcwd and
# os.path.abspath follow symbolic links, they would not allow us to find the gerrit root
# directory. So we have to resort to the PWD environment variable to find the place we're
# symlinked to.
#
# We append __file__ to avoid requiring the script to be run from a well-known directory.
ABS_FILE_PARTS = os.path.join(os.getenv('PWD'), __file__).split(os.sep)
PLUGIN_NAME = ABS_FILE_PARTS[-3]
GERRIT_ROOT = os.sep.join(ABS_FILE_PARTS[:-4])
sys.path = [os.sep.join([GERRIT_ROOT, 'tools'])] + sys.path
from workspace_status_release import revision
def get_plugin_revision(name):
os.chdir(os.path.join(GERRIT_ROOT, 'plugins', name))
ret=revision(GERRIT_VERSION)
return ret
os.chdir(GERRIT_ROOT)
GERRIT_VERSION=revision()
ITS_BASE_VERSION=get_plugin_revision('its-base')
PLUGIN_RAW_VERSION=get_plugin_revision(PLUGIN_NAME)
PLUGIN_FULL_VERSION="%s(its-base:%s)" % (PLUGIN_RAW_VERSION, ITS_BASE_VERSION)
print("STABLE_BUILD_%s_LABEL %s" % (PLUGIN_NAME.upper(), PLUGIN_FULL_VERSION))
|
Add custom stamp to add its-base revision to version number
Change-Id: I87bacf25e25e58ee4c648963912056726cd297dcimport os
import sys
# As this plugin is typically only sym-linked into a gerrit checkout and both os.getcwd and
# os.path.abspath follow symbolic links, they would not allow us to find the gerrit root
# directory. So we have to resort to the PWD environment variable to find the place we're
# symlinked to.
#
# We append __file__ to avoid requiring the script to be run from a well-known directory.
ABS_FILE_PARTS = os.path.join(os.getenv('PWD'), __file__).split(os.sep)
PLUGIN_NAME = ABS_FILE_PARTS[-3]
GERRIT_ROOT = os.sep.join(ABS_FILE_PARTS[:-4])
sys.path = [os.sep.join([GERRIT_ROOT, 'tools'])] + sys.path
from workspace_status_release import revision
def get_plugin_revision(name):
os.chdir(os.path.join(GERRIT_ROOT, 'plugins', name))
ret=revision(GERRIT_VERSION)
return ret
os.chdir(GERRIT_ROOT)
GERRIT_VERSION=revision()
ITS_BASE_VERSION=get_plugin_revision('its-base')
PLUGIN_RAW_VERSION=get_plugin_revision(PLUGIN_NAME)
PLUGIN_FULL_VERSION="%s(its-base:%s)" % (PLUGIN_RAW_VERSION, ITS_BASE_VERSION)
print("STABLE_BUILD_%s_LABEL %s" % (PLUGIN_NAME.upper(), PLUGIN_FULL_VERSION))
|
<commit_before><commit_msg>Add custom stamp to add its-base revision to version number
Change-Id: I87bacf25e25e58ee4c648963912056726cd297dc<commit_after>import os
import sys
# As this plugin is typically only sym-linked into a gerrit checkout and both os.getcwd and
# os.path.abspath follow symbolic links, they would not allow us to find the gerrit root
# directory. So we have to resort to the PWD environment variable to find the place we're
# symlinked to.
#
# We append __file__ to avoid requiring the script to be run from a well-known directory.
ABS_FILE_PARTS = os.path.join(os.getenv('PWD'), __file__).split(os.sep)
PLUGIN_NAME = ABS_FILE_PARTS[-3]
GERRIT_ROOT = os.sep.join(ABS_FILE_PARTS[:-4])
sys.path = [os.sep.join([GERRIT_ROOT, 'tools'])] + sys.path
from workspace_status_release import revision
def get_plugin_revision(name):
os.chdir(os.path.join(GERRIT_ROOT, 'plugins', name))
ret=revision(GERRIT_VERSION)
return ret
os.chdir(GERRIT_ROOT)
GERRIT_VERSION=revision()
ITS_BASE_VERSION=get_plugin_revision('its-base')
PLUGIN_RAW_VERSION=get_plugin_revision(PLUGIN_NAME)
PLUGIN_FULL_VERSION="%s(its-base:%s)" % (PLUGIN_RAW_VERSION, ITS_BASE_VERSION)
print("STABLE_BUILD_%s_LABEL %s" % (PLUGIN_NAME.upper(), PLUGIN_FULL_VERSION))
|
|
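A hedged sketch of how the stamp script in the record above is typically consumed; Bazel runs it via --workspace_status_command and reads one "STABLE_..." key/value pair per output line. The checkout path below is an assumption.
# Run the script from a gerrit tree and parse its single stamp line.
import subprocess
out = subprocess.check_output(
    ['python', 'plugins/its-phabricator/tools/workspace_status_release.py']  # assumed layout
).decode()
key, _, value = out.strip().partition(' ')
print(key, '->', value)  # e.g. STABLE_BUILD_ITS-PHABRICATOR_LABEL -> <version>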
0ff59da2bd80d28002b3a963dee5bc48ad851ad8
|
migrations/versions/d03236a0cda4_add_corporation_flag.py
|
migrations/versions/d03236a0cda4_add_corporation_flag.py
|
"""add corporation flag
Revision ID: d03236a0cda4
Revises: 47e567919169
Create Date: 2017-04-12 13:21:03.482000
"""
# revision identifiers, used by Alembic.
import sqlalchemy as sa
from alembic import op
from lazyblacksmith.models import Blueprint
from lazyblacksmith.models import db
revision = 'd03236a0cda4'
down_revision = '47e567919169'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('blueprint',
sa.Column('corporation', sa.Boolean(), nullable=False))
Blueprint.query.update({'corporation': False})
db.session.commit()
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('blueprint', 'corporation')
# ### end Alembic commands ###
|
Add corporation flag in blueprints
|
Add corporation flag in blueprints
|
Python
|
bsd-3-clause
|
Kyria/LazyBlacksmith,Kyria/LazyBlacksmith,Kyria/LazyBlacksmith,Kyria/LazyBlacksmith
|
Add corporation flag in blueprints
|
"""add corporation flag
Revision ID: d03236a0cda4
Revises: 47e567919169
Create Date: 2017-04-12 13:21:03.482000
"""
# revision identifiers, used by Alembic.
import sqlalchemy as sa
from alembic import op
from lazyblacksmith.models import Blueprint
from lazyblacksmith.models import db
revision = 'd03236a0cda4'
down_revision = '47e567919169'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('blueprint',
sa.Column('corporation', sa.Boolean(), nullable=False))
Blueprint.query.update({'corporation': False})
db.session.commit()
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('blueprint', 'corporation')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add corporation flag in blueprints<commit_after>
|
"""add corporation flag
Revision ID: d03236a0cda4
Revises: 47e567919169
Create Date: 2017-04-12 13:21:03.482000
"""
# revision identifiers, used by Alembic.
import sqlalchemy as sa
from alembic import op
from lazyblacksmith.models import Blueprint
from lazyblacksmith.models import db
revision = 'd03236a0cda4'
down_revision = '47e567919169'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('blueprint',
sa.Column('corporation', sa.Boolean(), nullable=False))
Blueprint.query.update({'corporation': False})
db.session.commit()
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('blueprint', 'corporation')
# ### end Alembic commands ###
|
Add corporation flag in blueprints"""add corporation flag
Revision ID: d03236a0cda4
Revises: 47e567919169
Create Date: 2017-04-12 13:21:03.482000
"""
# revision identifiers, used by Alembic.
import sqlalchemy as sa
from alembic import op
from lazyblacksmith.models import Blueprint
from lazyblacksmith.models import db
revision = 'd03236a0cda4'
down_revision = '47e567919169'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('blueprint',
sa.Column('corporation', sa.Boolean(), nullable=False))
Blueprint.query.update({'corporation': False})
db.session.commit()
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('blueprint', 'corporation')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add corporation flag in blueprints<commit_after>"""add corporation flag
Revision ID: d03236a0cda4
Revises: 47e567919169
Create Date: 2017-04-12 13:21:03.482000
"""
# revision identifiers, used by Alembic.
import sqlalchemy as sa
from alembic import op
from lazyblacksmith.models import Blueprint
from lazyblacksmith.models import db
revision = 'd03236a0cda4'
down_revision = '47e567919169'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('blueprint',
sa.Column('corporation', sa.Boolean(), nullable=False))
Blueprint.query.update({'corporation': False})
db.session.commit()
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('blueprint', 'corporation')
# ### end Alembic commands ###
|
|
7834829bf3d2d97814366ca62826c2c19fd9144e
|
EXAMPLE-npyscreenreactor-client.py
|
EXAMPLE-npyscreenreactor-client.py
|
#!/usr/bin/env python
import npyscreen
import curses
class EditorFormExample(npyscreen.FormMutt):
MAIN_WIDGET_CLASS = npyscreen.BufferPager
def __init__(self, *args, **keywords):
super(npyscreen.FormMutt, self).__init__(*args, **keywords)
self.wCommand.add_handlers({
curses.ascii.NL : self.do_line,
curses.ascii.CR : self.do_line,
})
def do_line(self,name):
self.parentApp.process_line()
self.display()
# needs handler ....
# on enter ...
class TestApp(npyscreen.NPSApp):
def process_line(self):
self.F.wMain.buffer((self.F.wCommand.value,))
self.F.wCommand.value = ""
self.F.display()
def main(self):
self.F = EditorFormExample(parentApp=self)
self.F.wStatus1.value = "Status Line "
self.F.wStatus2.value = "Enter text to send ...."
self.F.wMain.buffer(("boo\n", "book\n"))
self.F.wMain.buffer(("boo\n", "book\n"))
self.F.edit()
if __name__ == "__main__":
App = TestApp()
App.run()
|
Add framework for example client
|
Add framework for example client
|
Python
|
mit
|
mtearle/npyscreenreactor
|
Add framework for example client
|
#!/usr/bin/env python
import npyscreen
import curses
class EditorFormExample(npyscreen.FormMutt):
MAIN_WIDGET_CLASS = npyscreen.BufferPager
def __init__(self, *args, **keywords):
super(npyscreen.FormMutt, self).__init__(*args, **keywords)
self.wCommand.add_handlers({
curses.ascii.NL : self.do_line,
curses.ascii.CR : self.do_line,
})
def do_line(self,name):
self.parentApp.process_line()
self.display()
# needs handler ....
# on enter ...
class TestApp(npyscreen.NPSApp):
def process_line(self):
self.F.wMain.buffer((self.F.wCommand.value,))
self.F.wCommand.value = ""
self.F.display()
def main(self):
self.F = EditorFormExample(parentApp=self)
self.F.wStatus1.value = "Status Line "
self.F.wStatus2.value = "Enter text to send ...."
self.F.wMain.buffer(("boo\n", "book\n"))
self.F.wMain.buffer(("boo\n", "book\n"))
self.F.edit()
if __name__ == "__main__":
App = TestApp()
App.run()
|
<commit_before><commit_msg>Add framework for example client<commit_after>
|
#!/usr/bin/env python
import npyscreen
import curses
class EditorFormExample(npyscreen.FormMutt):
MAIN_WIDGET_CLASS = npyscreen.BufferPager
def __init__(self, *args, **keywords):
super(npyscreen.FormMutt, self).__init__(*args, **keywords)
self.wCommand.add_handlers({
curses.ascii.NL : self.do_line,
curses.ascii.CR : self.do_line,
})
def do_line(self,name):
self.parentApp.process_line()
self.display()
# needs handler ....
# on enter ...
class TestApp(npyscreen.NPSApp):
def process_line(self):
self.F.wMain.buffer((self.F.wCommand.value,))
self.F.wCommand.value = ""
self.F.display()
def main(self):
self.F = EditorFormExample(parentApp=self)
self.F.wStatus1.value = "Status Line "
self.F.wStatus2.value = "Enter text to send ...."
self.F.wMain.buffer(("boo\n", "book\n"))
self.F.wMain.buffer(("boo\n", "book\n"))
self.F.edit()
if __name__ == "__main__":
App = TestApp()
App.run()
|
Add framework for example client#!/usr/bin/env python
import npyscreen
import curses
class EditorFormExample(npyscreen.FormMutt):
MAIN_WIDGET_CLASS = npyscreen.BufferPager
def __init__(self, *args, **keywords):
super(npyscreen.FormMutt, self).__init__(*args, **keywords)
self.wCommand.add_handlers({
curses.ascii.NL : self.do_line,
curses.ascii.CR : self.do_line,
})
def do_line(self,name):
self.parentApp.process_line()
self.display()
# needs handler ....
# on enter ...
class TestApp(npyscreen.NPSApp):
def process_line(self):
self.F.wMain.buffer((self.F.wCommand.value,))
self.F.wCommand.value = ""
self.F.display()
def main(self):
self.F = EditorFormExample(parentApp=self)
self.F.wStatus1.value = "Status Line "
self.F.wStatus2.value = "Enter text to send ...."
self.F.wMain.buffer(("boo\n", "book\n"))
self.F.wMain.buffer(("boo\n", "book\n"))
self.F.edit()
if __name__ == "__main__":
App = TestApp()
App.run()
|
<commit_before><commit_msg>Add framework for example client<commit_after>#!/usr/bin/env python
import npyscreen
import curses
class EditorFormExample(npyscreen.FormMutt):
MAIN_WIDGET_CLASS = npyscreen.BufferPager
def __init__(self, *args, **keywords):
super(npyscreen.FormMutt, self).__init__(*args, **keywords)
self.wCommand.add_handlers({
curses.ascii.NL : self.do_line,
curses.ascii.CR : self.do_line,
})
def do_line(self,name):
self.parentApp.process_line()
self.display()
# needs handler ....
# on enter ...
class TestApp(npyscreen.NPSApp):
def process_line(self):
self.F.wMain.buffer((self.F.wCommand.value,))
self.F.wCommand.value = ""
self.F.display()
def main(self):
self.F = EditorFormExample(parentApp=self)
self.F.wStatus1.value = "Status Line "
self.F.wStatus2.value = "Enter text to send ...."
self.F.wMain.buffer(("boo\n", "book\n"))
self.F.wMain.buffer(("boo\n", "book\n"))
self.F.edit()
if __name__ == "__main__":
App = TestApp()
App.run()
|
|
4e6028b528592a5b23ff13f40422a3d850e1fd44
|
repl.py
|
repl.py
|
"""A repl across hosts."""
import ast
import sys
import readline
import traceback
from io import StringIO
def read_stmt():
stmt = ''
prompt = '>>> '
indented = False
while True:
try:
line = input(prompt)
except EOFError:
print()
sys.exit(0)
stmt += line + '\n'
try:
ast.parse(stmt)
except SyntaxError as e:
msg = e.args[0]
if msg == 'unexpected EOF while parsing':
prompt = '... '
continue
raise
else:
if line.startswith((' ', '\t')) and prompt == '... ':
continue
return stmt
namespace = {}
def runit(stmt):
code = compile(stmt, '<stdin>', 'single', dont_inherit=True)
sys.stdout = StringIO()
try:
result = exec(code, namespace)
except Exception:
return False, traceback.format_exc()
return True, sys.stdout.getvalue()
def dorepl(group):
from chopsticks.tunnel import ErrorResult
try:
stmt = read_stmt()
except Exception:
traceback.print_exc()
return
results = group.call(runit, stmt)
vals = list(results.values())
if all(vals[0] == v for v in vals[1:]):
results = {'all %d' % len(vals): vals[0]}
for host, result in sorted(results.items()):
if isinstance(result, ErrorResult):
success = False
result = result.msg
else:
success, result = result
color = '32' if success else '31'
if sys.stderr.isatty():
fmt = '\x1b[{color}m[{host}]\x1b[0m {l}'
else:
fmt = '[{host}] {l}'
for l in result.splitlines():
print(fmt.format(host=host, color=color, l=l))
if __name__ == '__main__':
from chopsticks.tunnel import Docker
from chopsticks.group import Group
group = Group([
Docker('python3.4', image='python:3.4'),
Docker('python3.5', image='python:3.5'),
Docker('python3.6', image='python:3.6'),
])
try:
while True:
dorepl(group)
finally:
del group
|
Add REPL that evaluates code on various Python versions
|
Add REPL that evaluates code on various Python versions
|
Python
|
apache-2.0
|
lordmauve/chopsticks,lordmauve/chopsticks
|
Add REPL that evaluates code on various Python versions
|
"""A repl across hosts."""
import ast
import sys
import readline
import traceback
from io import StringIO
def read_stmt():
stmt = ''
prompt = '>>> '
indented = False
while True:
try:
line = input(prompt)
except EOFError:
print()
sys.exit(0)
stmt += line + '\n'
try:
ast.parse(stmt)
except SyntaxError as e:
msg = e.args[0]
if msg == 'unexpected EOF while parsing':
prompt = '... '
continue
raise
else:
if line.startswith((' ', '\t')) and prompt == '... ':
continue
return stmt
namespace = {}
def runit(stmt):
code = compile(stmt, '<stdin>', 'single', dont_inherit=True)
sys.stdout = StringIO()
try:
result = exec(code, namespace)
except Exception:
return False, traceback.format_exc()
return True, sys.stdout.getvalue()
def dorepl(group):
from chopsticks.tunnel import ErrorResult
try:
stmt = read_stmt()
except Exception:
traceback.print_exc()
return
results = group.call(runit, stmt)
vals = list(results.values())
if all(vals[0] == v for v in vals[1:]):
results = {'all %d' % len(vals): vals[0]}
for host, result in sorted(results.items()):
if isinstance(result, ErrorResult):
success = False
result = result.msg
else:
success, result = result
color = '32' if success else '31'
if sys.stderr.isatty():
fmt = '\x1b[{color}m[{host}]\x1b[0m {l}'
else:
fmt = '[{host}] {l}'
for l in result.splitlines():
print(fmt.format(host=host, color=color, l=l))
if __name__ == '__main__':
from chopsticks.tunnel import Docker
from chopsticks.group import Group
group = Group([
Docker('python3.4', image='python:3.4'),
Docker('python3.5', image='python:3.5'),
Docker('python3.6', image='python:3.6'),
])
try:
while True:
dorepl(group)
finally:
del group
|
<commit_before><commit_msg>Add REPL that evaluates code on various Python versions<commit_after>
|
"""A repl across hosts."""
import ast
import sys
import readline
import traceback
from io import StringIO
def read_stmt():
stmt = ''
prompt = '>>> '
indented = False
while True:
try:
line = input(prompt)
except EOFError:
print()
sys.exit(0)
stmt += line + '\n'
try:
ast.parse(stmt)
except SyntaxError as e:
msg = e.args[0]
if msg == 'unexpected EOF while parsing':
prompt = '... '
continue
raise
else:
if line.startswith((' ', '\t')) and prompt == '... ':
continue
return stmt
namespace = {}
def runit(stmt):
code = compile(stmt, '<stdin>', 'single', dont_inherit=True)
sys.stdout = StringIO()
try:
result = exec(code, namespace)
except Exception:
return False, traceback.format_exc()
return True, sys.stdout.getvalue()
def dorepl(group):
from chopsticks.tunnel import ErrorResult
try:
stmt = read_stmt()
except Exception:
traceback.print_exc()
return
results = group.call(runit, stmt)
vals = list(results.values())
if all(vals[0] == v for v in vals[1:]):
results = {'all %d' % len(vals): vals[0]}
for host, result in sorted(results.items()):
if isinstance(result, ErrorResult):
success = False
result = result.msg
else:
success, result = result
color = '32' if success else '31'
if sys.stderr.isatty():
fmt = '\x1b[{color}m[{host}]\x1b[0m {l}'
else:
fmt = '[{host}] {l}'
for l in result.splitlines():
print(fmt.format(host=host, color=color, l=l))
if __name__ == '__main__':
from chopsticks.tunnel import Docker
from chopsticks.group import Group
group = Group([
Docker('python3.4', image='python:3.4'),
Docker('python3.5', image='python:3.5'),
Docker('python3.6', image='python:3.6'),
])
try:
while True:
dorepl(group)
finally:
del group
|
Add REPL that evaluates code on various Python versions"""A repl across hosts."""
import ast
import sys
import readline
import traceback
from io import StringIO
def read_stmt():
stmt = ''
prompt = '>>> '
indented = False
while True:
try:
line = input(prompt)
except EOFError:
print()
sys.exit(0)
stmt += line + '\n'
try:
ast.parse(stmt)
except SyntaxError as e:
msg = e.args[0]
if msg == 'unexpected EOF while parsing':
prompt = '... '
continue
raise
else:
if line.startswith((' ', '\t')) and prompt == '... ':
continue
return stmt
namespace = {}
def runit(stmt):
code = compile(stmt, '<stdin>', 'single', dont_inherit=True)
sys.stdout = StringIO()
try:
result = exec(code, namespace)
except Exception:
return False, traceback.format_exc()
return True, sys.stdout.getvalue()
def dorepl(group):
from chopsticks.tunnel import ErrorResult
try:
stmt = read_stmt()
except Exception:
traceback.print_exc()
return
results = group.call(runit, stmt)
vals = list(results.values())
if all(vals[0] == v for v in vals[1:]):
results = {'all %d' % len(vals): vals[0]}
for host, result in sorted(results.items()):
if isinstance(result, ErrorResult):
success = False
result = result.msg
else:
success, result = result
color = '32' if success else '31'
if sys.stderr.isatty():
fmt = '\x1b[{color}m[{host}]\x1b[0m {l}'
else:
fmt = '[{host}] {l}'
for l in result.splitlines():
print(fmt.format(host=host, color=color, l=l))
if __name__ == '__main__':
from chopsticks.tunnel import Docker
from chopsticks.group import Group
group = Group([
Docker('python3.4', image='python:3.4'),
Docker('python3.5', image='python:3.5'),
Docker('python3.6', image='python:3.6'),
])
try:
while True:
dorepl(group)
finally:
del group
|
<commit_before><commit_msg>Add REPL that evaluates code on various Python versions<commit_after>"""A repl across hosts."""
import ast
import sys
import readline
import traceback
from io import StringIO
def read_stmt():
stmt = ''
prompt = '>>> '
indented = False
while True:
try:
line = input(prompt)
except EOFError:
print()
sys.exit(0)
stmt += line + '\n'
try:
ast.parse(stmt)
except SyntaxError as e:
msg = e.args[0]
if msg == 'unexpected EOF while parsing':
prompt = '... '
continue
raise
else:
if line.startswith((' ', '\t')) and prompt == '... ':
continue
return stmt
namespace = {}
def runit(stmt):
code = compile(stmt, '<stdin>', 'single', dont_inherit=True)
sys.stdout = StringIO()
try:
result = exec(code, namespace)
except Exception:
return False, traceback.format_exc()
return True, sys.stdout.getvalue()
def dorepl(group):
from chopsticks.tunnel import ErrorResult
try:
stmt = read_stmt()
except Exception:
traceback.print_exc()
return
results = group.call(runit, stmt)
vals = list(results.values())
if all(vals[0] == v for v in vals[1:]):
results = {'all %d' % len(vals): vals[0]}
for host, result in sorted(results.items()):
if isinstance(result, ErrorResult):
success = False
result = result.msg
else:
success, result = result
color = '32' if success else '31'
if sys.stderr.isatty():
fmt = '\x1b[{color}m[{host}]\x1b[0m {l}'
else:
fmt = '[{host}] {l}'
for l in result.splitlines():
print(fmt.format(host=host, color=color, l=l))
if __name__ == '__main__':
from chopsticks.tunnel import Docker
from chopsticks.group import Group
group = Group([
Docker('python3.4', image='python:3.4'),
Docker('python3.5', image='python:3.5'),
Docker('python3.6', image='python:3.6'),
])
try:
while True:
dorepl(group)
finally:
del group
|
|
77a2ce5ffff72f6414fd11ea37a08048b81edf8b
|
test.py
|
test.py
|
# -*- coding: utf-8 -*-
from pycublas import *
from ctypes import *
#Test functions
handle = cublasHandle_t()
def Init():
status = cublasCreate(byref(handle))
print status
def Close():
status = cublasDestroy(handle)
print status
def Version():
version = c_int()
status = cublasGetVersion(handle, byref(version))
print 'version:', version.value
print status
def GetPointerMode():
mode = c_cublasPointerMode_t()
status = cublasGetPointerMode(handle, byref(mode))
print 'PointerMode:', cublasPointerMode_t(mode.value)
print status
def SetPointerMode(mode):
mode = c_cublasPointerMode_t(mode)
status = cublasSetPointerMode(handle, mode)
print status
def GetAtomicsMode():
mode = c_cublasAtomicsMode_t()
status = cublasGetAtomicsMode(handle, byref(mode))
print 'AtomicMode:', cublasAtomicsMode_t(mode.value)
print status
def SetAtomicsMode(mode):
mode = c_cublasAtomicsMode_t(mode)
status = cublasSetAtomicsMode(handle, mode)
print status
|
Test in a new file
|
Test in a new file
|
Python
|
bsd-3-clause
|
Vrekrer/PycuBLAS
|
Test in a new file
|
# -*- coding: utf-8 -*-
from pycublas import *
from ctypes import *
#Test functions
handle = cublasHandle_t()
def Init():
status = cublasCreate(byref(handle))
print status
def Close():
status = cublasDestroy(handle)
print status
def Version():
version = c_int()
status = cublasGetVersion(handle, byref(version))
print 'version:', version.value
print status
def GetPointerMode():
mode = c_cublasPointerMode_t()
status = cublasGetPointerMode(handle, byref(mode))
print 'PointerMode:', cublasPointerMode_t(mode.value)
print status
def SetPointerMode(mode):
mode = c_cublasPointerMode_t(mode)
status = cublasSetPointerMode(handle, mode)
print status
def GetAtomicsMode():
mode = c_cublasAtomicsMode_t()
status = cublasGetAtomicsMode(handle, byref(mode))
print 'AtomicMode:', cublasAtomicsMode_t(mode.value)
print status
def SetAtomicsMode(mode):
mode = c_cublasAtomicsMode_t(mode)
status = cublasSetAtomicsMode(handle, mode)
print status
|
<commit_before><commit_msg>Test in a new file<commit_after>
|
# -*- coding: utf-8 -*-
from pycublas import *
from ctypes import *
#Test functions
handle = cublasHandle_t()
def Init():
status = cublasCreate(byref(handle))
print status
def Close():
status = cublasDestroy(handle)
print status
def Version():
version = c_int()
status = cublasGetVersion(handle, byref(version))
print 'version:', version.value
print status
def GetPointerMode():
mode = c_cublasPointerMode_t()
status = cublasGetPointerMode(handle, byref(mode))
print 'PointerMode:', cublasPointerMode_t(mode.value)
print status
def SetPointerMode(mode):
mode = c_cublasPointerMode_t(mode)
status = cublasSetPointerMode(handle, mode)
print status
def GetAtomicsMode():
mode = c_cublasAtomicsMode_t()
status = cublasGetAtomicsMode(handle, byref(mode))
print 'AtomicMode:', cublasAtomicsMode_t(mode.value)
print status
def SetAtomicsMode(mode):
mode = c_cublasAtomicsMode_t(mode)
status = cublasSetAtomicsMode(handle, mode)
print status
|
Test in a new file# -*- coding: utf-8 -*-
from pycublas import *
from ctypes import *
#Test functions
handle = cublasHandle_t()
def Init():
status = cublasCreate(byref(handle))
print status
def Close():
status = cublasDestroy(handle)
print status
def Version():
version = c_int()
status = cublasGetVersion(handle, byref(version))
print 'version:', version.value
print status
def GetPointerMode():
mode = c_cublasPointerMode_t()
status = cublasGetPointerMode(handle, byref(mode))
print 'PointerMode:', cublasPointerMode_t(mode.value)
print status
def SetPointerMode(mode):
mode = c_cublasPointerMode_t(mode)
status = cublasSetPointerMode(handle, mode)
print status
def GetAtomicsMode():
mode = c_cublasAtomicsMode_t()
status = cublasGetAtomicsMode(handle, byref(mode))
print 'AtomicMode:', cublasAtomicsMode_t(mode.value)
print status
def SetAtomicsMode(mode):
mode = c_cublasAtomicsMode_t(mode)
status = cublasSetAtomicsMode(handle, mode)
print status
|
<commit_before><commit_msg>Test in a new file<commit_after># -*- coding: utf-8 -*-
from pycublas import *
from ctypes import *
#Test functions
handle = cublasHandle_t()
def Init():
status = cublasCreate(byref(handle))
print status
def Close():
status = cublasDestroy(handle)
print status
def Version():
version = c_int()
status = cublasGetVersion(handle, byref(version))
print 'version:', version.value
print status
def GetPointerMode():
mode = c_cublasPointerMode_t()
status = cublasGetPointerMode(handle, byref(mode))
print 'PointerMode:', cublasPointerMode_t(mode.value)
print status
def SetPointerMode(mode):
mode = c_cublasPointerMode_t(mode)
status = cublasSetPointerMode(handle, mode)
print status
def GetAtomicsMode():
mode = c_cublasAtomicsMode_t()
status = cublasGetAtomicsMode(handle, byref(mode))
print 'AtomicMode:', cublasAtomicsMode_t(mode.value)
print status
def SetAtomicsMode(mode):
mode = c_cublasAtomicsMode_t(mode)
status = cublasSetAtomicsMode(handle, mode)
print status
|
|
5f3b36bb11858a56bb6154ea5c77eff64e351386
|
pombola/south_africa/management/commands/south_africa_export_constituency_offices.py
|
pombola/south_africa/management/commands/south_africa_export_constituency_offices.py
|
import csv
import sys
from django.core.management.base import BaseCommand, CommandError
from pombola.core.models import Organisation, OrganisationRelationship
MAPS_URL_TEMPLATE = 'https://www.google.com/maps/place/{lat}+{lon}/@{lat},{lon},17z'
def encode_row_values_to_utf8(row):
return {
k: unicode(v).encode('utf-8')
for k, v in row.items()
}
class Command(BaseCommand):
def handle(self, *args, **options):
fields = [
'OfficeID',
'OfficeName',
'MapURL',
'PhysicalAddress',
'Latitude',
'Longitude',
'PartyName',
'PartyID'
]
writer = csv.DictWriter(sys.stdout, fieldnames=fields)
writer.writeheader()
for o in Organisation.objects. \
filter(kind__slug='constituency-office'). \
order_by('slug'):
places = o.place_set.filter(
kind__slug='constituency-office',
name__startswith='Approximate position of ',
)
place_count = places.count()
if place_count > 1:
msg = "Multiple places ({place_count}) found for {organisation_id}"
raise CommandError(msg.format(
place_count=place_count,
organisation_id=o.id
))
row = {'OfficeID': o.id, 'OfficeName': o.name}
if place_count:
location = places[0].location
row['Latitude'] = location.y
row['Longitude'] = location.x
row['MapURL'] = MAPS_URL_TEMPLATE.format(
lat=location.y,
lon=location.x,
)
# Try to find the party whose office this is:
try:
party = OrganisationRelationship.objects.get(
organisation_b=o,
kind__name='has_office'
).organisation_a
row['PartyID'] = party.id
row['PartyName'] = party.name
except OrganisationRelationship.DoesNotExist:
message = "No party found for office with ID {office_id}\n"
self.stderr.write(message.format(office_id=o.id))
# Now try to find the address from the office
# organisation's contacts:
addresses = o.contacts.filter(kind__slug='address')
address_count = addresses.count()
if address_count > 1:
msg = "Multiple address found for {organisation_id}"
raise CommandError(msg.format(
address_count=address_count,
organisation_id=o.id
))
if address_count:
row['PhysicalAddress'] = addresses[0].value
writer.writerow(encode_row_values_to_utf8(row))
|
Add a command to output a CSV file of constituency offices
|
ZA: Add a command to output a CSV file of constituency offices
|
Python
|
agpl-3.0
|
geoffkilpin/pombola,mysociety/pombola,geoffkilpin/pombola,geoffkilpin/pombola,geoffkilpin/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,geoffkilpin/pombola,geoffkilpin/pombola,mysociety/pombola,mysociety/pombola
|
ZA: Add a command to output a CSV file of constituency offices
|
import csv
import sys
from django.core.management.base import BaseCommand, CommandError
from pombola.core.models import Organisation, OrganisationRelationship
MAPS_URL_TEMPLATE = 'https://www.google.com/maps/place/{lat}+{lon}/@{lat},{lon},17z'
def encode_row_values_to_utf8(row):
return {
k: unicode(v).encode('utf-8')
for k, v in row.items()
}
class Command(BaseCommand):
def handle(self, *args, **options):
fields = [
'OfficeID',
'OfficeName',
'MapURL',
'PhysicalAddress',
'Latitude',
'Longitude',
'PartyName',
'PartyID'
]
writer = csv.DictWriter(sys.stdout, fieldnames=fields)
writer.writeheader()
for o in Organisation.objects. \
filter(kind__slug='constituency-office'). \
order_by('slug'):
places = o.place_set.filter(
kind__slug='constituency-office',
name__startswith='Approximate position of ',
)
place_count = places.count()
if place_count > 1:
msg = "Multiple places ({place_count}) found for {organisation_id}"
raise CommandError(msg.format(
place_count=place_count,
organisation_id=o.id
))
row = {'OfficeID': o.id, 'OfficeName': o.name}
if place_count:
location = places[0].location
row['Latitude'] = location.y
row['Longitude'] = location.x
row['MapURL'] = MAPS_URL_TEMPLATE.format(
lat=location.y,
lon=location.x,
)
# Try to find the party whose office this is:
try:
party = OrganisationRelationship.objects.get(
organisation_b=o,
kind__name='has_office'
).organisation_a
row['PartyID'] = party.id
row['PartyName'] = party.name
except OrganisationRelationship.DoesNotExist:
message = "No party found for office with ID {office_id}\n"
self.stderr.write(message.format(office_id=o.id))
# Now try to find the address from the office
# organisation's contacts:
addresses = o.contacts.filter(kind__slug='address')
address_count = addresses.count()
if address_count > 1:
msg = "Multiple address found for {organisation_id}"
raise CommandError(msg.format(
address_count=address_count,
organisation_id=o.id
))
if address_count:
row['PhysicalAddress'] = addresses[0].value
writer.writerow(encode_row_values_to_utf8(row))
|
<commit_before><commit_msg>ZA: Add a command to output a CSV file of constituency offices<commit_after>
|
import csv
import sys
from django.core.management.base import BaseCommand, CommandError
from pombola.core.models import Organisation, OrganisationRelationship
MAPS_URL_TEMPLATE = 'https://www.google.com/maps/place/{lat}+{lon}/@{lat},{lon},17z'
def encode_row_values_to_utf8(row):
return {
k: unicode(v).encode('utf-8')
for k, v in row.items()
}
class Command(BaseCommand):
def handle(self, *args, **options):
fields = [
'OfficeID',
'OfficeName',
'MapURL',
'PhysicalAddress',
'Latitude',
'Longitude',
'PartyName',
'PartyID'
]
writer = csv.DictWriter(sys.stdout, fieldnames=fields)
writer.writeheader()
for o in Organisation.objects. \
filter(kind__slug='constituency-office'). \
order_by('slug'):
places = o.place_set.filter(
kind__slug='constituency-office',
name__startswith='Approximate position of ',
)
place_count = places.count()
if place_count > 1:
msg = "Multiple places ({place_count}) found for {organisation_id}"
raise CommandError(msg.format(
place_count=place_count,
organisation_id=o.id
))
row = {'OfficeID': o.id, 'OfficeName': o.name}
if place_count:
location = places[0].location
row['Latitude'] = location.y
row['Longitude'] = location.x
row['MapURL'] = MAPS_URL_TEMPLATE.format(
lat=location.y,
lon=location.x,
)
# Try to find the party whose office this is:
try:
party = OrganisationRelationship.objects.get(
organisation_b=o,
kind__name='has_office'
).organisation_a
row['PartyID'] = party.id
row['PartyName'] = party.name
except OrganisationRelationship.DoesNotExist:
message = "No party found for office with ID {office_id}\n"
self.stderr.write(message.format(office_id=o.id))
# Now try to find the address from the office
# organisation's contacts:
addresses = o.contacts.filter(kind__slug='address')
address_count = addresses.count()
if address_count > 1:
msg = "Multiple address found for {organisation_id}"
raise CommandError(msg.format(
address_count=address_count,
organisation_id=o.id
))
if address_count:
row['PhysicalAddress'] = addresses[0].value
writer.writerow(encode_row_values_to_utf8(row))
|
ZA: Add a command to output a CSV file of constituency officesimport csv
import sys
from django.core.management.base import BaseCommand, CommandError
from pombola.core.models import Organisation, OrganisationRelationship
MAPS_URL_TEMPLATE = 'https://www.google.com/maps/place/{lat}+{lon}/@{lat},{lon},17z'
def encode_row_values_to_utf8(row):
return {
k: unicode(v).encode('utf-8')
for k, v in row.items()
}
class Command(BaseCommand):
def handle(self, *args, **options):
fields = [
'OfficeID',
'OfficeName',
'MapURL',
'PhysicalAddress',
'Latitude',
'Longitude',
'PartyName',
'PartyID'
]
writer = csv.DictWriter(sys.stdout, fieldnames=fields)
writer.writeheader()
for o in Organisation.objects. \
filter(kind__slug='constituency-office'). \
order_by('slug'):
places = o.place_set.filter(
kind__slug='constituency-office',
name__startswith='Approximate position of ',
)
place_count = places.count()
if place_count > 1:
msg = "Multiple places ({place_count}) found for {organisation_id}"
raise CommandError(msg.format(
place_count=place_count,
organisation_id=o.id
))
row = {'OfficeID': o.id, 'OfficeName': o.name}
if place_count:
location = places[0].location
row['Latitude'] = location.y
row['Longitude'] = location.x
row['MapURL'] = MAPS_URL_TEMPLATE.format(
lat=location.y,
lon=location.x,
)
# Try to find the party whose office this is:
try:
party = OrganisationRelationship.objects.get(
organisation_b=o,
kind__name='has_office'
).organisation_a
row['PartyID'] = party.id
row['PartyName'] = party.name
except OrganisationRelationship.DoesNotExist:
message = "No party found for office with ID {office_id}\n"
self.stderr.write(message.format(office_id=o.id))
# Now try to find the address from the office
# organisation's contacts:
addresses = o.contacts.filter(kind__slug='address')
address_count = addresses.count()
if address_count > 1:
msg = "Multiple address found for {organisation_id}"
raise CommandError(msg.format(
address_count=address_count,
organisation_id=o.id
))
if address_count:
row['PhysicalAddress'] = addresses[0].value
writer.writerow(encode_row_values_to_utf8(row))
|
<commit_before><commit_msg>ZA: Add a command to output a CSV file of constituency offices<commit_after>import csv
import sys
from django.core.management.base import BaseCommand, CommandError
from pombola.core.models import Organisation, OrganisationRelationship
MAPS_URL_TEMPLATE = 'https://www.google.com/maps/place/{lat}+{lon}/@{lat},{lon},17z'
def encode_row_values_to_utf8(row):
return {
k: unicode(v).encode('utf-8')
for k, v in row.items()
}
class Command(BaseCommand):
def handle(self, *args, **options):
fields = [
'OfficeID',
'OfficeName',
'MapURL',
'PhysicalAddress',
'Latitude',
'Longitude',
'PartyName',
'PartyID'
]
writer = csv.DictWriter(sys.stdout, fieldnames=fields)
writer.writeheader()
for o in Organisation.objects. \
filter(kind__slug='constituency-office'). \
order_by('slug'):
places = o.place_set.filter(
kind__slug='constituency-office',
name__startswith='Approximate position of ',
)
place_count = places.count()
if place_count > 1:
msg = "Multiple places ({place_count}) found for {organisation_id}"
raise CommandError(msg.format(
place_count=place_count,
organisation_id=o.id
))
row = {'OfficeID': o.id, 'OfficeName': o.name}
if place_count:
location = places[0].location
row['Latitude'] = location.y
row['Longitude'] = location.x
row['MapURL'] = MAPS_URL_TEMPLATE.format(
lat=location.y,
lon=location.x,
)
# Try to find the party whose office this is:
try:
party = OrganisationRelationship.objects.get(
organisation_b=o,
kind__name='has_office'
).organisation_a
row['PartyID'] = party.id
row['PartyName'] = party.name
except OrganisationRelationship.DoesNotExist:
message = "No party found for office with ID {office_id}\n"
self.stderr.write(message.format(office_id=o.id))
# Now try to find the address from the office
# organisation's contacts:
addresses = o.contacts.filter(kind__slug='address')
address_count = addresses.count()
if address_count > 1:
msg = "Multiple address found for {organisation_id}"
raise CommandError(msg.format(
address_count=address_count,
organisation_id=o.id
))
if address_count:
row['PhysicalAddress'] = addresses[0].value
writer.writerow(encode_row_values_to_utf8(row))
|
|
247f303c2d5e60108fbd3054f012e6b24859dd26
|
lintcode/Medium/029_Interleaving_String.py
|
lintcode/Medium/029_Interleaving_String.py
|
class Solution:
"""
@params s1, s2, s3: Three strings as description.
@return: return True if s3 is formed by the interleaving of
s1 and s2 or False if not.
@hint: you can use [[True] * m for i in range (n)] to allocate a n*m matrix.
"""
def isInterleave(self, s1, s2, s3):
# write your code here
# Solution 1 (TLE)
# if (not s1):
# return s2 == s3
# if (not s2):
# return s1 == s3
# dp = [[c] for c in s1]
# for i in range(len(s2)):
# for j in range(len(s1)):
# if (j == 0):
# left = [s2[:i+1]]
# else:
# left = dp[j - 1]
# tmp = []
# for s in left:
# if ((s + s1[j]) not in tmp):
# tmp.append(s + s1[j])
# if (i == 0 and (s1[:j + 1] + s2[i]) not in tmp):
# tmp.append(s1[:j + 1] + s2[i])
# else:
# for s in dp[j]:
# if ((s + s2[i]) not in tmp):
# tmp.append(s + s2[i])
# dp[j] = tmp
# return s3 in dp[-1]
# Solution 2
if (not s1):
return s2 == s3
if (not s2):
return s1 == s3
if (len(s3) != (len(s1) + len(s2))):
return False
dp = [[False] * (len(s2) + 1) for i in range(len(s1) + 1)]
for i in range(1, len(s2) + 1):
if (s2[:i] == s3[:i]):
dp[0][i] = True
for i in range(1, len(s1) + 1):
if (s1[:i] == s3[:i]):
dp[i][0] = True
for i in range(1, len(s1) + 1):
for j in range(1, len(s2) + 1):
if (s1[i - 1] == s3[i + j - 1] and dp[i - 1][j]):
dp[i][j] = True
elif (s2[j - 1] == s3[i + j - 1] and dp[i][j - 1]):
dp[i][j] = True
return dp[-1][-1]
|
Add solution to lintcode question 29
|
Add solution to lintcode question 29
|
Python
|
mit
|
Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode
|
Add solution to lintcode question 29
|
class Solution:
"""
@params s1, s2, s3: Three strings as description.
@return: return True if s3 is formed by the interleaving of
s1 and s2 or False if not.
@hint: you can use [[True] * m for i in range (n)] to allocate a n*m matrix.
"""
def isInterleave(self, s1, s2, s3):
# write your code here
# Solution 1 (TLE)
# if (not s1):
# return s2 == s3
# if (not s2):
# return s1 == s3
# dp = [[c] for c in s1]
# for i in range(len(s2)):
# for j in range(len(s1)):
# if (j == 0):
# left = [s2[:i+1]]
# else:
# left = dp[j - 1]
# tmp = []
# for s in left:
# if ((s + s1[j]) not in tmp):
# tmp.append(s + s1[j])
# if (i == 0 and (s1[:j + 1] + s2[i]) not in tmp):
# tmp.append(s1[:j + 1] + s2[i])
# else:
# for s in dp[j]:
# if ((s + s2[i]) not in tmp):
# tmp.append(s + s2[i])
# dp[j] = tmp
# return s3 in dp[-1]
# Solution 2
if (not s1):
return s2 == s3
if (not s2):
return s1 == s3
if (len(s3) != (len(s1) + len(s2))):
return False
dp = [[False] * (len(s2) + 1) for i in range(len(s1) + 1)]
for i in range(1, len(s2) + 1):
if (s2[:i] == s3[:i]):
dp[0][i] = True
for i in range(1, len(s1) + 1):
if (s1[:i] == s3[:i]):
dp[i][0] = True
for i in range(1, len(s1) + 1):
for j in range(1, len(s2) + 1):
if (s1[i - 1] == s3[i + j - 1] and dp[i - 1][j]):
dp[i][j] = True
elif (s2[j - 1] == s3[i + j - 1] and dp[i][j - 1]):
dp[i][j] = True
return dp[-1][-1]
|
<commit_before><commit_msg>Add solution to lintcode question 29<commit_after>
|
class Solution:
"""
@params s1, s2, s3: Three strings as description.
@return: return True if s3 is formed by the interleaving of
s1 and s2 or False if not.
@hint: you can use [[True] * m for i in range (n)] to allocate a n*m matrix.
"""
def isInterleave(self, s1, s2, s3):
# write your code here
# Solution 1 (TLE)
# if (not s1):
# return s2 == s3
# if (not s2):
# return s1 == s3
# dp = [[c] for c in s1]
# for i in range(len(s2)):
# for j in range(len(s1)):
# if (j == 0):
# left = [s2[:i+1]]
# else:
# left = dp[j - 1]
# tmp = []
# for s in left:
# if ((s + s1[j]) not in tmp):
# tmp.append(s + s1[j])
# if (i == 0 and (s1[:j + 1] + s2[i]) not in tmp):
# tmp.append(s1[:j + 1] + s2[i])
# else:
# for s in dp[j]:
# if ((s + s2[i]) not in tmp):
# tmp.append(s + s2[i])
# dp[j] = tmp
# return s3 in dp[-1]
# Solution 2
if (not s1):
return s2 == s3
if (not s2):
return s1 == s3
if (len(s3) != (len(s1) + len(s2))):
return False
dp = [[False] * (len(s2) + 1) for i in range(len(s1) + 1)]
for i in range(1, len(s2) + 1):
if (s2[:i] == s3[:i]):
dp[0][i] = True
for i in range(1, len(s1) + 1):
if (s1[:i] == s3[:i]):
dp[i][0] = True
for i in range(1, len(s1) + 1):
for j in range(1, len(s2) + 1):
if (s1[i - 1] == s3[i + j - 1] and dp[i - 1][j]):
dp[i][j] = True
elif (s2[j - 1] == s3[i + j - 1] and dp[i][j - 1]):
dp[i][j] = True
return dp[-1][-1]
|
Add solution to lintcode question 29class Solution:
"""
@params s1, s2, s3: Three strings as description.
@return: return True if s3 is formed by the interleaving of
s1 and s2 or False if not.
@hint: you can use [[True] * m for i in range (n)] to allocate a n*m matrix.
"""
def isInterleave(self, s1, s2, s3):
# write your code here
# Solution 1 (TLE)
# if (not s1):
# return s2 == s3
# if (not s2):
# return s1 == s3
# dp = [[c] for c in s1]
# for i in range(len(s2)):
# for j in range(len(s1)):
# if (j == 0):
# left = [s2[:i+1]]
# else:
# left = dp[j - 1]
# tmp = []
# for s in left:
# if ((s + s1[j]) not in tmp):
# tmp.append(s + s1[j])
# if (i == 0 and (s1[:j + 1] + s2[i]) not in tmp):
# tmp.append(s1[:j + 1] + s2[i])
# else:
# for s in dp[j]:
# if ((s + s2[i]) not in tmp):
# tmp.append(s + s2[i])
# dp[j] = tmp
# return s3 in dp[-1]
# Solution 2
if (not s1):
return s2 == s3
if (not s2):
return s1 == s3
if (len(s3) != (len(s1) + len(s2))):
return False
dp = [[False] * (len(s2) + 1) for i in range(len(s1) + 1)]
for i in range(1, len(s2) + 1):
if (s2[:i] == s3[:i]):
dp[0][i] = True
for i in range(1, len(s1) + 1):
if (s1[:i] == s3[:i]):
dp[i][0] = True
for i in range(1, len(s1) + 1):
for j in range(1, len(s2) + 1):
if (s1[i - 1] == s3[i + j - 1] and dp[i - 1][j]):
dp[i][j] = True
elif (s2[j - 1] == s3[i + j - 1] and dp[i][j - 1]):
dp[i][j] = True
return dp[-1][-1]
|
<commit_before><commit_msg>Add solution to lintcode question 29<commit_after>class Solution:
"""
@params s1, s2, s3: Three strings as description.
@return: return True if s3 is formed by the interleaving of
s1 and s2 or False if not.
@hint: you can use [[True] * m for i in range (n)] to allocate a n*m matrix.
"""
def isInterleave(self, s1, s2, s3):
# write your code here
# Solution 1 (TLE)
# if (not s1):
# return s2 == s3
# if (not s2):
# return s1 == s3
# dp = [[c] for c in s1]
# for i in range(len(s2)):
# for j in range(len(s1)):
# if (j == 0):
# left = [s2[:i+1]]
# else:
# left = dp[j - 1]
# tmp = []
# for s in left:
# if ((s + s1[j]) not in tmp):
# tmp.append(s + s1[j])
# if (i == 0 and (s1[:j + 1] + s2[i]) not in tmp):
# tmp.append(s1[:j + 1] + s2[i])
# else:
# for s in dp[j]:
# if ((s + s2[i]) not in tmp):
# tmp.append(s + s2[i])
# dp[j] = tmp
# return s3 in dp[-1]
# Solution 2
if (not s1):
return s2 == s3
if (not s2):
return s1 == s3
if (len(s3) != (len(s1) + len(s2))):
return False
dp = [[False] * (len(s2) + 1) for i in range(len(s1) + 1)]
for i in range(1, len(s2) + 1):
if (s2[:i] == s3[:i]):
dp[0][i] = True
for i in range(1, len(s1) + 1):
if (s1[:i] == s3[:i]):
dp[i][0] = True
for i in range(1, len(s1) + 1):
for j in range(1, len(s2) + 1):
if (s1[i - 1] == s3[i + j - 1] and dp[i - 1][j]):
dp[i][j] = True
elif (s2[j - 1] == s3[i + j - 1] and dp[i][j - 1]):
dp[i][j] = True
return dp[-1][-1]
|
|
8a7befd69d4a7b224e037b4eb8111853125a1105
|
code_sample/python/sensor_avoid.py
|
code_sample/python/sensor_avoid.py
|
#!/usr/bin/env python
#coding: utf-8
import sys,time
j1,j2,j3,j5,j6 = 0,60,0,0,0
while True:
# The robot's angle read cycle is 20ms (roughly)
time.sleep(0.05)
ch0 = 0
delta = 0
# Read the AD converter value on channel 0 (the distance sensor)
with open("/run/shm/adconv_values","r") as sensor:
vs_str = sensor.readline().rstrip().split()
ch0 = int(vs_str[0])
# If the distance sensor value is 300 or more, turn in the positive direction;
# otherwise turn in the negative direction
delta = 1 if ch0 > 300 else -1
# If the arm has already turned too far, stop moving in that direction
if j1 < -90 and delta < 0: continue
if j1 > 90 and delta > 0: continue
j1 += delta
# Set the arm joint angles
s = "%d,%d,%d,%d,%d\n" % (j1,j2,j3,j5,j6)
with open("/run/shm/angles","w") as arm:
arm.write(s)
print >> sys.stderr, s
|
Add a sensor feedback sample
|
Add a sensor feedback sample
|
Python
|
mit
|
ryuichiueda/RobotDesign3,ryuichiueda/RobotDesign3,ryuichiueda/RobotDesign3,ryuichiueda/RobotDesign3,ryuichiueda/RobotDesign3
|
Add a sensor feedback sample
|
#!/usr/bin/env python
#coding: utf-8
import sys,time
j1,j2,j3,j5,j6 = 0,60,0,0,0
while True:
# The robot's angle read cycle is 20ms (roughly)
time.sleep(0.05)
ch0 = 0
delta = 0
# Read the AD converter value on channel 0 (the distance sensor)
with open("/run/shm/adconv_values","r") as sensor:
vs_str = sensor.readline().rstrip().split()
ch0 = int(vs_str[0])
# If the distance sensor value is 300 or more, turn in the positive direction;
# otherwise turn in the negative direction
delta = 1 if ch0 > 300 else -1
# If the arm has already turned too far, stop moving in that direction
if j1 < -90 and delta < 0: continue
if j1 > 90 and delta > 0: continue
j1 += delta
# Set the arm joint angles
s = "%d,%d,%d,%d,%d\n" % (j1,j2,j3,j5,j6)
with open("/run/shm/angles","w") as arm:
arm.write(s)
print >> sys.stderr, s
|
<commit_before><commit_msg>Add a sensor feedback sample<commit_after>
|
#!/usr/bin/env python
#coding: utf-8
import sys,time
j1,j2,j3,j5,j6 = 0,60,0,0,0
while True:
# The robot's angle read cycle is 20ms (roughly)
time.sleep(0.05)
ch0 = 0
delta = 0
# Read the AD converter value on channel 0 (the distance sensor)
with open("/run/shm/adconv_values","r") as sensor:
vs_str = sensor.readline().rstrip().split()
ch0 = int(vs_str[0])
# If the distance sensor value is 300 or more, turn in the positive direction;
# otherwise turn in the negative direction
delta = 1 if ch0 > 300 else -1
# If the arm has already turned too far, stop moving in that direction
if j1 < -90 and delta < 0: continue
if j1 > 90 and delta > 0: continue
j1 += delta
# Set the arm joint angles
s = "%d,%d,%d,%d,%d\n" % (j1,j2,j3,j5,j6)
with open("/run/shm/angles","w") as arm:
arm.write(s)
print >> sys.stderr, s
|
Add a sensor feedback sample#!/usr/bin/env python
#coding: utf-8
import sys,time
j1,j2,j3,j5,j6 = 0,60,0,0,0
while True:
# The robot's angle read cycle is 20ms (roughly)
time.sleep(0.05)
ch0 = 0
delta = 0
# Read the AD converter value on channel 0 (the distance sensor)
with open("/run/shm/adconv_values","r") as sensor:
vs_str = sensor.readline().rstrip().split()
ch0 = int(vs_str[0])
# If the distance sensor value is 300 or more, turn in the positive direction;
# otherwise turn in the negative direction
delta = 1 if ch0 > 300 else -1
# If the arm has already turned too far, stop moving in that direction
if j1 < -90 and delta < 0: continue
if j1 > 90 and delta > 0: continue
j1 += delta
# Set the arm joint angles
s = "%d,%d,%d,%d,%d\n" % (j1,j2,j3,j5,j6)
with open("/run/shm/angles","w") as arm:
arm.write(s)
print >> sys.stderr, s
|
<commit_before><commit_msg>Add a sensor feedback sample<commit_after>#!/usr/bin/env python
#coding: utf-8
import sys,time
j1,j2,j3,j5,j6 = 0,60,0,0,0
while True:
# The robot's angle read cycle is 20ms (roughly)
time.sleep(0.05)
ch0 = 0
delta = 0
# Read the AD converter value on channel 0 (the distance sensor)
with open("/run/shm/adconv_values","r") as sensor:
vs_str = sensor.readline().rstrip().split()
ch0 = int(vs_str[0])
# If the distance sensor value is 300 or more, turn in the positive direction;
# otherwise turn in the negative direction
delta = 1 if ch0 > 300 else -1
# If the arm has already turned too far, stop moving in that direction
if j1 < -90 and delta < 0: continue
if j1 > 90 and delta > 0: continue
j1 += delta
# Set the arm joint angles
s = "%d,%d,%d,%d,%d\n" % (j1,j2,j3,j5,j6)
with open("/run/shm/angles","w") as arm:
arm.write(s)
print >> sys.stderr, s
|
|
c7e5a46348b48e081b122f1e0f8a578b2bfaf05a
|
open_humans/migrations/0008_rm_ghost_illuminauyg_table.py
|
open_humans/migrations/0008_rm_ghost_illuminauyg_table.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2018-01-26 00:19
from __future__ import unicode_literals
from django.db import migrations
DROP_UNUSED_TABLES = """\
DROP TABLE IF EXISTS illumina_uyg_userdata;
"""
class Migration(migrations.Migration):
dependencies = [
('open_humans', '0007_blogpost'),
]
operations = [
migrations.RunSQL(DROP_UNUSED_TABLES),
]
|
Remove obsolete table from database
|
Remove obsolete table from database
|
Python
|
mit
|
OpenHumans/open-humans,OpenHumans/open-humans,PersonalGenomesOrg/open-humans,OpenHumans/open-humans,PersonalGenomesOrg/open-humans,PersonalGenomesOrg/open-humans,PersonalGenomesOrg/open-humans,OpenHumans/open-humans
|
Remove obsolete table from database
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2018-01-26 00:19
from __future__ import unicode_literals
from django.db import migrations
DROP_UNUSED_TABLES = """\
DROP TABLE IF EXISTS illumina_uyg_userdata;
"""
class Migration(migrations.Migration):
dependencies = [
('open_humans', '0007_blogpost'),
]
operations = [
migrations.RunSQL(DROP_UNUSED_TABLES),
]
|
<commit_before><commit_msg>Remove obsolete table from database<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2018-01-26 00:19
from __future__ import unicode_literals
from django.db import migrations
DROP_UNUSED_TABLES = """\
DROP TABLE IF EXISTS illumina_uyg_userdata;
"""
class Migration(migrations.Migration):
dependencies = [
('open_humans', '0007_blogpost'),
]
operations = [
migrations.RunSQL(DROP_UNUSED_TABLES),
]
|
Remove obsolete table from database# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2018-01-26 00:19
from __future__ import unicode_literals
from django.db import migrations
DROP_UNUSED_TABLES = """\
DROP TABLE IF EXISTS illumina_uyg_userdata;
"""
class Migration(migrations.Migration):
dependencies = [
('open_humans', '0007_blogpost'),
]
operations = [
migrations.RunSQL(DROP_UNUSED_TABLES),
]
|
<commit_before><commit_msg>Remove obsolete table from database<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2018-01-26 00:19
from __future__ import unicode_literals
from django.db import migrations
DROP_UNUSED_TABLES = """\
DROP TABLE IF EXISTS illumina_uyg_userdata;
"""
class Migration(migrations.Migration):
dependencies = [
('open_humans', '0007_blogpost'),
]
operations = [
migrations.RunSQL(DROP_UNUSED_TABLES),
]
|
|
e080c174e4430684856d8d5877c75fd15d5e0f48
|
opps/core/management/commands/update_channel_denormalization.py
|
opps/core/management/commands/update_channel_denormalization.py
|
from django.core.management.base import BaseCommand, CommandError
from opps.channels.models import Channel
from opps.containers.models import ContainerBox
from opps.articles.models import Post
class Command(BaseCommand):
def handle(self, *args, **options):
models = [Channel, Post, ContainerBox]
for m in models:
[p.save() for p in m.objects.all()]
|
Add management command to denormalize channel and containers
|
Add management command to denormalize channel and containers
|
Python
|
mit
|
YACOWS/opps,jeanmask/opps,jeanmask/opps,jeanmask/opps,jeanmask/opps,YACOWS/opps,opps/opps,opps/opps,YACOWS/opps,opps/opps,opps/opps,YACOWS/opps,williamroot/opps,williamroot/opps,williamroot/opps,williamroot/opps
|
Add management command to denormalize channel and containers
|
from django.core.management.base import BaseCommand, CommandError
from opps.channels.models import Channel
from opps.containers.models import ContainerBox
from opps.articles.models import Post
class Command(BaseCommand):
def handle(self, *args, **options):
models = [Channel, Post, ContainerBox]
for m in models:
[p.save() for p in m.objects.all()]
|
<commit_before><commit_msg>Add management command to denormalize channel and containers<commit_after>
|
from django.core.management.base import BaseCommand, CommandError
from opps.channels.models import Channel
from opps.containers.models import ContainerBox
from opps.articles.models import Post
class Command(BaseCommand):
def handle(self, *args, **options):
models = [Channel, Post, ContainerBox]
for m in models:
[p.save() for p in m.objects.all()]
|
Add management command to denormalize channel and containersfrom django.core.management.base import BaseCommand, CommandError
from opps.channels.models import Channel
from opps.containers.models import ContainerBox
from opps.articles.models import Post
class Command(BaseCommand):
def handle(self, *args, **options):
models = [Channel, Post, ContainerBox]
for m in models:
[p.save() for p in m.objects.all()]
|
<commit_before><commit_msg>Add management command to denormalize channel and containers<commit_after>from django.core.management.base import BaseCommand, CommandError
from opps.channels.models import Channel
from opps.containers.models import ContainerBox
from opps.articles.models import Post
class Command(BaseCommand):
def handle(self, *args, **options):
models = [Channel, Post, ContainerBox]
for m in models:
[p.save() for p in m.objects.all()]
|
|
79ee7795135f9cba31aefdf9b752da24e98c6337
|
graphing/classification_method_score_meandiffbar.py
|
graphing/classification_method_score_meandiffbar.py
|
#!/usr/bin/env python
# a bar plot with errorbars
import numpy as np
import matplotlib.pyplot as plt
# extract column of the dataset
data = np.genfromtxt('../data/sens_scores_difftable.csv',delimiter=',',dtype=None)
star_category = data[0,1:6]
diffmeans = data[1,1:6]
diffstd = data[2,1:6]
# or type in the data
N = 5
diffmeans = (8.2784, 15.6807, 0.9163, -40.3757, -18.6863)
diffstd = (10.5863, 10.0328, 9.9718, 35.6703, 28.8641)
ind = np.arange(5) # the x locations for the groups
width = 0.35 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
p1 = ax.bar(ind, diffmeans, width, color='r', yerr=diffstd)
# add some
ax.set_xlabel('Star Classification Method')
ax.set_ylabel('Difference in Mean GHI Scores Compared to the Default')
ax.set_title('Difference in Mean GHI Scores between 5 classification methods and the Default')
ax.set_xticks(ind+width)
ax.set_xticklabels( ('Geo', 'Geo-up', 'Geo-down', 'Infra-auto', 'Infra-selective') )
plt.show()
-------------------------
#!/usr/bin/env python
# a bar plot with errorbars
import numpy as np
import matplotlib.pyplot as plt
# extract column of the dataset
diffdata = np.genfromtxt('../data/sens_scores_difftable.csv',delimiter=',',dtype=None)
# plot data
def plot_bar_with_error(results):
fig = plt.figure()
ax = fig.add_subplot(111)
# extract each column into a variable
diffmeans = results[1,1:6]
diffstd = results[2,1:6]
ind = np.arange(5)
width = 0.35
p1 = ax.bar(ind, diffmeans, width, color='r', yerr=diffstd)
plt.xlabel('Star Classification Method')
plt.legend( (p1[0], p1[1], p1[2], p1[3], p1[4]), label)
plt.xticks(ind+width/2., ('Default','GEO','GEO-Up','GEO-Down','INFRA-Auto','INFRA-Selective') )
plot_bar_with_error(diffdata)
plt.ylabel('Difference in Mean GHI Scores Compared to the Default')
plt.title('Difference in Mean GHI Scores between 5 Classification Methods and the Default')
plt.show()
|
Create bar charts with error bars for foj sensitivity scores.
|
Create bar charts with error bars for foj sensitivity scores.
|
Python
|
mit
|
Nodoka/Bioquality,Nodoka/Bioquality
|
Create bar charts with error bars for foj sensitivity scores.
|
#!/usr/bin/env python
# a bar plot with errorbars
import numpy as np
import matplotlib.pyplot as plt
# extract column of the dataset
data = np.genfromtxt('../data/sens_scores_difftable.csv',delimiter=',',dtype=None)
star_category = data[0,1:6]
diffmeans = data[1,1:6]
diffstd = data[2,1:6]
# or type in the data
N = 5
diffmeans = (8.2784, 15.6807, 0.9163, -40.3757, -18.6863)
diffstd = (10.5863, 10.0328, 9.9718, 35.6703, 28.8641)
ind = np.arange(5) # the x locations for the groups
width = 0.35 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
p1 = ax.bar(ind, diffmeans, width, color='r', yerr=diffstd)
# add some
ax.set_xlabel('Star Classification Method')
ax.set_ylabel('Difference in Mean GHI Scores Compared to the Default')
ax.set_title('Difference in Mean GHI Scores between 5 classification methods and the Default')
ax.set_xticks(ind+width)
ax.set_xticklabels( ('Geo', 'Geo-up', 'Geo-down', 'Infra-auto', 'Infra-selective') )
plt.show()
-------------------------
#!/usr/bin/env python
# a bar plot with errorbars
import numpy as np
import matplotlib.pyplot as plt
# extract column of the dataset
diffdata = np.genfromtxt('../data/sens_scores_difftable.csv',delimiter=',',dtype=None)
# plot data
def plot_bar_with_error(results):
fig = plt.figure()
ax = fig.add_subplot(111)
# extract each column into a variable
diffmeans = results[1,1:6]
diffstd = results[2,1:6]
ind = np.arange(5)
width = 0.35
p1 = ax.bar(ind, diffmeans, width, color='r', yerr=diffstd)
plt.xlabel('Star Classification Method')
plt.legend( (p1[0], p1[1], p1[2], p1[3], p1[4]), label)
plt.xticks(ind+width/2., ('Default','GEO','GEO-Up','GEO-Down','INFRA-Auto','INFRA-Selective') )
plot_bar_with_error(diffdata)
plt.ylabel('Difference in Mean GHI Scores Compared to the Default')
plt.title('Difference in Mean GHI Scores between 5 Classification Methods and the Default')
plt.show()
|
<commit_before><commit_msg>Create bar charts with error bars for foj sensitivity scores.<commit_after>
|
#!/usr/bin/env python
# a bar plot with errorbars
import numpy as np
import matplotlib.pyplot as plt
# extract column of the dataset
data = np.genfromtxt('../data/sens_scores_difftable.csv',delimiter=',',dtype=None)
star_category = data[0,1:6]
diffmeans = data[1,1:6]
diffstd = data[2,1:6]
# or type in the data
N = 5
diffmeans = (8.2784, 15.6807, 0.9163, -40.3757, -18.6863)
diffstd = (10.5863, 10.0328, 9.9718, 35.6703, 28.8641)
ind = np.arange(5) # the x locations for the groups
width = 0.35 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
p1 = ax.bar(ind, diffmeans, width, color='r', yerr=diffstd)
# add some
ax.set_xlabel('Star Classification Method')
ax.set_ylabel('Difference in Mean GHI Scores Compared to the Default')
ax.set_title('Difference in Mean GHI Scores between 5 classification methods and the Default')
ax.set_xticks(ind+width)
ax.set_xticklabels( ('Geo', 'Geo-up', 'Geo-down', 'Infra-auto', 'Infra-selective') )
plt.show()
-------------------------
#!/usr/bin/env python
# a bar plot with errorbars
import numpy as np
import matplotlib.pyplot as plt
# extract column of the dataset
diffdata = np.genfromtxt('../data/sens_scores_difftable.csv',delimiter=',',dtype=None)
# plot data
def plot_bar_with_error(results):
fig = plt.figure()
ax = fig.add_subplot(111)
# extract each column into a variable
diffmeans = results[1,1:6]
diffstd = results[2,1:6]
ind = np.arange(5)
width = 0.35
p1 = ax.bar(ind, diffmeans, width, color='r', yerr=diffstd)
plt.xlabel('Star Classification Method')
plt.legend( (p1[0], p1[1], p1[2], p1[3], p1[4]), label)
plt.xticks(ind+width/2., ('Default','GEO','GEO-Up','GEO-Down','INFRA-Auto','INFRA-Selective') )
plot_bar_with_error(diffdata)
plt.ylabel('Difference in Mean GHI Scores Compared to the Default')
plt.title('Difference in Mean GHI Scores between 5 Classification Methods and the Default')
plt.show()
|
Create bar charts with error bars for foj sensitivity scores.#!/usr/bin/env python
# a bar plot with errorbars
import numpy as np
import matplotlib.pyplot as plt
# extract column of the dataset
data = np.genfromtxt('../data/sens_scores_difftable.csv',delimiter=',',dtype=None)
star_category = data[0,1:6]
diffmeans = data[1,1:6]
diffstd = data[2,1:6]
# or type in the data
N = 5
diffmeans = (8.2784, 15.6807, 0.9163, -40.3757, -18.6863)
diffstd = (10.5863, 10.0328, 9.9718, 35.6703, 28.8641)
ind = np.arange(5) # the x locations for the groups
width = 0.35 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
p1 = ax.bar(ind, diffmeans, width, color='r', yerr=diffstd)
# add some
ax.set_xlabel('Star Classification Method')
ax.set_ylabel('Difference in Mean GHI Scores Compared to the Default')
ax.set_title('Difference in Mean GHI Scores between 5 classification methods and the Default')
ax.set_xticks(ind+width)
ax.set_xticklabels( ('Geo', 'Geo-up', 'Geo-down', 'Infra-auto', 'Infra-selective') )
plt.show()
-------------------------
#!/usr/bin/env python
# a bar plot with errorbars
import numpy as np
import matplotlib.pyplot as plt
# extract column of the dataset
diffdata = np.genfromtxt('../data/sens_scores_difftable.csv',delimiter=',',dtype=None)
# plot data
def plot_bar_with_error(results):
fig = plt.figure()
ax = fig.add_subplot(111)
# extract each column into a variable
diffmeans = results[1,1:6]
diffstd = results[2,1:6]
ind = np.arange(5)
width = 0.35
p1 = ax.bar(ind, diffmeans, width, color='r', yerr=diffstd)
plt.xlabel('Star Classification Method')
plt.legend( (p1[0], p1[1], p1[2], p1[3], p1[4]), label)
plt.xticks(ind+width/2., ('Default','GEO','GEO-Up','GEO-Down','INFRA-Auto','INFRA-Selective') )
plot_bar_with_error(diffdata)
plt.ylabel('Difference in Mean GHI Scores Compared to the Default')
plt.title('Difference in Mean GHI Scores between 5 Classification Methods and the Default')
plt.show()
|
<commit_before><commit_msg>Create bar charts with error bars for foj sensitivity scores.<commit_after>#!/usr/bin/env python
# a bar plot with errorbars
import numpy as np
import matplotlib.pyplot as plt
# extract column of the dataset
data = np.genfromtxt('../data/sens_scores_difftable.csv',delimiter=',',dtype=None)
star_category = data[0,1:6]
diffmeans = data[1,1:6]
diffstd = data[2,1:6]
# or type in the data
N = 5
diffmeans = (8.2784, 15.6807, 0.9163, -40.3757, -18.6863)
diffstd = (10.5863, 10.0328, 9.9718, 35.6703, 28.8641)
ind = np.arange(5) # the x locations for the groups
width = 0.35 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
p1 = ax.bar(ind, diffmeans, width, color='r', yerr=diffstd)
# add some
ax.set_xlabel('Star Classification Method')
ax.set_ylabel('Difference in Mean GHI Scores Compared to the Default')
ax.set_title('Difference in Mean GHI Scores between 5 classification methods and the Default')
ax.set_xticks(ind+width)
ax.set_xticklabels( ('Geo', 'Geo-up', 'Geo-down', 'Infra-auto', 'Infra-selective') )
plt.show()
-------------------------
#!/usr/bin/env python
# a bar plot with errorbars
import numpy as np
import matplotlib.pyplot as plt
# extract column of the dataset
diffdata = np.genfromtxt('../data/sens_scores_difftable.csv',delimiter=',',dtype=None)
# plot data
def plot_bar_with_error(results):
fig = plt.figure()
ax = fig.add_subplot(111)
# extract each column into a variable
diffmeans = results[1,1:6]
diffstd = results[2,1:6]
ind = np.arange(5)
width = 0.35
p1 = ax.bar(ind, diffmeans, width, color='r', yerr=diffstd)
plt.xlabel('Star Classification Method')
plt.legend( (p1[0], p1[1], p1[2], p1[3], p1[4]), label)
plt.xticks(ind+width/2., ('Default','GEO','GEO-Up','GEO-Down','INFRA-Auto','INFRA-Selective') )
plot_bar_with_error(diffdata)
plt.ylabel('Difference in Mean GHI Scores Compared to the Default')
plt.title('Difference in Mean GHI Scores between 5 Classification Methods and the Default')
plt.show()
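The second script in this commit passes an undefined name (label) to plt.legend and supplies six tick labels for five bars. A minimal corrected sketch, assuming a single legend entry and reusing the five method labels from the first script (the legend text is illustrative, not from the original commit):
import numpy as np
import matplotlib.pyplot as plt

def plot_bar_with_error(diffmeans, diffstd, tick_labels):
    # One red bar per classification method, with symmetric error bars.
    ind = np.arange(len(diffmeans))
    width = 0.35
    fig = plt.figure()
    ax = fig.add_subplot(111)
    bars = ax.bar(ind, diffmeans, width, color='r', yerr=diffstd)
    ax.set_xlabel('Star Classification Method')
    ax.set_xticks(ind + width / 2.)
    ax.set_xticklabels(tick_labels)
    ax.legend((bars[0],), ('Difference vs. default',))  # single entry for the one bar series
    return fig

plot_bar_with_error((8.2784, 15.6807, 0.9163, -40.3757, -18.6863),
                    (10.5863, 10.0328, 9.9718, 35.6703, 28.8641),
                    ('Geo', 'Geo-up', 'Geo-down', 'Infra-auto', 'Infra-selective'))
plt.show()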
|
|
123a58bf7d09c804eb502220278ef9e1346538cd
|
netbox/extras/tests/test_models.py
|
netbox/extras/tests/test_models.py
|
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from dcim.models import Site
from extras.choices import ExportTemplateLanguageChoices
from extras.models import Graph
class GraphTest(TestCase):
def setUp(self):
self.site = Site(name='Site 1', slug='site-1')
def test_graph_render_django(self):
# Using the pluralize filter as a sanity check (it's only available in Django)
TEMPLATE_TEXT = "{{ obj.name|lower }} thing{{ 2|pluralize }}"
RENDERED_TEXT = "site 1 things"
graph = Graph(
type=ContentType.objects.get(app_label='dcim', model='site'),
name='Graph 1',
template_language=ExportTemplateLanguageChoices.LANGUAGE_DJANGO,
source=TEMPLATE_TEXT,
link=TEMPLATE_TEXT
)
self.assertEqual(graph.embed_url(self.site), RENDERED_TEXT)
self.assertEqual(graph.embed_link(self.site), RENDERED_TEXT)
def test_graph_render_jinja2(self):
TEMPLATE_TEXT = "{{ [obj.name, obj.slug]|join(',') }}"
RENDERED_TEXT = "Site 1,site-1"
graph = Graph(
type=ContentType.objects.get(app_label='dcim', model='site'),
name='Graph 1',
template_language=ExportTemplateLanguageChoices.LANGUAGE_JINJA2,
source=TEMPLATE_TEXT,
link=TEMPLATE_TEXT
)
self.assertEqual(graph.embed_url(self.site), RENDERED_TEXT)
self.assertEqual(graph.embed_link(self.site), RENDERED_TEXT)
|
Add tests for Graph rendering
|
Add tests for Graph rendering
|
Python
|
apache-2.0
|
digitalocean/netbox,digitalocean/netbox,digitalocean/netbox,digitalocean/netbox
|
Add tests for Graph rendering
|
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from dcim.models import Site
from extras.choices import ExportTemplateLanguageChoices
from extras.models import Graph
class GraphTest(TestCase):
def setUp(self):
self.site = Site(name='Site 1', slug='site-1')
def test_graph_render_django(self):
# Using the pluralize filter as a sanity check (it's only available in Django)
TEMPLATE_TEXT = "{{ obj.name|lower }} thing{{ 2|pluralize }}"
RENDERED_TEXT = "site 1 things"
graph = Graph(
type=ContentType.objects.get(app_label='dcim', model='site'),
name='Graph 1',
template_language=ExportTemplateLanguageChoices.LANGUAGE_DJANGO,
source=TEMPLATE_TEXT,
link=TEMPLATE_TEXT
)
self.assertEqual(graph.embed_url(self.site), RENDERED_TEXT)
self.assertEqual(graph.embed_link(self.site), RENDERED_TEXT)
def test_graph_render_jinja2(self):
TEMPLATE_TEXT = "{{ [obj.name, obj.slug]|join(',') }}"
RENDERED_TEXT = "Site 1,site-1"
graph = Graph(
type=ContentType.objects.get(app_label='dcim', model='site'),
name='Graph 1',
template_language=ExportTemplateLanguageChoices.LANGUAGE_JINJA2,
source=TEMPLATE_TEXT,
link=TEMPLATE_TEXT
)
self.assertEqual(graph.embed_url(self.site), RENDERED_TEXT)
self.assertEqual(graph.embed_link(self.site), RENDERED_TEXT)
|
<commit_before><commit_msg>Add tests for Graph rendering<commit_after>
|
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from dcim.models import Site
from extras.choices import ExportTemplateLanguageChoices
from extras.models import Graph
class GraphTest(TestCase):
def setUp(self):
self.site = Site(name='Site 1', slug='site-1')
def test_graph_render_django(self):
# Using the pluralize filter as a sanity check (it's only available in Django)
TEMPLATE_TEXT = "{{ obj.name|lower }} thing{{ 2|pluralize }}"
RENDERED_TEXT = "site 1 things"
graph = Graph(
type=ContentType.objects.get(app_label='dcim', model='site'),
name='Graph 1',
template_language=ExportTemplateLanguageChoices.LANGUAGE_DJANGO,
source=TEMPLATE_TEXT,
link=TEMPLATE_TEXT
)
self.assertEqual(graph.embed_url(self.site), RENDERED_TEXT)
self.assertEqual(graph.embed_link(self.site), RENDERED_TEXT)
def test_graph_render_jinja2(self):
TEMPLATE_TEXT = "{{ [obj.name, obj.slug]|join(',') }}"
RENDERED_TEXT = "Site 1,site-1"
graph = Graph(
type=ContentType.objects.get(app_label='dcim', model='site'),
name='Graph 1',
template_language=ExportTemplateLanguageChoices.LANGUAGE_JINJA2,
source=TEMPLATE_TEXT,
link=TEMPLATE_TEXT
)
self.assertEqual(graph.embed_url(self.site), RENDERED_TEXT)
self.assertEqual(graph.embed_link(self.site), RENDERED_TEXT)
|
Add tests for Graph renderingfrom django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from dcim.models import Site
from extras.choices import ExportTemplateLanguageChoices
from extras.models import Graph
class GraphTest(TestCase):
def setUp(self):
self.site = Site(name='Site 1', slug='site-1')
def test_graph_render_django(self):
# Using the pluralize filter as a sanity check (it's only available in Django)
TEMPLATE_TEXT = "{{ obj.name|lower }} thing{{ 2|pluralize }}"
RENDERED_TEXT = "site 1 things"
graph = Graph(
type=ContentType.objects.get(app_label='dcim', model='site'),
name='Graph 1',
template_language=ExportTemplateLanguageChoices.LANGUAGE_DJANGO,
source=TEMPLATE_TEXT,
link=TEMPLATE_TEXT
)
self.assertEqual(graph.embed_url(self.site), RENDERED_TEXT)
self.assertEqual(graph.embed_link(self.site), RENDERED_TEXT)
def test_graph_render_jinja2(self):
TEMPLATE_TEXT = "{{ [obj.name, obj.slug]|join(',') }}"
RENDERED_TEXT = "Site 1,site-1"
graph = Graph(
type=ContentType.objects.get(app_label='dcim', model='site'),
name='Graph 1',
template_language=ExportTemplateLanguageChoices.LANGUAGE_JINJA2,
source=TEMPLATE_TEXT,
link=TEMPLATE_TEXT
)
self.assertEqual(graph.embed_url(self.site), RENDERED_TEXT)
self.assertEqual(graph.embed_link(self.site), RENDERED_TEXT)
|
<commit_before><commit_msg>Add tests for Graph rendering<commit_after>from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from dcim.models import Site
from extras.choices import ExportTemplateLanguageChoices
from extras.models import Graph
class GraphTest(TestCase):
def setUp(self):
self.site = Site(name='Site 1', slug='site-1')
def test_graph_render_django(self):
# Using the pluralize filter as a sanity check (it's only available in Django)
TEMPLATE_TEXT = "{{ obj.name|lower }} thing{{ 2|pluralize }}"
RENDERED_TEXT = "site 1 things"
graph = Graph(
type=ContentType.objects.get(app_label='dcim', model='site'),
name='Graph 1',
template_language=ExportTemplateLanguageChoices.LANGUAGE_DJANGO,
source=TEMPLATE_TEXT,
link=TEMPLATE_TEXT
)
self.assertEqual(graph.embed_url(self.site), RENDERED_TEXT)
self.assertEqual(graph.embed_link(self.site), RENDERED_TEXT)
def test_graph_render_jinja2(self):
TEMPLATE_TEXT = "{{ [obj.name, obj.slug]|join(',') }}"
RENDERED_TEXT = "Site 1,site-1"
graph = Graph(
type=ContentType.objects.get(app_label='dcim', model='site'),
name='Graph 1',
template_language=ExportTemplateLanguageChoices.LANGUAGE_JINJA2,
source=TEMPLATE_TEXT,
link=TEMPLATE_TEXT
)
self.assertEqual(graph.embed_url(self.site), RENDERED_TEXT)
self.assertEqual(graph.embed_link(self.site), RENDERED_TEXT)
|
|
91503c4cd8afcfdf483bf7dbd3090e2b57e7ae5c
|
predictions2visualization-pairs.py
|
predictions2visualization-pairs.py
|
"""Generate visualization pairs (emotion label - concept type) for predicted
labels.
Input: directory containing text files with predicted heem labels.
Generates a text file containing
<text_id>\t<sentence id>\t<emotion label>\t<concept type label>
for each file in the input dir.
Usage: python predictions2visualization.py <dir in> <dir out>
"""
import argparse
import glob
import os
import codecs
from count_labels import load_data
from emotools.heem_utils import heem_emotion_labels, heem_concept_type_labels
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='directory containing files with '
'predictions.')
parser.add_argument('output_dir', help='the directory where the '
'generated text files should be saved')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
text_files = glob.glob(os.path.join(input_dir, '*.txt'))
for i, text_file in enumerate(text_files):
print '({} of {}) {}'.format((i+1), len(text_files), text_file)
text_id = text_file[-17:-4]
X_data, Y_data = load_data(text_file)
out_file = os.path.join(output_dir, os.path.basename(text_file))
with codecs.open(out_file, 'wb', 'utf-8') as f:
for j, predicted in enumerate(Y_data):
lbs = (set(predicted.split('_')) - {'None'})
emotion_labels = [l for l in lbs if l in heem_emotion_labels]
ct_labels = [l for l in lbs if l in heem_concept_type_labels]
if emotion_labels and ct_labels:
for e in emotion_labels:
for ct in ct_labels:
sent_id = X_data[j].split('\t')[0]
f.write('{}\t{}\t{}\t{}\n'.format(text_id, sent_id,
e, ct))
|
Add script to generate visualization pairs for predicted labels
|
Add script to generate visualization pairs for predicted labels
|
Python
|
apache-2.0
|
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
|
Add script to generate visualization pairs for predicted labels
|
"""Generate visualization pairs (emotion label - concept type) for predicted
labels.
Input: directory containing text files with predicted heem labels.
Generates a text file containing
<text_id>\t<sentence id>\t<emotion label>\t<concept type label>
for each file in the input dir.
Usage: python predictions2visualization.py <dir in> <dir out>
"""
import argparse
import glob
import os
import codecs
from count_labels import load_data
from emotools.heem_utils import heem_emotion_labels, heem_concept_type_labels
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='directory containing files with '
'predictions.')
parser.add_argument('output_dir', help='the directory where the '
'generated text files should be saved')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
text_files = glob.glob(os.path.join(input_dir, '*.txt'))
for i, text_file in enumerate(text_files):
print '({} of {}) {}'.format((i+1), len(text_files), text_file)
text_id = text_file[-17:-4]
X_data, Y_data = load_data(text_file)
out_file = os.path.join(output_dir, os.path.basename(text_file))
with codecs.open(out_file, 'wb', 'utf-8') as f:
for j, predicted in enumerate(Y_data):
lbs = (set(predicted.split('_')) - {'None'})
emotion_labels = [l for l in lbs if l in heem_emotion_labels]
ct_labels = [l for l in lbs if l in heem_concept_type_labels]
if emotion_labels and ct_labels:
for e in emotion_labels:
for ct in ct_labels:
sent_id = X_data[j].split('\t')[0]
f.write('{}\t{}\t{}\t{}\n'.format(text_id, sent_id,
e, ct))
|
<commit_before><commit_msg>Add script to generate visualization pairs for predicted labels<commit_after>
|
"""Generate visualization pairs (emotion label - concept type) for predicted
labels.
Input: directory containing text files with predicted heem labels.
Generates a text file containing
<text_id>\t<sentence id>\t<emotion label>\t<concept type label>
for each file in the input dir.
Usage: python predictions2visualization.py <dir in> <dir out>
"""
import argparse
import glob
import os
import codecs
from count_labels import load_data
from emotools.heem_utils import heem_emotion_labels, heem_concept_type_labels
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='directory containing files with '
'predictions.')
parser.add_argument('output_dir', help='the directory where the '
'generated text files should be saved')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
text_files = glob.glob(os.path.join(input_dir, '*.txt'))
for i, text_file in enumerate(text_files):
print '({} of {}) {}'.format((i+1), len(text_files), text_file)
text_id = text_file[-17:-4]
X_data, Y_data = load_data(text_file)
out_file = os.path.join(output_dir, os.path.basename(text_file))
with codecs.open(out_file, 'wb', 'utf-8') as f:
for j, predicted in enumerate(Y_data):
lbs = (set(predicted.split('_')) - {'None'})
emotion_labels = [l for l in lbs if l in heem_emotion_labels]
ct_labels = [l for l in lbs if l in heem_concept_type_labels]
if emotion_labels and ct_labels:
for e in emotion_labels:
for ct in ct_labels:
sent_id = X_data[j].split('\t')[0]
f.write('{}\t{}\t{}\t{}\n'.format(text_id, sent_id,
e, ct))
|
Add script to generate visualization pairs for predicted labels"""Generate visualization pairs (emotion label - concept type) for predicted
labels.
Input: directory containing text files with predicted heem labels.
Generates a text file containing
<text_id>\t<sentence id>\t<emotion label>\t<concept type label>
for each file in the input dir.
Usage: python predictions2visualization.py <dir in> <dir out>
"""
import argparse
import glob
import os
import codecs
from count_labels import load_data
from emotools.heem_utils import heem_emotion_labels, heem_concept_type_labels
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='directory containing files with '
'predictions.')
parser.add_argument('output_dir', help='the directory where the '
'generated text files should be saved')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
text_files = glob.glob(os.path.join(input_dir, '*.txt'))
for i, text_file in enumerate(text_files):
print '({} of {}) {}'.format((i+1), len(text_files), text_file)
text_id = text_file[-17:-4]
X_data, Y_data = load_data(text_file)
out_file = os.path.join(output_dir, os.path.basename(text_file))
with codecs.open(out_file, 'wb', 'utf-8') as f:
for j, predicted in enumerate(Y_data):
lbs = (set(predicted.split('_')) - {'None'})
emotion_labels = [l for l in lbs if l in heem_emotion_labels]
ct_labels = [l for l in lbs if l in heem_concept_type_labels]
if emotion_labels and ct_labels:
for e in emotion_labels:
for ct in ct_labels:
sent_id = X_data[j].split('\t')[0]
f.write('{}\t{}\t{}\t{}\n'.format(text_id, sent_id,
e, ct))
|
<commit_before><commit_msg>Add script to generate visualization pairs for predicted labels<commit_after>"""Generate visualization pairs (emotion label - concept type) for predicted
labels.
Input: directory containing text files with predicted heem labels.
Generates a text file containing
<text_id>\t<sentence id>\t<emotion label>\t<concept type label>
for each file in the input dir.
Usage: python predictions2visualization.py <dir in> <dir out>
"""
import argparse
import glob
import os
import codecs
from count_labels import load_data
from emotools.heem_utils import heem_emotion_labels, heem_concept_type_labels
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='directory containing files with '
'predictions.')
parser.add_argument('output_dir', help='the directory where the '
'generated text files should be saved')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
text_files = glob.glob(os.path.join(input_dir, '*.txt'))
for i, text_file in enumerate(text_files):
print '({} of {}) {}'.format((i+1), len(text_files), text_file)
text_id = text_file[-17:-4]
X_data, Y_data = load_data(text_file)
out_file = os.path.join(output_dir, os.path.basename(text_file))
with codecs.open(out_file, 'wb', 'utf-8') as f:
for j, predicted in enumerate(Y_data):
lbs = (set(predicted.split('_')) - {'None'})
emotion_labels = [l for l in lbs if l in heem_emotion_labels]
ct_labels = [l for l in lbs if l in heem_concept_type_labels]
if emotion_labels and ct_labels:
for e in emotion_labels:
for ct in ct_labels:
sent_id = X_data[j].split('\t')[0]
f.write('{}\t{}\t{}\t{}\n'.format(text_id, sent_id,
e, ct))
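A toy illustration of the pairing step above; the label string and the two sets below are stand-ins, not the actual HEEM label lists:
emotion_labels = {'Anger', 'Joy'}                      # stand-in for heem_emotion_labels
concept_type_labels = {'BodyPart', 'EmotionalAction'}  # stand-in for heem_concept_type_labels

predicted = 'Anger_BodyPart_None'                      # one hypothetical predicted label string
lbs = set(predicted.split('_')) - {'None'}
emotions = [l for l in lbs if l in emotion_labels]
concept_types = [l for l in lbs if l in concept_type_labels]
print([(e, ct) for e in emotions for ct in concept_types])   # [('Anger', 'BodyPart')]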
|
|
ea8dabdceec103fc9aa7f3da665a08340a42f931
|
src/util/batstack.py
|
src/util/batstack.py
|
#!/usr/bin/env python
"""
A collection of handy functions for the BatStack project.
Largely unsorted, but each function should have a decent docstring.
Scott Livingston <slivingston@caltech.edu>
Sep 2010.
"""
import sys
def read_chanmap( fname ):
"""Read plaintext channel map file.
Supports either manual or automatic addressing. This is set by
considering the number of elements in the first row of the file. If
there are 5, then we assume manual and use the first element of every
row as the address. If there are 4, then we assume automatic and begin
with Stack of first row having address 1, second row 2, and so on.
Note that, in manual mode, we assume addresses are given in
hexadecimal format; e.g., 0x0a is read as 10 (base 10). The '0x'
prefix is optional.
Returns a dictionary with keys corresponding to Stack addresses and
values being lists (of length 4, always) where index in list
corresponds to local channel number and actual value is global
(i.e. system-wide for your Array implementation) channel.
On error, an empty dictionary is returned.
"""
try:
f = open(fname, 'r')
except:
print 'Error: could not open %s for reading.' % fname
return {}
line = f.readline()
tok = line.split()
if len(tok) == 5:
man_addr_flag = True
elif len(tok) == 4:
man_addr_flag = False
current_addr = 1
else:
print 'Error: file looks ill-formed.'
return {}
di = {}
if man_addr_flag:
di[int(tok[0], 16)] = [int(x) for x in tok[1:]]
else:
di[current_addr] = [int(x) for x in tok]
current_addr += 1
for line in f:
tok = line.split()
if man_addr_flag:
di[int(tok[0], 16)] = [int(x) for x in tok[1:]]
else:
di[current_addr] = [int(x) for x in tok]
current_addr += 1
f.close()
return di
# For use at the command-line,
if __name__ == "__main__":
di = read_chanmap( sys.argv[1] )
print di
|
Create (beginnings of a) Python module for misc BatStack related functions.
|
Create (beginnings of a) Python module for misc BatStack related functions.
First method, read_chanmap, for reading plaintext files that specify
channel mappings, i.e. how local numbering maps to the global of an
entire Array system.
My planned style for batstack.py is to make it available primarily for
import into other more specialized or extensive programs. Use at the
command-line is currently only for quick testing/experimenting.
|
Python
|
bsd-3-clause
|
slivingston/BatStack,slivingston/BatStack,slivingston/BatStack,slivingston/BatStack,slivingston/BatStack
|
Create (beginnings of a) Python module for misc BatStack related functions.
First method, read_chanmap, for reading plaintext files that specify
channel mappings, i.e. how local numbering maps to the global of an
entire Array system.
My planned style for batstack.py is to make it available primarily for
import into other more specialized or extensive programs. Use at the
command-line is currently only for quick testing/experimenting.
|
#!/usr/bin/env python
"""
A collection of handy functions for the BatStack project.
Largely unsorted, but each function should have a decent docstring.
Scott Livingston <slivingston@caltech.edu>
Sep 2010.
"""
import sys
def read_chanmap( fname ):
"""Read plaintext channel map file.
Supports either manual or automatic addressing. This is set by
considering the number of elements in the first row of the file. If
there are 5, then we assume manual and use the first element of every
row as the address. If there are 4, then we assume automatic and begin
with Stack of first row having address 1, second row 2, and so on.
Note that, in manual mode, we assume addresses are given in
hexadecimal format; e.g., 0x0a is read as 10 (base 10). The '0x'
prefix is optional.
Returns a dictionary with keys corresponding to Stack addresses and
values being lists (of length 4, always) where index in list
corresponds to local channel number and actual value is global
(i.e. system-wide for your Array implementation) channel.
On error, an empty dictionary is returned.
"""
try:
f = open(fname, 'r')
except:
print 'Error: could not open %s for reading.' % fname
return {}
line = f.readline()
tok = line.split()
if len(tok) == 5:
man_addr_flag = True
elif len(tok) == 4:
man_addr_flag = False
current_addr = 1
else:
print 'Error: file looks ill-formed.'
return {}
di = {}
if man_addr_flag:
di[int(tok[0], 16)] = [int(x) for x in tok[1:]]
else:
di[current_addr] = [int(x) for x in tok]
current_addr += 1
for line in f:
tok = line.split()
if man_addr_flag:
di[int(tok[0], 16)] = [int(x) for x in tok[1:]]
else:
di[current_addr] = [int(x) for x in tok]
current_addr += 1
f.close()
return di
# For use at the command-line,
if __name__ == "__main__":
di = read_chanmap( sys.argv[1] )
print di
|
<commit_before><commit_msg>Create (beginnings of a) Python module for misc BatStack related functions.
First method, read_chanmap, for reading plaintext files that specify
channel mappings, i.e. how local numbering maps to the global of an
entire Array system.
My planned style for batstack.py is to make it available primarily for
import into other more specialized or extensive programs. Use at the
command-line is currently only for quick testing/experimenting.<commit_after>
|
#!/usr/bin/env python
"""
A collection of handy functions for the BatStack project.
Largely unsorted, but each function should have a decent docstring.
Scott Livingston <slivingston@caltech.edu>
Sep 2010.
"""
import sys
def read_chanmap( fname ):
"""Read plaintext channel map file.
Supports either manual or automatic addressing. This is set by
considering the number of elements in the first row of the file. If
there are 5, then we assume manual and use the first element of every
row as the address. If there are 4, then we assume automatic and begin
with Stack of first row having address 1, second row 2, and so on.
Note that, in manual mode, we assume addresses are given in
hexadecimal format; e.g., 0x0a is read as 10 (base 10). The '0x'
prefix is optional.
Returns a dictionary with keys corresponding to Stack addresses and
values being lists (of length 4, always) where index in list
corresponds to local channel number and actual value is global
(i.e. system-wide for your Array implementation) channel.
On error, an empty dictionary is returned.
"""
try:
f = open(fname, 'r')
except:
print 'Error: could not open %s for reading.' % fname
return {}
line = f.readline()
tok = line.split()
if len(tok) == 5:
man_addr_flag = True
elif len(tok) == 4:
man_addr_flag = False
current_addr = 1
else:
print 'Error: file looks ill-formed.'
return {}
di = {}
if man_addr_flag:
di[int(tok[0], 16)] = [int(x) for x in tok[1:]]
else:
di[current_addr] = [int(x) for x in tok]
current_addr += 1
for line in f:
tok = line.split()
if man_addr_flag:
di[int(tok[0], 16)] = [int(x) for x in tok[1:]]
else:
di[current_addr] = [int(x) for x in tok]
current_addr += 1
f.close()
return di
# For use at the command-line,
if __name__ == "__main__":
di = read_chanmap( sys.argv[1] )
print di
|
Create (beginnings of a) Python module for misc BatStack related functions.
First method, read_chanmap, for reading plaintext files that specify
channel mappings, i.e. how local numbering maps to the global of an
entire Array system.
My planned style for batstack.py is to make it available primarily for
import into other more specialized or extensive programs. Use at the
command-line is currently only for quick testing/experimenting.#!/usr/bin/env python
"""
A collection of handy functions for the BatStack project.
Largely unsorted, but each function should have a decent docstring.
Scott Livingston <slivingston@caltech.edu>
Sep 2010.
"""
import sys
def read_chanmap( fname ):
"""Read plaintext channel map file.
Supports either manual or automatic addressing. This is set by
considering the number of elements in the first row of the file. If
there are 5, then we assume manual and use the first element of every
row as the address. If there are 4, then we assume automatic and begin
with Stack of first row having address 1, second row 2, and so on.
Note that, in manual mode, we assume addresses are given in
hexadecimal format; e.g., 0x0a is read as 10 (base 10). The '0x'
prefix is optional.
Returns a dictionary with keys corresponding to Stack addresses and
values being lists (of length 4, always) where index in list
corresponds to local channel number and actual value is global
(i.e. system-wide for your Array implementation) channel.
On error, an empty dictionary is returned.
"""
try:
f = open(fname, 'r')
except:
print 'Error: could not open %s for reading.' % fname
return {}
line = f.readline()
tok = line.split()
if len(tok) == 5:
man_addr_flag = True
elif len(tok) == 4:
man_addr_flag = False
current_addr = 1
else:
print 'Error: file looks ill-formed.'
return {}
di = {}
if man_addr_flag:
di[int(tok[0], 16)] = [int(x) for x in tok[1:]]
else:
di[current_addr] = [int(x) for x in tok]
current_addr += 1
for line in f:
tok = line.split()
if man_addr_flag:
di[int(tok[0], 16)] = [int(x) for x in tok[1:]]
else:
di[current_addr] = [int(x) for x in tok]
current_addr += 1
f.close()
return di
# For use at the command-line,
if __name__ == "__main__":
di = read_chanmap( sys.argv[1] )
print di
|
<commit_before><commit_msg>Create (beginnings of a) Python module for misc BatStack related functions.
First method, read_chanmap, for reading plaintext files that specify
channel mappings, i.e. how local numbering maps to the global of an
entire Array system.
My planned style for batstack.py is to make it available primarily for
import into other more specialized or extensive programs. Use at the
command-line is currently only for quick testing/experimenting.<commit_after>#!/usr/bin/env python
"""
A collection of handy functions for the BatStack project.
Largely unsorted, but each function should have a decent docstring.
Scott Livingston <slivingston@caltech.edu>
Sep 2010.
"""
import sys
def read_chanmap( fname ):
"""Read plaintext channel map file.
Supports either manual or automatic addressing. This is set by
considering the number of elements in the first row of the file. If
there are 5, then we assume manual and use the first element of every
row as the address. If there are 4, then we assume automatic and begin
with Stack of first row having address 1, second row 2, and so on.
Note that, in manual mode, we assume addresses are given in
hexadecimal format; e.g., 0x0a is read as 10 (base 10). The '0x'
prefix is optional.
Returns a dictionary with keys corresponding to Stack addresses and
values being lists (of length 4, always) where index in list
corresponds to local channel number and actual value is global
(i.e. system-wide for your Array implementation) channel.
On error, an empty dictionary is returned.
"""
try:
f = open(fname, 'r')
except:
print 'Error: could not open %s for reading.' % fname
return {}
line = f.readline()
tok = line.split()
if len(tok) == 5:
man_addr_flag = True
elif len(tok) == 4:
man_addr_flag = False
current_addr = 1
else:
print 'Error: file looks ill-formed.'
return {}
di = {}
if man_addr_flag:
di[int(tok[0], 16)] = [int(x) for x in tok[1:]]
else:
di[current_addr] = [int(x) for x in tok]
current_addr += 1
for line in f:
tok = line.split()
if man_addr_flag:
di[int(tok[0], 16)] = [int(x) for x in tok[1:]]
else:
di[current_addr] = [int(x) for x in tok]
current_addr += 1
f.close()
return di
# For use at the command-line,
if __name__ == "__main__":
di = read_chanmap( sys.argv[1] )
print di
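A sketch of a manually addressed channel-map file in the format the docstring describes (the addresses and channel numbers are made up):
0x01 1 2 3 4
0x02 5 6 7 8
read_chanmap on such a file returns {1: [1, 2, 3, 4], 2: [5, 6, 7, 8]}; if the
address column is dropped (four numbers per row), the same rows are keyed 1 and 2
automatically.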
|
|
7d9955c85bf48e57f1ed0a0e77a89df8bf95e6aa
|
froide/document/migrations/0025_auto_20210505_1720.py
|
froide/document/migrations/0025_auto_20210505_1720.py
|
# Generated by Django 3.1.8 on 2021-05-05 15:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('document', '0024_auto_20210323_1404'),
]
operations = [
migrations.AddField(
model_name='document',
name='data',
field=models.JSONField(blank=True, default=dict),
),
migrations.AddField(
model_name='documentcollection',
name='settings',
field=models.JSONField(blank=True, default=dict),
),
]
|
Add data/settings fields to document/collection
|
Add data/settings fields to document/collection
|
Python
|
mit
|
fin/froide,fin/froide,fin/froide,fin/froide
|
Add data/settings fields to document/collection
|
# Generated by Django 3.1.8 on 2021-05-05 15:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('document', '0024_auto_20210323_1404'),
]
operations = [
migrations.AddField(
model_name='document',
name='data',
field=models.JSONField(blank=True, default=dict),
),
migrations.AddField(
model_name='documentcollection',
name='settings',
field=models.JSONField(blank=True, default=dict),
),
]
|
<commit_before><commit_msg>Add data/settings fields to document/collection<commit_after>
|
# Generated by Django 3.1.8 on 2021-05-05 15:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('document', '0024_auto_20210323_1404'),
]
operations = [
migrations.AddField(
model_name='document',
name='data',
field=models.JSONField(blank=True, default=dict),
),
migrations.AddField(
model_name='documentcollection',
name='settings',
field=models.JSONField(blank=True, default=dict),
),
]
|
Add data/settings fields to document/collection# Generated by Django 3.1.8 on 2021-05-05 15:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('document', '0024_auto_20210323_1404'),
]
operations = [
migrations.AddField(
model_name='document',
name='data',
field=models.JSONField(blank=True, default=dict),
),
migrations.AddField(
model_name='documentcollection',
name='settings',
field=models.JSONField(blank=True, default=dict),
),
]
|
<commit_before><commit_msg>Add data/settings fields to document/collection<commit_after># Generated by Django 3.1.8 on 2021-05-05 15:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('document', '0024_auto_20210323_1404'),
]
operations = [
migrations.AddField(
model_name='document',
name='data',
field=models.JSONField(blank=True, default=dict),
),
migrations.AddField(
model_name='documentcollection',
name='settings',
field=models.JSONField(blank=True, default=dict),
),
]
|
|
0e2317d1b2af7b88f626cefa7a1e187c5823eef7
|
kolibri/logger/migrations/0004_tidy_progress_range.py
|
kolibri/logger/migrations/0004_tidy_progress_range.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-28 07:50
from __future__ import unicode_literals
from django.db import migrations
def tidy_progress_range(apps, schema_editor):
"""
Tidies progress ranges because a bug had caused them to go out of range
"""
ContentSessionLog = apps.get_model('logger', 'ContentSessionLog')
ContentSummaryLog = apps.get_model('logger', 'ContentSummaryLog')
# Not knowing how floating points will behave in the local database,
# 1.0 might become bigger than 1.0!!
ContentSessionLog.objects.filter(progress__lt=0).update(progress=0.0)
ContentSessionLog.objects.filter(progress__gt=1).update(progress=1.0)
ContentSummaryLog.objects.filter(progress__lt=0).update(progress=0.0)
ContentSummaryLog.objects.filter(progress__gt=1).update(progress=1.0)
class Migration(migrations.Migration):
dependencies = [
('logger', '0003_auto_20170531_1140'),
]
operations = [
migrations.RunPython(tidy_progress_range),
]
|
Add migration for tidying up old values in progress fields
|
Add migration for tidying up old values in progress fields
|
Python
|
mit
|
DXCanas/kolibri,jonboiser/kolibri,lyw07/kolibri,mrpau/kolibri,learningequality/kolibri,mrpau/kolibri,learningequality/kolibri,jonboiser/kolibri,DXCanas/kolibri,indirectlylit/kolibri,benjaoming/kolibri,lyw07/kolibri,christianmemije/kolibri,lyw07/kolibri,DXCanas/kolibri,DXCanas/kolibri,lyw07/kolibri,mrpau/kolibri,benjaoming/kolibri,christianmemije/kolibri,christianmemije/kolibri,benjaoming/kolibri,learningequality/kolibri,indirectlylit/kolibri,mrpau/kolibri,learningequality/kolibri,jonboiser/kolibri,christianmemije/kolibri,indirectlylit/kolibri,indirectlylit/kolibri,jonboiser/kolibri,benjaoming/kolibri
|
Add migration for tidying up old values in progress fields
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-28 07:50
from __future__ import unicode_literals
from django.db import migrations
def tidy_progress_range(apps, schema_editor):
"""
Tidies progress ranges because a bug had caused them to go out of range
"""
ContentSessionLog = apps.get_model('logger', 'ContentSessionLog')
ContentSummaryLog = apps.get_model('logger', 'ContentSummaryLog')
# Not knowing how floating points will behave in the local database,
# 1.0 might become bigger than 1.0!!
ContentSessionLog.objects.filter(progress__lt=0).update(progress=0.0)
ContentSessionLog.objects.filter(progress__gt=1).update(progress=1.0)
ContentSummaryLog.objects.filter(progress__lt=0).update(progress=0.0)
ContentSummaryLog.objects.filter(progress__gt=1).update(progress=1.0)
class Migration(migrations.Migration):
dependencies = [
('logger', '0003_auto_20170531_1140'),
]
operations = [
migrations.RunPython(tidy_progress_range),
]
|
<commit_before><commit_msg>Add migration for tidying up old values in progress fields<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-28 07:50
from __future__ import unicode_literals
from django.db import migrations
def tidy_progress_range(apps, schema_editor):
"""
Tidies progress ranges because a bug had caused them to go out of range
"""
ContentSessionLog = apps.get_model('logger', 'ContentSessionLog')
ContentSummaryLog = apps.get_model('logger', 'ContentSummaryLog')
# Not knowing how floating points will behave in the local database,
# 1.0 might become bigger than 1.0!!
ContentSessionLog.objects.filter(progress__lt=0).update(progress=0.0)
ContentSessionLog.objects.filter(progress__gt=1).update(progress=1.0)
ContentSummaryLog.objects.filter(progress__lt=0).update(progress=0.0)
ContentSummaryLog.objects.filter(progress__gt=1).update(progress=1.0)
class Migration(migrations.Migration):
dependencies = [
('logger', '0003_auto_20170531_1140'),
]
operations = [
migrations.RunPython(tidy_progress_range),
]
|
Add migration for tidying up old values in progress fields# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-28 07:50
from __future__ import unicode_literals
from django.db import migrations
def tidy_progress_range(apps, schema_editor):
"""
Tidies progress ranges because a bug had caused them to go out of range
"""
ContentSessionLog = apps.get_model('logger', 'ContentSessionLog')
ContentSummaryLog = apps.get_model('logger', 'ContentSummaryLog')
# Not knowing how floating points will behave in the local database,
# 1.0 might become bigger than 1.0!!
ContentSessionLog.objects.filter(progress__lt=0).update(progress=0.0)
ContentSessionLog.objects.filter(progress__gt=1).update(progress=1.0)
ContentSummaryLog.objects.filter(progress__lt=0).update(progress=0.0)
ContentSummaryLog.objects.filter(progress__gt=1).update(progress=1.0)
class Migration(migrations.Migration):
dependencies = [
('logger', '0003_auto_20170531_1140'),
]
operations = [
migrations.RunPython(tidy_progress_range),
]
|
<commit_before><commit_msg>Add migration for tidying up old values in progress fields<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-28 07:50
from __future__ import unicode_literals
from django.db import migrations
def tidy_progress_range(apps, schema_editor):
"""
Tidies progress ranges because a bug had caused them to go out of range
"""
ContentSessionLog = apps.get_model('logger', 'ContentSessionLog')
ContentSummaryLog = apps.get_model('logger', 'ContentSummaryLog')
# Not knowing how floating points will behave in the local database,
# 1.0 might become bigger than 1.0!!
ContentSessionLog.objects.filter(progress__lt=0).update(progress=0.0)
ContentSessionLog.objects.filter(progress__gt=1).update(progress=1.0)
ContentSummaryLog.objects.filter(progress__lt=0).update(progress=0.0)
ContentSummaryLog.objects.filter(progress__gt=1).update(progress=1.0)
class Migration(migrations.Migration):
dependencies = [
('logger', '0003_auto_20170531_1140'),
]
operations = [
migrations.RunPython(tidy_progress_range),
]
|
|
4f663ff7a7ee05163e2cbd819412cfa76001dee1
|
stagecraft/apps/collectors/tasks.py
|
stagecraft/apps/collectors/tasks.py
|
from celery import shared_task
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@shared_task
def log(message):
logger.info(message)
|
Add simple task to test integration of celery
|
Add simple task to test integration of celery
This adds a very simple logging task that just dumps an argument out to
the log. This can be used to set up a periodic task that will show that
all of the workers (worker, beat and celerycam) are working in harmony.
|
Python
|
mit
|
alphagov/stagecraft,alphagov/stagecraft,alphagov/stagecraft,alphagov/stagecraft
|
Add simple task to test integration of celery
This adds a very simple logging task that just dumps an argument out to
the log. This can be used to set up a periodic task that will show that
all of the workers (worker, beat and celerycam) are working in harmony.
|
from celery import shared_task
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@shared_task
def log(message):
logger.info(message)
|
<commit_before><commit_msg>Add simple task to test integration of celery
This adds a very simple logging task that just dumps an argument out to
the log. This can be used to set up a periodic task that will show that
all of the workers (worker, beat and celerycam) are working in harmony.<commit_after>
|
from celery import shared_task
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@shared_task
def log(message):
logger.info(message)
|
Add simple task to test integration of celery
This adds a very simple logging task that just dumps an argument out to
the log. This can be used to set up a periodic task that will show that
all of the workers (worker, beat and celerycam) are working in harmony.from celery import shared_task
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@shared_task
def log(message):
logger.info(message)
|
<commit_before><commit_msg>Add simple task to test integration of celery
This adds a very simple logging task that just dumps an argument out to
the log. This can be used to set up a periodic task that will show that
all of the workers (worker, beat and celerycam) are working in harmony.<commit_after>from celery import shared_task
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@shared_task
def log(message):
logger.info(message)
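A sketch of how this task could be driven from a periodic schedule so that a heartbeat line appears in the worker, beat and celerycam logs; the dotted task path and the interval are assumptions, not taken from the project's settings:
from datetime import timedelta

CELERYBEAT_SCHEDULE = {
    'log-heartbeat': {
        'task': 'stagecraft.apps.collectors.tasks.log',  # assumed dotted path to the task above
        'schedule': timedelta(minutes=5),
        'args': ('beat heartbeat',),
    },
}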
|
|
ab017e9bbf0ffd19d9d8e5a933989e9111f00e77
|
CodeFights/countBlackCells.py
|
CodeFights/countBlackCells.py
|
#!/usr/local/bin/python
# Code Fights Count Black Cells Problem
from fractions import gcd
def countBlackCells(m, n):
# Manhattan distance plus lattice points less 2 end points
return (m + n) + gcd(n, m) - 2
def main():
tests = [
[3, 4, 6],
[3, 3, 7],
[2, 5, 6],
[1, 1, 1],
[1, 2, 2],
[1, 3, 3],
[1, 239, 239],
[33, 44, 86],
[16, 8, 30],
[66666, 88888, 177774]
]
for t in tests:
res = countBlackCells(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: countBlackCells({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: countBlackCells({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights count black cells problem
|
Solve Code Fights count black cells problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights count black cells problem
|
#!/usr/local/bin/python
# Code Fights Count Black Cells Problem
from fractions import gcd
def countBlackCells(m, n):
# Manhattan distance plus lattice points less 2 end points
return (m + n) + gcd(n, m) - 2
def main():
tests = [
[3, 4, 6],
[3, 3, 7],
[2, 5, 6],
[1, 1, 1],
[1, 2, 2],
[1, 3, 3],
[1, 239, 239],
[33, 44, 86],
[16, 8, 30],
[66666, 88888, 177774]
]
for t in tests:
res = countBlackCells(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: countBlackCells({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: countBlackCells({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights count black cells problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Count Black Cells Problem
from fractions import gcd
def countBlackCells(m, n):
# Manhattan distance plus lattice points less 2 end points
return (m + n) + gcd(n, m) - 2
def main():
tests = [
[3, 4, 6],
[3, 3, 7],
[2, 5, 6],
[1, 1, 1],
[1, 2, 2],
[1, 3, 3],
[1, 239, 239],
[33, 44, 86],
[16, 8, 30],
[66666, 88888, 177774]
]
for t in tests:
res = countBlackCells(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: countBlackCells({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: countBlackCells({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights count black cells problem#!/usr/local/bin/python
# Code Fights Count Black Cells Problem
from fractions import gcd
def countBlackCells(m, n):
# Manhattan distance plus lattice points less 2 end points
return (m + n) + gcd(n, m) - 2
def main():
tests = [
[3, 4, 6],
[3, 3, 7],
[2, 5, 6],
[1, 1, 1],
[1, 2, 2],
[1, 3, 3],
[1, 239, 239],
[33, 44, 86],
[16, 8, 30],
[66666, 88888, 177774]
]
for t in tests:
res = countBlackCells(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: countBlackCells({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: countBlackCells({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights count black cells problem<commit_after>#!/usr/local/bin/python
# Code Fights Count Black Cells Problem
from fractions import gcd
def countBlackCells(m, n):
# Manhattan distance plus lattice points less 2 end points
return (m + n) + gcd(n, m) - 2
def main():
tests = [
[3, 4, 6],
[3, 3, 7],
[2, 5, 6],
[1, 1, 1],
[1, 2, 2],
[1, 3, 3],
[1, 239, 239],
[33, 44, 86],
[16, 8, 30],
[66666, 88888, 177774]
]
for t in tests:
res = countBlackCells(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: countBlackCells({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: countBlackCells({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
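A sketch of the counting argument behind this closed form, with g = gcd(m, n): the diagonal of an m-by-n grid crosses the interiors of m + n - g cells, and it passes through g - 1 interior lattice points, each of which adds the 2 cells it touches only at a corner, so the total is (m + n - g) + 2*(g - 1) = m + n + g - 2. For example, m = 3, n = 4 gives 3 + 4 + 1 - 2 = 6, matching the first test case.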
|
|
b1c02240b8c2b291d43c70100c517a6038a91de4
|
tests/dags/test_external_task_sensor_fn_multiple_execution_dates_dags.py
|
tests/dags/test_external_task_sensor_fn_multiple_execution_dates_dags.py
|
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.sensors import ExternalTaskSensor
from tests.operators.sensors import TEST_DAG_ID, DEFAULT_DATE
from datetime import datetime, timedelta
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
bash_command_code = """
{% set s=execution_date.time().second %}
echo "second is {{ s }}"
if [[ $(( {{ s }} % 60 )) == 1 ]]
then
exit 1
fi
exit 0
"""
# That DAG is used to test the behavior of the ExternalTaskSensor
# when depending on several runs of an external task.
# test_without_failure should not fail, leading to
# test_external_task_sensor_multiple_dates_with_failure
# to succeed, whereas test_with_failure should fail once
# per minute (the DAG runs every second) leading to
# test_external_task_sensor_multiple_dates_with_failure
# to fail (because of timeout).
dag_external_id = TEST_DAG_ID + '_secondly_external'
dag_secondly_external = DAG(dag_external_id,
default_args=args,
schedule_interval=timedelta(seconds=1))
dag_secondly_external.add_task(BashOperator(
task_id="test_with_failure",
bash_command=bash_command_code,
retries=0,
depends_on_past=False,
start_date=DEFAULT_DATE))
dag_secondly_external.add_task(DummyOperator(
task_id="test_without_failure",
retries=0,
depends_on_past=False,
start_date=DEFAULT_DATE))
dag_id = TEST_DAG_ID + '_minutely'
dag_minutely = DAG(dag_id,
default_args=args,
schedule_interval=timedelta(minutes=1))
dag_minutely.add_task(ExternalTaskSensor(
task_id='test_external_task_sensor_multiple_dates_without_failure',
external_dag_id=dag_external_id,
external_task_id='test_without_failure',
execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)],
allowed_states=['success'],
retries=0,
timeout=1,
poke_interval=1,
depends_on_past=False,
start_date=DEFAULT_DATE))
dag_minutely.add_task(ExternalTaskSensor(
task_id='test_external_task_sensor_multiple_dates_with_failure',
external_dag_id=dag_external_id,
external_task_id='test_with_failure',
execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)],
allowed_states=['success'],
retries=0,
depends_on_past=False,
timeout=1,
poke_interval=1,
start_date=DEFAULT_DATE))
|
Add ability for ExternalTaskSensor to wait on multiple runs of a task
|
Add ability for ExternalTaskSensor to wait on multiple runs of a task
Currently, using the execution_date_fn parameter of the ExternalTaskSensor
only allows waiting for the completion of one given run of the
task the ExternalTaskSensor is sensing.
However, this prevents users from having setups where dags don't have the same
schedule frequency but still depend on one another. For example, let's say
you have a dag scheduled hourly that transforms log data and is owned by
the team in charge of logging. In the current setup you cannot have other
higher level teams, that want to use this transformed data, create
dags processing transformed log data in daily batches, while making sure
the logged transformed data was properly created.
Note that simply waiting for the data to be present (using e.g. the
HivePartitionSensor if the data is in hive) might not be satisfactory
because the data being present doesn't mean it is ready to be used.
This commit adds the ability to do exactly that by being able to have
an ExternalTaskSensor wait for multiple runs of the task it is sensing to
have finished. Now higher level teams can set up dags with an
ExternalTaskSensor sensing the end task of the dag that transforms the
log data and waiting for the successful completion of 24 of its hourly runs.
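A sketch of the daily-on-hourly use case described above (the dag ids, task ids and dates are illustrative assumptions, not taken from the commit):
from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.sensors import ExternalTaskSensor

daily_dag = DAG('daily_log_consumer',
                default_args={'owner': 'airflow', 'start_date': datetime(2016, 1, 1)},
                schedule_interval=timedelta(days=1))

wait_for_hourly_transforms = ExternalTaskSensor(
    task_id='wait_for_hourly_transforms',
    external_dag_id='hourly_log_transform',   # assumed id of the logging team's dag
    external_task_id='transform_done',        # assumed final task of that dag
    # one execution date per hourly run covered by this daily interval
    execution_date_fn=lambda dt: [dt + timedelta(hours=i) for i in range(24)],
    allowed_states=['success'],
    dag=daily_dag)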
|
Python
|
apache-2.0
|
yati-sagade/incubator-airflow,yati-sagade/incubator-airflow,yati-sagade/incubator-airflow,yati-sagade/incubator-airflow
|
Add ability for ExternalTaskSensor to wait on multiple runs of a task
Currently, using the execution_date_fn parameter of the ExternalTaskSensor
only allows waiting for the completion of one given run of the
task the ExternalTaskSensor is sensing.
However, this prevents users from having setups where dags don't have the same
schedule frequency but still depend on one another. For example, let's say
you have a dag scheduled hourly that transforms log data and is owned by
the team in charge of logging. In the current setup you cannot have other
higher level teams, that want to use this transformed data, create
dags processing transformed log data in daily batches, while making sure
the logged transformed data was properly created.
Note that simply waiting for the data to be present (using e.g. the
HivePartitionSensor if the data is in hive) might not be satisfactory
because the data being present doesn't mean it is ready to be used.
This commit adds the ability to do exactly that by being able to have
an ExternalTaskSensor wait for multiple runs of the task it is sensing to
have finished. Now higher level teams can set up dags with an
ExternalTaskSensor sensing the end task of the dag that transforms the
log data and waiting for the successful completion of 24 of its hourly runs.
|
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.sensors import ExternalTaskSensor
from tests.operators.sensors import TEST_DAG_ID, DEFAULT_DATE
from datetime import datetime, timedelta
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
bash_command_code = """
{% set s=execution_date.time().second %}
echo "second is {{ s }}"
if [[ $(( {{ s }} % 60 )) == 1 ]]
then
exit 1
fi
exit 0
"""
# That DAG is used to test the behavior of the ExternalTaskSensor
# when depending on several runs of an external task.
# test_without_failure should not fail, leading to
# test_external_task_sensor_multiple_dates_with_failure
# to succeed, whereas test_with_failure should fail once
# per minute (the DAG runs every second) leading to
# test_external_task_sensor_multiple_dates_with_failure
# to fail (because of timeout).
dag_external_id = TEST_DAG_ID + '_secondly_external'
dag_secondly_external = DAG(dag_external_id,
default_args=args,
schedule_interval=timedelta(seconds=1))
dag_secondly_external.add_task(BashOperator(
task_id="test_with_failure",
bash_command=bash_command_code,
retries=0,
depends_on_past=False,
start_date=DEFAULT_DATE))
dag_secondly_external.add_task(DummyOperator(
task_id="test_without_failure",
retries=0,
depends_on_past=False,
start_date=DEFAULT_DATE))
dag_id = TEST_DAG_ID + '_minutely'
dag_minutely = DAG(dag_id,
default_args=args,
schedule_interval=timedelta(minutes=1))
dag_minutely.add_task(ExternalTaskSensor(
task_id='test_external_task_sensor_multiple_dates_without_failure',
external_dag_id=dag_external_id,
external_task_id='test_without_failure',
execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)],
allowed_states=['success'],
retries=0,
timeout=1,
poke_interval=1,
depends_on_past=False,
start_date=DEFAULT_DATE))
dag_minutely.add_task(ExternalTaskSensor(
task_id='test_external_task_sensor_multiple_dates_with_failure',
external_dag_id=dag_external_id,
external_task_id='test_with_failure',
execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)],
allowed_states=['success'],
retries=0,
depends_on_past=False,
timeout=1,
poke_interval=1,
start_date=DEFAULT_DATE))
|
<commit_before><commit_msg>Add ability for ExternalTaskSensor to wait on multiple runs of a task
Currently, using the execution_date_fn parameter of the ExternalTaskSensor
only allows waiting for the completion of one given run of the
task the ExternalTaskSensor is sensing.
However, this prevents users from having setups where dags don't have the same
schedule frequency but still depend on one another. For example, let's say
you have a dag scheduled hourly that transforms log data and is owned by
the team in charge of logging. In the current setup you cannot have other
higher level teams, that want to use this transformed data, create
dags processing transformed log data in daily batches, while making sure
the logged transformed data was properly created.
Note that simply waiting for the data to be present (using e.g. the
HivePartitionSensor if the data is in hive) might not be satisfactory
because the data being present doesn't mean it is ready to be used.
This commit adds the ability to do exactly that by being able to have
an ExternalTaskSensor wait for multiple runs of the task it is sensing to
have finished. Now higher level teams can set up dags with an
ExternalTaskSensor sensing the end task of the dag that transforms the
log data and waiting for the successful completion of 24 of its hourly runs.<commit_after>
|
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.sensors import ExternalTaskSensor
from tests.operators.sensors import TEST_DAG_ID, DEFAULT_DATE
from datetime import datetime, timedelta
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
bash_command_code = """
{% set s=execution_date.time().second %}
echo "second is {{ s }}"
if [[ $(( {{ s }} % 60 )) == 1 ]]
then
exit 1
fi
exit 0
"""
# That DAG is used to test the behavior of the ExternalTaskSensor
# when depending on several runs of an external task.
# test_without_failure should not fail, leading to
# test_external_task_sensor_multiple_dates_with_failure
# to succeed, whereas test_with_failure should fail once
# per minute (the DAG runs every second) leading to
# test_external_task_sensor_multiple_dates_with_failure
# to fail (because of timeout).
dag_external_id = TEST_DAG_ID + '_secondly_external'
dag_secondly_external = DAG(dag_external_id,
default_args=args,
schedule_interval=timedelta(seconds=1))
dag_secondly_external.add_task(BashOperator(
task_id="test_with_failure",
bash_command=bash_command_code,
retries=0,
depends_on_past=False,
start_date=DEFAULT_DATE))
dag_secondly_external.add_task(DummyOperator(
task_id="test_without_failure",
retries=0,
depends_on_past=False,
start_date=DEFAULT_DATE))
dag_id = TEST_DAG_ID + '_minutely'
dag_minutely = DAG(dag_id,
default_args=args,
schedule_interval=timedelta(minutes=1))
dag_minutely.add_task(ExternalTaskSensor(
task_id='test_external_task_sensor_multiple_dates_without_failure',
external_dag_id=dag_external_id,
external_task_id='test_without_failure',
execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)],
allowed_states=['success'],
retries=0,
timeout=1,
poke_interval=1,
depends_on_past=False,
start_date=DEFAULT_DATE))
dag_minutely.add_task(ExternalTaskSensor(
task_id='test_external_task_sensor_multiple_dates_with_failure',
external_dag_id=dag_external_id,
external_task_id='test_with_failure',
execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)],
allowed_states=['success'],
retries=0,
depends_on_past=False,
timeout=1,
poke_interval=1,
start_date=DEFAULT_DATE))
|
Add ability for ExternalTaskSensor to wait on multiple runs of a task
Currently, using the execution_date_fn parameter of the ExternalTaskSensor
only allows waiting for the completion of one given run of the
task the ExternalTaskSensor is sensing.
However, this prevents users from having setups where dags don't have the same
schedule frequency but still depend on one another. For example, let's say
you have a dag scheduled hourly that transforms log data and is owned by
the team in charge of logging. In the current setup you cannot have other
higher level teams, that want to use this transformed data, create
dags processing transformed log data in daily batches, while making sure
the logged transformed data was properly created.
Note that simply waiting for the data to be present (using e.g. the
HivePartitionSensor if the data is in hive) might not be satisfactory
because the data being present doesn't mean it is ready to be used.
This commit adds the ability to do exactly that by being able to have
an ExternalTaskSensor wait for multiple runs of the task it is sensing to
have finished. Now higher level teams can set up dags with an
ExternalTaskSensor sensing the end task of the dag that transforms the
log data and waiting for the successful completion of 24 of its hourly runs.from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.sensors import ExternalTaskSensor
from tests.operators.sensors import TEST_DAG_ID, DEFAULT_DATE
from datetime import datetime, timedelta
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
bash_command_code = """
{% set s=execution_date.time().second %}
echo "second is {{ s }}"
if [[ $(( {{ s }} % 60 )) == 1 ]]
then
exit 1
fi
exit 0
"""
# This DAG is used to test the behavior of the ExternalTaskSensor
# when depending on several runs of an external task.
# test_without_failure should not fail, so
# test_external_task_sensor_multiple_dates_without_failure
# should succeed, whereas test_with_failure should fail once
# per minute (the DAG runs every second), so
# test_external_task_sensor_multiple_dates_with_failure
# should fail (because of timeout).
dag_external_id = TEST_DAG_ID + '_secondly_external'
dag_secondly_external = DAG(dag_external_id,
default_args=args,
schedule_interval=timedelta(seconds=1))
dag_secondly_external.add_task(BashOperator(
task_id="test_with_failure",
bash_command=bash_command_code,
retries=0,
depends_on_past=False,
start_date=DEFAULT_DATE))
dag_secondly_external.add_task(DummyOperator(
task_id="test_without_failure",
retries=0,
depends_on_past=False,
start_date=DEFAULT_DATE))
dag_id = TEST_DAG_ID + '_minutely'
dag_minutely = DAG(dag_id,
default_args=args,
schedule_interval=timedelta(minutes=1))
dag_minutely.add_task(ExternalTaskSensor(
task_id='test_external_task_sensor_multiple_dates_without_failure',
external_dag_id=dag_external_id,
external_task_id='test_without_failure',
execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)],
allowed_states=['success'],
retries=0,
timeout=1,
poke_interval=1,
depends_on_past=False,
start_date=DEFAULT_DATE))
dag_minutely.add_task(ExternalTaskSensor(
task_id='test_external_task_sensor_multiple_dates_with_failure',
external_dag_id=dag_external_id,
external_task_id='test_with_failure',
execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)],
allowed_states=['success'],
retries=0,
depends_on_past=False,
timeout=1,
poke_interval=1,
start_date=DEFAULT_DATE))
|
<commit_before><commit_msg>Add ability for ExternalTaskSensor to wait on multiple runs of a task
Currently, using the execution_date_fn parameter of the ExternalTaskSensor
only allows waiting for the completion of one given run of the
task the ExternalTaskSensor is sensing.
However, this prevents users from having setups where dags don't have the same
schedule frequency but still depend on one another. For example, let's say
you have a dag scheduled hourly that transforms log data and is owned by
the team in charge of logging. In the current setup, other higher-level teams
that want to use this transformed data cannot create dags that process
the transformed log data in daily batches while making sure
the transformed log data was properly created.
Note that simply waiting for the data to be present (using e.g. the
HivePartitionSensor if the data is in hive) might not be satisfactory
because the data being present doesn't mean it is ready to be used.
This commit adds the ability to have
an ExternalTaskSensor wait for multiple runs of the task it is sensing to
have finished. Now higher-level teams can set up dags with an
ExternalTaskSensor sensing the end task of the dag that transforms the
log data and waiting for the successful completion of 24 of its hourly runs.<commit_after>from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.sensors import ExternalTaskSensor
from tests.operators.sensors import TEST_DAG_ID, DEFAULT_DATE
from datetime import datetime, timedelta
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
bash_command_code = """
{% set s=execution_date.time().second %}
echo "second is {{ s }}"
if [[ $(( {{ s }} % 60 )) == 1 ]]
then
exit 1
fi
exit 0
"""
# This DAG is used to test the behavior of the ExternalTaskSensor
# when depending on several runs of an external task.
# test_without_failure should not fail, so
# test_external_task_sensor_multiple_dates_without_failure
# should succeed, whereas test_with_failure should fail once
# per minute (the DAG runs every second), so
# test_external_task_sensor_multiple_dates_with_failure
# should fail (because of timeout).
dag_external_id = TEST_DAG_ID + '_secondly_external'
dag_secondly_external = DAG(dag_external_id,
default_args=args,
schedule_interval=timedelta(seconds=1))
dag_secondly_external.add_task(BashOperator(
task_id="test_with_failure",
bash_command=bash_command_code,
retries=0,
depends_on_past=False,
start_date=DEFAULT_DATE))
dag_secondly_external.add_task(DummyOperator(
task_id="test_without_failure",
retries=0,
depends_on_past=False,
start_date=DEFAULT_DATE))
dag_id = TEST_DAG_ID + '_minutely'
dag_minutely = DAG(dag_id,
default_args=args,
schedule_interval=timedelta(minutes=1))
dag_minutely.add_task(ExternalTaskSensor(
task_id='test_external_task_sensor_multiple_dates_without_failure',
external_dag_id=dag_external_id,
external_task_id='test_without_failure',
execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)],
allowed_states=['success'],
retries=0,
timeout=1,
poke_interval=1,
depends_on_past=False,
start_date=DEFAULT_DATE))
dag_minutely.add_task(ExternalTaskSensor(
task_id='test_external_task_sensor_multiple_dates_with_failure',
external_dag_id=dag_external_id,
external_task_id='test_with_failure',
execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)],
allowed_states=['success'],
retries=0,
depends_on_past=False,
timeout=1,
poke_interval=1,
start_date=DEFAULT_DATE))
|
|
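For reference, a minimal sketch of the usage the ExternalTaskSensor commit message above describes — a daily dag waiting on 24 hourly runs of an upstream task — could look like the following; the dag ids, task ids, schedules and dates here are illustrative assumptions, not taken from that record.
from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.sensors import ExternalTaskSensor
default_args = {'owner': 'airflow', 'start_date': datetime(2016, 1, 1)}
# Hypothetical daily DAG owned by a downstream team.
dag_daily = DAG('daily_log_consumer',
                default_args=default_args,
                schedule_interval=timedelta(days=1))
dag_daily.add_task(ExternalTaskSensor(
    task_id='wait_for_hourly_log_transform',
    external_dag_id='hourly_log_transform',   # assumed upstream dag id
    external_task_id='transform_logs',        # assumed upstream task id
    # One execution date per hourly run covered by this daily interval.
    execution_date_fn=lambda dt: [dt + timedelta(hours=i) for i in range(24)],
    allowed_states=['success'],
    start_date=datetime(2016, 1, 1)))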
094deed52071e74225b8196079a34bda07230d39
|
accelerator/migrations/0063_update_gender_criteria_to_full_gender_spec.py
|
accelerator/migrations/0063_update_gender_criteria_to_full_gender_spec.py
|
# Generated by Django 2.2.24 on 2021-07-01 20:17
from django.db import migrations
def update_criterion_specs(apps, schema_editor):
    CriterionOptionSpec = apps.get_model("accelerator",
                                         "CriterionOptionSpec")
CriterionOptionSpec.objects.filter(option="m").update(option="male")
CriterionOptionSpec.objects.filter(option="fem").update(option="female")
def reverse_update(apps, schema_editor):
    CriterionOptionSpec = apps.get_model("accelerator",
                                         "CriterionOptionSpec")
CriterionOptionSpec.objects.filter(option="male").update(option="m")
CriterionOptionSpec.objects.filter(option="female").update(option="f")
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0062_update_polymorphic_ctype'),
]
operations = [
migrations.RunPython(update_criterion_specs,
                             reverse_code=reverse_update)
]
|
Add migration to use full gender spec
|
[AC-8797] Add migration to use full gender spec
|
Python
|
mit
|
masschallenge/django-accelerator,masschallenge/django-accelerator
|
[AC-8797] Add migration to use full gender spec
|
# Generated by Django 2.2.24 on 2021-07-01 20:17
from django.db import migrations
def update_criterion_specs(apps, schema_editor):
    CriterionOptionSpec = apps.get_model("accelerator",
                                         "CriterionOptionSpec")
CriterionOptionSpec.objects.filter(option="m").update(option="male")
CriterionOptionSpec.objects.filter(option="fem").update(option="female")
def reverse_update(apps, schema_editor):
    CriterionOptionSpec = apps.get_model("accelerator",
                                         "CriterionOptionSpec")
CriterionOptionSpec.objects.filter(option="male").update(option="m")
CriterionOptionSpec.objects.filter(option="female").update(option="f")
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0062_update_polymorphic_ctype'),
]
operations = [
migrations.RunPython(update_criterion_specs,
                             reverse_code=reverse_update)
]
|
<commit_before><commit_msg>[AC-8797] Add migration to use full gender spec<commit_after>
|
# Generated by Django 2.2.24 on 2021-07-01 20:17
from django.db import migrations
def update_criterion_specs(apps, schema_editor):
    CriterionOptionSpec = apps.get_model("accelerator",
                                         "CriterionOptionSpec")
CriterionOptionSpec.objects.filter(option="m").update(option="male")
CriterionOptionSpec.objects.filter(option="fem").update(option="female")
def reverse_update(apps, schema_editor):
    CriterionOptionSpec = apps.get_model("accelerator",
                                         "CriterionOptionSpec")
CriterionOptionSpec.objects.filter(option="male").update(option="m")
CriterionOptionSpec.objects.filter(option="female").update(option="f")
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0062_update_polymorphic_ctype'),
]
operations = [
migrations.RunPython(update_criterion_specs,
                             reverse_code=reverse_update)
]
|
[AC-8797] Add migration to use full gender spec# Generated by Django 2.2.24 on 2021-07-01 20:17
from django.db import migrations
def update_criterion_specs(apps, schema_editor):
    CriterionOptionSpec = apps.get_model("accelerator",
                                         "CriterionOptionSpec")
CriterionOptionSpec.objects.filter(option="m").update(option="male")
CriterionOptionSpec.objects.filter(option="fem").update(option="female")
def reverse_update(apps, schema_editor):
    CriterionOptionSpec = apps.get_model("accelerator",
                                         "CriterionOptionSpec")
CriterionOptionSpec.objects.filter(option="male").update(option="m")
CriterionOptionSpec.objects.filter(option="female").update(option="f")
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0062_update_polymorphic_ctype'),
]
operations = [
migrations.RunPython(update_criterion_specs,
                             reverse_code=reverse_update)
]
|
<commit_before><commit_msg>[AC-8797] Add migration to use full gender spec<commit_after># Generated by Django 2.2.24 on 2021-07-01 20:17
from django.db import migrations
def update_criterion_specs(apps, schema_editor):
    CriterionOptionSpec = apps.get_model("accelerator",
                                         "CriterionOptionSpec")
CriterionOptionSpec.objects.filter(option="m").update(option="male")
CriterionOptionSpec.objects.filter(option="fem").update(option="female")
def reverse_update(apps, schema_editor):
    CriterionOptionSpec = apps.get_model("accelerator",
                                         "CriterionOptionSpec")
CriterionOptionSpec.objects.filter(option="male").update(option="m")
CriterionOptionSpec.objects.filter(option="female").update(option="f")
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0062_update_polymorphic_ctype'),
]
operations = [
migrations.RunPython(update_criterion_specs,
                             reverse_code=reverse_update)
]
|
|
1e41ce969b3162797ed8a47a4e52d448550481ae
|
src/functions/exercise6.py
|
src/functions/exercise6.py
|
# Write a function that prints something n times
# including relative spaces
def pprint(wyp, ntimes):
wyp = str(wyp) + " "
print wyp * ntimes
def main():
pprint(555, 24)
exit(0)
main()
|
Write a function that prints something n times including relative spaces
|
Write a function that prints something n times including relative spaces
|
Python
|
mit
|
let42/python-course
|
Write a function that prints something n times including relative spaces
|
# Write a function that prints something n times
# including relative spaces
def pprint(wyp, ntimes):
wyp = str(wyp) + " "
print wyp * ntimes
def main():
pprint(555, 24)
exit(0)
main()
|
<commit_before><commit_msg>Write a function that prints something n times including relative spaces<commit_after>
|
# Write a function that prints something n times
# including relative spaces
def pprint(wyp, ntimes):
wyp = str(wyp) + " "
print wyp * ntimes
def main():
pprint(555, 24)
exit(0)
main()
|
Write a function that prints something n times including relative spaces# Write a function that prints something n times
# including relative spaces
def pprint(wyp, ntimes):
wyp = str(wyp) + " "
print wyp * ntimes
def main():
pprint(555, 24)
exit(0)
main()
|
<commit_before><commit_msg>Write a function that prints something n times including relative spaces<commit_after># Write a function that prints something n times
# including relative spaces
def pprint(wyp, ntimes):
wyp = str(wyp) + " "
print wyp * ntimes
def main():
pprint(555, 24)
exit(0)
main()
|
|
906072a29277bc1727271ac121886e5158085d03
|
tests/functional/test_new_resolver_errors.py
|
tests/functional/test_new_resolver_errors.py
|
from tests.lib import create_basic_wheel_for_package
def test_new_resolver_conflict_requirements_file(tmpdir, script):
create_basic_wheel_for_package(script, "base", "1.0")
create_basic_wheel_for_package(script, "base", "2.0")
create_basic_wheel_for_package(
script, "pkga", "1.0", depends=["base==1.0"],
)
create_basic_wheel_for_package(
script, "pkgb", "1.0", depends=["base==2.0"],
)
req_file = tmpdir.joinpath("requirements.txt")
req_file.write_text("pkga\npkgb")
result = script.pip(
"install", "--use-feature=2020-resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"-r", req_file,
expect_error=True,
)
message = "package versions have conflicting dependencies"
assert message in result.stderr, str(result)
|
Test for conflict message from requirements.txt
|
Test for conflict message from requirements.txt
|
Python
|
mit
|
pfmoore/pip,sbidoul/pip,pradyunsg/pip,pfmoore/pip,sbidoul/pip,pypa/pip,pypa/pip,pradyunsg/pip
|
Test for conflict message from requirements.txt
|
from tests.lib import create_basic_wheel_for_package
def test_new_resolver_conflict_requirements_file(tmpdir, script):
create_basic_wheel_for_package(script, "base", "1.0")
create_basic_wheel_for_package(script, "base", "2.0")
create_basic_wheel_for_package(
script, "pkga", "1.0", depends=["base==1.0"],
)
create_basic_wheel_for_package(
script, "pkgb", "1.0", depends=["base==2.0"],
)
req_file = tmpdir.joinpath("requirements.txt")
req_file.write_text("pkga\npkgb")
result = script.pip(
"install", "--use-feature=2020-resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"-r", req_file,
expect_error=True,
)
message = "package versions have conflicting dependencies"
assert message in result.stderr, str(result)
|
<commit_before><commit_msg>Test for conflict message from requirements.txt<commit_after>
|
from tests.lib import create_basic_wheel_for_package
def test_new_resolver_conflict_requirements_file(tmpdir, script):
create_basic_wheel_for_package(script, "base", "1.0")
create_basic_wheel_for_package(script, "base", "2.0")
create_basic_wheel_for_package(
script, "pkga", "1.0", depends=["base==1.0"],
)
create_basic_wheel_for_package(
script, "pkgb", "1.0", depends=["base==2.0"],
)
req_file = tmpdir.joinpath("requirements.txt")
req_file.write_text("pkga\npkgb")
result = script.pip(
"install", "--use-feature=2020-resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"-r", req_file,
expect_error=True,
)
message = "package versions have conflicting dependencies"
assert message in result.stderr, str(result)
|
Test for conflict message from requirements.txtfrom tests.lib import create_basic_wheel_for_package
def test_new_resolver_conflict_requirements_file(tmpdir, script):
create_basic_wheel_for_package(script, "base", "1.0")
create_basic_wheel_for_package(script, "base", "2.0")
create_basic_wheel_for_package(
script, "pkga", "1.0", depends=["base==1.0"],
)
create_basic_wheel_for_package(
script, "pkgb", "1.0", depends=["base==2.0"],
)
req_file = tmpdir.joinpath("requirements.txt")
req_file.write_text("pkga\npkgb")
result = script.pip(
"install", "--use-feature=2020-resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"-r", req_file,
expect_error=True,
)
message = "package versions have conflicting dependencies"
assert message in result.stderr, str(result)
|
<commit_before><commit_msg>Test for conflict message from requirements.txt<commit_after>from tests.lib import create_basic_wheel_for_package
def test_new_resolver_conflict_requirements_file(tmpdir, script):
create_basic_wheel_for_package(script, "base", "1.0")
create_basic_wheel_for_package(script, "base", "2.0")
create_basic_wheel_for_package(
script, "pkga", "1.0", depends=["base==1.0"],
)
create_basic_wheel_for_package(
script, "pkgb", "1.0", depends=["base==2.0"],
)
req_file = tmpdir.joinpath("requirements.txt")
req_file.write_text("pkga\npkgb")
result = script.pip(
"install", "--use-feature=2020-resolver",
"--no-cache-dir", "--no-index",
"--find-links", script.scratch_path,
"-r", req_file,
expect_error=True,
)
message = "package versions have conflicting dependencies"
assert message in result.stderr, str(result)
|
|
7b098f1ca44aa7b149987cd2f16280e39a7d2040
|
tests/sentry/lang/javascript/test_example.py
|
tests/sentry/lang/javascript/test_example.py
|
# coding: utf-8
from __future__ import absolute_import
import os
import json
import responses
from sentry.testutils import TestCase
from sentry.models import Event
def get_fixture_path(name):
return os.path.join(os.path.dirname(__file__), 'example-project', name)
def load_fixture(name):
with open(get_fixture_path(name)) as f:
return f.read()
class ExampleTestCase(TestCase):
@responses.activate
def test_sourcemap_expansion(self):
responses.add(
responses.GET,
'http://example.com/test.js',
body=load_fixture('test.js'),
content_type='application/javascript'
)
responses.add(
responses.GET,
'http://example.com/test.min.js',
body=load_fixture('test.min.js'),
content_type='application/javascript'
)
responses.add(
responses.GET,
'http://example.com/test.map',
body=load_fixture('test.map'),
content_type='application/json'
)
responses.add(responses.GET, 'http://example.com/index.html', body='Not Found', status=404)
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'stacktrace': {
'frames': json.loads(load_fixture('minifiedError.json'))[::-1],
},
}
],
}
}
resp = self._postWithHeader(data)
assert resp.status_code == 200
event = Event.objects.get()
exception = event.interfaces['sentry.interfaces.Exception']
frame_list = exception.values[0].stacktrace.frames
assert len(frame_list) == 4
assert frame_list[0].function == 'produceStack'
assert frame_list[0].lineno == 6
assert frame_list[0].filename == 'index.html'
# This function name is obviously wrong but the current logic we
# have does not permit better data here
assert frame_list[1].function == 'i'
assert frame_list[1].lineno == 20
assert frame_list[1].filename == 'test.js'
assert frame_list[2].function == 'invoke'
assert frame_list[2].lineno == 15
assert frame_list[2].filename == 'test.js'
# This function name is obviously wrong but the current logic we
# have does not permit better data here
assert frame_list[3].function == 'cb'
assert frame_list[3].lineno == 5
assert frame_list[3].filename == 'test.js'
|
Add complete test for javascript sourcemap resolving
|
Add complete test for javascript sourcemap resolving
|
Python
|
bsd-3-clause
|
gencer/sentry,gencer/sentry,looker/sentry,beeftornado/sentry,mvaled/sentry,ifduyue/sentry,mvaled/sentry,ifduyue/sentry,looker/sentry,gencer/sentry,ifduyue/sentry,looker/sentry,mvaled/sentry,gencer/sentry,mvaled/sentry,looker/sentry,ifduyue/sentry,beeftornado/sentry,beeftornado/sentry,gencer/sentry,mvaled/sentry,looker/sentry,mvaled/sentry,ifduyue/sentry
|
Add complete test for javascript sourcemap resolving
|
# coding: utf-8
from __future__ import absolute_import
import os
import json
import responses
from sentry.testutils import TestCase
from sentry.models import Event
def get_fixture_path(name):
return os.path.join(os.path.dirname(__file__), 'example-project', name)
def load_fixture(name):
with open(get_fixture_path(name)) as f:
return f.read()
class ExampleTestCase(TestCase):
@responses.activate
def test_sourcemap_expansion(self):
responses.add(
responses.GET,
'http://example.com/test.js',
body=load_fixture('test.js'),
content_type='application/javascript'
)
responses.add(
responses.GET,
'http://example.com/test.min.js',
body=load_fixture('test.min.js'),
content_type='application/javascript'
)
responses.add(
responses.GET,
'http://example.com/test.map',
body=load_fixture('test.map'),
content_type='application/json'
)
responses.add(responses.GET, 'http://example.com/index.html', body='Not Found', status=404)
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'stacktrace': {
'frames': json.loads(load_fixture('minifiedError.json'))[::-1],
},
}
],
}
}
resp = self._postWithHeader(data)
assert resp.status_code == 200
event = Event.objects.get()
exception = event.interfaces['sentry.interfaces.Exception']
frame_list = exception.values[0].stacktrace.frames
assert len(frame_list) == 4
assert frame_list[0].function == 'produceStack'
assert frame_list[0].lineno == 6
assert frame_list[0].filename == 'index.html'
# This function name is obviously wrong but the current logic we
# have does not permit better data here
assert frame_list[1].function == 'i'
assert frame_list[1].lineno == 20
assert frame_list[1].filename == 'test.js'
assert frame_list[2].function == 'invoke'
assert frame_list[2].lineno == 15
assert frame_list[2].filename == 'test.js'
# This function name is obviously wrong but the current logic we
# have does not permit better data here
assert frame_list[3].function == 'cb'
assert frame_list[3].lineno == 5
assert frame_list[3].filename == 'test.js'
|
<commit_before><commit_msg>Add complete test for javascript sourcemap resolving<commit_after>
|
# coding: utf-8
from __future__ import absolute_import
import os
import json
import responses
from sentry.testutils import TestCase
from sentry.models import Event
def get_fixture_path(name):
return os.path.join(os.path.dirname(__file__), 'example-project', name)
def load_fixture(name):
with open(get_fixture_path(name)) as f:
return f.read()
class ExampleTestCase(TestCase):
@responses.activate
def test_sourcemap_expansion(self):
responses.add(
responses.GET,
'http://example.com/test.js',
body=load_fixture('test.js'),
content_type='application/javascript'
)
responses.add(
responses.GET,
'http://example.com/test.min.js',
body=load_fixture('test.min.js'),
content_type='application/javascript'
)
responses.add(
responses.GET,
'http://example.com/test.map',
body=load_fixture('test.map'),
content_type='application/json'
)
responses.add(responses.GET, 'http://example.com/index.html', body='Not Found', status=404)
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'stacktrace': {
'frames': json.loads(load_fixture('minifiedError.json'))[::-1],
},
}
],
}
}
resp = self._postWithHeader(data)
assert resp.status_code == 200
event = Event.objects.get()
exception = event.interfaces['sentry.interfaces.Exception']
frame_list = exception.values[0].stacktrace.frames
assert len(frame_list) == 4
assert frame_list[0].function == 'produceStack'
assert frame_list[0].lineno == 6
assert frame_list[0].filename == 'index.html'
# This function name is obviously wrong but the current logic we
# have does not permit better data here
assert frame_list[1].function == 'i'
assert frame_list[1].lineno == 20
assert frame_list[1].filename == 'test.js'
assert frame_list[2].function == 'invoke'
assert frame_list[2].lineno == 15
assert frame_list[2].filename == 'test.js'
# This function name is obviously wrong but the current logic we
# have does not permit better data here
assert frame_list[3].function == 'cb'
assert frame_list[3].lineno == 5
assert frame_list[3].filename == 'test.js'
|
Add complete test for javascript sourcemap resolving# coding: utf-8
from __future__ import absolute_import
import os
import json
import responses
from sentry.testutils import TestCase
from sentry.models import Event
def get_fixture_path(name):
return os.path.join(os.path.dirname(__file__), 'example-project', name)
def load_fixture(name):
with open(get_fixture_path(name)) as f:
return f.read()
class ExampleTestCase(TestCase):
@responses.activate
def test_sourcemap_expansion(self):
responses.add(
responses.GET,
'http://example.com/test.js',
body=load_fixture('test.js'),
content_type='application/javascript'
)
responses.add(
responses.GET,
'http://example.com/test.min.js',
body=load_fixture('test.min.js'),
content_type='application/javascript'
)
responses.add(
responses.GET,
'http://example.com/test.map',
body=load_fixture('test.map'),
content_type='application/json'
)
responses.add(responses.GET, 'http://example.com/index.html', body='Not Found', status=404)
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'stacktrace': {
'frames': json.loads(load_fixture('minifiedError.json'))[::-1],
},
}
],
}
}
resp = self._postWithHeader(data)
assert resp.status_code == 200
event = Event.objects.get()
exception = event.interfaces['sentry.interfaces.Exception']
frame_list = exception.values[0].stacktrace.frames
assert len(frame_list) == 4
assert frame_list[0].function == 'produceStack'
assert frame_list[0].lineno == 6
assert frame_list[0].filename == 'index.html'
# This function name is obviously wrong but the current logic we
# have does not permit better data here
assert frame_list[1].function == 'i'
assert frame_list[1].lineno == 20
assert frame_list[1].filename == 'test.js'
assert frame_list[2].function == 'invoke'
assert frame_list[2].lineno == 15
assert frame_list[2].filename == 'test.js'
# This function name is obviously wrong but the current logic we
# have does not permit better data here
assert frame_list[3].function == 'cb'
assert frame_list[3].lineno == 5
assert frame_list[3].filename == 'test.js'
|
<commit_before><commit_msg>Add complete test for javascript sourcemap resolving<commit_after># coding: utf-8
from __future__ import absolute_import
import os
import json
import responses
from sentry.testutils import TestCase
from sentry.models import Event
def get_fixture_path(name):
return os.path.join(os.path.dirname(__file__), 'example-project', name)
def load_fixture(name):
with open(get_fixture_path(name)) as f:
return f.read()
class ExampleTestCase(TestCase):
@responses.activate
def test_sourcemap_expansion(self):
responses.add(
responses.GET,
'http://example.com/test.js',
body=load_fixture('test.js'),
content_type='application/javascript'
)
responses.add(
responses.GET,
'http://example.com/test.min.js',
body=load_fixture('test.min.js'),
content_type='application/javascript'
)
responses.add(
responses.GET,
'http://example.com/test.map',
body=load_fixture('test.map'),
content_type='application/json'
)
responses.add(responses.GET, 'http://example.com/index.html', body='Not Found', status=404)
data = {
'message': 'hello',
'platform': 'javascript',
'sentry.interfaces.Exception': {
'values': [
{
'type': 'Error',
'stacktrace': {
'frames': json.loads(load_fixture('minifiedError.json'))[::-1],
},
}
],
}
}
resp = self._postWithHeader(data)
assert resp.status_code == 200
event = Event.objects.get()
exception = event.interfaces['sentry.interfaces.Exception']
frame_list = exception.values[0].stacktrace.frames
assert len(frame_list) == 4
assert frame_list[0].function == 'produceStack'
assert frame_list[0].lineno == 6
assert frame_list[0].filename == 'index.html'
# This function name is obviously wrong but the current logic we
# have does not permit better data here
assert frame_list[1].function == 'i'
assert frame_list[1].lineno == 20
assert frame_list[1].filename == 'test.js'
assert frame_list[2].function == 'invoke'
assert frame_list[2].lineno == 15
assert frame_list[2].filename == 'test.js'
# This function name is obviously wrong but the current logic we
# have does not permit better data here
assert frame_list[3].function == 'cb'
assert frame_list[3].lineno == 5
assert frame_list[3].filename == 'test.js'
|
|
88245c20019ca92caeeb16c05583e9d60b4d7b7f
|
assignment2/Replicator.py
|
assignment2/Replicator.py
|
# from slave1 import Slave1
# from slave2 import Slave2
import rocksdb
import encodings
# class Replica:
# def __init__(self):
# print("Decorator")
# def decorator(self, rep):
# def wrapper():
# key, value = rep()
# #if((slave1.Slave1.get(key))!=value):
# slave1.Slave1.put(self, key, value)
# #if(slave2.Slave2.get(key)!=value):
# slave1.Slave1.put(self, key, value)
# print("Something is happening after some_function() is called.")
# return wrapper
# class Replica:
# def my_decorator(argument):
def put_decorator(function):
def wrapper(*args, **kwargs):
key = (args[0])
value = (args[1])
print("Put data in Slave1 DB")
slave1_db = rocksdb.DB("slave_1.db", rocksdb.Options(create_if_missing=True))
slave1_db.put(key.encode(), value.encode())
print("Put data in Slave2 DB")
slave2_db = rocksdb.DB("slave_2.db", rocksdb.Options(create_if_missing=True))
slave2_db.put(key.encode(), value.encode())
# slv1 = Slave1()
# slv2 = Slave2()
# if((slv1.get(key))!=value):
# slv1.put(key, value)
# if(slv2.get(key)!=value):
# slv2.put(key, value)
# slv1.put(args[0], args[1])
return function(*args, **kwargs)
return wrapper
def get_decorator(function):
def wrapper(*args, **kwargs):
key = (args[0])
slave1_db = rocksdb.DB("slave_1.db", rocksdb.Options(create_if_missing=True))
print("Value of " + args[0] + " in Slave1 DB is " + (slave1_db.get(key.encode())).decode())
slave2_db = rocksdb.DB("slave_2.db", rocksdb.Options(create_if_missing=True))
print("Value of " + args[0] + " in Slave1 DB is " + (slave2_db.get(key.encode())).decode())
return function(*args, **kwargs)
return wrapper
|
Add File with decorator functions
|
Add File with decorator functions
|
Python
|
mit
|
rimpybharot/CMPE273
|
Add File with decorator functions
|
# from slave1 import Slave1
# from slave2 import Slave2
import rocksdb
import encodings
# class Replica:
# def __init__(self):
# print("Decorator")
# def decorator(self, rep):
# def wrapper():
# key, value = rep()
# #if((slave1.Slave1.get(key))!=value):
# slave1.Slave1.put(self, key, value)
# #if(slave2.Slave2.get(key)!=value):
# slave1.Slave1.put(self, key, value)
# print("Something is happening after some_function() is called.")
# return wrapper
# class Replica:
# def my_decorator(argument):
def put_decorator(function):
def wrapper(*args, **kwargs):
key = (args[0])
value = (args[1])
print("Put data in Slave1 DB")
slave1_db = rocksdb.DB("slave_1.db", rocksdb.Options(create_if_missing=True))
slave1_db.put(key.encode(), value.encode())
print("Put data in Slave2 DB")
slave2_db = rocksdb.DB("slave_2.db", rocksdb.Options(create_if_missing=True))
slave2_db.put(key.encode(), value.encode())
# slv1 = Slave1()
# slv2 = Slave2()
# if((slv1.get(key))!=value):
# slv1.put(key, value)
# if(slv2.get(key)!=value):
# slv2.put(key, value)
# slv1.put(args[0], args[1])
return function(*args, **kwargs)
return wrapper
def get_decorator(function):
def wrapper(*args, **kwargs):
key = (args[0])
slave1_db = rocksdb.DB("slave_1.db", rocksdb.Options(create_if_missing=True))
print("Value of " + args[0] + " in Slave1 DB is " + (slave1_db.get(key.encode())).decode())
slave2_db = rocksdb.DB("slave_2.db", rocksdb.Options(create_if_missing=True))
print("Value of " + args[0] + " in Slave1 DB is " + (slave2_db.get(key.encode())).decode())
return function(*args, **kwargs)
return wrapper
|
<commit_before><commit_msg>Add File with decorator functions<commit_after>
|
# from slave1 import Slave1
# from slave2 import Slave2
import rocksdb
import encodings
# class Replica:
# def __init__(self):
# print("Decorator")
# def decorator(self, rep):
# def wrapper():
# key, value = rep()
# #if((slave1.Slave1.get(key))!=value):
# slave1.Slave1.put(self, key, value)
# #if(slave2.Slave2.get(key)!=value):
# slave1.Slave1.put(self, key, value)
# print("Something is happening after some_function() is called.")
# return wrapper
# class Replica:
# def my_decorator(argument):
def put_decorator(function):
def wrapper(*args, **kwargs):
key = (args[0])
value = (args[1])
print("Put data in Slave1 DB")
slave1_db = rocksdb.DB("slave_1.db", rocksdb.Options(create_if_missing=True))
slave1_db.put(key.encode(), value.encode())
print("Put data in Slave2 DB")
slave2_db = rocksdb.DB("slave_2.db", rocksdb.Options(create_if_missing=True))
slave2_db.put(key.encode(), value.encode())
# slv1 = Slave1()
# slv2 = Slave2()
# if((slv1.get(key))!=value):
# slv1.put(key, value)
# if(slv2.get(key)!=value):
# slv2.put(key, value)
# slv1.put(args[0], args[1])
return function(*args, **kwargs)
return wrapper
def get_decorator(function):
def wrapper(*args, **kwargs):
key = (args[0])
slave1_db = rocksdb.DB("slave_1.db", rocksdb.Options(create_if_missing=True))
print("Value of " + args[0] + " in Slave1 DB is " + (slave1_db.get(key.encode())).decode())
slave2_db = rocksdb.DB("slave_2.db", rocksdb.Options(create_if_missing=True))
print("Value of " + args[0] + " in Slave1 DB is " + (slave2_db.get(key.encode())).decode())
return function(*args, **kwargs)
return wrapper
|
Add File with decorator functions# from slave1 import Slave1
# from slave2 import Slave2
import rocksdb
import encodings
# class Replica:
# def __init__(self):
# print("Decorator")
# def decorator(self, rep):
# def wrapper():
# key, value = rep()
# #if((slave1.Slave1.get(key))!=value):
# slave1.Slave1.put(self, key, value)
# #if(slave2.Slave2.get(key)!=value):
# slave1.Slave1.put(self, key, value)
# print("Something is happening after some_function() is called.")
# return wrapper
# class Replica:
# def my_decorator(argument):
def put_decorator(function):
def wrapper(*args, **kwargs):
key = (args[0])
value = (args[1])
print("Put data in Slave1 DB")
slave1_db = rocksdb.DB("slave_1.db", rocksdb.Options(create_if_missing=True))
slave1_db.put(key.encode(), value.encode())
print("Put data in Slave2 DB")
slave2_db = rocksdb.DB("slave_2.db", rocksdb.Options(create_if_missing=True))
slave2_db.put(key.encode(), value.encode())
# slv1 = Slave1()
# slv2 = Slave2()
# if((slv1.get(key))!=value):
# slv1.put(key, value)
# if(slv2.get(key)!=value):
# slv2.put(key, value)
# slv1.put(args[0], args[1])
return function(*args, **kwargs)
return wrapper
def get_decorator(function):
def wrapper(*args, **kwargs):
key = (args[0])
slave1_db = rocksdb.DB("slave_1.db", rocksdb.Options(create_if_missing=True))
print("Value of " + args[0] + " in Slave1 DB is " + (slave1_db.get(key.encode())).decode())
slave2_db = rocksdb.DB("slave_2.db", rocksdb.Options(create_if_missing=True))
print("Value of " + args[0] + " in Slave1 DB is " + (slave2_db.get(key.encode())).decode())
return function(*args, **kwargs)
return wrapper
|
<commit_before><commit_msg>Add File with decorator functions<commit_after># from slave1 import Slave1
# from slave2 import Slave2
import rocksdb
import encodings
# class Replica:
# def __init__(self):
# print("Decorator")
# def decorator(self, rep):
# def wrapper():
# key, value = rep()
# #if((slave1.Slave1.get(key))!=value):
# slave1.Slave1.put(self, key, value)
# #if(slave2.Slave2.get(key)!=value):
# slave1.Slave1.put(self, key, value)
# print("Something is happening after some_function() is called.")
# return wrapper
# class Replica:
# def my_decorator(argument):
def put_decorator(function):
def wrapper(*args, **kwargs):
key = (args[0])
value = (args[1])
print("Put data in Slave1 DB")
slave1_db = rocksdb.DB("slave_1.db", rocksdb.Options(create_if_missing=True))
slave1_db.put(key.encode(), value.encode())
print("Put data in Slave2 DB")
slave2_db = rocksdb.DB("slave_2.db", rocksdb.Options(create_if_missing=True))
slave2_db.put(key.encode(), value.encode())
# slv1 = Slave1()
# slv2 = Slave2()
# if((slv1.get(key))!=value):
# slv1.put(key, value)
# if(slv2.get(key)!=value):
# slv2.put(key, value)
# slv1.put(args[0], args[1])
return function(*args, **kwargs)
return wrapper
def get_decorator(function):
def wrapper(*args, **kwargs):
key = (args[0])
slave1_db = rocksdb.DB("slave_1.db", rocksdb.Options(create_if_missing=True))
print("Value of " + args[0] + " in Slave1 DB is " + (slave1_db.get(key.encode())).decode())
slave2_db = rocksdb.DB("slave_2.db", rocksdb.Options(create_if_missing=True))
print("Value of " + args[0] + " in Slave1 DB is " + (slave2_db.get(key.encode())).decode())
return function(*args, **kwargs)
return wrapper
|
|
348536786d6194f0f23475427f96a5f5b69c0743
|
heron/tools/cli/src/python/version.py
|
heron/tools/cli/src/python/version.py
|
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' version.py '''
from heron.tools.cli.src.python.response import Response, Status
import heron.tools.cli.src.python.args as cli_args
import heron.tools.common.src.python.utils.config as config
def create_parser(subparsers):
'''
:param subparsers:
:return:
'''
parser = subparsers.add_parser(
'version',
help='Print version of heron-cli',
usage="%(prog)s",
add_help=False)
cli_args.add_titles(parser)
parser.set_defaults(subcommand='version')
return parser
# pylint: disable=unused-argument
def run(command, parser, args, unknown_args):
'''
:param command:
:param parser:
:param args:
:param unknown_args:
:return:
'''
config.print_build_info()
return Response(Status.Ok)
|
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' version.py '''
from heron.tools.cli.src.python.response import Response, Status
import heron.tools.cli.src.python.args as cli_args
import heron.tools.common.src.python.utils.config as config
def create_parser(subparsers):
'''
:param subparsers:
:return:
'''
parser = subparsers.add_parser(
'version',
help='Print version of heron-cli',
usage="%(prog)s",
add_help=False)
cli_args.add_titles(parser)
parser.set_defaults(subcommand='version')
return parser
# pylint: disable=unused-argument
def run(command, parser, args, unknown_args):
'''
:param command:
:param parser:
:param args:
:param unknown_args:
:return:
'''
config.print_build_info()
return Response(Status.Ok)
|
Add back a deleted newline.
|
Add back a deleted newline.
|
Python
|
apache-2.0
|
tomncooper/heron,tomncooper/heron,lewiskan/heron,streamlio/heron,twitter/heron,ashvina/heron,mycFelix/heron,twitter/heron,huijunwu/heron,lucperkins/heron,tomncooper/heron,tomncooper/heron,lucperkins/heron,srkukarni/heron,objmagic/heron,lewiskan/heron,lewiskan/heron,ashvina/heron,lucperkins/heron,streamlio/heron,huijunwu/heron,ashvina/heron,lucperkins/heron,mycFelix/heron,tomncooper/heron,nlu90/heron,nlu90/heron,lewiskan/heron,twitter/heron,objmagic/heron,lucperkins/heron,streamlio/heron,huijunwu/heron,ashvina/heron,streamlio/heron,nlu90/heron,nlu90/heron,objmagic/heron,tomncooper/heron,mycFelix/heron,tomncooper/heron,huijunwu/heron,srkukarni/heron,nlu90/heron,nlu90/heron,streamlio/heron,nlu90/heron,mycFelix/heron,lewiskan/heron,tomncooper/heron,objmagic/heron,lewiskan/heron,streamlio/heron,nlu90/heron,mycFelix/heron,objmagic/heron,nlu90/heron,huijunwu/heron,ashvina/heron,lucperkins/heron,huijunwu/heron,srkukarni/heron,mycFelix/heron,lewiskan/heron,objmagic/heron,huijunwu/heron,lewiskan/heron,mycFelix/heron,srkukarni/heron,ashvina/heron,streamlio/heron,mycFelix/heron,objmagic/heron,twitter/heron,objmagic/heron,twitter/heron,ashvina/heron,tomncooper/heron,streamlio/heron,srkukarni/heron,twitter/heron,streamlio/heron,lucperkins/heron,twitter/heron,srkukarni/heron,lewiskan/heron,srkukarni/heron,srkukarni/heron,mycFelix/heron,ashvina/heron,ashvina/heron,twitter/heron,huijunwu/heron,lucperkins/heron,objmagic/heron
|
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' version.py '''
from heron.tools.cli.src.python.response import Response, Status
import heron.tools.cli.src.python.args as cli_args
import heron.tools.common.src.python.utils.config as config
def create_parser(subparsers):
'''
:param subparsers:
:return:
'''
parser = subparsers.add_parser(
'version',
help='Print version of heron-cli',
usage="%(prog)s",
add_help=False)
cli_args.add_titles(parser)
parser.set_defaults(subcommand='version')
return parser
# pylint: disable=unused-argument
def run(command, parser, args, unknown_args):
'''
:param command:
:param parser:
:param args:
:param unknown_args:
:return:
'''
config.print_build_info()
return Response(Status.Ok)
Add back a deleted newline.
|
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' version.py '''
from heron.tools.cli.src.python.response import Response, Status
import heron.tools.cli.src.python.args as cli_args
import heron.tools.common.src.python.utils.config as config
def create_parser(subparsers):
'''
:param subparsers:
:return:
'''
parser = subparsers.add_parser(
'version',
help='Print version of heron-cli',
usage="%(prog)s",
add_help=False)
cli_args.add_titles(parser)
parser.set_defaults(subcommand='version')
return parser
# pylint: disable=unused-argument
def run(command, parser, args, unknown_args):
'''
:param command:
:param parser:
:param args:
:param unknown_args:
:return:
'''
config.print_build_info()
return Response(Status.Ok)
|
<commit_before># Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' version.py '''
from heron.tools.cli.src.python.response import Response, Status
import heron.tools.cli.src.python.args as cli_args
import heron.tools.common.src.python.utils.config as config
def create_parser(subparsers):
'''
:param subparsers:
:return:
'''
parser = subparsers.add_parser(
'version',
help='Print version of heron-cli',
usage="%(prog)s",
add_help=False)
cli_args.add_titles(parser)
parser.set_defaults(subcommand='version')
return parser
# pylint: disable=unused-argument
def run(command, parser, args, unknown_args):
'''
:param command:
:param parser:
:param args:
:param unknown_args:
:return:
'''
config.print_build_info()
return Response(Status.Ok)
<commit_msg>Add back a deleted newline.<commit_after>
|
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' version.py '''
from heron.tools.cli.src.python.response import Response, Status
import heron.tools.cli.src.python.args as cli_args
import heron.tools.common.src.python.utils.config as config
def create_parser(subparsers):
'''
:param subparsers:
:return:
'''
parser = subparsers.add_parser(
'version',
help='Print version of heron-cli',
usage="%(prog)s",
add_help=False)
cli_args.add_titles(parser)
parser.set_defaults(subcommand='version')
return parser
# pylint: disable=unused-argument
def run(command, parser, args, unknown_args):
'''
:param command:
:param parser:
:param args:
:param unknown_args:
:return:
'''
config.print_build_info()
return Response(Status.Ok)
|
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' version.py '''
from heron.tools.cli.src.python.response import Response, Status
import heron.tools.cli.src.python.args as cli_args
import heron.tools.common.src.python.utils.config as config
def create_parser(subparsers):
'''
:param subparsers:
:return:
'''
parser = subparsers.add_parser(
'version',
help='Print version of heron-cli',
usage="%(prog)s",
add_help=False)
cli_args.add_titles(parser)
parser.set_defaults(subcommand='version')
return parser
# pylint: disable=unused-argument
def run(command, parser, args, unknown_args):
'''
:param command:
:param parser:
:param args:
:param unknown_args:
:return:
'''
config.print_build_info()
return Response(Status.Ok)
Add back a deleted newline.# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' version.py '''
from heron.tools.cli.src.python.response import Response, Status
import heron.tools.cli.src.python.args as cli_args
import heron.tools.common.src.python.utils.config as config
def create_parser(subparsers):
'''
:param subparsers:
:return:
'''
parser = subparsers.add_parser(
'version',
help='Print version of heron-cli',
usage="%(prog)s",
add_help=False)
cli_args.add_titles(parser)
parser.set_defaults(subcommand='version')
return parser
# pylint: disable=unused-argument
def run(command, parser, args, unknown_args):
'''
:param command:
:param parser:
:param args:
:param unknown_args:
:return:
'''
config.print_build_info()
return Response(Status.Ok)
|
<commit_before># Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' version.py '''
from heron.tools.cli.src.python.response import Response, Status
import heron.tools.cli.src.python.args as cli_args
import heron.tools.common.src.python.utils.config as config
def create_parser(subparsers):
'''
:param subparsers:
:return:
'''
parser = subparsers.add_parser(
'version',
help='Print version of heron-cli',
usage="%(prog)s",
add_help=False)
cli_args.add_titles(parser)
parser.set_defaults(subcommand='version')
return parser
# pylint: disable=unused-argument
def run(command, parser, args, unknown_args):
'''
:param command:
:param parser:
:param args:
:param unknown_args:
:return:
'''
config.print_build_info()
return Response(Status.Ok)
<commit_msg>Add back a deleted newline.<commit_after># Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' version.py '''
from heron.tools.cli.src.python.response import Response, Status
import heron.tools.cli.src.python.args as cli_args
import heron.tools.common.src.python.utils.config as config
def create_parser(subparsers):
'''
:param subparsers:
:return:
'''
parser = subparsers.add_parser(
'version',
help='Print version of heron-cli',
usage="%(prog)s",
add_help=False)
cli_args.add_titles(parser)
parser.set_defaults(subcommand='version')
return parser
# pylint: disable=unused-argument
def run(command, parser, args, unknown_args):
'''
:param command:
:param parser:
:param args:
:param unknown_args:
:return:
'''
config.print_build_info()
return Response(Status.Ok)
|
477db3854ce527cb410c5814ae8dfab1e6bef35b
|
ceph_deploy/tests/parser/test_mds.py
|
ceph_deploy/tests/parser/test_mds.py
|
import pytest
from ceph_deploy.cli import get_parser
class TestParserMDS(object):
def setup(self):
self.parser = get_parser()
def test_mds_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mds --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy mds' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12150")
def test_mds_create_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mds create'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_mds_create_one_host(self):
args = self.parser.parse_args('mds create host1'.split())
assert args.mds[0][0] == 'host1'
def test_mds_create_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args(['mds', 'create'] + hostnames)
# args.mds is a list of tuples, and tuple[0] is the hostname
hosts = [x[0] for x in args.mds]
assert frozenset(hosts) == frozenset(hostnames)
|
Add argparse tests for mds
|
[RM-11742] Add argparse tests for mds
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>
|
Python
|
mit
|
trhoden/ceph-deploy,Vicente-Cheng/ceph-deploy,shenhequnying/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,branto1/ceph-deploy,branto1/ceph-deploy,shenhequnying/ceph-deploy,isyippee/ceph-deploy,SUSE/ceph-deploy,SUSE/ceph-deploy,imzhulei/ceph-deploy,trhoden/ceph-deploy,zhouyuan/ceph-deploy,osynge/ceph-deploy,osynge/ceph-deploy,imzhulei/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,isyippee/ceph-deploy,codenrhoden/ceph-deploy,Vicente-Cheng/ceph-deploy,zhouyuan/ceph-deploy,ceph/ceph-deploy,ghxandsky/ceph-deploy,ghxandsky/ceph-deploy,codenrhoden/ceph-deploy,ceph/ceph-deploy
|
[RM-11742] Add argparse tests for mds
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>
|
import pytest
from ceph_deploy.cli import get_parser
class TestParserMDS(object):
def setup(self):
self.parser = get_parser()
def test_mds_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mds --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy mds' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12150")
def test_mds_create_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mds create'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_mds_create_one_host(self):
args = self.parser.parse_args('mds create host1'.split())
assert args.mds[0][0] == 'host1'
def test_mds_create_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args(['mds', 'create'] + hostnames)
# args.mds is a list of tuples, and tuple[0] is the hostname
hosts = [x[0] for x in args.mds]
assert frozenset(hosts) == frozenset(hostnames)
|
<commit_before><commit_msg>[RM-11742] Add argparse tests for mds
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com><commit_after>
|
import pytest
from ceph_deploy.cli import get_parser
class TestParserMDS(object):
def setup(self):
self.parser = get_parser()
def test_mds_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mds --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy mds' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12150")
def test_mds_create_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mds create'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_mds_create_one_host(self):
args = self.parser.parse_args('mds create host1'.split())
assert args.mds[0][0] == 'host1'
def test_mds_create_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args(['mds', 'create'] + hostnames)
# args.mds is a list of tuples, and tuple[0] is the hostname
hosts = [x[0] for x in args.mds]
assert frozenset(hosts) == frozenset(hostnames)
|
[RM-11742] Add argparse tests for mds
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>import pytest
from ceph_deploy.cli import get_parser
class TestParserMDS(object):
def setup(self):
self.parser = get_parser()
def test_mds_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mds --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy mds' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12150")
def test_mds_create_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mds create'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_mds_create_one_host(self):
args = self.parser.parse_args('mds create host1'.split())
assert args.mds[0][0] == 'host1'
def test_mds_create_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args(['mds', 'create'] + hostnames)
# args.mds is a list of tuples, and tuple[0] is the hostname
hosts = [x[0] for x in args.mds]
assert frozenset(hosts) == frozenset(hostnames)
|
<commit_before><commit_msg>[RM-11742] Add argparse tests for mds
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com><commit_after>import pytest
from ceph_deploy.cli import get_parser
class TestParserMDS(object):
def setup(self):
self.parser = get_parser()
def test_mds_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mds --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy mds' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12150")
def test_mds_create_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mds create'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_mds_create_one_host(self):
args = self.parser.parse_args('mds create host1'.split())
assert args.mds[0][0] == 'host1'
def test_mds_create_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args(['mds', 'create'] + hostnames)
# args.mds is a list of tuples, and tuple[0] is the hostname
hosts = [x[0] for x in args.mds]
assert frozenset(hosts) == frozenset(hostnames)
|
|
b455d767cd8f9c57fad4f9bc4b322952512bf2c3
|
sensibility/_paths.py
|
sensibility/_paths.py
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright 2017 Eddie Antonio Santos <easantos@ualberta.ca>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Paths for internal use.
"""
from pathlib import Path
# Get paths for here and repository root dir.
HERE = Path(__file__).parent
REPOSITORY_ROOT = HERE.parent
assert (REPOSITORY_ROOT / '.git').exists()
# Directories for storing data and models.
DATA_DIR = REPOSITORY_ROOT / 'data'
MODEL_DIR = REPOSITORY_ROOT / 'models'
# Paths to specific databases.
SOURCES_PATH = DATA_DIR / 'javascript-sources.sqlite3'
VECTORS_PATH = DATA_DIR / 'javascript-vectors.sqlite3'
MUTATIONS_PATH = DATA_DIR / 'javascript-mutations.sqlite3'
PREDICTIONS_PATH = DATA_DIR / 'javascript-predictions.sqlite3'
|
Put internal paths in one place.
|
Put internal paths in one place.
|
Python
|
apache-2.0
|
naturalness/sensibility,naturalness/sensibility,naturalness/sensibility,naturalness/sensibility
|
Put internal paths in one place.
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright 2017 Eddie Antonio Santos <easantos@ualberta.ca>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Paths for internal use.
"""
from pathlib import Path
# Get paths for here and repository root dir.
HERE = Path(__file__).parent
REPOSITORY_ROOT = HERE.parent
assert (REPOSITORY_ROOT / '.git').exists()
# Directories for storing data and models.
DATA_DIR = REPOSITORY_ROOT / 'data'
MODEL_DIR = REPOSITORY_ROOT / 'models'
# Paths to specific databases.
SOURCES_PATH = DATA_DIR / 'javascript-sources.sqlite3'
VECTORS_PATH = DATA_DIR / 'javascript-vectors.sqlite3'
MUTATIONS_PATH = DATA_DIR / 'javascript-mutations.sqlite3'
PREDICTIONS_PATH = DATA_DIR / 'javascript-predictions.sqlite3'
|
<commit_before><commit_msg>Put internal paths in one place.<commit_after>
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright 2017 Eddie Antonio Santos <easantos@ualberta.ca>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Paths for internal use.
"""
from pathlib import Path
# Get paths for here and repository root dir.
HERE = Path(__file__).parent
REPOSITORY_ROOT = HERE.parent
assert (REPOSITORY_ROOT / '.git').exists()
# Directories for storing data and models.
DATA_DIR = REPOSITORY_ROOT / 'data'
MODEL_DIR = REPOSITORY_ROOT / 'models'
# Paths to specific databases.
SOURCES_PATH = DATA_DIR / 'javascript-sources.sqlite3'
VECTORS_PATH = DATA_DIR / 'javascript-vectors.sqlite3'
MUTATIONS_PATH = DATA_DIR / 'javascript-mutations.sqlite3'
PREDICTIONS_PATH = DATA_DIR / 'javascript-predictions.sqlite3'
|
Put internal paths in one place.#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright 2017 Eddie Antonio Santos <easantos@ualberta.ca>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Paths for internal use.
"""
from pathlib import Path
# Get paths for here and repository root dir.
HERE = Path(__file__).parent
REPOSITORY_ROOT = HERE.parent
assert (REPOSITORY_ROOT / '.git').exists()
# Directories for storing data and models.
DATA_DIR = REPOSITORY_ROOT / 'data'
MODEL_DIR = REPOSITORY_ROOT / 'models'
# Paths to specific databases.
SOURCES_PATH = DATA_DIR / 'javascript-sources.sqlite3'
VECTORS_PATH = DATA_DIR / 'javascript-vectors.sqlite3'
MUTATIONS_PATH = DATA_DIR / 'javascript-mutations.sqlite3'
PREDICTIONS_PATH = DATA_DIR / 'javascript-predictions.sqlite3'
|
<commit_before><commit_msg>Put internal paths in one place.<commit_after>#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright 2017 Eddie Antonio Santos <easantos@ualberta.ca>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Paths for internal use.
"""
from pathlib import Path
# Get paths for here and repository root dir.
HERE = Path(__file__).parent
REPOSITORY_ROOT = HERE.parent
assert (REPOSITORY_ROOT / '.git').exists()
# Directories for storing data and models.
DATA_DIR = REPOSITORY_ROOT / 'data'
MODEL_DIR = REPOSITORY_ROOT / 'models'
# Paths to specific databases.
SOURCES_PATH = DATA_DIR / 'javascript-sources.sqlite3'
VECTORS_PATH = DATA_DIR / 'javascript-vectors.sqlite3'
MUTATIONS_PATH = DATA_DIR / 'javascript-mutations.sqlite3'
PREDICTIONS_PATH = DATA_DIR / 'javascript-predictions.sqlite3'
|
|
8cf9dba3bafacca6fd3170f3de31d111b8e52e39
|
src/Mod/TemplatePyMod/SplineSurface.py
|
src/Mod/TemplatePyMod/SplineSurface.py
|
# http://de.wikipedia.org/wiki/Non-Uniform_Rational_B-Spline
# len(knot_u) := nNodes_u + degree_u + 1
# len(knot_v) := nNodes_v + degree_v + 1
degree_u=2
degree_v=2
nNodes_u=5
nNodes_v=5
#knot_u=[0,0,0,0.3333,0.6666,1,1,1]
#knot_v=[0,0,0,0.3333,0.6666,1,1,1]
knot_u=[0,0,0,0.2,0.7,1,1,1]
knot_v=[0,0,0,0.2,0.7,1,1,1]
#knot_u=[0,0,0.2,0.4,0.6,0.8,1,1]
#knot_v=[0,0,0.2,0.4,0.6,0.8,1,1]
coor=[[0,0,1],[1,0,2],[2,0,0],[3,0,1],[4,0,2],\
[0,1,2],[1,1,0],[2,1,0],[3,1,0],[4,1,0],\
[0,2,0],[1,2,0],[2,2,0],[3,2,0],[4,2,0],\
[0,3,1],[1,3,0],[2,3,0],[3,3,3],[4,3,0],\
[0,4,2],[1,4,0],[2,4,0],[3,4,0],[4,4,0]]
bs=Part.BSplineSurface()
bs.increaseDegree(degree_u,degree_v)
id=1
for i in range(0,len(knot_u)-1):
if knot_u[i+1] > knot_u[i]:
bs.insertUKnot(knot_u[i],id,0.0000001)
id=1
for i in range(0,len(knot_v)-1):
if knot_v[i+1] > knot_v[i]:
bs.insertVKnot(knot_v[i],id,0.0000001)
i=0
for jj in range(0,nNodes_v):
for ii in range(0,nNodes_u):
bs.setPole(ii+1,jj+1,FreeCAD.Vector((coor[i][0],coor[i][1],coor[i][2])),1);
i=i+1;
s=bs.toShape()
Part.show(s)
|
Add python example of how to use splines
|
Add python example of how to use splines
|
Python
|
lgpl-2.1
|
dsbrown/FreeCAD,maurerpe/FreeCAD,yantrabuddhi/FreeCAD,jriegel/FreeCAD,mickele77/FreeCAD,cpollard1001/FreeCAD_sf_master,cypsun/FreeCAD,thdtjsdn/FreeCAD,balazs-bamer/FreeCAD-Surface,timthelion/FreeCAD,wood-galaxy/FreeCAD,mickele77/FreeCAD,kkoksvik/FreeCAD,mickele77/FreeCAD,yantrabuddhi/FreeCAD,bblacey/FreeCAD-MacOS-CI,usakhelo/FreeCAD,usakhelo/FreeCAD,Alpistinho/FreeCAD,bblacey/FreeCAD-MacOS-CI,balazs-bamer/FreeCAD-Surface,jriegel/FreeCAD,bblacey/FreeCAD-MacOS-CI,Alpistinho/FreeCAD,wood-galaxy/FreeCAD,thdtjsdn/FreeCAD,marcoitur/Freecad_test,dsbrown/FreeCAD,YuanYouYuan/FreeCAD,maurerpe/FreeCAD,cpollard1001/FreeCAD_sf_master,elgambitero/FreeCAD_sf_master,marcoitur/Freecad_test,elgambitero/FreeCAD_sf_master,thdtjsdn/FreeCAD,timthelion/FreeCAD_sf_master,mickele77/FreeCAD,timthelion/FreeCAD,Creworker/FreeCAD,timthelion/FreeCAD_sf_master,jonnor/FreeCAD,mickele77/FreeCAD,Alpistinho/FreeCAD,dsbrown/FreeCAD,YuanYouYuan/FreeCAD,elgambitero/FreeCAD_sf_master,kkoksvik/FreeCAD,cpollard1001/FreeCAD_sf_master,dsbrown/FreeCAD,bblacey/FreeCAD-MacOS-CI,chrisjaquet/FreeCAD,cypsun/FreeCAD,kkoksvik/FreeCAD,cypsun/FreeCAD,jriegel/FreeCAD,wood-galaxy/FreeCAD,marcoitur/Freecad_test,yantrabuddhi/FreeCAD,usakhelo/FreeCAD,Creworker/FreeCAD,usakhelo/FreeCAD,usakhelo/FreeCAD,timthelion/FreeCAD_sf_master,timthelion/FreeCAD,timthelion/FreeCAD_sf_master,jriegel/FreeCAD,marcoitur/FreeCAD,marcoitur/FreeCAD,elgambitero/FreeCAD_sf_master,cpollard1001/FreeCAD_sf_master,Creworker/FreeCAD,Creworker/FreeCAD,timthelion/FreeCAD,Creworker/FreeCAD,yantrabuddhi/FreeCAD,balazs-bamer/FreeCAD-Surface,chrisjaquet/FreeCAD,chrisjaquet/FreeCAD,thdtjsdn/FreeCAD,wood-galaxy/FreeCAD,chrisjaquet/FreeCAD,balazs-bamer/FreeCAD-Surface,Fat-Zer/FreeCAD_sf_master,maurerpe/FreeCAD,jonnor/FreeCAD,kkoksvik/FreeCAD,Alpistinho/FreeCAD,cpollard1001/FreeCAD_sf_master,Alpistinho/FreeCAD,jonnor/FreeCAD,thdtjsdn/FreeCAD,chrisjaquet/FreeCAD,marcoitur/FreeCAD,bblacey/FreeCAD-MacOS-CI,Fat-Zer/FreeCAD_sf_master,jriegel/FreeCAD,usakhelo/FreeCAD,maurerpe/FreeCAD,marcoitur/FreeCAD,bblacey/FreeCAD-MacOS-CI,elgambitero/FreeCAD_sf_master,bblacey/FreeCAD-MacOS-CI,wood-galaxy/FreeCAD,Fat-Zer/FreeCAD_sf_master,jonnor/FreeCAD,chrisjaquet/FreeCAD,chrisjaquet/FreeCAD,wood-galaxy/FreeCAD,jonnor/FreeCAD,cypsun/FreeCAD,cypsun/FreeCAD,YuanYouYuan/FreeCAD,Fat-Zer/FreeCAD_sf_master,timthelion/FreeCAD_sf_master,marcoitur/FreeCAD,kkoksvik/FreeCAD,Fat-Zer/FreeCAD_sf_master,usakhelo/FreeCAD,yantrabuddhi/FreeCAD,marcoitur/Freecad_test,YuanYouYuan/FreeCAD,dsbrown/FreeCAD,YuanYouYuan/FreeCAD,marcoitur/Freecad_test,timthelion/FreeCAD,timthelion/FreeCAD,balazs-bamer/FreeCAD-Surface,maurerpe/FreeCAD
|
Add python example of how to use splines
|
# http://de.wikipedia.org/wiki/Non-Uniform_Rational_B-Spline
# len(knot_u) := nNodes_u + degree_u + 1
# len(knot_v) := nNodes_v + degree_v + 1
degree_u=2
degree_v=2
nNodes_u=5
nNodes_v=5
#knot_u=[0,0,0,0.3333,0.6666,1,1,1]
#knot_v=[0,0,0,0.3333,0.6666,1,1,1]
knot_u=[0,0,0,0.2,0.7,1,1,1]
knot_v=[0,0,0,0.2,0.7,1,1,1]
#knot_u=[0,0,0.2,0.4,0.6,0.8,1,1]
#knot_v=[0,0,0.2,0.4,0.6,0.8,1,1]
coor=[[0,0,1],[1,0,2],[2,0,0],[3,0,1],[4,0,2],\
[0,1,2],[1,1,0],[2,1,0],[3,1,0],[4,1,0],\
[0,2,0],[1,2,0],[2,2,0],[3,2,0],[4,2,0],\
[0,3,1],[1,3,0],[2,3,0],[3,3,3],[4,3,0],\
[0,4,2],[1,4,0],[2,4,0],[3,4,0],[4,4,0]]
bs=Part.BSplineSurface()
bs.increaseDegree(degree_u,degree_v)
id=1
for i in range(0,len(knot_u)-1):
if knot_u[i+1] > knot_u[i]:
bs.insertUKnot(knot_u[i],id,0.0000001)
id=1
for i in range(0,len(knot_v)-1):
if knot_v[i+1] > knot_v[i]:
bs.insertVKnot(knot_v[i],id,0.0000001)
i=0
for jj in range(0,nNodes_v):
for ii in range(0,nNodes_u):
bs.setPole(ii+1,jj+1,FreeCAD.Vector((coor[i][0],coor[i][1],coor[i][2])),1);
i=i+1;
s=bs.toShape()
Part.show(s)
|
<commit_before><commit_msg>Add python example of how to use splines<commit_after>
|
# http://de.wikipedia.org/wiki/Non-Uniform_Rational_B-Spline
# len(knot_u) := nNodes_u + degree_u + 1
# len(knot_v) := nNodes_v + degree_v + 1
degree_u=2
degree_v=2
nNodes_u=5
nNodes_v=5
#knot_u=[0,0,0,0.3333,0.6666,1,1,1]
#knot_v=[0,0,0,0.3333,0.6666,1,1,1]
knot_u=[0,0,0,0.2,0.7,1,1,1]
knot_v=[0,0,0,0.2,0.7,1,1,1]
#knot_u=[0,0,0.2,0.4,0.6,0.8,1,1]
#knot_v=[0,0,0.2,0.4,0.6,0.8,1,1]
coor=[[0,0,1],[1,0,2],[2,0,0],[3,0,1],[4,0,2],\
[0,1,2],[1,1,0],[2,1,0],[3,1,0],[4,1,0],\
[0,2,0],[1,2,0],[2,2,0],[3,2,0],[4,2,0],\
[0,3,1],[1,3,0],[2,3,0],[3,3,3],[4,3,0],\
[0,4,2],[1,4,0],[2,4,0],[3,4,0],[4,4,0]]
bs=Part.BSplineSurface()
bs.increaseDegree(degree_u,degree_v)
id=1
for i in range(0,len(knot_u)-1):
if knot_u[i+1] > knot_u[i]:
bs.insertUKnot(knot_u[i],id,0.0000001)
id=1
for i in range(0,len(knot_v)-1):
if knot_v[i+1] > knot_v[i]:
bs.insertVKnot(knot_v[i],id,0.0000001)
i=0
for jj in range(0,nNodes_v):
for ii in range(0,nNodes_u):
bs.setPole(ii+1,jj+1,FreeCAD.Vector((coor[i][0],coor[i][1],coor[i][2])),1);
i=i+1;
s=bs.toShape()
Part.show(s)
|
Add python example of how to use splines
# http://de.wikipedia.org/wiki/Non-Uniform_Rational_B-Spline
# len(knot_u) := nNodes_u + degree_u + 1
# len(knot_v) := nNodes_v + degree_v + 1
degree_u=2
degree_v=2
nNodes_u=5
nNodes_v=5
#knot_u=[0,0,0,0.3333,0.6666,1,1,1]
#knot_v=[0,0,0,0.3333,0.6666,1,1,1]
knot_u=[0,0,0,0.2,0.7,1,1,1]
knot_v=[0,0,0,0.2,0.7,1,1,1]
#knot_u=[0,0,0.2,0.4,0.6,0.8,1,1]
#knot_v=[0,0,0.2,0.4,0.6,0.8,1,1]
coor=[[0,0,1],[1,0,2],[2,0,0],[3,0,1],[4,0,2],\
[0,1,2],[1,1,0],[2,1,0],[3,1,0],[4,1,0],\
[0,2,0],[1,2,0],[2,2,0],[3,2,0],[4,2,0],\
[0,3,1],[1,3,0],[2,3,0],[3,3,3],[4,3,0],\
[0,4,2],[1,4,0],[2,4,0],[3,4,0],[4,4,0]]
bs=Part.BSplineSurface()
bs.increaseDegree(degree_u,degree_v)
id=1
for i in range(0,len(knot_u)-1):
if knot_u[i+1] > knot_u[i]:
bs.insertUKnot(knot_u[i],id,0.0000001)
id=1
for i in range(0,len(knot_v)-1):
if knot_v[i+1] > knot_v[i]:
bs.insertVKnot(knot_v[i],id,0.0000001)
i=0
for jj in range(0,nNodes_v):
for ii in range(0,nNodes_u):
bs.setPole(ii+1,jj+1,FreeCAD.Vector((coor[i][0],coor[i][1],coor[i][2])),1);
i=i+1;
s=bs.toShape()
Part.show(s)
|
<commit_before><commit_msg>Add python example of how to use splines<commit_after>
# http://de.wikipedia.org/wiki/Non-Uniform_Rational_B-Spline
# len(knot_u) := nNodes_u + degree_u + 1
# len(knot_v) := nNodes_v + degree_v + 1
degree_u=2
degree_v=2
nNodes_u=5
nNodes_v=5
#knot_u=[0,0,0,0.3333,0.6666,1,1,1]
#knot_v=[0,0,0,0.3333,0.6666,1,1,1]
knot_u=[0,0,0,0.2,0.7,1,1,1]
knot_v=[0,0,0,0.2,0.7,1,1,1]
#knot_u=[0,0,0.2,0.4,0.6,0.8,1,1]
#knot_v=[0,0,0.2,0.4,0.6,0.8,1,1]
coor=[[0,0,1],[1,0,2],[2,0,0],[3,0,1],[4,0,2],\
[0,1,2],[1,1,0],[2,1,0],[3,1,0],[4,1,0],\
[0,2,0],[1,2,0],[2,2,0],[3,2,0],[4,2,0],\
[0,3,1],[1,3,0],[2,3,0],[3,3,3],[4,3,0],\
[0,4,2],[1,4,0],[2,4,0],[3,4,0],[4,4,0]]
bs=Part.BSplineSurface()
bs.increaseDegree(degree_u,degree_v)
id=1
for i in range(0,len(knot_u)-1):
if knot_u[i+1] > knot_u[i]:
bs.insertUKnot(knot_u[i],id,0.0000001)
id=1
for i in range(0,len(knot_v)-1):
if knot_v[i+1] > knot_v[i]:
bs.insertVKnot(knot_v[i],id,0.0000001)
i=0
for jj in range(0,nNodes_v):
for ii in range(0,nNodes_u):
bs.setPole(ii+1,jj+1,FreeCAD.Vector((coor[i][0],coor[i][1],coor[i][2])),1);
i=i+1;
s=bs.toShape()
Part.show(s)
|
|
8cb59ab74717624f1f4acbacc496f9455d90a92c
|
ldb.py
|
ldb.py
|
#!/usr/bin/env python
from socket import socket, AF_UNIX, SOCK_STREAM
import os
s = socket(AF_UNIX, SOCK_STREAM)
s.bind('/tmp/socket_lua_debug')
s.listen(1)
lua = s.accept()[0]
print "Connected with Lua."
lua.recv(1024)
lua.close()
os.remove("/tmp/socket_lua_debug") # I don't know why it doesn't do it by itself.
|
Add a basic server to go along with.
|
Add a basic server to go along with.
|
Python
|
mit
|
laarmen/lua_debug,laarmen/lua_debug
|
Add a basic server to go along with.
|
#!/usr/bin/env python
from socket import socket, AF_UNIX, SOCK_STREAM
import os
s = socket(AF_UNIX, SOCK_STREAM)
s.bind('/tmp/socket_lua_debug')
s.listen(1)
lua = s.accept()[0]
print "Connected with Lua."
lua.recv(1024)
lua.close()
os.remove("/tmp/socket_lua_debug") # I don't know why it doesn't do it by itself.
|
<commit_before><commit_msg>Add a basic server to go along with.<commit_after>
|
#!/usr/bin/env python
from socket import socket, AF_UNIX, SOCK_STREAM
import os
s = socket(AF_UNIX, SOCK_STREAM)
s.bind('/tmp/socket_lua_debug')
s.listen(1)
lua = s.accept()[0]
print "Connected with Lua."
lua.recv(1024)
lua.close()
os.remove("/tmp/socket_lua_debug") # I don't know why it doesn't do it by itself.
|
Add a basic server to go along with.#!/usr/bin/env python
from socket import socket, AF_UNIX, SOCK_STREAM
import os
s = socket(AF_UNIX, SOCK_STREAM)
s.bind('/tmp/socket_lua_debug')
s.listen(1)
lua = s.accept()[0]
print "Connected with Lua."
lua.recv(1024)
lua.close()
os.remove("/tmp/socket_lua_debug") # I don't know why it doesn't do it by itself.
|
<commit_before><commit_msg>Add a basic server to go along with.<commit_after>#!/usr/bin/env python
from socket import socket, AF_UNIX, SOCK_STREAM
import os
s = socket(AF_UNIX, SOCK_STREAM)
s.bind('/tmp/socket_lua_debug')
s.listen(1)
lua = s.accept()[0]
print "Connected with Lua."
lua.recv(1024)
lua.close()
os.remove("/tmp/socket_lua_debug") # I don't know why it doesn't do it by itself.
|
|
b196d5e58c611508bbc0bf891752d6abf135b67d
|
generic/example/manage.py
|
generic/example/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
Make sure sys.path is setup correctly
|
Make sure sys.path is setup correctly
|
Python
|
apache-2.0
|
texastribune/tt_app_templates,texastribune/tt_app_templates
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
Make sure sys.path is setup correctly
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
<commit_before>#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
<commit_msg>Make sure sys.path is setup correctly<commit_after>
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
Make sure sys.path is setup correctly#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
<commit_before>#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
<commit_msg>Make sure sys.path is setup correctly<commit_after>#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
e361aadf64ea10ec337c349e182608ab3a305dcf
|
share/management/commands/makeprovidermigrations.py
|
share/management/commands/makeprovidermigrations.py
|
import os
from django.apps import apps
from django.db.migrations.state import ProjectState
from django.core.management.base import BaseCommand
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.writer import MigrationWriter
from django.db.migrations.autodetector import MigrationAutodetector
from share.core import ProviderAppConfig
from share.core import ProviderMigration
class Command(BaseCommand):
can_import_settings = True
def write_migration(self, migration):
loader = MigrationLoader(None, ignore_no_migrations=True)
autodetector = MigrationAutodetector(loader.project_state(), ProjectState.from_apps(apps),)
changes = autodetector.arrange_for_graph(changes={'share': [migration]}, graph=loader.graph,)
for m in changes['share']:
writer = MigrationWriter(m)
with open(writer.path, 'wb') as fp:
fp.write(writer.as_string())
def handle(self, *args, **options):
changes = {}
for config in apps.get_app_configs():
if isinstance(config, ProviderAppConfig):
changes[config.name] = [ProviderMigration(config).migration()]
for migrations in changes.values():
for m in migrations:
writer = MigrationWriter(m)
os.makedirs(os.path.dirname(writer.path), exist_ok=True)
with open(os.path.join(os.path.dirname(writer.path), '__init__.py'), 'wb') as fp:
fp.write(writer.as_string())
with open(writer.path, 'wb') as fp:
fp.write(writer.as_string())
|
Add a command to create provider migrations
|
Add a command to create provider migrations
|
Python
|
apache-2.0
|
laurenbarker/SHARE,CenterForOpenScience/SHARE,laurenbarker/SHARE,zamattiac/SHARE,CenterForOpenScience/SHARE,aaxelb/SHARE,aaxelb/SHARE,laurenbarker/SHARE,zamattiac/SHARE,CenterForOpenScience/SHARE,zamattiac/SHARE,aaxelb/SHARE
|
Add a command to create provider migrations
|
import os
from django.apps import apps
from django.db.migrations.state import ProjectState
from django.core.management.base import BaseCommand
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.writer import MigrationWriter
from django.db.migrations.autodetector import MigrationAutodetector
from share.core import ProviderAppConfig
from share.core import ProviderMigration
class Command(BaseCommand):
can_import_settings = True
def write_migration(self, migration):
loader = MigrationLoader(None, ignore_no_migrations=True)
autodetector = MigrationAutodetector(loader.project_state(), ProjectState.from_apps(apps),)
changes = autodetector.arrange_for_graph(changes={'share': [migration]}, graph=loader.graph,)
for m in changes['share']:
writer = MigrationWriter(m)
with open(writer.path, 'wb') as fp:
fp.write(writer.as_string())
def handle(self, *args, **options):
changes = {}
for config in apps.get_app_configs():
if isinstance(config, ProviderAppConfig):
changes[config.name] = [ProviderMigration(config).migration()]
for migrations in changes.values():
for m in migrations:
writer = MigrationWriter(m)
os.makedirs(os.path.dirname(writer.path), exist_ok=True)
with open(os.path.join(os.path.dirname(writer.path), '__init__.py'), 'wb') as fp:
fp.write(writer.as_string())
with open(writer.path, 'wb') as fp:
fp.write(writer.as_string())
|
<commit_before><commit_msg>Add a command to create provider migrations<commit_after>
|
import os
from django.apps import apps
from django.db.migrations.state import ProjectState
from django.core.management.base import BaseCommand
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.writer import MigrationWriter
from django.db.migrations.autodetector import MigrationAutodetector
from share.core import ProviderAppConfig
from share.core import ProviderMigration
class Command(BaseCommand):
can_import_settings = True
def write_migration(self, migration):
loader = MigrationLoader(None, ignore_no_migrations=True)
autodetector = MigrationAutodetector(loader.project_state(), ProjectState.from_apps(apps),)
changes = autodetector.arrange_for_graph(changes={'share': [migration]}, graph=loader.graph,)
for m in changes['share']:
writer = MigrationWriter(m)
with open(writer.path, 'wb') as fp:
fp.write(writer.as_string())
def handle(self, *args, **options):
changes = {}
for config in apps.get_app_configs():
if isinstance(config, ProviderAppConfig):
changes[config.name] = [ProviderMigration(config).migration()]
for migrations in changes.values():
for m in migrations:
writer = MigrationWriter(m)
os.makedirs(os.path.dirname(writer.path), exist_ok=True)
with open(os.path.join(os.path.dirname(writer.path), '__init__.py'), 'wb') as fp:
fp.write(writer.as_string())
with open(writer.path, 'wb') as fp:
fp.write(writer.as_string())
|
Add a command to create provider migrationsimport os
from django.apps import apps
from django.db.migrations.state import ProjectState
from django.core.management.base import BaseCommand
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.writer import MigrationWriter
from django.db.migrations.autodetector import MigrationAutodetector
from share.core import ProviderAppConfig
from share.core import ProviderMigration
class Command(BaseCommand):
can_import_settings = True
def write_migration(self, migration):
loader = MigrationLoader(None, ignore_no_migrations=True)
autodetector = MigrationAutodetector(loader.project_state(), ProjectState.from_apps(apps),)
changes = autodetector.arrange_for_graph(changes={'share': [migration]}, graph=loader.graph,)
for m in changes['share']:
writer = MigrationWriter(m)
with open(writer.path, 'wb') as fp:
fp.write(writer.as_string())
def handle(self, *args, **options):
changes = {}
for config in apps.get_app_configs():
if isinstance(config, ProviderAppConfig):
changes[config.name] = [ProviderMigration(config).migration()]
for migrations in changes.values():
for m in migrations:
writer = MigrationWriter(m)
os.makedirs(os.path.dirname(writer.path), exist_ok=True)
with open(os.path.join(os.path.dirname(writer.path), '__init__.py'), 'wb') as fp:
fp.write(writer.as_string())
with open(writer.path, 'wb') as fp:
fp.write(writer.as_string())
|
<commit_before><commit_msg>Add a command to create provider migrations<commit_after>import os
from django.apps import apps
from django.db.migrations.state import ProjectState
from django.core.management.base import BaseCommand
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.writer import MigrationWriter
from django.db.migrations.autodetector import MigrationAutodetector
from share.core import ProviderAppConfig
from share.core import ProviderMigration
class Command(BaseCommand):
can_import_settings = True
def write_migration(self, migration):
loader = MigrationLoader(None, ignore_no_migrations=True)
autodetector = MigrationAutodetector(loader.project_state(), ProjectState.from_apps(apps),)
changes = autodetector.arrange_for_graph(changes={'share': [migration]}, graph=loader.graph,)
for m in changes['share']:
writer = MigrationWriter(m)
with open(writer.path, 'wb') as fp:
fp.write(writer.as_string())
def handle(self, *args, **options):
changes = {}
for config in apps.get_app_configs():
if isinstance(config, ProviderAppConfig):
changes[config.name] = [ProviderMigration(config).migration()]
for migrations in changes.values():
for m in migrations:
writer = MigrationWriter(m)
os.makedirs(os.path.dirname(writer.path), exist_ok=True)
with open(os.path.join(os.path.dirname(writer.path), '__init__.py'), 'wb') as fp:
fp.write(writer.as_string())
with open(writer.path, 'wb') as fp:
fp.write(writer.as_string())
|
|
f1b4309cd515ac2c689026c10f025dff92c6504e
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import inflection
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='inflection',
version=inflection.__version__,
description="A port of Ruby on Rails inflector to Python",
long_description=open('README.rst').read() + '\n\n' +
open('HISTORY.rst').read(),
author='Janne Vanhala',
author_email='janne.vanhala@gmail.com',
url='http://github.com/jpvanhal/inflection',
license=open('LICENSE').read(),
py_modules=['inflection'],
zip_safe=False,
classifiers=(
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
),
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import inflection
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='inflection',
version=inflection.__version__,
description="A port of Ruby on Rails inflector to Python",
long_description=open('README.rst').read() + '\n\n' +
open('HISTORY.rst').read(),
author='Janne Vanhala',
author_email='janne.vanhala@gmail.com',
url='http://github.com/jpvanhal/inflection',
license=open('LICENSE').read(),
py_modules=['inflection'],
zip_safe=False,
classifiers=(
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
),
)
|
Update development status to beta
|
Update development status to beta
|
Python
|
mit
|
ascott1/inflection,SiviVuk/inflection,willbarton/inflection,panvagenas/inflection,jpvanhal/inflection
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import inflection
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='inflection',
version=inflection.__version__,
description="A port of Ruby on Rails inflector to Python",
long_description=open('README.rst').read() + '\n\n' +
open('HISTORY.rst').read(),
author='Janne Vanhala',
author_email='janne.vanhala@gmail.com',
url='http://github.com/jpvanhal/inflection',
license=open('LICENSE').read(),
py_modules=['inflection'],
zip_safe=False,
classifiers=(
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
),
)
Update development status to beta
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import inflection
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='inflection',
version=inflection.__version__,
description="A port of Ruby on Rails inflector to Python",
long_description=open('README.rst').read() + '\n\n' +
open('HISTORY.rst').read(),
author='Janne Vanhala',
author_email='janne.vanhala@gmail.com',
url='http://github.com/jpvanhal/inflection',
license=open('LICENSE').read(),
py_modules=['inflection'],
zip_safe=False,
classifiers=(
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
),
)
|
<commit_before>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import inflection
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='inflection',
version=inflection.__version__,
description="A port of Ruby on Rails inflector to Python",
long_description=open('README.rst').read() + '\n\n' +
open('HISTORY.rst').read(),
author='Janne Vanhala',
author_email='janne.vanhala@gmail.com',
url='http://github.com/jpvanhal/inflection',
license=open('LICENSE').read(),
py_modules=['inflection'],
zip_safe=False,
classifiers=(
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
),
)
<commit_msg>Update development status to beta<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import inflection
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='inflection',
version=inflection.__version__,
description="A port of Ruby on Rails inflector to Python",
long_description=open('README.rst').read() + '\n\n' +
open('HISTORY.rst').read(),
author='Janne Vanhala',
author_email='janne.vanhala@gmail.com',
url='http://github.com/jpvanhal/inflection',
license=open('LICENSE').read(),
py_modules=['inflection'],
zip_safe=False,
classifiers=(
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
),
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import inflection
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='inflection',
version=inflection.__version__,
description="A port of Ruby on Rails inflector to Python",
long_description=open('README.rst').read() + '\n\n' +
open('HISTORY.rst').read(),
author='Janne Vanhala',
author_email='janne.vanhala@gmail.com',
url='http://github.com/jpvanhal/inflection',
license=open('LICENSE').read(),
py_modules=['inflection'],
zip_safe=False,
classifiers=(
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
),
)
Update development status to beta#!/usr/bin/env python
# -*- coding: utf-8 -*-
import inflection
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='inflection',
version=inflection.__version__,
description="A port of Ruby on Rails inflector to Python",
long_description=open('README.rst').read() + '\n\n' +
open('HISTORY.rst').read(),
author='Janne Vanhala',
author_email='janne.vanhala@gmail.com',
url='http://github.com/jpvanhal/inflection',
license=open('LICENSE').read(),
py_modules=['inflection'],
zip_safe=False,
classifiers=(
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
),
)
|
<commit_before>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import inflection
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='inflection',
version=inflection.__version__,
description="A port of Ruby on Rails inflector to Python",
long_description=open('README.rst').read() + '\n\n' +
open('HISTORY.rst').read(),
author='Janne Vanhala',
author_email='janne.vanhala@gmail.com',
url='http://github.com/jpvanhal/inflection',
license=open('LICENSE').read(),
py_modules=['inflection'],
zip_safe=False,
classifiers=(
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
),
)
<commit_msg>Update development status to beta<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import inflection
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='inflection',
version=inflection.__version__,
description="A port of Ruby on Rails inflector to Python",
long_description=open('README.rst').read() + '\n\n' +
open('HISTORY.rst').read(),
author='Janne Vanhala',
author_email='janne.vanhala@gmail.com',
url='http://github.com/jpvanhal/inflection',
license=open('LICENSE').read(),
py_modules=['inflection'],
zip_safe=False,
classifiers=(
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
),
)
|
ae928afd0752c9f66b3b6701f67502013f2869ca
|
cla_backend/apps/legalaid/management/commands/recalculate_assigned_out_of_hours.py
|
cla_backend/apps/legalaid/management/commands/recalculate_assigned_out_of_hours.py
|
from django.conf import settings
from django.core.management import BaseCommand, CommandError
from django.utils import timezone
from legalaid.models import Case
class Command(BaseCommand):
help = "Recalculate case.assigned_out_of_hours since a given date"
def handle(self, *args, **options):
try:
date_string = args[0]
except IndexError:
raise CommandError("A start date is required")
try:
dt = timezone.datetime.strptime(date_string, "%Y-%m-%d")
except ValueError:
raise CommandError("The start date should be a valid datetime in yyyy-mm-dd format")
cases = Case.objects.filter(provider_assigned_at__gte=dt)
count = cases.count()
self.stdout.write("{count} cases assigned since {dt}.".format(count=count, dt=dt))
if not count:
return
unchanged = []
changed_to_true = []
changed_to_false = []
for case in cases:
current_value = case.assigned_out_of_hours
new_value = self.recalculate_field(case)
if new_value == current_value:
unchanged.append(case.pk)
elif new_value:
changed_to_true.append(case.pk)
else:
changed_to_false.append(case.pk)
self.stdout.write("{count} cases already had the correct value.".format(count=len(unchanged)))
self.stdout.write("{count} cases will be changed to `True`.".format(count=len(changed_to_true)))
self.stdout.write("{count} cases will be changed to `False`.".format(count=len(changed_to_false)))
def recalculate_field(self, case):
case_category = getattr(case.eligibility_check.category, "code") if case.eligibility_check else None
non_rota_hours = settings.NON_ROTA_OPENING_HOURS[case_category]
return not non_rota_hours.available(case.provider_assigned_at, tz_aware=True)
|
Add management command to recalculate the field
|
Add management command to recalculate the field
|
Python
|
mit
|
ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend
|
Add management command to recalculate the field
|
from django.conf import settings
from django.core.management import BaseCommand, CommandError
from django.utils import timezone
from legalaid.models import Case
class Command(BaseCommand):
help = "Recalculate case.assigned_out_of_hours since a given date"
def handle(self, *args, **options):
try:
date_string = args[0]
except IndexError:
raise CommandError("A start date is required")
try:
dt = timezone.datetime.strptime(date_string, "%Y-%m-%d")
except ValueError:
raise CommandError("The start date should be a valid datetime in yyyy-mm-dd format")
cases = Case.objects.filter(provider_assigned_at__gte=dt)
count = cases.count()
self.stdout.write("{count} cases assigned since {dt}.".format(count=count, dt=dt))
if not count:
return
unchanged = []
changed_to_true = []
changed_to_false = []
for case in cases:
current_value = case.assigned_out_of_hours
new_value = self.recalculate_field(case)
if new_value == current_value:
unchanged.append(case.pk)
elif new_value:
changed_to_true.append(case.pk)
else:
changed_to_false.append(case.pk)
self.stdout.write("{count} cases already had the correct value.".format(count=len(unchanged)))
self.stdout.write("{count} cases will be changed to `True`.".format(count=len(changed_to_true)))
self.stdout.write("{count} cases will be changed to `False`.".format(count=len(changed_to_false)))
def recalculate_field(self, case):
case_category = getattr(case.eligibility_check.category, "code") if case.eligibility_check else None
non_rota_hours = settings.NON_ROTA_OPENING_HOURS[case_category]
return not non_rota_hours.available(case.provider_assigned_at, tz_aware=True)
|
<commit_before><commit_msg>Add management command to recalculate the field<commit_after>
|
from django.conf import settings
from django.core.management import BaseCommand, CommandError
from django.utils import timezone
from legalaid.models import Case
class Command(BaseCommand):
help = "Recalculate case.assigned_out_of_hours since a given date"
def handle(self, *args, **options):
try:
date_string = args[0]
except IndexError:
raise CommandError("A start date is required")
try:
dt = timezone.datetime.strptime(date_string, "%Y-%m-%d")
except ValueError:
raise CommandError("The start date should be a valid datetime in yyyy-mm-dd format")
cases = Case.objects.filter(provider_assigned_at__gte=dt)
count = cases.count()
self.stdout.write("{count} cases assigned since {dt}.".format(count=count, dt=dt))
if not count:
return
unchanged = []
changed_to_true = []
changed_to_false = []
for case in cases:
current_value = case.assigned_out_of_hours
new_value = self.recalculate_field(case)
if new_value == current_value:
unchanged.append(case.pk)
elif new_value:
changed_to_true.append(case.pk)
else:
changed_to_false.append(case.pk)
self.stdout.write("{count} cases already had the correct value.".format(count=len(unchanged)))
self.stdout.write("{count} cases will be changed to `True`.".format(count=len(changed_to_true)))
self.stdout.write("{count} cases will be changed to `False`.".format(count=len(changed_to_false)))
def recalculate_field(self, case):
case_category = getattr(case.eligibility_check.category, "code") if case.eligibility_check else None
non_rota_hours = settings.NON_ROTA_OPENING_HOURS[case_category]
return not non_rota_hours.available(case.provider_assigned_at, tz_aware=True)
|
Add management command to recalculate the fieldfrom django.conf import settings
from django.core.management import BaseCommand, CommandError
from django.utils import timezone
from legalaid.models import Case
class Command(BaseCommand):
help = "Recalculate case.assigned_out_of_hours since a given date"
def handle(self, *args, **options):
try:
date_string = args[0]
except IndexError:
raise CommandError("A start date is required")
try:
dt = timezone.datetime.strptime(date_string, "%Y-%m-%d")
except ValueError:
raise CommandError("The start date should be a valid datetime in yyyy-mm-dd format")
cases = Case.objects.filter(provider_assigned_at__gte=dt)
count = cases.count()
self.stdout.write("{count} cases assigned since {dt}.".format(count=count, dt=dt))
if not count:
return
unchanged = []
changed_to_true = []
changed_to_false = []
for case in cases:
current_value = case.assigned_out_of_hours
new_value = self.recalculate_field(case)
if new_value == current_value:
unchanged.append(case.pk)
elif new_value:
changed_to_true.append(case.pk)
else:
changed_to_false.append(case.pk)
self.stdout.write("{count} cases already had the correct value.".format(count=len(unchanged)))
self.stdout.write("{count} cases will be changed to `True`.".format(count=len(changed_to_true)))
self.stdout.write("{count} cases will be changed to `False`.".format(count=len(changed_to_false)))
def recalculate_field(self, case):
case_category = getattr(case.eligibility_check.category, "code") if case.eligibility_check else None
non_rota_hours = settings.NON_ROTA_OPENING_HOURS[case_category]
return not non_rota_hours.available(case.provider_assigned_at, tz_aware=True)
|
<commit_before><commit_msg>Add management command to recalculate the field<commit_after>from django.conf import settings
from django.core.management import BaseCommand, CommandError
from django.utils import timezone
from legalaid.models import Case
class Command(BaseCommand):
help = "Recalculate case.assigned_out_of_hours since a given date"
def handle(self, *args, **options):
try:
date_string = args[0]
except IndexError:
raise CommandError("A start date is required")
try:
dt = timezone.datetime.strptime(date_string, "%Y-%m-%d")
except ValueError:
raise CommandError("The start date should be a valid datetime in yyyy-mm-dd format")
cases = Case.objects.filter(provider_assigned_at__gte=dt)
count = cases.count()
self.stdout.write("{count} cases assigned since {dt}.".format(count=count, dt=dt))
if not count:
return
unchanged = []
changed_to_true = []
changed_to_false = []
for case in cases:
current_value = case.assigned_out_of_hours
new_value = self.recalculate_field(case)
if new_value == current_value:
unchanged.append(case.pk)
elif new_value:
changed_to_true.append(case.pk)
else:
changed_to_false.append(case.pk)
self.stdout.write("{count} cases already had the correct value.".format(count=len(unchanged)))
self.stdout.write("{count} cases will be changed to `True`.".format(count=len(changed_to_true)))
self.stdout.write("{count} cases will be changed to `False`.".format(count=len(changed_to_false)))
def recalculate_field(self, case):
case_category = getattr(case.eligibility_check.category, "code") if case.eligibility_check else None
non_rota_hours = settings.NON_ROTA_OPENING_HOURS[case_category]
return not non_rota_hours.available(case.provider_assigned_at, tz_aware=True)
|
|
394bc87de8eb4903e53f1158d88471bcefc56aa9
|
alembic/versions/a9ecd1c767_add_pro_field_to_user_table.py
|
alembic/versions/a9ecd1c767_add_pro_field_to_user_table.py
|
"""add pro field to user table
Revision ID: a9ecd1c767
Revises: 66594a9866c
Create Date: 2014-11-05 10:31:37.734790
"""
# revision identifiers, used by Alembic.
revision = 'a9ecd1c767'
down_revision = '66594a9866c'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('user', sa.Column('pro', sa.Boolean, default=False))
query = 'UPDATE "user" SET pro=false;'
op.execute(query)
def downgrade():
op.drop_column('user', 'pro')
|
Add migration to add pro users.
|
Add migration to add pro users.
|
Python
|
agpl-3.0
|
jean/pybossa,PyBossa/pybossa,geotagx/pybossa,geotagx/pybossa,jean/pybossa,stefanhahmann/pybossa,inteligencia-coletiva-lsd/pybossa,OpenNewsLabs/pybossa,stefanhahmann/pybossa,inteligencia-coletiva-lsd/pybossa,Scifabric/pybossa,Scifabric/pybossa,OpenNewsLabs/pybossa,PyBossa/pybossa
|
Add migration to add pro users.
|
"""add pro field to user table
Revision ID: a9ecd1c767
Revises: 66594a9866c
Create Date: 2014-11-05 10:31:37.734790
"""
# revision identifiers, used by Alembic.
revision = 'a9ecd1c767'
down_revision = '66594a9866c'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('user', sa.Column('pro', sa.Boolean, default=False))
query = 'UPDATE "user" SET pro=false;'
op.execute(query)
def downgrade():
op.drop_column('user', 'pro')
|
<commit_before><commit_msg>Add migration to add pro users.<commit_after>
|
"""add pro field to user table
Revision ID: a9ecd1c767
Revises: 66594a9866c
Create Date: 2014-11-05 10:31:37.734790
"""
# revision identifiers, used by Alembic.
revision = 'a9ecd1c767'
down_revision = '66594a9866c'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('user', sa.Column('pro', sa.Boolean, default=False))
query = 'UPDATE "user" SET pro=false;'
op.execute(query)
def downgrade():
op.drop_column('user', 'pro')
|
Add migration to add pro users."""add pro field to user table
Revision ID: a9ecd1c767
Revises: 66594a9866c
Create Date: 2014-11-05 10:31:37.734790
"""
# revision identifiers, used by Alembic.
revision = 'a9ecd1c767'
down_revision = '66594a9866c'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('user', sa.Column('pro', sa.Boolean, default=False))
query = 'UPDATE "user" SET pro=false;'
op.execute(query)
def downgrade():
op.drop_column('user', 'pro')
|
<commit_before><commit_msg>Add migration to add pro users.<commit_after>"""add pro field to user table
Revision ID: a9ecd1c767
Revises: 66594a9866c
Create Date: 2014-11-05 10:31:37.734790
"""
# revision identifiers, used by Alembic.
revision = 'a9ecd1c767'
down_revision = '66594a9866c'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('user', sa.Column('pro', sa.Boolean, default=False))
query = 'UPDATE "user" SET pro=false;'
op.execute(query)
def downgrade():
op.drop_column('user', 'pro')
|
|
6c479cb382ec76be3910b1f8c1c71b0a8a4e26c7
|
tests/continous-failure.py
|
tests/continous-failure.py
|
#!/usr/bin/env python
# Both have a very small chance of occurring
from twisted.internet import reactor
from mesh import Mesh, MeshNode, packet_type, BYE, DATA
import sys
NUMNODES = 2
DELAY = 0.1
m = Mesh()
class TestMeshNode(MeshNode):
def newNode (self, data):
MeshNode.newNode (self, data)
print data + " joined"
self.fail(data)
def leftNode (self, data):
MeshNode.leftNode (self, data)
print data + " left"
n = TestMeshNode("node", m)
m.addMeshNode(n)
failnode = MeshNode("failnode", m)
m.addMeshNode(failnode)
# Connect all nodes to all others. 1024 bytes/s bandwidth, 50ms delay and 0%
# packet loss.. (bandwidth and delay aren't implemented just yet)
m.connect_full(1024, 50, 0.30)
reactor.run()
|
Add a test continuously failing one node, causing group split and reconnects
|
Add a test continuously failing one node, causing group split and reconnects
20071126190126-93b9a-a75d8096a67cfd837b21cb9c6b20eec234993a90.gz
|
Python
|
lgpl-2.1
|
freedesktop-unofficial-mirror/telepathy__telepathy-salut,freedesktop-unofficial-mirror/telepathy__telepathy-salut,freedesktop-unofficial-mirror/telepathy__telepathy-salut,freedesktop-unofficial-mirror/telepathy__telepathy-salut
|
Add a test continuously failing one node, causing group split and reconnects
20071126190126-93b9a-a75d8096a67cfd837b21cb9c6b20eec234993a90.gz
|
#!/usr/bin/env python
# Both have a very small chance of occurring
from twisted.internet import reactor
from mesh import Mesh, MeshNode, packet_type, BYE, DATA
import sys
NUMNODES = 2
DELAY = 0.1
m = Mesh()
class TestMeshNode(MeshNode):
def newNode (self, data):
MeshNode.newNode (self, data)
print data + " joined"
self.fail(data)
def leftNode (self, data):
MeshNode.leftNode (self, data)
print data + " left"
n = TestMeshNode("node", m)
m.addMeshNode(n)
failnode = MeshNode("failnode", m)
m.addMeshNode(failnode)
# Connect all nodes to all others. 1024 bytes/s bandwidth, 50ms delay and 0%
# packet loss.. (bandwidth and delay aren't implemented just yet)
m.connect_full(1024, 50, 0.30)
reactor.run()
|
<commit_before><commit_msg>Add a test continuously failing one node, causing group split and reconnects
20071126190126-93b9a-a75d8096a67cfd837b21cb9c6b20eec234993a90.gz<commit_after>
|
#!/usr/bin/env python
# Both have a very small chance of occurring
from twisted.internet import reactor
from mesh import Mesh, MeshNode, packet_type, BYE, DATA
import sys
NUMNODES = 2
DELAY = 0.1
m = Mesh()
class TestMeshNode(MeshNode):
def newNode (self, data):
MeshNode.newNode (self, data)
print data + " joined"
self.fail(data)
def leftNode (self, data):
MeshNode.leftNode (self, data)
print data + " left"
n = TestMeshNode("node", m)
m.addMeshNode(n)
failnode = MeshNode("failnode", m)
m.addMeshNode(failnode)
# Connect all nodes to all others. 1024 bytes/s bandwidth, 50ms delay and 0%
# packet loss.. (bandwidth and delay aren't implemented just yet)
m.connect_full(1024, 50, 0.30)
reactor.run()
|
Add a test continuously failing one node, causing group split and reconnects
20071126190126-93b9a-a75d8096a67cfd837b21cb9c6b20eec234993a90.gz#!/usr/bin/env python
# Both have a very small chance of occurring
from twisted.internet import reactor
from mesh import Mesh, MeshNode, packet_type, BYE, DATA
import sys
NUMNODES = 2
DELAY = 0.1
m = Mesh()
class TestMeshNode(MeshNode):
def newNode (self, data):
MeshNode.newNode (self, data)
print data + " joined"
self.fail(data)
def leftNode (self, data):
MeshNode.leftNode (self, data)
print data + " left"
n = TestMeshNode("node", m)
m.addMeshNode(n)
failnode = MeshNode("failnode", m)
m.addMeshNode(failnode)
# Connect all nodes to all others. 1024 bytes/s bandwidth, 50ms delay and 0%
# packet loss.. (bandwidth and delay aren't implemented just yet)
m.connect_full(1024, 50, 0.30)
reactor.run()
|
<commit_before><commit_msg>Add a test continuously failing one node, causing group split and reconnects
20071126190126-93b9a-a75d8096a67cfd837b21cb9c6b20eec234993a90.gz<commit_after>#!/usr/bin/env python
# Both have a very small chance of occurring
from twisted.internet import reactor
from mesh import Mesh, MeshNode, packet_type, BYE, DATA
import sys
NUMNODES = 2
DELAY = 0.1
m = Mesh()
class TestMeshNode(MeshNode):
def newNode (self, data):
MeshNode.newNode (self, data)
print data + " joined"
self.fail(data)
def leftNode (self, data):
MeshNode.leftNode (self, data)
print data + " left"
n = TestMeshNode("node", m)
m.addMeshNode(n)
failnode = MeshNode("failnode", m)
m.addMeshNode(failnode)
# Connect all nodes to all others. 1024 bytes/s bandwidth, 50ms delay and 0%
# packet loss.. (bandwidth and delay aren't implemented just yet)
m.connect_full(1024, 50, 0.30)
reactor.run()
|
|
dc9c71cdfdcfec88f90d83111d159a9926c2191f
|
tests/create_references.py
|
tests/create_references.py
|
#! /usr/bin/env python
import os
from cyclus_tools import run_cyclus
from tests_list import sim_files
def main():
"""Creates reference databases. Assumes that cyclus is included into PATH.
"""
cwd = os.getcwd()
# Run cyclus
run_cyclus("cyclus", cwd, sim_files)
if __name__ == "__main__": main()
|
Add script to create reference output databases
|
Add script to create reference output databases
|
Python
|
bsd-3-clause
|
gonuke/cycamore,jlittell/cycamore,gonuke/cycamore,rwcarlsen/cycamore,cyclus/cycaless,rwcarlsen/cycamore,Baaaaam/cyCLASS,Baaaaam/cyCLASS,rwcarlsen/cycamore,Baaaaam/cycamore,cyclus/cycaless,Baaaaam/cyBaM,jlittell/cycamore,gonuke/cycamore,Baaaaam/cyBaM,rwcarlsen/cycamore,Baaaaam/cyBaM,jlittell/cycamore,Baaaaam/cyBaM,gonuke/cycamore,jlittell/cycamore,Baaaaam/cycamore,Baaaaam/cycamore
|
Add script to create reference output databases
|
#! /usr/bin/env python
import os
from cyclus_tools import run_cyclus
from tests_list import sim_files
def main():
"""Creates reference databases. Assumes that cyclus is included into PATH.
"""
cwd = os.getcwd()
# Run cyclus
run_cyclus("cyclus", cwd, sim_files)
if __name__ == "__main__": main()
|
<commit_before><commit_msg>Add script to create reference output databases<commit_after>
|
#! /usr/bin/env python
import os
from cyclus_tools import run_cyclus
from tests_list import sim_files
def main():
"""Creates reference databases. Assumes that cyclus is included into PATH.
"""
cwd = os.getcwd()
# Run cyclus
run_cyclus("cyclus", cwd, sim_files)
if __name__ == "__main__": main()
|
Add script to create reference output databases#! /usr/bin/env python
import os
from cyclus_tools import run_cyclus
from tests_list import sim_files
def main():
"""Creates reference databases. Assumes that cyclus is included into PATH.
"""
cwd = os.getcwd()
# Run cyclus
run_cyclus("cyclus", cwd, sim_files)
if __name__ == "__main__": main()
|
<commit_before><commit_msg>Add script to create reference output databases<commit_after>#! /usr/bin/env python
import os
from cyclus_tools import run_cyclus
from tests_list import sim_files
def main():
"""Creates reference databases. Assumes that cyclus is included into PATH.
"""
cwd = os.getcwd()
# Run cyclus
run_cyclus("cyclus", cwd, sim_files)
if __name__ == "__main__": main()
|
|
26450ba129b16d5a6c8a660e8df21c1d016903c4
|
setup.py
|
setup.py
|
import sys
from setuptools import find_packages, setup
VERSION = '2.0.dev0'
install_requires = [
'django-local-settings>=1.0a10',
'stashward',
]
if sys.version_info[:2] < (3, 4):
install_requires.append('enum34')
setup(
name='django-arcutils',
version=VERSION,
url='https://github.com/PSU-OIT-ARC/django-arcutils',
author='PSU - OIT - ARC',
author_email='consultants@pdx.edu',
description='Common utilities used in ARC Django projects',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
extras_require={
'cas': [
'django-cas-client>=1.2.0',
],
'ldap': [
'ldap3>=0.9.9.1',
],
'dev': [
'django>=1.7',
'flake8',
'ldap3',
'mock',
'model_mommy',
],
},
entry_points="""
[console_scripts]
arcutils = arcutils.__main__:main
""",
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
|
import sys
from setuptools import find_packages, setup
VERSION = '2.0.dev0'
install_requires = [
'django-local-settings>=1.0a10',
'stashward',
]
if sys.version_info[:2] < (3, 4):
install_requires.append('enum34')
setup(
name='django-arcutils',
version=VERSION,
url='https://github.com/PSU-OIT-ARC/django-arcutils',
author='PSU - OIT - ARC',
author_email='consultants@pdx.edu',
description='Common utilities used in ARC Django projects',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
extras_require={
'cas': [
'django-cas-client>=1.2.0',
],
'ldap': [
'ldap3>=0.9.9.2',
],
'dev': [
'django>=1.7',
'flake8',
'ldap3',
'mock',
'model_mommy',
],
},
entry_points="""
[console_scripts]
arcutils = arcutils.__main__:main
""",
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
|
Upgrade ldap3 0.9.9.1 => 0.9.9.2
|
Upgrade ldap3 0.9.9.1 => 0.9.9.2
|
Python
|
mit
|
wylee/django-arcutils,PSU-OIT-ARC/django-arcutils,wylee/django-arcutils,PSU-OIT-ARC/django-arcutils
|
import sys
from setuptools import find_packages, setup
VERSION = '2.0.dev0'
install_requires = [
'django-local-settings>=1.0a10',
'stashward',
]
if sys.version_info[:2] < (3, 4):
install_requires.append('enum34')
setup(
name='django-arcutils',
version=VERSION,
url='https://github.com/PSU-OIT-ARC/django-arcutils',
author='PSU - OIT - ARC',
author_email='consultants@pdx.edu',
description='Common utilities used in ARC Django projects',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
extras_require={
'cas': [
'django-cas-client>=1.2.0',
],
'ldap': [
'ldap3>=0.9.9.1',
],
'dev': [
'django>=1.7',
'flake8',
'ldap3',
'mock',
'model_mommy',
],
},
entry_points="""
[console_scripts]
arcutils = arcutils.__main__:main
""",
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
Upgrade ldap3 0.9.9.1 => 0.9.9.2
|
import sys
from setuptools import find_packages, setup
VERSION = '2.0.dev0'
install_requires = [
'django-local-settings>=1.0a10',
'stashward',
]
if sys.version_info[:2] < (3, 4):
install_requires.append('enum34')
setup(
name='django-arcutils',
version=VERSION,
url='https://github.com/PSU-OIT-ARC/django-arcutils',
author='PSU - OIT - ARC',
author_email='consultants@pdx.edu',
description='Common utilities used in ARC Django projects',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
extras_require={
'cas': [
'django-cas-client>=1.2.0',
],
'ldap': [
'ldap3>=0.9.9.2',
],
'dev': [
'django>=1.7',
'flake8',
'ldap3',
'mock',
'model_mommy',
],
},
entry_points="""
[console_scripts]
arcutils = arcutils.__main__:main
""",
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
|
<commit_before>import sys
from setuptools import find_packages, setup
VERSION = '2.0.dev0'
install_requires = [
'django-local-settings>=1.0a10',
'stashward',
]
if sys.version_info[:2] < (3, 4):
install_requires.append('enum34')
setup(
name='django-arcutils',
version=VERSION,
url='https://github.com/PSU-OIT-ARC/django-arcutils',
author='PSU - OIT - ARC',
author_email='consultants@pdx.edu',
description='Common utilities used in ARC Django projects',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
extras_require={
'cas': [
'django-cas-client>=1.2.0',
],
'ldap': [
'ldap3>=0.9.9.1',
],
'dev': [
'django>=1.7',
'flake8',
'ldap3',
'mock',
'model_mommy',
],
},
entry_points="""
[console_scripts]
arcutils = arcutils.__main__:main
""",
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
<commit_msg>Upgrade ldap3 0.9.9.1 => 0.9.9.2<commit_after>
|
import sys
from setuptools import find_packages, setup
VERSION = '2.0.dev0'
install_requires = [
'django-local-settings>=1.0a10',
'stashward',
]
if sys.version_info[:2] < (3, 4):
install_requires.append('enum34')
setup(
name='django-arcutils',
version=VERSION,
url='https://github.com/PSU-OIT-ARC/django-arcutils',
author='PSU - OIT - ARC',
author_email='consultants@pdx.edu',
description='Common utilities used in ARC Django projects',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
extras_require={
'cas': [
'django-cas-client>=1.2.0',
],
'ldap': [
'ldap3>=0.9.9.2',
],
'dev': [
'django>=1.7',
'flake8',
'ldap3',
'mock',
'model_mommy',
],
},
entry_points="""
[console_scripts]
arcutils = arcutils.__main__:main
""",
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
|
import sys
from setuptools import find_packages, setup
VERSION = '2.0.dev0'
install_requires = [
'django-local-settings>=1.0a10',
'stashward',
]
if sys.version_info[:2] < (3, 4):
install_requires.append('enum34')
setup(
name='django-arcutils',
version=VERSION,
url='https://github.com/PSU-OIT-ARC/django-arcutils',
author='PSU - OIT - ARC',
author_email='consultants@pdx.edu',
description='Common utilities used in ARC Django projects',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
extras_require={
'cas': [
'django-cas-client>=1.2.0',
],
'ldap': [
'ldap3>=0.9.9.1',
],
'dev': [
'django>=1.7',
'flake8',
'ldap3',
'mock',
'model_mommy',
],
},
entry_points="""
[console_scripts]
arcutils = arcutils.__main__:main
""",
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
Upgrade ldap3 0.9.9.1 => 0.9.9.2import sys
from setuptools import find_packages, setup
VERSION = '2.0.dev0'
install_requires = [
'django-local-settings>=1.0a10',
'stashward',
]
if sys.version_info[:2] < (3, 4):
install_requires.append('enum34')
setup(
name='django-arcutils',
version=VERSION,
url='https://github.com/PSU-OIT-ARC/django-arcutils',
author='PSU - OIT - ARC',
author_email='consultants@pdx.edu',
description='Common utilities used in ARC Django projects',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
extras_require={
'cas': [
'django-cas-client>=1.2.0',
],
'ldap': [
'ldap3>=0.9.9.2',
],
'dev': [
'django>=1.7',
'flake8',
'ldap3',
'mock',
'model_mommy',
],
},
entry_points="""
[console_scripts]
arcutils = arcutils.__main__:main
""",
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
|
<commit_before>import sys
from setuptools import find_packages, setup
VERSION = '2.0.dev0'
install_requires = [
'django-local-settings>=1.0a10',
'stashward',
]
if sys.version_info[:2] < (3, 4):
install_requires.append('enum34')
setup(
name='django-arcutils',
version=VERSION,
url='https://github.com/PSU-OIT-ARC/django-arcutils',
author='PSU - OIT - ARC',
author_email='consultants@pdx.edu',
description='Common utilities used in ARC Django projects',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
extras_require={
'cas': [
'django-cas-client>=1.2.0',
],
'ldap': [
'ldap3>=0.9.9.1',
],
'dev': [
'django>=1.7',
'flake8',
'ldap3',
'mock',
'model_mommy',
],
},
entry_points="""
[console_scripts]
arcutils = arcutils.__main__:main
""",
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
<commit_msg>Upgrade ldap3 0.9.9.1 => 0.9.9.2<commit_after>import sys
from setuptools import find_packages, setup
VERSION = '2.0.dev0'
install_requires = [
'django-local-settings>=1.0a10',
'stashward',
]
if sys.version_info[:2] < (3, 4):
install_requires.append('enum34')
setup(
name='django-arcutils',
version=VERSION,
url='https://github.com/PSU-OIT-ARC/django-arcutils',
author='PSU - OIT - ARC',
author_email='consultants@pdx.edu',
description='Common utilities used in ARC Django projects',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
extras_require={
'cas': [
'django-cas-client>=1.2.0',
],
'ldap': [
'ldap3>=0.9.9.2',
],
'dev': [
'django>=1.7',
'flake8',
'ldap3',
'mock',
'model_mommy',
],
},
entry_points="""
[console_scripts]
arcutils = arcutils.__main__:main
""",
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
|
dfe5f213efd523891d77a8f390c690324f7bbb45
|
scripts/add_all_users_to_instances.py
|
scripts/add_all_users_to_instances.py
|
#!/usr/bin/env python
"""Add all users into the instances given on the command line.
"""
import os
import sys
from argparse import ArgumentParser
from paste.deploy import appconfig
from adhocracy.config.environment import load_environment
from adhocracy import model
def load_config(filename):
conf = appconfig('config:' + os.path.abspath(filename) + '#content')
load_environment(conf.global_conf, conf.local_conf)
def parse_args():
parser = ArgumentParser(description=__doc__)
parser.add_argument("conf_file", help="configuration to use")
parser.add_argument("instance_keys", metavar='instance', nargs='+',
help=("Instance key(s). If ALL is given, the "
"users will be added to all visible instances."))
return parser.parse_args()
def main():
args = parse_args()
load_config(args.conf_file)
session = model.meta.Session
num_added = {}
keys_added = {}
def increment(dict_, key):
num = dict_.get(key, 0)
num += 1
dict_[key] = num
# filter the instances we have to add
instance_keys = args.instance_keys
instances = model.Instance.all()
if not 'ALL' in instance_keys:
instances = [instance for instance in instances if instance.key
in instance_keys]
num_instances = len(instances)
users = model.User.all()
num_users = len(users)
total_added = 0
print "\n***Adding users***\n"
for user in users:
user_status = user.user_name + ': '
added = 0
for instance in set(instances) - set(user.instances):
increment(keys_added, instance.key)
user_status += instance.key + ' '
added += 1
total_added += 1
membership = model.Membership(user, instance,
instance.default_group)
session.add(membership)
if (total_added % 2) == 0:
session.commit()
if added:
print "%s (%s)" % (user_status, added)
increment(num_added, added)
session.commit()
print "\n***Statistics***"
print "Instances:", num_instances
print "Users:", num_users
print "\n**Instances added per user**\n"
added = 0
for (key, count) in sorted(num_added.items()):
added += key * count
print "%s membership added for %s users" % (key, count)
print ''
print "Mean number of memberships added per user: %s" % (float(added) /
float(num_users))
print "\n**Users added per group**\n"
for key in sorted(keys_added.keys()):
print "%s: %s" % (key, (float(keys_added[key]) / float(num_users)))
if __name__ == '__main__':
sys.exit(main())
|
Add a command line script to ad all users to instances
|
Add a command line script to ad all users to instances
|
Python
|
agpl-3.0
|
DanielNeugebauer/adhocracy,phihag/adhocracy,liqd/adhocracy,DanielNeugebauer/adhocracy,DanielNeugebauer/adhocracy,alkadis/vcv,SysTheron/adhocracy,SysTheron/adhocracy,liqd/adhocracy,phihag/adhocracy,alkadis/vcv,DanielNeugebauer/adhocracy,phihag/adhocracy,DanielNeugebauer/adhocracy,liqd/adhocracy,liqd/adhocracy,phihag/adhocracy,phihag/adhocracy,alkadis/vcv,SysTheron/adhocracy,alkadis/vcv,alkadis/vcv
|
Add a command line script to ad all users to instances
|
#!/usr/bin/env python
"""Add all users into the instances given on the command line.
"""
import os
import sys
from argparse import ArgumentParser
from paste.deploy import appconfig
from adhocracy.config.environment import load_environment
from adhocracy import model
def load_config(filename):
conf = appconfig('config:' + os.path.abspath(filename) + '#content')
load_environment(conf.global_conf, conf.local_conf)
def parse_args():
parser = ArgumentParser(description=__doc__)
parser.add_argument("conf_file", help="configuration to use")
parser.add_argument("instance_keys", metavar='instance', nargs='+',
help=("Instance key(s). If ALL is given, the "
"users will be added to all visible instances."))
return parser.parse_args()
def main():
args = parse_args()
load_config(args.conf_file)
session = model.meta.Session
num_added = {}
keys_added = {}
def increment(dict_, key):
num = dict_.get(key, 0)
num += 1
dict_[key] = num
# filter the instances we have to add
instance_keys = args.instance_keys
instances = model.Instance.all()
if not 'ALL' in instance_keys:
instances = [instance for instance in instances if instance.key
in instance_keys]
num_instances = len(instances)
users = model.User.all()
num_users = len(users)
total_added = 0
print "\n***Adding users***\n"
for user in users:
user_status = user.user_name + ': '
added = 0
for instance in set(instances) - set(user.instances):
increment(keys_added, instance.key)
user_status += instance.key + ' '
added += 1
total_added += 1
membership = model.Membership(user, instance,
instance.default_group)
session.add(membership)
if (total_added % 2) == 0:
session.commit()
if added:
print "%s (%s)" % (user_status, added)
increment(num_added, added)
session.commit()
print "\n***Statistics***"
print "Instances:", num_instances
print "Users:", num_users
print "\n**Instances added per user**\n"
added = 0
for (key, count) in sorted(num_added.items()):
added += key * count
print "%s membership added for %s users" % (key, count)
print ''
print "Mean number of memberships added per user: %s" % (float(added) /
float(num_users))
print "\n**Users added per group**\n"
for key in sorted(keys_added.keys()):
print "%s: %s" % (key, (float(keys_added[key]) / float(num_users)))
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add a command line script to ad all users to instances<commit_after>
|
#!/usr/bin/env python
"""Add all users into the instances given on the command line.
"""
import os
import sys
from argparse import ArgumentParser
from paste.deploy import appconfig
from adhocracy.config.environment import load_environment
from adhocracy import model
def load_config(filename):
conf = appconfig('config:' + os.path.abspath(filename) + '#content')
load_environment(conf.global_conf, conf.local_conf)
def parse_args():
parser = ArgumentParser(description=__doc__)
parser.add_argument("conf_file", help="configuration to use")
parser.add_argument("instance_keys", metavar='instance', nargs='+',
help=("Instance key(s). If ALL is given, the "
"users will be added to all visible instances."))
return parser.parse_args()
def main():
args = parse_args()
load_config(args.conf_file)
session = model.meta.Session
num_added = {}
keys_added = {}
def increment(dict_, key):
num = dict_.get(key, 0)
num += 1
dict_[key] = num
# filter the instances we have to add
instance_keys = args.instance_keys
instances = model.Instance.all()
if not 'ALL' in instance_keys:
instances = [instance for instance in instances if instance.key
in instance_keys]
num_instances = len(instances)
users = model.User.all()
num_users = len(users)
total_added = 0
print "\n***Adding users***\n"
for user in users:
user_status = user.user_name + ': '
added = 0
for instance in set(instances) - set(user.instances):
increment(keys_added, instance.key)
user_status += instance.key + ' '
added += 1
total_added += 1
membership = model.Membership(user, instance,
instance.default_group)
session.add(membership)
if (total_added % 2) == 0:
session.commit()
if added:
print "%s (%s)" % (user_status, added)
increment(num_added, added)
session.commit()
print "\n***Statistics***"
print "Instances:", num_instances
print "Users:", num_users
print "\n**Instances added per user**\n"
added = 0
for (key, count) in sorted(num_added.items()):
added += key * count
print "%s membership added for %s users" % (key, count)
print ''
print "Mean number of memberships added per user: %s" % (float(added) /
float(num_users))
print "\n**Users added per group**\n"
for key in sorted(keys_added.keys()):
print "%s: %s" % (key, (float(keys_added[key]) / float(num_users)))
if __name__ == '__main__':
sys.exit(main())
|
Add a command line script to ad all users to instances#!/usr/bin/env python
"""Add all users into the instances given on the command line.
"""
import os
import sys
from argparse import ArgumentParser
from paste.deploy import appconfig
from adhocracy.config.environment import load_environment
from adhocracy import model
def load_config(filename):
conf = appconfig('config:' + os.path.abspath(filename) + '#content')
load_environment(conf.global_conf, conf.local_conf)
def parse_args():
parser = ArgumentParser(description=__doc__)
parser.add_argument("conf_file", help="configuration to use")
parser.add_argument("instance_keys", metavar='instance', nargs='+',
help=("Instance key(s). If ALL is given, the "
"users will be added to all visible instances."))
return parser.parse_args()
def main():
args = parse_args()
load_config(args.conf_file)
session = model.meta.Session
num_added = {}
keys_added = {}
def increment(dict_, key):
num = dict_.get(key, 0)
num += 1
dict_[key] = num
# filter the instances we have to add
instance_keys = args.instance_keys
instances = model.Instance.all()
if not 'ALL' in instance_keys:
instances = [instance for instance in instances if instance.key
in instance_keys]
num_instances = len(instances)
users = model.User.all()
num_users = len(users)
total_added = 0
print "\n***Adding users***\n"
for user in users:
user_status = user.user_name + ': '
added = 0
for instance in set(instances) - set(user.instances):
increment(keys_added, instance.key)
user_status += instance.key + ' '
added += 1
total_added += 1
membership = model.Membership(user, instance,
instance.default_group)
session.add(membership)
if (total_added % 2) == 0:
session.commit()
if added:
print "%s (%s)" % (user_status, added)
increment(num_added, added)
session.commit()
print "\n***Statistics***"
print "Instances:", num_instances
print "Users:", num_users
print "\n**Instances added per user**\n"
added = 0
for (key, count) in sorted(num_added.items()):
added += key * count
print "%s membership added for %s users" % (key, count)
print ''
print "Mean number of memberships added per user: %s" % (float(added) /
float(num_users))
print "\n**Users added per group**\n"
for key in sorted(keys_added.keys()):
print "%s: %s" % (key, (float(keys_added[key]) / float(num_users)))
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add a command line script to ad all users to instances<commit_after>#!/usr/bin/env python
"""Add all users into the instances given on the command line.
"""
import os
import sys
from argparse import ArgumentParser
from paste.deploy import appconfig
from adhocracy.config.environment import load_environment
from adhocracy import model
def load_config(filename):
conf = appconfig('config:' + os.path.abspath(filename) + '#content')
load_environment(conf.global_conf, conf.local_conf)
def parse_args():
parser = ArgumentParser(description=__doc__)
parser.add_argument("conf_file", help="configuration to use")
parser.add_argument("instance_keys", metavar='instance', nargs='+',
help=("Instance key(s). If ALL is given, the "
"users will be added to all visible instances."))
return parser.parse_args()
def main():
args = parse_args()
load_config(args.conf_file)
session = model.meta.Session
num_added = {}
keys_added = {}
def increment(dict_, key):
num = dict_.get(key, 0)
num += 1
dict_[key] = num
# filter the instances we have to add
instance_keys = args.instance_keys
instances = model.Instance.all()
if not 'ALL' in instance_keys:
instances = [instance for instance in instances if instance.key
in instance_keys]
num_instances = len(instances)
users = model.User.all()
num_users = len(users)
total_added = 0
print "\n***Adding users***\n"
for user in users:
user_status = user.user_name + ': '
added = 0
for instance in set(instances) - set(user.instances):
increment(keys_added, instance.key)
user_status += instance.key + ' '
added += 1
total_added += 1
membership = model.Membership(user, instance,
instance.default_group)
session.add(membership)
if (total_added % 2) == 0:
session.commit()
if added:
print "%s (%s)" % (user_status, added)
increment(num_added, added)
session.commit()
print "\n***Statistics***"
print "Instances:", num_instances
print "Users:", num_users
print "\n**Instances added per user**\n"
added = 0
for (key, count) in sorted(num_added.items()):
added += key * count
print "%s membership added for %s users" % (key, count)
print ''
print "Mean number of memberships added per user: %s" % (float(added) /
float(num_users))
print "\n**Users added per group**\n"
for key in sorted(keys_added.keys()):
print "%s: %s" % (key, (float(keys_added[key]) / float(num_users)))
if __name__ == '__main__':
sys.exit(main())
|
|
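The heart of the script in this record is the set difference set(instances) - set(user.instances), which leaves only the instances a user has not yet joined. With plain strings standing in for the model objects (hypothetical keys, not from any real install), the idea reduces to:
all_instances = {'alpha', 'beta', 'gamma'}   # hypothetical instance keys
user_instances = {'beta'}                    # keys the user already belongs to
for key in sorted(all_instances - user_instances):
    # The real script creates a model.Membership here; this sketch only prints.
    print('would add membership for', key)   # alpha, gamma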
0628d8df0eb59e1b47d8bc01cb9c3bd566829d6e
|
beetsplug/embedcoverart.py
|
beetsplug/embedcoverart.py
|
from beets.plugins import BeetsPlugin
from beets import mediafile
import os, logging
from email.mime.image import MIMEImage
log = logging.getLogger('beets')
log.addHandler(logging.StreamHandler())
class EmbedAlbumartPlugin(BeetsPlugin):
'''Allows albumart to be embedded into the actual files'''
def __init__(self):
self.register_listener('loaded', self.loaded)
self.register_listener('album_imported', self.album_imported)
def configure(self, config):
pass
def loaded(self):
pass
def album_imported(self, album):
albumart = album.artpath
ALLOWED_MIMES = ('jpeg','png')
if albumart:
albumart_raw = open(albumart, 'rb').read()
img = MIMEImage(albumart_raw)
mime_img = img.get_content_subtype()
if mime_img in ALLOWED_MIMES:
mime_type = 'image/%s' % mime_img
for item in album.items():
f = mediafile.MediaFile(item)
if "mp3" in item.type:
f.albumart_mime = mime_type
f.albumart_data = albumart_raw
f.save()
|
Add initial version of the embed coverart plugin.
|
Add initial version of the embed coverart plugin.
This plugin allows users to embed the cover into the audio file.
Probaly still has a few bugs but it should work in most cases right
now.
|
Python
|
mit
|
ruippeixotog/beets,dfc/beets,mried/beets,arabenjamin/beets,krig/beets,ibmibmibm/beets,jackwilsdon/beets,asteven/beets,sampsyo/beets,asteven/beets,Kraymer/beets,beetbox/beets,Andypsamp/CODfinalJUNIT,jackwilsdon/beets,Wen777/beets,krig/beets,swt30/beets,YetAnotherNerd/beets,shamangeorge/beets,artemutin/beets,drm00/beets,jcoady9/beets,ttsda/beets,tima/beets,tima/beets,mathstuf/beets,gabrielaraujof/beets,ruippeixotog/beets,multikatt/beets,Andypsamp/CODjunit,multikatt/beets,lightwang1/beets,untitaker/beets,ngokevin/beets,arabenjamin/beets,LordSputnik/beets,swt30/beets,randybias/beets,ibmibmibm/beets,pdf/beets,ngokevin/beets,bj-yinyan/beets,SusannaMaria/beets,moodboom/beets,xsteadfastx/beets,lightwang1/beets,Andypsamp/CODjunit,aspidites/beets,sadatay/beets,beetbox/beets,andremiller/beets,PierreRust/beets,LordSputnik/beets,xsteadfastx/beets,pkess/beets,swt30/beets,MyTunesFreeMusic/privacy-policy,Freso/beets,Wen777/beets,ttsda/beets,xsteadfastx/beets,marcuskrahl/beets,YetAnotherNerd/beets,Wen777/beets,sampsyo/beets,sampsyo/beets,artemutin/beets,untitaker/beets,beetbox/beets,pkess/beets,tima/beets,mried/beets,pkess/beets,kareemallen/beets,moodboom/beets,randybias/beets,krig/beets,m-urban/beets,mosesfistos1/beetbox,SusannaMaria/beets,sampsyo/beets,gabrielaraujof/beets,artemutin/beets,ibmibmibm/beets,Dishwishy/beets,kareemallen/beets,jcoady9/beets,andremiller/beets,Dishwishy/beets,mclearc/beets,imsparsh/beets,sadatay/beets,MyTunesFreeMusic/privacy-policy,marcuskrahl/beets,jmwatte/beets,drm00/beets,PierreRust/beets,imsparsh/beets,shamangeorge/beets,andremiller/beets,parapente/beets,diego-plan9/beets,mathstuf/beets,jcoady9/beets,Andypsamp/CODfinalJUNIT,Andypsamp/CODfinalJUNIT,gabrielaraujof/beets,jmwatte/beets,jbaiter/beets,mosesfistos1/beetbox,diego-plan9/beets,shamangeorge/beets,dfc/beets,shamangeorge/beets,mried/beets,asteven/beets,mclearc/beets,ttsda/beets,Kraymer/beets,imsparsh/beets,jbaiter/beets,dfc/beets,arabenjamin/beets,mathstuf/beets,LordSputnik/beets,mosesfistos1/beetbox,bj-yinyan/beets,imsparsh/beets,diego-plan9/beets,shanemikel/beets,jcoady9/beets,SusannaMaria/beets,tima/beets,shanemikel/beets,Andypsamp/CODjunit,gabrielaraujof/beets,Dishwishy/beets,marcuskrahl/beets,SusannaMaria/beets,bj-yinyan/beets,LordSputnik/beets,kelvinhammond/beets,ttsda/beets,drm00/beets,MyTunesFreeMusic/privacy-policy,beetbox/beets,asteven/beets,marcuskrahl/beets,sadatay/beets,dfc/beets,xsteadfastx/beets,parapente/beets,diego-plan9/beets,kelvinhammond/beets,lightwang1/beets,mried/beets,ruippeixotog/beets,lengtche/beets,ruippeixotog/beets,parapente/beets,Andypsamp/CODjunit,Freso/beets,MyTunesFreeMusic/privacy-policy,moodboom/beets,ibmibmibm/beets,artemutin/beets,kareemallen/beets,lengtche/beets,untitaker/beets,pkess/beets,kareemallen/beets,m-urban/beets,PierreRust/beets,aspidites/beets,mclearc/beets,parapente/beets,mosesfistos1/beetbox,jayme-github/beets,moodboom/beets,jmwatte/beets,lightwang1/beets,madmouser1/beets,madmouser1/beets,jackwilsdon/beets,YetAnotherNerd/beets,Andypsamp/CODjunit,multikatt/beets,PierreRust/beets,Freso/beets,Kraymer/beets,pdf/beets,jackwilsdon/beets,jmwatte/beets,bj-yinyan/beets,kelvinhammond/beets,Freso/beets,Kraymer/beets,Andypsamp/CODfinalJUNIT,madmouser1/beets,madmouser1/beets,swt30/beets,m-urban/beets,Andypsamp/CODfinalJUNIT,shanemikel/beets,drm00/beets,lengtche/beets,m-urban/beets,Dishwishy/beets,arabenjamin/beets,randybias/beets,lengtche/beets,untitaker/beets,multikatt/beets,jayme-github/beets,kelvinhammond/beets,randybias/beets,sadatay/beets,shanemikel/beets,mathstuf/beets,YetAnothe
rNerd/beets
|
Add initial version of the embed coverart plugin.
This plugin allows users to embed the cover into the audio file.
Probaly still has a few bugs but it should work in most cases right
now.
|
from beets.plugins import BeetsPlugin
from beets import mediafile
import os, logging
from email.mime.image import MIMEImage
log = logging.getLogger('beets')
log.addHandler(logging.StreamHandler())
class EmbedAlbumartPlugin(BeetsPlugin):
'''Allows albumart to be embedded into the actual files'''
def __init__(self):
self.register_listener('loaded', self.loaded)
self.register_listener('album_imported', self.album_imported)
def configure(self, config):
pass
def loaded(self):
pass
def album_imported(self, album):
albumart = album.artpath
ALLOWED_MIMES = ('jpeg','png')
if albumart:
albumart_raw = open(albumart, 'rb').read()
img = MIMEImage(albumart_raw)
mime_img = img.get_content_subtype()
if mime_img in ALLOWED_MIMES:
mime_type = 'image/%s' % mime_img
for item in album.items():
f = mediafile.MediaFile(item)
if "mp3" in item.type:
f.albumart_mime = mime_type
f.albumart_data = albumart_raw
f.save()
|
<commit_before><commit_msg>Add initial version of the embed coverart plugin.
This plugin allows users to embed the cover into the audio file.
Probaly still has a few bugs but it should work in most cases right
now.<commit_after>
|
from beets.plugins import BeetsPlugin
from beets import mediafile
import os, logging
from email.mime.image import MIMEImage
log = logging.getLogger('beets')
log.addHandler(logging.StreamHandler())
class EmbedAlbumartPlugin(BeetsPlugin):
'''Allows albumart to be embedded into the actual files'''
def __init__(self):
self.register_listener('loaded', self.loaded)
self.register_listener('album_imported', self.album_imported)
def configure(self, config):
pass
def loaded(self):
pass
def album_imported(self, album):
albumart = album.artpath
ALLOWED_MIMES = ('jpeg','png')
if albumart:
albumart_raw = open(albumart, 'rb').read()
img = MIMEImage(albumart_raw)
mime_img = img.get_content_subtype()
if mime_img in ALLOWED_MIMES:
mime_type = 'image/%s' % mime_img
for item in album.items():
f = mediafile.MediaFile(item)
if "mp3" in item.type:
f.albumart_mime = mime_type
f.albumart_data = albumart_raw
f.save()
|
Add initial version of the embed coverart plugin.
This plugin allows users to embed the cover into the audio file.
Probaly still has a few bugs but it should work in most cases right
now.from beets.plugins import BeetsPlugin
from beets import mediafile
import os, logging
from email.mime.image import MIMEImage
log = logging.getLogger('beets')
log.addHandler(logging.StreamHandler())
class EmbedAlbumartPlugin(BeetsPlugin):
'''Allows albumart to be embedded into the actual files'''
def __init__(self):
self.register_listener('loaded', self.loaded)
self.register_listener('album_imported', self.album_imported)
def configure(self, config):
pass
def loaded(self):
pass
def album_imported(self, album):
albumart = album.artpath
ALLOWED_MIMES = ('jpeg','png')
if albumart:
albumart_raw = open(albumart, 'rb').read()
img = MIMEImage(albumart_raw)
mime_img = img.get_content_subtype()
if mime_img in ALLOWED_MIMES:
mime_type = 'image/%s' % mime_img
for item in album.items():
f = mediafile.MediaFile(item)
if "mp3" in item.type:
f.albumart_mime = mime_type
f.albumart_data = albumart_raw
f.save()
|
<commit_before><commit_msg>Add initial version of the embed coverart plugin.
This plugin allows users to embed the cover into the audio file.
Probaly still has a few bugs but it should work in most cases right
now.<commit_after>from beets.plugins import BeetsPlugin
from beets import mediafile
import os, logging
from email.mime.image import MIMEImage
log = logging.getLogger('beets')
log.addHandler(logging.StreamHandler())
class EmbedAlbumartPlugin(BeetsPlugin):
'''Allows albumart to be embedded into the actual files'''
def __init__(self):
self.register_listener('loaded', self.loaded)
self.register_listener('album_imported', self.album_imported)
def configure(self, config):
pass
def loaded(self):
pass
def album_imported(self, album):
albumart = album.artpath
ALLOWED_MIMES = ('jpeg','png')
if albumart:
albumart_raw = open(albumart, 'rb').read()
img = MIMEImage(albumart_raw)
mime_img = img.get_content_subtype()
if mime_img in ALLOWED_MIMES:
mime_type = 'image/%s' % mime_img
for item in album.items():
f = mediafile.MediaFile(item)
if "mp3" in item.type:
f.albumart_mime = mime_type
f.albumart_data = albumart_raw
f.save()
|
|
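The plugin in this record relies on email.mime.image.MIMEImage to sniff whether the cover file is a JPEG or PNG before embedding it. A standalone sketch of just that detection step, using only the standard library and a hypothetical cover.jpg path (note MIMEImage raises if it cannot guess the subtype, which is not handled here):
from email.mime.image import MIMEImage

ALLOWED_MIMES = ('jpeg', 'png')

def detect_cover_mime(path):
    # Read the raw bytes and let MIMEImage guess the image subtype from them.
    with open(path, 'rb') as fh:
        raw = fh.read()
    subtype = MIMEImage(raw).get_content_subtype()
    if subtype in ALLOWED_MIMES:
        return 'image/%s' % subtype
    return None

print(detect_cover_mime('cover.jpg'))  # hypothetical path; prints e.g. 'image/jpeg'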
13fc24f18f0b86781aa96deb95d4d8fa2cfc1706
|
KMeans.py
|
KMeans.py
|
import numpy as np
class KMeans(object):
def __init__(self, inputs, clusters):
self.inputs = np.array(inputs)
if self.inputs.ndim == 1:
self.inputs = self.inputs.reshape(self.inputs.shape[0], 1)
self.test_cases = self.inputs.shape[0]
self.num_clusters = clusters
self.clusters = self.inputs[np.random.choice(self.test_cases, clusters, replace=False), :]
self.cluster_dist = np.array([np.zeros((1, self.test_cases)) for i in range(self.num_clusters)])
def calculate_cluster_distances(self):
for i in range(self.num_clusters):
self.cluster_dist[i] = np.sum(self.inputs - self.clusters[i], axis=1)
def assign_data_points(self):
new_clusters = np.zeros(self.clusters.shape)
point_counts = np.zeros((self.num_clusters, 1))
for i in range(self.test_cases):
b_cluster = np.where(self.cluster_dist[:, :, i] == self.cluster_dist[:,:,i].min())[0][0]
new_clusters[b_cluster] += self.inputs[i]
point_counts[b_cluster] += 1
self.clusters[point_counts>0] = new_clusters[point_counts>0] / point_counts[point_counts>0]
def train(self):
while True:
old_clusters = self.clusters.copy()
self.calculate_cluster_distances()
self.assign_data_points()
if np.sum(old_clusters - self.clusters) == 0: break
|
Add K Means clustering algorithm
|
Add K Means clustering algorithm
|
Python
|
mit
|
prasanna08/MachineLearning
|
Add K Means clustering algorithm
|
import numpy as np
class KMeans(object):
def __init__(self, inputs, clusters):
self.inputs = np.array(inputs)
if self.inputs.ndim == 1:
self.inputs = self.inputs.reshape(self.inputs.shape[0], 1)
self.test_cases = self.inputs.shape[0]
self.num_clusters = clusters
self.clusters = self.inputs[np.random.choice(self.test_cases, clusters, replace=False), :]
self.cluster_dist = np.array([np.zeros((1, self.test_cases)) for i in range(self.num_clusters)])
def calculate_cluster_distances(self):
for i in range(self.num_clusters):
self.cluster_dist[i] = np.sum(self.inputs - self.clusters[i], axis=1)
def assign_data_points(self):
new_clusters = np.zeros(self.clusters.shape)
point_counts = np.zeros((self.num_clusters, 1))
for i in range(self.test_cases):
b_cluster = np.where(self.cluster_dist[:, :, i] == self.cluster_dist[:,:,i].min())[0][0]
new_clusters[b_cluster] += self.inputs[i]
point_counts[b_cluster] += 1
self.clusters[point_counts>0] = new_clusters[point_counts>0] / point_counts[point_counts>0]
def train(self):
while True:
old_clusters = self.clusters.copy()
self.calculate_cluster_distances()
self.assign_data_points()
if np.sum(old_clusters - self.clusters) == 0: break
|
<commit_before><commit_msg>Add K Means clustering algorithm<commit_after>
|
import numpy as np
class KMeans(object):
def __init__(self, inputs, clusters):
self.inputs = np.array(inputs)
if self.inputs.ndim == 1:
self.inputs = self.inputs.reshape(self.inputs.shape[0], 1)
self.test_cases = self.inputs.shape[0]
self.num_clusters = clusters
self.clusters = self.inputs[np.random.choice(self.test_cases, clusters, replace=False), :]
self.cluster_dist = np.array([np.zeros((1, self.test_cases)) for i in range(self.num_clusters)])
def calculate_cluster_distances(self):
for i in range(self.num_clusters):
self.cluster_dist[i] = np.sum(self.inputs - self.clusters[i], axis=1)
def assign_data_points(self):
new_clusters = np.zeros(self.clusters.shape)
point_counts = np.zeros((self.num_clusters, 1))
for i in range(self.test_cases):
b_cluster = np.where(self.cluster_dist[:, :, i] == self.cluster_dist[:,:,i].min())[0][0]
new_clusters[b_cluster] += self.inputs[i]
point_counts[b_cluster] += 1
self.clusters[point_counts>0] = new_clusters[point_counts>0] / point_counts[point_counts>0]
def train(self):
while True:
old_clusters = self.clusters.copy()
self.calculate_cluster_distances()
self.assign_data_points()
if np.sum(old_clusters - self.clusters) == 0: break
|
Add K Means clustering algorithmimport numpy as np
class KMeans(object):
def __init__(self, inputs, clusters):
self.inputs = np.array(inputs)
if self.inputs.ndim == 1:
self.inputs = self.inputs.reshape(self.inputs.shape[0], 1)
self.test_cases = self.inputs.shape[0]
self.num_clusters = clusters
self.clusters = self.inputs[np.random.choice(self.test_cases, clusters, replace=False), :]
self.cluster_dist = np.array([np.zeros((1, self.test_cases)) for i in range(self.num_clusters)])
def calculate_cluster_distances(self):
for i in range(self.num_clusters):
self.cluster_dist[i] = np.sum(self.inputs - self.clusters[i], axis=1)
def assign_data_points(self):
new_clusters = np.zeros(self.clusters.shape)
point_counts = np.zeros((self.num_clusters, 1))
for i in range(self.test_cases):
b_cluster = np.where(self.cluster_dist[:, :, i] == self.cluster_dist[:,:,i].min())[0][0]
new_clusters[b_cluster] += self.inputs[i]
point_counts[b_cluster] += 1
self.clusters[point_counts>0] = new_clusters[point_counts>0] / point_counts[point_counts>0]
def train(self):
while True:
old_clusters = self.clusters.copy()
self.calculate_cluster_distances()
self.assign_data_points()
if np.sum(old_clusters - self.clusters) == 0: break
|
<commit_before><commit_msg>Add K Means clustering algorithm<commit_after>import numpy as np
class KMeans(object):
def __init__(self, inputs, clusters):
self.inputs = np.array(inputs)
if self.inputs.ndim == 1:
self.inputs = self.inputs.reshape(self.inputs.shape[0], 1)
self.test_cases = self.inputs.shape[0]
self.num_clusters = clusters
self.clusters = self.inputs[np.random.choice(self.test_cases, clusters, replace=False), :]
self.cluster_dist = np.array([np.zeros((1, self.test_cases)) for i in range(self.num_clusters)])
def calculate_cluster_distances(self):
for i in range(self.num_clusters):
self.cluster_dist[i] = np.sum(self.inputs - self.clusters[i], axis=1)
def assign_data_points(self):
new_clusters = np.zeros(self.clusters.shape)
point_counts = np.zeros((self.num_clusters, 1))
for i in range(self.test_cases):
b_cluster = np.where(self.cluster_dist[:, :, i] == self.cluster_dist[:,:,i].min())[0][0]
new_clusters[b_cluster] += self.inputs[i]
point_counts[b_cluster] += 1
self.clusters[point_counts>0] = new_clusters[point_counts>0] / point_counts[point_counts>0]
def train(self):
while True:
old_clusters = self.clusters.copy()
self.calculate_cluster_distances()
self.assign_data_points()
if np.sum(old_clusters - self.clusters) == 0: break
|
|
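As a quick, self-contained illustration of the KMeans class from this record (the data below is made up, and train() simply repeats the same two steps until the centroids stop changing), a single update step can be run like this:
import numpy as np

# Assumes the KMeans class defined in the record above is in scope.
np.random.seed(0)
points = np.vstack([
    np.random.randn(50, 2),               # blob near the origin
    np.random.randn(50, 2) + [5.0, 5.0]   # second blob, offset from it
])

km = KMeans(points, 2)
km.calculate_cluster_distances()  # per-point offsets to each centroid (summed coordinate differences, as coded above)
km.assign_data_points()           # move each centroid to the mean of the points nearest it
print(km.clusters)                # centroid estimates after one update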
4561811f536e887cb1fda331bee813ed313cbef8
|
custom/icds/management/commands/find_cases_with_no_delivery.py
|
custom/icds/management/commands/find_cases_with_no_delivery.py
|
from __future__ import absolute_import
from __future__ import unicode_literals
import csv
from django.core.management import BaseCommand
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.models import CommCareCaseSQL
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
from dimagi.utils.chunked import chunked
class Command(BaseCommand):
"""https://manage.dimagi.com/default.asp?265190
Some ccs_record cases have evidence of a delivery occurring, but do not
have an associated delivery form. The implications on the case are that an
add property is not set and the schedule phase is 2.
"""
def add_arguments(self, parser):
parser.add_argument(
'csv_file',
help="File path for csv file",
)
def handle(self, csv_file, **options):
self.domain = 'icds-cas'
self.case_accessor = CaseAccessors(self.domain)
with open(csv_file, "w") as csv_file:
field_names = [
'case_id', 'owner_id', 'modified_on', 'server_modified_on',
'add', 'edd', 'ccs_phase', 'num_pnc_visits', 'current_schedule_phase'
]
csv_writer = csv.DictWriter(csv_file, field_names, extrasaction='ignore')
csv_writer.writeheader()
for ccs_case in self._get_cases():
properties = ccs_case.case_json
if 'add' in properties:
continue
if properties.get('current_schedule_phase') != 2:
continue
properties.update({
'case_id': ccs_case.case_id,
'owner_id': ccs_case.owner_id,
'modified_on': ccs_case.modified_on,
'server_modified_on': ccs_case.modified_on
})
csv_writer.write_row(properties)
def _get_cases(self):
dbs = get_db_aliases_for_partitioned_query()
for db in dbs:
ccs_record_case_ids = (
CommCareCaseSQL.objects
.using(db)
.filter(domain=self.domain, type='ccs_record', closed=False)
.values_list('case_id', flat=True)
)
for case_ids in chunked(ccs_record_case_ids, 100):
cases = self.case_accessor.get_cases(case_ids)
for case in cases:
yield case
|
Add management command to find ccs record cases without delivery form
|
Add management command to find ccs record cases without delivery form
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add management command to find ccs record cases without delivery form
|
from __future__ import absolute_import
from __future__ import unicode_literals
import csv
from django.core.management import BaseCommand
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.models import CommCareCaseSQL
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
from dimagi.utils.chunked import chunked
class Command(BaseCommand):
"""https://manage.dimagi.com/default.asp?265190
Some ccs_record cases have evidence of a delivery occurring, but do not
have an associated delivery form. The implications on the case are that an
add property is not set and the schedule phase is 2.
"""
def add_arguments(self, parser):
parser.add_argument(
'csv_file',
help="File path for csv file",
)
def handle(self, csv_file, **options):
self.domain = 'icds-cas'
self.case_accessor = CaseAccessors(self.domain)
with open(csv_file, "w") as csv_file:
field_names = [
'case_id', 'owner_id', 'modified_on', 'server_modified_on',
'add', 'edd', 'ccs_phase', 'num_pnc_visits', 'current_schedule_phase'
]
csv_writer = csv.DictWriter(csv_file, field_names, extrasaction='ignore')
csv_writer.writeheader()
for ccs_case in self._get_cases():
properties = ccs_case.case_json
if 'add' in properties:
continue
if properties.get('current_schedule_phase') != 2:
continue
properties.update({
'case_id': ccs_case.case_id,
'owner_id': ccs_case.owner_id,
'modified_on': ccs_case.modified_on,
'server_modified_on': ccs_case.modified_on
})
csv_writer.write_row(properties)
def _get_cases(self):
dbs = get_db_aliases_for_partitioned_query()
for db in dbs:
ccs_record_case_ids = (
CommCareCaseSQL.objects
.using(db)
.filter(domain=self.domain, type='ccs_record', closed=False)
.values_list('case_id', flat=True)
)
for case_ids in chunked(ccs_record_case_ids, 100):
cases = self.case_accessor.get_cases(case_ids)
for case in cases:
yield case
|
<commit_before><commit_msg>Add management command to find ccs record cases without delivery form<commit_after>
|
from __future__ import absolute_import
from __future__ import unicode_literals
import csv
from django.core.management import BaseCommand
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.models import CommCareCaseSQL
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
from dimagi.utils.chunked import chunked
class Command(BaseCommand):
"""https://manage.dimagi.com/default.asp?265190
Some ccs_record cases have evidence of a delivery occurring, but do not
have an associated delivery form. The implications on the case are that an
add property is not set and the schedule phase is 2.
"""
def add_arguments(self, parser):
parser.add_argument(
'csv_file',
help="File path for csv file",
)
def handle(self, csv_file, **options):
self.domain = 'icds-cas'
self.case_accessor = CaseAccessors(self.domain)
with open(csv_file, "w") as csv_file:
field_names = [
'case_id', 'owner_id', 'modified_on', 'server_modified_on',
'add', 'edd', 'ccs_phase', 'num_pnc_visits', 'current_schedule_phase'
]
csv_writer = csv.DictWriter(csv_file, field_names, extrasaction='ignore')
csv_writer.writeheader()
for ccs_case in self._get_cases():
properties = ccs_case.case_json
if 'add' in properties:
continue
if properties.get('current_schedule_phase') != 2:
continue
properties.update({
'case_id': ccs_case.case_id,
'owner_id': ccs_case.owner_id,
'modified_on': ccs_case.modified_on,
'server_modified_on': ccs_case.modified_on
})
csv_writer.write_row(properties)
def _get_cases(self):
dbs = get_db_aliases_for_partitioned_query()
for db in dbs:
ccs_record_case_ids = (
CommCareCaseSQL.objects
.using(db)
.filter(domain=self.domain, type='ccs_record', closed=False)
.values_list('case_id', flat=True)
)
for case_ids in chunked(ccs_record_case_ids, 100):
cases = self.case_accessor.get_cases(case_ids)
for case in cases:
yield case
|
Add management command to find ccs record cases without delivery formfrom __future__ import absolute_import
from __future__ import unicode_literals
import csv
from django.core.management import BaseCommand
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.models import CommCareCaseSQL
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
from dimagi.utils.chunked import chunked
class Command(BaseCommand):
"""https://manage.dimagi.com/default.asp?265190
Some ccs_record cases have evidence of a delivery occurring, but do not
have an associated delivery form. The implications on the case are that an
add property is not set and the schedule phase is 2.
"""
def add_arguments(self, parser):
parser.add_argument(
'csv_file',
help="File path for csv file",
)
def handle(self, csv_file, **options):
self.domain = 'icds-cas'
self.case_accessor = CaseAccessors(self.domain)
with open(csv_file, "w") as csv_file:
field_names = [
'case_id', 'owner_id', 'modified_on', 'server_modified_on',
'add', 'edd', 'ccs_phase', 'num_pnc_visits', 'current_schedule_phase'
]
csv_writer = csv.DictWriter(csv_file, field_names, extrasaction='ignore')
csv_writer.writeheader()
for ccs_case in self._get_cases():
properties = ccs_case.case_json
if 'add' in properties:
continue
if properties.get('current_schedule_phase') != 2:
continue
properties.update({
'case_id': ccs_case.case_id,
'owner_id': ccs_case.owner_id,
'modified_on': ccs_case.modified_on,
'server_modified_on': ccs_case.modified_on
})
csv_writer.write_row(properties)
def _get_cases(self):
dbs = get_db_aliases_for_partitioned_query()
for db in dbs:
ccs_record_case_ids = (
CommCareCaseSQL.objects
.using(db)
.filter(domain=self.domain, type='ccs_record', closed=False)
.values_list('case_id', flat=True)
)
for case_ids in chunked(ccs_record_case_ids, 100):
cases = self.case_accessor.get_cases(case_ids)
for case in cases:
yield case
|
<commit_before><commit_msg>Add management command to find ccs record cases without delivery form<commit_after>from __future__ import absolute_import
from __future__ import unicode_literals
import csv
from django.core.management import BaseCommand
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.models import CommCareCaseSQL
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
from dimagi.utils.chunked import chunked
class Command(BaseCommand):
"""https://manage.dimagi.com/default.asp?265190
Some ccs_record cases have evidence of a delivery occurring, but do not
have an associated delivery form. The implications on the case are that an
add property is not set and the schedule phase is 2.
"""
def add_arguments(self, parser):
parser.add_argument(
'csv_file',
help="File path for csv file",
)
def handle(self, csv_file, **options):
self.domain = 'icds-cas'
self.case_accessor = CaseAccessors(self.domain)
with open(csv_file, "w") as csv_file:
field_names = [
'case_id', 'owner_id', 'modified_on', 'server_modified_on',
'add', 'edd', 'ccs_phase', 'num_pnc_visits', 'current_schedule_phase'
]
csv_writer = csv.DictWriter(csv_file, field_names, extrasaction='ignore')
csv_writer.writeheader()
for ccs_case in self._get_cases():
properties = ccs_case.case_json
if 'add' in properties:
continue
if properties.get('current_schedule_phase') != 2:
continue
properties.update({
'case_id': ccs_case.case_id,
'owner_id': ccs_case.owner_id,
'modified_on': ccs_case.modified_on,
'server_modified_on': ccs_case.modified_on
})
csv_writer.write_row(properties)
def _get_cases(self):
dbs = get_db_aliases_for_partitioned_query()
for db in dbs:
ccs_record_case_ids = (
CommCareCaseSQL.objects
.using(db)
.filter(domain=self.domain, type='ccs_record', closed=False)
.values_list('case_id', flat=True)
)
for case_ids in chunked(ccs_record_case_ids, 100):
cases = self.case_accessor.get_cases(case_ids)
for case in cases:
yield case
|
|
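The management command above pulls case IDs in batches of 100 via dimagi.utils.chunked. Outside that codebase the same batching can be approximated with nothing but itertools — a stand-in sketch, not the dimagi implementation, shown on made-up IDs:
from itertools import islice

def chunked(iterable, size):
    # Yield lists of up to `size` items until the iterable is exhausted.
    it = iter(iterable)
    while True:
        batch = list(islice(it, size))
        if not batch:
            return
        yield batch

case_ids = ['case-%03d' % i for i in range(7)]  # hypothetical IDs
for batch in chunked(case_ids, 3):
    print(batch)  # three IDs, then three, then the final one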
69a07d161295e217324d28948b1d565e2170e636
|
app/error.py
|
app/error.py
|
import six
class AppError(Exception):
"""App exception"""
def __init__(self, reason, response=None):
self.reason = six.text_type(reason)
self.response = response
Exception.__init__(self, reason)
def __str__(self):
return self.reason
|
Add class to raise app exceptions
|
Add class to raise app exceptions
|
Python
|
mit
|
rebearteta/social-ideation,joausaga/social-ideation,rebearteta/social-ideation,rebearteta/social-ideation,joausaga/social-ideation,joausaga/social-ideation,joausaga/social-ideation,rebearteta/social-ideation
|
Add class to raise app exceptions
|
import six
class AppError(Exception):
"""App exception"""
def __init__(self, reason, response=None):
self.reason = six.text_type(reason)
self.response = response
Exception.__init__(self, reason)
def __str__(self):
return self.reason
|
<commit_before><commit_msg>Add class to raise app exceptions<commit_after>
|
import six
class AppError(Exception):
"""App exception"""
def __init__(self, reason, response=None):
self.reason = six.text_type(reason)
self.response = response
Exception.__init__(self, reason)
def __str__(self):
return self.reason
|
Add class to raise app exceptionsimport six
class AppError(Exception):
"""App exception"""
def __init__(self, reason, response=None):
self.reason = six.text_type(reason)
self.response = response
Exception.__init__(self, reason)
def __str__(self):
return self.reason
|
<commit_before><commit_msg>Add class to raise app exceptions<commit_after>import six
class AppError(Exception):
"""App exception"""
def __init__(self, reason, response=None):
self.reason = six.text_type(reason)
self.response = response
Exception.__init__(self, reason)
def __str__(self):
return self.reason
|
|
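Because the record only defines the exception class, a small hypothetical example of how such an AppError would be raised and inspected may help; the fake response dict below stands in for whatever response object the app actually passes:
# Assumes AppError from the record above is importable.
def fetch_idea(idea_id):
    fake_response = {'status': 404, 'body': ''}  # stand-in for a real HTTP response object
    if fake_response['status'] != 200:
        raise AppError('idea %s not found' % idea_id, response=fake_response)
    return fake_response['body']

try:
    fetch_idea(42)
except AppError as exc:
    print(exc)           # __str__ returns the reason text
    print(exc.response)  # the response object stored on the exception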
ff0fc074972adad68173a44866343156696660f3
|
microlab_instruments/base_classes.py
|
microlab_instruments/base_classes.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import aardvark
import numpy as gpib
import scipy as serial
import socket
class Instrument(object):
def __init__(self):
pass
def write(self):
pass
def read(self):
pass
def ask(self, scpi_string):
self.write(scpi_string)
return self.read()
class AardvarkInstrument(Instrument):
def __init__(self):
pass
class GPIBInstrument(Instrument):
def __init__(self, nickname):
self.device = gpib.find(nickname)
def write(self, scpi_string):
gpib.write(self.device, scpi_string)
def read(self):
return gpib.read(self.device, 4096)
class TCPIPInstrument(Instrument):
def __init__(self, ip_address, port):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((ip_address, port))
def __del__(self):
self.socket.close()
def write(self, scpi_string):
self.socket.send(scpi_string)
def read(self):
return self.socket.recv(4096)
class SerialInstrument(Instrument):
def __init__(self, device_port):
self.serial = serial.Serial(device_port)
def __del__(self):
self.serial.close()
|
Add placeholders for base classes
|
Add placeholders for base classes
|
Python
|
bsd-3-clause
|
kitmonisit/microlab-instruments
|
Add placeholders for base classes
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import aardvark
import numpy as gpib
import scipy as serial
import socket
class Instrument(object):
def __init__(self):
pass
def write(self):
pass
def read(self):
pass
def ask(self, scpi_string):
self.write(scpi_string)
return self.read()
class AardvarkInstrument(Instrument):
def __init__(self):
pass
class GPIBInstrument(Instrument):
def __init__(self, nickname):
self.device = gpib.find(nickname)
def write(self, scpi_string):
gpib.write(self.device, scpi_string)
def read(self):
return gpib.read(self.device, 4096)
class TCPIPInstrument(Instrument):
def __init__(self, ip_address, port):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((ip_address, port))
def __del__(self):
self.socket.close()
def write(self, scpi_string):
self.socket.send(scpi_string)
def read(self):
return self.socket.recv(4096)
class SerialInstrument(Instrument):
def __init__(self, device_port):
self.serial = serial.Serial(device_port)
def __del__(self):
self.serial.close()
|
<commit_before><commit_msg>Add placeholders for base classes<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import aardvark
import numpy as gpib
import scipy as serial
import socket
class Instrument(object):
def __init__(self):
pass
def write(self):
pass
def read(self):
pass
def ask(self, scpi_string):
self.write(scpi_string)
return self.read()
class AardvarkInstrument(Instrument):
def __init__(self):
pass
class GPIBInstrument(Instrument):
def __init__(self, nickname):
self.device = gpib.find(nickname)
def write(self, scpi_string):
gpib.write(self.device, scpi_string)
def read(self):
return gpib.read(self.device, 4096)
class TCPIPInstrument(Instrument):
def __init__(self, ip_address, port):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((ip_address, port))
def __del__(self):
self.socket.close()
def write(self, scpi_string):
self.socket.send(scpi_string)
def read(self):
return self.socket.recv(4096)
class SerialInstrument(Instrument):
def __init__(self, device_port):
self.serial = serial.Serial(device_port)
def __del__(self):
self.serial.close()
|
Add placeholders for base classes#!/usr/bin/env python
# -*- coding: utf-8 -*-
import aardvark
import numpy as gpib
import scipy as serial
import socket
class Instrument(object):
def __init__(self):
pass
def write(self):
pass
def read(self):
pass
def ask(self, scpi_string):
self.write(scpi_string)
return self.read()
class AardvarkInstrument(Instrument):
def __init__(self):
pass
class GPIBInstrument(Instrument):
def __init__(self, nickname):
self.device = gpib.find(nickname)
def write(self, scpi_string):
gpib.write(self.device, scpi_string)
def read(self):
return gpib.read(self.device, 4096)
class TCPIPInstrument(Instrument):
def __init__(self, ip_address, port):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((ip_address, port))
def __del__(self):
self.socket.close()
def write(self, scpi_string):
self.socket.send(scpi_string)
def read(self):
return self.socket.recv(4096)
class SerialInstrument(Instrument):
def __init__(self, device_port):
self.serial = serial.Serial(device_port)
def __del__(self):
self.serial.close()
|
<commit_before><commit_msg>Add placeholders for base classes<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import aardvark
import numpy as gpib
import scipy as serial
import socket
class Instrument(object):
def __init__(self):
pass
def write(self):
pass
def read(self):
pass
def ask(self, scpi_string):
self.write(scpi_string)
return self.read()
class AardvarkInstrument(Instrument):
def __init__(self):
pass
class GPIBInstrument(Instrument):
def __init__(self, nickname):
self.device = gpib.find(nickname)
def write(self, scpi_string):
gpib.write(self.device, scpi_string)
def read(self):
return gpib.read(self.device, 4096)
class TCPIPInstrument(Instrument):
def __init__(self, ip_address, port):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((ip_address, port))
def __del__(self):
self.socket.close()
def write(self, scpi_string):
self.socket.send(scpi_string)
def read(self):
return self.socket.recv(4096)
class SerialInstrument(Instrument):
def __init__(self, device_port):
self.serial = serial.Serial(device_port)
def __del__(self):
self.serial.close()
|
|
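The classes in this record are thin placeholders, but their intended use is already visible from Instrument.ask(). A rough usage sketch against a hypothetical SCPI instrument at 192.168.1.50 port 5025 (address, port and query string are all illustrative, and the snippet keeps the record's Python-2-era string handling):
# Assumes TCPIPInstrument from the record above is importable.
scope = TCPIPInstrument('192.168.1.50', 5025)  # hypothetical LAN-attached instrument
idn = scope.ask('*IDN?\n')  # write() sends the query, read() returns up to 4096 bytes from the socket
print(idn)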
c6a0fe7c4e5046154beda2e2e60fdf5a20b34f95
|
Cura/plugins/pauseAtZ.py
|
Cura/plugins/pauseAtZ.py
|
#Name: Pause at height
#Info: Pause the printer at a certain height
#Depend: GCode
#Type: postprocess
#Param: pauseLevel(float:5.0) Pause height (mm)
#Param: parkX(float:190) Head park X (mm)
#Param: parkY(float:190) Head park Y (mm)
#Param: retractAmount(float:5) Retraction amount (mm)
import re
def getValue(line, key, default = None):
if not key in line or (';' in line and line.find(key) > line.find(';')):
return default
subPart = line[line.find(key) + 1:]
m = re.search('^[0-9]+\.?[0-9]*', subPart)
if m == None:
return default
try:
return float(m.group(0))
except:
return default
with open(filename, "r") as f:
lines = f.readlines()
z = 0
x = 0
y = 0
pauseState = 0
with open(filename, "w") as f:
for line in lines:
if getValue(line, 'G', None) == 1:
newZ = getValue(line, 'Z', z)
x = getValue(line, 'X', x)
y = getValue(line, 'Y', y)
if newZ != z:
z = newZ
if z < pauseLevel and pauseState == 0:
pauseState = 1
if z >= pauseLevel and pauseState == 1:
pauseState = 2
#Retract
f.write("M83\n")
f.write("G1 E-%f F6000\n" % (retractAmount))
#Move the head away
f.write("G1 X%f Y%f F9000\n" % (parkX, parkY))
#Wait till the user continues printing
f.write("M0\n")
#Move the head back
f.write("G1 X%f Y%f F9000\n" % (x, y))
f.write("G1 E%f F6000\n" % (retractAmount))
f.write("G1 F9000\n")
f.write("M82\n")
f.write(line)
|
Add pauze at height plugin.
|
Add pauze at height plugin.
|
Python
|
agpl-3.0
|
MolarAmbiguity/OctoPrint,nickverschoor/OctoPrint,shohei/Octoprint,madhuni/AstroBox,ErikDeBruijn/OctoPrint,shohei/Octoprint,AstroPrint/AstroBox,Mikk36/OctoPrint,shohei/Octoprint,beeverycreative/BEEweb,CapnBry/OctoPrint,mayoff/OctoPrint,MaxOLydian/OctoPrint,EZ3-India/EZ-Remote,mcanes/OctoPrint,markwal/OctoPrint,masterhou/OctoPrint,MoonshineSG/OctoPrint,abinashk-inf/AstroBox,dansantee/OctoPrint,mayoff/OctoPrint,uuv/OctoPrint,Mikk36/OctoPrint,alephobjects/Cura,spapadim/OctoPrint,alephobjects/Cura,punkkeks/OctoPrint,Salandora/OctoPrint,dansantee/OctoPrint,Jaesin/OctoPrint,Voxel8/OctoPrint,chriskoz/OctoPrint,markwal/OctoPrint,skieast/OctoPrint,MolarAmbiguity/OctoPrint,shaggythesheep/OctoPrint,foosel/OctoPrint,bicephale/OctoPrint,ryanneufeld/OctoPrint,3dprintcanalhouse/octoprint2,MaxOLydian/OctoPrint,eliasbakken/OctoPrint,jneves/OctoPrint,abinashk-inf/AstroBox,ryanneufeld/OctoPrint,mcanes/OctoPrint,javivi001/OctoPrint,Voxel8/OctoPrint,madhuni/AstroBox,Skeen/OctoPrint,leductan-nguyen/RaionPi,javivi001/OctoPrint,shaggythesheep/OctoPrint,SeveQ/OctoPrint,alex1818/OctoPrint,chriskoz/OctoPrint,abinashk-inf/AstroBox,alex1818/OctoPrint,madhuni/AstroBox,ymilord/OctoPrint-MrBeam,Skeen/OctoPrint,rurkowce/octoprint-fork,EZ3-India/EZ-Remote,foosel/OctoPrint,3dprintcanalhouse/octoprint1,ryanneufeld/OctoPrint,CapnBry/OctoPrint,mayoff/OctoPrint,Javierma/OctoPrint-TFG,nickverschoor/OctoPrint,skieast/OctoPrint,jneves/OctoPrint,ryanneufeld/OctoPrint,ymilord/OctoPrint-MrBeam,Mikk36/OctoPrint,abinashk-inf/AstroBox,mrbeam/OctoPrint,3dprintcanalhouse/octoprint2,Salandora/OctoPrint,hudbrog/OctoPrint,nicanor-romero/OctoPrint,shohei/Octoprint,CapnBry/OctoPrint,jneves/OctoPrint,Jaesin/OctoPrint,uuv/OctoPrint,MoonshineSG/OctoPrint,markwal/OctoPrint,ErikDeBruijn/OctoPrint,AstroPrint/AstroBox,masterhou/OctoPrint,nickverschoor/OctoPrint,Javierma/OctoPrint-TFG,MolarAmbiguity/OctoPrint,leductan-nguyen/RaionPi,ymilord/OctoPrint-MrBeam,shaggythesheep/OctoPrint,alex1818/OctoPrint,beeverycreative/BEEweb,C-o-r-E/OctoPrint,punkkeks/OctoPrint,dansantee/OctoPrint,Javierma/OctoPrint-TFG,foosel/OctoPrint,Salandora/OctoPrint,C-o-r-E/OctoPrint,MoonshineSG/OctoPrint,Skeen/OctoPrint,dragondgold/OctoPrint,MoonshineSG/OctoPrint,ErikDeBruijn/OctoPrint,rurkowce/octoprint-fork,eliasbakken/OctoPrint,eddieparker/OctoPrint,bicephale/OctoPrint,3dprintcanalhouse/octoprint1,hudbrog/OctoPrint,ymilord/OctoPrint-MrBeam,sstocker46/OctoPrint,nicanor-romero/OctoPrint,aerickson/OctoPrint,Catrodigious/OctoPrint-TAM,foosel/OctoPrint,senttech/OctoPrint,eddieparker/OctoPrint,eddieparker/OctoPrint,bicephale/OctoPrint,sstocker46/OctoPrint,sstocker46/OctoPrint,nickverschoor/OctoPrint,chriskoz/OctoPrint,mcanes/OctoPrint,leductan-nguyen/RaionPi,CapnBry/OctoPrint,Javierma/OctoPrint-TFG,chriskoz/OctoPrint,EZ3-India/EZ-Remote,JackGavin13/octoprint-test-not-finished,spapadim/OctoPrint,mrbeam/OctoPrint,beeverycreative/BEEweb,leductan-nguyen/RaionPi,Catrodigious/OctoPrint-TAM,eliasbakken/OctoPrint,uuv/OctoPrint,spapadim/OctoPrint,bicephale/OctoPrint,Catrodigious/OctoPrint-TAM,d42/octoprint-fork,dragondgold/OctoPrint,SeveQ/OctoPrint,skieast/OctoPrint,ymilord/OctoPrint-MrBeam,MaxOLydian/OctoPrint,masterhou/OctoPrint,C-o-r-E/OctoPrint,senttech/OctoPrint,punkkeks/OctoPrint,aerickson/OctoPrint,senttech/OctoPrint,JackGavin13/octoprint-test-not-finished,senttech/OctoPrint,JackGavin13/octoprint-test-not-finished,alephobjects/Cura,DanLipsitt/OctoPrint,madhuni/AstroBox,mrbeam/OctoPrint,Jaesin/OctoPrint,javivi001/OctoPrint,Voxel8/OctoPrint,Salandora/OctoPrint,aerickson/OctoPrint,Jaesin/
OctoPrint,d42/octoprint-fork,dragondgold/OctoPrint,nicanor-romero/OctoPrint,DanLipsitt/OctoPrint,beeverycreative/BEEweb,JackGavin13/octoprint-test-not-finished,shohei/Octoprint,hudbrog/OctoPrint,EZ3-India/EZ-Remote,AstroPrint/AstroBox,SeveQ/OctoPrint
|
Add pauze at height plugin.
|
#Name: Pause at height
#Info: Pause the printer at a certain height
#Depend: GCode
#Type: postprocess
#Param: pauseLevel(float:5.0) Pause height (mm)
#Param: parkX(float:190) Head park X (mm)
#Param: parkY(float:190) Head park Y (mm)
#Param: retractAmount(float:5) Retraction amount (mm)
import re
def getValue(line, key, default = None):
if not key in line or (';' in line and line.find(key) > line.find(';')):
return default
subPart = line[line.find(key) + 1:]
m = re.search('^[0-9]+\.?[0-9]*', subPart)
if m == None:
return default
try:
return float(m.group(0))
except:
return default
with open(filename, "r") as f:
lines = f.readlines()
z = 0
x = 0
y = 0
pauseState = 0
with open(filename, "w") as f:
for line in lines:
if getValue(line, 'G', None) == 1:
newZ = getValue(line, 'Z', z)
x = getValue(line, 'X', x)
y = getValue(line, 'Y', y)
if newZ != z:
z = newZ
if z < pauseLevel and pauseState == 0:
pauseState = 1
if z >= pauseLevel and pauseState == 1:
pauseState = 2
#Retract
f.write("M83\n")
f.write("G1 E-%f F6000\n" % (retractAmount))
#Move the head away
f.write("G1 X%f Y%f F9000\n" % (parkX, parkY))
#Wait till the user continues printing
f.write("M0\n")
#Move the head back
f.write("G1 X%f Y%f F9000\n" % (x, y))
f.write("G1 E%f F6000\n" % (retractAmount))
f.write("G1 F9000\n")
f.write("M82\n")
f.write(line)
|
<commit_before><commit_msg>Add pauze at height plugin.<commit_after>
|
#Name: Pause at height
#Info: Pause the printer at a certain height
#Depend: GCode
#Type: postprocess
#Param: pauseLevel(float:5.0) Pause height (mm)
#Param: parkX(float:190) Head park X (mm)
#Param: parkY(float:190) Head park Y (mm)
#Param: retractAmount(float:5) Retraction amount (mm)
import re
def getValue(line, key, default = None):
if not key in line or (';' in line and line.find(key) > line.find(';')):
return default
subPart = line[line.find(key) + 1:]
m = re.search('^[0-9]+\.?[0-9]*', subPart)
if m == None:
return default
try:
return float(m.group(0))
except:
return default
with open(filename, "r") as f:
lines = f.readlines()
z = 0
x = 0
y = 0
pauseState = 0
with open(filename, "w") as f:
for line in lines:
if getValue(line, 'G', None) == 1:
newZ = getValue(line, 'Z', z)
x = getValue(line, 'X', x)
y = getValue(line, 'Y', y)
if newZ != z:
z = newZ
if z < pauseLevel and pauseState == 0:
pauseState = 1
if z >= pauseLevel and pauseState == 1:
pauseState = 2
#Retract
f.write("M83\n")
f.write("G1 E-%f F6000\n" % (retractAmount))
#Move the head away
f.write("G1 X%f Y%f F9000\n" % (parkX, parkY))
#Wait till the user continues printing
f.write("M0\n")
#Move the head back
f.write("G1 X%f Y%f F9000\n" % (x, y))
f.write("G1 E%f F6000\n" % (retractAmount))
f.write("G1 F9000\n")
f.write("M82\n")
f.write(line)
|
Add pauze at height plugin.#Name: Pause at height
#Info: Pause the printer at a certain height
#Depend: GCode
#Type: postprocess
#Param: pauseLevel(float:5.0) Pause height (mm)
#Param: parkX(float:190) Head park X (mm)
#Param: parkY(float:190) Head park Y (mm)
#Param: retractAmount(float:5) Retraction amount (mm)
import re
def getValue(line, key, default = None):
if not key in line or (';' in line and line.find(key) > line.find(';')):
return default
subPart = line[line.find(key) + 1:]
m = re.search('^[0-9]+\.?[0-9]*', subPart)
if m == None:
return default
try:
return float(m.group(0))
except:
return default
with open(filename, "r") as f:
lines = f.readlines()
z = 0
x = 0
y = 0
pauseState = 0
with open(filename, "w") as f:
for line in lines:
if getValue(line, 'G', None) == 1:
newZ = getValue(line, 'Z', z)
x = getValue(line, 'X', x)
y = getValue(line, 'Y', y)
if newZ != z:
z = newZ
if z < pauseLevel and pauseState == 0:
pauseState = 1
if z >= pauseLevel and pauseState == 1:
pauseState = 2
#Retract
f.write("M83\n")
f.write("G1 E-%f F6000\n" % (retractAmount))
#Move the head away
f.write("G1 X%f Y%f F9000\n" % (parkX, parkY))
#Wait till the user continues printing
f.write("M0\n")
#Move the head back
f.write("G1 X%f Y%f F9000\n" % (x, y))
f.write("G1 E%f F6000\n" % (retractAmount))
f.write("G1 F9000\n")
f.write("M82\n")
f.write(line)
|
<commit_before><commit_msg>Add pauze at height plugin.<commit_after>#Name: Pause at height
#Info: Pause the printer at a certain height
#Depend: GCode
#Type: postprocess
#Param: pauseLevel(float:5.0) Pause height (mm)
#Param: parkX(float:190) Head park X (mm)
#Param: parkY(float:190) Head park Y (mm)
#Param: retractAmount(float:5) Retraction amount (mm)
import re
def getValue(line, key, default = None):
if not key in line or (';' in line and line.find(key) > line.find(';')):
return default
subPart = line[line.find(key) + 1:]
m = re.search('^[0-9]+\.?[0-9]*', subPart)
if m == None:
return default
try:
return float(m.group(0))
except:
return default
with open(filename, "r") as f:
lines = f.readlines()
z = 0
x = 0
y = 0
pauseState = 0
with open(filename, "w") as f:
for line in lines:
if getValue(line, 'G', None) == 1:
newZ = getValue(line, 'Z', z)
x = getValue(line, 'X', x)
y = getValue(line, 'Y', y)
if newZ != z:
z = newZ
if z < pauseLevel and pauseState == 0:
pauseState = 1
if z >= pauseLevel and pauseState == 1:
pauseState = 2
#Retract
f.write("M83\n")
f.write("G1 E-%f F6000\n" % (retractAmount))
#Move the head away
f.write("G1 X%f Y%f F9000\n" % (parkX, parkY))
#Wait till the user continues printing
f.write("M0\n")
#Move the head back
f.write("G1 X%f Y%f F9000\n" % (x, y))
f.write("G1 E%f F6000\n" % (retractAmount))
f.write("G1 F9000\n")
f.write("M82\n")
f.write(line)
|
87ac04769baef5dd9e93cf72def19126012b94c7
|
backend/breach/tests/test_views.py
|
backend/breach/tests/test_views.py
|
from django.test import Client, TestCase
from django.core.urlresolvers import reverse
from breach.models import Target
from breach.views import VictimListView
import json
class ViewsTestCase(TestCase):
def setUp(self):
self.client = Client()
self.target1 = Target.objects.create(
name='ruptureit',
endpoint='https://ruptureit.com/test.php?reflection=%s',
prefix='imper',
alphabet='abcdefghijklmnopqrstuvwxyz',
secretlength=9,
alignmentalphabet='ABCDEFGHIJKLMNOPQRSTUVWXYZ',
recordscardinality=1,
method=1
)
self.target2 = Target.objects.create(
name='ruptureit2',
endpoint='https://ruptureit.com/test.php?reflection=%s',
prefix='imper',
alphabet='abcdefghijklmnopqrstuvwxyz',
secretlength=9,
alignmentalphabet='ABCDEFGHIJKLMNOPQRSTUVWXYZ',
recordscardinality=1,
method=2
)
self.target1_data = {
'name': 'ruptureit',
'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
'prefix': 'imper',
'alphabet': 'abcdefghijklmnopqrstuvwxyz',
'secretlength': 9,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'recordscardinality': 1,
'method': 1
}
self.target2_data = {
'name': 'ruptureit2',
'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
'prefix': 'imper',
'alphabet': 'abcdefghijklmnopqrstuvwxyz',
'secretlength': 9,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'recordscardinality': 1,
'method': 2
}
def test_target_post(self):
"""
Test post requests for /target
"""
# Create the request
data = {
'name': 'ruptureit3',
'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
'prefix': 'imper',
'alphabet': 'abcdefghijklmnopqrstuvwxyz',
'secretlength': 9,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'recordscardinality': 1,
'method': 1
}
response = self.client.post(reverse('TargetView'), json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)['target_name'], 'ruptureit3')
|
Add test for /target post
|
Add test for /target post
|
Python
|
mit
|
dionyziz/rupture,esarafianou/rupture,dimriou/rupture,esarafianou/rupture,dionyziz/rupture,dimkarakostas/rupture,dimkarakostas/rupture,dimriou/rupture,dionyziz/rupture,esarafianou/rupture,dimkarakostas/rupture,dimriou/rupture,dimkarakostas/rupture,dimriou/rupture,dionyziz/rupture,esarafianou/rupture,dimriou/rupture,dionyziz/rupture,dimkarakostas/rupture
|
Add test for /target post
|
from django.test import Client, TestCase
from django.core.urlresolvers import reverse
from breach.models import Target
from breach.views import VictimListView
import json
class ViewsTestCase(TestCase):
def setUp(self):
self.client = Client()
self.target1 = Target.objects.create(
name='ruptureit',
endpoint='https://ruptureit.com/test.php?reflection=%s',
prefix='imper',
alphabet='abcdefghijklmnopqrstuvwxyz',
secretlength=9,
alignmentalphabet='ABCDEFGHIJKLMNOPQRSTUVWXYZ',
recordscardinality=1,
method=1
)
self.target2 = Target.objects.create(
name='ruptureit2',
endpoint='https://ruptureit.com/test.php?reflection=%s',
prefix='imper',
alphabet='abcdefghijklmnopqrstuvwxyz',
secretlength=9,
alignmentalphabet='ABCDEFGHIJKLMNOPQRSTUVWXYZ',
recordscardinality=1,
method=2
)
self.target1_data = {
'name': 'ruptureit',
'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
'prefix': 'imper',
'alphabet': 'abcdefghijklmnopqrstuvwxyz',
'secretlength': 9,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'recordscardinality': 1,
'method': 1
}
self.target2_data = {
'name': 'ruptureit2',
'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
'prefix': 'imper',
'alphabet': 'abcdefghijklmnopqrstuvwxyz',
'secretlength': 9,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'recordscardinality': 1,
'method': 2
}
def test_target_post(self):
"""
Test post requests for /target
"""
# Create the request
data = {
'name': 'ruptureit3',
'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
'prefix': 'imper',
'alphabet': 'abcdefghijklmnopqrstuvwxyz',
'secretlength': 9,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'recordscardinality': 1,
'method': 1
}
response = self.client.post(reverse('TargetView'), json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)['target_name'], 'ruptureit3')
|
<commit_before><commit_msg>Add test for /target post<commit_after>
|
from django.test import Client, TestCase
from django.core.urlresolvers import reverse
from breach.models import Target
from breach.views import VictimListView
import json
class ViewsTestCase(TestCase):
def setUp(self):
self.client = Client()
self.target1 = Target.objects.create(
name='ruptureit',
endpoint='https://ruptureit.com/test.php?reflection=%s',
prefix='imper',
alphabet='abcdefghijklmnopqrstuvwxyz',
secretlength=9,
alignmentalphabet='ABCDEFGHIJKLMNOPQRSTUVWXYZ',
recordscardinality=1,
method=1
)
self.target2 = Target.objects.create(
name='ruptureit2',
endpoint='https://ruptureit.com/test.php?reflection=%s',
prefix='imper',
alphabet='abcdefghijklmnopqrstuvwxyz',
secretlength=9,
alignmentalphabet='ABCDEFGHIJKLMNOPQRSTUVWXYZ',
recordscardinality=1,
method=2
)
self.target1_data = {
'name': 'ruptureit',
'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
'prefix': 'imper',
'alphabet': 'abcdefghijklmnopqrstuvwxyz',
'secretlength': 9,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'recordscardinality': 1,
'method': 1
}
self.target2_data = {
'name': 'ruptureit2',
'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
'prefix': 'imper',
'alphabet': 'abcdefghijklmnopqrstuvwxyz',
'secretlength': 9,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'recordscardinality': 1,
'method': 2
}
def test_target_post(self):
"""
Test post requests for /target
"""
# Create the request
data = {
'name': 'ruptureit3',
'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
'prefix': 'imper',
'alphabet': 'abcdefghijklmnopqrstuvwxyz',
'secretlength': 9,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'recordscardinality': 1,
'method': 1
}
response = self.client.post(reverse('TargetView'), json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)['target_name'], 'ruptureit3')
|
Add test for /target postfrom django.test import Client, TestCase
from django.core.urlresolvers import reverse
from breach.models import Target
from breach.views import VictimListView
import json
class ViewsTestCase(TestCase):
def setUp(self):
self.client = Client()
self.target1 = Target.objects.create(
name='ruptureit',
endpoint='https://ruptureit.com/test.php?reflection=%s',
prefix='imper',
alphabet='abcdefghijklmnopqrstuvwxyz',
secretlength=9,
alignmentalphabet='ABCDEFGHIJKLMNOPQRSTUVWXYZ',
recordscardinality=1,
method=1
)
self.target2 = Target.objects.create(
name='ruptureit2',
endpoint='https://ruptureit.com/test.php?reflection=%s',
prefix='imper',
alphabet='abcdefghijklmnopqrstuvwxyz',
secretlength=9,
alignmentalphabet='ABCDEFGHIJKLMNOPQRSTUVWXYZ',
recordscardinality=1,
method=2
)
self.target1_data = {
'name': 'ruptureit',
'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
'prefix': 'imper',
'alphabet': 'abcdefghijklmnopqrstuvwxyz',
'secretlength': 9,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'recordscardinality': 1,
'method': 1
}
self.target2_data = {
'name': 'ruptureit2',
'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
'prefix': 'imper',
'alphabet': 'abcdefghijklmnopqrstuvwxyz',
'secretlength': 9,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'recordscardinality': 1,
'method': 2
}
def test_target_post(self):
"""
Test post requests for /target
"""
# Create the request
data = {
'name': 'ruptureit3',
'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
'prefix': 'imper',
'alphabet': 'abcdefghijklmnopqrstuvwxyz',
'secretlength': 9,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'recordscardinality': 1,
'method': 1
}
response = self.client.post(reverse('TargetView'), json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)['target_name'], 'ruptureit3')
|
<commit_before><commit_msg>Add test for /target post<commit_after>from django.test import Client, TestCase
from django.core.urlresolvers import reverse
from breach.models import Target
from breach.views import VictimListView
import json
class ViewsTestCase(TestCase):
def setUp(self):
self.client = Client()
self.target1 = Target.objects.create(
name='ruptureit',
endpoint='https://ruptureit.com/test.php?reflection=%s',
prefix='imper',
alphabet='abcdefghijklmnopqrstuvwxyz',
secretlength=9,
alignmentalphabet='ABCDEFGHIJKLMNOPQRSTUVWXYZ',
recordscardinality=1,
method=1
)
self.target2 = Target.objects.create(
name='ruptureit2',
endpoint='https://ruptureit.com/test.php?reflection=%s',
prefix='imper',
alphabet='abcdefghijklmnopqrstuvwxyz',
secretlength=9,
alignmentalphabet='ABCDEFGHIJKLMNOPQRSTUVWXYZ',
recordscardinality=1,
method=2
)
self.target1_data = {
'name': 'ruptureit',
'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
'prefix': 'imper',
'alphabet': 'abcdefghijklmnopqrstuvwxyz',
'secretlength': 9,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'recordscardinality': 1,
'method': 1
}
self.target2_data = {
'name': 'ruptureit2',
'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
'prefix': 'imper',
'alphabet': 'abcdefghijklmnopqrstuvwxyz',
'secretlength': 9,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'recordscardinality': 1,
'method': 2
}
def test_target_post(self):
"""
Test post requests for /target
"""
# Create the request
data = {
'name': 'ruptureit3',
'endpoint': 'https://ruptureit.com/test.php?reflection=%s',
'prefix': 'imper',
'alphabet': 'abcdefghijklmnopqrstuvwxyz',
'secretlength': 9,
'alignmentalphabet': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'recordscardinality': 1,
'method': 1
}
response = self.client.post(reverse('TargetView'), json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)['target_name'], 'ruptureit3')
|
|
acd5ab934394a8673439cafa01dba14b051b4250
|
Lib/test/curses_tests.py
|
Lib/test/curses_tests.py
|
#!/usr/bin/env python
#
# $Id: ncurses.py 36559 2004-07-18 05:56:09Z tim_one $
#
# Interactive test suite for the curses module.
# This script displays various things and the user should verify whether
# they display correctly.
#
import curses
from curses import textpad
def test_textpad(stdscr, insert_mode=False):
ncols, nlines = 8, 3
uly, ulx = 3, 2
if insert_mode:
mode = 'insert mode'
else:
mode = 'overwrite mode'
stdscr.addstr(uly-3, ulx, "Use Ctrl-G to end editing (%s)." % mode)
stdscr.addstr(uly-2, ulx, "Be sure to try typing in the lower-right corner.")
win = curses.newwin(nlines, ncols, uly, ulx)
textpad.rectangle(stdscr, uly-1, ulx-1, uly + nlines, ulx + ncols)
stdscr.refresh()
box = textpad.Textbox(win, insert_mode)
contents = box.edit()
stdscr.addstr(uly+ncols+2, 0, "Text entered in the box\n")
stdscr.addstr(repr(contents))
stdscr.addstr('\n')
stdscr.addstr('Press any key')
stdscr.getch()
for i in range(3):
stdscr.move(uly+ncols+2 + i, 0)
stdscr.clrtoeol()
def main(stdscr):
stdscr.clear()
test_textpad(stdscr, False)
test_textpad(stdscr, True)
if __name__ == '__main__':
curses.wrapper(main)
|
Add an interactive test script for exercising curses
|
Add an interactive test script for exercising curses
|
Python
|
mit
|
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
|
Add an interactive test script for exercising curses
|
#!/usr/bin/env python
#
# $Id: ncurses.py 36559 2004-07-18 05:56:09Z tim_one $
#
# Interactive test suite for the curses module.
# This script displays various things and the user should verify whether
# they display correctly.
#
import curses
from curses import textpad
def test_textpad(stdscr, insert_mode=False):
ncols, nlines = 8, 3
uly, ulx = 3, 2
if insert_mode:
mode = 'insert mode'
else:
mode = 'overwrite mode'
stdscr.addstr(uly-3, ulx, "Use Ctrl-G to end editing (%s)." % mode)
stdscr.addstr(uly-2, ulx, "Be sure to try typing in the lower-right corner.")
win = curses.newwin(nlines, ncols, uly, ulx)
textpad.rectangle(stdscr, uly-1, ulx-1, uly + nlines, ulx + ncols)
stdscr.refresh()
box = textpad.Textbox(win, insert_mode)
contents = box.edit()
stdscr.addstr(uly+ncols+2, 0, "Text entered in the box\n")
stdscr.addstr(repr(contents))
stdscr.addstr('\n')
stdscr.addstr('Press any key')
stdscr.getch()
for i in range(3):
stdscr.move(uly+ncols+2 + i, 0)
stdscr.clrtoeol()
def main(stdscr):
stdscr.clear()
test_textpad(stdscr, False)
test_textpad(stdscr, True)
if __name__ == '__main__':
curses.wrapper(main)
|
<commit_before><commit_msg>Add an interactive test script for exercising curses<commit_after>
|
#!/usr/bin/env python
#
# $Id: ncurses.py 36559 2004-07-18 05:56:09Z tim_one $
#
# Interactive test suite for the curses module.
# This script displays various things and the user should verify whether
# they display correctly.
#
import curses
from curses import textpad
def test_textpad(stdscr, insert_mode=False):
ncols, nlines = 8, 3
uly, ulx = 3, 2
if insert_mode:
mode = 'insert mode'
else:
mode = 'overwrite mode'
stdscr.addstr(uly-3, ulx, "Use Ctrl-G to end editing (%s)." % mode)
stdscr.addstr(uly-2, ulx, "Be sure to try typing in the lower-right corner.")
win = curses.newwin(nlines, ncols, uly, ulx)
textpad.rectangle(stdscr, uly-1, ulx-1, uly + nlines, ulx + ncols)
stdscr.refresh()
box = textpad.Textbox(win, insert_mode)
contents = box.edit()
stdscr.addstr(uly+ncols+2, 0, "Text entered in the box\n")
stdscr.addstr(repr(contents))
stdscr.addstr('\n')
stdscr.addstr('Press any key')
stdscr.getch()
for i in range(3):
stdscr.move(uly+ncols+2 + i, 0)
stdscr.clrtoeol()
def main(stdscr):
stdscr.clear()
test_textpad(stdscr, False)
test_textpad(stdscr, True)
if __name__ == '__main__':
curses.wrapper(main)
|
Add an interactive test script for exercising curses#!/usr/bin/env python
#
# $Id: ncurses.py 36559 2004-07-18 05:56:09Z tim_one $
#
# Interactive test suite for the curses module.
# This script displays various things and the user should verify whether
# they display correctly.
#
import curses
from curses import textpad
def test_textpad(stdscr, insert_mode=False):
ncols, nlines = 8, 3
uly, ulx = 3, 2
if insert_mode:
mode = 'insert mode'
else:
mode = 'overwrite mode'
stdscr.addstr(uly-3, ulx, "Use Ctrl-G to end editing (%s)." % mode)
stdscr.addstr(uly-2, ulx, "Be sure to try typing in the lower-right corner.")
win = curses.newwin(nlines, ncols, uly, ulx)
textpad.rectangle(stdscr, uly-1, ulx-1, uly + nlines, ulx + ncols)
stdscr.refresh()
box = textpad.Textbox(win, insert_mode)
contents = box.edit()
stdscr.addstr(uly+ncols+2, 0, "Text entered in the box\n")
stdscr.addstr(repr(contents))
stdscr.addstr('\n')
stdscr.addstr('Press any key')
stdscr.getch()
for i in range(3):
stdscr.move(uly+ncols+2 + i, 0)
stdscr.clrtoeol()
def main(stdscr):
stdscr.clear()
test_textpad(stdscr, False)
test_textpad(stdscr, True)
if __name__ == '__main__':
curses.wrapper(main)
|
<commit_before><commit_msg>Add an interactive test script for exercising curses<commit_after>#!/usr/bin/env python
#
# $Id: ncurses.py 36559 2004-07-18 05:56:09Z tim_one $
#
# Interactive test suite for the curses module.
# This script displays various things and the user should verify whether
# they display correctly.
#
import curses
from curses import textpad
def test_textpad(stdscr, insert_mode=False):
ncols, nlines = 8, 3
uly, ulx = 3, 2
if insert_mode:
mode = 'insert mode'
else:
mode = 'overwrite mode'
stdscr.addstr(uly-3, ulx, "Use Ctrl-G to end editing (%s)." % mode)
stdscr.addstr(uly-2, ulx, "Be sure to try typing in the lower-right corner.")
win = curses.newwin(nlines, ncols, uly, ulx)
textpad.rectangle(stdscr, uly-1, ulx-1, uly + nlines, ulx + ncols)
stdscr.refresh()
box = textpad.Textbox(win, insert_mode)
contents = box.edit()
stdscr.addstr(uly+ncols+2, 0, "Text entered in the box\n")
stdscr.addstr(repr(contents))
stdscr.addstr('\n')
stdscr.addstr('Press any key')
stdscr.getch()
for i in range(3):
stdscr.move(uly+ncols+2 + i, 0)
stdscr.clrtoeol()
def main(stdscr):
stdscr.clear()
test_textpad(stdscr, False)
test_textpad(stdscr, True)
if __name__ == '__main__':
curses.wrapper(main)
|
|
7502d1a9b41f7c6978ace6f48577900ec1bc8a3c
|
Python/well_generator.py
|
Python/well_generator.py
|
#! /usr/bin/env python3
import argparse
import string
def main(args):
rows = string.ascii_uppercase[:8]
cols = range(1, 13)
if args.r:
wells = [(r, c) for r in rows for c in cols]
else:
wells = [(r, c) for c in cols for r in rows]
if args.p:
fstr = "{}{:02d}"
else:
fstr ="{}{}"
for w in wells[:args.n]:
print(fstr.format(*w))
def check_range(n):
x = int(n)
if x < 1 or x > 96:
raise argparse.ArgumentTypeError("{} must be an interger between 1–96".format(n))
return x
if __name__ == "__main__":
p = argparse.ArgumentParser(
description="Generate plate coordinates/well labels")
p.add_argument("-r", action="store_true",
default=False, help="Order wells by row (default is by columns)")
p.add_argument("-p", action="store_true",
default=False, help="Zero-pad numbers (default is don't)")
p.add_argument("-n", type=check_range, action="store", default=96,
help="Stop after printing these many wells (1-96; default: 96)")
args = p.parse_args()
main(args)
|
Add simple script to generate plate labels
|
Add simple script to generate plate labels
|
Python
|
apache-2.0
|
jgruselius/misc,jgruselius/misc,jgruselius/misc,jgruselius/misc,jgruselius/misc
|
Add simple script to generate plate labels
|
#! /usr/bin/env python3
import argparse
import string
def main(args):
rows = string.ascii_uppercase[:8]
cols = range(1, 13)
if args.r:
wells = [(r, c) for r in rows for c in cols]
else:
wells = [(r, c) for c in cols for r in rows]
if args.p:
fstr = "{}{:02d}"
else:
fstr ="{}{}"
for w in wells[:args.n]:
print(fstr.format(*w))
def check_range(n):
x = int(n)
if x < 1 or x > 96:
raise argparse.ArgumentTypeError("{} must be an interger between 1–96".format(n))
return x
if __name__ == "__main__":
p = argparse.ArgumentParser(
description="Generate plate coordinates/well labels")
p.add_argument("-r", action="store_true",
default=False, help="Order wells by row (default is by columns)")
p.add_argument("-p", action="store_true",
default=False, help="Zero-pad numbers (default is don't)")
p.add_argument("-n", type=check_range, action="store", default=96,
help="Stop after printing these many wells (1-96; default: 96)")
args = p.parse_args()
main(args)
|
<commit_before><commit_msg>Add simple script to generate plate labels<commit_after>
|
#! /usr/bin/env python3
import argparse
import string
def main(args):
rows = string.ascii_uppercase[:8]
cols = range(1, 13)
if args.r:
wells = [(r, c) for r in rows for c in cols]
else:
wells = [(r, c) for c in cols for r in rows]
if args.p:
fstr = "{}{:02d}"
else:
fstr ="{}{}"
for w in wells[:args.n]:
print(fstr.format(*w))
def check_range(n):
x = int(n)
if x < 1 or x > 96:
raise argparse.ArgumentTypeError("{} must be an interger between 1–96".format(n))
return x
if __name__ == "__main__":
p = argparse.ArgumentParser(
description="Generate plate coordinates/well labels")
p.add_argument("-r", action="store_true",
default=False, help="Order wells by row (default is by columns)")
p.add_argument("-p", action="store_true",
default=False, help="Zero-pad numbers (default is don't)")
p.add_argument("-n", type=check_range, action="store", default=96,
help="Stop after printing these many wells (1-96; default: 96)")
args = p.parse_args()
main(args)
|
Add simple script to generate plate labels#! /usr/bin/env python3
import argparse
import string
def main(args):
rows = string.ascii_uppercase[:8]
cols = range(1, 13)
if args.r:
wells = [(r, c) for r in rows for c in cols]
else:
wells = [(r, c) for c in cols for r in rows]
if args.p:
fstr = "{}{:02d}"
else:
fstr ="{}{}"
for w in wells[:args.n]:
print(fstr.format(*w))
def check_range(n):
x = int(n)
if x < 1 or x > 96:
raise argparse.ArgumentTypeError("{} must be an interger between 1–96".format(n))
return x
if __name__ == "__main__":
p = argparse.ArgumentParser(
description="Generate plate coordinates/well labels")
p.add_argument("-r", action="store_true",
default=False, help="Order wells by row (default is by columns)")
p.add_argument("-p", action="store_true",
default=False, help="Zero-pad numbers (default is don't)")
p.add_argument("-n", type=check_range, action="store", default=96,
help="Stop after printing these many wells (1-96; default: 96)")
args = p.parse_args()
main(args)
|
<commit_before><commit_msg>Add simple script to generate plate labels<commit_after>#! /usr/bin/env python3
import argparse
import string
def main(args):
rows = string.ascii_uppercase[:8]
cols = range(1, 13)
if args.r:
wells = [(r, c) for r in rows for c in cols]
else:
wells = [(r, c) for c in cols for r in rows]
if args.p:
fstr = "{}{:02d}"
else:
fstr ="{}{}"
for w in wells[:args.n]:
print(fstr.format(*w))
def check_range(n):
x = int(n)
if x < 1 or x > 96:
raise argparse.ArgumentTypeError("{} must be an interger between 1–96".format(n))
return x
if __name__ == "__main__":
p = argparse.ArgumentParser(
description="Generate plate coordinates/well labels")
p.add_argument("-r", action="store_true",
default=False, help="Order wells by row (default is by columns)")
p.add_argument("-p", action="store_true",
default=False, help="Zero-pad numbers (default is don't)")
p.add_argument("-n", type=check_range, action="store", default=96,
help="Stop after printing these many wells (1-96; default: 96)")
args = p.parse_args()
main(args)
|
|
2f34973ff41c868c6102f6faf6952f2ea87e7895
|
server/proposal/migrations/0025_set_proposal_updated.py
|
server/proposal/migrations/0025_set_proposal_updated.py
|
from django.db import models, migrations
def set_updated(apps, schema_editor):
Proposal = apps.get_model("proposal", "Proposal")
db_alias = schema_editor.connection.alias
proposals = Proposal.objects.using(db_alias).all()
for prop in proposals:
prop.updated = prop.modified
prop.save()
def noop(_, __):
pass
class Migration(migrations.Migration):
dependencies = [
("proposal", "0024_proposal_updated")
]
operations = [
migrations.RunPython(set_updated, noop)
]
|
Use 'modified' field as default for 'updated'
|
Use 'modified' field as default for 'updated'
|
Python
|
mit
|
codeforboston/cornerwise,codeforboston/cornerwise,cityofsomerville/cornerwise,codeforboston/cornerwise,cityofsomerville/citydash,cityofsomerville/citydash,cityofsomerville/cornerwise,codeforboston/cornerwise,cityofsomerville/citydash,cityofsomerville/cornerwise,cityofsomerville/citydash,cityofsomerville/cornerwise
|
Use 'modified' field as default for 'updated'
|
from django.db import models, migrations
def set_updated(apps, schema_editor):
Proposal = apps.get_model("proposal", "Proposal")
db_alias = schema_editor.connection.alias
proposals = Proposal.objects.using(db_alias).all()
for prop in proposals:
prop.updated = prop.modified
prop.save()
def noop(_, __):
pass
class Migration(migrations.Migration):
dependencies = [
("proposal", "0024_proposal_updated")
]
operations = [
migrations.RunPython(set_updated, noop)
]
|
<commit_before><commit_msg>Use 'modified' field as default for 'updated'<commit_after>
|
from django.db import models, migrations
def set_updated(apps, schema_editor):
Proposal = apps.get_model("proposal", "Proposal")
db_alias = schema_editor.connection.alias
proposals = Proposal.objects.using(db_alias).all()
for prop in proposals:
prop.updated = prop.modified
prop.save()
def noop(_, __):
pass
class Migration(migrations.Migration):
dependencies = [
("proposal", "0024_proposal_updated")
]
operations = [
migrations.RunPython(set_updated, noop)
]
|
Use 'modified' field as default for 'updated'from django.db import models, migrations
def set_updated(apps, schema_editor):
Proposal = apps.get_model("proposal", "Proposal")
db_alias = schema_editor.connection.alias
proposals = Proposal.objects.using(db_alias).all()
for prop in proposals:
prop.updated = prop.modified
prop.save()
def noop(_, __):
pass
class Migration(migrations.Migration):
dependencies = [
("proposal", "0024_proposal_updated")
]
operations = [
migrations.RunPython(set_updated, noop)
]
|
<commit_before><commit_msg>Use 'modified' field as default for 'updated'<commit_after>from django.db import models, migrations
def set_updated(apps, schema_editor):
Proposal = apps.get_model("proposal", "Proposal")
db_alias = schema_editor.connection.alias
proposals = Proposal.objects.using(db_alias).all()
for prop in proposals:
prop.updated = prop.modified
prop.save()
def noop(_, __):
pass
class Migration(migrations.Migration):
dependencies = [
("proposal", "0024_proposal_updated")
]
operations = [
migrations.RunPython(set_updated, noop)
]
|
|
4570ce14333ebc0bae3e09a59f28d7170cfc4621
|
dci/alembic/versions/b58867f72568_add_feeder_role.py
|
dci/alembic/versions/b58867f72568_add_feeder_role.py
|
#
# Copyright (C) 2017 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add_feeder_role
Revision ID: b58867f72568
Revises: 827c558895bc
Create Date: 2017-10-27 08:29:07.283357
"""
# revision identifiers, used by Alembic.
revision = 'b58867f72568'
down_revision = '827c558895bc'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql as pg
import datetime
import dci.common.utils as utils
RESOURCE_STATES = ['active', 'inactive', 'archived']
STATES = sa.Enum(*RESOURCE_STATES, name='states')
ROLES = sa.Table(
'roles', sa.MetaData(),
sa.Column('id', pg.UUID(as_uuid=True), primary_key=True,
default=utils.gen_uuid),
sa.Column('created_at', sa.DateTime(),
default=datetime.datetime.utcnow, nullable=False),
sa.Column('updated_at', sa.DateTime(),
onupdate=datetime.datetime.utcnow,
default=datetime.datetime.utcnow, nullable=False),
sa.Column('etag', sa.String(40), nullable=False, default=utils.gen_etag,
onupdate=utils.gen_etag),
sa.Column('name', sa.String(255), nullable=False),
sa.Column('label', sa.String(255), nullable=False),
sa.Column('description', sa.Text),
sa.UniqueConstraint('label', name='roles_label_key'),
sa.Column('state', STATES, default='active')
)
def upgrade():
db_conn = op.get_bind()
feeder_role_id = utils.gen_uuid()
feeder_role = {
'id': feeder_role_id,
'created_at': datetime.datetime.utcnow().isoformat(),
'updated_at': datetime.datetime.utcnow().isoformat(),
'etag': utils.gen_etag(),
'name': 'Feeder',
'label': 'FEEDER',
'description': 'A Feeder',
}
db_conn.execute(ROLES.insert().values(**feeder_role))
def downgrade():
pass
|
Add the feeder role in the ROLES table
|
Feeder: Add the feeder role in the ROLES table
Change-Id: I4c09e0a5e7d08975602a683f4cecbf993cdec4ba
|
Python
|
apache-2.0
|
redhat-cip/dci-control-server,enovance/dci-control-server,redhat-cip/dci-control-server,enovance/dci-control-server
|
Feeder: Add the feeder role in the ROLES table
Change-Id: I4c09e0a5e7d08975602a683f4cecbf993cdec4ba
|
#
# Copyright (C) 2017 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add_feeder_role
Revision ID: b58867f72568
Revises: 827c558895bc
Create Date: 2017-10-27 08:29:07.283357
"""
# revision identifiers, used by Alembic.
revision = 'b58867f72568'
down_revision = '827c558895bc'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql as pg
import datetime
import dci.common.utils as utils
RESOURCE_STATES = ['active', 'inactive', 'archived']
STATES = sa.Enum(*RESOURCE_STATES, name='states')
ROLES = sa.Table(
'roles', sa.MetaData(),
sa.Column('id', pg.UUID(as_uuid=True), primary_key=True,
default=utils.gen_uuid),
sa.Column('created_at', sa.DateTime(),
default=datetime.datetime.utcnow, nullable=False),
sa.Column('updated_at', sa.DateTime(),
onupdate=datetime.datetime.utcnow,
default=datetime.datetime.utcnow, nullable=False),
sa.Column('etag', sa.String(40), nullable=False, default=utils.gen_etag,
onupdate=utils.gen_etag),
sa.Column('name', sa.String(255), nullable=False),
sa.Column('label', sa.String(255), nullable=False),
sa.Column('description', sa.Text),
sa.UniqueConstraint('label', name='roles_label_key'),
sa.Column('state', STATES, default='active')
)
def upgrade():
db_conn = op.get_bind()
feeder_role_id = utils.gen_uuid()
feeder_role = {
'id': feeder_role_id,
'created_at': datetime.datetime.utcnow().isoformat(),
'updated_at': datetime.datetime.utcnow().isoformat(),
'etag': utils.gen_etag(),
'name': 'Feeder',
'label': 'FEEDER',
'description': 'A Feeder',
}
db_conn.execute(ROLES.insert().values(**feeder_role))
def downgrade():
pass
|
<commit_before><commit_msg>Feeder: Add the feeder role in the ROLES table
Change-Id: I4c09e0a5e7d08975602a683f4cecbf993cdec4ba<commit_after>
|
#
# Copyright (C) 2017 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add_feeder_role
Revision ID: b58867f72568
Revises: 827c558895bc
Create Date: 2017-10-27 08:29:07.283357
"""
# revision identifiers, used by Alembic.
revision = 'b58867f72568'
down_revision = '827c558895bc'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql as pg
import datetime
import dci.common.utils as utils
RESOURCE_STATES = ['active', 'inactive', 'archived']
STATES = sa.Enum(*RESOURCE_STATES, name='states')
ROLES = sa.Table(
'roles', sa.MetaData(),
sa.Column('id', pg.UUID(as_uuid=True), primary_key=True,
default=utils.gen_uuid),
sa.Column('created_at', sa.DateTime(),
default=datetime.datetime.utcnow, nullable=False),
sa.Column('updated_at', sa.DateTime(),
onupdate=datetime.datetime.utcnow,
default=datetime.datetime.utcnow, nullable=False),
sa.Column('etag', sa.String(40), nullable=False, default=utils.gen_etag,
onupdate=utils.gen_etag),
sa.Column('name', sa.String(255), nullable=False),
sa.Column('label', sa.String(255), nullable=False),
sa.Column('description', sa.Text),
sa.UniqueConstraint('label', name='roles_label_key'),
sa.Column('state', STATES, default='active')
)
def upgrade():
db_conn = op.get_bind()
feeder_role_id = utils.gen_uuid()
feeder_role = {
'id': feeder_role_id,
'created_at': datetime.datetime.utcnow().isoformat(),
'updated_at': datetime.datetime.utcnow().isoformat(),
'etag': utils.gen_etag(),
'name': 'Feeder',
'label': 'FEEDER',
'description': 'A Feeder',
}
db_conn.execute(ROLES.insert().values(**feeder_role))
def downgrade():
pass
|
Feeder: Add the feeder role in the ROLES table
Change-Id: I4c09e0a5e7d08975602a683f4cecbf993cdec4ba#
# Copyright (C) 2017 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add_feeder_role
Revision ID: b58867f72568
Revises: 827c558895bc
Create Date: 2017-10-27 08:29:07.283357
"""
# revision identifiers, used by Alembic.
revision = 'b58867f72568'
down_revision = '827c558895bc'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql as pg
import datetime
import dci.common.utils as utils
RESOURCE_STATES = ['active', 'inactive', 'archived']
STATES = sa.Enum(*RESOURCE_STATES, name='states')
ROLES = sa.Table(
'roles', sa.MetaData(),
sa.Column('id', pg.UUID(as_uuid=True), primary_key=True,
default=utils.gen_uuid),
sa.Column('created_at', sa.DateTime(),
default=datetime.datetime.utcnow, nullable=False),
sa.Column('updated_at', sa.DateTime(),
onupdate=datetime.datetime.utcnow,
default=datetime.datetime.utcnow, nullable=False),
sa.Column('etag', sa.String(40), nullable=False, default=utils.gen_etag,
onupdate=utils.gen_etag),
sa.Column('name', sa.String(255), nullable=False),
sa.Column('label', sa.String(255), nullable=False),
sa.Column('description', sa.Text),
sa.UniqueConstraint('label', name='roles_label_key'),
sa.Column('state', STATES, default='active')
)
def upgrade():
db_conn = op.get_bind()
feeder_role_id = utils.gen_uuid()
feeder_role = {
'id': feeder_role_id,
'created_at': datetime.datetime.utcnow().isoformat(),
'updated_at': datetime.datetime.utcnow().isoformat(),
'etag': utils.gen_etag(),
'name': 'Feeder',
'label': 'FEEDER',
'description': 'A Feeder',
}
db_conn.execute(ROLES.insert().values(**feeder_role))
def downgrade():
pass
|
<commit_before><commit_msg>Feeder: Add the feeder role in the ROLES table
Change-Id: I4c09e0a5e7d08975602a683f4cecbf993cdec4ba<commit_after>#
# Copyright (C) 2017 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add_feeder_role
Revision ID: b58867f72568
Revises: 827c558895bc
Create Date: 2017-10-27 08:29:07.283357
"""
# revision identifiers, used by Alembic.
revision = 'b58867f72568'
down_revision = '827c558895bc'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql as pg
import datetime
import dci.common.utils as utils
RESOURCE_STATES = ['active', 'inactive', 'archived']
STATES = sa.Enum(*RESOURCE_STATES, name='states')
ROLES = sa.Table(
'roles', sa.MetaData(),
sa.Column('id', pg.UUID(as_uuid=True), primary_key=True,
default=utils.gen_uuid),
sa.Column('created_at', sa.DateTime(),
default=datetime.datetime.utcnow, nullable=False),
sa.Column('updated_at', sa.DateTime(),
onupdate=datetime.datetime.utcnow,
default=datetime.datetime.utcnow, nullable=False),
sa.Column('etag', sa.String(40), nullable=False, default=utils.gen_etag,
onupdate=utils.gen_etag),
sa.Column('name', sa.String(255), nullable=False),
sa.Column('label', sa.String(255), nullable=False),
sa.Column('description', sa.Text),
sa.UniqueConstraint('label', name='roles_label_key'),
sa.Column('state', STATES, default='active')
)
def upgrade():
db_conn = op.get_bind()
feeder_role_id = utils.gen_uuid()
feeder_role = {
'id': feeder_role_id,
'created_at': datetime.datetime.utcnow().isoformat(),
'updated_at': datetime.datetime.utcnow().isoformat(),
'etag': utils.gen_etag(),
'name': 'Feeder',
'label': 'FEEDER',
'description': 'A Feeder',
}
db_conn.execute(ROLES.insert().values(**feeder_role))
def downgrade():
pass
|
|
cd735fe688840c94cb92562e3b96d51ec48afe44
|
openstack/tests/functional/network/v2/test_security_group_rule.py
|
openstack/tests/functional/network/v2/test_security_group_rule.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack.network.v2 import security_group
from openstack.network.v2 import security_group_rule
from openstack.tests.functional import base
class TestSecurityGroupRule(base.BaseFunctionalTest):
NAME = uuid.uuid4().hex
IPV4 = 'IPv4'
PROTO = 'tcp'
PORT = 22
DIR = 'ingress'
ID = None
RULE_ID = None
@classmethod
def setUpClass(cls):
super(TestSecurityGroupRule, cls).setUpClass()
sot = cls.conn.network.create_security_group(name=cls.NAME)
assert isinstance(sot, security_group.SecurityGroup)
cls.assertIs(cls.NAME, sot.name)
cls.ID = sot.id
rul = cls.conn.network.create_security_group_rule(
direction=cls.DIR, ethertype=cls.IPV4,
port_range_max=cls.PORT, port_range_min=cls.PORT,
protocol=cls.PROTO, security_group_id=cls.ID)
assert isinstance(rul, security_group_rule.SecurityGroupRule)
cls.assertIs(cls.ID, rul.security_group_id)
cls.RULE_ID = rul.id
@classmethod
def tearDownClass(cls):
sot = cls.conn.network.delete_security_group_rule(cls.RULE_ID,
ignore_missing=False)
cls.assertIs(None, sot)
sot = cls.conn.network.delete_security_group(cls.ID,
ignore_missing=False)
cls.assertIs(None, sot)
def test_find(self):
sot = self.conn.network.find_security_group_rule(self.RULE_ID)
self.assertEqual(self.RULE_ID, sot.id)
def test_get(self):
sot = self.conn.network.get_security_group_rule(self.RULE_ID)
self.assertEqual(self.RULE_ID, sot.id)
self.assertEqual(self.DIR, sot.direction)
self.assertEqual(self.PROTO, sot.protocol)
self.assertEqual(self.PORT, sot.port_range_min)
self.assertEqual(self.PORT, sot.port_range_max)
self.assertEqual(self.ID, sot.security_group_id)
def test_list(self):
ids = [o.id for o in self.conn.network.security_group_rules()]
self.assertIn(self.RULE_ID, ids)
|
Add functional tests for security group rule
|
Add functional tests for security group rule
Tests:
test_find
test_get
test_list
Change-Id: If54342ec8c57b926a5217d888a43dcd98223bd69
|
Python
|
apache-2.0
|
dudymas/python-openstacksdk,dudymas/python-openstacksdk,dtroyer/python-openstacksdk,dtroyer/python-openstacksdk,mtougeron/python-openstacksdk,stackforge/python-openstacksdk,briancurtin/python-openstacksdk,stackforge/python-openstacksdk,openstack/python-openstacksdk,mtougeron/python-openstacksdk,openstack/python-openstacksdk,briancurtin/python-openstacksdk
|
Add functional tests for security group rule
Tests:
test_find
test_get
test_list
Change-Id: If54342ec8c57b926a5217d888a43dcd98223bd69
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack.network.v2 import security_group
from openstack.network.v2 import security_group_rule
from openstack.tests.functional import base
class TestSecurityGroupRule(base.BaseFunctionalTest):
NAME = uuid.uuid4().hex
IPV4 = 'IPv4'
PROTO = 'tcp'
PORT = 22
DIR = 'ingress'
ID = None
RULE_ID = None
@classmethod
def setUpClass(cls):
super(TestSecurityGroupRule, cls).setUpClass()
sot = cls.conn.network.create_security_group(name=cls.NAME)
assert isinstance(sot, security_group.SecurityGroup)
cls.assertIs(cls.NAME, sot.name)
cls.ID = sot.id
rul = cls.conn.network.create_security_group_rule(
direction=cls.DIR, ethertype=cls.IPV4,
port_range_max=cls.PORT, port_range_min=cls.PORT,
protocol=cls.PROTO, security_group_id=cls.ID)
assert isinstance(rul, security_group_rule.SecurityGroupRule)
cls.assertIs(cls.ID, rul.security_group_id)
cls.RULE_ID = rul.id
@classmethod
def tearDownClass(cls):
sot = cls.conn.network.delete_security_group_rule(cls.RULE_ID,
ignore_missing=False)
cls.assertIs(None, sot)
sot = cls.conn.network.delete_security_group(cls.ID,
ignore_missing=False)
cls.assertIs(None, sot)
def test_find(self):
sot = self.conn.network.find_security_group_rule(self.RULE_ID)
self.assertEqual(self.RULE_ID, sot.id)
def test_get(self):
sot = self.conn.network.get_security_group_rule(self.RULE_ID)
self.assertEqual(self.RULE_ID, sot.id)
self.assertEqual(self.DIR, sot.direction)
self.assertEqual(self.PROTO, sot.protocol)
self.assertEqual(self.PORT, sot.port_range_min)
self.assertEqual(self.PORT, sot.port_range_max)
self.assertEqual(self.ID, sot.security_group_id)
def test_list(self):
ids = [o.id for o in self.conn.network.security_group_rules()]
self.assertIn(self.RULE_ID, ids)
|
<commit_before><commit_msg>Add functional tests for security group rule
Tests:
test_find
test_get
test_list
Change-Id: If54342ec8c57b926a5217d888a43dcd98223bd69<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack.network.v2 import security_group
from openstack.network.v2 import security_group_rule
from openstack.tests.functional import base
class TestSecurityGroupRule(base.BaseFunctionalTest):
NAME = uuid.uuid4().hex
IPV4 = 'IPv4'
PROTO = 'tcp'
PORT = 22
DIR = 'ingress'
ID = None
RULE_ID = None
@classmethod
def setUpClass(cls):
super(TestSecurityGroupRule, cls).setUpClass()
sot = cls.conn.network.create_security_group(name=cls.NAME)
assert isinstance(sot, security_group.SecurityGroup)
cls.assertIs(cls.NAME, sot.name)
cls.ID = sot.id
rul = cls.conn.network.create_security_group_rule(
direction=cls.DIR, ethertype=cls.IPV4,
port_range_max=cls.PORT, port_range_min=cls.PORT,
protocol=cls.PROTO, security_group_id=cls.ID)
assert isinstance(rul, security_group_rule.SecurityGroupRule)
cls.assertIs(cls.ID, rul.security_group_id)
cls.RULE_ID = rul.id
@classmethod
def tearDownClass(cls):
sot = cls.conn.network.delete_security_group_rule(cls.RULE_ID,
ignore_missing=False)
cls.assertIs(None, sot)
sot = cls.conn.network.delete_security_group(cls.ID,
ignore_missing=False)
cls.assertIs(None, sot)
def test_find(self):
sot = self.conn.network.find_security_group_rule(self.RULE_ID)
self.assertEqual(self.RULE_ID, sot.id)
def test_get(self):
sot = self.conn.network.get_security_group_rule(self.RULE_ID)
self.assertEqual(self.RULE_ID, sot.id)
self.assertEqual(self.DIR, sot.direction)
self.assertEqual(self.PROTO, sot.protocol)
self.assertEqual(self.PORT, sot.port_range_min)
self.assertEqual(self.PORT, sot.port_range_max)
self.assertEqual(self.ID, sot.security_group_id)
def test_list(self):
ids = [o.id for o in self.conn.network.security_group_rules()]
self.assertIn(self.RULE_ID, ids)
|
Add functional tests for security group rule
Tests:
test_find
test_get
test_list
Change-Id: If54342ec8c57b926a5217d888a43dcd98223bd69# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack.network.v2 import security_group
from openstack.network.v2 import security_group_rule
from openstack.tests.functional import base
class TestSecurityGroupRule(base.BaseFunctionalTest):
NAME = uuid.uuid4().hex
IPV4 = 'IPv4'
PROTO = 'tcp'
PORT = 22
DIR = 'ingress'
ID = None
RULE_ID = None
@classmethod
def setUpClass(cls):
super(TestSecurityGroupRule, cls).setUpClass()
sot = cls.conn.network.create_security_group(name=cls.NAME)
assert isinstance(sot, security_group.SecurityGroup)
cls.assertIs(cls.NAME, sot.name)
cls.ID = sot.id
rul = cls.conn.network.create_security_group_rule(
direction=cls.DIR, ethertype=cls.IPV4,
port_range_max=cls.PORT, port_range_min=cls.PORT,
protocol=cls.PROTO, security_group_id=cls.ID)
assert isinstance(rul, security_group_rule.SecurityGroupRule)
cls.assertIs(cls.ID, rul.security_group_id)
cls.RULE_ID = rul.id
@classmethod
def tearDownClass(cls):
sot = cls.conn.network.delete_security_group_rule(cls.RULE_ID,
ignore_missing=False)
cls.assertIs(None, sot)
sot = cls.conn.network.delete_security_group(cls.ID,
ignore_missing=False)
cls.assertIs(None, sot)
def test_find(self):
sot = self.conn.network.find_security_group_rule(self.RULE_ID)
self.assertEqual(self.RULE_ID, sot.id)
def test_get(self):
sot = self.conn.network.get_security_group_rule(self.RULE_ID)
self.assertEqual(self.RULE_ID, sot.id)
self.assertEqual(self.DIR, sot.direction)
self.assertEqual(self.PROTO, sot.protocol)
self.assertEqual(self.PORT, sot.port_range_min)
self.assertEqual(self.PORT, sot.port_range_max)
self.assertEqual(self.ID, sot.security_group_id)
def test_list(self):
ids = [o.id for o in self.conn.network.security_group_rules()]
self.assertIn(self.RULE_ID, ids)
|
<commit_before><commit_msg>Add functional tests for security group rule
Tests:
test_find
test_get
test_list
Change-Id: If54342ec8c57b926a5217d888a43dcd98223bd69<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack.network.v2 import security_group
from openstack.network.v2 import security_group_rule
from openstack.tests.functional import base
class TestSecurityGroupRule(base.BaseFunctionalTest):
NAME = uuid.uuid4().hex
IPV4 = 'IPv4'
PROTO = 'tcp'
PORT = 22
DIR = 'ingress'
ID = None
RULE_ID = None
@classmethod
def setUpClass(cls):
super(TestSecurityGroupRule, cls).setUpClass()
sot = cls.conn.network.create_security_group(name=cls.NAME)
assert isinstance(sot, security_group.SecurityGroup)
cls.assertIs(cls.NAME, sot.name)
cls.ID = sot.id
rul = cls.conn.network.create_security_group_rule(
direction=cls.DIR, ethertype=cls.IPV4,
port_range_max=cls.PORT, port_range_min=cls.PORT,
protocol=cls.PROTO, security_group_id=cls.ID)
assert isinstance(rul, security_group_rule.SecurityGroupRule)
cls.assertIs(cls.ID, rul.security_group_id)
cls.RULE_ID = rul.id
@classmethod
def tearDownClass(cls):
sot = cls.conn.network.delete_security_group_rule(cls.RULE_ID,
ignore_missing=False)
cls.assertIs(None, sot)
sot = cls.conn.network.delete_security_group(cls.ID,
ignore_missing=False)
cls.assertIs(None, sot)
def test_find(self):
sot = self.conn.network.find_security_group_rule(self.RULE_ID)
self.assertEqual(self.RULE_ID, sot.id)
def test_get(self):
sot = self.conn.network.get_security_group_rule(self.RULE_ID)
self.assertEqual(self.RULE_ID, sot.id)
self.assertEqual(self.DIR, sot.direction)
self.assertEqual(self.PROTO, sot.protocol)
self.assertEqual(self.PORT, sot.port_range_min)
self.assertEqual(self.PORT, sot.port_range_max)
self.assertEqual(self.ID, sot.security_group_id)
def test_list(self):
ids = [o.id for o in self.conn.network.security_group_rules()]
self.assertIn(self.RULE_ID, ids)
|
|
43d5041c09caadd7bd67195ba7519e8ef006f506
|
corehq/pillows/group.py
|
corehq/pillows/group.py
|
from corehq.apps.groups.models import Group
from corehq.pillows.mappings.group_mapping import GROUP_INDEX, GROUP_MAPPING
from dimagi.utils.decorators.memoized import memoized
from pillowtop.listener import AliasedElasticPillow
from django.conf import settings
class GroupPillow(AliasedElasticPillow):
"""
Simple/Common Case properties Indexer
"""
document_class = Group
couch_filter = "groups/all_groups"
es_host = settings.ELASTICSEARCH_HOST
es_port = settings.ELASTICSEARCH_PORT
es_timeout = 60
es_index_prefix = "hqgroups"
es_alias = "hqgroups"
es_type = "group"
es_meta = {
"settings": {
"analysis": {
"analyzer": {
"default": {
"type": "custom",
"tokenizer": "whitespace",
"filter": ["lowercase"]
},
}
}
}
}
es_index = GROUP_INDEX
default_mapping = GROUP_MAPPING
@memoized
def calc_meta(self):
#todo: actually do this correctly
"""
override of the meta calculator since we're separating out all the types,
so we just do a hash of the "prototype" instead to determined md5
"""
return self.calc_mapping_hash({"es_meta": self.es_meta,
"mapping": self.default_mapping})
def get_mapping_from_type(self, doc_dict):
"""
Define mapping uniquely to the user_type document.
See below on why date_detection is False
NOTE: DO NOT MODIFY THIS UNLESS ABSOLUTELY NECESSARY. A CHANGE BELOW WILL GENERATE A NEW
HASH FOR THE INDEX NAME REQUIRING A REINDEX+RE-ALIAS. THIS IS A SERIOUSLY RESOURCE
INTENSIVE OPERATION THAT REQUIRES SOME CAREFUL LOGISTICS TO MIGRATE
"""
#the meta here is defined for when the case index + type is created for the FIRST time
#subsequent data added to it will be added automatically, but date_detection is necessary
# to be false to prevent indexes from not being created due to the way we store dates
#all are strings EXCEPT the core case properties which we need to explicitly define below.
#that way date sort and ranges will work with canonical date formats for queries.
return {
self.get_type_string(doc_dict): self.default_mapping
}
def get_type_string(self, doc_dict):
return self.es_type
|
from corehq.apps.groups.models import Group
from corehq.pillows.mappings.group_mapping import GROUP_INDEX, GROUP_MAPPING
from dimagi.utils.decorators.memoized import memoized
from pillowtop.listener import AliasedElasticPillow
from django.conf import settings
class GroupPillow(HQPillow):
"""
Simple/Common Case properties Indexer
"""
document_class = Group
couch_filter = "groups/all_groups"
es_index_prefix = "hqgroups"
es_alias = "hqgroups"
es_type = "group"
es_index = GROUP_INDEX
default_mapping = GROUP_MAPPING
|
Switch over to extend HQPillow
|
Switch over to extend HQPillow
|
Python
|
bsd-3-clause
|
qedsoftware/commcare-hq,puttarajubr/commcare-hq,gmimano/commcaretest,dimagi/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,SEL-Columbia/commcare-hq,gmimano/commcaretest,SEL-Columbia/commcare-hq,puttarajubr/commcare-hq,gmimano/commcaretest,dimagi/commcare-hq,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq
|
from corehq.apps.groups.models import Group
from corehq.pillows.mappings.group_mapping import GROUP_INDEX, GROUP_MAPPING
from dimagi.utils.decorators.memoized import memoized
from pillowtop.listener import AliasedElasticPillow
from django.conf import settings
class GroupPillow(AliasedElasticPillow):
"""
Simple/Common Case properties Indexer
"""
document_class = Group
couch_filter = "groups/all_groups"
es_host = settings.ELASTICSEARCH_HOST
es_port = settings.ELASTICSEARCH_PORT
es_timeout = 60
es_index_prefix = "hqgroups"
es_alias = "hqgroups"
es_type = "group"
es_meta = {
"settings": {
"analysis": {
"analyzer": {
"default": {
"type": "custom",
"tokenizer": "whitespace",
"filter": ["lowercase"]
},
}
}
}
}
es_index = GROUP_INDEX
default_mapping = GROUP_MAPPING
@memoized
def calc_meta(self):
#todo: actually do this correctly
"""
override of the meta calculator since we're separating out all the types,
so we just do a hash of the "prototype" instead to determined md5
"""
return self.calc_mapping_hash({"es_meta": self.es_meta,
"mapping": self.default_mapping})
def get_mapping_from_type(self, doc_dict):
"""
Define mapping uniquely to the user_type document.
See below on why date_detection is False
NOTE: DO NOT MODIFY THIS UNLESS ABSOLUTELY NECESSARY. A CHANGE BELOW WILL GENERATE A NEW
HASH FOR THE INDEX NAME REQUIRING A REINDEX+RE-ALIAS. THIS IS A SERIOUSLY RESOURCE
INTENSIVE OPERATION THAT REQUIRES SOME CAREFUL LOGISTICS TO MIGRATE
"""
#the meta here is defined for when the case index + type is created for the FIRST time
#subsequent data added to it will be added automatically, but date_detection is necessary
# to be false to prevent indexes from not being created due to the way we store dates
#all are strings EXCEPT the core case properties which we need to explicitly define below.
#that way date sort and ranges will work with canonical date formats for queries.
return {
self.get_type_string(doc_dict): self.default_mapping
}
def get_type_string(self, doc_dict):
return self.es_type
Switch over to extend HQPillow
|
from corehq.apps.groups.models import Group
from corehq.pillows.mappings.group_mapping import GROUP_INDEX, GROUP_MAPPING
from dimagi.utils.decorators.memoized import memoized
from pillowtop.listener import AliasedElasticPillow
from django.conf import settings
class GroupPillow(HQPillow):
"""
Simple/Common Case properties Indexer
"""
document_class = Group
couch_filter = "groups/all_groups"
es_index_prefix = "hqgroups"
es_alias = "hqgroups"
es_type = "group"
es_index = GROUP_INDEX
default_mapping = GROUP_MAPPING
|
<commit_before>from corehq.apps.groups.models import Group
from corehq.pillows.mappings.group_mapping import GROUP_INDEX, GROUP_MAPPING
from dimagi.utils.decorators.memoized import memoized
from pillowtop.listener import AliasedElasticPillow
from django.conf import settings
class GroupPillow(AliasedElasticPillow):
"""
Simple/Common Case properties Indexer
"""
document_class = Group
couch_filter = "groups/all_groups"
es_host = settings.ELASTICSEARCH_HOST
es_port = settings.ELASTICSEARCH_PORT
es_timeout = 60
es_index_prefix = "hqgroups"
es_alias = "hqgroups"
es_type = "group"
es_meta = {
"settings": {
"analysis": {
"analyzer": {
"default": {
"type": "custom",
"tokenizer": "whitespace",
"filter": ["lowercase"]
},
}
}
}
}
es_index = GROUP_INDEX
default_mapping = GROUP_MAPPING
@memoized
def calc_meta(self):
#todo: actually do this correctly
"""
override of the meta calculator since we're separating out all the types,
so we just do a hash of the "prototype" instead to determined md5
"""
return self.calc_mapping_hash({"es_meta": self.es_meta,
"mapping": self.default_mapping})
def get_mapping_from_type(self, doc_dict):
"""
Define mapping uniquely to the user_type document.
See below on why date_detection is False
NOTE: DO NOT MODIFY THIS UNLESS ABSOLUTELY NECESSARY. A CHANGE BELOW WILL GENERATE A NEW
HASH FOR THE INDEX NAME REQUIRING A REINDEX+RE-ALIAS. THIS IS A SERIOUSLY RESOURCE
INTENSIVE OPERATION THAT REQUIRES SOME CAREFUL LOGISTICS TO MIGRATE
"""
#the meta here is defined for when the case index + type is created for the FIRST time
#subsequent data added to it will be added automatically, but date_detection is necessary
# to be false to prevent indexes from not being created due to the way we store dates
#all are strings EXCEPT the core case properties which we need to explicitly define below.
#that way date sort and ranges will work with canonical date formats for queries.
return {
self.get_type_string(doc_dict): self.default_mapping
}
def get_type_string(self, doc_dict):
return self.es_type
<commit_msg>Switch over to extend HQPillow<commit_after>
|
from corehq.apps.groups.models import Group
from corehq.pillows.mappings.group_mapping import GROUP_INDEX, GROUP_MAPPING
from dimagi.utils.decorators.memoized import memoized
from pillowtop.listener import AliasedElasticPillow
from django.conf import settings
class GroupPillow(HQPillow):
"""
Simple/Common Case properties Indexer
"""
document_class = Group
couch_filter = "groups/all_groups"
es_index_prefix = "hqgroups"
es_alias = "hqgroups"
es_type = "group"
es_index = GROUP_INDEX
default_mapping = GROUP_MAPPING
|
from corehq.apps.groups.models import Group
from corehq.pillows.mappings.group_mapping import GROUP_INDEX, GROUP_MAPPING
from dimagi.utils.decorators.memoized import memoized
from pillowtop.listener import AliasedElasticPillow
from django.conf import settings
class GroupPillow(AliasedElasticPillow):
"""
Simple/Common Case properties Indexer
"""
document_class = Group
couch_filter = "groups/all_groups"
es_host = settings.ELASTICSEARCH_HOST
es_port = settings.ELASTICSEARCH_PORT
es_timeout = 60
es_index_prefix = "hqgroups"
es_alias = "hqgroups"
es_type = "group"
es_meta = {
"settings": {
"analysis": {
"analyzer": {
"default": {
"type": "custom",
"tokenizer": "whitespace",
"filter": ["lowercase"]
},
}
}
}
}
es_index = GROUP_INDEX
default_mapping = GROUP_MAPPING
@memoized
def calc_meta(self):
#todo: actually do this correctly
"""
override of the meta calculator since we're separating out all the types,
so we just do a hash of the "prototype" instead to determined md5
"""
return self.calc_mapping_hash({"es_meta": self.es_meta,
"mapping": self.default_mapping})
def get_mapping_from_type(self, doc_dict):
"""
Define mapping uniquely to the user_type document.
See below on why date_detection is False
NOTE: DO NOT MODIFY THIS UNLESS ABSOLUTELY NECESSARY. A CHANGE BELOW WILL GENERATE A NEW
HASH FOR THE INDEX NAME REQUIRING A REINDEX+RE-ALIAS. THIS IS A SERIOUSLY RESOURCE
INTENSIVE OPERATION THAT REQUIRES SOME CAREFUL LOGISTICS TO MIGRATE
"""
#the meta here is defined for when the case index + type is created for the FIRST time
#subsequent data added to it will be added automatically, but date_detection is necessary
# to be false to prevent indexes from not being created due to the way we store dates
#all are strings EXCEPT the core case properties which we need to explicitly define below.
#that way date sort and ranges will work with canonical date formats for queries.
return {
self.get_type_string(doc_dict): self.default_mapping
}
def get_type_string(self, doc_dict):
return self.es_type
Switch over to extend HQPillowfrom corehq.apps.groups.models import Group
from corehq.pillows.mappings.group_mapping import GROUP_INDEX, GROUP_MAPPING
from dimagi.utils.decorators.memoized import memoized
from pillowtop.listener import AliasedElasticPillow
from django.conf import settings
class GroupPillow(HQPillow):
"""
Simple/Common Case properties Indexer
"""
document_class = Group
couch_filter = "groups/all_groups"
es_index_prefix = "hqgroups"
es_alias = "hqgroups"
es_type = "group"
es_index = GROUP_INDEX
default_mapping = GROUP_MAPPING
|
<commit_before>from corehq.apps.groups.models import Group
from corehq.pillows.mappings.group_mapping import GROUP_INDEX, GROUP_MAPPING
from dimagi.utils.decorators.memoized import memoized
from pillowtop.listener import AliasedElasticPillow
from django.conf import settings
class GroupPillow(AliasedElasticPillow):
"""
Simple/Common Case properties Indexer
"""
document_class = Group
couch_filter = "groups/all_groups"
es_host = settings.ELASTICSEARCH_HOST
es_port = settings.ELASTICSEARCH_PORT
es_timeout = 60
es_index_prefix = "hqgroups"
es_alias = "hqgroups"
es_type = "group"
es_meta = {
"settings": {
"analysis": {
"analyzer": {
"default": {
"type": "custom",
"tokenizer": "whitespace",
"filter": ["lowercase"]
},
}
}
}
}
es_index = GROUP_INDEX
default_mapping = GROUP_MAPPING
@memoized
def calc_meta(self):
#todo: actually do this correctly
"""
override of the meta calculator since we're separating out all the types,
so we just do a hash of the "prototype" instead to determined md5
"""
return self.calc_mapping_hash({"es_meta": self.es_meta,
"mapping": self.default_mapping})
def get_mapping_from_type(self, doc_dict):
"""
Define mapping uniquely to the user_type document.
See below on why date_detection is False
NOTE: DO NOT MODIFY THIS UNLESS ABSOLUTELY NECESSARY. A CHANGE BELOW WILL GENERATE A NEW
HASH FOR THE INDEX NAME REQUIRING A REINDEX+RE-ALIAS. THIS IS A SERIOUSLY RESOURCE
INTENSIVE OPERATION THAT REQUIRES SOME CAREFUL LOGISTICS TO MIGRATE
"""
#the meta here is defined for when the case index + type is created for the FIRST time
#subsequent data added to it will be added automatically, but date_detection is necessary
# to be false to prevent indexes from not being created due to the way we store dates
#all are strings EXCEPT the core case properties which we need to explicitly define below.
#that way date sort and ranges will work with canonical date formats for queries.
return {
self.get_type_string(doc_dict): self.default_mapping
}
def get_type_string(self, doc_dict):
return self.es_type
<commit_msg>Switch over to extend HQPillow<commit_after>from corehq.apps.groups.models import Group
from corehq.pillows.mappings.group_mapping import GROUP_INDEX, GROUP_MAPPING
from dimagi.utils.decorators.memoized import memoized
from pillowtop.listener import AliasedElasticPillow
from django.conf import settings
class GroupPillow(HQPillow):
"""
Simple/Common Case properties Indexer
"""
document_class = Group
couch_filter = "groups/all_groups"
es_index_prefix = "hqgroups"
es_alias = "hqgroups"
es_type = "group"
es_index = GROUP_INDEX
default_mapping = GROUP_MAPPING
|
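The refactor above removes the Elasticsearch host/port settings, the analyzer block and the mapping helpers from GroupPillow and relies on an HQPillow base class that the diff does not show (it also leaves AliasedElasticPillow imported but unused). As a rough sketch only, since the real HQPillow in commcare-hq may be defined differently, a base class along these lines would explain why the subclass can shrink to a handful of attributes:

# Hypothetical sketch of the shared base class; illustration only, not the
# actual commcare-hq implementation.
from dimagi.utils.decorators.memoized import memoized
from pillowtop.listener import AliasedElasticPillow
from django.conf import settings


class HQPillow(AliasedElasticPillow):
    es_host = settings.ELASTICSEARCH_HOST
    es_port = settings.ELASTICSEARCH_PORT
    es_timeout = 60
    # Shared whitespace/lowercase analyzer used by the HQ indices.
    es_meta = {
        "settings": {
            "analysis": {
                "analyzer": {
                    "default": {
                        "type": "custom",
                        "tokenizer": "whitespace",
                        "filter": ["lowercase"],
                    },
                }
            }
        }
    }

    @memoized
    def calc_meta(self):
        # Hash the mapping "prototype" so a mapping change forces a reindex.
        return self.calc_mapping_hash({"es_meta": self.es_meta,
                                       "mapping": self.default_mapping})

    def get_mapping_from_type(self, doc_dict):
        return {self.get_type_string(doc_dict): self.default_mapping}

    def get_type_string(self, doc_dict):
        return self.es_type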
3bf3813ed03f755d7aa777fc023add5c51a54311
|
tests/test_sqlalchemy_create_table.py
|
tests/test_sqlalchemy_create_table.py
|
# -*- coding: utf-8; -*-
import re
from sqlalchemy import Column, Integer, MetaData, String, Table
from sqlalchemy.sql.ddl import CreateTable
from pyathena.sqlalchemy_athena import AthenaDialect
def test_create_table():
# Given
table = Table('table_name', MetaData(), Column('column_name', String))
dialect = AthenaDialect()
# When
statement = CreateTable(table).compile(dialect=dialect)
# Then
assert statement is not None
# vim: et:sw=4:syntax=python:ts=4:
|
Add failing test showing the issue
|
Add failing test showing the issue
Relates to issue #258.
|
Python
|
mit
|
laughingman7743/PyAthena
|
Add failing test showing the issue
Relates to issue #258.
|
# -*- coding: utf-8; -*-
import re
from sqlalchemy import Column, Integer, MetaData, String, Table
from sqlalchemy.sql.ddl import CreateTable
from pyathena.sqlalchemy_athena import AthenaDialect
def test_create_table():
# Given
table = Table('table_name', MetaData(), Column('column_name', String))
dialect = AthenaDialect()
# When
statement = CreateTable(table).compile(dialect=dialect)
# Then
assert statement is not None
# vim: et:sw=4:syntax=python:ts=4:
|
<commit_before><commit_msg>Add failing test showing the issue
Relates to issue #258.<commit_after>
|
# -*- coding: utf-8; -*-
import re
from sqlalchemy import Column, Integer, MetaData, String, Table
from sqlalchemy.sql.ddl import CreateTable
from pyathena.sqlalchemy_athena import AthenaDialect
def test_create_table():
# Given
table = Table('table_name', MetaData(), Column('column_name', String))
dialect = AthenaDialect()
# When
statement = CreateTable(table).compile(dialect=dialect)
# Then
assert statement is not None
# vim: et:sw=4:syntax=python:ts=4:
|
Add failing test showing the issue
Relates to issue #258.# -*- coding: utf-8; -*-
import re
from sqlalchemy import Column, Integer, MetaData, String, Table
from sqlalchemy.sql.ddl import CreateTable
from pyathena.sqlalchemy_athena import AthenaDialect
def test_create_table():
# Given
table = Table('table_name', MetaData(), Column('column_name', String))
dialect = AthenaDialect()
# When
statement = CreateTable(table).compile(dialect=dialect)
# Then
assert statement is not None
# vim: et:sw=4:syntax=python:ts=4:
|
<commit_before><commit_msg>Add failing test showing the issue
Relates to issue #258.<commit_after># -*- coding: utf-8; -*-
import re
from sqlalchemy import Column, Integer, MetaData, String, Table
from sqlalchemy.sql.ddl import CreateTable
from pyathena.sqlalchemy_athena import AthenaDialect
def test_create_table():
# Given
table = Table('table_name', MetaData(), Column('column_name', String))
dialect = AthenaDialect()
# When
statement = CreateTable(table).compile(dialect=dialect)
# Then
assert statement is not None
# vim: et:sw=4:syntax=python:ts=4:
|
|
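The failing test above only asserts that CreateTable compiles. When chasing issue #258 it can help to look at the rendered DDL itself; the minimal sketch below uses SQLAlchemy's built-in default dialect so it runs without pyathena installed, with the same made-up table and column names:

# Print the generic DDL; pass dialect=AthenaDialect() to compile() to see
# what the Athena dialect emits instead.
from sqlalchemy import Column, MetaData, String, Table
from sqlalchemy.sql.ddl import CreateTable

table = Table('table_name', MetaData(), Column('column_name', String))
print(str(CreateTable(table)))  # roughly: CREATE TABLE table_name (column_name VARCHAR)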
661647cfe4d7c1229f1f719cddf42b6c052e4b79
|
scripts/write_names.py
|
scripts/write_names.py
|
import json
names = list()
with open("viruses.json", "r") as f:
names = [virus["name"] for virus in json.load(f)]
names = sorted(names)
with open("names.txt", "w") as f:
f.write("\n".join(names))
|
Add script for writing virus names to txt file
|
Add script for writing virus names to txt file
|
Python
|
mit
|
virtool/virtool-database
|
Add script for writing virus names to txt file
|
import json
names = list()
with open("viruses.json", "r") as f:
names = [virus["name"] for virus in json.load(f)]
names = sorted(names)
with open("names.txt", "w") as f:
f.write("\n".join(names))
|
<commit_before><commit_msg>Add script for writing virus names to txt file<commit_after>
|
import json
names = list()
with open("viruses.json", "r") as f:
names = [virus["name"] for virus in json.load(f)]
names = sorted(names)
with open("names.txt", "w") as f:
f.write("\n".join(names))
|
Add script for writing virus names to txt fileimport json
names = list()
with open("viruses.json", "r") as f:
names = [virus["name"] for virus in json.load(f)]
names = sorted(names)
with open("names.txt", "w") as f:
f.write("\n".join(names))
|
<commit_before><commit_msg>Add script for writing virus names to txt file<commit_after>import json
names = list()
with open("viruses.json", "r") as f:
names = [virus["name"] for virus in json.load(f)]
names = sorted(names)
with open("names.txt", "w") as f:
f.write("\n".join(names))
|
|
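A slightly more defensive variant of the script above, shown only as an illustration: it sorts the names case-insensitively and ends the output file with a newline. The file names are the same ones the script assumes.

import json

# Read the virus names, sort them case-insensitively, write one per line.
with open("viruses.json", "r") as f:
    names = sorted((virus["name"] for virus in json.load(f)), key=str.lower)

with open("names.txt", "w") as f:
    f.write("\n".join(names) + "\n")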
ea893b3ed7c42522720cd98a0a98a397accd9e07
|
scripts/test-env.py
|
scripts/test-env.py
|
"""
This script tests whether the current environment works correctly or not.
"""
import sys; sys.path.insert(0, '../geoplot/')
import geoplot as gplt
from geoplot import crs as gcrs
import geopandas as gpd
# cf. https://github.com/Toblerity/Shapely/issues/435
# Fiona/Shapely/Geopandas test.
cities = gpd.read_file("../data/cities/citiesx010g.shp")
boroughs = gpd.read_file("../data/nyc_boroughs/boroughs.geojson")
# Cartopy test.
gplt.pointplot(cities.head(50), extent=(10, 20, 10, 20))
|
Add test script for environment.
|
Add test script for environment.
|
Python
|
mit
|
ResidentMario/geoplot
|
Add test script for environment.
|
"""
This script tests whether the current environment works correctly or not.
"""
import sys; sys.path.insert(0, '../geoplot/')
import geoplot as gplt
from geoplot import crs as gcrs
import geopandas as gpd
# cf. https://github.com/Toblerity/Shapely/issues/435
# Fiona/Shapely/Geopandas test.
cities = gpd.read_file("../data/cities/citiesx010g.shp")
boroughs = gpd.read_file("../data/nyc_boroughs/boroughs.geojson")
# Cartopy test.
gplt.pointplot(cities.head(50), extent=(10, 20, 10, 20))
|
<commit_before><commit_msg>Add test script for environment.<commit_after>
|
"""
This script tests whether the current environment works correctly or not.
"""
import sys; sys.path.insert(0, '../geoplot/')
import geoplot as gplt
from geoplot import crs as gcrs
import geopandas as gpd
# cf. https://github.com/Toblerity/Shapely/issues/435
# Fiona/Shapely/Geopandas test.
cities = gpd.read_file("../data/cities/citiesx010g.shp")
boroughs = gpd.read_file("../data/nyc_boroughs/boroughs.geojson")
# Cartopy test.
gplt.pointplot(cities.head(50), extent=(10, 20, 10, 20))
|
Add test script for environment."""
This script tests whether the current environment works correctly or not.
"""
import sys; sys.path.insert(0, '../geoplot/')
import geoplot as gplt
from geoplot import crs as gcrs
import geopandas as gpd
# cf. https://github.com/Toblerity/Shapely/issues/435
# Fiona/Shapely/Geopandas test.
cities = gpd.read_file("../data/cities/citiesx010g.shp")
boroughs = gpd.read_file("../data/nyc_boroughs/boroughs.geojson")
# Cartopy test.
gplt.pointplot(cities.head(50), extent=(10, 20, 10, 20))
|
<commit_before><commit_msg>Add test script for environment.<commit_after>"""
This script tests whether the current environment works correctly or not.
"""
import sys; sys.path.insert(0, '../geoplot/')
import geoplot as gplt
from geoplot import crs as gcrs
import geopandas as gpd
# cf. https://github.com/Toblerity/Shapely/issues/435
# Fiona/Shapely/Geopandas test.
cities = gpd.read_file("../data/cities/citiesx010g.shp")
boroughs = gpd.read_file("../data/nyc_boroughs/boroughs.geojson")
# Cartopy test.
gplt.pointplot(cities.head(50), extent=(10, 20, 10, 20))
|
|
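The smoke test above stops at the first traceback. If the goal is to see which layer of the Fiona/Shapely/GeoPandas/Cartopy stack is broken, a small wrapper can run every probe and report each result separately; this is a sketch only and reuses the data path assumed by the script above.

import traceback

def check(label, fn):
    # Run one probe and report pass/fail instead of aborting at the first error.
    try:
        fn()
        print("OK  ", label)
    except Exception:
        print("FAIL", label)
        traceback.print_exc()

check("import geopandas (Fiona/Shapely)", lambda: __import__("geopandas"))
check("import geoplot (Cartopy)", lambda: __import__("geoplot"))
check("read cities shapefile",
      lambda: __import__("geopandas").read_file("../data/cities/citiesx010g.shp"))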
80a9cc1ed5bed794769a511a4c2bf0070821ab7d
|
tests/serio/test_messages.py
|
tests/serio/test_messages.py
|
import unittest
from meshnet.serio.messages import SerialMessage, MessageType
KEY = b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
class TestSerialMessage(unittest.TestCase):
def test_serialize(self):
message = SerialMessage(0, 1, MessageType.booted, None, 12, 1, b"jsif")
self.assertEqual(b"\x00\x01F\t\x00\x0c\x00\x01jsif\xae\x9a\xc1S\x88\x9d\xbc\xa4",
message.serialize(KEY))
def test_framed(self):
message = SerialMessage(0, 1, MessageType.booted, None, 12, 1, b"jsif")
self.assertEqual(b"\xaf\xaf\x02\x14\x00\x01F\t\x00\x0c\x00\x01jsif\xae\x9a\xc1S\x88\x9d\xbc\xa4\x03",
message.framed(KEY))
class TestSerialMessageConsumer(unittest.TestCase):
pass
|
Add simple unittests for serio message
|
Add simple unittests for serio message
Signed-off-by: Jan Losinski <577c4104c61edf9f052c616c0c23e67bef4a9955@wh2.tu-dresden.de>
|
Python
|
bsd-3-clause
|
janLo/automation_mesh,janLo/automation_mesh,janLo/automation_mesh
|
Add simple unittests for serio message
Signed-off-by: Jan Losinski <577c4104c61edf9f052c616c0c23e67bef4a9955@wh2.tu-dresden.de>
|
import unittest
from meshnet.serio.messages import SerialMessage, MessageType
KEY = b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
class TestSerialMessage(unittest.TestCase):
def test_serialize(self):
message = SerialMessage(0, 1, MessageType.booted, None, 12, 1, b"jsif")
self.assertEqual(b"\x00\x01F\t\x00\x0c\x00\x01jsif\xae\x9a\xc1S\x88\x9d\xbc\xa4",
message.serialize(KEY))
def test_framed(self):
message = SerialMessage(0, 1, MessageType.booted, None, 12, 1, b"jsif")
self.assertEqual(b"\xaf\xaf\x02\x14\x00\x01F\t\x00\x0c\x00\x01jsif\xae\x9a\xc1S\x88\x9d\xbc\xa4\x03",
message.framed(KEY))
class TestSerialMessageConsumer(unittest.TestCase):
pass
|
<commit_before><commit_msg>Add simple unittests for serio message
Signed-off-by: Jan Losinski <577c4104c61edf9f052c616c0c23e67bef4a9955@wh2.tu-dresden.de><commit_after>
|
import unittest
from meshnet.serio.messages import SerialMessage, MessageType
KEY = b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
class TestSerialMessage(unittest.TestCase):
def test_serialize(self):
message = SerialMessage(0, 1, MessageType.booted, None, 12, 1, b"jsif")
self.assertEqual(b"\x00\x01F\t\x00\x0c\x00\x01jsif\xae\x9a\xc1S\x88\x9d\xbc\xa4",
message.serialize(KEY))
def test_framed(self):
message = SerialMessage(0, 1, MessageType.booted, None, 12, 1, b"jsif")
self.assertEqual(b"\xaf\xaf\x02\x14\x00\x01F\t\x00\x0c\x00\x01jsif\xae\x9a\xc1S\x88\x9d\xbc\xa4\x03",
message.framed(KEY))
class TestSerialMessageConsumer(unittest.TestCase):
pass
|
Add simple unittests for serio message
Signed-off-by: Jan Losinski <577c4104c61edf9f052c616c0c23e67bef4a9955@wh2.tu-dresden.de>import unittest
from meshnet.serio.messages import SerialMessage, MessageType
KEY = b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
class TestSerialMessage(unittest.TestCase):
def test_serialize(self):
message = SerialMessage(0, 1, MessageType.booted, None, 12, 1, b"jsif")
self.assertEqual(b"\x00\x01F\t\x00\x0c\x00\x01jsif\xae\x9a\xc1S\x88\x9d\xbc\xa4",
message.serialize(KEY))
def test_framed(self):
message = SerialMessage(0, 1, MessageType.booted, None, 12, 1, b"jsif")
self.assertEqual(b"\xaf\xaf\x02\x14\x00\x01F\t\x00\x0c\x00\x01jsif\xae\x9a\xc1S\x88\x9d\xbc\xa4\x03",
message.framed(KEY))
class TestSerialMessageConsumer(unittest.TestCase):
pass
|
<commit_before><commit_msg>Add simple unittests for serio message
Signed-off-by: Jan Losinski <577c4104c61edf9f052c616c0c23e67bef4a9955@wh2.tu-dresden.de><commit_after>import unittest
from meshnet.serio.messages import SerialMessage, MessageType
KEY = b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
class TestSerialMessage(unittest.TestCase):
def test_serialize(self):
message = SerialMessage(0, 1, MessageType.booted, None, 12, 1, b"jsif")
self.assertEqual(b"\x00\x01F\t\x00\x0c\x00\x01jsif\xae\x9a\xc1S\x88\x9d\xbc\xa4",
message.serialize(KEY))
def test_framed(self):
message = SerialMessage(0, 1, MessageType.booted, None, 12, 1, b"jsif")
self.assertEqual(b"\xaf\xaf\x02\x14\x00\x01F\t\x00\x0c\x00\x01jsif\xae\x9a\xc1S\x88\x9d\xbc\xa4\x03",
message.framed(KEY))
class TestSerialMessageConsumer(unittest.TestCase):
pass
|
|
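The expected byte strings above hard-code the HMAC twice, once for serialize() and once for framed(). A consistency check can relate the two without repeating the crypto bytes; the 0xaf 0xaf ... 0x03 framing layout and the length byte position are inferred from the expected values above and may not match the real protocol exactly, so treat this as a sketch.

import unittest

from meshnet.serio.messages import SerialMessage, MessageType

KEY = b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'


class TestFramingConsistency(unittest.TestCase):
    def test_framed_wraps_serialized(self):
        message = SerialMessage(0, 1, MessageType.booted, None, 12, 1, b"jsif")
        payload = message.serialize(KEY)
        framed = message.framed(KEY)
        self.assertTrue(framed.startswith(b"\xaf\xaf"))  # start-of-frame marker
        self.assertTrue(framed.endswith(b"\x03"))        # end-of-frame marker
        self.assertIn(payload, framed)                   # frame wraps the payload
        self.assertEqual(len(payload), framed[3])        # length byte, as inferred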
889a70cd8bb9d7e063a8248bab3de958e697d9ae
|
tests/test_wonderful_bing.py
|
tests/test_wonderful_bing.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import mock
import pytest
from wonderful_bing.wonderful_bing import WonderfulBing
def test_picture_has_be_downloaded():
with mock.patch('os.path.exists', return_value=True):
with pytest.raises(SystemExit):
arguments = {'--directory': '/not/exist', 'ENVIRONMENT': 'gnome'}
wonderful_bing = WonderfulBing(arguments)
wonderful_bing.download_picture()
def test_download_picture():
arguments = {'--directory': '/tmp', 'ENVIRONMENT': 'gnome'}
wonderful_bing = WonderfulBing(arguments)
check_picture(wonderful_bing.picture_path)
with mock.patch('time.sleep', return_value=None):
wonderful_bing.download_picture()
assert path.exists(wonderful_bing.picture_path)
check_picture(wonderful_bing.picture_path)
def check_picture(picture_path):
if path.exists(picture_path):
os.remove(picture_path)
|
Add test for WonderfulBing class
|
Add test for WonderfulBing class
|
Python
|
mit
|
lord63/wonderful_bing
|
Add test for WonderfulBing class
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import mock
import pytest
from wonderful_bing.wonderful_bing import WonderfulBing
def test_picture_has_be_downloaded():
with mock.patch('os.path.exists', return_value=True):
with pytest.raises(SystemExit):
arguments = {'--directory': '/not/exist', 'ENVIRONMENT': 'gnome'}
wonderful_bing = WonderfulBing(arguments)
wonderful_bing.download_picture()
def test_download_picture():
arguments = {'--directory': '/tmp', 'ENVIRONMENT': 'gnome'}
wonderful_bing = WonderfulBing(arguments)
check_picture(wonderful_bing.picture_path)
with mock.patch('time.sleep', return_value=None):
wonderful_bing.download_picture()
assert path.exists(wonderful_bing.picture_path)
check_picture(wonderful_bing.picture_path)
def check_picture(picture_path):
if path.exists(picture_path):
os.remove(picture_path)
|
<commit_before><commit_msg>Add test for WonderfulBing class<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import mock
import pytest
from wonderful_bing.wonderful_bing import WonderfulBing
def test_picture_has_be_downloaded():
with mock.patch('os.path.exists', return_value=True):
with pytest.raises(SystemExit):
arguments = {'--directory': '/not/exist', 'ENVIRONMENT': 'gnome'}
wonderful_bing = WonderfulBing(arguments)
wonderful_bing.download_picture()
def test_download_picture():
arguments = {'--directory': '/tmp', 'ENVIRONMENT': 'gnome'}
wonderful_bing = WonderfulBing(arguments)
check_picture(wonderful_bing.picture_path)
with mock.patch('time.sleep', return_value=None):
wonderful_bing.download_picture()
assert path.exists(wonderful_bing.picture_path)
check_picture(wonderful_bing.picture_path)
def check_picture(picture_path):
if path.exists(picture_path):
os.remove(picture_path)
|
Add test for WonderfulBing class#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import mock
import pytest
from wonderful_bing.wonderful_bing import WonderfulBing
def test_picture_has_be_downloaded():
with mock.patch('os.path.exists', return_value=True):
with pytest.raises(SystemExit):
arguments = {'--directory': '/not/exist', 'ENVIRONMENT': 'gnome'}
wonderful_bing = WonderfulBing(arguments)
wonderful_bing.download_picture()
def test_download_picture():
arguments = {'--directory': '/tmp', 'ENVIRONMENT': 'gnome'}
wonderful_bing = WonderfulBing(arguments)
check_picture(wonderful_bing.picture_path)
with mock.patch('time.sleep', return_value=None):
wonderful_bing.download_picture()
assert path.exists(wonderful_bing.picture_path)
check_picture(wonderful_bing.picture_path)
def check_picture(picture_path):
if path.exists(picture_path):
os.remove(picture_path)
|
<commit_before><commit_msg>Add test for WonderfulBing class<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import mock
import pytest
from wonderful_bing.wonderful_bing import WonderfulBing
def test_picture_has_be_downloaded():
with mock.patch('os.path.exists', return_value=True):
with pytest.raises(SystemExit):
arguments = {'--directory': '/not/exist', 'ENVIRONMENT': 'gnome'}
wonderful_bing = WonderfulBing(arguments)
wonderful_bing.download_picture()
def test_download_picture():
arguments = {'--directory': '/tmp', 'ENVIRONMENT': 'gnome'}
wonderful_bing = WonderfulBing(arguments)
check_picture(wonderful_bing.picture_path)
with mock.patch('time.sleep', return_value=None):
wonderful_bing.download_picture()
assert path.exists(wonderful_bing.picture_path)
check_picture(wonderful_bing.picture_path)
def check_picture(picture_path):
if path.exists(picture_path):
os.remove(picture_path)
|
|
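The download test above writes into /tmp and cleans up by hand. A variant using pytest's tmp_path fixture keeps the filesystem handling inside pytest; this is a sketch only, the argument keys and the picture_path attribute are taken from the tests above, and the download still goes over the network.

from os import path

import mock

from wonderful_bing.wonderful_bing import WonderfulBing


def test_download_picture_into_tmp_path(tmp_path):
    arguments = {'--directory': str(tmp_path), 'ENVIRONMENT': 'gnome'}
    wonderful_bing = WonderfulBing(arguments)
    with mock.patch('time.sleep', return_value=None):
        wonderful_bing.download_picture()
    # The picture should land inside the per-test temporary directory.
    assert path.exists(wonderful_bing.picture_path)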
9950d25a2659509c81c29f9d834dd6c39e8b4015
|
scripts/blog_image_refactor.py
|
scripts/blog_image_refactor.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
ROOT_FOLDER = '/home/mjulian/coding/figarocorso.github.io/'
POST_FOLDER = ROOT_FOLDER + '_posts/day-by-day/'
IMAGE_FOLDER = ROOT_FOLDER + 'images/blog/'
def get_file_lines(filename):
with open(POST_FOLDER + filename) as f:
return f.readlines()
def get_post_image_urls(lines):
image_urls = []
for line in lines:
image_url = get_image_url_or_none(line)
if image_url:
image_urls.append(image_url)
return image_urls
def get_image_url_or_none(line):
# TODO: Use re
try:
image_url_beggining = line.index('](') + 2
image_url_ending = line.index(')')
line.index('![')
except ValueError:
return None
return line[image_url_beggining:image_url_ending]
def url_is_blog_image(url):
return 'blog.migueljulian.com' in url and 'uploads' in url
def get_post_image(image_urls):
for image_url in image_urls:
if url_is_blog_image(image_url):
return image_url
return None
def get_image_name(url):
return url.split('/')[-1]
def add_image_header(file_lines, image_name):
header = "image: /images/blog/%s\n" % image_name
file_lines.insert(3, header)
def replace_image_url(file_lines, image_name):
needle = 'http://blog.migueljulian.com/wp-content/uploads'
line_count = 0
for line in file_lines:
new_line = ''
if needle in line:
new_line = line.replace(needle, '/images/blog')
if new_line:
file_lines[line_count] = new_line
line_count += 1
def write_file(filename, lines):
print filename
with open(POST_FOLDER + filename, 'w') as f:
for line in lines:
f.write(line)
if __name__ == "__main__":
for filename in os.listdir(POST_FOLDER):
file_lines = get_file_lines(filename)
post_images = get_post_image_urls(file_lines)
post_image = get_post_image(post_images)
if post_image:
image_name = get_image_name(post_image)
add_image_header(file_lines, image_name)
replace_image_url(file_lines, image_name)
write_file(filename, file_lines)
|
Add image url replacement script
|
Add image url replacement script
|
Python
|
mit
|
figarocorso/figarocorso.github.io,figarocorso/figarocorso.github.io,figarocorso/figarocorso.github.io,figarocorso/figarocorso.github.io
|
Add image url replacement script
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
ROOT_FOLDER = '/home/mjulian/coding/figarocorso.github.io/'
POST_FOLDER = ROOT_FOLDER + '_posts/day-by-day/'
IMAGE_FOLDER = ROOT_FOLDER + 'images/blog/'
def get_file_lines(filename):
with open(POST_FOLDER + filename) as f:
return f.readlines()
def get_post_image_urls(lines):
image_urls = []
for line in lines:
image_url = get_image_url_or_none(line)
if image_url:
image_urls.append(image_url)
return image_urls
def get_image_url_or_none(line):
# TODO: Use re
try:
image_url_beggining = line.index('](') + 2
image_url_ending = line.index(')')
line.index('![')
except ValueError:
return None
return line[image_url_beggining:image_url_ending]
def url_is_blog_image(url):
return 'blog.migueljulian.com' in url and 'uploads' in url
def get_post_image(image_urls):
for image_url in image_urls:
if url_is_blog_image(image_url):
return image_url
return None
def get_image_name(url):
return url.split('/')[-1]
def add_image_header(file_lines, image_name):
header = "image: /images/blog/%s\n" % image_name
file_lines.insert(3, header)
def replace_image_url(file_lines, image_name):
needle = 'http://blog.migueljulian.com/wp-content/uploads'
line_count = 0
for line in file_lines:
new_line = ''
if needle in line:
new_line = line.replace(needle, '/images/blog')
if new_line:
file_lines[line_count] = new_line
line_count += 1
def write_file(filename, lines):
print filename
with open(POST_FOLDER + filename, 'w') as f:
for line in lines:
f.write(line)
if __name__ == "__main__":
for filename in os.listdir(POST_FOLDER):
file_lines = get_file_lines(filename)
post_images = get_post_image_urls(file_lines)
post_image = get_post_image(post_images)
if post_image:
image_name = get_image_name(post_image)
add_image_header(file_lines, image_name)
replace_image_url(file_lines, image_name)
write_file(filename, file_lines)
|
<commit_before><commit_msg>Add image url replacement script<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
ROOT_FOLDER = '/home/mjulian/coding/figarocorso.github.io/'
POST_FOLDER = ROOT_FOLDER + '_posts/day-by-day/'
IMAGE_FOLDER = ROOT_FOLDER + 'images/blog/'
def get_file_lines(filename):
with open(POST_FOLDER + filename) as f:
return f.readlines()
def get_post_image_urls(lines):
image_urls = []
for line in lines:
image_url = get_image_url_or_none(line)
if image_url:
image_urls.append(image_url)
return image_urls
def get_image_url_or_none(line):
# TODO: Use re
try:
image_url_beggining = line.index('](') + 2
image_url_ending = line.index(')')
line.index('![')
except ValueError:
return None
return line[image_url_beggining:image_url_ending]
def url_is_blog_image(url):
return 'blog.migueljulian.com' in url and 'uploads' in url
def get_post_image(image_urls):
for image_url in image_urls:
if url_is_blog_image(image_url):
return image_url
return None
def get_image_name(url):
return url.split('/')[-1]
def add_image_header(file_lines, image_name):
header = "image: /images/blog/%s\n" % image_name
file_lines.insert(3, header)
def replace_image_url(file_lines, image_name):
needle = 'http://blog.migueljulian.com/wp-content/uploads'
line_count = 0
for line in file_lines:
new_line = ''
if needle in line:
new_line = line.replace(needle, '/images/blog')
if new_line:
file_lines[line_count] = new_line
line_count += 1
def write_file(filename, lines):
print filename
with open(POST_FOLDER + filename, 'w') as f:
for line in lines:
f.write(line)
if __name__ == "__main__":
for filename in os.listdir(POST_FOLDER):
file_lines = get_file_lines(filename)
post_images = get_post_image_urls(file_lines)
post_image = get_post_image(post_images)
if post_image:
image_name = get_image_name(post_image)
add_image_header(file_lines, image_name)
replace_image_url(file_lines, image_name)
write_file(filename, file_lines)
|
Add image url replacement script#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
ROOT_FOLDER = '/home/mjulian/coding/figarocorso.github.io/'
POST_FOLDER = ROOT_FOLDER + '_posts/day-by-day/'
IMAGE_FOLDER = ROOT_FOLDER + 'images/blog/'
def get_file_lines(filename):
with open(POST_FOLDER + filename) as f:
return f.readlines()
def get_post_image_urls(lines):
image_urls = []
for line in lines:
image_url = get_image_url_or_none(line)
if image_url:
image_urls.append(image_url)
return image_urls
def get_image_url_or_none(line):
# TODO: Use re
try:
image_url_beggining = line.index('](') + 2
image_url_ending = line.index(')')
line.index('![')
except ValueError:
return None
return line[image_url_beggining:image_url_ending]
def url_is_blog_image(url):
return 'blog.migueljulian.com' in url and 'uploads' in url
def get_post_image(image_urls):
for image_url in image_urls:
if url_is_blog_image(image_url):
return image_url
return None
def get_image_name(url):
return url.split('/')[-1]
def add_image_header(file_lines, image_name):
header = "image: /images/blog/%s\n" % image_name
file_lines.insert(3, header)
def replace_image_url(file_lines, image_name):
needle = 'http://blog.migueljulian.com/wp-content/uploads'
line_count = 0
for line in file_lines:
new_line = ''
if needle in line:
new_line = line.replace(needle, '/images/blog')
if new_line:
file_lines[line_count] = new_line
line_count += 1
def write_file(filename, lines):
print filename
with open(POST_FOLDER + filename, 'w') as f:
for line in lines:
f.write(line)
if __name__ == "__main__":
for filename in os.listdir(POST_FOLDER):
file_lines = get_file_lines(filename)
post_images = get_post_image_urls(file_lines)
post_image = get_post_image(post_images)
if post_image:
image_name = get_image_name(post_image)
add_image_header(file_lines, image_name)
replace_image_url(file_lines, image_name)
write_file(filename, file_lines)
|
<commit_before><commit_msg>Add image url replacement script<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
ROOT_FOLDER = '/home/mjulian/coding/figarocorso.github.io/'
POST_FOLDER = ROOT_FOLDER + '_posts/day-by-day/'
IMAGE_FOLDER = ROOT_FOLDER + 'images/blog/'
def get_file_lines(filename):
with open(POST_FOLDER + filename) as f:
return f.readlines()
def get_post_image_urls(lines):
image_urls = []
for line in lines:
image_url = get_image_url_or_none(line)
if image_url:
image_urls.append(image_url)
return image_urls
def get_image_url_or_none(line):
# TODO: Use re
try:
image_url_beggining = line.index('](') + 2
image_url_ending = line.index(')')
line.index('![')
except ValueError:
return None
return line[image_url_beggining:image_url_ending]
def url_is_blog_image(url):
return 'blog.migueljulian.com' in url and 'uploads' in url
def get_post_image(image_urls):
for image_url in image_urls:
if url_is_blog_image(image_url):
return image_url
return None
def get_image_name(url):
return url.split('/')[-1]
def add_image_header(file_lines, image_name):
header = "image: /images/blog/%s\n" % image_name
file_lines.insert(3, header)
def replace_image_url(file_lines, image_name):
needle = 'http://blog.migueljulian.com/wp-content/uploads'
line_count = 0
for line in file_lines:
new_line = ''
if needle in line:
new_line = line.replace(needle, '/images/blog')
if new_line:
file_lines[line_count] = new_line
line_count += 1
def write_file(filename, lines):
print filename
with open(POST_FOLDER + filename, 'w') as f:
for line in lines:
f.write(line)
if __name__ == "__main__":
for filename in os.listdir(POST_FOLDER):
file_lines = get_file_lines(filename)
post_images = get_post_image_urls(file_lines)
post_image = get_post_image(post_images)
if post_image:
image_name = get_image_name(post_image)
add_image_header(file_lines, image_name)
replace_image_url(file_lines, image_name)
write_file(filename, file_lines)
|
|
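get_image_url_or_none above carries a "TODO: Use re". A regex-based sketch of the same markdown-image extraction is below; it returns the first image URL on a line, and edge cases (several images on one line, parentheses inside URLs) may behave slightly differently from the index()-based version.

import re

# Matches markdown image syntax  and captures the URL part.
IMAGE_RE = re.compile(r'!\[[^\]]*\]\(([^)]+)\)')


def get_image_url_or_none(line):
    match = IMAGE_RE.search(line)
    return match.group(1) if match else None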
ed7570acc307207b96e1df848edbeef104230179
|
material/admin/modules.py
|
material/admin/modules.py
|
from karenina import modules
class Admin(modules.InstallableModule):
icon = "mdi-action-settings-applications"
order = 1000
@property
def label(self):
return 'Administration'
def has_perm(self, user):
return user.is_staff
|
Add module declaration for karenina
|
Add module declaration for karenina
|
Python
|
bsd-3-clause
|
afifnz/django-material,2947721120/django-material,thiagoramos-luizalabs/django-material,un33k/django-material,Axelio/django-material,thiagoramos-luizalabs/django-material,Axelio/django-material,sourabhdattawad/django-material,viewflow/django-material,afifnz/django-material,MonsterKiller/django-material,koopauy/django-material,sourabhdattawad/django-material,2947721120/django-material,refnode/django-material,lukasgarcya/django-material,thiagoramos-luizalabs/django-material,MonsterKiller/django-material,pombredanne/django-material,viewflow/django-material,sourabhdattawad/django-material,un33k/django-material,pombredanne/django-material,barseghyanartur/django-material,lukasgarcya/django-material,MonsterKiller/django-material,2947721120/django-material,afifnz/django-material,viewflow/django-material,un33k/django-material,barseghyanartur/django-material,Axelio/django-material,refnode/django-material,koopauy/django-material,lukasgarcya/django-material,barseghyanartur/django-material,koopauy/django-material,pombredanne/django-material,refnode/django-material
|
Add module declaration for karenina
|
from karenina import modules
class Admin(modules.InstallableModule):
icon = "mdi-action-settings-applications"
order = 1000
@property
def label(self):
return 'Administration'
def has_perm(self, user):
return user.is_staff
|
<commit_before><commit_msg>Add module declaration for karenina<commit_after>
|
from karenina import modules
class Admin(modules.InstallableModule):
icon = "mdi-action-settings-applications"
order = 1000
@property
def label(self):
return 'Administration'
def has_perm(self, user):
return user.is_staff
|
Add module declaration for kareninafrom karenina import modules
class Admin(modules.InstallableModule):
icon = "mdi-action-settings-applications"
order = 1000
@property
def label(self):
return 'Administration'
def has_perm(self, user):
return user.is_staff
|
<commit_before><commit_msg>Add module declaration for karenina<commit_after>from karenina import modules
class Admin(modules.InstallableModule):
icon = "mdi-action-settings-applications"
order = 1000
@property
def label(self):
return 'Administration'
def has_perm(self, user):
return user.is_staff
|
|
cc52901c65480a4a3662f5302b2b2a92832a3dd5
|
python/reverse-a-linked-list.py
|
python/reverse-a-linked-list.py
|
#!/bin/python3
import math
import os
import random
import re
import sys
class SinglyLinkedListNode:
def __init__(self, node_data):
self.data = node_data
self.next = None
class SinglyLinkedList:
def __init__(self):
self.head = None
self.tail = None
def insert_node(self, node_data):
node = SinglyLinkedListNode(node_data)
if not self.head:
self.head = node
else:
self.tail.next = node
self.tail = node
def print_singly_linked_list(node, sep, fptr):
while node:
fptr.write(str(node.data))
node = node.next
if node:
fptr.write(sep)
# Complete the reverse function below.
#
# For your reference:
#
# SinglyLinkedListNode:
# int data
# SinglyLinkedListNode next
#
#
def reverse(head):
return reverse_node(head)
def reverse_node(node, prev=None):
if node is None:
return prev
next_node_to_reverse = node.next
node.next = prev
return reverse_node(next_node_to_reverse, node)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
tests = int(input())
for tests_itr in range(tests):
llist_count = int(input())
llist = SinglyLinkedList()
for _ in range(llist_count):
llist_item = int(input())
llist.insert_node(llist_item)
llist1 = reverse(llist.head)
print_singly_linked_list(llist1, ' ', fptr)
fptr.write('\n')
fptr.close()
|
Solve reverse a linked list
|
Solve reverse a linked list
|
Python
|
mit
|
rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank
|
Solve reverse a linked list
|
#!/bin/python3
import math
import os
import random
import re
import sys
class SinglyLinkedListNode:
def __init__(self, node_data):
self.data = node_data
self.next = None
class SinglyLinkedList:
def __init__(self):
self.head = None
self.tail = None
def insert_node(self, node_data):
node = SinglyLinkedListNode(node_data)
if not self.head:
self.head = node
else:
self.tail.next = node
self.tail = node
def print_singly_linked_list(node, sep, fptr):
while node:
fptr.write(str(node.data))
node = node.next
if node:
fptr.write(sep)
# Complete the reverse function below.
#
# For your reference:
#
# SinglyLinkedListNode:
# int data
# SinglyLinkedListNode next
#
#
def reverse(head):
return reverse_node(head)
def reverse_node(node, prev=None):
if node is None:
return prev
next_node_to_reverse = node.next
node.next = prev
return reverse_node(next_node_to_reverse, node)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
tests = int(input())
for tests_itr in range(tests):
llist_count = int(input())
llist = SinglyLinkedList()
for _ in range(llist_count):
llist_item = int(input())
llist.insert_node(llist_item)
llist1 = reverse(llist.head)
print_singly_linked_list(llist1, ' ', fptr)
fptr.write('\n')
fptr.close()
|
<commit_before><commit_msg>Solve reverse a linked list<commit_after>
|
#!/bin/python3
import math
import os
import random
import re
import sys
class SinglyLinkedListNode:
def __init__(self, node_data):
self.data = node_data
self.next = None
class SinglyLinkedList:
def __init__(self):
self.head = None
self.tail = None
def insert_node(self, node_data):
node = SinglyLinkedListNode(node_data)
if not self.head:
self.head = node
else:
self.tail.next = node
self.tail = node
def print_singly_linked_list(node, sep, fptr):
while node:
fptr.write(str(node.data))
node = node.next
if node:
fptr.write(sep)
# Complete the reverse function below.
#
# For your reference:
#
# SinglyLinkedListNode:
# int data
# SinglyLinkedListNode next
#
#
def reverse(head):
return reverse_node(head)
def reverse_node(node, prev=None):
if node is None:
return prev
next_node_to_reverse = node.next
node.next = prev
return reverse_node(next_node_to_reverse, node)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
tests = int(input())
for tests_itr in range(tests):
llist_count = int(input())
llist = SinglyLinkedList()
for _ in range(llist_count):
llist_item = int(input())
llist.insert_node(llist_item)
llist1 = reverse(llist.head)
print_singly_linked_list(llist1, ' ', fptr)
fptr.write('\n')
fptr.close()
|
Solve reverse a linked list#!/bin/python3
import math
import os
import random
import re
import sys
class SinglyLinkedListNode:
def __init__(self, node_data):
self.data = node_data
self.next = None
class SinglyLinkedList:
def __init__(self):
self.head = None
self.tail = None
def insert_node(self, node_data):
node = SinglyLinkedListNode(node_data)
if not self.head:
self.head = node
else:
self.tail.next = node
self.tail = node
def print_singly_linked_list(node, sep, fptr):
while node:
fptr.write(str(node.data))
node = node.next
if node:
fptr.write(sep)
# Complete the reverse function below.
#
# For your reference:
#
# SinglyLinkedListNode:
# int data
# SinglyLinkedListNode next
#
#
def reverse(head):
return reverse_node(head)
def reverse_node(node, prev=None):
if node is None:
return prev
next_node_to_reverse = node.next
node.next = prev
return reverse_node(next_node_to_reverse, node)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
tests = int(input())
for tests_itr in range(tests):
llist_count = int(input())
llist = SinglyLinkedList()
for _ in range(llist_count):
llist_item = int(input())
llist.insert_node(llist_item)
llist1 = reverse(llist.head)
print_singly_linked_list(llist1, ' ', fptr)
fptr.write('\n')
fptr.close()
|
<commit_before><commit_msg>Solve reverse a linked list<commit_after>#!/bin/python3
import math
import os
import random
import re
import sys
class SinglyLinkedListNode:
def __init__(self, node_data):
self.data = node_data
self.next = None
class SinglyLinkedList:
def __init__(self):
self.head = None
self.tail = None
def insert_node(self, node_data):
node = SinglyLinkedListNode(node_data)
if not self.head:
self.head = node
else:
self.tail.next = node
self.tail = node
def print_singly_linked_list(node, sep, fptr):
while node:
fptr.write(str(node.data))
node = node.next
if node:
fptr.write(sep)
# Complete the reverse function below.
#
# For your reference:
#
# SinglyLinkedListNode:
# int data
# SinglyLinkedListNode next
#
#
def reverse(head):
return reverse_node(head)
def reverse_node(node, prev=None):
if node is None:
return prev
next_node_to_reverse = node.next
node.next = prev
return reverse_node(next_node_to_reverse, node)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
tests = int(input())
for tests_itr in range(tests):
llist_count = int(input())
llist = SinglyLinkedList()
for _ in range(llist_count):
llist_item = int(input())
llist.insert_node(llist_item)
llist1 = reverse(llist.head)
print_singly_linked_list(llist1, ' ', fptr)
fptr.write('\n')
fptr.close()
|
|
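The reverse above recurses once per node, so very long lists can hit Python's default recursion limit (roughly 1000 frames). An equivalent iterative sketch, written against the same SinglyLinkedListNode shape used in the record:

def reverse(head):
    # Walk the list once, flipping each node's next pointer as we go.
    prev = None
    node = head
    while node:
        node.next, prev, node = prev, node, node.next
    return prev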
82e3f0d72f6d334108c4777c0e4906d258c9efc6
|
apps/network/tests/test_routes/test_users.py
|
apps/network/tests/test_routes/test_users.py
|
def test_create_user(client):
result = client.post("/users/", data={"username": "test", "password": "1234"})
assert result.status_code == 200
assert result.get_json() == {"msg": "User created succesfully!"}
def test_get_all_users(client):
result = client.get("/users/")
assert result.status_code == 200
assert result.get_json() == {"users": ["Bob", "Alice", "James"]}
def test_get_specific_user(client):
result = client.get("/users/5484626")
assert result.status_code == 200
assert result.get_json() == {"user": {"name": "Bob", "id": "5484626"}}
def test_search_users(client):
result = client.post("/users/search", data={"query": "query_sample"})
assert result.status_code == 200
assert result.get_json() == {"users": ["Bob", "Alice", "James"]}
def test_search_users(client):
result = client.put("/users/546313/email", data={"email": "new_email@email.com"})
assert result.status_code == 200
assert result.get_json() == {"msg": "User email was changed succesfully!"}
def test_change_password(client):
result = client.put("/users/546313/password", data={"password": "new_password123"})
assert result.status_code == 200
assert result.get_json() == {"msg": "User password was changed succesfully!"}
def test_change_role(client):
result = client.put("/users/546313/role", data={"role": "new_role"})
assert result.status_code == 200
assert result.get_json() == {"msg": "User role was changed succesfully!"}
def test_delete_user(client):
result = client.delete("/users/546313")
assert result.status_code == 200
assert result.get_json() == {"msg": "User was deleted succesfully!"}
def test_login(client):
result = client.post("/users/login", data={"username": "user", "password": "pwd123"})
assert result.status_code == 200
|
ADD Network users unit tests
|
ADD Network users unit tests
|
Python
|
apache-2.0
|
OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft
|
ADD Network users unit tests
|
def test_create_user(client):
result = client.post("/users/", data={"username": "test", "password": "1234"})
assert result.status_code == 200
assert result.get_json() == {"msg": "User created succesfully!"}
def test_get_all_users(client):
result = client.get("/users/")
assert result.status_code == 200
assert result.get_json() == {"users": ["Bob", "Alice", "James"]}
def test_get_specific_user(client):
result = client.get("/users/5484626")
assert result.status_code == 200
assert result.get_json() == {"user": {"name": "Bob", "id": "5484626"}}
def test_search_users(client):
result = client.post("/users/search", data={"query": "query_sample"})
assert result.status_code == 200
assert result.get_json() == {"users": ["Bob", "Alice", "James"]}
def test_search_users(client):
result = client.put("/users/546313/email", data={"email": "new_email@email.com"})
assert result.status_code == 200
assert result.get_json() == {"msg": "User email was changed succesfully!"}
def test_change_password(client):
result = client.put("/users/546313/password", data={"password": "new_password123"})
assert result.status_code == 200
assert result.get_json() == {"msg": "User password was changed succesfully!"}
def test_change_role(client):
result = client.put("/users/546313/role", data={"role": "new_role"})
assert result.status_code == 200
assert result.get_json() == {"msg": "User role was changed succesfully!"}
def test_delete_user(client):
result = client.delete("/users/546313")
assert result.status_code == 200
assert result.get_json() == {"msg": "User was deleted succesfully!"}
def test_login(client):
result = client.post("/users/login", data={"username": "user", "password": "pwd123"})
assert result.status_code == 200
|
<commit_before><commit_msg>ADD Network users unit tests<commit_after>
|
def test_create_user(client):
result = client.post("/users/", data={"username": "test", "password": "1234"})
assert result.status_code == 200
assert result.get_json() == {"msg": "User created succesfully!"}
def test_get_all_users(client):
result = client.get("/users/")
assert result.status_code == 200
assert result.get_json() == {"users": ["Bob", "Alice", "James"]}
def test_get_specific_user(client):
result = client.get("/users/5484626")
assert result.status_code == 200
assert result.get_json() == {"user": {"name": "Bob", "id": "5484626"}}
def test_search_users(client):
result = client.post("/users/search", data={"query": "query_sample"})
assert result.status_code == 200
assert result.get_json() == {"users": ["Bob", "Alice", "James"]}
def test_search_users(client):
result = client.put("/users/546313/email", data={"email": "new_email@email.com"})
assert result.status_code == 200
assert result.get_json() == {"msg": "User email was changed succesfully!"}
def test_change_password(client):
result = client.put("/users/546313/password", data={"password": "new_password123"})
assert result.status_code == 200
assert result.get_json() == {"msg": "User password was changed succesfully!"}
def test_change_role(client):
result = client.put("/users/546313/role", data={"role": "new_role"})
assert result.status_code == 200
assert result.get_json() == {"msg": "User role was changed succesfully!"}
def test_delete_user(client):
result = client.delete("/users/546313")
assert result.status_code == 200
assert result.get_json() == {"msg": "User was deleted succesfully!"}
def test_login(client):
result = client.post("/users/login", data={"username": "user", "password": "pwd123"})
assert result.status_code == 200
|
ADD Network users unit tests
def test_create_user(client):
result = client.post("/users/", data={"username": "test", "password": "1234"})
assert result.status_code == 200
assert result.get_json() == {"msg": "User created succesfully!"}
def test_get_all_users(client):
result = client.get("/users/")
assert result.status_code == 200
assert result.get_json() == {"users": ["Bob", "Alice", "James"]}
def test_get_specific_user(client):
result = client.get("/users/5484626")
assert result.status_code == 200
assert result.get_json() == {"user": {"name": "Bob", "id": "5484626"}}
def test_search_users(client):
result = client.post("/users/search", data={"query": "query_sample"})
assert result.status_code == 200
assert result.get_json() == {"users": ["Bob", "Alice", "James"]}
def test_search_users(client):
result = client.put("/users/546313/email", data={"email": "new_email@email.com"})
assert result.status_code == 200
assert result.get_json() == {"msg": "User email was changed succesfully!"}
def test_change_password(client):
result = client.put("/users/546313/password", data={"password": "new_password123"})
assert result.status_code == 200
assert result.get_json() == {"msg": "User password was changed succesfully!"}
def test_change_role(client):
result = client.put("/users/546313/role", data={"role": "new_role"})
assert result.status_code == 200
assert result.get_json() == {"msg": "User role was changed succesfully!"}
def test_delete_user(client):
result = client.delete("/users/546313")
assert result.status_code == 200
assert result.get_json() == {"msg": "User was deleted succesfully!"}
def test_login(client):
result = client.post("/users/login", data={"username": "user", "password": "pwd123"})
assert result.status_code == 200
|
<commit_before><commit_msg>ADD Network users unit tests<commit_after>
def test_create_user(client):
result = client.post("/users/", data={"username": "test", "password": "1234"})
assert result.status_code == 200
assert result.get_json() == {"msg": "User created succesfully!"}
def test_get_all_users(client):
result = client.get("/users/")
assert result.status_code == 200
assert result.get_json() == {"users": ["Bob", "Alice", "James"]}
def test_get_specific_user(client):
result = client.get("/users/5484626")
assert result.status_code == 200
assert result.get_json() == {"user": {"name": "Bob", "id": "5484626"}}
def test_search_users(client):
result = client.post("/users/search", data={"query": "query_sample"})
assert result.status_code == 200
assert result.get_json() == {"users": ["Bob", "Alice", "James"]}
def test_search_users(client):
result = client.put("/users/546313/email", data={"email": "new_email@email.com"})
assert result.status_code == 200
assert result.get_json() == {"msg": "User email was changed succesfully!"}
def test_change_password(client):
result = client.put("/users/546313/password", data={"password": "new_password123"})
assert result.status_code == 200
assert result.get_json() == {"msg": "User password was changed succesfully!"}
def test_change_role(client):
result = client.put("/users/546313/role", data={"role": "new_role"})
assert result.status_code == 200
assert result.get_json() == {"msg": "User role was changed succesfully!"}
def test_delete_user(client):
result = client.delete("/users/546313")
assert result.status_code == 200
assert result.get_json() == {"msg": "User was deleted succesfully!"}
def test_login(client):
result = client.post("/users/login", data={"username": "user", "password": "pwd123"})
assert result.status_code == 200
|
|
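In the tests above, two functions are both named test_search_users, so under pytest the second definition shadows the first and the search case is never collected. A sketch of the intended split follows; the routes, payloads and the misspelled "succesfully" message are kept exactly as they appear in the record, and test_change_email is a made-up name for the shadowed case.

def test_search_users(client):
    result = client.post("/users/search", data={"query": "query_sample"})
    assert result.status_code == 200
    assert result.get_json() == {"users": ["Bob", "Alice", "James"]}


def test_change_email(client):
    result = client.put("/users/546313/email", data={"email": "new_email@email.com"})
    assert result.status_code == 200
    assert result.get_json() == {"msg": "User email was changed succesfully!"}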
58221f7915d6f22e882e2094df57aa47840b23a6
|
tests/grammar_creation_test/TerminalAdding.py
|
tests/grammar_creation_test/TerminalAdding.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class TerminalAddingTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Create class for terminal adding test when gramar is create
|
Create class for terminal adding test when gramar is create
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Create class for terminal adding test when gramar is create
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class TerminalAddingTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Create class for terminal adding test when gramar is create<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class TerminalAddingTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Create class for terminal adding test when gramar is create#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class TerminalAddingTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Create class for terminal adding test when gramar is create<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class TerminalAddingTest(TestCase):
pass
if __name__ == '__main__':
main()
|
|
522918a2edd8c664771cc8dde2c4e91174ff83f7
|
sympy/utilities/tests/test_exceptions.py
|
sympy/utilities/tests/test_exceptions.py
|
import warnings
from sympy.core.decorators import deprecated
from sympy.utilities.exceptions import SymPyDeprecationWarning
def test_deprecated():
@deprecated(useinstead="bar", issue=1234, deprecated_since_version="0.7.2")
def foo():
return
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
foo()
assert len(w) == 1
assert issubclass(w[-1].category, SymPyDeprecationWarning)
assert str(w[-1].message) == """\n\n\
foo has been deprecated since SymPy 0.7.2. Use bar instead. See\n\
https://github.com/sympy/sympy/issues/1234 for more info.\n\
"""
|
Add coverage test for "deprecated" decorater
|
Add coverage test for "deprecated" decorater
|
Python
|
bsd-3-clause
|
diofant/diofant,skirpichev/omg
|
Add coverage test for "deprecated" decorater
|
import warnings
from sympy.core.decorators import deprecated
from sympy.utilities.exceptions import SymPyDeprecationWarning
def test_deprecated():
@deprecated(useinstead="bar", issue=1234, deprecated_since_version="0.7.2")
def foo():
return
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
foo()
assert len(w) == 1
assert issubclass(w[-1].category, SymPyDeprecationWarning)
assert str(w[-1].message) == """\n\n\
foo has been deprecated since SymPy 0.7.2. Use bar instead. See\n\
https://github.com/sympy/sympy/issues/1234 for more info.\n\
"""
|
<commit_before><commit_msg>Add coverage test for "deprecated" decorater<commit_after>
|
import warnings
from sympy.core.decorators import deprecated
from sympy.utilities.exceptions import SymPyDeprecationWarning
def test_deprecated():
@deprecated(useinstead="bar", issue=1234, deprecated_since_version="0.7.2")
def foo():
return
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
foo()
assert len(w) == 1
assert issubclass(w[-1].category, SymPyDeprecationWarning)
assert str(w[-1].message) == """\n\n\
foo has been deprecated since SymPy 0.7.2. Use bar instead. See\n\
https://github.com/sympy/sympy/issues/1234 for more info.\n\
"""
|
Add coverage test for "deprecated" decoraterimport warnings
from sympy.core.decorators import deprecated
from sympy.utilities.exceptions import SymPyDeprecationWarning
def test_deprecated():
@deprecated(useinstead="bar", issue=1234, deprecated_since_version="0.7.2")
def foo():
return
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
foo()
assert len(w) == 1
assert issubclass(w[-1].category, SymPyDeprecationWarning)
assert str(w[-1].message) == """\n\n\
foo has been deprecated since SymPy 0.7.2. Use bar instead. See\n\
https://github.com/sympy/sympy/issues/1234 for more info.\n\
"""
|
<commit_before><commit_msg>Add coverage test for "deprecated" decorater<commit_after>import warnings
from sympy.core.decorators import deprecated
from sympy.utilities.exceptions import SymPyDeprecationWarning
def test_deprecated():
@deprecated(useinstead="bar", issue=1234, deprecated_since_version="0.7.2")
def foo():
return
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
foo()
assert len(w) == 1
assert issubclass(w[-1].category, SymPyDeprecationWarning)
assert str(w[-1].message) == """\n\n\
foo has been deprecated since SymPy 0.7.2. Use bar instead. See\n\
https://github.com/sympy/sympy/issues/1234 for more info.\n\
"""
|
|
57f20289f942489c961ccbc9f7f8c5c3f2cb8c83
|
test/widgets/test_cpu.py
|
test/widgets/test_cpu.py
|
# Copyright (c) 2021 elParaguayo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Widget specific tests
import sys
from importlib import reload
from types import ModuleType
import pytest
import libqtile.config
import libqtile.widget
from libqtile.bar import Bar
class MockPsutil(ModuleType):
@classmethod
def cpu_percent(cls):
return 2.6
@classmethod
def cpu_freq(cls):
class Freq:
def __init__(self):
self.current = 500.067
self.min = 400.0
self.max = 2800.0
return Freq()
@pytest.fixture
def cpu_manager(monkeypatch, manager_nospawn, minimal_conf_noscreen):
monkeypatch.setitem(sys.modules, "psutil", MockPsutil("psutil"))
from libqtile.widget import cpu
reload(cpu)
config = minimal_conf_noscreen
config.screens = [
libqtile.config.Screen(
top=Bar(
[cpu.CPU()],
10
)
)
]
manager_nospawn.start(config)
yield manager_nospawn
def test_cpu(cpu_manager):
assert cpu_manager.c.widget["cpu"].info()["text"] == "CPU 0.5GHz 2.6%"
|
Add test for CPU widget
|
Add test for CPU widget
|
Python
|
mit
|
qtile/qtile,qtile/qtile,ramnes/qtile,ramnes/qtile
|
Add test for CPU widget
|
# Copyright (c) 2021 elParaguayo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Widget specific tests
import sys
from importlib import reload
from types import ModuleType
import pytest
import libqtile.config
import libqtile.widget
from libqtile.bar import Bar
class MockPsutil(ModuleType):
@classmethod
def cpu_percent(cls):
return 2.6
@classmethod
def cpu_freq(cls):
class Freq:
def __init__(self):
self.current = 500.067
self.min = 400.0
self.max = 2800.0
return Freq()
@pytest.fixture
def cpu_manager(monkeypatch, manager_nospawn, minimal_conf_noscreen):
monkeypatch.setitem(sys.modules, "psutil", MockPsutil("psutil"))
from libqtile.widget import cpu
reload(cpu)
config = minimal_conf_noscreen
config.screens = [
libqtile.config.Screen(
top=Bar(
[cpu.CPU()],
10
)
)
]
manager_nospawn.start(config)
yield manager_nospawn
def test_cpu(cpu_manager):
assert cpu_manager.c.widget["cpu"].info()["text"] == "CPU 0.5GHz 2.6%"
|
<commit_before><commit_msg>Add test for CPU widget<commit_after>
|
# Copyright (c) 2021 elParaguayo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Widget specific tests
import sys
from importlib import reload
from types import ModuleType
import pytest
import libqtile.config
import libqtile.widget
from libqtile.bar import Bar
class MockPsutil(ModuleType):
@classmethod
def cpu_percent(cls):
return 2.6
@classmethod
def cpu_freq(cls):
class Freq:
def __init__(self):
self.current = 500.067
self.min = 400.0
self.max = 2800.0
return Freq()
@pytest.fixture
def cpu_manager(monkeypatch, manager_nospawn, minimal_conf_noscreen):
monkeypatch.setitem(sys.modules, "psutil", MockPsutil("psutil"))
from libqtile.widget import cpu
reload(cpu)
config = minimal_conf_noscreen
config.screens = [
libqtile.config.Screen(
top=Bar(
[cpu.CPU()],
10
)
)
]
manager_nospawn.start(config)
yield manager_nospawn
def test_cpu(cpu_manager):
assert cpu_manager.c.widget["cpu"].info()["text"] == "CPU 0.5GHz 2.6%"
|
Add test for CPU widget# Copyright (c) 2021 elParaguayo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Widget specific tests
import sys
from importlib import reload
from types import ModuleType
import pytest
import libqtile.config
import libqtile.widget
from libqtile.bar import Bar
class MockPsutil(ModuleType):
@classmethod
def cpu_percent(cls):
return 2.6
@classmethod
def cpu_freq(cls):
class Freq:
def __init__(self):
self.current = 500.067
self.min = 400.0
self.max = 2800.0
return Freq()
@pytest.fixture
def cpu_manager(monkeypatch, manager_nospawn, minimal_conf_noscreen):
monkeypatch.setitem(sys.modules, "psutil", MockPsutil("psutil"))
from libqtile.widget import cpu
reload(cpu)
config = minimal_conf_noscreen
config.screens = [
libqtile.config.Screen(
top=Bar(
[cpu.CPU()],
10
)
)
]
manager_nospawn.start(config)
yield manager_nospawn
def test_cpu(cpu_manager):
assert cpu_manager.c.widget["cpu"].info()["text"] == "CPU 0.5GHz 2.6%"
|
<commit_before><commit_msg>Add test for CPU widget<commit_after># Copyright (c) 2021 elParaguayo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Widget specific tests
import sys
from importlib import reload
from types import ModuleType
import pytest
import libqtile.config
import libqtile.widget
from libqtile.bar import Bar
class MockPsutil(ModuleType):
@classmethod
def cpu_percent(cls):
return 2.6
@classmethod
def cpu_freq(cls):
class Freq:
def __init__(self):
self.current = 500.067
self.min = 400.0
self.max = 2800.0
return Freq()
@pytest.fixture
def cpu_manager(monkeypatch, manager_nospawn, minimal_conf_noscreen):
monkeypatch.setitem(sys.modules, "psutil", MockPsutil("psutil"))
from libqtile.widget import cpu
reload(cpu)
config = minimal_conf_noscreen
config.screens = [
libqtile.config.Screen(
top=Bar(
[cpu.CPU()],
10
)
)
]
manager_nospawn.start(config)
yield manager_nospawn
def test_cpu(cpu_manager):
assert cpu_manager.c.widget["cpu"].info()["text"] == "CPU 0.5GHz 2.6%"
|
|
224c7487e8f8f7b19fe81b02b60a72ce5b2cc12f
|
find_params.py
|
find_params.py
|
import asl
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
n_samples = len(asl.data)
X = asl.data
y = asl.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=0)
#tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100,
# 1000]}, {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [0.001, 0.00055, 0.0001], 'C':
[10, 50, 100]}]
for score in ['precision', 'recall']:
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5, scoring='%s_weighted' %
score)
clf.fit(X_train, y_train)
print score, clf.best_params_
|
Add script to find parameters for SVC
|
Add script to find parameters for SVC
|
Python
|
mit
|
ssaamm/sign-language-translator,ssaamm/sign-language-tutor,ssaamm/sign-language-translator,ssaamm/sign-language-tutor
|
Add script to find parameters for SVC
|
import asl
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
n_samples = len(asl.data)
X = asl.data
y = asl.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=0)
#tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100,
# 1000]}, {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [0.001, 0.00055, 0.0001], 'C':
[10, 50, 100]}]
for score in ['precision', 'recall']:
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5, scoring='%s_weighted' %
score)
clf.fit(X_train, y_train)
print score, clf.best_params_
|
<commit_before><commit_msg>Add script to find parameters for SVC<commit_after>
|
import asl
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
n_samples = len(asl.data)
X = asl.data
y = asl.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=0)
#tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100,
# 1000]}, {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [0.001, 0.00055, 0.0001], 'C':
[10, 50, 100]}]
for score in ['precision', 'recall']:
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5, scoring='%s_weighted' %
score)
clf.fit(X_train, y_train)
print score, clf.best_params_
|
Add script to find parameters for SVCimport asl
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
n_samples = len(asl.data)
X = asl.data
y = asl.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=0)
#tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100,
# 1000]}, {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [0.001, 0.00055, 0.0001], 'C':
[10, 50, 100]}]
for score in ['precision', 'recall']:
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5, scoring='%s_weighted' %
score)
clf.fit(X_train, y_train)
print score, clf.best_params_
|
<commit_before><commit_msg>Add script to find parameters for SVC<commit_after>import asl
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
n_samples = len(asl.data)
X = asl.data
y = asl.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=0)
#tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100,
# 1000]}, {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [0.001, 0.00055, 0.0001], 'C':
[10, 50, 100]}]
for score in ['precision', 'recall']:
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5, scoring='%s_weighted' %
score)
clf.fit(X_train, y_train)
print score, clf.best_params_
|
|
9eb265fa2142b559b15063bc9322fc60b46a300b
|
mezzanine/project_template/deploy/gunicorn.conf.py
|
mezzanine/project_template/deploy/gunicorn.conf.py
|
from __future__ import unicode_literals
import os
bind = "127.0.0.1:%(gunicorn_port)s"
workers = (os.sysconf("SC_NPROCESSORS_ONLN") * 2) + 1
loglevel = "error"
proc_name = "%(proj_name)s"
|
from __future__ import unicode_literals
import multiprocessing
bind = "127.0.0.1:%(gunicorn_port)s"
workers = multiprocessing.cpu_count() * 2 + 1
loglevel = "error"
proc_name = "%(proj_name)s"
|
Update to use multiprocessing library
|
Update to use multiprocessing library
|
Python
|
bsd-2-clause
|
promil23/mezzanine,damnfine/mezzanine,Cajoline/mezzanine,stephenmcd/mezzanine,agepoly/mezzanine,damnfine/mezzanine,mush42/mezzanine,eino-makitalo/mezzanine,mush42/mezzanine,stephenmcd/mezzanine,stephenmcd/mezzanine,emile2016/mezzanine,frankier/mezzanine,promil23/mezzanine,readevalprint/mezzanine,webounty/mezzanine,dovydas/mezzanine,ryneeverett/mezzanine,wbtuomela/mezzanine,tuxinhang1989/mezzanine,cccs-web/mezzanine,joshcartme/mezzanine,tuxinhang1989/mezzanine,joshcartme/mezzanine,eino-makitalo/mezzanine,mush42/mezzanine,dsanders11/mezzanine,dekomote/mezzanine-modeltranslation-backport,Kniyl/mezzanine,vladir/mezzanine,tuxinhang1989/mezzanine,agepoly/mezzanine,AlexHill/mezzanine,molokov/mezzanine,Skytorn86/mezzanine,Kniyl/mezzanine,SoLoHiC/mezzanine,molokov/mezzanine,dustinrb/mezzanine,sjdines/mezzanine,gradel/mezzanine,saintbird/mezzanine,PegasusWang/mezzanine,cccs-web/mezzanine,PegasusWang/mezzanine,christianwgd/mezzanine,wyzex/mezzanine,adrian-the-git/mezzanine,sjdines/mezzanine,PegasusWang/mezzanine,agepoly/mezzanine,damnfine/mezzanine,eino-makitalo/mezzanine,frankchin/mezzanine,jerivas/mezzanine,viaregio/mezzanine,douglaskastle/mezzanine,douglaskastle/mezzanine,gradel/mezzanine,SoLoHiC/mezzanine,adrian-the-git/mezzanine,jjz/mezzanine,ZeroXn/mezzanine,biomassives/mezzanine,promil23/mezzanine,spookylukey/mezzanine,frankchin/mezzanine,fusionbox/mezzanine,frankchin/mezzanine,dsanders11/mezzanine,gradel/mezzanine,sjuxax/mezzanine,ZeroXn/mezzanine,viaregio/mezzanine,frankier/mezzanine,spookylukey/mezzanine,Skytorn86/mezzanine,jjz/mezzanine,saintbird/mezzanine,dsanders11/mezzanine,webounty/mezzanine,vladir/mezzanine,webounty/mezzanine,jerivas/mezzanine,readevalprint/mezzanine,wbtuomela/mezzanine,fusionbox/mezzanine,theclanks/mezzanine,ryneeverett/mezzanine,batpad/mezzanine,frankier/mezzanine,jjz/mezzanine,sjuxax/mezzanine,douglaskastle/mezzanine,Cajoline/mezzanine,dustinrb/mezzanine,emile2016/mezzanine,viaregio/mezzanine,dovydas/mezzanine,biomassives/mezzanine,AlexHill/mezzanine,molokov/mezzanine,jerivas/mezzanine,Cicero-Zhao/mezzanine,nikolas/mezzanine,ryneeverett/mezzanine,dekomote/mezzanine-modeltranslation-backport,dovydas/mezzanine,sjdines/mezzanine,vladir/mezzanine,adrian-the-git/mezzanine,emile2016/mezzanine,industrydive/mezzanine,saintbird/mezzanine,batpad/mezzanine,spookylukey/mezzanine,geodesign/mezzanine,ZeroXn/mezzanine,sjuxax/mezzanine,industrydive/mezzanine,geodesign/mezzanine,SoLoHiC/mezzanine,wyzex/mezzanine,wbtuomela/mezzanine,nikolas/mezzanine,readevalprint/mezzanine,dustinrb/mezzanine,industrydive/mezzanine,Cajoline/mezzanine,Kniyl/mezzanine,Skytorn86/mezzanine,wyzex/mezzanine,nikolas/mezzanine,christianwgd/mezzanine,dekomote/mezzanine-modeltranslation-backport,joshcartme/mezzanine,Cicero-Zhao/mezzanine,christianwgd/mezzanine,geodesign/mezzanine,theclanks/mezzanine,biomassives/mezzanine,theclanks/mezzanine
|
from __future__ import unicode_literals
import os
bind = "127.0.0.1:%(gunicorn_port)s"
workers = (os.sysconf("SC_NPROCESSORS_ONLN") * 2) + 1
loglevel = "error"
proc_name = "%(proj_name)s"
Update to use multiprocessing library
|
from __future__ import unicode_literals
import multiprocessing
bind = "127.0.0.1:%(gunicorn_port)s"
workers = multiprocessing.cpu_count() * 2 + 1
loglevel = "error"
proc_name = "%(proj_name)s"
|
<commit_before>from __future__ import unicode_literals
import os
bind = "127.0.0.1:%(gunicorn_port)s"
workers = (os.sysconf("SC_NPROCESSORS_ONLN") * 2) + 1
loglevel = "error"
proc_name = "%(proj_name)s"
<commit_msg>Update to use multiprocessing library<commit_after>
|
from __future__ import unicode_literals
import multiprocessing
bind = "127.0.0.1:%(gunicorn_port)s"
workers = multiprocessing.cpu_count() * 2 + 1
loglevel = "error"
proc_name = "%(proj_name)s"
|
from __future__ import unicode_literals
import os
bind = "127.0.0.1:%(gunicorn_port)s"
workers = (os.sysconf("SC_NPROCESSORS_ONLN") * 2) + 1
loglevel = "error"
proc_name = "%(proj_name)s"
Update to use multiprocessing libraryfrom __future__ import unicode_literals
import multiprocessing
bind = "127.0.0.1:%(gunicorn_port)s"
workers = multiprocessing.cpu_count() * 2 + 1
loglevel = "error"
proc_name = "%(proj_name)s"
|
<commit_before>from __future__ import unicode_literals
import os
bind = "127.0.0.1:%(gunicorn_port)s"
workers = (os.sysconf("SC_NPROCESSORS_ONLN") * 2) + 1
loglevel = "error"
proc_name = "%(proj_name)s"
<commit_msg>Update to use multiprocessing library<commit_after>from __future__ import unicode_literals
import multiprocessing
bind = "127.0.0.1:%(gunicorn_port)s"
workers = multiprocessing.cpu_count() * 2 + 1
loglevel = "error"
proc_name = "%(proj_name)s"
|
7bea4c66c6d44af743955500d31ea1f963edf013
|
Snippets/fix-dflt-langsys.py
|
Snippets/fix-dflt-langsys.py
|
#!/usr/bin/env python
import argparse
import logging
import os
import sys
from fontTools.ttLib import TTFont
def ProcessTable(table):
found = set()
for rec in table.ScriptList.ScriptRecord:
if rec.ScriptTag == "DFLT" and rec.Script.LangSysCount != 0:
tags = [r.LangSysTag for r in rec.Script.LangSysRecord]
logging.info("Removing %d extraneous LangSys records: %s",
rec.Script.LangSysCount, " ".join(tags))
rec.Script.LangSysRecord = []
rec.Script.LangSysCount = 0
found.update(tags)
if not found:
logging.info("All fine")
return False
else:
for rec in table.ScriptList.ScriptRecord:
tags = set([r.LangSysTag for r in rec.Script.LangSysRecord])
found -= tags
if found:
logging.warning("Records are missing from non-DFLT scripts: %s",
" ".join(found))
return True
def ProcessFont(font):
found = False
for tag in ("GSUB", "GPOS"):
if tag in font:
logging.info("Processing %s table", tag)
if ProcessTable(font[tag].table):
found = True
else:
# Unmark the table as loaded so that it is read from disk when
# writing the font, to avoid any unnecessary changes caused by
# decompiling then recompiling again.
del font.tables[tag]
return found
def ProcessFiles(filenames):
for filename in filenames:
logging.info("Processing %s", filename)
font = TTFont(filename)
name, ext = os.path.splitext(filename)
fixedname = name + ".fixed" + ext
if ProcessFont(font):
logging.info("Saving fixed font to %s\n", fixedname)
font.save(fixedname)
else:
logging.info("Font file is fine, nothing to fix\n")
def main():
parser = argparse.ArgumentParser(
description="Fix LangSys records for DFLT script")
parser.add_argument("files", metavar="FILE", type=str, nargs="+",
help="input font to process")
parser.add_argument("-s", "--silent", action='store_true',
help="suppress normal messages")
args = parser.parse_args()
logformat = "%(levelname)s: %(message)s"
if args.silent:
logging.basicConfig(format=logformat, level=logging.DEBUG)
else:
logging.basicConfig(format=logformat, level=logging.INFO)
ProcessFiles(args.files)
if __name__ == "__main__":
sys.exit(main())
|
Add a snippet to remove LangSys from DFLT script
|
Add a snippet to remove LangSys from DFLT script
Such fonts violate the spec and OTS rejects them, this snippet should
help quickly fixing such fonts.
|
Python
|
mit
|
googlefonts/fonttools,fonttools/fonttools
|
Add a snippet to remove LangSys from DFLT script
Such fonts violate the spec and OTS rejects them, this snippet should
help quickly fixing such fonts.
|
#!/usr/bin/env python
import argparse
import logging
import os
import sys
from fontTools.ttLib import TTFont
def ProcessTable(table):
found = set()
for rec in table.ScriptList.ScriptRecord:
if rec.ScriptTag == "DFLT" and rec.Script.LangSysCount != 0:
tags = [r.LangSysTag for r in rec.Script.LangSysRecord]
logging.info("Removing %d extraneous LangSys records: %s",
rec.Script.LangSysCount, " ".join(tags))
rec.Script.LangSysRecord = []
rec.Script.LangSysCount = 0
found.update(tags)
if not found:
logging.info("All fine")
return False
else:
for rec in table.ScriptList.ScriptRecord:
tags = set([r.LangSysTag for r in rec.Script.LangSysRecord])
found -= tags
if found:
logging.warning("Records are missing from non-DFLT scripts: %s",
" ".join(found))
return True
def ProcessFont(font):
found = False
for tag in ("GSUB", "GPOS"):
if tag in font:
logging.info("Processing %s table", tag)
if ProcessTable(font[tag].table):
found = True
else:
# Unmark the table as loaded so that it is read from disk when
# writing the font, to avoid any unnecessary changes caused by
# decompiling then recompiling again.
del font.tables[tag]
return found
def ProcessFiles(filenames):
for filename in filenames:
logging.info("Processing %s", filename)
font = TTFont(filename)
name, ext = os.path.splitext(filename)
fixedname = name + ".fixed" + ext
if ProcessFont(font):
logging.info("Saving fixed font to %s\n", fixedname)
font.save(fixedname)
else:
logging.info("Font file is fine, nothing to fix\n")
def main():
parser = argparse.ArgumentParser(
description="Fix LangSys records for DFLT script")
parser.add_argument("files", metavar="FILE", type=str, nargs="+",
help="input font to process")
parser.add_argument("-s", "--silent", action='store_true',
help="suppress normal messages")
args = parser.parse_args()
logformat = "%(levelname)s: %(message)s"
if args.silent:
logging.basicConfig(format=logformat, level=logging.DEBUG)
else:
logging.basicConfig(format=logformat, level=logging.INFO)
ProcessFiles(args.files)
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add a snippet to remove LangSys from DFLT script
Such fonts violate the spec and OTS rejects them, this snippet should
help quickly fixing such fonts.<commit_after>
|
#!/usr/bin/env python
import argparse
import logging
import os
import sys
from fontTools.ttLib import TTFont
def ProcessTable(table):
found = set()
for rec in table.ScriptList.ScriptRecord:
if rec.ScriptTag == "DFLT" and rec.Script.LangSysCount != 0:
tags = [r.LangSysTag for r in rec.Script.LangSysRecord]
logging.info("Removing %d extraneous LangSys records: %s",
rec.Script.LangSysCount, " ".join(tags))
rec.Script.LangSysRecord = []
rec.Script.LangSysCount = 0
found.update(tags)
if not found:
logging.info("All fine")
return False
else:
for rec in table.ScriptList.ScriptRecord:
tags = set([r.LangSysTag for r in rec.Script.LangSysRecord])
found -= tags
if found:
logging.warning("Records are missing from non-DFLT scripts: %s",
" ".join(found))
return True
def ProcessFont(font):
found = False
for tag in ("GSUB", "GPOS"):
if tag in font:
logging.info("Processing %s table", tag)
if ProcessTable(font[tag].table):
found = True
else:
# Unmark the table as loaded so that it is read from disk when
# writing the font, to avoid any unnecessary changes caused by
# decompiling then recompiling again.
del font.tables[tag]
return found
def ProcessFiles(filenames):
for filename in filenames:
logging.info("Processing %s", filename)
font = TTFont(filename)
name, ext = os.path.splitext(filename)
fixedname = name + ".fixed" + ext
if ProcessFont(font):
logging.info("Saving fixed font to %s\n", fixedname)
font.save(fixedname)
else:
logging.info("Font file is fine, nothing to fix\n")
def main():
parser = argparse.ArgumentParser(
description="Fix LangSys records for DFLT script")
parser.add_argument("files", metavar="FILE", type=str, nargs="+",
help="input font to process")
parser.add_argument("-s", "--silent", action='store_true',
help="suppress normal messages")
args = parser.parse_args()
logformat = "%(levelname)s: %(message)s"
if args.silent:
logging.basicConfig(format=logformat, level=logging.DEBUG)
else:
logging.basicConfig(format=logformat, level=logging.INFO)
ProcessFiles(args.files)
if __name__ == "__main__":
sys.exit(main())
|
Add a snippet to remove LangSys from DFLT script
Such fonts violate the spec and OTS rejects them, this snippet should
help quickly fixing such fonts.#!/usr/bin/env python
import argparse
import logging
import os
import sys
from fontTools.ttLib import TTFont
def ProcessTable(table):
found = set()
for rec in table.ScriptList.ScriptRecord:
if rec.ScriptTag == "DFLT" and rec.Script.LangSysCount != 0:
tags = [r.LangSysTag for r in rec.Script.LangSysRecord]
logging.info("Removing %d extraneous LangSys records: %s",
rec.Script.LangSysCount, " ".join(tags))
rec.Script.LangSysRecord = []
rec.Script.LangSysCount = 0
found.update(tags)
if not found:
logging.info("All fine")
return False
else:
for rec in table.ScriptList.ScriptRecord:
tags = set([r.LangSysTag for r in rec.Script.LangSysRecord])
found -= tags
if found:
logging.warning("Records are missing from non-DFLT scripts: %s",
" ".join(found))
return True
def ProcessFont(font):
found = False
for tag in ("GSUB", "GPOS"):
if tag in font:
logging.info("Processing %s table", tag)
if ProcessTable(font[tag].table):
found = True
else:
# Unmark the table as loaded so that it is read from disk when
# writing the font, to avoid any unnecessary changes caused by
# decompiling then recompiling again.
del font.tables[tag]
return found
def ProcessFiles(filenames):
for filename in filenames:
logging.info("Processing %s", filename)
font = TTFont(filename)
name, ext = os.path.splitext(filename)
fixedname = name + ".fixed" + ext
if ProcessFont(font):
logging.info("Saving fixed font to %s\n", fixedname)
font.save(fixedname)
else:
logging.info("Font file is fine, nothing to fix\n")
def main():
parser = argparse.ArgumentParser(
description="Fix LangSys records for DFLT script")
parser.add_argument("files", metavar="FILE", type=str, nargs="+",
help="input font to process")
parser.add_argument("-s", "--silent", action='store_true',
help="suppress normal messages")
args = parser.parse_args()
logformat = "%(levelname)s: %(message)s"
if args.silent:
logging.basicConfig(format=logformat, level=logging.DEBUG)
else:
logging.basicConfig(format=logformat, level=logging.INFO)
ProcessFiles(args.files)
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add a snippet to remove LangSys from DFLT script
Such fonts violate the spec and OTS rejects them, this snippet should
help quickly fixing such fonts.<commit_after>#!/usr/bin/env python
import argparse
import logging
import os
import sys
from fontTools.ttLib import TTFont
def ProcessTable(table):
found = set()
for rec in table.ScriptList.ScriptRecord:
if rec.ScriptTag == "DFLT" and rec.Script.LangSysCount != 0:
tags = [r.LangSysTag for r in rec.Script.LangSysRecord]
logging.info("Removing %d extraneous LangSys records: %s",
rec.Script.LangSysCount, " ".join(tags))
rec.Script.LangSysRecord = []
rec.Script.LangSysCount = 0
found.update(tags)
if not found:
logging.info("All fine")
return False
else:
for rec in table.ScriptList.ScriptRecord:
tags = set([r.LangSysTag for r in rec.Script.LangSysRecord])
found -= tags
if found:
logging.warning("Records are missing from non-DFLT scripts: %s",
" ".join(found))
return True
def ProcessFont(font):
found = False
for tag in ("GSUB", "GPOS"):
if tag in font:
logging.info("Processing %s table", tag)
if ProcessTable(font[tag].table):
found = True
else:
# Unmark the table as loaded so that it is read from disk when
# writing the font, to avoid any unnecessary changes caused by
# decompiling then recompiling again.
del font.tables[tag]
return found
def ProcessFiles(filenames):
for filename in filenames:
logging.info("Processing %s", filename)
font = TTFont(filename)
name, ext = os.path.splitext(filename)
fixedname = name + ".fixed" + ext
if ProcessFont(font):
logging.info("Saving fixed font to %s\n", fixedname)
font.save(fixedname)
else:
logging.info("Font file is fine, nothing to fix\n")
def main():
parser = argparse.ArgumentParser(
description="Fix LangSys records for DFLT script")
parser.add_argument("files", metavar="FILE", type=str, nargs="+",
help="input font to process")
parser.add_argument("-s", "--silent", action='store_true',
help="suppress normal messages")
args = parser.parse_args()
logformat = "%(levelname)s: %(message)s"
if args.silent:
logging.basicConfig(format=logformat, level=logging.DEBUG)
else:
logging.basicConfig(format=logformat, level=logging.INFO)
ProcessFiles(args.files)
if __name__ == "__main__":
sys.exit(main())
|
|
493a2323187448f925abefdc63600b4deba3d95c
|
src/olympia/addons/migrations/0008_auto_20200604_0928.py
|
src/olympia/addons/migrations/0008_auto_20200604_0928.py
|
# Generated by Django 2.2.12 on 2020-06-04 09:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('addons', '0007_addonreviewerflags_notified_about_auto_approval_delay'),
]
operations = [
migrations.RemoveIndex(
model_name='addoncategory',
name='feature_addon_idx',
),
migrations.RemoveField(
model_name='addoncategory',
name='feature',
),
migrations.RemoveField(
model_name='addoncategory',
name='feature_locales',
),
]
|
Add missing addoncategory migrations (follow-up from fd2e3b1d)
|
Add missing addoncategory migrations (follow-up from fd2e3b1d)
|
Python
|
bsd-3-clause
|
diox/olympia,eviljeff/olympia,mozilla/olympia,diox/olympia,eviljeff/olympia,mozilla/olympia,mozilla/addons-server,eviljeff/olympia,wagnerand/addons-server,bqbn/addons-server,diox/olympia,mozilla/olympia,eviljeff/olympia,diox/olympia,mozilla/addons-server,mozilla/addons-server,wagnerand/addons-server,wagnerand/addons-server,bqbn/addons-server,bqbn/addons-server,wagnerand/addons-server,mozilla/olympia,mozilla/addons-server,bqbn/addons-server
|
Add missing addoncategory migrations (follow-up from fd2e3b1d)
|
# Generated by Django 2.2.12 on 2020-06-04 09:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('addons', '0007_addonreviewerflags_notified_about_auto_approval_delay'),
]
operations = [
migrations.RemoveIndex(
model_name='addoncategory',
name='feature_addon_idx',
),
migrations.RemoveField(
model_name='addoncategory',
name='feature',
),
migrations.RemoveField(
model_name='addoncategory',
name='feature_locales',
),
]
|
<commit_before><commit_msg>Add missing addoncategory migrations (follow-up from fd2e3b1d)<commit_after>
|
# Generated by Django 2.2.12 on 2020-06-04 09:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('addons', '0007_addonreviewerflags_notified_about_auto_approval_delay'),
]
operations = [
migrations.RemoveIndex(
model_name='addoncategory',
name='feature_addon_idx',
),
migrations.RemoveField(
model_name='addoncategory',
name='feature',
),
migrations.RemoveField(
model_name='addoncategory',
name='feature_locales',
),
]
|
Add missing addoncategory migrations (follow-up from fd2e3b1d)# Generated by Django 2.2.12 on 2020-06-04 09:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('addons', '0007_addonreviewerflags_notified_about_auto_approval_delay'),
]
operations = [
migrations.RemoveIndex(
model_name='addoncategory',
name='feature_addon_idx',
),
migrations.RemoveField(
model_name='addoncategory',
name='feature',
),
migrations.RemoveField(
model_name='addoncategory',
name='feature_locales',
),
]
|
<commit_before><commit_msg>Add missing addoncategory migrations (follow-up from fd2e3b1d)<commit_after># Generated by Django 2.2.12 on 2020-06-04 09:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('addons', '0007_addonreviewerflags_notified_about_auto_approval_delay'),
]
operations = [
migrations.RemoveIndex(
model_name='addoncategory',
name='feature_addon_idx',
),
migrations.RemoveField(
model_name='addoncategory',
name='feature',
),
migrations.RemoveField(
model_name='addoncategory',
name='feature_locales',
),
]
|
|
a4a6901638cf2240e48bd29eb364ad54de6ba81e
|
tests/test-server-chain-exec-shutdown.py
|
tests/test-server-chain-exec-shutdown.py
|
#!/usr/bin/env python3
from subprocess import Popen
from test_common import *
import socket, ssl, time, os, signal
if __name__ == "__main__":
ghostunnel = None
try:
# create certs
root = RootCert('root')
root.create_signed_cert('client')
# start ghostunnel server with false as child
ghostunnel = run_ghostunnel(['server', '--listen={0}:13001'.format(LOCALHOST),
'--target={0}:13002'.format(LOCALHOST), '--keystore=client.p12',
'--cacert=root.crt', '--status={0}:{1}'.format(LOCALHOST, STATUS_PORT),
'--allow-all', '--', 'nc', '-kl', LOCALHOST, '13002'])
urlopen = lambda path: urllib.request.urlopen(path, cafile='root.crt')
# block until ghostunnel is up
TcpClient(STATUS_PORT).connect(20)
# get child pid
status = json.loads(str(urlopen("https://{0}:{1}/_status".format(LOCALHOST, STATUS_PORT)).read(), 'utf-8'))
# send sigterm to ghostunnel, wait for child to terminate
stopped = False
ghostunnel.terminate()
for n in range(0, 10):
try:
os.kill(status['child_pid'], 0)
print_ok("child is still alive")
except:
stopped = True
break
time.sleep(1)
if not stopped:
raise Exception('child never terminated')
print_ok("child terminated")
print_ok("OK")
finally:
terminate(ghostunnel)
|
Add test to check tunnel shuts down child
|
Add test to check tunnel shuts down child
|
Python
|
apache-2.0
|
square/ghostunnel,square/ghostunnel
|
Add test to check tunnel shuts down child
|
#!/usr/bin/env python3
from subprocess import Popen
from test_common import *
import socket, ssl, time, os, signal
if __name__ == "__main__":
ghostunnel = None
try:
# create certs
root = RootCert('root')
root.create_signed_cert('client')
# start ghostunnel server with false as child
ghostunnel = run_ghostunnel(['server', '--listen={0}:13001'.format(LOCALHOST),
'--target={0}:13002'.format(LOCALHOST), '--keystore=client.p12',
'--cacert=root.crt', '--status={0}:{1}'.format(LOCALHOST, STATUS_PORT),
'--allow-all', '--', 'nc', '-kl', LOCALHOST, '13002'])
urlopen = lambda path: urllib.request.urlopen(path, cafile='root.crt')
# block until ghostunnel is up
TcpClient(STATUS_PORT).connect(20)
# get child pid
status = json.loads(str(urlopen("https://{0}:{1}/_status".format(LOCALHOST, STATUS_PORT)).read(), 'utf-8'))
# send sigterm to ghostunnel, wait for child to terminate
stopped = False
ghostunnel.terminate()
for n in range(0, 10):
try:
os.kill(status['child_pid'], 0)
print_ok("child is still alive")
except:
stopped = True
break
time.sleep(1)
if not stopped:
raise Exception('child never terminated')
print_ok("child terminated")
print_ok("OK")
finally:
terminate(ghostunnel)
|
<commit_before><commit_msg>Add test to check tunnel shuts down child<commit_after>
|
#!/usr/bin/env python3
from subprocess import Popen
from test_common import *
import socket, ssl, time, os, signal
if __name__ == "__main__":
ghostunnel = None
try:
# create certs
root = RootCert('root')
root.create_signed_cert('client')
# start ghostunnel server with false as child
ghostunnel = run_ghostunnel(['server', '--listen={0}:13001'.format(LOCALHOST),
'--target={0}:13002'.format(LOCALHOST), '--keystore=client.p12',
'--cacert=root.crt', '--status={0}:{1}'.format(LOCALHOST, STATUS_PORT),
'--allow-all', '--', 'nc', '-kl', LOCALHOST, '13002'])
urlopen = lambda path: urllib.request.urlopen(path, cafile='root.crt')
# block until ghostunnel is up
TcpClient(STATUS_PORT).connect(20)
# get child pid
status = json.loads(str(urlopen("https://{0}:{1}/_status".format(LOCALHOST, STATUS_PORT)).read(), 'utf-8'))
# send sigterm to ghostunnel, wait for child to terminate
stopped = False
ghostunnel.terminate()
for n in range(0, 10):
try:
os.kill(status['child_pid'], 0)
print_ok("child is still alive")
except:
stopped = True
break
time.sleep(1)
if not stopped:
raise Exception('child never terminated')
print_ok("child terminated")
print_ok("OK")
finally:
terminate(ghostunnel)
|
Add test to check tunnel shuts down child#!/usr/bin/env python3
from subprocess import Popen
from test_common import *
import socket, ssl, time, os, signal
if __name__ == "__main__":
ghostunnel = None
try:
# create certs
root = RootCert('root')
root.create_signed_cert('client')
# start ghostunnel server with false as child
ghostunnel = run_ghostunnel(['server', '--listen={0}:13001'.format(LOCALHOST),
'--target={0}:13002'.format(LOCALHOST), '--keystore=client.p12',
'--cacert=root.crt', '--status={0}:{1}'.format(LOCALHOST, STATUS_PORT),
'--allow-all', '--', 'nc', '-kl', LOCALHOST, '13002'])
urlopen = lambda path: urllib.request.urlopen(path, cafile='root.crt')
# block until ghostunnel is up
TcpClient(STATUS_PORT).connect(20)
# get child pid
status = json.loads(str(urlopen("https://{0}:{1}/_status".format(LOCALHOST, STATUS_PORT)).read(), 'utf-8'))
# send sigterm to ghostunnel, wait for child to terminate
stopped = False
ghostunnel.terminate()
for n in range(0, 10):
try:
os.kill(status['child_pid'], 0)
print_ok("child is still alive")
except:
stopped = True
break
time.sleep(1)
if not stopped:
raise Exception('child never terminated')
print_ok("child terminated")
print_ok("OK")
finally:
terminate(ghostunnel)
|
<commit_before><commit_msg>Add test to check tunnel shuts down child<commit_after>#!/usr/bin/env python3
from subprocess import Popen
from test_common import *
import socket, ssl, time, os, signal
if __name__ == "__main__":
ghostunnel = None
try:
# create certs
root = RootCert('root')
root.create_signed_cert('client')
# start ghostunnel server with false as child
ghostunnel = run_ghostunnel(['server', '--listen={0}:13001'.format(LOCALHOST),
'--target={0}:13002'.format(LOCALHOST), '--keystore=client.p12',
'--cacert=root.crt', '--status={0}:{1}'.format(LOCALHOST, STATUS_PORT),
'--allow-all', '--', 'nc', '-kl', LOCALHOST, '13002'])
urlopen = lambda path: urllib.request.urlopen(path, cafile='root.crt')
# block until ghostunnel is up
TcpClient(STATUS_PORT).connect(20)
# get child pid
status = json.loads(str(urlopen("https://{0}:{1}/_status".format(LOCALHOST, STATUS_PORT)).read(), 'utf-8'))
# send sigterm to ghostunnel, wait for child to terminate
stopped = False
ghostunnel.terminate()
for n in range(0, 10):
try:
os.kill(status['child_pid'], 0)
print_ok("child is still alive")
except:
stopped = True
break
time.sleep(1)
if not stopped:
raise Exception('child never terminated')
print_ok("child terminated")
print_ok("OK")
finally:
terminate(ghostunnel)
|
|
c55f4d6fd10463db590e2763f95b20d3b7004347
|
tests/test_exclusions.py
|
tests/test_exclusions.py
|
from datetime import datetime
from recurrence import Recurrence, Rule
import recurrence
def test_exclusion_date():
rule = Rule(
recurrence.DAILY
)
pattern = Recurrence(
dtstart=datetime(2014, 1, 2, 0, 0, 0),
dtend=datetime(2014, 1, 4, 0, 0, 0),
rrules=[rule],
exdates=[
datetime(2014, 1, 3, 0, 0, 0)
]
)
occurrences = [
instance for instance in
pattern.occurrences()
]
assert occurrences == [
datetime(2014, 1, 2, 0, 0, 0),
datetime(2014, 1, 4, 0, 0, 0),
]
assert 2 == pattern.count()
def test_exclusion_date_no_limits():
pattern = Recurrence(
rdates=[
datetime(2014, 1, 1, 0, 0, 0),
datetime(2014, 1, 2, 0, 0, 0),
],
exdates=[
datetime(2014, 1, 2, 0, 0, 0)
]
)
occurrences = [
instance for instance in
pattern.occurrences()
]
assert occurrences == [
datetime(2014, 1, 1, 0, 0, 0),
]
assert 1 == pattern.count()
def test_exclusion_rule():
inclusion_rule = Rule(
recurrence.DAILY
)
exclusion_rule = Rule(
recurrence.WEEKLY,
byday=recurrence.THURSDAY
)
pattern = Recurrence(
dtstart=datetime(2014, 1, 2, 0, 0, 0),
dtend=datetime(2014, 1, 4, 0, 0, 0),
rrules=[inclusion_rule],
exrules=[exclusion_rule]
)
occurrences = [
instance for instance in
pattern.occurrences()
]
assert occurrences == [
datetime(2014, 1, 3, 0, 0, 0),
datetime(2014, 1, 4, 0, 0, 0),
]
assert 2 == pattern.count()
|
Check occurrences for exclusion dates and patterns
|
Check occurrences for exclusion dates and patterns
|
Python
|
bsd-3-clause
|
django-recurrence/django-recurrence,FrankSalad/django-recurrence,Nikola-K/django-recurrence,linux2400/django-recurrence,Nikola-K/django-recurrence,linux2400/django-recurrence,FrankSalad/django-recurrence,django-recurrence/django-recurrence
|
Check occurrences for exclusion dates and patterns
|
from datetime import datetime
from recurrence import Recurrence, Rule
import recurrence
def test_exclusion_date():
rule = Rule(
recurrence.DAILY
)
pattern = Recurrence(
dtstart=datetime(2014, 1, 2, 0, 0, 0),
dtend=datetime(2014, 1, 4, 0, 0, 0),
rrules=[rule],
exdates=[
datetime(2014, 1, 3, 0, 0, 0)
]
)
occurrences = [
instance for instance in
pattern.occurrences()
]
assert occurrences == [
datetime(2014, 1, 2, 0, 0, 0),
datetime(2014, 1, 4, 0, 0, 0),
]
assert 2 == pattern.count()
def test_exclusion_date_no_limits():
pattern = Recurrence(
rdates=[
datetime(2014, 1, 1, 0, 0, 0),
datetime(2014, 1, 2, 0, 0, 0),
],
exdates=[
datetime(2014, 1, 2, 0, 0, 0)
]
)
occurrences = [
instance for instance in
pattern.occurrences()
]
assert occurrences == [
datetime(2014, 1, 1, 0, 0, 0),
]
assert 1 == pattern.count()
def test_exclusion_rule():
inclusion_rule = Rule(
recurrence.DAILY
)
exclusion_rule = Rule(
recurrence.WEEKLY,
byday=recurrence.THURSDAY
)
pattern = Recurrence(
dtstart=datetime(2014, 1, 2, 0, 0, 0),
dtend=datetime(2014, 1, 4, 0, 0, 0),
rrules=[inclusion_rule],
exrules=[exclusion_rule]
)
occurrences = [
instance for instance in
pattern.occurrences()
]
assert occurrences == [
datetime(2014, 1, 3, 0, 0, 0),
datetime(2014, 1, 4, 0, 0, 0),
]
assert 2 == pattern.count()
|
<commit_before><commit_msg>Check occurrences for exclusion dates and patterns<commit_after>
|
from datetime import datetime
from recurrence import Recurrence, Rule
import recurrence
def test_exclusion_date():
rule = Rule(
recurrence.DAILY
)
pattern = Recurrence(
dtstart=datetime(2014, 1, 2, 0, 0, 0),
dtend=datetime(2014, 1, 4, 0, 0, 0),
rrules=[rule],
exdates=[
datetime(2014, 1, 3, 0, 0, 0)
]
)
occurrences = [
instance for instance in
pattern.occurrences()
]
assert occurrences == [
datetime(2014, 1, 2, 0, 0, 0),
datetime(2014, 1, 4, 0, 0, 0),
]
assert 2 == pattern.count()
def test_exclusion_date_no_limits():
pattern = Recurrence(
rdates=[
datetime(2014, 1, 1, 0, 0, 0),
datetime(2014, 1, 2, 0, 0, 0),
],
exdates=[
datetime(2014, 1, 2, 0, 0, 0)
]
)
occurrences = [
instance for instance in
pattern.occurrences()
]
assert occurrences == [
datetime(2014, 1, 1, 0, 0, 0),
]
assert 1 == pattern.count()
def test_exclusion_rule():
inclusion_rule = Rule(
recurrence.DAILY
)
exclusion_rule = Rule(
recurrence.WEEKLY,
byday=recurrence.THURSDAY
)
pattern = Recurrence(
dtstart=datetime(2014, 1, 2, 0, 0, 0),
dtend=datetime(2014, 1, 4, 0, 0, 0),
rrules=[inclusion_rule],
exrules=[exclusion_rule]
)
occurrences = [
instance for instance in
pattern.occurrences()
]
assert occurrences == [
datetime(2014, 1, 3, 0, 0, 0),
datetime(2014, 1, 4, 0, 0, 0),
]
assert 2 == pattern.count()
|
Check occurrences for exclusion dates and patternsfrom datetime import datetime
from recurrence import Recurrence, Rule
import recurrence
def test_exclusion_date():
rule = Rule(
recurrence.DAILY
)
pattern = Recurrence(
dtstart=datetime(2014, 1, 2, 0, 0, 0),
dtend=datetime(2014, 1, 4, 0, 0, 0),
rrules=[rule],
exdates=[
datetime(2014, 1, 3, 0, 0, 0)
]
)
occurrences = [
instance for instance in
pattern.occurrences()
]
assert occurrences == [
datetime(2014, 1, 2, 0, 0, 0),
datetime(2014, 1, 4, 0, 0, 0),
]
assert 2 == pattern.count()
def test_exclusion_date_no_limits():
pattern = Recurrence(
rdates=[
datetime(2014, 1, 1, 0, 0, 0),
datetime(2014, 1, 2, 0, 0, 0),
],
exdates=[
datetime(2014, 1, 2, 0, 0, 0)
]
)
occurrences = [
instance for instance in
pattern.occurrences()
]
assert occurrences == [
datetime(2014, 1, 1, 0, 0, 0),
]
assert 1 == pattern.count()
def test_exclusion_rule():
inclusion_rule = Rule(
recurrence.DAILY
)
exclusion_rule = Rule(
recurrence.WEEKLY,
byday=recurrence.THURSDAY
)
pattern = Recurrence(
dtstart=datetime(2014, 1, 2, 0, 0, 0),
dtend=datetime(2014, 1, 4, 0, 0, 0),
rrules=[inclusion_rule],
exrules=[exclusion_rule]
)
occurrences = [
instance for instance in
pattern.occurrences()
]
assert occurrences == [
datetime(2014, 1, 3, 0, 0, 0),
datetime(2014, 1, 4, 0, 0, 0),
]
assert 2 == pattern.count()
|
<commit_before><commit_msg>Check occurrences for exclusion dates and patterns<commit_after>from datetime import datetime
from recurrence import Recurrence, Rule
import recurrence
def test_exclusion_date():
rule = Rule(
recurrence.DAILY
)
pattern = Recurrence(
dtstart=datetime(2014, 1, 2, 0, 0, 0),
dtend=datetime(2014, 1, 4, 0, 0, 0),
rrules=[rule],
exdates=[
datetime(2014, 1, 3, 0, 0, 0)
]
)
occurrences = [
instance for instance in
pattern.occurrences()
]
assert occurrences == [
datetime(2014, 1, 2, 0, 0, 0),
datetime(2014, 1, 4, 0, 0, 0),
]
assert 2 == pattern.count()
def test_exclusion_date_no_limits():
pattern = Recurrence(
rdates=[
datetime(2014, 1, 1, 0, 0, 0),
datetime(2014, 1, 2, 0, 0, 0),
],
exdates=[
datetime(2014, 1, 2, 0, 0, 0)
]
)
occurrences = [
instance for instance in
pattern.occurrences()
]
assert occurrences == [
datetime(2014, 1, 1, 0, 0, 0),
]
assert 1 == pattern.count()
def test_exclusion_rule():
inclusion_rule = Rule(
recurrence.DAILY
)
exclusion_rule = Rule(
recurrence.WEEKLY,
byday=recurrence.THURSDAY
)
pattern = Recurrence(
dtstart=datetime(2014, 1, 2, 0, 0, 0),
dtend=datetime(2014, 1, 4, 0, 0, 0),
rrules=[inclusion_rule],
exrules=[exclusion_rule]
)
occurrences = [
instance for instance in
pattern.occurrences()
]
assert occurrences == [
datetime(2014, 1, 3, 0, 0, 0),
datetime(2014, 1, 4, 0, 0, 0),
]
assert 2 == pattern.count()
|
|
7b791900202cb60746af54e63fc166dc0de3560f
|
tests/test_mixed_java.py
|
tests/test_mixed_java.py
|
import os
import angr
self_dir = os.path.dirname(os.path.realpath(__file__))
def test_loading_of_native_libs():
binary_dir = os.path.join(self_dir, "..", "..", "angr-doc", "examples", "java_mixed_ictf")
jar_path = os.path.join(binary_dir, "service.jar")
native_libs_path = os.path.join(binary_dir, "native_libs")
# define which libraries to load (+ the load path)
jni_options = {
'native_libs' : ['libnotfun.so'],
'native_libs_ld_path' : native_libs_path
}
# information about native libraries are passed as additional options
# of the main binary (e.g. the JAR/APK) to the project
proj = angr.Project(jar_path, main_opts=jni_options)
# check if native library libnotfun.so was loaded
loaded_libs_names = [lib.provides for lib in proj.loader.all_elf_objects]
assert 'libnotfun.so' in loaded_libs_names
def main():
test_loading_of_native_libs()
if __name__ == "__main__":
main()
|
Add test for loading native libraries of a Java archive.
|
Add test for loading native libraries of a Java archive.
|
Python
|
bsd-2-clause
|
iamahuman/angr,schieb/angr,iamahuman/angr,angr/angr,angr/angr,schieb/angr,iamahuman/angr,angr/angr,schieb/angr
|
Add test for loading native libraries of a Java archive.
|
import os
import angr
self_dir = os.path.dirname(os.path.realpath(__file__))
def test_loading_of_native_libs():
binary_dir = os.path.join(self_dir, "..", "..", "angr-doc", "examples", "java_mixed_ictf")
jar_path = os.path.join(binary_dir, "service.jar")
native_libs_path = os.path.join(binary_dir, "native_libs")
# define which libraries to load (+ the load path)
jni_options = {
'native_libs' : ['libnotfun.so'],
'native_libs_ld_path' : native_libs_path
}
# information about native libraries are passed as additional options
# of the main binary (e.g. the JAR/APK) to the project
proj = angr.Project(jar_path, main_opts=jni_options)
# check if native library libnotfun.so was loaded
loaded_libs_names = [lib.provides for lib in proj.loader.all_elf_objects]
assert 'libnotfun.so' in loaded_libs_names
def main():
test_loading_of_native_libs()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add test for loading native libraries of a Java archive.<commit_after>
|
import os
import angr
self_dir = os.path.dirname(os.path.realpath(__file__))
def test_loading_of_native_libs():
binary_dir = os.path.join(self_dir, "..", "..", "angr-doc", "examples", "java_mixed_ictf")
jar_path = os.path.join(binary_dir, "service.jar")
native_libs_path = os.path.join(binary_dir, "native_libs")
# define which libraries to load (+ the load path)
jni_options = {
'native_libs' : ['libnotfun.so'],
'native_libs_ld_path' : native_libs_path
}
# information about native libraries are passed as additional options
# of the main binary (e.g. the JAR/APK) to the project
proj = angr.Project(jar_path, main_opts=jni_options)
# check if native library libnotfun.so was loaded
loaded_libs_names = [lib.provides for lib in proj.loader.all_elf_objects]
assert 'libnotfun.so' in loaded_libs_names
def main():
test_loading_of_native_libs()
if __name__ == "__main__":
main()
|
Add test for loading native libraries of a Java archive.import os
import angr
self_dir = os.path.dirname(os.path.realpath(__file__))
def test_loading_of_native_libs():
binary_dir = os.path.join(self_dir, "..", "..", "angr-doc", "examples", "java_mixed_ictf")
jar_path = os.path.join(binary_dir, "service.jar")
native_libs_path = os.path.join(binary_dir, "native_libs")
# define which libraries to load (+ the load path)
jni_options = {
'native_libs' : ['libnotfun.so'],
'native_libs_ld_path' : native_libs_path
}
# information about native libraries are passed as additional options
# of the main binary (e.g. the JAR/APK) to the project
proj = angr.Project(jar_path, main_opts=jni_options)
# check if native library libnotfun.so was loaded
loaded_libs_names = [lib.provides for lib in proj.loader.all_elf_objects]
assert 'libnotfun.so' in loaded_libs_names
def main():
test_loading_of_native_libs()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add test for loading native libraries of a Java archive.<commit_after>import os
import angr
self_dir = os.path.dirname(os.path.realpath(__file__))
def test_loading_of_native_libs():
binary_dir = os.path.join(self_dir, "..", "..", "angr-doc", "examples", "java_mixed_ictf")
jar_path = os.path.join(binary_dir, "service.jar")
native_libs_path = os.path.join(binary_dir, "native_libs")
# define which libraries to load (+ the load path)
jni_options = {
'native_libs' : ['libnotfun.so'],
'native_libs_ld_path' : native_libs_path
}
# information about native libraries are passed as additional options
# of the main binary (e.g. the JAR/APK) to the project
proj = angr.Project(jar_path, main_opts=jni_options)
# check if native library libnotfun.so was loaded
loaded_libs_names = [lib.provides for lib in proj.loader.all_elf_objects]
assert 'libnotfun.so' in loaded_libs_names
def main():
test_loading_of_native_libs()
if __name__ == "__main__":
main()
|
No community queries yet
The top public SQL queries from the community will appear here once available.