Dataset columns:

| Column | Type | Length / distinct values |
|---|---|---|
| commit | string | 40-40 chars |
| old_file | string | 4-118 chars |
| new_file | string | 4-118 chars |
| old_contents | string | 0-2.94k chars |
| new_contents | string | 1-4.43k chars |
| subject | string | 15-444 chars |
| message | string | 16-3.45k chars |
| lang | string | 1 class |
| license | string | 13 classes |
| repos | string | 5-43.2k chars |
| prompt | string | 17-4.58k chars |
| response | string | 1-4.43k chars |
| prompt_tagged | string | 58-4.62k chars |
| response_tagged | string | 1-4.43k chars |
| text | string | 132-7.29k chars |
| text_tagged | string | 173-7.33k chars |

---
commit: dcad82d44e08b6f645a1ffc43c9ba22ec4ce8c30
old_file: src/redmill/api/json_.py
new_file: src/redmill/api/json_.py
old_contents: (empty)
new_contents:
# This file is part of Redmill.
#
# Redmill is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Redmill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Redmill. If not, see <http://www.gnu.org/licenses/>.
import json
import flask
from .. import Album, Media
class JSONEncoder(json.JSONEncoder):
""" Encode database objects to JSON.
"""
def default(self, obj):
if isinstance(obj, (Album, Media)):
fields = {
Album: ["id", "name", "parent_id"],
Media: [
"id", "title", "author", "keywords", "filename", "album_id",
"location"
]
}
type_ = type(obj)
value = { field: getattr(obj, field) for field in fields[type_] }
value["type"] = type_.__name__
if isinstance(obj, Album):
children = [("album", x.id) for x in obj.children]
children += [("media", x.id) for x in obj.media]
value["children"] = [
flask.url_for("get_collection_item", table=table, id_=id_)
for table, id_ in children]
else:
value = obj
return value

subject: Add JSON converter for database objects
message: Add JSON converter for database objects
lang: Python
license: agpl-3.0
repos: lamyj/redmill,lamyj/redmill,lamyj/redmill
prompt: Add JSON converter for database objects
response:
# This file is part of Redmill.
#
# Redmill is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Redmill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Redmill. If not, see <http://www.gnu.org/licenses/>.
import json
import flask
from .. import Album, Media
class JSONEncoder(json.JSONEncoder):
""" Encode database objects to JSON.
"""
def default(self, obj):
if isinstance(obj, (Album, Media)):
fields = {
Album: ["id", "name", "parent_id"],
Media: [
"id", "title", "author", "keywords", "filename", "album_id",
"location"
]
}
type_ = type(obj)
value = { field: getattr(obj, field) for field in fields[type_] }
value["type"] = type_.__name__
if isinstance(obj, Album):
children = [("album", x.id) for x in obj.children]
children += [("media", x.id) for x in obj.media]
value["children"] = [
flask.url_for("get_collection_item", table=table, id_=id_)
for table, id_ in children]
else:
value = obj
return value

prompt_tagged: <commit_before><commit_msg>Add JSON converter for database objects<commit_after>
response_tagged:
# This file is part of Redmill.
#
# Redmill is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Redmill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Redmill. If not, see <http://www.gnu.org/licenses/>.
import json
import flask
from .. import Album, Media
class JSONEncoder(json.JSONEncoder):
""" Encode database objects to JSON.
"""
def default(self, obj):
if isinstance(obj, (Album, Media)):
fields = {
Album: ["id", "name", "parent_id"],
Media: [
"id", "title", "author", "keywords", "filename", "album_id",
"location"
]
}
type_ = type(obj)
value = { field: getattr(obj, field) for field in fields[type_] }
value["type"] = type_.__name__
if isinstance(obj, Album):
children = [("album", x.id) for x in obj.children]
children += [("media", x.id) for x in obj.media]
value["children"] = [
flask.url_for("get_collection_item", table=table, id_=id_)
for table, id_ in children]
else:
value = obj
return value

text:
Add JSON converter for database objects
# This file is part of Redmill.
#
# Redmill is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Redmill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Redmill. If not, see <http://www.gnu.org/licenses/>.
import json
import flask
from .. import Album, Media
class JSONEncoder(json.JSONEncoder):
""" Encode database objects to JSON.
"""
def default(self, obj):
if isinstance(obj, (Album, Media)):
fields = {
Album: ["id", "name", "parent_id"],
Media: [
"id", "title", "author", "keywords", "filename", "album_id",
"location"
]
}
type_ = type(obj)
value = { field: getattr(obj, field) for field in fields[type_] }
value["type"] = type_.__name__
if isinstance(obj, Album):
children = [("album", x.id) for x in obj.children]
children += [("media", x.id) for x in obj.media]
value["children"] = [
flask.url_for("get_collection_item", table=table, id_=id_)
for table, id_ in children]
else:
value = obj
return value

text_tagged:
<commit_before><commit_msg>Add JSON converter for database objects<commit_after>
# This file is part of Redmill.
#
# Redmill is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Redmill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Redmill. If not, see <http://www.gnu.org/licenses/>.
import json
import flask
from .. import Album, Media
class JSONEncoder(json.JSONEncoder):
""" Encode database objects to JSON.
"""
def default(self, obj):
if isinstance(obj, (Album, Media)):
fields = {
Album: ["id", "name", "parent_id"],
Media: [
"id", "title", "author", "keywords", "filename", "album_id",
"location"
]
}
type_ = type(obj)
value = { field: getattr(obj, field) for field in fields[type_] }
value["type"] = type_.__name__
if isinstance(obj, Album):
children = [("album", x.id) for x in obj.children]
children += [("media", x.id) for x in obj.media]
value["children"] = [
flask.url_for("get_collection_item", table=table, id_=id_)
for table, id_ in children]
else:
value = obj
return value

---
commit: d13927daaa19c8baff06469fd894154bda436593
old_file: code/loader/dblploader/translateJSON2Cypher.py
new_file: code/loader/dblploader/translateJSON2Cypher.py
old_contents: (empty)
new_contents:
import os
import sys
import json
cypher = "seeds.cql"
for file in os.listdir(os.getcwd()):
if file.find("docentes-") >= 0:
with open(cypher, "a", encoding="utf-8") as fout:
institution = file.split("-")[1].split(".")[0]
fout.write("CREATE(n:Institution {name: '%s'});\n" % institution)
with open(file, "r") as fin:
jsonData = json.load(fin)
for entry in jsonData:
if entry is None:
continue
fout.write("CREATE(a:Author {")
firstKey = True
for key in entry.keys():
if firstKey is True:
fout.write("%s: '%s'" % (key, entry.get(key)))
firstKey = False
else:
fout.write(", %s: '%s'" % (key, entry.get(key)))
fout.write("});\n")
fout.write("MATCH(i:Institution {name: '%s'}),(a:Author {name: '%s'}) MERGE (a)-[r:`ASSOCIATED TO`]->(i);\n" %(institution, entry.get('name')))

subject: Convert json with primary author to cypher script format
message: Convert json with primary author to cypher script format
lang: Python
license: mit
repos: arcosta/sci-synergy,arcosta/sci-synergy,arcosta/sci-synergy
prompt: Convert json with primary author to cypher script format
response:
import os
import sys
import json
cypher = "seeds.cql"
for file in os.listdir(os.getcwd()):
if file.find("docentes-") >= 0:
with open(cypher, "a", encoding="utf-8") as fout:
institution = file.split("-")[1].split(".")[0]
fout.write("CREATE(n:Institution {name: '%s'});\n" % institution)
with open(file, "r") as fin:
jsonData = json.load(fin)
for entry in jsonData:
if entry is None:
continue
fout.write("CREATE(a:Author {")
firstKey = True
for key in entry.keys():
if firstKey is True:
fout.write("%s: '%s'" % (key, entry.get(key)))
firstKey = False
else:
fout.write(", %s: '%s'" % (key, entry.get(key)))
fout.write("});\n")
fout.write("MATCH(i:Institution {name: '%s'}),(a:Author {name: '%s'}) MERGE (a)-[r:`ASSOCIATED TO`]->(i);\n" %(institution, entry.get('name')))

prompt_tagged: <commit_before><commit_msg>Convert json with primary author to cypher script format<commit_after>
response_tagged:
import os
import sys
import json
cypher = "seeds.cql"
for file in os.listdir(os.getcwd()):
if file.find("docentes-") >= 0:
with open(cypher, "a", encoding="utf-8") as fout:
institution = file.split("-")[1].split(".")[0]
fout.write("CREATE(n:Institution {name: '%s'});\n" % institution)
with open(file, "r") as fin:
jsonData = json.load(fin)
for entry in jsonData:
if entry is None:
continue
fout.write("CREATE(a:Author {")
firstKey = True
for key in entry.keys():
if firstKey is True:
fout.write("%s: '%s'" % (key, entry.get(key)))
firstKey = False
else:
fout.write(", %s: '%s'" % (key, entry.get(key)))
fout.write("});\n")
fout.write("MATCH(i:Institution {name: '%s'}),(a:Author {name: '%s'}) MERGE (a)-[r:`ASSOCIATED TO`]->(i);\n" %(institution, entry.get('name')))

text:
Convert json with primary author to cypher script format
import os
import sys
import json
cypher = "seeds.cql"
for file in os.listdir(os.getcwd()):
if file.find("docentes-") >= 0:
with open(cypher, "a", encoding="utf-8") as fout:
institution = file.split("-")[1].split(".")[0]
fout.write("CREATE(n:Institution {name: '%s'});\n" % institution)
with open(file, "r") as fin:
jsonData = json.load(fin)
for entry in jsonData:
if entry is None:
continue
fout.write("CREATE(a:Author {")
firstKey = True
for key in entry.keys():
if firstKey is True:
fout.write("%s: '%s'" % (key, entry.get(key)))
firstKey = False
else:
fout.write(", %s: '%s'" % (key, entry.get(key)))
fout.write("});\n")
fout.write("MATCH(i:Institution {name: '%s'}),(a:Author {name: '%s'}) MERGE (a)-[r:`ASSOCIATED TO`]->(i);\n" %(institution, entry.get('name')))

text_tagged:
<commit_before><commit_msg>Convert json with primary author to cypher script format<commit_after>
import os
import sys
import json
cypher = "seeds.cql"
for file in os.listdir(os.getcwd()):
if file.find("docentes-") >= 0:
with open(cypher, "a", encoding="utf-8") as fout:
institution = file.split("-")[1].split(".")[0]
fout.write("CREATE(n:Institution {name: '%s'});\n" % institution)
with open(file, "r") as fin:
jsonData = json.load(fin)
for entry in jsonData:
if entry is None:
continue
fout.write("CREATE(a:Author {")
firstKey = True
for key in entry.keys():
if firstKey is True:
fout.write("%s: '%s'" % (key, entry.get(key)))
firstKey = False
else:
fout.write(", %s: '%s'" % (key, entry.get(key)))
fout.write("});\n")
fout.write("MATCH(i:Institution {name: '%s'}),(a:Author {name: '%s'}) MERGE (a)-[r:`ASSOCIATED TO`]->(i);\n" %(institution, entry.get('name')))

---
commit: 35e05afed6d65a2736069ed59a6732eb6b39aa2b
old_file: spc/expressions.py
new_file: spc/expressions.py
old_contents: (empty)
new_contents:
"""
The expression language is fully parsed out before passing it to a backend. This
prevents the backend from having to keep excess context in order to understand
the expression. For example, take the following expression:
(+ a (* b (func 1 2 3)))
Compare the nested form:
Add(a, Multiply(b, Call(func, [1, 2, 3])))
With the serial form:
Enter_Add()
Param(a)
Enter_Multiply()
Param(b)
Enter_Call(func)
Param(1)
Param(2)
Param(3)
End_Call(func)
End_Multiply()
End_Add()
"""
from collections import namedtuple
Call = namedtuple('Call', ['func', 'params'])
Variable = namedtuple('Variable', ['name'])
Integer = namedtuple('Integer', ['integer'])
Reference = namedtuple('Reference', ['expr'])
Dereference = namedtuple('Dereference', ['expr'])
PointerToInt = namedtuple('PointerToInt', ['expr'])
IntToPointer = namedtuple('IntToPointer', ['expr'])
Cast = namedtuple('Cast', ['type', 'expr'])
Func = namedtuple('Func', ['name'])
ArrayGet = namedtuple('ArrayGet', ['array', 'index'])
StructGet = namedtuple('StructGet', ['struct', 'fields'])
ARITH_PLUS, ARITH_MINUS, ARITH_TIMES, ARITH_DIVIDE, ARITH_MOD = range(5)
Arithmetic = namedtuple('Artithmetic', ['kind', 'lhs', 'rhs'])
CMP_LESS, CMP_GREATER, CMP_LESSEQ, CMP_GREATEQ, CMP_EQ, CMP_NOTEQ = range(6)
Compare = namedtuple('Compare', ['kind', 'lhs', 'rhs'])
SizeOf = namedtuple('SizeOf', ['type'])

subject: Add types for describing expression trees
message: Add types for describing expression trees
lang: Python
license: mit
repos: adamnew123456/spc,adamnew123456/spc
prompt: Add types for describing expression trees
response:
"""
The expression language is fully parsed out before passing it to a backend. This
prevents the backend from having to keep excess context in order to understand
the expression. For example, take the following expression:
(+ a (* b (func 1 2 3)))
Compare the nested form:
Add(a, Multiply(b, Call(func, [1, 2, 3])))
With the serial form:
Enter_Add()
Param(a)
Enter_Multiply()
Param(b)
Enter_Call(func)
Param(1)
Param(2)
Param(3)
End_Call(func)
End_Multiply()
End_Add()
"""
from collections import namedtuple
Call = namedtuple('Call', ['func', 'params'])
Variable = namedtuple('Variable', ['name'])
Integer = namedtuple('Integer', ['integer'])
Reference = namedtuple('Reference', ['expr'])
Dereference = namedtuple('Dereference', ['expr'])
PointerToInt = namedtuple('PointerToInt', ['expr'])
IntToPointer = namedtuple('IntToPointer', ['expr'])
Cast = namedtuple('Cast', ['type', 'expr'])
Func = namedtuple('Func', ['name'])
ArrayGet = namedtuple('ArrayGet', ['array', 'index'])
StructGet = namedtuple('StructGet', ['struct', 'fields'])
ARITH_PLUS, ARITH_MINUS, ARITH_TIMES, ARITH_DIVIDE, ARITH_MOD = range(5)
Arithmetic = namedtuple('Artithmetic', ['kind', 'lhs', 'rhs'])
CMP_LESS, CMP_GREATER, CMP_LESSEQ, CMP_GREATEQ, CMP_EQ, CMP_NOTEQ = range(6)
Compare = namedtuple('Compare', ['kind', 'lhs', 'rhs'])
SizeOf = namedtuple('SizeOf', ['type'])

prompt_tagged: <commit_before><commit_msg>Add types for describing expression trees<commit_after>
response_tagged:
"""
The expression language is fully parsed out before passing it to a backend. This
prevents the backend from having to keep excess context in order to understand
the expression. For example, take the following expression:
(+ a (* b (func 1 2 3)))
Compare the nested form:
Add(a, Multiply(b, Call(func, [1, 2, 3])))
With the serial form:
Enter_Add()
Param(a)
Enter_Multiply()
Param(b)
Enter_Call(func)
Param(1)
Param(2)
Param(3)
End_Call(func)
End_Multiply()
End_Add()
"""
from collections import namedtuple
Call = namedtuple('Call', ['func', 'params'])
Variable = namedtuple('Variable', ['name'])
Integer = namedtuple('Integer', ['integer'])
Reference = namedtuple('Reference', ['expr'])
Dereference = namedtuple('Dereference', ['expr'])
PointerToInt = namedtuple('PointerToInt', ['expr'])
IntToPointer = namedtuple('IntToPointer', ['expr'])
Cast = namedtuple('Cast', ['type', 'expr'])
Func = namedtuple('Func', ['name'])
ArrayGet = namedtuple('ArrayGet', ['array', 'index'])
StructGet = namedtuple('StructGet', ['struct', 'fields'])
ARITH_PLUS, ARITH_MINUS, ARITH_TIMES, ARITH_DIVIDE, ARITH_MOD = range(5)
Arithmetic = namedtuple('Artithmetic', ['kind', 'lhs', 'rhs'])
CMP_LESS, CMP_GREATER, CMP_LESSEQ, CMP_GREATEQ, CMP_EQ, CMP_NOTEQ = range(6)
Compare = namedtuple('Compare', ['kind', 'lhs', 'rhs'])
SizeOf = namedtuple('SizeOf', ['type'])

text:
Add types for describing expression trees
"""
The expression language is fully parsed out before passing it to a backend. This
prevents the backend from having to keep excess context in order to understand
the expression. For example, take the following expression:
(+ a (* b (func 1 2 3)))
Compare the nested form:
Add(a, Multiply(b, Call(func, [1, 2, 3])))
With the serial form:
Enter_Add()
Param(a)
Enter_Multiply()
Param(b)
Enter_Call(func)
Param(1)
Param(2)
Param(3)
End_Call(func)
End_Multiply()
End_Add()
"""
from collections import namedtuple
Call = namedtuple('Call', ['func', 'params'])
Variable = namedtuple('Variable', ['name'])
Integer = namedtuple('Integer', ['integer'])
Reference = namedtuple('Reference', ['expr'])
Dereference = namedtuple('Dereference', ['expr'])
PointerToInt = namedtuple('PointerToInt', ['expr'])
IntToPointer = namedtuple('IntToPointer', ['expr'])
Cast = namedtuple('Cast', ['type', 'expr'])
Func = namedtuple('Func', ['name'])
ArrayGet = namedtuple('ArrayGet', ['array', 'index'])
StructGet = namedtuple('StructGet', ['struct', 'fields'])
ARITH_PLUS, ARITH_MINUS, ARITH_TIMES, ARITH_DIVIDE, ARITH_MOD = range(5)
Arithmetic = namedtuple('Artithmetic', ['kind', 'lhs', 'rhs'])
CMP_LESS, CMP_GREATER, CMP_LESSEQ, CMP_GREATEQ, CMP_EQ, CMP_NOTEQ = range(6)
Compare = namedtuple('Compare', ['kind', 'lhs', 'rhs'])
SizeOf = namedtuple('SizeOf', ['type'])

text_tagged:
<commit_before><commit_msg>Add types for describing expression trees<commit_after>
"""
The expression language is fully parsed out before passing it to a backend. This
prevents the backend from having to keep excess context in order to understand
the expression. For example, take the following expression:
(+ a (* b (func 1 2 3)))
Compare the nested form:
Add(a, Multiply(b, Call(func, [1, 2, 3])))
With the serial form:
Enter_Add()
Param(a)
Enter_Multiply()
Param(b)
Enter_Call(func)
Param(1)
Param(2)
Param(3)
End_Call(func)
End_Multiply()
End_Add()
"""
from collections import namedtuple
Call = namedtuple('Call', ['func', 'params'])
Variable = namedtuple('Variable', ['name'])
Integer = namedtuple('Integer', ['integer'])
Reference = namedtuple('Reference', ['expr'])
Dereference = namedtuple('Dereference', ['expr'])
PointerToInt = namedtuple('PointerToInt', ['expr'])
IntToPointer = namedtuple('IntToPointer', ['expr'])
Cast = namedtuple('Cast', ['type', 'expr'])
Func = namedtuple('Func', ['name'])
ArrayGet = namedtuple('ArrayGet', ['array', 'index'])
StructGet = namedtuple('StructGet', ['struct', 'fields'])
ARITH_PLUS, ARITH_MINUS, ARITH_TIMES, ARITH_DIVIDE, ARITH_MOD = range(5)
Arithmetic = namedtuple('Artithmetic', ['kind', 'lhs', 'rhs'])
CMP_LESS, CMP_GREATER, CMP_LESSEQ, CMP_GREATEQ, CMP_EQ, CMP_NOTEQ = range(6)
Compare = namedtuple('Compare', ['kind', 'lhs', 'rhs'])
SizeOf = namedtuple('SizeOf', ['type'])

---
commit: 9ffed9ce7dad6640246510db579c744037d40f88
old_file: motion_tracker/model/eval_model.py
new_file: motion_tracker/model/eval_model.py
old_contents: (empty)
new_contents:
import os
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from motion_tracker.model.model import make_model
from motion_tracker.utils.image_generator import AlovGenerator
if __name__ == "__main__":
img_edge_size = 224
backend_id = 'th'
error_analysis_dir = './work/error_analysis/'
os.makedirs(error_analysis_dir, exist_ok=True)
weights_fname = './work/model_weights.h5'
my_gen = AlovGenerator(output_width = img_edge_size,
output_height = img_edge_size,
crops_per_image=4,
batch_size = 500,
desired_dim_ordering = backend_id).flow()
my_model = make_model(img_edge_size, backend_id)
my_model.load_weights(weights_fname)
X, y = next(my_gen)
preds = my_model.predict(X)
pred_df = pd.DataFrame({'pred_x0': preds[0].ravel(),
'pred_y0': preds[1].ravel(),
'pred_x1': preds[2].ravel(),
'pred_y1': preds[3].ravel()})
actual_df = pd.DataFrame(y)
pred_df = pred_df.join(actual_df)
for coord in ('x0', 'y0', 'x1', 'y1'):
this_coord_pred = pred_df['pred_' + coord]
this_coord_actual = pred_df[coord]
pred_df[coord+'_error'] = (this_coord_actual - this_coord_pred).abs()
my_plot = sns.jointplot(this_coord_pred, this_coord_actual, kind="kde")
my_plot.savefig(error_analysis_dir + coord + '.png')
pred_df['mean_coordinate_error'] = pred_df.eval('(x0_error + y0_error + x1_error + y1_error) / 4')
error_plot = sns.distplot(pred_df.mean_coordinate_error)
plt.savefig(error_analysis_dir + 'mean_coordinate_error_dist.png')
plt.close()
pred_df.to_csv(error_analysis_dir + 'prediction_error_info.csv', index=False)

subject: Add analysis and plotting of prediction errors
message: Add analysis and plotting of prediction errors
lang: Python
license: mit
repos: dansbecker/motion-tracking
prompt: Add analysis and plotting of prediction errors
response:
import os
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from motion_tracker.model.model import make_model
from motion_tracker.utils.image_generator import AlovGenerator
if __name__ == "__main__":
img_edge_size = 224
backend_id = 'th'
error_analysis_dir = './work/error_analysis/'
os.makedirs(error_analysis_dir, exist_ok=True)
weights_fname = './work/model_weights.h5'
my_gen = AlovGenerator(output_width = img_edge_size,
output_height = img_edge_size,
crops_per_image=4,
batch_size = 500,
desired_dim_ordering = backend_id).flow()
my_model = make_model(img_edge_size, backend_id)
my_model.load_weights(weights_fname)
X, y = next(my_gen)
preds = my_model.predict(X)
pred_df = pd.DataFrame({'pred_x0': preds[0].ravel(),
'pred_y0': preds[1].ravel(),
'pred_x1': preds[2].ravel(),
'pred_y1': preds[3].ravel()})
actual_df = pd.DataFrame(y)
pred_df = pred_df.join(actual_df)
for coord in ('x0', 'y0', 'x1', 'y1'):
this_coord_pred = pred_df['pred_' + coord]
this_coord_actual = pred_df[coord]
pred_df[coord+'_error'] = (this_coord_actual - this_coord_pred).abs()
my_plot = sns.jointplot(this_coord_pred, this_coord_actual, kind="kde")
my_plot.savefig(error_analysis_dir + coord + '.png')
pred_df['mean_coordinate_error'] = pred_df.eval('(x0_error + y0_error + x1_error + y1_error) / 4')
error_plot = sns.distplot(pred_df.mean_coordinate_error)
plt.savefig(error_analysis_dir + 'mean_coordinate_error_dist.png')
plt.close()
pred_df.to_csv(error_analysis_dir + 'prediction_error_info.csv', index=False)

prompt_tagged: <commit_before><commit_msg>Add analysis and plotting of prediction errors<commit_after>
response_tagged:
import os
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from motion_tracker.model.model import make_model
from motion_tracker.utils.image_generator import AlovGenerator
if __name__ == "__main__":
img_edge_size = 224
backend_id = 'th'
error_analysis_dir = './work/error_analysis/'
os.makedirs(error_analysis_dir, exist_ok=True)
weights_fname = './work/model_weights.h5'
my_gen = AlovGenerator(output_width = img_edge_size,
output_height = img_edge_size,
crops_per_image=4,
batch_size = 500,
desired_dim_ordering = backend_id).flow()
my_model = make_model(img_edge_size, backend_id)
my_model.load_weights(weights_fname)
X, y = next(my_gen)
preds = my_model.predict(X)
pred_df = pd.DataFrame({'pred_x0': preds[0].ravel(),
'pred_y0': preds[1].ravel(),
'pred_x1': preds[2].ravel(),
'pred_y1': preds[3].ravel()})
actual_df = pd.DataFrame(y)
pred_df = pred_df.join(actual_df)
for coord in ('x0', 'y0', 'x1', 'y1'):
this_coord_pred = pred_df['pred_' + coord]
this_coord_actual = pred_df[coord]
pred_df[coord+'_error'] = (this_coord_actual - this_coord_pred).abs()
my_plot = sns.jointplot(this_coord_pred, this_coord_actual, kind="kde")
my_plot.savefig(error_analysis_dir + coord + '.png')
pred_df['mean_coordinate_error'] = pred_df.eval('(x0_error + y0_error + x1_error + y1_error) / 4')
error_plot = sns.distplot(pred_df.mean_coordinate_error)
plt.savefig(error_analysis_dir + 'mean_coordinate_error_dist.png')
plt.close()
pred_df.to_csv(error_analysis_dir + 'prediction_error_info.csv', index=False)

text:
Add analysis and plotting of prediction errors
import os
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from motion_tracker.model.model import make_model
from motion_tracker.utils.image_generator import AlovGenerator
if __name__ == "__main__":
img_edge_size = 224
backend_id = 'th'
error_analysis_dir = './work/error_analysis/'
os.makedirs(error_analysis_dir, exist_ok=True)
weights_fname = './work/model_weights.h5'
my_gen = AlovGenerator(output_width = img_edge_size,
output_height = img_edge_size,
crops_per_image=4,
batch_size = 500,
desired_dim_ordering = backend_id).flow()
my_model = make_model(img_edge_size, backend_id)
my_model.load_weights(weights_fname)
X, y = next(my_gen)
preds = my_model.predict(X)
pred_df = pd.DataFrame({'pred_x0': preds[0].ravel(),
'pred_y0': preds[1].ravel(),
'pred_x1': preds[2].ravel(),
'pred_y1': preds[3].ravel()})
actual_df = pd.DataFrame(y)
pred_df = pred_df.join(actual_df)
for coord in ('x0', 'y0', 'x1', 'y1'):
this_coord_pred = pred_df['pred_' + coord]
this_coord_actual = pred_df[coord]
pred_df[coord+'_error'] = (this_coord_actual - this_coord_pred).abs()
my_plot = sns.jointplot(this_coord_pred, this_coord_actual, kind="kde")
my_plot.savefig(error_analysis_dir + coord + '.png')
pred_df['mean_coordinate_error'] = pred_df.eval('(x0_error + y0_error + x1_error + y1_error) / 4')
error_plot = sns.distplot(pred_df.mean_coordinate_error)
plt.savefig(error_analysis_dir + 'mean_coordinate_error_dist.png')
plt.close()
pred_df.to_csv(error_analysis_dir + 'prediction_error_info.csv', index=False)

text_tagged:
<commit_before><commit_msg>Add analysis and plotting of prediction errors<commit_after>
import os
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from motion_tracker.model.model import make_model
from motion_tracker.utils.image_generator import AlovGenerator
if __name__ == "__main__":
img_edge_size = 224
backend_id = 'th'
error_analysis_dir = './work/error_analysis/'
os.makedirs(error_analysis_dir, exist_ok=True)
weights_fname = './work/model_weights.h5'
my_gen = AlovGenerator(output_width = img_edge_size,
output_height = img_edge_size,
crops_per_image=4,
batch_size = 500,
desired_dim_ordering = backend_id).flow()
my_model = make_model(img_edge_size, backend_id)
my_model.load_weights(weights_fname)
X, y = next(my_gen)
preds = my_model.predict(X)
pred_df = pd.DataFrame({'pred_x0': preds[0].ravel(),
'pred_y0': preds[1].ravel(),
'pred_x1': preds[2].ravel(),
'pred_y1': preds[3].ravel()})
actual_df = pd.DataFrame(y)
pred_df = pred_df.join(actual_df)
for coord in ('x0', 'y0', 'x1', 'y1'):
this_coord_pred = pred_df['pred_' + coord]
this_coord_actual = pred_df[coord]
pred_df[coord+'_error'] = (this_coord_actual - this_coord_pred).abs()
my_plot = sns.jointplot(this_coord_pred, this_coord_actual, kind="kde")
my_plot.savefig(error_analysis_dir + coord + '.png')
pred_df['mean_coordinate_error'] = pred_df.eval('(x0_error + y0_error + x1_error + y1_error) / 4')
error_plot = sns.distplot(pred_df.mean_coordinate_error)
plt.savefig(error_analysis_dir + 'mean_coordinate_error_dist.png')
plt.close()
pred_df.to_csv(error_analysis_dir + 'prediction_error_info.csv', index=False)

---
commit: 82dece03d2bb5f691873572b2191733008370924
old_file: bluesky/broker_callbacks.py
new_file: bluesky/broker_callbacks.py
old_contents: (empty)
new_contents:
from dataportal import DataBroker as db
from metadatastore.commands import find_event_descriptors, find_run_starts
def post_run(callback):
"""
Trigger a callback to process all the Documents from a run at the end.
This function does not receive the Document stream during collection.
It retrieves the complete set of Documents from the DataBroker after
collection is complete.
Parameters
----------
callback : callable
a function that accepts all four Documents
Returns
-------
func : function
a function that acepts a RunStop Document
Examples
--------
Print a table with full (lossless) result set at the end of a run.
>>> s = Ascan(motor, [det1], [1,2,3])
>>> table = LiveTable(['det1', 'motor'])
>>> RE(s, {'stop': post_run(table)})
+------------+-------------------+----------------+----------------+
| seq_num | time | det1 | motor |
+------------+-------------------+----------------+----------------+
| 3 | 14:02:32.218348 | 5.00 | 3.00 |
| 2 | 14:02:32.158503 | 5.00 | 2.00 |
| 1 | 14:02:32.099807 | 5.00 | 1.00 |
+------------+-------------------+----------------+----------------+
"""
def f(stop_doc):
uid = stop_doc['run_start']
start, = find_run_starts(uid=uid)
descriptors = find_event_descriptors(run_start=uid)
# For convenience, I'll rely on the broker to get Events.
header = db[uid]
events = db.fetch_events(header)
callback.start(start)
for d in descriptors:
callback.descriptor(d)
for e in events:
callback.event(e)
callback.stop(stop_doc)
return f

subject: Add function that run any callback using broker data instead of strema.
message: ENH: Add function that run any callback using broker data instead of strema.
lang: Python
license: bsd-3-clause
repos: klauer/bluesky,ericdill/bluesky,klauer/bluesky,sameera2004/bluesky,ericdill/bluesky,dchabot/bluesky,dchabot/bluesky,sameera2004/bluesky
prompt: ENH: Add function that run any callback using broker data instead of strema.
response:
from dataportal import DataBroker as db
from metadatastore.commands import find_event_descriptors, find_run_starts
def post_run(callback):
"""
Trigger a callback to process all the Documents from a run at the end.
This function does not receive the Document stream during collection.
It retrieves the complete set of Documents from the DataBroker after
collection is complete.
Parameters
----------
callback : callable
a function that accepts all four Documents
Returns
-------
func : function
a function that acepts a RunStop Document
Examples
--------
Print a table with full (lossless) result set at the end of a run.
>>> s = Ascan(motor, [det1], [1,2,3])
>>> table = LiveTable(['det1', 'motor'])
>>> RE(s, {'stop': post_run(table)})
+------------+-------------------+----------------+----------------+
| seq_num | time | det1 | motor |
+------------+-------------------+----------------+----------------+
| 3 | 14:02:32.218348 | 5.00 | 3.00 |
| 2 | 14:02:32.158503 | 5.00 | 2.00 |
| 1 | 14:02:32.099807 | 5.00 | 1.00 |
+------------+-------------------+----------------+----------------+
"""
def f(stop_doc):
uid = stop_doc['run_start']
start, = find_run_starts(uid=uid)
descriptors = find_event_descriptors(run_start=uid)
# For convenience, I'll rely on the broker to get Events.
header = db[uid]
events = db.fetch_events(header)
callback.start(start)
for d in descriptors:
callback.descriptor(d)
for e in events:
callback.event(e)
callback.stop(stop_doc)
return f

prompt_tagged: <commit_before><commit_msg>ENH: Add function that run any callback using broker data instead of strema.<commit_after>
response_tagged:
from dataportal import DataBroker as db
from metadatastore.commands import find_event_descriptors, find_run_starts
def post_run(callback):
"""
Trigger a callback to process all the Documents from a run at the end.
This function does not receive the Document stream during collection.
It retrieves the complete set of Documents from the DataBroker after
collection is complete.
Parameters
----------
callback : callable
a function that accepts all four Documents
Returns
-------
func : function
a function that acepts a RunStop Document
Examples
--------
Print a table with full (lossless) result set at the end of a run.
>>> s = Ascan(motor, [det1], [1,2,3])
>>> table = LiveTable(['det1', 'motor'])
>>> RE(s, {'stop': post_run(table)})
+------------+-------------------+----------------+----------------+
| seq_num | time | det1 | motor |
+------------+-------------------+----------------+----------------+
| 3 | 14:02:32.218348 | 5.00 | 3.00 |
| 2 | 14:02:32.158503 | 5.00 | 2.00 |
| 1 | 14:02:32.099807 | 5.00 | 1.00 |
+------------+-------------------+----------------+----------------+
"""
def f(stop_doc):
uid = stop_doc['run_start']
start, = find_run_starts(uid=uid)
descriptors = find_event_descriptors(run_start=uid)
# For convenience, I'll rely on the broker to get Events.
header = db[uid]
events = db.fetch_events(header)
callback.start(start)
for d in descriptors:
callback.descriptor(d)
for e in events:
callback.event(e)
callback.stop(stop_doc)
return f

text:
ENH: Add function that run any callback using broker data instead of strema.
from dataportal import DataBroker as db
from metadatastore.commands import find_event_descriptors, find_run_starts
def post_run(callback):
"""
Trigger a callback to process all the Documents from a run at the end.
This function does not receive the Document stream during collection.
It retrieves the complete set of Documents from the DataBroker after
collection is complete.
Parameters
----------
callback : callable
a function that accepts all four Documents
Returns
-------
func : function
a function that acepts a RunStop Document
Examples
--------
Print a table with full (lossless) result set at the end of a run.
>>> s = Ascan(motor, [det1], [1,2,3])
>>> table = LiveTable(['det1', 'motor'])
>>> RE(s, {'stop': post_run(table)})
+------------+-------------------+----------------+----------------+
| seq_num | time | det1 | motor |
+------------+-------------------+----------------+----------------+
| 3 | 14:02:32.218348 | 5.00 | 3.00 |
| 2 | 14:02:32.158503 | 5.00 | 2.00 |
| 1 | 14:02:32.099807 | 5.00 | 1.00 |
+------------+-------------------+----------------+----------------+
"""
def f(stop_doc):
uid = stop_doc['run_start']
start, = find_run_starts(uid=uid)
descriptors = find_event_descriptors(run_start=uid)
# For convenience, I'll rely on the broker to get Events.
header = db[uid]
events = db.fetch_events(header)
callback.start(start)
for d in descriptors:
callback.descriptor(d)
for e in events:
callback.event(e)
callback.stop(stop_doc)
return f

text_tagged:
<commit_before><commit_msg>ENH: Add function that run any callback using broker data instead of strema.<commit_after>
from dataportal import DataBroker as db
from metadatastore.commands import find_event_descriptors, find_run_starts
def post_run(callback):
"""
Trigger a callback to process all the Documents from a run at the end.
This function does not receive the Document stream during collection.
It retrieves the complete set of Documents from the DataBroker after
collection is complete.
Parameters
----------
callback : callable
a function that accepts all four Documents
Returns
-------
func : function
a function that acepts a RunStop Document
Examples
--------
Print a table with full (lossless) result set at the end of a run.
>>> s = Ascan(motor, [det1], [1,2,3])
>>> table = LiveTable(['det1', 'motor'])
>>> RE(s, {'stop': post_run(table)})
+------------+-------------------+----------------+----------------+
| seq_num | time | det1 | motor |
+------------+-------------------+----------------+----------------+
| 3 | 14:02:32.218348 | 5.00 | 3.00 |
| 2 | 14:02:32.158503 | 5.00 | 2.00 |
| 1 | 14:02:32.099807 | 5.00 | 1.00 |
+------------+-------------------+----------------+----------------+
"""
def f(stop_doc):
uid = stop_doc['run_start']
start, = find_run_starts(uid=uid)
descriptors = find_event_descriptors(run_start=uid)
# For convenience, I'll rely on the broker to get Events.
header = db[uid]
events = db.fetch_events(header)
callback.start(start)
for d in descriptors:
callback.descriptor(d)
for e in events:
callback.event(e)
callback.stop(stop_doc)
return f

---
commit: 57b1a3b2d7871f6f2f81f8300566778d6b28a85c
old_file: games/management/commands/checkbanners.py
new_file: games/management/commands/checkbanners.py
old_contents: (empty)
new_contents:
import os
from django.core.management.base import BaseCommand
from django.conf import settings
from games.models import Game
class Command(BaseCommand):
def handle(self, *args, **kwargs):
games = Game.objects.all()
for game in games:
icon_path = os.path.join(settings.MEDIA_ROOT, game.icon.name)
if not os.path.exists(icon_path):
print "%s is missing icon" % game
banner_path = os.path.join(settings.MEDIA_ROOT,
game.title_logo.name)
if not os.path.exists(banner_path):
print "%s is missing icon" % game

subject: Add script to check for deleted icon and banners
message: Add script to check for deleted icon and banners
lang: Python
license: agpl-3.0
repos: Turupawn/website,Turupawn/website,Turupawn/website,lutris/website,lutris/website,Turupawn/website,lutris/website,lutris/website
prompt: Add script to check for deleted icon and banners
response:
import os
from django.core.management.base import BaseCommand
from django.conf import settings
from games.models import Game
class Command(BaseCommand):
def handle(self, *args, **kwargs):
games = Game.objects.all()
for game in games:
icon_path = os.path.join(settings.MEDIA_ROOT, game.icon.name)
if not os.path.exists(icon_path):
print "%s is missing icon" % game
banner_path = os.path.join(settings.MEDIA_ROOT,
game.title_logo.name)
if not os.path.exists(banner_path):
print "%s is missing icon" % game

prompt_tagged: <commit_before><commit_msg>Add script to check for deleted icon and banners<commit_after>
response_tagged:
import os
from django.core.management.base import BaseCommand
from django.conf import settings
from games.models import Game
class Command(BaseCommand):
def handle(self, *args, **kwargs):
games = Game.objects.all()
for game in games:
icon_path = os.path.join(settings.MEDIA_ROOT, game.icon.name)
if not os.path.exists(icon_path):
print "%s is missing icon" % game
banner_path = os.path.join(settings.MEDIA_ROOT,
game.title_logo.name)
if not os.path.exists(banner_path):
print "%s is missing icon" % game

text:
Add script to check for deleted icon and banners
import os
from django.core.management.base import BaseCommand
from django.conf import settings
from games.models import Game
class Command(BaseCommand):
def handle(self, *args, **kwargs):
games = Game.objects.all()
for game in games:
icon_path = os.path.join(settings.MEDIA_ROOT, game.icon.name)
if not os.path.exists(icon_path):
print "%s is missing icon" % game
banner_path = os.path.join(settings.MEDIA_ROOT,
game.title_logo.name)
if not os.path.exists(banner_path):
print "%s is missing icon" % game

text_tagged:
<commit_before><commit_msg>Add script to check for deleted icon and banners<commit_after>
import os
from django.core.management.base import BaseCommand
from django.conf import settings
from games.models import Game
class Command(BaseCommand):
def handle(self, *args, **kwargs):
games = Game.objects.all()
for game in games:
icon_path = os.path.join(settings.MEDIA_ROOT, game.icon.name)
if not os.path.exists(icon_path):
print "%s is missing icon" % game
banner_path = os.path.join(settings.MEDIA_ROOT,
game.title_logo.name)
if not os.path.exists(banner_path):
print "%s is missing icon" % game

---
commit: f267fd43d405187e526c5c35f2210bc94ea6a334
old_file: vpr/tests/get_material_ids.py
new_file: vpr/tests/get_material_ids.py
old_contents: (empty)
new_contents:
from django.db import connection
from vpr_storage.views import requestMaterialPDF
from vpr_content.models import Material
def getAllMaterialIDs():
""" """
cursor = connection.cursor()
cursor.execute('select material_id from vpr_content_material')
all_ids = [item[0] for item in cursor.fetchall()]
# get exported materials
cursor.execute('select material_id from vpr_content_materialexport')
export_ids = [item[0] for item in cursor.fetchall()]
for mid in export_ids:
all_ids.remove(mid)
return all_ids
def requestAllPDFs():
mids = getAllMaterialIDs()
count = 1
for mid in mids:
try:
print '[%d/%d] Exporting material %s...' % (count, len(mids), mid)
material = Material.objects.get(material_id=mid)
requestMaterialPDF(material)
count += 1
except:
print 'Failed by unknown error... sorry.'
print 'All is done. Congrats!'

subject: Add script to request creating all material PDFs
message: Add script to request creating all material PDFs
lang: Python
license: agpl-3.0
repos: voer-platform/vp.repo,voer-platform/vp.repo,voer-platform/vp.repo,voer-platform/vp.repo
prompt: Add script to request creating all material PDFs
response:
from django.db import connection
from vpr_storage.views import requestMaterialPDF
from vpr_content.models import Material
def getAllMaterialIDs():
""" """
cursor = connection.cursor()
cursor.execute('select material_id from vpr_content_material')
all_ids = [item[0] for item in cursor.fetchall()]
# get exported materials
cursor.execute('select material_id from vpr_content_materialexport')
export_ids = [item[0] for item in cursor.fetchall()]
for mid in export_ids:
all_ids.remove(mid)
return all_ids
def requestAllPDFs():
mids = getAllMaterialIDs()
count = 1
for mid in mids:
try:
print '[%d/%d] Exporting material %s...' % (count, len(mids), mid)
material = Material.objects.get(material_id=mid)
requestMaterialPDF(material)
count += 1
except:
print 'Failed by unknown error... sorry.'
print 'All is done. Congrats!'

prompt_tagged: <commit_before><commit_msg>Add script to request creating all material PDFs<commit_after>
response_tagged:
from django.db import connection
from vpr_storage.views import requestMaterialPDF
from vpr_content.models import Material
def getAllMaterialIDs():
""" """
cursor = connection.cursor()
cursor.execute('select material_id from vpr_content_material')
all_ids = [item[0] for item in cursor.fetchall()]
# get exported materials
cursor.execute('select material_id from vpr_content_materialexport')
export_ids = [item[0] for item in cursor.fetchall()]
for mid in export_ids:
all_ids.remove(mid)
return all_ids
def requestAllPDFs():
mids = getAllMaterialIDs()
count = 1
for mid in mids:
try:
print '[%d/%d] Exporting material %s...' % (count, len(mids), mid)
material = Material.objects.get(material_id=mid)
requestMaterialPDF(material)
count += 1
except:
print 'Failed by unknown error... sorry.'
print 'All is done. Congrats!'

text:
Add script to request creating all material PDFs
from django.db import connection
from vpr_storage.views import requestMaterialPDF
from vpr_content.models import Material
def getAllMaterialIDs():
""" """
cursor = connection.cursor()
cursor.execute('select material_id from vpr_content_material')
all_ids = [item[0] for item in cursor.fetchall()]
# get exported materials
cursor.execute('select material_id from vpr_content_materialexport')
export_ids = [item[0] for item in cursor.fetchall()]
for mid in export_ids:
all_ids.remove(mid)
return all_ids
def requestAllPDFs():
mids = getAllMaterialIDs()
count = 1
for mid in mids:
try:
print '[%d/%d] Exporting material %s...' % (count, len(mids), mid)
material = Material.objects.get(material_id=mid)
requestMaterialPDF(material)
count += 1
except:
print 'Failed by unknown error... sorry.'
print 'All is done. Congrats!'

text_tagged:
<commit_before><commit_msg>Add script to request creating all material PDFs<commit_after>
from django.db import connection
from vpr_storage.views import requestMaterialPDF
from vpr_content.models import Material
def getAllMaterialIDs():
""" """
cursor = connection.cursor()
cursor.execute('select material_id from vpr_content_material')
all_ids = [item[0] for item in cursor.fetchall()]
# get exported materials
cursor.execute('select material_id from vpr_content_materialexport')
export_ids = [item[0] for item in cursor.fetchall()]
for mid in export_ids:
all_ids.remove(mid)
return all_ids
def requestAllPDFs():
mids = getAllMaterialIDs()
count = 1
for mid in mids:
try:
print '[%d/%d] Exporting material %s...' % (count, len(mids), mid)
material = Material.objects.get(material_id=mid)
requestMaterialPDF(material)
count += 1
except:
print 'Failed by unknown error... sorry.'
print 'All is done. Congrats!'

---
commit: ba1523b7f10000d2c15fca08ef8a0211329af407
old_file: fabfile.py
new_file: fabfile.py
old_contents: (empty)
new_contents:
#!/usr/bin/env python
# encoding: utf-8
import os
from fabric.api import *
from fabric.contrib.console import confirm
from fabric.contrib.files import exists, upload_template
from fabric.colors import red, green, yellow, blue
env.use_ssh_config = True
env.hosts = ['xdocker']
www_user = 'sysadmin'
www_group = 'sysadmin'
git_repo = 'git@github.com:XDocker/Engine.git'
project_folder = '/home/sysadmin/projects/xdocker'
def deploy():
local('git push')
with cd(project_folder):
run('git pull origin')
run('venv/bin/pip install -r requirements.txt')
restart_server()
def restart_server():
run(os.path.join(project_folder, 'restart.sh'))

subject: Add fabric file for deployment
message: Add fabric file for deployment
lang: Python
license: apache-2.0
repos: XDocker/Engine,XDocker/Engine
prompt: Add fabric file for deployment
response:
#!/usr/bin/env python
# encoding: utf-8
import os
from fabric.api import *
from fabric.contrib.console import confirm
from fabric.contrib.files import exists, upload_template
from fabric.colors import red, green, yellow, blue
env.use_ssh_config = True
env.hosts = ['xdocker']
www_user = 'sysadmin'
www_group = 'sysadmin'
git_repo = 'git@github.com:XDocker/Engine.git'
project_folder = '/home/sysadmin/projects/xdocker'
def deploy():
local('git push')
with cd(project_folder):
run('git pull origin')
run('venv/bin/pip install -r requirements.txt')
restart_server()
def restart_server():
run(os.path.join(project_folder, 'restart.sh'))

prompt_tagged: <commit_before><commit_msg>Add fabric file for deployment<commit_after>
response_tagged:
#!/usr/bin/env python
# encoding: utf-8
import os
from fabric.api import *
from fabric.contrib.console import confirm
from fabric.contrib.files import exists, upload_template
from fabric.colors import red, green, yellow, blue
env.use_ssh_config = True
env.hosts = ['xdocker']
www_user = 'sysadmin'
www_group = 'sysadmin'
git_repo = 'git@github.com:XDocker/Engine.git'
project_folder = '/home/sysadmin/projects/xdocker'
def deploy():
local('git push')
with cd(project_folder):
run('git pull origin')
run('venv/bin/pip install -r requirements.txt')
restart_server()
def restart_server():
run(os.path.join(project_folder, 'restart.sh'))

text:
Add fabric file for deployment
#!/usr/bin/env python
# encoding: utf-8
import os
from fabric.api import *
from fabric.contrib.console import confirm
from fabric.contrib.files import exists, upload_template
from fabric.colors import red, green, yellow, blue
env.use_ssh_config = True
env.hosts = ['xdocker']
www_user = 'sysadmin'
www_group = 'sysadmin'
git_repo = 'git@github.com:XDocker/Engine.git'
project_folder = '/home/sysadmin/projects/xdocker'
def deploy():
local('git push')
with cd(project_folder):
run('git pull origin')
run('venv/bin/pip install -r requirements.txt')
restart_server()
def restart_server():
run(os.path.join(project_folder, 'restart.sh'))

text_tagged:
<commit_before><commit_msg>Add fabric file for deployment<commit_after>
#!/usr/bin/env python
# encoding: utf-8
import os
from fabric.api import *
from fabric.contrib.console import confirm
from fabric.contrib.files import exists, upload_template
from fabric.colors import red, green, yellow, blue
env.use_ssh_config = True
env.hosts = ['xdocker']
www_user = 'sysadmin'
www_group = 'sysadmin'
git_repo = 'git@github.com:XDocker/Engine.git'
project_folder = '/home/sysadmin/projects/xdocker'
def deploy():
local('git push')
with cd(project_folder):
run('git pull origin')
run('venv/bin/pip install -r requirements.txt')
restart_server()
def restart_server():
run(os.path.join(project_folder, 'restart.sh'))

---
commit: 7b629e134ddd8de512a685e8614af08194ce3d4f
old_file: CodeFights/isCoolTeam.py
new_file: CodeFights/isCoolTeam.py
old_contents: (empty)
new_contents:
#!/usr/local/bin/python
# Code Fights Is Cool Team Problem
class Team(object):
def __init__(self, names):
self.names = names
# TO DO
def isCoolTeam(team):
return bool(Team(team))
def main():
tests = [
[["Mark", "Kelly", "Kurt", "Terk"], True],
[["Lucy"], True],
[["Rob", "Bobby", "Billy"], False],
[["Sophie", "Boris", "EriC", "Charlotte"], True],
[["Sophie", "Boris", "Eric", "Charlotte", "Charlie"], False],
[["Sophie", "Edward", "Deb", "Boris", "Stephanie", "Eric", "Charlotte",
"Eric", "Charlie"], True],
[["Bobo", "obob", "Bobo", "ob"], True],
[["Edward", "Daniel", "Lily"], True],
[["ANTONY", "James"], False],
[["Ned", "Ben"], True]
]
for t in tests:
res = isCoolTeam(t[0])
ans = t[1]
if ans == res:
print("PASSED: isCoolTeam({}) returned {}"
.format(t[0], res))
else:
print("FAILED: isCoolTeam({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()

subject: Set up Code Fights is cool team problem
message: Set up Code Fights is cool team problem
lang: Python
license: mit
repos: HKuz/Test_Code
prompt: Set up Code Fights is cool team problem
response:
#!/usr/local/bin/python
# Code Fights Is Cool Team Problem
class Team(object):
def __init__(self, names):
self.names = names
# TO DO
def isCoolTeam(team):
return bool(Team(team))
def main():
tests = [
[["Mark", "Kelly", "Kurt", "Terk"], True],
[["Lucy"], True],
[["Rob", "Bobby", "Billy"], False],
[["Sophie", "Boris", "EriC", "Charlotte"], True],
[["Sophie", "Boris", "Eric", "Charlotte", "Charlie"], False],
[["Sophie", "Edward", "Deb", "Boris", "Stephanie", "Eric", "Charlotte",
"Eric", "Charlie"], True],
[["Bobo", "obob", "Bobo", "ob"], True],
[["Edward", "Daniel", "Lily"], True],
[["ANTONY", "James"], False],
[["Ned", "Ben"], True]
]
for t in tests:
res = isCoolTeam(t[0])
ans = t[1]
if ans == res:
print("PASSED: isCoolTeam({}) returned {}"
.format(t[0], res))
else:
print("FAILED: isCoolTeam({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()

prompt_tagged: <commit_before><commit_msg>Set up Code Fights is cool team problem<commit_after>
response_tagged:
#!/usr/local/bin/python
# Code Fights Is Cool Team Problem
class Team(object):
def __init__(self, names):
self.names = names
# TO DO
def isCoolTeam(team):
return bool(Team(team))
def main():
tests = [
[["Mark", "Kelly", "Kurt", "Terk"], True],
[["Lucy"], True],
[["Rob", "Bobby", "Billy"], False],
[["Sophie", "Boris", "EriC", "Charlotte"], True],
[["Sophie", "Boris", "Eric", "Charlotte", "Charlie"], False],
[["Sophie", "Edward", "Deb", "Boris", "Stephanie", "Eric", "Charlotte",
"Eric", "Charlie"], True],
[["Bobo", "obob", "Bobo", "ob"], True],
[["Edward", "Daniel", "Lily"], True],
[["ANTONY", "James"], False],
[["Ned", "Ben"], True]
]
for t in tests:
res = isCoolTeam(t[0])
ans = t[1]
if ans == res:
print("PASSED: isCoolTeam({}) returned {}"
.format(t[0], res))
else:
print("FAILED: isCoolTeam({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()

text:
Set up Code Fights is cool team problem
#!/usr/local/bin/python
# Code Fights Is Cool Team Problem
class Team(object):
def __init__(self, names):
self.names = names
# TO DO
def isCoolTeam(team):
return bool(Team(team))
def main():
tests = [
[["Mark", "Kelly", "Kurt", "Terk"], True],
[["Lucy"], True],
[["Rob", "Bobby", "Billy"], False],
[["Sophie", "Boris", "EriC", "Charlotte"], True],
[["Sophie", "Boris", "Eric", "Charlotte", "Charlie"], False],
[["Sophie", "Edward", "Deb", "Boris", "Stephanie", "Eric", "Charlotte",
"Eric", "Charlie"], True],
[["Bobo", "obob", "Bobo", "ob"], True],
[["Edward", "Daniel", "Lily"], True],
[["ANTONY", "James"], False],
[["Ned", "Ben"], True]
]
for t in tests:
res = isCoolTeam(t[0])
ans = t[1]
if ans == res:
print("PASSED: isCoolTeam({}) returned {}"
.format(t[0], res))
else:
print("FAILED: isCoolTeam({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()

text_tagged:
<commit_before><commit_msg>Set up Code Fights is cool team problem<commit_after>
#!/usr/local/bin/python
# Code Fights Is Cool Team Problem
class Team(object):
def __init__(self, names):
self.names = names
# TO DO
def isCoolTeam(team):
return bool(Team(team))
def main():
tests = [
[["Mark", "Kelly", "Kurt", "Terk"], True],
[["Lucy"], True],
[["Rob", "Bobby", "Billy"], False],
[["Sophie", "Boris", "EriC", "Charlotte"], True],
[["Sophie", "Boris", "Eric", "Charlotte", "Charlie"], False],
[["Sophie", "Edward", "Deb", "Boris", "Stephanie", "Eric", "Charlotte",
"Eric", "Charlie"], True],
[["Bobo", "obob", "Bobo", "ob"], True],
[["Edward", "Daniel", "Lily"], True],
[["ANTONY", "James"], False],
[["Ned", "Ben"], True]
]
for t in tests:
res = isCoolTeam(t[0])
ans = t[1]
if ans == res:
print("PASSED: isCoolTeam({}) returned {}"
.format(t[0], res))
else:
print("FAILED: isCoolTeam({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()

---
commit: 577f3000daae488603e21a2c3bb1dc2fd459920b
old_file: data/test-biochemists-nb.py
new_file: data/test-biochemists-nb.py
old_contents: (empty)
new_contents:
import numpy as np
from autoencoder.io import read_text
from autoencoder.network import mlp
count = read_text('biochemists.tsv', header='infer')
y = count[:, 0].astype(int)
x = count[:, 1:]
net = mlp(x.shape[1], output_size=1, hidden_size=(), masking=False, loss_type='nb')
model = net['model']
model.summary()
model.compile(loss=net['loss'], optimizer='Adam')
model.fit(x, y, epochs=700, batch_size=32)
print('Theta: %d' % 1/np.exp(model.get_weights()[2][0][0]))

subject: Add a simple NB regression test to compare coefficients with R function glm.nb
message: Add a simple NB regression test to compare coefficients with R function glm.nb
  Former-commit-id: 0ab35ca5ae619fc4f9c2233598f4d7563afdf214
lang: Python
license: apache-2.0
repos: theislab/dca,theislab/dca,theislab/dca
prompt: Add a simple NB regression test to compare coefficients with R function glm.nb
  Former-commit-id: 0ab35ca5ae619fc4f9c2233598f4d7563afdf214
response:
import numpy as np
from autoencoder.io import read_text
from autoencoder.network import mlp
count = read_text('biochemists.tsv', header='infer')
y = count[:, 0].astype(int)
x = count[:, 1:]
net = mlp(x.shape[1], output_size=1, hidden_size=(), masking=False, loss_type='nb')
model = net['model']
model.summary()
model.compile(loss=net['loss'], optimizer='Adam')
model.fit(x, y, epochs=700, batch_size=32)
print('Theta: %d' % 1/np.exp(model.get_weights()[2][0][0]))

prompt_tagged: <commit_before><commit_msg>Add a simple NB regression test to compare coefficients with R function glm.nb
  Former-commit-id: 0ab35ca5ae619fc4f9c2233598f4d7563afdf214<commit_after>
response_tagged:
import numpy as np
from autoencoder.io import read_text
from autoencoder.network import mlp
count = read_text('biochemists.tsv', header='infer')
y = count[:, 0].astype(int)
x = count[:, 1:]
net = mlp(x.shape[1], output_size=1, hidden_size=(), masking=False, loss_type='nb')
model = net['model']
model.summary()
model.compile(loss=net['loss'], optimizer='Adam')
model.fit(x, y, epochs=700, batch_size=32)
print('Theta: %d' % 1/np.exp(model.get_weights()[2][0][0]))

text:
Add a simple NB regression test to compare coefficients with R function glm.nb
Former-commit-id: 0ab35ca5ae619fc4f9c2233598f4d7563afdf214
import numpy as np
from autoencoder.io import read_text
from autoencoder.network import mlp
count = read_text('biochemists.tsv', header='infer')
y = count[:, 0].astype(int)
x = count[:, 1:]
net = mlp(x.shape[1], output_size=1, hidden_size=(), masking=False, loss_type='nb')
model = net['model']
model.summary()
model.compile(loss=net['loss'], optimizer='Adam')
model.fit(x, y, epochs=700, batch_size=32)
print('Theta: %d' % 1/np.exp(model.get_weights()[2][0][0]))

text_tagged:
<commit_before><commit_msg>Add a simple NB regression test to compare coefficients with R function glm.nb
Former-commit-id: 0ab35ca5ae619fc4f9c2233598f4d7563afdf214<commit_after>
import numpy as np
from autoencoder.io import read_text
from autoencoder.network import mlp
count = read_text('biochemists.tsv', header='infer')
y = count[:, 0].astype(int)
x = count[:, 1:]
net = mlp(x.shape[1], output_size=1, hidden_size=(), masking=False, loss_type='nb')
model = net['model']
model.summary()
model.compile(loss=net['loss'], optimizer='Adam')
model.fit(x, y, epochs=700, batch_size=32)
print('Theta: %d' % 1/np.exp(model.get_weights()[2][0][0]))

---
commit: 93a406b5f2ae3fb0027279a4d46bbf310ec7c93b
old_file: projects/sdr_paper/pytorch_experiments/analyze_nonzero.py
new_file: projects/sdr_paper/pytorch_experiments/analyze_nonzero.py
old_contents: (empty)
new_contents:
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import logging
logging.basicConfig(level=logging.ERROR)
import matplotlib
matplotlib.use("Agg")
from tabulate import tabulate
import pandas as pd
from htmresearch.frameworks.pytorch.mnist_sparse_experiment import \
MNISTSparseExperiment
def filterResults(results, filter):
"""
Filter results containing the given condition
:param results: list of experiments returned by `suite.get_exps`
:param filter: list of conditions on the experiment parameters. For example:
["dropout0.0", "dropout0.50"]
:return: filtered results
"""
return [exp for exp in results if any(map(lambda v: v in exp, filter))]
if __name__ == '__main__':
# Initialize experiment options and parameters
suite = MNISTSparseExperiment()
suite.parse_opt()
suite.parse_cfg()
columns = None # ['linearSdr.linearSdr1', 'linearSdr.linearSdr1.l1']
if suite.options.experiments is not None:
for expName in suite.options.experiments:
path = suite.get_exp(expName)[0]
data = suite.get_exps(path=path)
data = filterResults(data, ["min_weight0.0min_dutycycle0.0", "min_weight0.10"])
for exp in data:
values = suite.get_value(exp, 0, "nonzeros", "last")
df = pd.DataFrame.from_dict(values)
print()
print(exp)
if columns is not None:
df = df[columns]
print(tabulate(df, headers='keys', tablefmt='fancy_grid'))
else:
print("Failed to read experiments from arguments.",
"Use '-e' to select experiments or '--help' for other options.")
|
Add code to show nonzero table
|
Add code to show nonzero table
|
Python
|
agpl-3.0
|
subutai/htmresearch,subutai/htmresearch,numenta/htmresearch,numenta/htmresearch,subutai/htmresearch,numenta/htmresearch,numenta/htmresearch,numenta/htmresearch,subutai/htmresearch,numenta/htmresearch,subutai/htmresearch,numenta/htmresearch,subutai/htmresearch,subutai/htmresearch,subutai/htmresearch,numenta/htmresearch
|
Add code to show nonzero table
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import logging
logging.basicConfig(level=logging.ERROR)
import matplotlib
matplotlib.use("Agg")
from tabulate import tabulate
import pandas as pd
from htmresearch.frameworks.pytorch.mnist_sparse_experiment import \
MNISTSparseExperiment
def filterResults(results, filter):
"""
Filter results containing the given condition
:param results: list of experiments returned by `suite.get_exps`
:param filter: list of conditions on the experiment parameters. For example:
["dropout0.0", "dropout0.50"]
:return: filtered results
"""
return [exp for exp in results if any(map(lambda v: v in exp, filter))]
if __name__ == '__main__':
# Initialize experiment options and parameters
suite = MNISTSparseExperiment()
suite.parse_opt()
suite.parse_cfg()
columns = None # ['linearSdr.linearSdr1', 'linearSdr.linearSdr1.l1']
if suite.options.experiments is not None:
for expName in suite.options.experiments:
path = suite.get_exp(expName)[0]
data = suite.get_exps(path=path)
data = filterResults(data, ["min_weight0.0min_dutycycle0.0", "min_weight0.10"])
for exp in data:
values = suite.get_value(exp, 0, "nonzeros", "last")
df = pd.DataFrame.from_dict(values)
print()
print(exp)
if columns is not None:
df = df[columns]
print(tabulate(df, headers='keys', tablefmt='fancy_grid'))
else:
print("Failed to read experiments from arguments.",
"Use '-e' to select experiments or '--help' for other options.")
|
<commit_before><commit_msg>Add code to show nonzero table<commit_after>
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import logging
logging.basicConfig(level=logging.ERROR)
import matplotlib
matplotlib.use("Agg")
from tabulate import tabulate
import pandas as pd
from htmresearch.frameworks.pytorch.mnist_sparse_experiment import \
MNISTSparseExperiment
def filterResults(results, filter):
"""
Filter results containing the given condition
:param results: list of experiments returned by `suite.get_exps`
:param filter: list of conditions on the experiment parameters. For example:
["dropout0.0", "dropout0.50"]
:return: filtered results
"""
return [exp for exp in results if any(map(lambda v: v in exp, filter))]
if __name__ == '__main__':
# Initialize experiment options and parameters
suite = MNISTSparseExperiment()
suite.parse_opt()
suite.parse_cfg()
columns = None # ['linearSdr.linearSdr1', 'linearSdr.linearSdr1.l1']
if suite.options.experiments is not None:
for expName in suite.options.experiments:
path = suite.get_exp(expName)[0]
data = suite.get_exps(path=path)
data = filterResults(data, ["min_weight0.0min_dutycycle0.0", "min_weight0.10"])
for exp in data:
values = suite.get_value(exp, 0, "nonzeros", "last")
df = pd.DataFrame.from_dict(values)
print()
print(exp)
if columns is not None:
df = df[columns]
print(tabulate(df, headers='keys', tablefmt='fancy_grid'))
else:
print("Failed to read experiments from arguments.",
"Use '-e' to select experiments or '--help' for other options.")
|
Add code to show nonzero table# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import logging
logging.basicConfig(level=logging.ERROR)
import matplotlib
matplotlib.use("Agg")
from tabulate import tabulate
import pandas as pd
from htmresearch.frameworks.pytorch.mnist_sparse_experiment import \
MNISTSparseExperiment
def filterResults(results, filter):
"""
Filter results containing the given condition
:param results: list of experiments returned by `suite.get_exps`
:param filter: list of conditions on the experiment parameters. For example:
["dropout0.0", "dropout0.50"]
:return: filtered results
"""
return [exp for exp in results if any(map(lambda v: v in exp, filter))]
if __name__ == '__main__':
# Initialize experiment options and parameters
suite = MNISTSparseExperiment()
suite.parse_opt()
suite.parse_cfg()
columns = None # ['linearSdr.linearSdr1', 'linearSdr.linearSdr1.l1']
if suite.options.experiments is not None:
for expName in suite.options.experiments:
path = suite.get_exp(expName)[0]
data = suite.get_exps(path=path)
data = filterResults(data, ["min_weight0.0min_dutycycle0.0", "min_weight0.10"])
for exp in data:
values = suite.get_value(exp, 0, "nonzeros", "last")
df = pd.DataFrame.from_dict(values)
print()
print(exp)
if columns is not None:
df = df[columns]
print(tabulate(df, headers='keys', tablefmt='fancy_grid'))
else:
print("Failed to read experiments from arguments.",
"Use '-e' to select experiments or '--help' for other options.")
|
<commit_before><commit_msg>Add code to show nonzero table<commit_after># ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import logging
logging.basicConfig(level=logging.ERROR)
import matplotlib
matplotlib.use("Agg")
from tabulate import tabulate
import pandas as pd
from htmresearch.frameworks.pytorch.mnist_sparse_experiment import \
MNISTSparseExperiment
def filterResults(results, filter):
"""
Filter results containing the given condition
:param results: list of experiments returned by `suite.get_exps`
:param filter: list of conditions on the experiment parameters. For example:
["dropout0.0", "dropout0.50"]
:return: filtered results
"""
return [exp for exp in results if any(map(lambda v: v in exp, filter))]
if __name__ == '__main__':
# Initialize experiment options and parameters
suite = MNISTSparseExperiment()
suite.parse_opt()
suite.parse_cfg()
columns = None # ['linearSdr.linearSdr1', 'linearSdr.linearSdr1.l1']
if suite.options.experiments is not None:
for expName in suite.options.experiments:
path = suite.get_exp(expName)[0]
data = suite.get_exps(path=path)
data = filterResults(data, ["min_weight0.0min_dutycycle0.0", "min_weight0.10"])
for exp in data:
values = suite.get_value(exp, 0, "nonzeros", "last")
df = pd.DataFrame.from_dict(values)
print()
print(exp)
if columns is not None:
df = df[columns]
print(tabulate(df, headers='keys', tablefmt='fancy_grid'))
else:
print("Failed to read experiments from arguments.",
"Use '-e' to select experiments or '--help' for other options.")
|
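To make the matching rule of filterResults above concrete, here is a tiny standalone illustration; the experiment names are invented for the example and do not come from the commit.

# filterResults keeps an experiment if any of the filter substrings occurs in its name.
def filterResults(results, filter):
    return [exp for exp in results if any(map(lambda v: v in exp, filter))]

exps = ["mnist_min_weight0.0min_dutycycle0.0", "mnist_min_weight0.10", "mnist_dropout0.50"]
print(filterResults(exps, ["min_weight0.0min_dutycycle0.0", "min_weight0.10"]))
# ['mnist_min_weight0.0min_dutycycle0.0', 'mnist_min_weight0.10']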
|
9f2c9bdb1dfcca677b7efd9f22f697c929b4c223
|
readthedocs/core/migrations/0005_migrate-old-passwords.py
|
readthedocs/core/migrations/0005_migrate-old-passwords.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-11 17:28
from __future__ import unicode_literals
from django.db import migrations
def forwards_func(apps, schema_editor):
User = apps.get_model('auth', 'User')
old_password_patterns = (
'sha1$',
# RTD's production database doesn't have any of these
# but they are included for completeness
'md5$',
'crypt$',
)
for pattern in old_password_patterns:
users = User.objects.filter(password__startswith=pattern)
for user in users:
user.set_unusable_password()
user.save()
class Migration(migrations.Migration):
dependencies = [
('core', '0004_ad-opt-out'),
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.RunPython(forwards_func),
]
|
Migrate old passwords to be unusable
|
Migrate old passwords to be unusable
|
Python
|
mit
|
rtfd/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org
|
Migrate old passwords to be unusable
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-11 17:28
from __future__ import unicode_literals
from django.db import migrations
def forwards_func(apps, schema_editor):
User = apps.get_model('auth', 'User')
old_password_patterns = (
'sha1$',
# RTD's production database doesn't have any of these
# but they are included for completeness
'md5$',
'crypt$',
)
for pattern in old_password_patterns:
users = User.objects.filter(password__startswith=pattern)
for user in users:
user.set_unusable_password()
user.save()
class Migration(migrations.Migration):
dependencies = [
('core', '0004_ad-opt-out'),
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.RunPython(forwards_func),
]
|
<commit_before><commit_msg>Migrate old passwords to be unusable<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-11 17:28
from __future__ import unicode_literals
from django.db import migrations
def forwards_func(apps, schema_editor):
User = apps.get_model('auth', 'User')
old_password_patterns = (
'sha1$',
# RTD's production database doesn't have any of these
# but they are included for completeness
'md5$',
'crypt$',
)
for pattern in old_password_patterns:
users = User.objects.filter(password__startswith=pattern)
for user in users:
user.set_unusable_password()
user.save()
class Migration(migrations.Migration):
dependencies = [
('core', '0004_ad-opt-out'),
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.RunPython(forwards_func),
]
|
Migrate old passwords to be unusable# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-11 17:28
from __future__ import unicode_literals
from django.db import migrations
def forwards_func(apps, schema_editor):
User = apps.get_model('auth', 'User')
old_password_patterns = (
'sha1$',
# RTD's production database doesn't have any of these
# but they are included for completeness
'md5$',
'crypt$',
)
for pattern in old_password_patterns:
users = User.objects.filter(password__startswith=pattern)
for user in users:
user.set_unusable_password()
user.save()
class Migration(migrations.Migration):
dependencies = [
('core', '0004_ad-opt-out'),
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.RunPython(forwards_func),
]
|
<commit_before><commit_msg>Migrate old passwords to be unusable<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-11 17:28
from __future__ import unicode_literals
from django.db import migrations
def forwards_func(apps, schema_editor):
User = apps.get_model('auth', 'User')
old_password_patterns = (
'sha1$',
# RTD's production database doesn't have any of these
# but they are included for completeness
'md5$',
'crypt$',
)
for pattern in old_password_patterns:
users = User.objects.filter(password__startswith=pattern)
for user in users:
user.set_unusable_password()
user.save()
class Migration(migrations.Migration):
dependencies = [
('core', '0004_ad-opt-out'),
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.RunPython(forwards_func),
]
|
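After this migration runs, the affected accounts simply fail the usable-password check and have to go through a password reset. One way to confirm that, assuming a Django shell on the same project (the query below is illustrative, not part of the migration):

# Users whose hash started with 'sha1$', 'md5$' or 'crypt$' should now report False.
from django.contrib.auth.models import User

for user in User.objects.all()[:10]:
    print(user.username, user.has_usable_password())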
|
dc0d2e7bed5f58ae11716e95c554af5345af4c43
|
devops/makedb.py
|
devops/makedb.py
|
import boto.dynamodb
conn = boto.dynamodb.connect_to_region('us-east-1')
table_schema = conn.create_schema(hash_key_name='MAC',hash_key_proto_value=str)
conn.create_table(name='dev-ysniff',schema=table_schema,read_units=10,write_units=10)
|
Add script to make database
|
Add script to make database
|
Python
|
mit
|
jasonsbrooks/ysniff-software,jasonsbrooks/ysniff-software
|
Add script to make database
|
import boto.dynamodb
conn = boto.dynamodb.connect_to_region('us-east-1')
table_schema = conn.create_schema(hash_key_name='MAC',hash_key_proto_value=str)
conn.create_table(name='dev-ysniff',schema=table_schema,read_units=10,write_units=10)
|
<commit_before><commit_msg>Add script to make database<commit_after>
|
import boto.dynamodb
conn = boto.dynamodb.connect_to_region('us-east-1')
table_schema = conn.create_schema(hash_key_name='MAC',hash_key_proto_value=str)
conn.create_table(name='dev-ysniff',schema=table_schema,read_units=10,write_units=10)
|
Add script to make databaseimport boto.dynamodb
conn = boto.dynamodb.connect_to_region('us-east-1')
table_schema = conn.create_schema(hash_key_name='MAC',hash_key_proto_value=str)
conn.create_table(name='dev-ysniff',schema=table_schema,read_units=10,write_units=10)
|
<commit_before><commit_msg>Add script to make database<commit_after>import boto.dynamodb
conn = boto.dynamodb.connect_to_region('us-east-1')
table_schema = conn.create_schema(hash_key_name='MAC',hash_key_proto_value=str)
conn.create_table(name='dev-ysniff',schema=table_schema,read_units=10,write_units=10)
|
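A possible follow-up with the same boto 2 API is writing a row into the table the script creates; the MAC address and attribute names below are placeholders, and the exact layer-2 calls are an assumption rather than code from the commit.

import boto.dynamodb

conn = boto.dynamodb.connect_to_region('us-east-1')
table = conn.get_table('dev-ysniff')
item = table.new_item(hash_key='00:11:22:33:44:55',
                      attrs={'first_seen': '2015-01-01T00:00:00Z'})  # illustrative attribute
item.put()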
|
149195845d7062f7ee12f1d59f52c9d4a054c53f
|
airflow/contrib/operators/gcs_to_gcs.py
|
airflow/contrib/operators/gcs_to_gcs.py
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class GoogleCloudStorageToGoogleCloudStorageOperator(BaseOperator):
"""
Copies an object from a bucket to another, with renaming if requested.
:param source_bucket: The source Google cloud storage bucket where the object is.
:type source_bucket: string
:param source_object: The source name of the object to copy in the Google cloud
storage bucket.
:type source_object: string
:param destination_bucket: The destination Google cloud storage bucket where the object should be.
:type destination_bucket: string
:param destination_object: The destination name of the object in the destination Google cloud
storage bucket.
:type destination_object: string
:param google_cloud_storage_conn_id: The connection ID to use when
connecting to Google cloud storage.
:type google_cloud_storage_conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide delegation enabled.
:type delegate_to: string
"""
template_fields = ('source_bucket', 'source_object', 'destination_bucket', 'destination_object',)
ui_color = '#f0eee4'
@apply_defaults
def __init__(self,
source_bucket,
source_object,
destination_bucket=None,
destination_object=None,
google_cloud_storage_conn_id='google_cloud_storage_default',
delegate_to=None,
*args,
**kwargs):
        super(GoogleCloudStorageToGoogleCloudStorageOperator, self).__init__(*args, **kwargs)
self.source_bucket = source_bucket
self.source_object = source_object
self.destination_bucket = destination_bucket
self.destination_object = destination_object
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.delegate_to = delegate_to
def execute(self, context):
self.log.info('Executing copy: %s, %s, %s, %s', self.source_bucket, self.source_object,
self.destination_bucket or self.source_bucket,
self.destination_object or self.source_object)
hook = GoogleCloudStorageHook(google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to)
hook.copy(self.source_bucket, self.source_object, self.destination_bucket, self.destination_object)
|
Add gcs to gcs copy operator with renaming if required
|
[AIRFLOW-1842] Add gcs to gcs copy operator with renaming if required
Copies an object from a Google Cloud Storage
bucket to another Google
Cloud Storage bucket, with renaming if required.
Closes #2808 from litdeviant/gcs_to_gcs
|
Python
|
apache-2.0
|
MortalViews/incubator-airflow,yk5/incubator-airflow,adamhaney/airflow,apache/airflow,wooga/airflow,lyft/incubator-airflow,malmiron/incubator-airflow,yk5/incubator-airflow,adamhaney/airflow,nathanielvarona/airflow,OpringaoDoTurno/airflow,artwr/airflow,airbnb/airflow,zack3241/incubator-airflow,lyft/incubator-airflow,sid88in/incubator-airflow,gtoonstra/airflow,owlabs/incubator-airflow,bolkedebruin/airflow,danielvdende/incubator-airflow,wndhydrnt/airflow,sid88in/incubator-airflow,edgarRd/incubator-airflow,mtagle/airflow,jgao54/airflow,r39132/airflow,danielvdende/incubator-airflow,DinoCow/airflow,KL-WLCR/incubator-airflow,Acehaidrey/incubator-airflow,danielvdende/incubator-airflow,fenglu-g/incubator-airflow,lxneng/incubator-airflow,cjqian/incubator-airflow,nathanielvarona/airflow,wooga/airflow,mrkm4ntr/incubator-airflow,DinoCow/airflow,lxneng/incubator-airflow,edgarRd/incubator-airflow,danielvdende/incubator-airflow,edgarRd/incubator-airflow,fenglu-g/incubator-airflow,apache/incubator-airflow,RealImpactAnalytics/airflow,jgao54/airflow,andyxhadji/incubator-airflow,malmiron/incubator-airflow,MortalViews/incubator-airflow,dhuang/incubator-airflow,cjqian/incubator-airflow,fenglu-g/incubator-airflow,jgao54/airflow,zack3241/incubator-airflow,cfei18/incubator-airflow,cjqian/incubator-airflow,KL-WLCR/incubator-airflow,OpringaoDoTurno/airflow,lyft/incubator-airflow,sid88in/incubator-airflow,adamhaney/airflow,apache/airflow,RealImpactAnalytics/airflow,nathanielvarona/airflow,akosel/incubator-airflow,wolfier/incubator-airflow,RealImpactAnalytics/airflow,andyxhadji/incubator-airflow,owlabs/incubator-airflow,Acehaidrey/incubator-airflow,akosel/incubator-airflow,apache/incubator-airflow,cfei18/incubator-airflow,danielvdende/incubator-airflow,criccomini/airflow,OpringaoDoTurno/airflow,sergiohgz/incubator-airflow,criccomini/airflow,artwr/airflow,Fokko/incubator-airflow,OpringaoDoTurno/airflow,nathanielvarona/airflow,sekikn/incubator-airflow,subodhchhabra/airflow,zack3241/incubator-airflow,lxneng/incubator-airflow,spektom/incubator-airflow,mistercrunch/airflow,r39132/airflow,Acehaidrey/incubator-airflow,apache/airflow,sekikn/incubator-airflow,apache/airflow,MortalViews/incubator-airflow,gtoonstra/airflow,apache/airflow,Acehaidrey/incubator-airflow,Fokko/incubator-airflow,lxneng/incubator-airflow,subodhchhabra/airflow,mrkm4ntr/incubator-airflow,airbnb/airflow,cfei18/incubator-airflow,r39132/airflow,jfantom/incubator-airflow,jfantom/incubator-airflow,Tagar/incubator-airflow,apache/airflow,wooga/airflow,wndhydrnt/airflow,sergiohgz/incubator-airflow,mtagle/airflow,jfantom/incubator-airflow,malmiron/incubator-airflow,CloverHealth/airflow,gtoonstra/airflow,Tagar/incubator-airflow,dhuang/incubator-airflow,spektom/incubator-airflow,criccomini/airflow,cfei18/incubator-airflow,wolfier/incubator-airflow,sekikn/incubator-airflow,yk5/incubator-airflow,dhuang/incubator-airflow,yati-sagade/incubator-airflow,artwr/airflow,cfei18/incubator-airflow,subodhchhabra/airflow,nathanielvarona/airflow,mistercrunch/airflow,danielvdende/incubator-airflow,mrkm4ntr/incubator-airflow,KL-WLCR/incubator-airflow,CloverHealth/airflow,Tagar/incubator-airflow,adamhaney/airflow,wileeam/airflow,Acehaidrey/incubator-airflow,mistercrunch/airflow,wooga/airflow,sergiohgz/incubator-airflow,bolkedebruin/airflow,yati-sagade/incubator-airflow,bolkedebruin/airflow,apache/incubator-airflow,Tagar/incubator-airflow,subodhchhabra/airflow,KL-WLCR/incubator-airflow,criccomini/airflow,apache/incubator-airflow,mtagle/airflow,artwr/airflow,yati-sagade/incubator-airflo
w,mtagle/airflow,yk5/incubator-airflow,wolfier/incubator-airflow,CloverHealth/airflow,owlabs/incubator-airflow,RealImpactAnalytics/airflow,airbnb/airflow,DinoCow/airflow,sekikn/incubator-airflow,sergiohgz/incubator-airflow,spektom/incubator-airflow,zack3241/incubator-airflow,owlabs/incubator-airflow,yati-sagade/incubator-airflow,Fokko/incubator-airflow,Fokko/incubator-airflow,jfantom/incubator-airflow,akosel/incubator-airflow,akosel/incubator-airflow,cfei18/incubator-airflow,jgao54/airflow,malmiron/incubator-airflow,wileeam/airflow,wolfier/incubator-airflow,wileeam/airflow,mrkm4ntr/incubator-airflow,bolkedebruin/airflow,wndhydrnt/airflow,dhuang/incubator-airflow,spektom/incubator-airflow,r39132/airflow,Acehaidrey/incubator-airflow,bolkedebruin/airflow,fenglu-g/incubator-airflow,wndhydrnt/airflow,andyxhadji/incubator-airflow,lyft/incubator-airflow,airbnb/airflow,edgarRd/incubator-airflow,nathanielvarona/airflow,andyxhadji/incubator-airflow,gtoonstra/airflow,CloverHealth/airflow,sid88in/incubator-airflow,wileeam/airflow,cjqian/incubator-airflow,mistercrunch/airflow,MortalViews/incubator-airflow,DinoCow/airflow
|
[AIRFLOW-1842] Add gcs to gcs copy operator with renaming if required
Copies an object from a Google Cloud Storage
bucket to another Google
Cloud Storage bucket, with renaming if required.
Closes #2808 from litdeviant/gcs_to_gcs
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class GoogleCloudStorageToGoogleCloudStorageOperator(BaseOperator):
"""
Copies an object from a bucket to another, with renaming if requested.
:param source_bucket: The source Google cloud storage bucket where the object is.
:type source_bucket: string
:param source_object: The source name of the object to copy in the Google cloud
storage bucket.
:type source_object: string
:param destination_bucket: The destination Google cloud storage bucket where the object should be.
:type destination_bucket: string
:param destination_object: The destination name of the object in the destination Google cloud
storage bucket.
:type destination_object: string
:param google_cloud_storage_conn_id: The connection ID to use when
connecting to Google cloud storage.
:type google_cloud_storage_conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide delegation enabled.
:type delegate_to: string
"""
template_fields = ('source_bucket', 'source_object', 'destination_bucket', 'destination_object',)
ui_color = '#f0eee4'
@apply_defaults
def __init__(self,
source_bucket,
source_object,
destination_bucket=None,
destination_object=None,
google_cloud_storage_conn_id='google_cloud_storage_default',
delegate_to=None,
*args,
**kwargs):
        super(GoogleCloudStorageToGoogleCloudStorageOperator, self).__init__(*args, **kwargs)
self.source_bucket = source_bucket
self.source_object = source_object
self.destination_bucket = destination_bucket
self.destination_object = destination_object
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.delegate_to = delegate_to
def execute(self, context):
self.log.info('Executing copy: %s, %s, %s, %s', self.source_bucket, self.source_object,
self.destination_bucket or self.source_bucket,
self.destination_object or self.source_object)
hook = GoogleCloudStorageHook(google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to)
hook.copy(self.source_bucket, self.source_object, self.destination_bucket, self.destination_object)
|
<commit_before><commit_msg>[AIRFLOW-1842] Add gcs to gcs copy operator with renaming if required
Copies an object from a Google Cloud Storage
bucket to another Google
Cloud Storage bucket, with renaming if required.
Closes #2808 from litdeviant/gcs_to_gcs<commit_after>
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class GoogleCloudStorageToGoogleCloudStorageOperator(BaseOperator):
"""
Copies an object from a bucket to another, with renaming if requested.
:param source_bucket: The source Google cloud storage bucket where the object is.
:type source_bucket: string
:param source_object: The source name of the object to copy in the Google cloud
storage bucket.
:type source_object: string
:param destination_bucket: The destination Google cloud storage bucket where the object should be.
:type destination_bucket: string
:param destination_object: The destination name of the object in the destination Google cloud
storage bucket.
:type destination_object: string
:param google_cloud_storage_conn_id: The connection ID to use when
connecting to Google cloud storage.
:type google_cloud_storage_conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide delegation enabled.
:type delegate_to: string
"""
template_fields = ('source_bucket', 'source_object', 'destination_bucket', 'destination_object',)
ui_color = '#f0eee4'
@apply_defaults
def __init__(self,
source_bucket,
source_object,
destination_bucket=None,
destination_object=None,
google_cloud_storage_conn_id='google_cloud_storage_default',
delegate_to=None,
*args,
**kwargs):
        super(GoogleCloudStorageToGoogleCloudStorageOperator, self).__init__(*args, **kwargs)
self.source_bucket = source_bucket
self.source_object = source_object
self.destination_bucket = destination_bucket
self.destination_object = destination_object
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.delegate_to = delegate_to
def execute(self, context):
self.log.info('Executing copy: %s, %s, %s, %s', self.source_bucket, self.source_object,
self.destination_bucket or self.source_bucket,
self.destination_object or self.source_object)
hook = GoogleCloudStorageHook(google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to)
hook.copy(self.source_bucket, self.source_object, self.destination_bucket, self.destination_object)
|
[AIRFLOW-1842] Add gcs to gcs copy operator with renaming if required
Copies an object from a Google Cloud Storage
bucket to another Google
Cloud Storage bucket, with renaming if required.
Closes #2808 from litdeviant/gcs_to_gcs# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class GoogleCloudStorageToGoogleCloudStorageOperator(BaseOperator):
"""
Copies an object from a bucket to another, with renaming if requested.
:param source_bucket: The source Google cloud storage bucket where the object is.
:type source_bucket: string
:param source_object: The source name of the object to copy in the Google cloud
storage bucket.
:type source_object: string
:param destination_bucket: The destination Google cloud storage bucket where the object should be.
:type destination_bucket: string
:param destination_object: The destination name of the object in the destination Google cloud
storage bucket.
:type destination_object: string
:param google_cloud_storage_conn_id: The connection ID to use when
connecting to Google cloud storage.
:type google_cloud_storage_conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide delegation enabled.
:type delegate_to: string
"""
template_fields = ('source_bucket', 'source_object', 'destination_bucket', 'destination_object',)
ui_color = '#f0eee4'
@apply_defaults
def __init__(self,
source_bucket,
source_object,
destination_bucket=None,
destination_object=None,
google_cloud_storage_conn_id='google_cloud_storage_default',
delegate_to=None,
*args,
**kwargs):
        super(GoogleCloudStorageToGoogleCloudStorageOperator, self).__init__(*args, **kwargs)
self.source_bucket = source_bucket
self.source_object = source_object
self.destination_bucket = destination_bucket
self.destination_object = destination_object
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.delegate_to = delegate_to
def execute(self, context):
self.log.info('Executing copy: %s, %s, %s, %s', self.source_bucket, self.source_object,
self.destination_bucket or self.source_bucket,
self.destination_object or self.source_object)
hook = GoogleCloudStorageHook(google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to)
hook.copy(self.source_bucket, self.source_object, self.destination_bucket, self.destination_object)
|
<commit_before><commit_msg>[AIRFLOW-1842] Add gcs to gcs copy operator with renaming if required
Copies an object from a Google Cloud Storage
bucket to another Google
Cloud Storage bucket, with renaming if required.
Closes #2808 from litdeviant/gcs_to_gcs<commit_after># -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class GoogleCloudStorageToGoogleCloudStorageOperator(BaseOperator):
"""
Copies an object from a bucket to another, with renaming if requested.
:param source_bucket: The source Google cloud storage bucket where the object is.
:type source_bucket: string
:param source_object: The source name of the object to copy in the Google cloud
storage bucket.
:type source_object: string
:param destination_bucket: The destination Google cloud storage bucket where the object should be.
:type destination_bucket: string
:param destination_object: The destination name of the object in the destination Google cloud
storage bucket.
:type destination_object: string
:param google_cloud_storage_conn_id: The connection ID to use when
connecting to Google cloud storage.
:type google_cloud_storage_conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide delegation enabled.
:type delegate_to: string
"""
template_fields = ('source_bucket', 'source_object', 'destination_bucket', 'destination_object',)
ui_color = '#f0eee4'
@apply_defaults
def __init__(self,
source_bucket,
source_object,
destination_bucket=None,
destination_object=None,
google_cloud_storage_conn_id='google_cloud_storage_default',
delegate_to=None,
*args,
**kwargs):
        super(GoogleCloudStorageToGoogleCloudStorageOperator, self).__init__(*args, **kwargs)
self.source_bucket = source_bucket
self.source_object = source_object
self.destination_bucket = destination_bucket
self.destination_object = destination_object
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.delegate_to = delegate_to
def execute(self, context):
self.log.info('Executing copy: %s, %s, %s, %s', self.source_bucket, self.source_object,
self.destination_bucket or self.source_bucket,
self.destination_object or self.source_object)
hook = GoogleCloudStorageHook(google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to)
hook.copy(self.source_bucket, self.source_object, self.destination_bucket, self.destination_object)
|
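A minimal sketch of wiring the new operator into a DAG; the DAG id, schedule, bucket names and object paths are placeholders, and only the operator arguments themselves come from the class defined in this commit.

from datetime import datetime
from airflow import DAG
from airflow.contrib.operators.gcs_to_gcs import \
    GoogleCloudStorageToGoogleCloudStorageOperator

dag = DAG('example_gcs_to_gcs', start_date=datetime(2018, 1, 1), schedule_interval=None)

copy_sales_file = GoogleCloudStorageToGoogleCloudStorageOperator(
    task_id='copy_sales_file',
    source_bucket='example-source-bucket',
    source_object='data/sales/sales.csv',
    destination_bucket='example-destination-bucket',
    destination_object='backup/sales.csv',
    google_cloud_storage_conn_id='google_cloud_storage_default',
    dag=dag,
)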
|
bcbcf37f4451fa87e89c527e8990181b24e3402c
|
google-calendar.py
|
google-calendar.py
|
import pprint
import pytz
from datetime import datetime, timedelta
import httplib2
from apiclient.discovery import build
from oauth2client.client import SignedJwtAssertionCredentials
with open('privatekey.p12', 'rb') as f:
key = f.read()
service_account_name = '...@developer.gserviceaccount.com'
calendarId = '...@group.calendar.google.com'
credentials = SignedJwtAssertionCredentials(
service_account_name, key,
scope=['https://www.googleapis.com/auth/calendar',
'https://www.googleapis.com/auth/calendar.readonly'])
http = httplib2.Http()
http = credentials.authorize(http)
service = build(serviceName='calendar', version='v3', http=http)
lists = service.calendarList().list().execute()
pprint.pprint(lists)
print
# get events from calendar for the next 3 days
cest = pytz.timezone('Europe/Skopje')
now = datetime.now(tz=cest) # timezone?
timeMin = datetime(year=now.year, month=now.month, day=now.day, tzinfo=cest) + timedelta(days=1)
timeMin = timeMin.isoformat()
timeMax = datetime(year=now.year, month=now.month, day=now.day, tzinfo=cest) + timedelta(days=3)
timeMax = timeMax.isoformat()
events = service.events().list(calendarId=calendarId,
timeMin=timeMin, timeMax=timeMax).execute()
pprint.pprint(events)
|
Add script for printing google calendar events
|
Add script for printing google calendar events
|
Python
|
mit
|
voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts
|
Add script for printing google calendar events
|
import pprint
import pytz
from datetime import datetime, timedelta
import httplib2
from apiclient.discovery import build
from oauth2client.client import SignedJwtAssertionCredentials
with open('privatekey.p12', 'rb') as f:
key = f.read()
service_account_name = '...@developer.gserviceaccount.com'
calendarId = '...@group.calendar.google.com'
credentials = SignedJwtAssertionCredentials(
service_account_name, key,
scope=['https://www.googleapis.com/auth/calendar',
'https://www.googleapis.com/auth/calendar.readonly'])
http = httplib2.Http()
http = credentials.authorize(http)
service = build(serviceName='calendar', version='v3', http=http)
lists = service.calendarList().list().execute()
pprint.pprint(lists)
print
# get events from calendar for the next 3 days
cest = pytz.timezone('Europe/Skopje')
now = datetime.now(tz=cest) # timezone?
timeMin = datetime(year=now.year, month=now.month, day=now.day, tzinfo=cest) + timedelta(days=1)
timeMin = timeMin.isoformat()
timeMax = datetime(year=now.year, month=now.month, day=now.day, tzinfo=cest) + timedelta(days=3)
timeMax = timeMax.isoformat()
events = service.events().list(calendarId=calendarId,
timeMin=timeMin, timeMax=timeMax).execute()
pprint.pprint(events)
|
<commit_before><commit_msg>Add script for printing google calendar events<commit_after>
|
import pprint
import pytz
from datetime import datetime, timedelta
import httplib2
from apiclient.discovery import build
from oauth2client.client import SignedJwtAssertionCredentials
with open('privatekey.p12', 'rb') as f:
key = f.read()
service_account_name = '...@developer.gserviceaccount.com'
calendarId = '...@group.calendar.google.com'
credentials = SignedJwtAssertionCredentials(
service_account_name, key,
scope=['https://www.googleapis.com/auth/calendar',
'https://www.googleapis.com/auth/calendar.readonly'])
http = httplib2.Http()
http = credentials.authorize(http)
service = build(serviceName='calendar', version='v3', http=http)
lists = service.calendarList().list().execute()
pprint.pprint(lists)
print
# get events from calendar for the next 3 days
cest = pytz.timezone('Europe/Skopje')
now = datetime.now(tz=cest) # timezone?
timeMin = datetime(year=now.year, month=now.month, day=now.day, tzinfo=cest) + timedelta(days=1)
timeMin = timeMin.isoformat()
timeMax = datetime(year=now.year, month=now.month, day=now.day, tzinfo=cest) + timedelta(days=3)
timeMax = timeMax.isoformat()
events = service.events().list(calendarId=calendarId,
timeMin=timeMin, timeMax=timeMax).execute()
pprint.pprint(events)
|
Add script for printing google calendar eventsimport pprint
import pytz
from datetime import datetime, timedelta
import httplib2
from apiclient.discovery import build
from oauth2client.client import SignedJwtAssertionCredentials
with open('privatekey.p12', 'rb') as f:
key = f.read()
service_account_name = '...@developer.gserviceaccount.com'
calendarId = '...@group.calendar.google.com'
credentials = SignedJwtAssertionCredentials(
service_account_name, key,
scope=['https://www.googleapis.com/auth/calendar',
'https://www.googleapis.com/auth/calendar.readonly'])
http = httplib2.Http()
http = credentials.authorize(http)
service = build(serviceName='calendar', version='v3', http=http)
lists = service.calendarList().list().execute()
pprint.pprint(lists)
print
# get events from calendar for the next 3 days
cest = pytz.timezone('Europe/Skopje')
now = datetime.now(tz=cest) # timezone?
timeMin = datetime(year=now.year, month=now.month, day=now.day, tzinfo=cest) + timedelta(days=1)
timeMin = timeMin.isoformat()
timeMax = datetime(year=now.year, month=now.month, day=now.day, tzinfo=cest) + timedelta(days=3)
timeMax = timeMax.isoformat()
events = service.events().list(calendarId=calendarId,
timeMin=timeMin, timeMax=timeMax).execute()
pprint.pprint(events)
|
<commit_before><commit_msg>Add script for printing google calendar events<commit_after>import pprint
import pytz
from datetime import datetime, timedelta
import httplib2
from apiclient.discovery import build
from oauth2client.client import SignedJwtAssertionCredentials
with open('privatekey.p12', 'rb') as f:
key = f.read()
service_account_name = '...@developer.gserviceaccount.com'
calendarId = '...@group.calendar.google.com'
credentials = SignedJwtAssertionCredentials(
service_account_name, key,
scope=['https://www.googleapis.com/auth/calendar',
'https://www.googleapis.com/auth/calendar.readonly'])
http = httplib2.Http()
http = credentials.authorize(http)
service = build(serviceName='calendar', version='v3', http=http)
lists = service.calendarList().list().execute()
pprint.pprint(lists)
print
# get events from calendar for the next 3 days
cest = pytz.timezone('Europe/Skopje')
now = datetime.now(tz=cest) # timezone?
timeMin = datetime(year=now.year, month=now.month, day=now.day, tzinfo=cest) + timedelta(days=1)
timeMin = timeMin.isoformat()
timeMax = datetime(year=now.year, month=now.month, day=now.day, tzinfo=cest) + timedelta(days=3)
timeMax = timeMax.isoformat()
events = service.events().list(calendarId=calendarId,
timeMin=timeMin, timeMax=timeMax).execute()
pprint.pprint(events)
|
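One natural continuation is printing a one-line summary per returned event. The field names below ('items', 'summary', 'start') follow the Calendar v3 response format as commonly documented; treat them as an assumption and check them against the pprint output above.

# Continuing from the script above, where `events` is the API response.
for event in events.get('items', []):
    start = event['start'].get('dateTime', event['start'].get('date'))
    print('%s %s' % (start, event.get('summary', '(no title)')))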
|
f52ada02a4f0b6e1ca36d56bd3ad16a8c151ae10
|
MachineLearning/TensorFlow/LinearRegression.py
|
MachineLearning/TensorFlow/LinearRegression.py
|
'''
'''
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def generate_points():
# Define number of points to draw
points = 500
    # Initialize lists
    x_points = []
    y_points = []
    # Define constants
a = 0.22
b = 0.78
for i in range(points):
x = np.random.normal(0.0, 0.5)
y = a * x + b + np.random.normal(0.0, 0.1)
x_points.append(x)
y_points.append(y)
return x_points, y_points
def graph_points():
x_points, y_points = generate_points()
plt.plot(x_points, y_points, 'o', label='Input Data')
plt.legend()
plt.show()
def main():
graph_points()
if __name__ == '__main__':
main()
|
Add script to build a model by which to predict the values of a dependent variable from the values of one or more independent variables using machine learning techniques with the linear regression algorithm.
|
feat: Add script to build a model by which to predict the values of a dependent variable from the values of one or more independent variables using machine learning techniques with the linear regression algorithm.
|
Python
|
mit
|
aguijarro/DataSciencePython
|
feat: Add script to build a model by which to predict the values of a dependent variable from the values of one or more independent variables using machine learning techniques with the linear regression algorithm.
|
'''
'''
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def generate_points():
# Define number of points to draw
points = 500
    # Initialize lists
    x_points = []
    y_points = []
    # Define constants
a = 0.22
b = 0.78
for i in range(points):
x = np.random.normal(0.0, 0.5)
y = a * x + b + np.random.normal(0.0, 0.1)
x_points.append(x)
y_points.append(y)
return x_points, y_points
def graph_points():
x_points, y_points = generate_points()
plt.plot(x_points, y_points, 'o', label='Input Data')
plt.legend()
plt.show()
def main():
graph_points()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>feat: Add script to build a model by which to predict the values of a dependent variable from the values of one or more independent variables using machine learning techniques with the linear regression algorithm.<commit_after>
|
'''
'''
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def generate_points():
# Define number of points to draw
points = 500
    # Initialize lists
    x_points = []
    y_points = []
    # Define constants
a = 0.22
b = 0.78
for i in range(points):
x = np.random.normal(0.0, 0.5)
y = a * x + b + np.random.normal(0.0, 0.1)
x_points.append(x)
y_points.append(y)
return x_points, y_points
def graph_points():
x_points, y_points = generate_points()
plt.plot(x_points, y_points, 'o', label='Input Data')
plt.legend()
plt.show()
def main():
graph_points()
if __name__ == '__main__':
main()
|
feat: Add script to build a model by which to predict the values of a dependent variable from the values of one or more independent variables using machine learning techniques with the linear regression algorithm.'''
'''
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def generate_points():
# Define number of points to draw
points = 500
    # Initialize lists
    x_points = []
    y_points = []
    # Define constants
a = 0.22
b = 0.78
for i in range(points):
x = np.random.normal(0.0, 0.5)
y = a * x + b + np.random.normal(0.0, 0.1)
x_points.append(x)
y_points.append(y)
return x_points, y_points
def graph_points():
x_points, y_points = generate_points()
plt.plot(x_points, y_points, 'o', label='Input Data')
plt.legend()
plt.show()
def main():
graph_points()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>feat: Add script to build a model by which to predict the values of a dependent variable from the values of one or more independent variables using machine learning techniques with the linear regression algorithm.<commit_after>'''
'''
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def generate_points():
# Define number of points to draw
points = 500
    # Initialize lists
    x_points = []
    y_points = []
    # Define constants
a = 0.22
b = 0.78
for i in range(points):
x = np.random.normal(0.0, 0.5)
y = a * x + b + np.random.normal(0.0, 0.1)
x_points.append(x)
y_points.append(y)
return x_points, y_points
def graph_points():
x_points, y_points = generate_points()
plt.plot(x_points, y_points, 'o', label='Input Data')
plt.legend()
plt.show()
def main():
graph_points()
if __name__ == '__main__':
main()
|
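The script so far only generates and plots the synthetic points; the fit promised by the commit message is still missing. A minimal least-squares version, using plain NumPy instead of TensorFlow purely to keep the sketch short (that substitution is mine), could look like this:

import numpy as np

# Same synthetic model as above: y = 0.22 * x + 0.78 + noise.
x = np.random.normal(0.0, 0.5, 500)
y = 0.22 * x + 0.78 + np.random.normal(0.0, 0.1, 500)

# Ordinary least squares on [x, 1] recovers the slope and intercept.
A = np.vstack([x, np.ones_like(x)]).T
a_hat, b_hat = np.linalg.lstsq(A, y, rcond=None)[0]
print('Recovered a=%.3f (true 0.22), b=%.3f (true 0.78)' % (a_hat, b_hat))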
|
6ad3b3372e8cd641ec41bdcf261a1a3018b073d9
|
localtv/management/commands/update_one_thumbnail.py
|
localtv/management/commands/update_one_thumbnail.py
|
# This file is part of Miro Community.
# Copyright (C) 2010 Participatory Culture Foundation
#
# Miro Community is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Miro Community is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Miro Community. If not, see <http://www.gnu.org/licenses/>.
from django.core.files.base import ContentFile
from django.core.management.base import BaseCommand, CommandError
from localtv import models
import localtv.util
class Command(BaseCommand):
args = '[video primary key]'
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError('update_one_thumbnail takes one argument: '
'%i argument(s) given' % len(args))
try:
video = models.Video.objects.get(pk=args[0])
        except models.Video.DoesNotExist:
raise CommandError('Video with pk %s does not exist' % args[0])
self.actually_update_thumb(video)
def actually_update_thumb(self, video):
thumbnail_data = None
if video.thumbnail_url:
thumbnail_data = localtv.util.pull_downloaded_file_from_cache(video.thumbnail_url)
# wrap it in a Django ContentFile, and pass it through.
cf_image = ContentFile(thumbnail_data)
video.save_thumbnail_from_file(cf_image)
else:
video.save_thumbnail()
|
Add celery-oriented management command to update a thumbnail
|
Add celery-oriented management command to update a thumbnail
|
Python
|
agpl-3.0
|
pculture/mirocommunity,pculture/mirocommunity,pculture/mirocommunity,pculture/mirocommunity
|
Add celery-oriented management command to update a thumbnail
|
# This file is part of Miro Community.
# Copyright (C) 2010 Participatory Culture Foundation
#
# Miro Community is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Miro Community is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Miro Community. If not, see <http://www.gnu.org/licenses/>.
from django.core.files.base import ContentFile
from django.core.management.base import BaseCommand, CommandError
from localtv import models
import localtv.util
class Command(BaseCommand):
args = '[video primary key]'
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError('update_one_thumbnail takes one argument: '
'%i argument(s) given' % len(args))
try:
video = models.Video.objects.get(pk=args[0])
        except models.Video.DoesNotExist:
raise CommandError('Video with pk %s does not exist' % args[0])
self.actually_update_thumb(video)
def actually_update_thumb(self, video):
thumbnail_data = None
if video.thumbnail_url:
thumbnail_data = localtv.util.pull_downloaded_file_from_cache(video.thumbnail_url)
# wrap it in a Django ContentFile, and pass it through.
cf_image = ContentFile(thumbnail_data)
video.save_thumbnail_from_file(cf_image)
else:
video.save_thumbnail()
|
<commit_before><commit_msg>Add celery-oriented management command to update a thumbnail<commit_after>
|
# This file is part of Miro Community.
# Copyright (C) 2010 Participatory Culture Foundation
#
# Miro Community is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Miro Community is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Miro Community. If not, see <http://www.gnu.org/licenses/>.
from django.core.files.base import ContentFile
from django.core.management.base import BaseCommand, CommandError
from localtv import models
import localtv.util
class Command(BaseCommand):
args = '[video primary key]'
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError('update_one_thumbnail takes one argument: '
'%i argument(s) given' % len(args))
try:
video = models.Video.objects.get(pk=args[0])
        except models.Video.DoesNotExist:
raise CommandError('Video with pk %s does not exist' % args[0])
self.actually_update_thumb(video)
def actually_update_thumb(self, video):
thumbnail_data = None
if video.thumbnail_url:
thumbnail_data = localtv.util.pull_downloaded_file_from_cache(video.thumbnail_url)
# wrap it in a Django ContentFile, and pass it through.
cf_image = ContentFile(thumbnail_data)
video.save_thumbnail_from_file(cf_image)
else:
video.save_thumbnail()
|
Add celery-oriented management command to update a thumbnail# This file is part of Miro Community.
# Copyright (C) 2010 Participatory Culture Foundation
#
# Miro Community is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Miro Community is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Miro Community. If not, see <http://www.gnu.org/licenses/>.
from django.core.files.base import ContentFile
from django.core.management.base import BaseCommand, CommandError
from localtv import models
import localtv.util
class Command(BaseCommand):
args = '[video primary key]'
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError('update_one_thumbnail takes one argument: '
'%i argument(s) given' % len(args))
try:
video = models.Video.objects.get(pk=args[0])
        except models.Video.DoesNotExist:
raise CommandError('Video with pk %s does not exist' % args[0])
self.actually_update_thumb(video)
def actually_update_thumb(self, video):
thumbnail_data = None
if video.thumbnail_url:
thumbnail_data = localtv.util.pull_downloaded_file_from_cache(video.thumbnail_url)
# wrap it in a Django ContentFile, and pass it through.
cf_image = ContentFile(thumbnail_data)
video.save_thumbnail_from_file(cf_image)
else:
video.save_thumbnail()
|
<commit_before><commit_msg>Add celery-oriented management command to update a thumbnail<commit_after># This file is part of Miro Community.
# Copyright (C) 2010 Participatory Culture Foundation
#
# Miro Community is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Miro Community is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Miro Community. If not, see <http://www.gnu.org/licenses/>.
from django.core.files.base import ContentFile
from django.core.management.base import BaseCommand, CommandError
from localtv import models
import localtv.util
class Command(BaseCommand):
args = '[video primary key]'
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError('update_one_thumbnail takes one argument: '
'%i argument(s) given' % len(args))
try:
video = models.Video.objects.get(pk=args[0])
        except models.Video.DoesNotExist:
raise CommandError('Video with pk %s does not exist' % args[0])
self.actually_update_thumb(video)
def actually_update_thumb(self, video):
thumbnail_data = None
if video.thumbnail_url:
thumbnail_data = localtv.util.pull_downloaded_file_from_cache(video.thumbnail_url)
# wrap it in a Django ContentFile, and pass it through.
cf_image = ContentFile(thumbnail_data)
video.save_thumbnail_from_file(cf_image)
else:
video.save_thumbnail()
|
|
76d196cc428695c9312d0fc052e43326f356c680
|
tests/test_helpers.py
|
tests/test_helpers.py
|
import unittest
from contextlib import redirect_stdout
from conllu import print_tree
from conllu.tree_helpers import TreeNode
from io import StringIO
class TestPrintTree(unittest.TestCase):
def test_print_simple_treenode(self):
node = TreeNode(data={"id": "X", "deprel": "Y"}, children={})
result = self._capture_print(print_tree, node)
self.assertEqual(result, "(deprel:Y) id:X deprel:Y [X]\n")
def _capture_print(self, func, args):
f = StringIO()
with redirect_stdout(f):
func(args)
return f.getvalue()
|
Add simple tests for print_tree.
|
Add simple tests for print_tree.
|
Python
|
mit
|
EmilStenstrom/conllu
|
Add simple tests for print_tree.
|
import unittest
from contextlib import redirect_stdout
from conllu import print_tree
from conllu.tree_helpers import TreeNode
from io import StringIO
class TestPrintTree(unittest.TestCase):
def test_print_simple_treenode(self):
node = TreeNode(data={"id": "X", "deprel": "Y"}, children={})
result = self._capture_print(print_tree, node)
self.assertEqual(result, "(deprel:Y) id:X deprel:Y [X]\n")
def _capture_print(self, func, args):
f = StringIO()
with redirect_stdout(f):
func(args)
return f.getvalue()
|
<commit_before><commit_msg>Add simple tests for print_tree.<commit_after>
|
import unittest
from contextlib import redirect_stdout
from conllu import print_tree
from conllu.tree_helpers import TreeNode
from io import StringIO
class TestPrintTree(unittest.TestCase):
def test_print_simple_treenode(self):
node = TreeNode(data={"id": "X", "deprel": "Y"}, children={})
result = self._capture_print(print_tree, node)
self.assertEqual(result, "(deprel:Y) id:X deprel:Y [X]\n")
def _capture_print(self, func, args):
f = StringIO()
with redirect_stdout(f):
func(args)
return f.getvalue()
|
Add simple tests for print_tree.import unittest
from contextlib import redirect_stdout
from conllu import print_tree
from conllu.tree_helpers import TreeNode
from io import StringIO
class TestPrintTree(unittest.TestCase):
def test_print_simple_treenode(self):
node = TreeNode(data={"id": "X", "deprel": "Y"}, children={})
result = self._capture_print(print_tree, node)
self.assertEqual(result, "(deprel:Y) id:X deprel:Y [X]\n")
def _capture_print(self, func, args):
f = StringIO()
with redirect_stdout(f):
func(args)
return f.getvalue()
|
<commit_before><commit_msg>Add simple tests for print_tree.<commit_after>import unittest
from contextlib import redirect_stdout
from conllu import print_tree
from conllu.tree_helpers import TreeNode
from io import StringIO
class TestPrintTree(unittest.TestCase):
def test_print_simple_treenode(self):
node = TreeNode(data={"id": "X", "deprel": "Y"}, children={})
result = self._capture_print(print_tree, node)
self.assertEqual(result, "(deprel:Y) id:X deprel:Y [X]\n")
def _capture_print(self, func, args):
f = StringIO()
with redirect_stdout(f):
func(args)
return f.getvalue()
|
|
52bfa1015d51ac5835909eb178caf9279530d666
|
dipy/denoise/tests/test_denoise.py
|
dipy/denoise/tests/test_denoise.py
|
import numpy as np
import numpy.testing as npt
from dipy.denoise.noise_estimate import estimate_sigma
from dipy.denoise.nlmeans import nlmeans
import dipy.data as dpd
import nibabel as nib
def test_denoise():
"""
"""
fdata, fbval, fbvec = dpd.get_data()
data = nib.load(fdata).get_data()
sigma = estimate_sigma(data, N=4)
denoised = nlmeans(data, sigma=sigma)
|
Verify that output of estimate_sigma is a proper input to nlmeans.
|
TST: Verify that output of estimate_sigma is a proper input to nlmeans.
|
Python
|
bsd-3-clause
|
StongeEtienne/dipy,nilgoyyou/dipy,matthieudumont/dipy,matthieudumont/dipy,nilgoyyou/dipy,JohnGriffiths/dipy,StongeEtienne/dipy,FrancoisRheaultUS/dipy,demianw/dipy,FrancoisRheaultUS/dipy,demianw/dipy,JohnGriffiths/dipy,villalonreina/dipy,villalonreina/dipy
|
TST: Verify that output of estimate_sigma is a proper input to nlmeans.
|
import numpy as np
import numpy.testing as npt
from dipy.denoise.noise_estimate import estimate_sigma
from dipy.denoise.nlmeans import nlmeans
import dipy.data as dpd
import nibabel as nib
def test_denoise():
"""
"""
fdata, fbval, fbvec = dpd.get_data()
data = nib.load(fdata).get_data()
sigma = estimate_sigma(data, N=4)
denoised = nlmeans(data, sigma=sigma)
|
<commit_before><commit_msg>TST: Verify that output of estimate_sigma is a proper input to nlmeans.<commit_after>
|
import numpy as np
import numpy.testing as npt
from dipy.denoise.noise_estimate import estimate_sigma
from dipy.denoise.nlmeans import nlmeans
import dipy.data as dpd
import nibabel as nib
def test_denoise():
"""
"""
fdata, fbval, fbvec = dpd.get_data()
data = nib.load(fdata).get_data()
sigma = estimate_sigma(data, N=4)
denoised = nlmeans(data, sigma=sigma)
|
TST: Verify that output of estimate_sigma is a proper input to nlmeans.import numpy as np
import numpy.testing as npt
from dipy.denoise.noise_estimate import estimate_sigma
from dipy.denoise.nlmeans import nlmeans
import dipy.data as dpd
import nibabel as nib
def test_denoise():
"""
"""
fdata, fbval, fbvec = dpd.get_data()
data = nib.load(fdata).get_data()
sigma = estimate_sigma(data, N=4)
denoised = nlmeans(data, sigma=sigma)
|
<commit_before><commit_msg>TST: Verify that output of estimate_sigma is a proper input to nlmeans.<commit_after>import numpy as np
import numpy.testing as npt
from dipy.denoise.noise_estimate import estimate_sigma
from dipy.denoise.nlmeans import nlmeans
import dipy.data as dpd
import nibabel as nib
def test_denoise():
"""
"""
fdata, fbval, fbvec = dpd.get_data()
data = nib.load(fdata).get_data()
sigma = estimate_sigma(data, N=4)
denoised = nlmeans(data, sigma=sigma)
|
|
ef4730de0a2cf2a5b5c1b5d8c01e3ac35923be49
|
dockci/migrations/0004.py
|
dockci/migrations/0004.py
|
"""
Rename "job" models to "project" models
"""
import py.path
jobs_path = py.path.local().join('data', 'jobs')
projects_path = py.path.local().join('data', 'projects')
jobs_path.move(projects_path)
|
Add migration for job -> project rename
|
Add migration for job -> project rename
|
Python
|
isc
|
sprucedev/DockCI,sprucedev/DockCI-Agent,sprucedev/DockCI,sprucedev/DockCI,sprucedev/DockCI-Agent,RickyCook/DockCI,RickyCook/DockCI,sprucedev/DockCI,RickyCook/DockCI,RickyCook/DockCI
|
Add migration for job -> project rename
|
"""
Rename "job" models to "project" models
"""
import py.path
jobs_path = py.path.local().join('data', 'jobs')
projects_path = py.path.local().join('data', 'projects')
jobs_path.move(projects_path)
|
<commit_before><commit_msg>Add migration for job -> project rename<commit_after>
|
"""
Rename "job" models to "project" models
"""
import py.path
jobs_path = py.path.local().join('data', 'jobs')
projects_path = py.path.local().join('data', 'projects')
jobs_path.move(projects_path)
|
Add migration for job -> project rename"""
Rename "job" models to "project" models
"""
import py.path
jobs_path = py.path.local().join('data', 'jobs')
projects_path = py.path.local().join('data', 'projects')
jobs_path.move(projects_path)
|
<commit_before><commit_msg>Add migration for job -> project rename<commit_after>"""
Rename "job" models to "project" models
"""
import py.path
jobs_path = py.path.local().join('data', 'jobs')
projects_path = py.path.local().join('data', 'projects')
jobs_path.move(projects_path)
|
|
20c95ee7f6e929c05c20fcba5e1a5806c44d69be
|
maxwellbloch/tests/test_spectral.py
|
maxwellbloch/tests/test_spectral.py
|
"""
Unit tests for the spectral analysis module.
Thomas Ogden <t@ogden.eu>
"""
import sys
import os
import unittest
from maxwellbloch import mb_solve, spectral
# Absolute path of tests/json directory, so that tests can be called from
# different directories.
JSON_DIR = os.path.abspath(os.path.join(__file__, '../', 'json'))
class TestSpectral(unittest.TestCase):
""" Unit tests of the spectral methods.
Note: The real test of the spectral methods is comparison with a
two-level linear system, as we know the analytic lineshapes. A good
test might be to compare these lineshapes, however to get good
agreement a lot of timesteps are needed which makes the test too slow.
See Appendix C in the notebooks-maxwellbloch repo.
"""
def test_spectral_twolevel(self):
""" Check the spectral methods for exceptions. """
json_path = os.path.join(JSON_DIR, "mb_solve_01.json")
mb_solve_00 = mb_solve.MBSolve().from_json(json_path)
mb_solve_00.mbsolve()
freq_list = spectral.freq_list(mb_solve_00)
rabi_freq_fft = spectral.rabi_freq(mb_solve_00, 0)
abs = spectral.absorption(mb_solve_00, 0, -1)
dis = spectral.dispersion(mb_solve_00, 0, -1)
|
Add test of spectral methods
|
Add test of spectral methods
|
Python
|
mit
|
tommyogden/maxwellbloch,tommyogden/maxwellbloch
|
Add test of spectral methods
|
"""
Unit tests for the spectral analysis module.
Thomas Ogden <t@ogden.eu>
"""
import sys
import os
import unittest
from maxwellbloch import mb_solve, spectral
# Absolute path of tests/json directory, so that tests can be called from
# different directories.
JSON_DIR = os.path.abspath(os.path.join(__file__, '../', 'json'))
class TestSpectral(unittest.TestCase):
""" Unit tests of the spectral methods.
Note: The real test of the spectral methods is comparison with a
two-level linear system, as we know the analytic lineshapes. A good
test might be to compare these lineshapes, however to get good
agreement a lot of timesteps are needed which makes the test too slow.
See Appendix C in the notebooks-maxwellbloch repo.
"""
def test_spectral_twolevel(self):
""" Check the spectral methods for exceptions. """
json_path = os.path.join(JSON_DIR, "mb_solve_01.json")
mb_solve_00 = mb_solve.MBSolve().from_json(json_path)
mb_solve_00.mbsolve()
freq_list = spectral.freq_list(mb_solve_00)
rabi_freq_fft = spectral.rabi_freq(mb_solve_00, 0)
abs = spectral.absorption(mb_solve_00, 0, -1)
dis = spectral.dispersion(mb_solve_00, 0, -1)
|
<commit_before><commit_msg>Add test of spectral methods<commit_after>
|
"""
Unit tests for the spectral analysis module.
Thomas Ogden <t@ogden.eu>
"""
import sys
import os
import unittest
from maxwellbloch import mb_solve, spectral
# Absolute path of tests/json directory, so that tests can be called from
# different directories.
JSON_DIR = os.path.abspath(os.path.join(__file__, '../', 'json'))
class TestSpectral(unittest.TestCase):
""" Unit tests of the spectral methods.
Note: The real test of the spectral methods is comparison with a
two-level linear system, as we know the analytic lineshapes. A good
test might be to compare these lineshapes, however to get good
agreement a lot of timesteps are needed which makes the test too slow.
See Appendix C in the notebooks-maxwellbloch repo.
"""
def test_spectral_twolevel(self):
""" Check the spectral methods for exceptions. """
json_path = os.path.join(JSON_DIR, "mb_solve_01.json")
mb_solve_00 = mb_solve.MBSolve().from_json(json_path)
mb_solve_00.mbsolve()
freq_list = spectral.freq_list(mb_solve_00)
rabi_freq_fft = spectral.rabi_freq(mb_solve_00, 0)
abs = spectral.absorption(mb_solve_00, 0, -1)
dis = spectral.dispersion(mb_solve_00, 0, -1)
|
Add test of spectral methods"""
Unit tests for the spectral analysis module.
Thomas Ogden <t@ogden.eu>
"""
import sys
import os
import unittest
from maxwellbloch import mb_solve, spectral
# Absolute path of tests/json directory, so that tests can be called from
# different directories.
JSON_DIR = os.path.abspath(os.path.join(__file__, '../', 'json'))
class TestSpectral(unittest.TestCase):
""" Unit tests of the spectral methods.
Note: The real test of the spectral methods is comparison with a
two-level linear system, as we know the analytic lineshapes. A good
test might be to compare these lineshapes, however to get good
agreement a lot of timesteps are needed which makes the test too slow.
See Appendix C in the notebooks-maxwellbloch repo.
"""
def test_spectral_twolevel(self):
""" Check the spectral methods for exceptions. """
json_path = os.path.join(JSON_DIR, "mb_solve_01.json")
mb_solve_00 = mb_solve.MBSolve().from_json(json_path)
mb_solve_00.mbsolve()
freq_list = spectral.freq_list(mb_solve_00)
rabi_freq_fft = spectral.rabi_freq(mb_solve_00, 0)
abs = spectral.absorption(mb_solve_00, 0, -1)
dis = spectral.dispersion(mb_solve_00, 0, -1)
|
<commit_before><commit_msg>Add test of spectral methods<commit_after>"""
Unit tests for the spectral analysis module.
Thomas Ogden <t@ogden.eu>
"""
import sys
import os
import unittest
from maxwellbloch import mb_solve, spectral
# Absolute path of tests/json directory, so that tests can be called from
# different directories.
JSON_DIR = os.path.abspath(os.path.join(__file__, '../', 'json'))
class TestSpectral(unittest.TestCase):
""" Unit tests of the spectral methods.
Note: The real test of the spectral methods is comparison with a
two-level linear system, as we know the analytic lineshapes. A good
test might be to compare these lineshapes, however to get good
agreement a lot of timesteps are needed which makes the test too slow.
See Appendix C in the notebooks-maxwellbloch repo.
"""
def test_spectral_twolevel(self):
""" Check the spectral methods for exceptions. """
json_path = os.path.join(JSON_DIR, "mb_solve_01.json")
mb_solve_00 = mb_solve.MBSolve().from_json(json_path)
mb_solve_00.mbsolve()
freq_list = spectral.freq_list(mb_solve_00)
rabi_freq_fft = spectral.rabi_freq(mb_solve_00, 0)
abs = spectral.absorption(mb_solve_00, 0, -1)
dis = spectral.dispersion(mb_solve_00, 0, -1)
|
|
f7213a2f99ff9134c4b6a3a184c2c03f64e845ab
|
data_manipulator.py
|
data_manipulator.py
|
from keras.utils import np_utils
import numpy as np
def get_labels_number(batches):
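    # Length of each flattened sample vector (data.shape[1]); used to size the merged batch array.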
size = batches[0]['data'].shape[1]
return size
def get_empty_batch(size):
merged_batch = {'data': np.array([]).reshape(0, size), 'filenames': [], 'labels': []}
return merged_batch
def append_batch(merged_batch, batch):
merged_batch['data'] = np.concatenate((merged_batch['data'], batch['data']))
merged_batch['filenames'] += batch['filenames']
merged_batch['labels'] += batch['labels']
def merge_batches(batches):
size = get_labels_number(batches)
merged_batch = get_empty_batch(size)
for batch in batches:
append_batch(merged_batch, batch)
return merged_batch
def to_categorical(batch, classes_number):
batch['labels'] = np_utils.to_categorical(batch['labels'], classes_number)
def categorize(merged_train_batch, test_batch):
to_categorical(merged_train_batch, classes_number=10)
to_categorical(test_batch, classes_number=10)
|
Add module for data manipulation
|
Add module for data manipulation
|
Python
|
mit
|
maciewar/AGH-Deep-Learning-CIFAR10
|
Add module for data manipulation
|
from keras.utils import np_utils
import numpy as np
def get_labels_number(batches):
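    # Length of each flattened sample vector (data.shape[1]); used to size the merged batch array.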
size = batches[0]['data'].shape[1]
return size
def get_empty_batch(size):
merged_batch = {'data': np.array([]).reshape(0, size), 'filenames': [], 'labels': []}
return merged_batch
def append_batch(merged_batch, batch):
merged_batch['data'] = np.concatenate((merged_batch['data'], batch['data']))
merged_batch['filenames'] += batch['filenames']
merged_batch['labels'] += batch['labels']
def merge_batches(batches):
size = get_labels_number(batches)
merged_batch = get_empty_batch(size)
for batch in batches:
append_batch(merged_batch, batch)
return merged_batch
def to_categorical(batch, classes_number):
batch['labels'] = np_utils.to_categorical(batch['labels'], classes_number)
def categorize(merged_train_batch, test_batch):
to_categorical(merged_train_batch, classes_number=10)
to_categorical(test_batch, classes_number=10)
|
<commit_before><commit_msg>Add module for data manipulation<commit_after>
|
from keras.utils import np_utils
import numpy as np
def get_labels_number(batches):
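    # Length of each flattened sample vector (data.shape[1]); used to size the merged batch array.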
size = batches[0]['data'].shape[1]
return size
def get_empty_batch(size):
merged_batch = {'data': np.array([]).reshape(0, size), 'filenames': [], 'labels': []}
return merged_batch
def append_batch(merged_batch, batch):
merged_batch['data'] = np.concatenate((merged_batch['data'], batch['data']))
merged_batch['filenames'] += batch['filenames']
merged_batch['labels'] += batch['labels']
def merge_batches(batches):
size = get_labels_number(batches)
merged_batch = get_empty_batch(size)
for batch in batches:
append_batch(merged_batch, batch)
return merged_batch
def to_categorical(batch, classes_number):
batch['labels'] = np_utils.to_categorical(batch['labels'], classes_number)
def categorize(merged_train_batch, test_batch):
to_categorical(merged_train_batch, classes_number=10)
to_categorical(test_batch, classes_number=10)
|
Add module for data manipulationfrom keras.utils import np_utils
import numpy as np
def get_labels_number(batches):
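    # Length of each flattened sample vector (data.shape[1]); used to size the merged batch array.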
size = batches[0]['data'].shape[1]
return size
def get_empty_batch(size):
merged_batch = {'data': np.array([]).reshape(0, size), 'filenames': [], 'labels': []}
return merged_batch
def append_batch(merged_batch, batch):
merged_batch['data'] = np.concatenate((merged_batch['data'], batch['data']))
merged_batch['filenames'] += batch['filenames']
merged_batch['labels'] += batch['labels']
def merge_batches(batches):
size = get_labels_number(batches)
merged_batch = get_empty_batch(size)
for batch in batches:
append_batch(merged_batch, batch)
return merged_batch
def to_categorical(batch, classes_number):
batch['labels'] = np_utils.to_categorical(batch['labels'], classes_number)
def categorize(merged_train_batch, test_batch):
to_categorical(merged_train_batch, classes_number=10)
to_categorical(test_batch, classes_number=10)
|
<commit_before><commit_msg>Add module for data manipulation<commit_after>from keras.utils import np_utils
import numpy as np
def get_labels_number(batches):
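    # Length of each flattened sample vector (data.shape[1]); used to size the merged batch array.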
size = batches[0]['data'].shape[1]
return size
def get_empty_batch(size):
merged_batch = {'data': np.array([]).reshape(0, size), 'filenames': [], 'labels': []}
return merged_batch
def append_batch(merged_batch, batch):
merged_batch['data'] = np.concatenate((merged_batch['data'], batch['data']))
merged_batch['filenames'] += batch['filenames']
merged_batch['labels'] += batch['labels']
def merge_batches(batches):
size = get_labels_number(batches)
merged_batch = get_empty_batch(size)
for batch in batches:
append_batch(merged_batch, batch)
return merged_batch
def to_categorical(batch, classes_number):
batch['labels'] = np_utils.to_categorical(batch['labels'], classes_number)
def categorize(merged_train_batch, test_batch):
to_categorical(merged_train_batch, classes_number=10)
to_categorical(test_batch, classes_number=10)
|
|
2764f0dca9b65bad6ea445a51a914eb29122e71c
|
devil/devil/android/constants/chrome.py
|
devil/devil/android/constants/chrome.py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
PackageInfo = collections.namedtuple(
'PackageInfo',
['package', 'activity', 'cmdline_file', 'devtools_socket', 'test_package'])
PACKAGE_INFO = {
'chrome_document': PackageInfo(
'com.google.android.apps.chrome.document',
'com.google.android.apps.chrome.document.ChromeLauncherActivity',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome': PackageInfo(
'com.google.android.apps.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
'com.google.android.apps.chrome.tests'),
'chrome_beta': PackageInfo(
'com.chrome.beta',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_stable': PackageInfo(
'com.android.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_dev': PackageInfo(
'com.chrome.dev',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_canary': PackageInfo(
'com.chrome.canary',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_work': PackageInfo(
'com.chrome.work',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chromium': PackageInfo(
'org.chromium.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
'org.chromium.chrome.tests'),
}
|
Add PACKAGE_INFO to devil in Catapult
|
Add PACKAGE_INFO to devil in Catapult
This CL adds the PACKAGE_INFO in pylib to devil in Catapult.
So adb_profile_chrome can use this in Catapult.
BUG=catapult:#1937
Review URL: https://codereview.chromium.org/1685803002
|
Python
|
bsd-3-clause
|
catapult-project/catapult,SummerLW/Perf-Insight-Report,benschmaus/catapult,catapult-project/catapult,catapult-project/catapult,SummerLW/Perf-Insight-Report,benschmaus/catapult,benschmaus/catapult,benschmaus/catapult,sahiljain/catapult,benschmaus/catapult,catapult-project/catapult-csm,sahiljain/catapult,sahiljain/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult,catapult-project/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult,benschmaus/catapult,sahiljain/catapult,catapult-project/catapult-csm,catapult-project/catapult-csm,sahiljain/catapult,SummerLW/Perf-Insight-Report,benschmaus/catapult,sahiljain/catapult,catapult-project/catapult-csm,catapult-project/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult-csm,catapult-project/catapult-csm,catapult-project/catapult-csm
|
Add PACKAGE_INFO to devil in Catapult
This CL adds the PACKAGE_INFO in pylib to devil in Catapult.
So adb_profile_chrome can use this in Catapult.
BUG=catapult:#1937
Review URL: https://codereview.chromium.org/1685803002
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
PackageInfo = collections.namedtuple(
'PackageInfo',
['package', 'activity', 'cmdline_file', 'devtools_socket', 'test_package'])
PACKAGE_INFO = {
'chrome_document': PackageInfo(
'com.google.android.apps.chrome.document',
'com.google.android.apps.chrome.document.ChromeLauncherActivity',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome': PackageInfo(
'com.google.android.apps.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
'com.google.android.apps.chrome.tests'),
'chrome_beta': PackageInfo(
'com.chrome.beta',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_stable': PackageInfo(
'com.android.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_dev': PackageInfo(
'com.chrome.dev',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_canary': PackageInfo(
'com.chrome.canary',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_work': PackageInfo(
'com.chrome.work',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chromium': PackageInfo(
'org.chromium.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
'org.chromium.chrome.tests'),
}
|
<commit_before><commit_msg>Add PACKAGE_INFO to devil in Catapult
This CL adds the PACKAGE_INFO in pylib to devil in Catapult.
So adb_profile_chrome can use this in Catapult.
BUG=catapult:#1937
Review URL: https://codereview.chromium.org/1685803002<commit_after>
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
PackageInfo = collections.namedtuple(
'PackageInfo',
['package', 'activity', 'cmdline_file', 'devtools_socket', 'test_package'])
PACKAGE_INFO = {
'chrome_document': PackageInfo(
'com.google.android.apps.chrome.document',
'com.google.android.apps.chrome.document.ChromeLauncherActivity',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome': PackageInfo(
'com.google.android.apps.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
'com.google.android.apps.chrome.tests'),
'chrome_beta': PackageInfo(
'com.chrome.beta',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_stable': PackageInfo(
'com.android.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_dev': PackageInfo(
'com.chrome.dev',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_canary': PackageInfo(
'com.chrome.canary',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_work': PackageInfo(
'com.chrome.work',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chromium': PackageInfo(
'org.chromium.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
'org.chromium.chrome.tests'),
}
|
Add PACKAGE_INFO to devil in Catapult
This CL adds the PACKAGE_INFO in pylib to devil in Catapult.
So adb_profile_chrome can use this in Catapult.
BUG=catapult:#1937
Review URL: https://codereview.chromium.org/1685803002# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
PackageInfo = collections.namedtuple(
'PackageInfo',
['package', 'activity', 'cmdline_file', 'devtools_socket', 'test_package'])
PACKAGE_INFO = {
'chrome_document': PackageInfo(
'com.google.android.apps.chrome.document',
'com.google.android.apps.chrome.document.ChromeLauncherActivity',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome': PackageInfo(
'com.google.android.apps.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
'com.google.android.apps.chrome.tests'),
'chrome_beta': PackageInfo(
'com.chrome.beta',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_stable': PackageInfo(
'com.android.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_dev': PackageInfo(
'com.chrome.dev',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_canary': PackageInfo(
'com.chrome.canary',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_work': PackageInfo(
'com.chrome.work',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chromium': PackageInfo(
'org.chromium.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
'org.chromium.chrome.tests'),
}
|
<commit_before><commit_msg>Add PACKAGE_INFO to devil in Catapult
This CL adds the PACKAGE_INFO in pylib to devil in Catapult.
So adb_profile_chrome can use this in Catapult.
BUG=catapult:#1937
Review URL: https://codereview.chromium.org/1685803002<commit_after># Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
PackageInfo = collections.namedtuple(
'PackageInfo',
['package', 'activity', 'cmdline_file', 'devtools_socket', 'test_package'])
PACKAGE_INFO = {
'chrome_document': PackageInfo(
'com.google.android.apps.chrome.document',
'com.google.android.apps.chrome.document.ChromeLauncherActivity',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome': PackageInfo(
'com.google.android.apps.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
'com.google.android.apps.chrome.tests'),
'chrome_beta': PackageInfo(
'com.chrome.beta',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_stable': PackageInfo(
'com.android.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_dev': PackageInfo(
'com.chrome.dev',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_canary': PackageInfo(
'com.chrome.canary',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_work': PackageInfo(
'com.chrome.work',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chromium': PackageInfo(
'org.chromium.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
'org.chromium.chrome.tests'),
}
|
|
fc9acded3072a7fc4ebd874b3d4c582a6ae1e2ec
|
pythonforandroid/recipes/zbarlight/__init__.py
|
pythonforandroid/recipes/zbarlight/__init__.py
|
from os.path import join
from pythonforandroid.recipe import PythonRecipe
class ZBarLightRecipe(PythonRecipe):
version = '2.1'
url = 'https://github.com/Polyconseil/zbarlight/archive/{version}.tar.gz' # noqa
call_hostpython_via_targetpython = False
depends = ['setuptools', 'libzbar']
def get_recipe_env(self, arch=None, with_flags_in_cc=True):
env = super(ZBarLightRecipe, self).get_recipe_env(arch, with_flags_in_cc)
libzbar = self.get_recipe('libzbar', self.ctx)
libzbar_dir = libzbar.get_build_dir(arch.arch)
env['PYTHON_ROOT'] = self.ctx.get_python_install_dir()
env['CFLAGS'] += ' -I' + join(libzbar_dir, 'include')
env['LDFLAGS'] += ' -L' + join(libzbar_dir, 'zbar', '.libs')
env['LIBS'] = env.get('LIBS', '') + ' -landroid -lzbar'
return env
recipe = ZBarLightRecipe()
|
Add zbarlight recipe (also compatible with python2 and python3)
|
Add zbarlight recipe (also compatible with python2 and python3)
|
Python
|
mit
|
germn/python-for-android,rnixx/python-for-android,germn/python-for-android,kronenpj/python-for-android,kivy/python-for-android,rnixx/python-for-android,kivy/python-for-android,rnixx/python-for-android,rnixx/python-for-android,germn/python-for-android,rnixx/python-for-android,kronenpj/python-for-android,PKRoma/python-for-android,PKRoma/python-for-android,kronenpj/python-for-android,PKRoma/python-for-android,rnixx/python-for-android,kivy/python-for-android,PKRoma/python-for-android,kronenpj/python-for-android,germn/python-for-android,germn/python-for-android,PKRoma/python-for-android,kronenpj/python-for-android,kivy/python-for-android,kivy/python-for-android,germn/python-for-android
|
Add zbarlight recipe (also compatible with python2 and python3)
|
from os.path import join
from pythonforandroid.recipe import PythonRecipe
class ZBarLightRecipe(PythonRecipe):
version = '2.1'
url = 'https://github.com/Polyconseil/zbarlight/archive/{version}.tar.gz' # noqa
call_hostpython_via_targetpython = False
depends = ['setuptools', 'libzbar']
def get_recipe_env(self, arch=None, with_flags_in_cc=True):
env = super(ZBarLightRecipe, self).get_recipe_env(arch, with_flags_in_cc)
libzbar = self.get_recipe('libzbar', self.ctx)
libzbar_dir = libzbar.get_build_dir(arch.arch)
env['PYTHON_ROOT'] = self.ctx.get_python_install_dir()
env['CFLAGS'] += ' -I' + join(libzbar_dir, 'include')
env['LDFLAGS'] += ' -L' + join(libzbar_dir, 'zbar', '.libs')
env['LIBS'] = env.get('LIBS', '') + ' -landroid -lzbar'
return env
recipe = ZBarLightRecipe()
|
<commit_before><commit_msg>Add zbarlight recipe (also compatible with python2 and python3)<commit_after>
|
from os.path import join
from pythonforandroid.recipe import PythonRecipe
class ZBarLightRecipe(PythonRecipe):
version = '2.1'
url = 'https://github.com/Polyconseil/zbarlight/archive/{version}.tar.gz' # noqa
call_hostpython_via_targetpython = False
depends = ['setuptools', 'libzbar']
def get_recipe_env(self, arch=None, with_flags_in_cc=True):
env = super(ZBarLightRecipe, self).get_recipe_env(arch, with_flags_in_cc)
libzbar = self.get_recipe('libzbar', self.ctx)
libzbar_dir = libzbar.get_build_dir(arch.arch)
env['PYTHON_ROOT'] = self.ctx.get_python_install_dir()
env['CFLAGS'] += ' -I' + join(libzbar_dir, 'include')
env['LDFLAGS'] += ' -L' + join(libzbar_dir, 'zbar', '.libs')
env['LIBS'] = env.get('LIBS', '') + ' -landroid -lzbar'
return env
recipe = ZBarLightRecipe()
|
Add zbarlight recipe (also compatible with python2 and python3)from os.path import join
from pythonforandroid.recipe import PythonRecipe
class ZBarLightRecipe(PythonRecipe):
version = '2.1'
url = 'https://github.com/Polyconseil/zbarlight/archive/{version}.tar.gz' # noqa
call_hostpython_via_targetpython = False
depends = ['setuptools', 'libzbar']
def get_recipe_env(self, arch=None, with_flags_in_cc=True):
env = super(ZBarLightRecipe, self).get_recipe_env(arch, with_flags_in_cc)
libzbar = self.get_recipe('libzbar', self.ctx)
libzbar_dir = libzbar.get_build_dir(arch.arch)
env['PYTHON_ROOT'] = self.ctx.get_python_install_dir()
env['CFLAGS'] += ' -I' + join(libzbar_dir, 'include')
env['LDFLAGS'] += ' -L' + join(libzbar_dir, 'zbar', '.libs')
env['LIBS'] = env.get('LIBS', '') + ' -landroid -lzbar'
return env
recipe = ZBarLightRecipe()
|
<commit_before><commit_msg>Add zbarlight recipe (also compatible with python2 and python3)<commit_after>from os.path import join
from pythonforandroid.recipe import PythonRecipe
class ZBarLightRecipe(PythonRecipe):
version = '2.1'
url = 'https://github.com/Polyconseil/zbarlight/archive/{version}.tar.gz' # noqa
call_hostpython_via_targetpython = False
depends = ['setuptools', 'libzbar']
def get_recipe_env(self, arch=None, with_flags_in_cc=True):
env = super(ZBarLightRecipe, self).get_recipe_env(arch, with_flags_in_cc)
libzbar = self.get_recipe('libzbar', self.ctx)
libzbar_dir = libzbar.get_build_dir(arch.arch)
env['PYTHON_ROOT'] = self.ctx.get_python_install_dir()
env['CFLAGS'] += ' -I' + join(libzbar_dir, 'include')
env['LDFLAGS'] += ' -L' + join(libzbar_dir, 'zbar', '.libs')
env['LIBS'] = env.get('LIBS', '') + ' -landroid -lzbar'
return env
recipe = ZBarLightRecipe()
|
|
ae056d2f4e6268c371365d23997c81462855f22b
|
py/elimination-game.py
|
py/elimination-game.py
|
class Solution(object):
def lastRemaining(self, n):
"""
:type n: int
:rtype: int
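        Approach: the 0x55... mask sets bits 0, 2, 4, ... (the left-to-right
        passes) and bit k of n says whether the count remaining at pass k is
        odd; the survivor is 1 plus the sum of 1 << k over the bit_length - 1
        passes where either condition holds.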
"""
return ((n | 0x55555555555555) & ((1 << (n.bit_length() - 1)) - 1)) + 1
|
Add py solution for 390. Elimination Game
|
Add py solution for 390. Elimination Game
390. Elimination Game: https://leetcode.com/problems/elimination-game/
Approach:
Observe the first item remaining in each step. The value will be added
1 << step either the remaining count is odd or it's a left-to-right
step. Hence the n | 0x55555.. is the key.
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 390. Elimination Game
390. Elimination Game: https://leetcode.com/problems/elimination-game/
Approach:
Observe the first item remaining in each step. The value will be added
1 << step either the remaining count is odd or it's a left-to-right
step. Hence the n | 0x55555.. is the key.
|
class Solution(object):
def lastRemaining(self, n):
"""
:type n: int
:rtype: int
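        Approach: the 0x55... mask sets bits 0, 2, 4, ... (the left-to-right
        passes) and bit k of n says whether the count remaining at pass k is
        odd; the survivor is 1 plus the sum of 1 << k over the bit_length - 1
        passes where either condition holds.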
"""
return ((n | 0x55555555555555) & ((1 << (n.bit_length() - 1)) - 1)) + 1
|
<commit_before><commit_msg>Add py solution for 390. Elimination Game
390. Elimination Game: https://leetcode.com/problems/elimination-game/
Approach:
Observe the first item remaining in each step. The value will be added
1 << step either the remaining count is odd or it's a left-to-right
step. Hence the n | 0x55555.. is the key.<commit_after>
|
class Solution(object):
def lastRemaining(self, n):
"""
:type n: int
:rtype: int
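        Approach: the 0x55... mask sets bits 0, 2, 4, ... (the left-to-right
        passes) and bit k of n says whether the count remaining at pass k is
        odd; the survivor is 1 plus the sum of 1 << k over the bit_length - 1
        passes where either condition holds.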
"""
return ((n | 0x55555555555555) & ((1 << (n.bit_length() - 1)) - 1)) + 1
|
Add py solution for 390. Elimination Game
390. Elimination Game: https://leetcode.com/problems/elimination-game/
Approach:
Observe the first item remaining in each step. The value will be added
1 << step either the remaining count is odd or it's a left-to-right
step. Hence the n | 0x55555.. is the key.class Solution(object):
def lastRemaining(self, n):
"""
:type n: int
:rtype: int
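        Approach: the 0x55... mask sets bits 0, 2, 4, ... (the left-to-right
        passes) and bit k of n says whether the count remaining at pass k is
        odd; the survivor is 1 plus the sum of 1 << k over the bit_length - 1
        passes where either condition holds.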
"""
return ((n | 0x55555555555555) & ((1 << (n.bit_length() - 1)) - 1)) + 1
|
<commit_before><commit_msg>Add py solution for 390. Elimination Game
390. Elimination Game: https://leetcode.com/problems/elimination-game/
Approach:
Observe the first item remaining in each step. The value will be added
1 << step either the remaining count is odd or it's a left-to-right
step. Hence the n | 0x55555.. is the key.<commit_after>class Solution(object):
def lastRemaining(self, n):
"""
:type n: int
:rtype: int
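        Approach: the 0x55... mask sets bits 0, 2, 4, ... (the left-to-right
        passes) and bit k of n says whether the count remaining at pass k is
        odd; the survivor is 1 plus the sum of 1 << k over the bit_length - 1
        passes where either condition holds.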
"""
return ((n | 0x55555555555555) & ((1 << (n.bit_length() - 1)) - 1)) + 1
|
|
00f3394deca4bcdca4e2158895bf5a5c2a8a879c
|
senlin/tests/tempest/api/profiles/test_profile_show.py
|
senlin/tests/tempest/api/profiles/test_profile_show.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestProfileShow(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestProfileShow, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
@classmethod
def resource_cleanup(cls):
# Delete profile
cls.client.delete_obj('profiles', cls.profile['id'])
@decorators.idempotent_id('36206711-0676-4e4f-8f5d-7029912ecade')
def test_show_profile(self):
res = self.client.get_obj('profiles', self.profile['id'])
        # Verify resp of profile show API
self.assertEqual(200, res['status'])
self.assertIsNone(res['location'])
self.assertIsNotNone(res['body'])
profile = res['body']
for key in ['created_at', 'domain', 'id', 'metadata', 'name',
'project', 'spec', 'type', 'updated_at', 'user']:
self.assertIn(key, profile)
|
Add API test for profile show
|
Add API test for profile show
Add API test for profile show
Change-Id: Ie782839c1f4703507f0bfa71817cd96fd7dfc2a4
|
Python
|
apache-2.0
|
openstack/senlin,openstack/senlin,stackforge/senlin,openstack/senlin,stackforge/senlin
|
Add API test for profile show
Add API test for profile show
Change-Id: Ie782839c1f4703507f0bfa71817cd96fd7dfc2a4
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestProfileShow(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestProfileShow, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
@classmethod
def resource_cleanup(cls):
# Delete profile
cls.client.delete_obj('profiles', cls.profile['id'])
@decorators.idempotent_id('36206711-0676-4e4f-8f5d-7029912ecade')
def test_show_profile(self):
res = self.client.get_obj('profiles', self.profile['id'])
        # Verify resp of profile show API
self.assertEqual(200, res['status'])
self.assertIsNone(res['location'])
self.assertIsNotNone(res['body'])
profile = res['body']
for key in ['created_at', 'domain', 'id', 'metadata', 'name',
'project', 'spec', 'type', 'updated_at', 'user']:
self.assertIn(key, profile)
|
<commit_before><commit_msg>Add API test for profile show
Add API test for profile show
Change-Id: Ie782839c1f4703507f0bfa71817cd96fd7dfc2a4<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestProfileShow(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestProfileShow, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
@classmethod
def resource_cleanup(cls):
# Delete profile
cls.client.delete_obj('profiles', cls.profile['id'])
@decorators.idempotent_id('36206711-0676-4e4f-8f5d-7029912ecade')
def test_show_profile(self):
res = self.client.get_obj('profiles', self.profile['id'])
        # Verify resp of profile show API
self.assertEqual(200, res['status'])
self.assertIsNone(res['location'])
self.assertIsNotNone(res['body'])
profile = res['body']
for key in ['created_at', 'domain', 'id', 'metadata', 'name',
'project', 'spec', 'type', 'updated_at', 'user']:
self.assertIn(key, profile)
|
Add API test for profile show
Add API test for profile show
Change-Id: Ie782839c1f4703507f0bfa71817cd96fd7dfc2a4# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestProfileShow(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestProfileShow, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
@classmethod
def resource_cleanup(cls):
# Delete profile
cls.client.delete_obj('profiles', cls.profile['id'])
@decorators.idempotent_id('36206711-0676-4e4f-8f5d-7029912ecade')
def test_show_profile(self):
res = self.client.get_obj('profiles', self.profile['id'])
        # Verify resp of profile show API
self.assertEqual(200, res['status'])
self.assertIsNone(res['location'])
self.assertIsNotNone(res['body'])
profile = res['body']
for key in ['created_at', 'domain', 'id', 'metadata', 'name',
'project', 'spec', 'type', 'updated_at', 'user']:
self.assertIn(key, profile)
|
<commit_before><commit_msg>Add API test for profile show
Add API test for profile show
Change-Id: Ie782839c1f4703507f0bfa71817cd96fd7dfc2a4<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestProfileShow(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestProfileShow, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
@classmethod
def resource_cleanup(cls):
# Delete profile
cls.client.delete_obj('profiles', cls.profile['id'])
@decorators.idempotent_id('36206711-0676-4e4f-8f5d-7029912ecade')
def test_show_profile(self):
res = self.client.get_obj('profiles', self.profile['id'])
        # Verify resp of profile show API
self.assertEqual(200, res['status'])
self.assertIsNone(res['location'])
self.assertIsNotNone(res['body'])
profile = res['body']
for key in ['created_at', 'domain', 'id', 'metadata', 'name',
'project', 'spec', 'type', 'updated_at', 'user']:
self.assertIn(key, profile)
|
|
8f6a51571a38d2cd35d12b40f046a93236e710ad
|
print-in-color/printer.py
|
print-in-color/printer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
printer.py: Simple demo of how to print to console using colors.
"""
__author__ = "Breno RdV"
__copyright__ = "Breno RdV @ raccoon.ninja"
__contact__ = "http://raccoon.ninja"
__license__ = "MIT"
__version__ = "01.000"
__maintainer__ = "Breno RdV"
__status__ = "Demonstration"
class printer():
"""Class to print using colors. """
_colors_ = {
**dict.fromkeys(("RED", "ERROR", "NO"), "\033[1;31m"),
**dict.fromkeys(("GREEN", "OK", "YES"), "\033[0;32m"),
**dict.fromkeys(("YELLOW", "WARN", "MAYBE"), "\033[0;93m"),
"BLUE": "\033[1;34m",
"CYAN": "\033[1;36m",
"RESET": "\033[0;0m",
"BOLD": "\033[;1m",
"REVERSE": "\033[;7m"
}
def _get_color_(self, key):
"""Gets the corresponding color ANSI code... """
try:
return self._colors_[key]
        except KeyError:
            return self._colors_["RESET"]
    def print(self, msg, color="RESET"):
"""Main print function..."""
# Get ANSI color code.
color = self._get_color_(key=color)
# Printing...
print("{}{}{}".format(color, msg, self._colors_["RESET"]))
def error(self, msg):
"""Print message in red..."""
self.print(msg=msg, color="RED")
def success(self, msg):
"""Print message in green..."""
self.print(msg=msg, color="GREEN")
def warning(self, msg):
"""Print message in yellow..."""
self.print(msg=msg, color="YELLOW")
if __name__ == "__main__":
p = printer()
p.success("SUCCESS Test...")
p.warning("WARN Test...")
p.error("ERROR Test...")
|
Print to terminal using colors.
|
Demo: Print to terminal using colors.
|
Python
|
mit
|
brenordv/python-snippets
|
Demo: Print to terminal using colors.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
printer.py: Simple demo of how to print to console using colors.
"""
__author__ = "Breno RdV"
__copyright__ = "Breno RdV @ raccoon.ninja"
__contact__ = "http://raccoon.ninja"
__license__ = "MIT"
__version__ = "01.000"
__maintainer__ = "Breno RdV"
__status__ = "Demonstration"
class printer():
"""Class to print using colors. """
_colors_ = {
**dict.fromkeys(("RED", "ERROR", "NO"), "\033[1;31m"),
**dict.fromkeys(("GREEN", "OK", "YES"), "\033[0;32m"),
**dict.fromkeys(("YELLOW", "WARN", "MAYBE"), "\033[0;93m"),
"BLUE": "\033[1;34m",
"CYAN": "\033[1;36m",
"RESET": "\033[0;0m",
"BOLD": "\033[;1m",
"REVERSE": "\033[;7m"
}
def _get_color_(self, key):
"""Gets the corresponding color ANSI code... """
try:
return self._colors_[key]
        except KeyError:
            return self._colors_["RESET"]
    def print(self, msg, color="RESET"):
"""Main print function..."""
# Get ANSI color code.
color = self._get_color_(key=color)
# Printing...
print("{}{}{}".format(color, msg, self._colors_["RESET"]))
def error(self, msg):
"""Print message in red..."""
self.print(msg=msg, color="RED")
def success(self, msg):
"""Print message in green..."""
self.print(msg=msg, color="GREEN")
def warning(self, msg):
"""Print message in yellow..."""
self.print(msg=msg, color="YELLOW")
if __name__ == "__main__":
p = printer()
p.success("SUCCESS Test...")
p.warning("WARN Test...")
p.error("ERROR Test...")
|
<commit_before><commit_msg>Demo: Print to terminal using colors.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
printer.py: Simple demo of how to print to console using colors.
"""
__author__ = "Breno RdV"
__copyright__ = "Breno RdV @ raccoon.ninja"
__contact__ = "http://raccoon.ninja"
__license__ = "MIT"
__version__ = "01.000"
__maintainer__ = "Breno RdV"
__status__ = "Demonstration"
class printer():
"""Class to print using colors. """
_colors_ = {
**dict.fromkeys(("RED", "ERROR", "NO"), "\033[1;31m"),
**dict.fromkeys(("GREEN", "OK", "YES"), "\033[0;32m"),
**dict.fromkeys(("YELLOW", "WARN", "MAYBE"), "\033[0;93m"),
"BLUE": "\033[1;34m",
"CYAN": "\033[1;36m",
"RESET": "\033[0;0m",
"BOLD": "\033[;1m",
"REVERSE": "\033[;7m"
}
def _get_color_(self, key):
"""Gets the corresponding color ANSI code... """
try:
return self._colors_[key]
        except KeyError:
            return self._colors_["RESET"]
    def print(self, msg, color="RESET"):
"""Main print function..."""
# Get ANSI color code.
color = self._get_color_(key=color)
# Printing...
print("{}{}{}".format(color, msg, self._colors_["RESET"]))
def error(self, msg):
"""Print message in red..."""
self.print(msg=msg, color="RED")
def success(self, msg):
"""Print message in green..."""
self.print(msg=msg, color="GREEN")
def warning(self, msg):
"""Print message in yellow..."""
self.print(msg=msg, color="YELLOW")
if __name__ == "__main__":
p = printer()
p.success("SUCCESS Test...")
p.warning("WARN Test...")
p.error("ERROR Test...")
|
Demo: Print to terminal using colors.#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
printer.py: Simple demo of how to print to console using colors.
"""
__author__ = "Breno RdV"
__copyright__ = "Breno RdV @ raccoon.ninja"
__contact__ = "http://raccoon.ninja"
__license__ = "MIT"
__version__ = "01.000"
__maintainer__ = "Breno RdV"
__status__ = "Demonstration"
class printer():
"""Class to print using colors. """
_colors_ = {
**dict.fromkeys(("RED", "ERROR", "NO"), "\033[1;31m"),
**dict.fromkeys(("GREEN", "OK", "YES"), "\033[0;32m"),
**dict.fromkeys(("YELLOW", "WARN", "MAYBE"), "\033[0;93m"),
"BLUE": "\033[1;34m",
"CYAN": "\033[1;36m",
"RESET": "\033[0;0m",
"BOLD": "\033[;1m",
"REVERSE": "\033[;7m"
}
def _get_color_(self, key):
"""Gets the corresponding color ANSI code... """
try:
return self._colors_[key]
        except KeyError:
            return self._colors_["RESET"]
    def print(self, msg, color="RESET"):
"""Main print function..."""
# Get ANSI color code.
color = self._get_color_(key=color)
# Printing...
print("{}{}{}".format(color, msg, self._colors_["RESET"]))
def error(self, msg):
"""Print message in red..."""
self.print(msg=msg, color="RED")
def success(self, msg):
"""Print message in green..."""
self.print(msg=msg, color="GREEN")
def warning(self, msg):
"""Print message in yellow..."""
self.print(msg=msg, color="YELLOW")
if __name__ == "__main__":
p = printer()
p.success("SUCCESS Test...")
p.warning("WARN Test...")
p.error("ERROR Test...")
|
<commit_before><commit_msg>Demo: Print to terminal using colors.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
printer.py: Simple demo of how to print to console using colors.
"""
__author__ = "Breno RdV"
__copyright__ = "Breno RdV @ raccoon.ninja"
__contact__ = "http://raccoon.ninja"
__license__ = "MIT"
__version__ = "01.000"
__maintainer__ = "Breno RdV"
__status__ = "Demonstration"
class printer():
"""Class to print using colors. """
_colors_ = {
**dict.fromkeys(("RED", "ERROR", "NO"), "\033[1;31m"),
**dict.fromkeys(("GREEN", "OK", "YES"), "\033[0;32m"),
**dict.fromkeys(("YELLOW", "WARN", "MAYBE"), "\033[0;93m"),
"BLUE": "\033[1;34m",
"CYAN": "\033[1;36m",
"RESET": "\033[0;0m",
"BOLD": "\033[;1m",
"REVERSE": "\033[;7m"
}
def _get_color_(self, key):
"""Gets the corresponding color ANSI code... """
try:
return self._colors_[key]
        except KeyError:
            return self._colors_["RESET"]
    def print(self, msg, color="RESET"):
"""Main print function..."""
# Get ANSI color code.
color = self._get_color_(key=color)
# Printing...
print("{}{}{}".format(color, msg, self._colors_["RESET"]))
def error(self, msg):
"""Print message in red..."""
self.print(msg=msg, color="RED")
def success(self, msg):
"""Print message in green..."""
self.print(msg=msg, color="GREEN")
def warning(self, msg):
"""Print message in yellow..."""
self.print(msg=msg, color="YELLOW")
if __name__ == "__main__":
p = printer()
p.success("SUCCESS Test...")
p.warning("WARN Test...")
p.error("ERROR Test...")
|
|
83458ec4a716aa8827c3ea6edb9d398073bbb93d
|
aiofcm/errors.py
|
aiofcm/errors.py
|
# See https://firebase.google.com/docs/cloud-messaging/xmpp-server-ref
INVALID_JSON = 'INVALID_JSON'
BAD_REGISTRATION = 'BAD_REGISTRATION'
DEVICE_UNREGISTERED = 'DEVICE_UNREGISTERED'
BAD_ACK = 'BAD_ACK'
SERVICE_UNAVAILABLE = 'SERVICE_UNAVAILABLE'
INTERNAL_SERVER_ERROR = 'INTERNAL_SERVER_ERROR'
DEVICE_MESSAGE_RATE_EXCEEDED = 'DEVICE_MESSAGE_RATE_EXCEEDED'
TOPICS_MESSAGE_RATE_EXCEEDED = 'TOPICS_MESSAGE_RATE_EXCEEDED'
CONNECTION_DRAINING = 'CONNECTION_DRAINING'
|
Add module with error code constants
|
Add module with error code constants
|
Python
|
apache-2.0
|
Fatal1ty/aiofcm
|
Add module with error code constants
|
# See https://firebase.google.com/docs/cloud-messaging/xmpp-server-ref
INVALID_JSON = 'INVALID_JSON'
BAD_REGISTRATION = 'BAD_REGISTRATION'
DEVICE_UNREGISTERED = 'DEVICE_UNREGISTERED'
BAD_ACK = 'BAD_ACK'
SERVICE_UNAVAILABLE = 'SERVICE_UNAVAILABLE'
INTERNAL_SERVER_ERROR = 'INTERNAL_SERVER_ERROR'
DEVICE_MESSAGE_RATE_EXCEEDED = 'DEVICE_MESSAGE_RATE_EXCEEDED'
TOPICS_MESSAGE_RATE_EXCEEDED = 'TOPICS_MESSAGE_RATE_EXCEEDED'
CONNECTION_DRAINING = 'CONNECTION_DRAINING'
|
<commit_before><commit_msg>Add module with error code constants<commit_after>
|
# See https://firebase.google.com/docs/cloud-messaging/xmpp-server-ref
INVALID_JSON = 'INVALID_JSON'
BAD_REGISTRATION = 'BAD_REGISTRATION'
DEVICE_UNREGISTERED = 'DEVICE_UNREGISTERED'
BAD_ACK = 'BAD_ACK'
SERVICE_UNAVAILABLE = 'SERVICE_UNAVAILABLE'
INTERNAL_SERVER_ERROR = 'INTERNAL_SERVER_ERROR'
DEVICE_MESSAGE_RATE_EXCEEDED = 'DEVICE_MESSAGE_RATE_EXCEEDED'
TOPICS_MESSAGE_RATE_EXCEEDED = 'TOPICS_MESSAGE_RATE_EXCEEDED'
CONNECTION_DRAINING = 'CONNECTION_DRAINING'
|
Add module with error code constants# See https://firebase.google.com/docs/cloud-messaging/xmpp-server-ref
INVALID_JSON = 'INVALID_JSON'
BAD_REGISTRATION = 'BAD_REGISTRATION'
DEVICE_UNREGISTERED = 'DEVICE_UNREGISTERED'
BAD_ACK = 'BAD_ACK'
SERVICE_UNAVAILABLE = 'SERVICE_UNAVAILABLE'
INTERNAL_SERVER_ERROR = 'INTERNAL_SERVER_ERROR'
DEVICE_MESSAGE_RATE_EXCEEDED = 'DEVICE_MESSAGE_RATE_EXCEEDED'
TOPICS_MESSAGE_RATE_EXCEEDED = 'TOPICS_MESSAGE_RATE_EXCEEDED'
CONNECTION_DRAINING = 'CONNECTION_DRAINING'
|
<commit_before><commit_msg>Add module with error code constants<commit_after># See https://firebase.google.com/docs/cloud-messaging/xmpp-server-ref
INVALID_JSON = 'INVALID_JSON'
BAD_REGISTRATION = 'BAD_REGISTRATION'
DEVICE_UNREGISTERED = 'DEVICE_UNREGISTERED'
BAD_ACK = 'BAD_ACK'
SERVICE_UNAVAILABLE = 'SERVICE_UNAVAILABLE'
INTERNAL_SERVER_ERROR = 'INTERNAL_SERVER_ERROR'
DEVICE_MESSAGE_RATE_EXCEEDED = 'DEVICE_MESSAGE_RATE_EXCEEDED'
TOPICS_MESSAGE_RATE_EXCEEDED = 'TOPICS_MESSAGE_RATE_EXCEEDED'
CONNECTION_DRAINING = 'CONNECTION_DRAINING'
|
|
aa2f6d9bb400cb696d4d5942d813b9fccb3022b6
|
mezzanine/core/management.py
|
mezzanine/core/management.py
|
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth import models as auth_app
from django.db.models.signals import post_syncdb
def create_demo_user(app, created_models, verbosity, db, **kwargs):
if settings.DEBUG and User in created_models:
if verbosity >= 2:
print "Creating demo User object"
User.objects.create_superuser("demo", "example@example.com", "demo")
post_syncdb.connect(create_demo_user, sender=auth_app)
|
Add a default user when syncdb is called.
|
Add a default user when syncdb is called.
|
Python
|
bsd-2-clause
|
promil23/mezzanine,joshcartme/mezzanine,dsanders11/mezzanine,dsanders11/mezzanine,mush42/mezzanine,PegasusWang/mezzanine,sjuxax/mezzanine,SoLoHiC/mezzanine,damnfine/mezzanine,frankchin/mezzanine,orlenko/plei,frankier/mezzanine,christianwgd/mezzanine,orlenko/sfpirg,Cicero-Zhao/mezzanine,vladir/mezzanine,emile2016/mezzanine,Cajoline/mezzanine,damnfine/mezzanine,Kniyl/mezzanine,tuxinhang1989/mezzanine,viaregio/mezzanine,Kniyl/mezzanine,biomassives/mezzanine,nikolas/mezzanine,promil23/mezzanine,scarcry/snm-mezzanine,gbosh/mezzanine,molokov/mezzanine,gradel/mezzanine,dovydas/mezzanine,theclanks/mezzanine,vladir/mezzanine,dovydas/mezzanine,saintbird/mezzanine,wbtuomela/mezzanine,wrwrwr/mezzanine,emile2016/mezzanine,PegasusWang/mezzanine,ZeroXn/mezzanine,stephenmcd/mezzanine,readevalprint/mezzanine,fusionbox/mezzanine,geodesign/mezzanine,readevalprint/mezzanine,damnfine/mezzanine,guibernardino/mezzanine,eino-makitalo/mezzanine,webounty/mezzanine,eino-makitalo/mezzanine,orlenko/plei,readevalprint/mezzanine,molokov/mezzanine,theclanks/mezzanine,douglaskastle/mezzanine,adrian-the-git/mezzanine,tuxinhang1989/mezzanine,ZeroXn/mezzanine,jjz/mezzanine,adrian-the-git/mezzanine,scarcry/snm-mezzanine,promil23/mezzanine,biomassives/mezzanine,ZeroXn/mezzanine,spookylukey/mezzanine,dekomote/mezzanine-modeltranslation-backport,frankchin/mezzanine,wyzex/mezzanine,industrydive/mezzanine,cccs-web/mezzanine,ryneeverett/mezzanine,Kniyl/mezzanine,agepoly/mezzanine,jerivas/mezzanine,dustinrb/mezzanine,dsanders11/mezzanine,wyzex/mezzanine,jjz/mezzanine,AlexHill/mezzanine,guibernardino/mezzanine,mush42/mezzanine,ryneeverett/mezzanine,adrian-the-git/mezzanine,industrydive/mezzanine,sjdines/mezzanine,jerivas/mezzanine,saintbird/mezzanine,christianwgd/mezzanine,Skytorn86/mezzanine,SoLoHiC/mezzanine,dovydas/mezzanine,gradel/mezzanine,christianwgd/mezzanine,spookylukey/mezzanine,dekomote/mezzanine-modeltranslation-backport,SoLoHiC/mezzanine,dekomote/mezzanine-modeltranslation-backport,wbtuomela/mezzanine,nikolas/mezzanine,joshcartme/mezzanine,industrydive/mezzanine,fusionbox/mezzanine,frankier/mezzanine,vladir/mezzanine,jerivas/mezzanine,cccs-web/mezzanine,orlenko/plei,sjuxax/mezzanine,PegasusWang/mezzanine,AlexHill/mezzanine,stephenmcd/mezzanine,nikolas/mezzanine,dustinrb/mezzanine,joshcartme/mezzanine,mush42/mezzanine,douglaskastle/mezzanine,batpad/mezzanine,theclanks/mezzanine,viaregio/mezzanine,molokov/mezzanine,ryneeverett/mezzanine,jjz/mezzanine,emile2016/mezzanine,geodesign/mezzanine,batpad/mezzanine,Cicero-Zhao/mezzanine,webounty/mezzanine,stbarnabas/mezzanine,Skytorn86/mezzanine,dustinrb/mezzanine,eino-makitalo/mezzanine,spookylukey/mezzanine,sjdines/mezzanine,gradel/mezzanine,webounty/mezzanine,biomassives/mezzanine,Cajoline/mezzanine,wyzex/mezzanine,orlenko/sfpirg,frankier/mezzanine,gbosh/mezzanine,saintbird/mezzanine,stephenmcd/mezzanine,agepoly/mezzanine,gbosh/mezzanine,Skytorn86/mezzanine,tuxinhang1989/mezzanine,wbtuomela/mezzanine,douglaskastle/mezzanine,stbarnabas/mezzanine,sjuxax/mezzanine,wrwrwr/mezzanine,Cajoline/mezzanine,orlenko/sfpirg,frankchin/mezzanine,geodesign/mezzanine,scarcry/snm-mezzanine,sjdines/mezzanine,agepoly/mezzanine,viaregio/mezzanine
|
Add a default user when syncdb is called.
|
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth import models as auth_app
from django.db.models.signals import post_syncdb
def create_demo_user(app, created_models, verbosity, db, **kwargs):
if settings.DEBUG and User in created_models:
if verbosity >= 2:
print "Creating demo User object"
User.objects.create_superuser("demo", "example@example.com", "demo")
post_syncdb.connect(create_demo_user, sender=auth_app)
|
<commit_before><commit_msg>Add a default user when syncdb is called.<commit_after>
|
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth import models as auth_app
from django.db.models.signals import post_syncdb
def create_demo_user(app, created_models, verbosity, db, **kwargs):
if settings.DEBUG and User in created_models:
if verbosity >= 2:
print "Creating demo User object"
User.objects.create_superuser("demo", "example@example.com", "demo")
post_syncdb.connect(create_demo_user, sender=auth_app)
|
Add a default user when syncdb is called.
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth import models as auth_app
from django.db.models.signals import post_syncdb
def create_demo_user(app, created_models, verbosity, db, **kwargs):
if settings.DEBUG and User in created_models:
if verbosity >= 2:
print "Creating demo User object"
User.objects.create_superuser("demo", "example@example.com", "demo")
post_syncdb.connect(create_demo_user, sender=auth_app)
|
<commit_before><commit_msg>Add a default user when syncdb is called.<commit_after>
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth import models as auth_app
from django.db.models.signals import post_syncdb
def create_demo_user(app, created_models, verbosity, db, **kwargs):
if settings.DEBUG and User in created_models:
if verbosity >= 2:
print "Creating demo User object"
User.objects.create_superuser("demo", "example@example.com", "demo")
post_syncdb.connect(create_demo_user, sender=auth_app)
|
|
755397b519321b6bc5e85535f2f9345ada972196
|
tot/utils.py
|
tot/utils.py
|
from opencivicdata.models.people_orgs import Person
def get_current_people(position):
if position == 'senator':
return Person.objects.filter(memberships__organization__name='Florida Senate')
if position == 'representative':
return Person.objects.filter(memberships__organization__name='Florida House of Representatives')
|
Add start of util function that will return current people
|
Add start of util function that will return current people
|
Python
|
mit
|
jamesturk/tot,jamesturk/tot,jamesturk/tot,jamesturk/tot,jamesturk/tot
|
Add start of util function that will return current people
|
from opencivicdata.models.people_orgs import Person
def get_current_people(position):
if position == 'senator':
return Person.objects.filter(memberships__organization__name='Florida Senate')
if position == 'representative':
return Person.objects.filter(memberships__organization__name='Florida House of Representatives')
|
<commit_before><commit_msg>Add start of util function that will return current people<commit_after>
|
from opencivicdata.models.people_orgs import Person
def get_current_people(position):
if position == 'senator':
return Person.objects.filter(memberships__organization__name='Florida Senate')
if position == 'representative':
return Person.objects.filter(memberships__organization__name='Florida House of Representatives')
|
Add start of util function that will return current people
from opencivicdata.models.people_orgs import Person
def get_current_people(position):
if position == 'senator':
return Person.objects.filter(memberships__organization__name='Florida Senate')
if position == 'representative':
return Person.objects.filter(memberships__organization__name='Florida House of Representatives')
|
<commit_before><commit_msg>Add start of util function that will return current people<commit_after>
from opencivicdata.models.people_orgs import Person
def get_current_people(position):
if position == 'senator':
return Person.objects.filter(memberships__organization__name='Florida Senate')
if position == 'representative':
return Person.objects.filter(memberships__organization__name='Florida House of Representatives')
|
|
409350557b8b32a1bbe67ea418c3cb043097fd03
|
examples/grad/16-force_scan.py
|
examples/grad/16-force_scan.py
|
#!/usr/bin/env python
'''
Scan molecule dissociation curve and the force on the curve.
'''
import numpy as np
import matplotlib.pyplot as plt
from pyscf import gto, scf
bond = np.arange(0.8, 5.0, .1)
energy = []
force = []
mol = gto.Mole(atom=[['N', 0, 0, -0.4],
['N', 0, 0, 0.4]],
basis='ccpvdz')
mf_grad_scan = scf.RHF(mol).nuc_grad_method().as_scanner()
for r in reversed(bond):
e_tot, grad = mf_grad_scan([['N', 0, 0, -r / 2],
['N', 0, 0, r / 2]])
energy.append(e_tot)
force.append(grad[0,2])
plt.plot(bond, energy[::-1])
plt.show()
plt.plot(bond, force[::-1])
plt.show()
|
Add example for nuclear gradients
|
Add example for nuclear gradients
|
Python
|
apache-2.0
|
sunqm/pyscf,gkc1000/pyscf,sunqm/pyscf,sunqm/pyscf,gkc1000/pyscf,gkc1000/pyscf,gkc1000/pyscf,gkc1000/pyscf,sunqm/pyscf
|
Add example for nuclear gradients
|
#!/usr/bin/env python
'''
Scan molecule dissociation curve and the force on the curve.
'''
import numpy as np
import matplotlib.pyplot as plt
from pyscf import gto, scf
bond = np.arange(0.8, 5.0, .1)
energy = []
force = []
mol = gto.Mole(atom=[['N', 0, 0, -0.4],
['N', 0, 0, 0.4]],
basis='ccpvdz')
mf_grad_scan = scf.RHF(mol).nuc_grad_method().as_scanner()
for r in reversed(bond):
e_tot, grad = mf_grad_scan([['N', 0, 0, -r / 2],
['N', 0, 0, r / 2]])
energy.append(e_tot)
force.append(grad[0,2])
plt.plot(bond, energy[::-1])
plt.show()
plt.plot(bond, force[::-1])
plt.show()
|
<commit_before><commit_msg>Add example for nuclear gradients<commit_after>
|
#!/usr/bin/env python
'''
Scan molecule dissociation curve and the force on the curve.
'''
import numpy as np
import matplotlib.pyplot as plt
from pyscf import gto, scf
bond = np.arange(0.8, 5.0, .1)
energy = []
force = []
mol = gto.Mole(atom=[['N', 0, 0, -0.4],
['N', 0, 0, 0.4]],
basis='ccpvdz')
mf_grad_scan = scf.RHF(mol).nuc_grad_method().as_scanner()
for r in reversed(bond):
e_tot, grad = mf_grad_scan([['N', 0, 0, -r / 2],
['N', 0, 0, r / 2]])
energy.append(e_tot)
force.append(grad[0,2])
plt.plot(bond, energy[::-1])
plt.show()
plt.plot(bond, force[::-1])
plt.show()
|
Add example for nuclear gradients#!/usr/bin/env python
'''
Scan molecule dissociation curve and the force on the curve.
'''
import numpy as np
import matplotlib.pyplot as plt
from pyscf import gto, scf
bond = np.arange(0.8, 5.0, .1)
energy = []
force = []
mol = gto.Mole(atom=[['N', 0, 0, -0.4],
['N', 0, 0, 0.4]],
basis='ccpvdz')
mf_grad_scan = scf.RHF(mol).nuc_grad_method().as_scanner()
for r in reversed(bond):
e_tot, grad = mf_grad_scan([['N', 0, 0, -r / 2],
['N', 0, 0, r / 2]])
energy.append(e_tot)
force.append(grad[0,2])
plt.plot(bond, energy[::-1])
plt.show()
plt.plot(bond, force[::-1])
plt.show()
|
<commit_before><commit_msg>Add example for nuclear gradients<commit_after>#!/usr/bin/env python
'''
Scan molecule dissociation curve and the force on the curve.
'''
import numpy as np
import matplotlib.pyplot as plt
from pyscf import gto, scf
bond = np.arange(0.8, 5.0, .1)
energy = []
force = []
mol = gto.Mole(atom=[['N', 0, 0, -0.4],
['N', 0, 0, 0.4]],
basis='ccpvdz')
mf_grad_scan = scf.RHF(mol).nuc_grad_method().as_scanner()
for r in reversed(bond):
e_tot, grad = mf_grad_scan([['N', 0, 0, -r / 2],
['N', 0, 0, r / 2]])
energy.append(e_tot)
force.append(grad[0,2])
plt.plot(bond, energy[::-1])
plt.show()
plt.plot(bond, force[::-1])
plt.show()
|
|
91535fcab34a4f85e5da4f057f9030ee2d6708db
|
Discord/cogs/tweepy.py
|
Discord/cogs/tweepy.py
|
from discord import app_commands
from discord.ext import commands
async def setup(bot):
await bot.add_cog(Tweepy())
class Tweepy(commands.Cog, app_commands.Group):
"""Tweepy"""
|
Add Tweepy cog and application command group
|
[Discord] Add Tweepy cog and application command group
|
Python
|
mit
|
Harmon758/Harmonbot,Harmon758/Harmonbot
|
[Discord] Add Tweepy cog and application command group
|
from discord import app_commands
from discord.ext import commands
async def setup(bot):
await bot.add_cog(Tweepy())
class Tweepy(commands.Cog, app_commands.Group):
"""Tweepy"""
|
<commit_before><commit_msg>[Discord] Add Tweepy cog and application command group<commit_after>
|
from discord import app_commands
from discord.ext import commands
async def setup(bot):
await bot.add_cog(Tweepy())
class Tweepy(commands.Cog, app_commands.Group):
"""Tweepy"""
|
[Discord] Add Tweepy cog and application command group
from discord import app_commands
from discord.ext import commands
async def setup(bot):
await bot.add_cog(Tweepy())
class Tweepy(commands.Cog, app_commands.Group):
"""Tweepy"""
|
<commit_before><commit_msg>[Discord] Add Tweepy cog and application command group<commit_after>
from discord import app_commands
from discord.ext import commands
async def setup(bot):
await bot.add_cog(Tweepy())
class Tweepy(commands.Cog, app_commands.Group):
"""Tweepy"""
|
|
27dfdd2c646dbcaaad80af7aed72e3053c42efad
|
tensor2tensor/trax/models/research/__init__.py
|
tensor2tensor/trax/models/research/__init__.py
|
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
Add init Trax research models.
|
Add init Trax research models.
PiperOrigin-RevId: 247220362
|
Python
|
apache-2.0
|
tensorflow/tensor2tensor,tensorflow/tensor2tensor,tensorflow/tensor2tensor,tensorflow/tensor2tensor,tensorflow/tensor2tensor
|
Add init Trax research models.
PiperOrigin-RevId: 247220362
|
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
<commit_before><commit_msg>Add init Trax research models.
PiperOrigin-RevId: 247220362<commit_after>
|
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
Add init Trax research models.
PiperOrigin-RevId: 247220362# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
<commit_before><commit_msg>Add init Trax research models.
PiperOrigin-RevId: 247220362<commit_after># coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
|
9d0edb3c9a937e6346d03482d8544ce7a411a0d2
|
tests/twisted/roster/request-never-answered-2.py
|
tests/twisted/roster/request-never-answered-2.py
|
"""
Exhibit a bug where RequestChannel times out when requesting a group channel
after the roster has been received.
"""
import dbus
from gabbletest import exec_test, sync_stream
from servicetest import sync_dbus, call_async
HT_CONTACT_LIST = 3
HT_GROUP = 4
def test(q, bus, conn, stream):
conn.Connect()
# q.expect('dbus-signal', signal='StatusChanged', args=[0, 1])
roster_event = q.expect('stream-iq', query_ns='jabber:iq:roster')
roster_event.stanza['type'] = 'result'
call_async(q, conn, "RequestHandles", HT_GROUP, ['test'])
event = q.expect('dbus-return', method='RequestHandles')
test_handle = event.value[0][0]
# send an empty roster
stream.send(roster_event.stanza)
sync_stream(q, stream)
sync_dbus(bus, q, conn)
call_async(q, conn, 'RequestChannel',
'org.freedesktop.Telepathy.Channel.Type.ContactList', HT_GROUP,
test_handle, True)
event = q.expect('dbus-signal', signal='NewChannel')
path, type, handle_type, handle, suppress_handler = event.args
assert handle_type == HT_GROUP, handle_type
assert handle == test_handle, (handle, test_handle)
event = q.expect('dbus-return', method='RequestChannel')
assert event.value[0] == path, (event.args[0], path)
if __name__ == '__main__':
exec_test(test)
|
Add a test for the reverse of request-never-answered.py
|
Add a test for the reverse of request-never-answered.py
When the race in test-group-race.py had the wrong result, it triggered a
bug in the requestotronned roster code.
|
Python
|
lgpl-2.1
|
community-ssu/telepathy-gabble,mlundblad/telepathy-gabble,Ziemin/telepathy-gabble,community-ssu/telepathy-gabble,jku/telepathy-gabble,jku/telepathy-gabble,mlundblad/telepathy-gabble,Ziemin/telepathy-gabble,Ziemin/telepathy-gabble,mlundblad/telepathy-gabble,Ziemin/telepathy-gabble,community-ssu/telepathy-gabble,community-ssu/telepathy-gabble,jku/telepathy-gabble
|
Add a test for the reverse of request-never-answered.py
When the race in test-group-race.py had the wrong result, it triggered a
bug in the requestotronned roster code.
|
"""
Exhibit a bug where RequestChannel times out when requesting a group channel
after the roster has been received.
"""
import dbus
from gabbletest import exec_test, sync_stream
from servicetest import sync_dbus, call_async
HT_CONTACT_LIST = 3
HT_GROUP = 4
def test(q, bus, conn, stream):
conn.Connect()
# q.expect('dbus-signal', signal='StatusChanged', args=[0, 1])
roster_event = q.expect('stream-iq', query_ns='jabber:iq:roster')
roster_event.stanza['type'] = 'result'
call_async(q, conn, "RequestHandles", HT_GROUP, ['test'])
event = q.expect('dbus-return', method='RequestHandles')
test_handle = event.value[0][0]
# send an empty roster
stream.send(roster_event.stanza)
sync_stream(q, stream)
sync_dbus(bus, q, conn)
call_async(q, conn, 'RequestChannel',
'org.freedesktop.Telepathy.Channel.Type.ContactList', HT_GROUP,
test_handle, True)
event = q.expect('dbus-signal', signal='NewChannel')
path, type, handle_type, handle, suppress_handler = event.args
assert handle_type == HT_GROUP, handle_type
assert handle == test_handle, (handle, test_handle)
event = q.expect('dbus-return', method='RequestChannel')
assert event.value[0] == path, (event.args[0], path)
if __name__ == '__main__':
exec_test(test)
|
<commit_before><commit_msg>Add a test for the reverse of request-never-answered.py
When the race in test-group-race.py had the wrong result, it triggered a
bug in the requestotronned roster code.<commit_after>
|
"""
Exhibit a bug where RequestChannel times out when requesting a group channel
after the roster has been received.
"""
import dbus
from gabbletest import exec_test, sync_stream
from servicetest import sync_dbus, call_async
HT_CONTACT_LIST = 3
HT_GROUP = 4
def test(q, bus, conn, stream):
conn.Connect()
# q.expect('dbus-signal', signal='StatusChanged', args=[0, 1])
roster_event = q.expect('stream-iq', query_ns='jabber:iq:roster')
roster_event.stanza['type'] = 'result'
call_async(q, conn, "RequestHandles", HT_GROUP, ['test'])
event = q.expect('dbus-return', method='RequestHandles')
test_handle = event.value[0][0]
# send an empty roster
stream.send(roster_event.stanza)
sync_stream(q, stream)
sync_dbus(bus, q, conn)
call_async(q, conn, 'RequestChannel',
'org.freedesktop.Telepathy.Channel.Type.ContactList', HT_GROUP,
test_handle, True)
event = q.expect('dbus-signal', signal='NewChannel')
path, type, handle_type, handle, suppress_handler = event.args
assert handle_type == HT_GROUP, handle_type
assert handle == test_handle, (handle, test_handle)
event = q.expect('dbus-return', method='RequestChannel')
assert event.value[0] == path, (event.args[0], path)
if __name__ == '__main__':
exec_test(test)
|
Add a test for the reverse of request-never-answered.py
When the race in test-group-race.py had the wrong result, it triggered a
bug in the requestotronned roster code."""
Exhibit a bug where RequestChannel times out when requesting a group channel
after the roster has been received.
"""
import dbus
from gabbletest import exec_test, sync_stream
from servicetest import sync_dbus, call_async
HT_CONTACT_LIST = 3
HT_GROUP = 4
def test(q, bus, conn, stream):
conn.Connect()
# q.expect('dbus-signal', signal='StatusChanged', args=[0, 1])
roster_event = q.expect('stream-iq', query_ns='jabber:iq:roster')
roster_event.stanza['type'] = 'result'
call_async(q, conn, "RequestHandles", HT_GROUP, ['test'])
event = q.expect('dbus-return', method='RequestHandles')
test_handle = event.value[0][0]
# send an empty roster
stream.send(roster_event.stanza)
sync_stream(q, stream)
sync_dbus(bus, q, conn)
call_async(q, conn, 'RequestChannel',
'org.freedesktop.Telepathy.Channel.Type.ContactList', HT_GROUP,
test_handle, True)
event = q.expect('dbus-signal', signal='NewChannel')
path, type, handle_type, handle, suppress_handler = event.args
assert handle_type == HT_GROUP, handle_type
assert handle == test_handle, (handle, test_handle)
event = q.expect('dbus-return', method='RequestChannel')
assert event.value[0] == path, (event.args[0], path)
if __name__ == '__main__':
exec_test(test)
|
<commit_before><commit_msg>Add a test for the reverse of request-never-answered.py
When the race in test-group-race.py had the wrong result, it triggered a
bug in the requestotronned roster code.<commit_after>"""
Exhibit a bug where RequestChannel times out when requesting a group channel
after the roster has been received.
"""
import dbus
from gabbletest import exec_test, sync_stream
from servicetest import sync_dbus, call_async
HT_CONTACT_LIST = 3
HT_GROUP = 4
def test(q, bus, conn, stream):
conn.Connect()
# q.expect('dbus-signal', signal='StatusChanged', args=[0, 1])
roster_event = q.expect('stream-iq', query_ns='jabber:iq:roster')
roster_event.stanza['type'] = 'result'
call_async(q, conn, "RequestHandles", HT_GROUP, ['test'])
event = q.expect('dbus-return', method='RequestHandles')
test_handle = event.value[0][0]
# send an empty roster
stream.send(roster_event.stanza)
sync_stream(q, stream)
sync_dbus(bus, q, conn)
call_async(q, conn, 'RequestChannel',
'org.freedesktop.Telepathy.Channel.Type.ContactList', HT_GROUP,
test_handle, True)
event = q.expect('dbus-signal', signal='NewChannel')
path, type, handle_type, handle, suppress_handler = event.args
assert handle_type == HT_GROUP, handle_type
assert handle == test_handle, (handle, test_handle)
event = q.expect('dbus-return', method='RequestChannel')
assert event.value[0] == path, (event.args[0], path)
if __name__ == '__main__':
exec_test(test)
|
|
6a223a5b8b82db40121bdfe296af471463d31184
|
examples/image_fromarray.py
|
examples/image_fromarray.py
|
"""Create a nifti image from a numpy array and an affine transform."""
from os import path
import numpy as np
from neuroimaging.core.api import fromarray, save_image, load_image, \
Affine, CoordinateMap
# Imports used just for development and testing. User's typically
# would not uses these when creating an image.
from tempfile import NamedTemporaryFile
from neuroimaging.testing import assert_equal
# Load an image to get the array and affine
fn = path.join(path.expanduser('~'), '.nipy', 'tests', 'data',
'avg152T1.nii.gz')
if not path.exists(fn):
raise IOError('file does not exists: %s\n' % fn)
# Use one of our test files to get an array and affine from.
img = load_image(fn)
arr = np.asarray(img)
affine = img.affine.copy()
# We use a temporary file for this example so as to not create junk
# files in the nipy directory.
tmpfile = NamedTemporaryFile(suffix='.nii.gz')
#
# START HERE
#
# 1) Create a CoordinateMap from the affine transform which specifies
# the mapping from input to output coordinates.
# Specify the axis order of the affine
axes_names = ['x', 'y', 'z']
# Build a CoordinateMap to create the image with
coordmap = CoordinateMap.from_affine(Affine(affine), names=axes_names,
shape=arr.shape)
# 2) Create a nipy image from the array and CoordinateMap
# Create new image
newimg = fromarray(arr, names=axes_names, coordmap=coordmap)
# 3) Save the nipy image to the specified filename
save_image(newimg, tmpfile.name)
#
# END HERE
#
# Reload and verify the affine was saved correctly.
tmpimg = load_image(tmpfile.name)
assert_equal(tmpimg.affine, affine)
assert_equal(np.mean(tmpimg), np.mean(img))
assert_equal(np.std(tmpimg), np.std(img))
assert_equal(np.asarray(tmpimg), np.asarray(img))
|
Add example for creating an image from an array and an affine.
|
Add example for creating an image from an array and an affine.
|
Python
|
bsd-3-clause
|
yarikoptic/NiPy-OLD,yarikoptic/NiPy-OLD
|
Add example for creating an image from an array and an affine.
|
"""Create a nifti image from a numpy array and an affine transform."""
from os import path
import numpy as np
from neuroimaging.core.api import fromarray, save_image, load_image, \
Affine, CoordinateMap
# Imports used just for development and testing. User's typically
# would not uses these when creating an image.
from tempfile import NamedTemporaryFile
from neuroimaging.testing import assert_equal
# Load an image to get the array and affine
fn = path.join(path.expanduser('~'), '.nipy', 'tests', 'data',
'avg152T1.nii.gz')
if not path.exists(fn):
raise IOError('file does not exists: %s\n' % fn)
# Use one of our test files to get an array and affine from.
img = load_image(fn)
arr = np.asarray(img)
affine = img.affine.copy()
# We use a temporary file for this example so as to not create junk
# files in the nipy directory.
tmpfile = NamedTemporaryFile(suffix='.nii.gz')
#
# START HERE
#
# 1) Create a CoordinateMap from the affine transform which specifies
# the mapping from input to output coordinates.
# Specify the axis order of the affine
axes_names = ['x', 'y', 'z']
# Build a CoordinateMap to create the image with
coordmap = CoordinateMap.from_affine(Affine(affine), names=axes_names,
shape=arr.shape)
# 2) Create a nipy image from the array and CoordinateMap
# Create new image
newimg = fromarray(arr, names=axes_names, coordmap=coordmap)
# 3) Save the nipy image to the specified filename
save_image(newimg, tmpfile.name)
#
# END HERE
#
# Reload and verify the affine was saved correctly.
tmpimg = load_image(tmpfile.name)
assert_equal(tmpimg.affine, affine)
assert_equal(np.mean(tmpimg), np.mean(img))
assert_equal(np.std(tmpimg), np.std(img))
assert_equal(np.asarray(tmpimg), np.asarray(img))
|
<commit_before><commit_msg>Add example for creating an image from an array and an affine.<commit_after>
|
"""Create a nifti image from a numpy array and an affine transform."""
from os import path
import numpy as np
from neuroimaging.core.api import fromarray, save_image, load_image, \
Affine, CoordinateMap
# Imports used just for development and testing. User's typically
# would not uses these when creating an image.
from tempfile import NamedTemporaryFile
from neuroimaging.testing import assert_equal
# Load an image to get the array and affine
fn = path.join(path.expanduser('~'), '.nipy', 'tests', 'data',
'avg152T1.nii.gz')
if not path.exists(fn):
raise IOError('file does not exists: %s\n' % fn)
# Use one of our test files to get an array and affine from.
img = load_image(fn)
arr = np.asarray(img)
affine = img.affine.copy()
# We use a temporary file for this example so as to not create junk
# files in the nipy directory.
tmpfile = NamedTemporaryFile(suffix='.nii.gz')
#
# START HERE
#
# 1) Create a CoordinateMap from the affine transform which specifies
# the mapping from input to output coordinates.
# Specify the axis order of the affine
axes_names = ['x', 'y', 'z']
# Build a CoordinateMap to create the image with
coordmap = CoordinateMap.from_affine(Affine(affine), names=axes_names,
shape=arr.shape)
# 2) Create a nipy image from the array and CoordinateMap
# Create new image
newimg = fromarray(arr, names=axes_names, coordmap=coordmap)
# 3) Save the nipy image to the specified filename
save_image(newimg, tmpfile.name)
#
# END HERE
#
# Reload and verify the affine was saved correctly.
tmpimg = load_image(tmpfile.name)
assert_equal(tmpimg.affine, affine)
assert_equal(np.mean(tmpimg), np.mean(img))
assert_equal(np.std(tmpimg), np.std(img))
assert_equal(np.asarray(tmpimg), np.asarray(img))
|
Add example for creating an image from an array and an affine."""Create a nifti image from a numpy array and an affine transform."""
from os import path
import numpy as np
from neuroimaging.core.api import fromarray, save_image, load_image, \
Affine, CoordinateMap
# Imports used just for development and testing. User's typically
# would not uses these when creating an image.
from tempfile import NamedTemporaryFile
from neuroimaging.testing import assert_equal
# Load an image to get the array and affine
fn = path.join(path.expanduser('~'), '.nipy', 'tests', 'data',
'avg152T1.nii.gz')
if not path.exists(fn):
raise IOError('file does not exists: %s\n' % fn)
# Use one of our test files to get an array and affine from.
img = load_image(fn)
arr = np.asarray(img)
affine = img.affine.copy()
# We use a temporary file for this example so as to not create junk
# files in the nipy directory.
tmpfile = NamedTemporaryFile(suffix='.nii.gz')
#
# START HERE
#
# 1) Create a CoordinateMap from the affine transform which specifies
# the mapping from input to output coordinates.
# Specify the axis order of the affine
axes_names = ['x', 'y', 'z']
# Build a CoordinateMap to create the image with
coordmap = CoordinateMap.from_affine(Affine(affine), names=axes_names,
shape=arr.shape)
# 2) Create a nipy image from the array and CoordinateMap
# Create new image
newimg = fromarray(arr, names=axes_names, coordmap=coordmap)
# 3) Save the nipy image to the specified filename
save_image(newimg, tmpfile.name)
#
# END HERE
#
# Reload and verify the affine was saved correctly.
tmpimg = load_image(tmpfile.name)
assert_equal(tmpimg.affine, affine)
assert_equal(np.mean(tmpimg), np.mean(img))
assert_equal(np.std(tmpimg), np.std(img))
assert_equal(np.asarray(tmpimg), np.asarray(img))
|
<commit_before><commit_msg>Add example for creating an image from an array and an affine.<commit_after>"""Create a nifti image from a numpy array and an affine transform."""
from os import path
import numpy as np
from neuroimaging.core.api import fromarray, save_image, load_image, \
Affine, CoordinateMap
# Imports used just for development and testing. User's typically
# would not uses these when creating an image.
from tempfile import NamedTemporaryFile
from neuroimaging.testing import assert_equal
# Load an image to get the array and affine
fn = path.join(path.expanduser('~'), '.nipy', 'tests', 'data',
'avg152T1.nii.gz')
if not path.exists(fn):
raise IOError('file does not exists: %s\n' % fn)
# Use one of our test files to get an array and affine from.
img = load_image(fn)
arr = np.asarray(img)
affine = img.affine.copy()
# We use a temporary file for this example so as to not create junk
# files in the nipy directory.
tmpfile = NamedTemporaryFile(suffix='.nii.gz')
#
# START HERE
#
# 1) Create a CoordinateMap from the affine transform which specifies
# the mapping from input to output coordinates.
# Specify the axis order of the affine
axes_names = ['x', 'y', 'z']
# Build a CoordinateMap to create the image with
coordmap = CoordinateMap.from_affine(Affine(affine), names=axes_names,
shape=arr.shape)
# 2) Create a nipy image from the array and CoordinateMap
# Create new image
newimg = fromarray(arr, names=axes_names, coordmap=coordmap)
# 3) Save the nipy image to the specified filename
save_image(newimg, tmpfile.name)
#
# END HERE
#
# Reload and verify the affine was saved correctly.
tmpimg = load_image(tmpfile.name)
assert_equal(tmpimg.affine, affine)
assert_equal(np.mean(tmpimg), np.mean(img))
assert_equal(np.std(tmpimg), np.std(img))
assert_equal(np.asarray(tmpimg), np.asarray(img))
|
|
d75c8c01c57c0223bfb75f9a7b2ddb9087476d38
|
agithub_test.py
|
agithub_test.py
|
#!/usr/bin/env python
import agithub
import unittest
class TestGithubObjectCreation(unittest.TestCase):
def test_user_pw(self):
gh = agithub.Github('korfuri', '1234')
self.assertTrue(gh is not None)
gh = agithub.Github(username='korfuri', password='1234')
self.assertTrue(gh is not None)
def test_token(self):
gh = agithub.Github(username='korfuri', token='deadbeef')
self.assertTrue(gh is not None)
gh = agithub.Github(token='deadbeef')
self.assertTrue(gh is not None)
def test_token_password(self):
with self.assertRaises(TypeError):
gh = agithub.Github(
username='korfuri', password='1234', token='deadbeef')
if __name__ == '__main__':
unittest.main()
|
Add unittests for Github object creation scenarios.
|
Add unittests for Github object creation scenarios.
Simply run with `./agithub_test.py`.
|
Python
|
mit
|
mozilla/agithub,jpaugh/agithub
|
Add unittests for Github object creation scenarios.
Simply run with `./agithub_test.py`.
|
#!/usr/bin/env python
import agithub
import unittest
class TestGithubObjectCreation(unittest.TestCase):
def test_user_pw(self):
gh = agithub.Github('korfuri', '1234')
self.assertTrue(gh is not None)
gh = agithub.Github(username='korfuri', password='1234')
self.assertTrue(gh is not None)
def test_token(self):
gh = agithub.Github(username='korfuri', token='deadbeef')
self.assertTrue(gh is not None)
gh = agithub.Github(token='deadbeef')
self.assertTrue(gh is not None)
def test_token_password(self):
with self.assertRaises(TypeError):
gh = agithub.Github(
username='korfuri', password='1234', token='deadbeef')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unittests for Github object creation scenarios.
Simply run with `./agithub_test.py`.<commit_after>
|
#!/usr/bin/env python
import agithub
import unittest
class TestGithubObjectCreation(unittest.TestCase):
def test_user_pw(self):
gh = agithub.Github('korfuri', '1234')
self.assertTrue(gh is not None)
gh = agithub.Github(username='korfuri', password='1234')
self.assertTrue(gh is not None)
def test_token(self):
gh = agithub.Github(username='korfuri', token='deadbeef')
self.assertTrue(gh is not None)
gh = agithub.Github(token='deadbeef')
self.assertTrue(gh is not None)
def test_token_password(self):
with self.assertRaises(TypeError):
gh = agithub.Github(
username='korfuri', password='1234', token='deadbeef')
if __name__ == '__main__':
unittest.main()
|
Add unittests for Github object creation scenarios.
Simply run with `./agithub_test.py`.#!/usr/bin/env python
import agithub
import unittest
class TestGithubObjectCreation(unittest.TestCase):
def test_user_pw(self):
gh = agithub.Github('korfuri', '1234')
self.assertTrue(gh is not None)
gh = agithub.Github(username='korfuri', password='1234')
self.assertTrue(gh is not None)
def test_token(self):
gh = agithub.Github(username='korfuri', token='deadbeef')
self.assertTrue(gh is not None)
gh = agithub.Github(token='deadbeef')
self.assertTrue(gh is not None)
def test_token_password(self):
with self.assertRaises(TypeError):
gh = agithub.Github(
username='korfuri', password='1234', token='deadbeef')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unittests for Github object creation scenarios.
Simply run with `./agithub_test.py`.<commit_after>#!/usr/bin/env python
import agithub
import unittest
class TestGithubObjectCreation(unittest.TestCase):
def test_user_pw(self):
gh = agithub.Github('korfuri', '1234')
self.assertTrue(gh is not None)
gh = agithub.Github(username='korfuri', password='1234')
self.assertTrue(gh is not None)
def test_token(self):
gh = agithub.Github(username='korfuri', token='deadbeef')
self.assertTrue(gh is not None)
gh = agithub.Github(token='deadbeef')
self.assertTrue(gh is not None)
def test_token_password(self):
with self.assertRaises(TypeError):
gh = agithub.Github(
username='korfuri', password='1234', token='deadbeef')
if __name__ == '__main__':
unittest.main()
|
|
989ea42f13d0e7b6952aaf84ee422851628a98ec
|
plugins/dicom_viewer/server/event_helper.py
|
plugins/dicom_viewer/server/event_helper.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import threading
from girder import events
class _EventHelper(object):
"""
Helper class to wait for plugin's data.process event handler to complete.
Usage:
with EventHelper('event.name') as helper:
self.model('upload').uploadFile(...)
handled = helper.wait()
"""
def __init__(self, eventName, timeout=10):
self.eventName = eventName
self.timeout = timeout
self.handlerName = 'HandlerCallback'
self.event = threading.Event()
def wait(self):
"""
Wait for the handler to complete.
:returns: True if the handler completes before the timeout or has
already been called.
"""
return self.event.wait(self.timeout)
def _callback(self, event):
self.event.set()
def __enter__(self):
events.bind(self.eventName, self.handlerName, self._callback)
return self
def __exit__(self, *args):
events.unbind(self.eventName, self.handlerName)
|
Add a helper class to wait for event handlers to complete
|
Add a helper class to wait for event handlers to complete
|
Python
|
apache-2.0
|
Kitware/girder,jbeezley/girder,RafaelPalomar/girder,Kitware/girder,data-exp-lab/girder,RafaelPalomar/girder,girder/girder,Xarthisius/girder,manthey/girder,RafaelPalomar/girder,data-exp-lab/girder,kotfic/girder,kotfic/girder,Xarthisius/girder,data-exp-lab/girder,girder/girder,Kitware/girder,Kitware/girder,manthey/girder,kotfic/girder,Xarthisius/girder,data-exp-lab/girder,girder/girder,manthey/girder,manthey/girder,RafaelPalomar/girder,jbeezley/girder,kotfic/girder,jbeezley/girder,girder/girder,kotfic/girder,jbeezley/girder,Xarthisius/girder,Xarthisius/girder,data-exp-lab/girder,RafaelPalomar/girder
|
Add a helper class to wait for event handlers to complete
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import threading
from girder import events
class _EventHelper(object):
"""
Helper class to wait for plugin's data.process event handler to complete.
Usage:
with EventHelper('event.name') as helper:
self.model('upload').uploadFile(...)
handled = helper.wait()
"""
def __init__(self, eventName, timeout=10):
self.eventName = eventName
self.timeout = timeout
self.handlerName = 'HandlerCallback'
self.event = threading.Event()
def wait(self):
"""
Wait for the handler to complete.
:returns: True if the handler completes before the timeout or has
already been called.
"""
return self.event.wait(self.timeout)
def _callback(self, event):
self.event.set()
def __enter__(self):
events.bind(self.eventName, self.handlerName, self._callback)
return self
def __exit__(self, *args):
events.unbind(self.eventName, self.handlerName)
|
<commit_before><commit_msg>Add a helper class to wait for event handlers to complete<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import threading
from girder import events
class _EventHelper(object):
"""
Helper class to wait for plugin's data.process event handler to complete.
Usage:
with EventHelper('event.name') as helper:
self.model('upload').uploadFile(...)
handled = helper.wait()
"""
def __init__(self, eventName, timeout=10):
self.eventName = eventName
self.timeout = timeout
self.handlerName = 'HandlerCallback'
self.event = threading.Event()
def wait(self):
"""
Wait for the handler to complete.
:returns: True if the handler completes before the timeout or has
already been called.
"""
return self.event.wait(self.timeout)
def _callback(self, event):
self.event.set()
def __enter__(self):
events.bind(self.eventName, self.handlerName, self._callback)
return self
def __exit__(self, *args):
events.unbind(self.eventName, self.handlerName)
|
Add a helper class to wait for event handlers to complete#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import threading
from girder import events
class _EventHelper(object):
"""
Helper class to wait for plugin's data.process event handler to complete.
Usage:
with EventHelper('event.name') as helper:
self.model('upload').uploadFile(...)
handled = helper.wait()
"""
def __init__(self, eventName, timeout=10):
self.eventName = eventName
self.timeout = timeout
self.handlerName = 'HandlerCallback'
self.event = threading.Event()
def wait(self):
"""
Wait for the handler to complete.
:returns: True if the handler completes before the timeout or has
already been called.
"""
return self.event.wait(self.timeout)
def _callback(self, event):
self.event.set()
def __enter__(self):
events.bind(self.eventName, self.handlerName, self._callback)
return self
def __exit__(self, *args):
events.unbind(self.eventName, self.handlerName)
|
<commit_before><commit_msg>Add a helper class to wait for event handlers to complete<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import threading
from girder import events
class _EventHelper(object):
"""
Helper class to wait for plugin's data.process event handler to complete.
Usage:
with EventHelper('event.name') as helper:
self.model('upload').uploadFile(...)
handled = helper.wait()
"""
def __init__(self, eventName, timeout=10):
self.eventName = eventName
self.timeout = timeout
self.handlerName = 'HandlerCallback'
self.event = threading.Event()
def wait(self):
"""
Wait for the handler to complete.
:returns: True if the handler completes before the timeout or has
already been called.
"""
return self.event.wait(self.timeout)
def _callback(self, event):
self.event.set()
def __enter__(self):
events.bind(self.eventName, self.handlerName, self._callback)
return self
def __exit__(self, *args):
events.unbind(self.eventName, self.handlerName)
|
|
7fc860c0480c5269d812950bc3e738564cb5577c
|
tests/test_bot_filter.py
|
tests/test_bot_filter.py
|
import pytest
import responses
from instabot.api.config import API_URL
from .test_bot import TestBot
from .test_variables import TEST_USERNAME_INFO_ITEM
class TestBotFilter(TestBot):
@pytest.mark.parametrize('filter_users,filter_business_accounts,filter_verified_accounts,expected', [
(False, False, False, True),
(True, False, False, True),
(True, True, False, False),
(True, False, True, False),
(True, True, True, False),
])
@responses.activate
def test_check_user(self, filter_users, filter_business_accounts, filter_verified_accounts, expected):
self.BOT.filter_users = filter_users
self.BOT.filter_business_accounts = filter_business_accounts
self.BOT.filter_verified_accounts = filter_verified_accounts
self.BOT.following = [1]
user_id = TEST_USERNAME_INFO_ITEM['pk']
TEST_USERNAME_INFO_ITEM['is_verified'] = True
TEST_USERNAME_INFO_ITEM['is_business'] = True
response_data = {
'status': 'ok',
'user': TEST_USERNAME_INFO_ITEM
}
responses.add(
responses.GET, '{API_URL}users/{user_id}/info/'.format(
API_URL=API_URL, user_id=user_id
), status=200, json=response_data)
result = self.BOT.check_user(user_id)
assert result == expected
|
Add test on check user with filter users, business and verified
|
Add test on check user with filter users, business and verified
|
Python
|
apache-2.0
|
instagrambot/instabot,instagrambot/instabot,ohld/instabot
|
Add test on check user with filter users, business and verified
|
import pytest
import responses
from instabot.api.config import API_URL
from .test_bot import TestBot
from .test_variables import TEST_USERNAME_INFO_ITEM
class TestBotFilter(TestBot):
@pytest.mark.parametrize('filter_users,filter_business_accounts,filter_verified_accounts,expected', [
(False, False, False, True),
(True, False, False, True),
(True, True, False, False),
(True, False, True, False),
(True, True, True, False),
])
@responses.activate
def test_check_user(self, filter_users, filter_business_accounts, filter_verified_accounts, expected):
self.BOT.filter_users = filter_users
self.BOT.filter_business_accounts = filter_business_accounts
self.BOT.filter_verified_accounts = filter_verified_accounts
self.BOT.following = [1]
user_id = TEST_USERNAME_INFO_ITEM['pk']
TEST_USERNAME_INFO_ITEM['is_verified'] = True
TEST_USERNAME_INFO_ITEM['is_business'] = True
response_data = {
'status': 'ok',
'user': TEST_USERNAME_INFO_ITEM
}
responses.add(
responses.GET, '{API_URL}users/{user_id}/info/'.format(
API_URL=API_URL, user_id=user_id
), status=200, json=response_data)
result = self.BOT.check_user(user_id)
assert result == expected
|
<commit_before><commit_msg>Add test on check user with filter users, business and verified<commit_after>
|
import pytest
import responses
from instabot.api.config import API_URL
from .test_bot import TestBot
from .test_variables import TEST_USERNAME_INFO_ITEM
class TestBotFilter(TestBot):
@pytest.mark.parametrize('filter_users,filter_business_accounts,filter_verified_accounts,expected', [
(False, False, False, True),
(True, False, False, True),
(True, True, False, False),
(True, False, True, False),
(True, True, True, False),
])
@responses.activate
def test_check_user(self, filter_users, filter_business_accounts, filter_verified_accounts, expected):
self.BOT.filter_users = filter_users
self.BOT.filter_business_accounts = filter_business_accounts
self.BOT.filter_verified_accounts = filter_verified_accounts
self.BOT.following = [1]
user_id = TEST_USERNAME_INFO_ITEM['pk']
TEST_USERNAME_INFO_ITEM['is_verified'] = True
TEST_USERNAME_INFO_ITEM['is_business'] = True
response_data = {
'status': 'ok',
'user': TEST_USERNAME_INFO_ITEM
}
responses.add(
responses.GET, '{API_URL}users/{user_id}/info/'.format(
API_URL=API_URL, user_id=user_id
), status=200, json=response_data)
result = self.BOT.check_user(user_id)
assert result == expected
|
Add test on check user with filter users, business and verifiedimport pytest
import responses
from instabot.api.config import API_URL
from .test_bot import TestBot
from .test_variables import TEST_USERNAME_INFO_ITEM
class TestBotFilter(TestBot):
@pytest.mark.parametrize('filter_users,filter_business_accounts,filter_verified_accounts,expected', [
(False, False, False, True),
(True, False, False, True),
(True, True, False, False),
(True, False, True, False),
(True, True, True, False),
])
@responses.activate
def test_check_user(self, filter_users, filter_business_accounts, filter_verified_accounts, expected):
self.BOT.filter_users = filter_users
self.BOT.filter_business_accounts = filter_business_accounts
self.BOT.filter_verified_accounts = filter_verified_accounts
self.BOT.following = [1]
user_id = TEST_USERNAME_INFO_ITEM['pk']
TEST_USERNAME_INFO_ITEM['is_verified'] = True
TEST_USERNAME_INFO_ITEM['is_business'] = True
response_data = {
'status': 'ok',
'user': TEST_USERNAME_INFO_ITEM
}
responses.add(
responses.GET, '{API_URL}users/{user_id}/info/'.format(
API_URL=API_URL, user_id=user_id
), status=200, json=response_data)
result = self.BOT.check_user(user_id)
assert result == expected
|
<commit_before><commit_msg>Add test on check user with filter users, business and verified<commit_after>import pytest
import responses
from instabot.api.config import API_URL
from .test_bot import TestBot
from .test_variables import TEST_USERNAME_INFO_ITEM
class TestBotFilter(TestBot):
@pytest.mark.parametrize('filter_users,filter_business_accounts,filter_verified_accounts,expected', [
(False, False, False, True),
(True, False, False, True),
(True, True, False, False),
(True, False, True, False),
(True, True, True, False),
])
@responses.activate
def test_check_user(self, filter_users, filter_business_accounts, filter_verified_accounts, expected):
self.BOT.filter_users = filter_users
self.BOT.filter_business_accounts = filter_business_accounts
self.BOT.filter_verified_accounts = filter_verified_accounts
self.BOT.following = [1]
user_id = TEST_USERNAME_INFO_ITEM['pk']
TEST_USERNAME_INFO_ITEM['is_verified'] = True
TEST_USERNAME_INFO_ITEM['is_business'] = True
response_data = {
'status': 'ok',
'user': TEST_USERNAME_INFO_ITEM
}
responses.add(
responses.GET, '{API_URL}users/{user_id}/info/'.format(
API_URL=API_URL, user_id=user_id
), status=200, json=response_data)
result = self.BOT.check_user(user_id)
assert result == expected
|
|
a2c006a2ce5524a9d4afdd9086fa3c76704cae08
|
bashlint.py
|
bashlint.py
|
#!/usr/bin/env python
import os
from fnmatch import fnmatch
def filename_match(filename, patterns, default=True):
"""Check if patterns contains a pattern that matches filename.
If patterns is not specified, this always returns True.
"""
if not patterns:
return default
return any(fnmatch(filename, pattern) for pattern in patterns)
class StyleGuide(object):
"""Bash style guide."""
FILE_PATTERNS = ("*.sh", )
def check_paths(self, paths=None):
"""Run all checks on the paths."""
try:
for path in paths or ["."]:
if os.path.isdir(path):
self._check_dir(path)
except KeyboardInterrupt:
print("... stopped")
def _check_dir(self, path):
"""Check all files in this directory and all subdirectories."""
for root, dirs, files in os.walk(path):
for filename in files:
if filename_match(filename, self.FILE_PATTERNS):
self._run_checks(os.path.join(root, filename))
def _run_checks(self, filename):
"""Run checks for a given file."""
print("Checking %s file." % filename)
def main():
guide = StyleGuide()
guide.check_paths()
if __name__ == "__main__":
main()
|
Implement shell files searching by pattern
|
Implement shell files searching by pattern
Change-Id: I6bd6743a1b9f15fd704dfa38b1cce34b5948f0df
|
Python
|
mit
|
skudriashev/bashlint
|
Implement shell files searching by pattern
Change-Id: I6bd6743a1b9f15fd704dfa38b1cce34b5948f0df
|
#!/usr/bin/env python
import os
from fnmatch import fnmatch
def filename_match(filename, patterns, default=True):
"""Check if patterns contains a pattern that matches filename.
If patterns is not specified, this always returns True.
"""
if not patterns:
return default
return any(fnmatch(filename, pattern) for pattern in patterns)
class StyleGuide(object):
"""Bash style guide."""
FILE_PATTERNS = ("*.sh", )
def check_paths(self, paths=None):
"""Run all checks on the paths."""
try:
for path in paths or ["."]:
if os.path.isdir(path):
self._check_dir(path)
except KeyboardInterrupt:
print("... stopped")
def _check_dir(self, path):
"""Check all files in this directory and all subdirectories."""
for root, dirs, files in os.walk(path):
for filename in files:
if filename_match(filename, self.FILE_PATTERNS):
self._run_checks(os.path.join(root, filename))
def _run_checks(self, filename):
"""Run checks for a given file."""
print("Checking %s file." % filename)
def main():
guide = StyleGuide()
guide.check_paths()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Implement shell files searching by pattern
Change-Id: I6bd6743a1b9f15fd704dfa38b1cce34b5948f0df<commit_after>
|
#!/usr/bin/env python
import os
from fnmatch import fnmatch
def filename_match(filename, patterns, default=True):
"""Check if patterns contains a pattern that matches filename.
If patterns is not specified, this always returns True.
"""
if not patterns:
return default
return any(fnmatch(filename, pattern) for pattern in patterns)
class StyleGuide(object):
"""Bash style guide."""
FILE_PATTERNS = ("*.sh", )
def check_paths(self, paths=None):
"""Run all checks on the paths."""
try:
for path in paths or ["."]:
if os.path.isdir(path):
self._check_dir(path)
except KeyboardInterrupt:
print("... stopped")
def _check_dir(self, path):
"""Check all files in this directory and all subdirectories."""
for root, dirs, files in os.walk(path):
for filename in files:
if filename_match(filename, self.FILE_PATTERNS):
self._run_checks(os.path.join(root, filename))
def _run_checks(self, filename):
"""Run checks for a given file."""
print("Checking %s file." % filename)
def main():
guide = StyleGuide()
guide.check_paths()
if __name__ == "__main__":
main()
|
Implement shell files searching by pattern
Change-Id: I6bd6743a1b9f15fd704dfa38b1cce34b5948f0df#!/usr/bin/env python
import os
from fnmatch import fnmatch
def filename_match(filename, patterns, default=True):
"""Check if patterns contains a pattern that matches filename.
If patterns is not specified, this always returns True.
"""
if not patterns:
return default
return any(fnmatch(filename, pattern) for pattern in patterns)
class StyleGuide(object):
"""Bash style guide."""
FILE_PATTERNS = ("*.sh", )
def check_paths(self, paths=None):
"""Run all checks on the paths."""
try:
for path in paths or ["."]:
if os.path.isdir(path):
self._check_dir(path)
except KeyboardInterrupt:
print("... stopped")
def _check_dir(self, path):
"""Check all files in this directory and all subdirectories."""
for root, dirs, files in os.walk(path):
for filename in files:
if filename_match(filename, self.FILE_PATTERNS):
self._run_checks(os.path.join(root, filename))
def _run_checks(self, filename):
"""Run checks for a given file."""
print("Checking %s file." % filename)
def main():
guide = StyleGuide()
guide.check_paths()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Implement shell files searching by pattern
Change-Id: I6bd6743a1b9f15fd704dfa38b1cce34b5948f0df<commit_after>#!/usr/bin/env python
import os
from fnmatch import fnmatch
def filename_match(filename, patterns, default=True):
"""Check if patterns contains a pattern that matches filename.
If patterns is not specified, this always returns True.
"""
if not patterns:
return default
return any(fnmatch(filename, pattern) for pattern in patterns)
class StyleGuide(object):
"""Bash style guide."""
FILE_PATTERNS = ("*.sh", )
def check_paths(self, paths=None):
"""Run all checks on the paths."""
try:
for path in paths or ["."]:
if os.path.isdir(path):
self._check_dir(path)
except KeyboardInterrupt:
print("... stopped")
def _check_dir(self, path):
"""Check all files in this directory and all subdirectories."""
for root, dirs, files in os.walk(path):
for filename in files:
if filename_match(filename, self.FILE_PATTERNS):
self._run_checks(os.path.join(root, filename))
def _run_checks(self, filename):
"""Run checks for a given file."""
print("Checking %s file." % filename)
def main():
guide = StyleGuide()
guide.check_paths()
if __name__ == "__main__":
main()
|
|
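A quick illustration (not part of the record above): the style guide's file filter relies on fnmatch glob patterns, and the record's filename_match helper can be exercised directly; the file names below are invented for the example.

from fnmatch import fnmatch

def filename_match(filename, patterns, default=True):
    # Same helper as in the record: an empty pattern list matches everything.
    if not patterns:
        return default
    return any(fnmatch(filename, pattern) for pattern in patterns)

# Shell scripts match the "*.sh" pattern used by StyleGuide; other files do not.
assert filename_match("deploy.sh", ("*.sh",)) is True
assert filename_match("deploy.py", ("*.sh",)) is False
# With no patterns at all, the default (True) is returned.
assert filename_match("anything.txt", ()) is True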
cbc08671019a62e9840d6f0decf9b883eb2eeec4
|
binary_search.py
|
binary_search.py
|
from __future__ import division
def recursive_binary_search(sorted_sequence, key, start=0, end=None):
'''Returns the index of `key` in the `sorted_sequence`, or None if `key` is
not in `sorted_sequence`.'''
end = end or len(sorted_sequence)
if (end - start) < 0:
return None
middle = start + ((end - start) // 2)
if sorted_sequence[middle] == key:
return middle
elif sorted_sequence[middle] < key:
return recursive_binary_search(sorted_sequence, key, middle + 1, end)
else:
return recursive_binary_search(sorted_sequence, key, start, middle - 1)
def iterative_binary_search(sorted_sequence, key):
'''Returns the index of `key` in the `sorted_sequence`, or None if `key` is
not in `sorted_sequence`.'''
start = 0
end = len(sorted_sequence)
while start < end:
middle = start + ((end - start) // 2)
if sorted_sequence[middle] == key:
return middle
elif sorted_sequence[middle] < key:
start = middle + 1
else:
end = middle
return None
if __name__ == '__main__':
seq = [1, 1, 2, 5, 9, 11, 11, 11, 12, 18, 29, 37, 38, 40, 67, 78, 94, 94]
assert(recursive_binary_search(seq, 12) == 8)
assert(recursive_binary_search(seq, 13) == None)
assert(iterative_binary_search(seq, 12) == 8)
assert(iterative_binary_search(seq, 13) == None)
|
Add Python binary search implementation (recursive and iterative)
|
Add Python binary search implementation (recursive and iterative)
|
Python
|
mit
|
gg/algorithms,gg/algorithms,gg/algorithms
|
Add Python binary search implementation (recursive and iterative)
|
from __future__ import division
def recursive_binary_search(sorted_sequence, key, start=0, end=None):
'''Returns the index of `key` in the `sorted_sequence`, or None if `key` is
not in `sorted_sequence`.'''
end = end or len(sorted_sequence)
if (end - start) < 0:
return None
middle = start + ((end - start) // 2)
if sorted_sequence[middle] == key:
return middle
elif sorted_sequence[middle] < key:
return recursive_binary_search(sorted_sequence, key, middle + 1, end)
else:
return recursive_binary_search(sorted_sequence, key, start, middle - 1)
def iterative_binary_search(sorted_sequence, key):
'''Returns the index of `key` in the `sorted_sequence`, or None if `key` is
not in `sorted_sequence`.'''
start = 0
end = len(sorted_sequence)
while start < end:
middle = start + ((end - start) // 2)
if sorted_sequence[middle] == key:
return middle
elif sorted_sequence[middle] < key:
start = middle + 1
else:
end = middle
return None
if __name__ == '__main__':
seq = [1, 1, 2, 5, 9, 11, 11, 11, 12, 18, 29, 37, 38, 40, 67, 78, 94, 94]
assert(recursive_binary_search(seq, 12) == 8)
assert(recursive_binary_search(seq, 13) == None)
assert(iterative_binary_search(seq, 12) == 8)
assert(iterative_binary_search(seq, 13) == None)
|
<commit_before><commit_msg>Add Python binary search implementation (recursive and iterative)<commit_after>
|
from __future__ import division
def recursive_binary_search(sorted_sequence, key, start=0, end=None):
'''Returns the index of `key` in the `sorted_sequence`, or None if `key` is
not in `sorted_sequence`.'''
end = end or len(sorted_sequence)
if (end - start) < 0:
return None
middle = start + ((end - start) // 2)
if sorted_sequence[middle] == key:
return middle
elif sorted_sequence[middle] < key:
return recursive_binary_search(sorted_sequence, key, middle + 1, end)
else:
return recursive_binary_search(sorted_sequence, key, start, middle - 1)
def iterative_binary_search(sorted_sequence, key):
'''Returns the index of `key` in the `sorted_sequence`, or None if `key` is
not in `sorted_sequence`.'''
start = 0
end = len(sorted_sequence)
while start < end:
middle = start + ((end - start) // 2)
if sorted_sequence[middle] == key:
return middle
elif sorted_sequence[middle] < key:
start = middle + 1
else:
end = middle
return None
if __name__ == '__main__':
seq = [1, 1, 2, 5, 9, 11, 11, 11, 12, 18, 29, 37, 38, 40, 67, 78, 94, 94]
assert(recursive_binary_search(seq, 12) == 8)
assert(recursive_binary_search(seq, 13) == None)
assert(iterative_binary_search(seq, 12) == 8)
assert(iterative_binary_search(seq, 13) == None)
|
Add Python binary search implementation (recursive and iterative)from __future__ import division
def recursive_binary_search(sorted_sequence, key, start=0, end=None):
'''Returns the index of `key` in the `sorted_sequence`, or None if `key` is
not in `sorted_sequence`.'''
end = end or len(sorted_sequence)
if (end - start) < 0:
return None
middle = start + ((end - start) // 2)
if sorted_sequence[middle] == key:
return middle
elif sorted_sequence[middle] < key:
return recursive_binary_search(sorted_sequence, key, middle + 1, end)
else:
return recursive_binary_search(sorted_sequence, key, start, middle - 1)
def iterative_binary_search(sorted_sequence, key):
'''Returns the index of `key` in the `sorted_sequence`, or None if `key` is
not in `sorted_sequence`.'''
start = 0
end = len(sorted_sequence)
while start < end:
middle = start + ((end - start) // 2)
if sorted_sequence[middle] == key:
return middle
elif sorted_sequence[middle] < key:
start = middle + 1
else:
end = middle
return None
if __name__ == '__main__':
seq = [1, 1, 2, 5, 9, 11, 11, 11, 12, 18, 29, 37, 38, 40, 67, 78, 94, 94]
assert(recursive_binary_search(seq, 12) == 8)
assert(recursive_binary_search(seq, 13) == None)
assert(iterative_binary_search(seq, 12) == 8)
assert(iterative_binary_search(seq, 13) == None)
|
<commit_before><commit_msg>Add Python binary search implementation (recursive and iterative)<commit_after>from __future__ import division
def recursive_binary_search(sorted_sequence, key, start=0, end=None):
'''Returns the index of `key` in the `sorted_sequence`, or None if `key` is
not in `sorted_sequence`.'''
end = end or len(sorted_sequence)
if (end - start) < 0:
return None
middle = start + ((end - start) // 2)
if sorted_sequence[middle] == key:
return middle
elif sorted_sequence[middle] < key:
return recursive_binary_search(sorted_sequence, key, middle + 1, end)
else:
return recursive_binary_search(sorted_sequence, key, start, middle - 1)
def iterative_binary_search(sorted_sequence, key):
'''Returns the index of `key` in the `sorted_sequence`, or None if `key` is
not in `sorted_sequence`.'''
start = 0
end = len(sorted_sequence)
while start < end:
middle = start + ((end - start) // 2)
if sorted_sequence[middle] == key:
return middle
elif sorted_sequence[middle] < key:
start = middle + 1
else:
end = middle
return None
if __name__ == '__main__':
seq = [1, 1, 2, 5, 9, 11, 11, 11, 12, 18, 29, 37, 38, 40, 67, 78, 94, 94]
assert(recursive_binary_search(seq, 12) == 8)
assert(recursive_binary_search(seq, 13) == None)
assert(iterative_binary_search(seq, 12) == 8)
assert(iterative_binary_search(seq, 13) == None)
|
|
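For comparison only (not part of the commit above), the standard library's bisect module answers the same membership-by-index question; a minimal sketch using the record's test sequence:

from bisect import bisect_left

def bisect_index(sorted_sequence, key):
    # Find the leftmost insertion point, then confirm the key is actually present.
    i = bisect_left(sorted_sequence, key)
    if i < len(sorted_sequence) and sorted_sequence[i] == key:
        return i
    return None

seq = [1, 1, 2, 5, 9, 11, 11, 11, 12, 18, 29, 37, 38, 40, 67, 78, 94, 94]
assert bisect_index(seq, 12) == 8
assert bisect_index(seq, 13) is None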
93c8b63df4794765164f3a50f48230121cb334c0
|
python/np_types.py
|
python/np_types.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
# TODO: generate following sizes with templates
state_size = 4
input_size = 2
output_size = 2
second_order_size = 2
natural_t = np.int32
real_t = np.float64
state_t = (real_t, (state_size, 1))
input_t = (real_t, (input_size, 1))
output_t = (real_t, (output_size, 1))
state_matrix_t = (real_t, (state_size, state_size))
second_order_matrix_t = (real_t, (second_order_size, second_order_size))
lqr_gain_matrix_t = (real_t, (input_size, state_size))
kalman_gain_matrix_t = (real_t, (state_size, output_size))
symmetric_state_matrix_t = state_matrix_t
symmetric_output_matrix_t = (real_t, (output_size, output_size))
symmetric_input_matrix_t = (real_t, (input_size, input_size))
bicycle_t = [
('v', real_t), # bicycle forward speed
('dt', real_t), # discrete time system sample time
('M',) + second_order_matrix_t, # mass matrix
('C1',) + second_order_matrix_t, # v proportional damping matrix
('K0',) + second_order_matrix_t, # v independent stiffness matrix
('K2',) + second_order_matrix_t] # v**2 prop. stiffness matrix
kalman_t = [
('x',) + state_t, # state estimate
('P',) + symmetric_state_matrix_t, # error covariance
('Q',) + symmetric_state_matrix_t, # process noise covariance
('R',) + symmetric_input_matrix_t, # measurement noise covariance
('K',) + kalman_gain_matrix_t] # Kalman gain
lqr_t = [
('n', natural_t), # horizon length (in samples)
('r',) + state_t, # state reference
('Q',) + symmetric_state_matrix_t, # state cost
('R',) + symmetric_input_matrix_t, # input cost
('P',) + symmetric_state_matrix_t, # horizon cost
('K',) + lqr_gain_matrix_t] # LQR gain
sample_t = [
('ts', natural_t), # timestamp
('bicycle', bicycle_t),
('kalman', kalman_t),
('lqr', lqr_t),
('x',) + state_t, # system state
('u',) + input_t, # system input
('y',) + output_t, # system output
('z',) + output_t] # system output with noise
|
Add Python data types for sample data
|
Add Python data types for sample data
|
Python
|
bsd-2-clause
|
oliverlee/bicycle,oliverlee/bicycle
|
Add Python data types for sample data
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
# TODO: generate following sizes with templates
state_size = 4
input_size = 2
output_size = 2
second_order_size = 2
natural_t = np.int32
real_t = np.float64
state_t = (real_t, (state_size, 1))
input_t = (real_t, (input_size, 1))
output_t = (real_t, (output_size, 1))
state_matrix_t = (real_t, (state_size, state_size))
second_order_matrix_t = (real_t, (second_order_size, second_order_size))
lqr_gain_matrix_t = (real_t, (input_size, state_size))
kalman_gain_matrix_t = (real_t, (state_size, output_size))
symmetric_state_matrix_t = state_matrix_t
symmetric_output_matrix_t = (real_t, (output_size, output_size))
symmetric_input_matrix_t = (real_t, (input_size, input_size))
bicycle_t = [
('v', real_t), # bicycle forward speed
('dt', real_t), # discrete time system sample time
('M',) + second_order_matrix_t, # mass matrix
('C1',) + second_order_matrix_t, # v proportional damping matrix
('K0',) + second_order_matrix_t, # v independent stiffness matrix
('K2',) + second_order_matrix_t] # v**2 prop. stiffness matrix
kalman_t = [
('x',) + state_t, # state estimate
('P',) + symmetric_state_matrix_t, # error covariance
('Q',) + symmetric_state_matrix_t, # process noise covariance
('R',) + symmetric_input_matrix_t, # measurement noise covariance
('K',) + kalman_gain_matrix_t] # Kalman gain
lqr_t = [
('n', natural_t), # horizon length (in samples)
('r',) + state_t, # state reference
('Q',) + symmetric_state_matrix_t, # state cost
('R',) + symmetric_input_matrix_t, # input cost
('P',) + symmetric_state_matrix_t, # horizon cost
('K',) + lqr_gain_matrix_t] # LQR gain
sample_t = [
('ts', natural_t), # timestamp
('bicycle', bicycle_t),
('kalman', kalman_t),
('lqr', lqr_t),
('x',) + state_t, # system state
('u',) + input_t, # system input
('y',) + output_t, # system output
('z',) + output_t] # system output with noise
|
<commit_before><commit_msg>Add Python data types for sample data<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
# TODO: generate following sizes with templates
state_size = 4
input_size = 2
output_size = 2
second_order_size = 2
natural_t = np.int32
real_t = np.float64
state_t = (real_t, (state_size, 1))
input_t = (real_t, (input_size, 1))
output_t = (real_t, (output_size, 1))
state_matrix_t = (real_t, (state_size, state_size))
second_order_matrix_t = (real_t, (second_order_size, second_order_size))
lqr_gain_matrix_t = (real_t, (input_size, state_size))
kalman_gain_matrix_t = (real_t, (state_size, output_size))
symmetric_state_matrix_t = state_matrix_t
symmetric_output_matrix_t = (real_t, (output_size, output_size))
symmetric_input_matrix_t = (real_t, (input_size, input_size))
bicycle_t = [
('v', real_t), # bicycle forward speed
('dt', real_t), # discrete time system sample time
('M',) + second_order_matrix_t, # mass matrix
('C1',) + second_order_matrix_t, # v proportional damping matrix
('K0',) + second_order_matrix_t, # v independent stiffness matrix
('K2',) + second_order_matrix_t] # v**2 prop. stiffness matrix
kalman_t = [
('x',) + state_t, # state estimate
('P',) + symmetric_state_matrix_t, # error covariance
('Q',) + symmetric_state_matrix_t, # process noise covariance
('R',) + symmetric_input_matrix_t, # measurement noise covariance
('K',) + kalman_gain_matrix_t] # Kalman gain
lqr_t = [
('n', natural_t), # horizon length (in samples)
('r',) + state_t, # state reference
('Q',) + symmetric_state_matrix_t, # state cost
('R',) + symmetric_input_matrix_t, # input cost
('P',) + symmetric_state_matrix_t, # horizon cost
('K',) + lqr_gain_matrix_t] # LQR gain
sample_t = [
('ts', natural_t), # timestamp
('bicycle', bicycle_t),
('kalman', kalman_t),
('lqr', lqr_t),
('x',) + state_t, # system state
('u',) + input_t, # system input
('y',) + output_t, # system output
('z',) + output_t] # system output with noise
|
Add Python data types for sample data#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
# TODO: generate following sizes with templates
state_size = 4
input_size = 2
output_size = 2
second_order_size = 2
natural_t = np.int32
real_t = np.float64
state_t = (real_t, (state_size, 1))
input_t = (real_t, (input_size, 1))
output_t = (real_t, (output_size, 1))
state_matrix_t = (real_t, (state_size, state_size))
second_order_matrix_t = (real_t, (second_order_size, second_order_size))
lqr_gain_matrix_t = (real_t, (input_size, state_size))
kalman_gain_matrix_t = (real_t, (state_size, output_size))
symmetric_state_matrix_t = state_matrix_t
symmetric_output_matrix_t = (real_t, (output_size, output_size))
symmetric_input_matrix_t = (real_t, (input_size, input_size))
bicycle_t = [
('v', real_t), # bicycle forward speed
('dt', real_t), # discrete time system sample time
('M',) + second_order_matrix_t, # mass matrix
('C1',) + second_order_matrix_t, # v proportional damping matrix
('K0',) + second_order_matrix_t, # v independent stiffness matrix
('K2',) + second_order_matrix_t] # v**2 prop. stiffness matrix
kalman_t = [
('x',) + state_t, # state estimate
('P',) + symmetric_state_matrix_t, # error covariance
('Q',) + symmetric_state_matrix_t, # process noise covariance
('R',) + symmetric_input_matrix_t, # measurement noise covariance
('K',) + kalman_gain_matrix_t] # Kalman gain
lqr_t = [
('n', natural_t), # horizon length (in samples)
('r',) + state_t, # state reference
('Q',) + symmetric_state_matrix_t, # state cost
('R',) + symmetric_input_matrix_t, # input cost
('P',) + symmetric_state_matrix_t, # horizon cost
('K',) + lqr_gain_matrix_t] # LQR gain
sample_t = [
('ts', natural_t), # timestamp
('bicycle', bicycle_t),
('kalman', kalman_t),
('lqr', lqr_t),
('x',) + state_t, # system state
('u',) + input_t, # system input
('y',) + output_t, # system output
('z',) + output_t] # system output with noise
|
<commit_before><commit_msg>Add Python data types for sample data<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
# TODO: generate following sizes with templates
state_size = 4
input_size = 2
output_size = 2
second_order_size = 2
natural_t = np.int32
real_t = np.float64
state_t = (real_t, (state_size, 1))
input_t = (real_t, (input_size, 1))
output_t = (real_t, (output_size, 1))
state_matrix_t = (real_t, (state_size, state_size))
second_order_matrix_t = (real_t, (second_order_size, second_order_size))
lqr_gain_matrix_t = (real_t, (input_size, state_size))
kalman_gain_matrix_t = (real_t, (state_size, output_size))
symmetric_state_matrix_t = state_matrix_t
symmetric_output_matrix_t = (real_t, (output_size, output_size))
symmetric_input_matrix_t = (real_t, (input_size, input_size))
bicycle_t = [
('v', real_t), # bicycle forward speed
('dt', real_t), # discrete time system sample time
('M',) + second_order_matrix_t, # mass matrix
('C1',) + second_order_matrix_t, # v proportional damping matrix
('K0',) + second_order_matrix_t, # v independent stiffness matrix
('K2',) + second_order_matrix_t] # v**2 prop. stiffness matrix
kalman_t = [
('x',) + state_t, # state estimate
('P',) + symmetric_state_matrix_t, # error covariance
('Q',) + symmetric_state_matrix_t, # process noise covariance
('R',) + symmetric_input_matrix_t, # measurement noise covariance
('K',) + kalman_gain_matrix_t] # Kalman gain
lqr_t = [
('n', natural_t), # horizon length (in samples)
('r',) + state_t, # state reference
('Q',) + symmetric_state_matrix_t, # state cost
('R',) + symmetric_input_matrix_t, # input cost
('P',) + symmetric_state_matrix_t, # horizon cost
('K',) + lqr_gain_matrix_t] # LQR gain
sample_t = [
('ts', natural_t), # timestamp
('bicycle', bicycle_t),
('kalman', kalman_t),
('lqr', lqr_t),
('x',) + state_t, # system state
('u',) + input_t, # system input
('y',) + output_t, # system output
('z',) + output_t] # system output with noise
|
|
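A brief aside (not part of the record): field lists like sample_t are intended for numpy's structured-dtype machinery. A minimal sketch of that usage with a simplified, made-up field list standing in for the record's definitions:

import numpy as np

# Simplified stand-in for the record's nested dtype definitions.
state_t = (np.float64, (4, 1))
kalman_t = [('x',) + state_t, ('K', np.float64, (4, 2))]
sample_t = [('ts', np.int32), ('kalman', kalman_t), ('x',) + state_t]

dtype = np.dtype(sample_t)             # build the compound dtype
samples = np.zeros(100, dtype=dtype)   # preallocate a log of 100 samples

samples[0]['ts'] = 42
samples[0]['kalman']['x'][:] = 1.0     # nested fields are addressed by name
assert samples[0]['x'].shape == (4, 1)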
346aa9624092483de1fe28878c90ab7fdbaa543f
|
organize/migrations/0005_auto_20170120_1810.py
|
organize/migrations/0005_auto_20170120_1810.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-20 18:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organize', '0004_auto_20170116_1808'),
]
operations = [
migrations.AlterField(
model_name='coorganizer',
name='last_name',
field=models.CharField(blank=True, default='', max_length=30),
),
migrations.AlterField(
model_name='eventapplication',
name='main_organizer_last_name',
field=models.CharField(blank=True, default='', max_length=30),
),
]
|
Add defaults to last name of organizers
|
Add defaults to last name of organizers
|
Python
|
bsd-3-clause
|
patjouk/djangogirls,DjangoGirls/djangogirls,DjangoGirls/djangogirls,DjangoGirls/djangogirls,patjouk/djangogirls,patjouk/djangogirls,patjouk/djangogirls
|
Add defaults to last name of organizers
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-20 18:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organize', '0004_auto_20170116_1808'),
]
operations = [
migrations.AlterField(
model_name='coorganizer',
name='last_name',
field=models.CharField(blank=True, default='', max_length=30),
),
migrations.AlterField(
model_name='eventapplication',
name='main_organizer_last_name',
field=models.CharField(blank=True, default='', max_length=30),
),
]
|
<commit_before><commit_msg>Add defaults to last name of organizers<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-20 18:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organize', '0004_auto_20170116_1808'),
]
operations = [
migrations.AlterField(
model_name='coorganizer',
name='last_name',
field=models.CharField(blank=True, default='', max_length=30),
),
migrations.AlterField(
model_name='eventapplication',
name='main_organizer_last_name',
field=models.CharField(blank=True, default='', max_length=30),
),
]
|
Add defaults to last name of organizers# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-20 18:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organize', '0004_auto_20170116_1808'),
]
operations = [
migrations.AlterField(
model_name='coorganizer',
name='last_name',
field=models.CharField(blank=True, default='', max_length=30),
),
migrations.AlterField(
model_name='eventapplication',
name='main_organizer_last_name',
field=models.CharField(blank=True, default='', max_length=30),
),
]
|
<commit_before><commit_msg>Add defaults to last name of organizers<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-20 18:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organize', '0004_auto_20170116_1808'),
]
operations = [
migrations.AlterField(
model_name='coorganizer',
name='last_name',
field=models.CharField(blank=True, default='', max_length=30),
),
migrations.AlterField(
model_name='eventapplication',
name='main_organizer_last_name',
field=models.CharField(blank=True, default='', max_length=30),
),
]
|
|
6b8554bca4be17bc2b9fdcf2bb2f349900d2b4cb
|
rdmo/core/management/commands/make_theme.py
|
rdmo/core/management/commands/make_theme.py
|
from django.apps import apps
from django.core.management.base import BaseCommand
import os.path
import re
from shutil import copyfile
class Command(BaseCommand):
def get_folders(self):
rdmo_core = os.path.join(apps.get_app_config('rdmo').path, 'core')
rdmo_app_theme = os.path.join(os.getcwd(), 'theme')
rdmo_app_config = os.path.join(os.getcwd(), 'config', 'settings', 'local.py')
return rdmo_core, rdmo_app_theme, rdmo_app_config
def mkdir(self, directory):
if not os.path.exists(directory):
os.makedirs(directory)
def copy(self, source_file):
fol = self.get_folders()
source_file = os.path.join(fol[0], source_file)
target_file = source_file.replace(fol[0], fol[1])
print('Copying ' + source_file + ' -> ' + target_file)
self.mkdir(re.search(r'.*(?=\/)', target_file).group(0))
copyfile(source_file, target_file)
def enable_theme(self):
print('Enabling theme by adding the necessary config line')
rxScheme = r'.*?THEME_DIR.*?=.*?[a-z]'
replaced = False
new_arr = []
fol = self.get_folders()
with open(fol[2]) as f:
content = f.read().splitlines()
for line in content:
append = True
if bool(re.search(rxScheme, line)) is True and replaced is True:
append = False
if bool(re.search(rxScheme, line)) is True and replaced is False:
line = 'THEME_DIR = os.path.join(BASE_DIR, \'theme\')'
replaced = True
if append is True:
new_arr.append(line)
if bool(re.search(rxScheme, line)) is True:
replaced = True
self.write_file(fol[2], new_arr)
def write_file(self, filename, data):
with open(filename, 'w') as fp:
for line in data:
fp.write(line + '\n')
def handle(self, *args, **options):
self.copy(os.path.join('static', 'core', 'css', 'variables.scss'))
self.copy(os.path.join('templates', 'core', 'base.html'))
self.copy(os.path.join('templates', 'core', 'base_head.html'))
self.copy(os.path.join('templates', 'core', 'base_navigation.html'))
self.copy(os.path.join('templates', 'core', 'base_footer.html'))
self.enable_theme()
print('Done')
|
Add make theme manage script
|
Add make theme manage script
|
Python
|
apache-2.0
|
rdmorganiser/rdmo,DMPwerkzeug/DMPwerkzeug,DMPwerkzeug/DMPwerkzeug,DMPwerkzeug/DMPwerkzeug,rdmorganiser/rdmo,rdmorganiser/rdmo
|
Add make theme manage script
|
from django.apps import apps
from django.core.management.base import BaseCommand
import os.path
import re
from shutil import copyfile
class Command(BaseCommand):
def get_folders(self):
rdmo_core = os.path.join(apps.get_app_config('rdmo').path, 'core')
rdmo_app_theme = os.path.join(os.getcwd(), 'theme')
rdmo_app_config = os.path.join(os.getcwd(), 'config', 'settings', 'local.py')
return rdmo_core, rdmo_app_theme, rdmo_app_config
def mkdir(self, directory):
if not os.path.exists(directory):
os.makedirs(directory)
def copy(self, source_file):
fol = self.get_folders()
source_file = os.path.join(fol[0], source_file)
target_file = source_file.replace(fol[0], fol[1])
print('Copying ' + source_file + ' -> ' + target_file)
self.mkdir(re.search(r'.*(?=\/)', target_file).group(0))
copyfile(source_file, target_file)
def enable_theme(self):
print('Enabling theme by adding the necessary config line')
rxScheme = r'.*?THEME_DIR.*?=.*?[a-z]'
replaced = False
new_arr = []
fol = self.get_folders()
with open(fol[2]) as f:
content = f.read().splitlines()
for line in content:
append = True
if bool(re.search(rxScheme, line)) is True and replaced is True:
append = False
if bool(re.search(rxScheme, line)) is True and replaced is False:
line = 'THEME_DIR = os.path.join(BASE_DIR, \'theme\')'
replaced = True
if append is True:
new_arr.append(line)
if bool(re.search(rxScheme, line)) is True:
replaced = True
self.write_file(fol[2], new_arr)
def write_file(self, filename, data):
with open(filename, 'w') as fp:
for line in data:
fp.write(line + '\n')
def handle(self, *args, **options):
self.copy(os.path.join('static', 'core', 'css', 'variables.scss'))
self.copy(os.path.join('templates', 'core', 'base.html'))
self.copy(os.path.join('templates', 'core', 'base_head.html'))
self.copy(os.path.join('templates', 'core', 'base_navigation.html'))
self.copy(os.path.join('templates', 'core', 'base_footer.html'))
self.enable_theme()
print('Done')
|
<commit_before><commit_msg>Add make theme manage script<commit_after>
|
from django.apps import apps
from django.core.management.base import BaseCommand
import os.path
import re
from shutil import copyfile
class Command(BaseCommand):
def get_folders(self):
rdmo_core = os.path.join(apps.get_app_config('rdmo').path, 'core')
rdmo_app_theme = os.path.join(os.getcwd(), 'theme')
rdmo_app_config = os.path.join(os.getcwd(), 'config', 'settings', 'local.py')
return rdmo_core, rdmo_app_theme, rdmo_app_config
def mkdir(self, directory):
if not os.path.exists(directory):
os.makedirs(directory)
def copy(self, source_file):
fol = self.get_folders()
source_file = os.path.join(fol[0], source_file)
target_file = source_file.replace(fol[0], fol[1])
print('Copying ' + source_file + ' -> ' + target_file)
self.mkdir(re.search(r'.*(?=\/)', target_file).group(0))
copyfile(source_file, target_file)
def enable_theme(self):
print('Enabling theme by adding the necessary config line')
rxScheme = r'.*?THEME_DIR.*?=.*?[a-z]'
replaced = False
new_arr = []
fol = self.get_folders()
with open(fol[2]) as f:
content = f.read().splitlines()
for line in content:
append = True
if bool(re.search(rxScheme, line)) is True and replaced is True:
append = False
if bool(re.search(rxScheme, line)) is True and replaced is False:
line = 'THEME_DIR = os.path.join(BASE_DIR, \'theme\')'
replaced = True
if append is True:
new_arr.append(line)
if bool(re.search(rxScheme, line)) is True:
replaced = True
self.write_file(fol[2], new_arr)
def write_file(self, filename, data):
with open(filename, 'w') as fp:
for line in data:
fp.write(line + '\n')
def handle(self, *args, **options):
self.copy(os.path.join('static', 'core', 'css', 'variables.scss'))
self.copy(os.path.join('templates', 'core', 'base.html'))
self.copy(os.path.join('templates', 'core', 'base_head.html'))
self.copy(os.path.join('templates', 'core', 'base_navigation.html'))
self.copy(os.path.join('templates', 'core', 'base_footer.html'))
self.enable_theme()
print('Done')
|
Add make theme manage scriptfrom django.apps import apps
from django.core.management.base import BaseCommand
import os.path
import re
from shutil import copyfile
class Command(BaseCommand):
def get_folders(self):
rdmo_core = os.path.join(apps.get_app_config('rdmo').path, 'core')
rdmo_app_theme = os.path.join(os.getcwd(), 'theme')
rdmo_app_config = os.path.join(os.getcwd(), 'config', 'settings', 'local.py')
return rdmo_core, rdmo_app_theme, rdmo_app_config
def mkdir(self, directory):
if not os.path.exists(directory):
os.makedirs(directory)
def copy(self, source_file):
fol = self.get_folders()
source_file = os.path.join(fol[0], source_file)
target_file = source_file.replace(fol[0], fol[1])
print('Copying ' + source_file + ' -> ' + target_file)
self.mkdir(re.search(r'.*(?=\/)', target_file).group(0))
copyfile(source_file, target_file)
def enable_theme(self):
print('Enabling theme by adding the necessary config line')
rxScheme = r'.*?THEME_DIR.*?=.*?[a-z]'
replaced = False
new_arr = []
fol = self.get_folders()
with open(fol[2]) as f:
content = f.read().splitlines()
for line in content:
append = True
if bool(re.search(rxScheme, line)) is True and replaced is True:
append = False
if bool(re.search(rxScheme, line)) is True and replaced is False:
line = 'THEME_DIR = os.path.join(BASE_DIR, \'theme\')'
replaced = True
if append is True:
new_arr.append(line)
if bool(re.search(rxScheme, line)) is True:
replaced = True
self.write_file(fol[2], new_arr)
def write_file(self, filename, data):
with open(filename, 'w') as fp:
for line in data:
fp.write(line + '\n')
def handle(self, *args, **options):
self.copy(os.path.join('static', 'core', 'css', 'variables.scss'))
self.copy(os.path.join('templates', 'core', 'base.html'))
self.copy(os.path.join('templates', 'core', 'base_head.html'))
self.copy(os.path.join('templates', 'core', 'base_navigation.html'))
self.copy(os.path.join('templates', 'core', 'base_footer.html'))
self.enable_theme()
print('Done')
|
<commit_before><commit_msg>Add make theme manage script<commit_after>from django.apps import apps
from django.core.management.base import BaseCommand
import os.path
import re
from shutil import copyfile
class Command(BaseCommand):
def get_folders(self):
rdmo_core = os.path.join(apps.get_app_config('rdmo').path, 'core')
rdmo_app_theme = os.path.join(os.getcwd(), 'theme')
rdmo_app_config = os.path.join(os.getcwd(), 'config', 'settings', 'local.py')
return rdmo_core, rdmo_app_theme, rdmo_app_config
def mkdir(self, directory):
if not os.path.exists(directory):
os.makedirs(directory)
def copy(self, source_file):
fol = self.get_folders()
source_file = os.path.join(fol[0], source_file)
target_file = source_file.replace(fol[0], fol[1])
print('Copying ' + source_file + ' -> ' + target_file)
self.mkdir(re.search(r'.*(?=\/)', target_file).group(0))
copyfile(source_file, target_file)
def enable_theme(self):
print('Enabling theme by adding the necessary config line')
rxScheme = r'.*?THEME_DIR.*?=.*?[a-z]'
replaced = False
new_arr = []
fol = self.get_folders()
with open(fol[2]) as f:
content = f.read().splitlines()
for line in content:
append = True
if bool(re.search(rxScheme, line)) is True and replaced is True:
append = False
if bool(re.search(rxScheme, line)) is True and replaced is False:
line = 'THEME_DIR = os.path.join(BASE_DIR, \'theme\')'
replaced = True
if append is True:
new_arr.append(line)
if bool(re.search(rxScheme, line)) is True:
replaced = True
self.write_file(fol[2], new_arr)
def write_file(self, filename, data):
with open(filename, 'w') as fp:
for line in data:
fp.write(line + '\n')
def handle(self, *args, **options):
self.copy(os.path.join('static', 'core', 'css', 'variables.scss'))
self.copy(os.path.join('templates', 'core', 'base.html'))
self.copy(os.path.join('templates', 'core', 'base_head.html'))
self.copy(os.path.join('templates', 'core', 'base_navigation.html'))
self.copy(os.path.join('templates', 'core', 'base_footer.html'))
self.enable_theme()
print('Done')
|
|
12ff38555ac735fca4a7585767b006dbf5e15ca9
|
groups/tests/test_apps_base.py
|
groups/tests/test_apps_base.py
|
from unittest import mock
from django.test import TestCase
import groups # Needed for instantiating AppConfig classes.
from groups import _apps_base
from groups.admin import GroupAdmin
from groups.models import Group
class TestAdminRegisteringAppConfig(TestCase):
def setUp(self):
"""
Create an AdminRegisteringAppConfig pointed at whichever models we have handy.
"""
self.config = _apps_base.AdminRegisteringAppConfig('groups', groups)
def test_register_admin_classes(self):
"""
Assert that admin.site.register() is called based on the value of admin_classes.
"""
self.config.admin_classes = {
'Group': 'groups.admin.GroupAdmin',
}
# Mock out get_model() because otherwise the AppConfig will try to check if it's
# prepared the models already, which it hasn't since we're shortcutting the
# process to get a better unit test. Assert that it's called with the correct
# input later on to make sure we're not cheating.
with mock.patch.object(self.config, 'get_model', return_value=Group) as get_model:
with mock.patch('django.contrib.admin.site.register') as site_register:
self.config._register_admin_classes()
get_model.assert_called_once_with('Group')
site_register.assert_called_once_with(Group, GroupAdmin)
def test_ready(self):
"""
Assert that ready() calls _register_admin_classes() and the superclass's ready().
"""
with mock.patch('django.apps.config.AppConfig.ready') as super_ready:
with mock.patch.object(self.config, '_register_admin_classes') as register:
self.config.ready()
super_ready.assert_called_once_with()
register.assert_called_once_with()
|
Add a test for AdminRegisteringAppConfig.
|
Add a test for AdminRegisteringAppConfig.
|
Python
|
bsd-2-clause
|
incuna/incuna-groups,incuna/incuna-groups
|
Add a test for AdminRegisteringAppConfig.
|
from unittest import mock
from django.test import TestCase
import groups # Needed for instantiating AppConfig classes.
from groups import _apps_base
from groups.admin import GroupAdmin
from groups.models import Group
class TestAdminRegisteringAppConfig(TestCase):
def setUp(self):
"""
Create an AdminRegisteringAppConfig pointed at whichever models we have handy.
"""
self.config = _apps_base.AdminRegisteringAppConfig('groups', groups)
def test_register_admin_classes(self):
"""
Assert that admin.site.register() is called based on the value of admin_classes.
"""
self.config.admin_classes = {
'Group': 'groups.admin.GroupAdmin',
}
# Mock out get_model() because otherwise the AppConfig will try to check if it's
# prepared the models already, which it hasn't since we're shortcutting the
# process to get a better unit test. Assert that it's called with the correct
# input later on to make sure we're not cheating.
with mock.patch.object(self.config, 'get_model', return_value=Group) as get_model:
with mock.patch('django.contrib.admin.site.register') as site_register:
self.config._register_admin_classes()
get_model.assert_called_once_with('Group')
site_register.assert_called_once_with(Group, GroupAdmin)
def test_ready(self):
"""
Assert that ready() calls _register_admin_classes() and the superclass's ready().
"""
with mock.patch('django.apps.config.AppConfig.ready') as super_ready:
with mock.patch.object(self.config, '_register_admin_classes') as register:
self.config.ready()
super_ready.assert_called_once_with()
register.assert_called_once_with()
|
<commit_before><commit_msg>Add a test for AdminRegisteringAppConfig.<commit_after>
|
from unittest import mock
from django.test import TestCase
import groups # Needed for instantiating AppConfig classes.
from groups import _apps_base
from groups.admin import GroupAdmin
from groups.models import Group
class TestAdminRegisteringAppConfig(TestCase):
def setUp(self):
"""
Create an AdminRegisteringAppConfig pointed at whichever models we have handy.
"""
self.config = _apps_base.AdminRegisteringAppConfig('groups', groups)
def test_register_admin_classes(self):
"""
Assert that admin.site.register() is called based on the value of admin_classes.
"""
self.config.admin_classes = {
'Group': 'groups.admin.GroupAdmin',
}
# Mock out get_model() because otherwise the AppConfig will try to check if it's
# prepared the models already, which it hasn't since we're shortcutting the
# process to get a better unit test. Assert that it's called with the correct
# input later on to make sure we're not cheating.
with mock.patch.object(self.config, 'get_model', return_value=Group) as get_model:
with mock.patch('django.contrib.admin.site.register') as site_register:
self.config._register_admin_classes()
get_model.assert_called_once_with('Group')
site_register.assert_called_once_with(Group, GroupAdmin)
def test_ready(self):
"""
Assert that ready() calls _register_admin_classes() and the superclass's ready().
"""
with mock.patch('django.apps.config.AppConfig.ready') as super_ready:
with mock.patch.object(self.config, '_register_admin_classes') as register:
self.config.ready()
super_ready.assert_called_once_with()
register.assert_called_once_with()
|
Add a test for AdminRegisteringAppConfig.from unittest import mock
from django.test import TestCase
import groups # Needed for instantiating AppConfig classes.
from groups import _apps_base
from groups.admin import GroupAdmin
from groups.models import Group
class TestAdminRegisteringAppConfig(TestCase):
def setUp(self):
"""
Create an AdminRegisteringAppConfig pointed at whichever models we have handy.
"""
self.config = _apps_base.AdminRegisteringAppConfig('groups', groups)
def test_register_admin_classes(self):
"""
Assert that admin.site.register() is called based on the value of admin_classes.
"""
self.config.admin_classes = {
'Group': 'groups.admin.GroupAdmin',
}
# Mock out get_model() because otherwise the AppConfig will try to check if it's
# prepared the models already, which it hasn't since we're shortcutting the
# process to get a better unit test. Assert that it's called with the correct
# input later on to make sure we're not cheating.
with mock.patch.object(self.config, 'get_model', return_value=Group) as get_model:
with mock.patch('django.contrib.admin.site.register') as site_register:
self.config._register_admin_classes()
get_model.assert_called_once_with('Group')
site_register.assert_called_once_with(Group, GroupAdmin)
def test_ready(self):
"""
Assert that ready() calls _register_admin_classes() and the superclass's ready().
"""
with mock.patch('django.apps.config.AppConfig.ready') as super_ready:
with mock.patch.object(self.config, '_register_admin_classes') as register:
self.config.ready()
super_ready.assert_called_once_with()
register.assert_called_once_with()
|
<commit_before><commit_msg>Add a test for AdminRegisteringAppConfig.<commit_after>from unittest import mock
from django.test import TestCase
import groups # Needed for instantiating AppConfig classes.
from groups import _apps_base
from groups.admin import GroupAdmin
from groups.models import Group
class TestAdminRegisteringAppConfig(TestCase):
def setUp(self):
"""
Create an AdminRegisteringAppConfig pointed at whichever models we have handy.
"""
self.config = _apps_base.AdminRegisteringAppConfig('groups', groups)
def test_register_admin_classes(self):
"""
Assert that admin.site.register() is called based on the value of admin_classes.
"""
self.config.admin_classes = {
'Group': 'groups.admin.GroupAdmin',
}
# Mock out get_model() because otherwise the AppConfig will try to check if it's
# prepared the models already, which it hasn't since we're shortcutting the
# process to get a better unit test. Assert that it's called with the correct
# input later on to make sure we're not cheating.
with mock.patch.object(self.config, 'get_model', return_value=Group) as get_model:
with mock.patch('django.contrib.admin.site.register') as site_register:
self.config._register_admin_classes()
get_model.assert_called_once_with('Group')
site_register.assert_called_once_with(Group, GroupAdmin)
def test_ready(self):
"""
Assert that ready() calls _register_admin_classes() and the superclass's ready().
"""
with mock.patch('django.apps.config.AppConfig.ready') as super_ready:
with mock.patch.object(self.config, '_register_admin_classes') as register:
self.config.ready()
super_ready.assert_called_once_with()
register.assert_called_once_with()
|
|
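The config class exercised by this test is not shown in the record; a plausible minimal sketch of groups._apps_base.AdminRegisteringAppConfig, reconstructed only from the calls the test asserts (the import_string helper is an assumption, everything else is named in the test):

from django.apps import AppConfig
from django.contrib import admin
from django.utils.module_loading import import_string

class AdminRegisteringAppConfig(AppConfig):
    """AppConfig that registers the ModelAdmins listed in admin_classes on ready()."""
    # Mapping of model name -> dotted path of the ModelAdmin class to register.
    admin_classes = {}

    def _register_admin_classes(self):
        for model_name, admin_path in self.admin_classes.items():
            model = self.get_model(model_name)
            admin.site.register(model, import_string(admin_path))

    def ready(self):
        super(AdminRegisteringAppConfig, self).ready()
        self._register_admin_classes()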
7d5eb61423c8538698a65efca8522aa0fbda17c6
|
curiosity/plugins/fuyu.py
|
curiosity/plugins/fuyu.py
|
import curio
from curious.commands import command
from curious.commands.context import Context
from curious.commands.plugin import Plugin
from curious.dataclasses import Member
def has_admin(ctx: Context):
return ctx.channel.permissions(ctx.author).administrator
class Fuyu(Plugin):
"""
Commands for my server.
"""
async def plugin_check(self, ctx: Context):
return ctx.guild.id == 198101180180594688
@command(aliases=["shouting"], invokation_checks=[has_admin])
async def screaming(self, ctx: Context):
"""
Makes the server screaming.
"""
coros = []
for member in ctx.guild.members:
coros.append(await curio.spawn(member.change_nickname(member.name.upper())))
async with ctx.channel.typing:
results = await curio.gather(coros, return_exceptions=True)
exc = sum(1 for x in results if isinstance(x, Exception))
await ctx.channel.send("AAAAAAAAAAAAAAAA (`{}` changed, `{}` failed)".format(len(results) - exc, exc))
@command(aliases=["librarian"], invokation_checks=[has_admin])
async def whispering(self, ctx: Context):
"""
Makes the server quiet.
"""
coros = []
for member in ctx.guild.members:
coros.append(await curio.spawn(member.change_nickname(member.name.lower())))
async with ctx.channel.typing:
results = await curio.gather(coros, return_exceptions=True)
exc = sum(1 for x in results if isinstance(x, Exception))
await ctx.channel.send(":zzz: (`{}` changed, `{}` failed)".format(len(results) - exc, exc))
@command(aliases=["mute"], invokation_checks=[has_admin])
async def parental_control(self, ctx: Context, victim: Member, timeout: int):
"""
Mutes a member. Mention somebody and give a timeout in seconds.
"""
role = ctx.guild.get_role(248525039400517632)
if not role:
await ctx.channel.send("<@133139430495092737>")
return
await ctx.guild.add_roles(victim, role)
await ctx.channel.send("{} needs to sit out".format(victim.user.mention))
await curio.sleep(timeout)
await ctx.channel.send("{} is back in the arena".format(victim.user.mention))
await ctx.guild.remove_roles(victim, role)
|
Add plugin for my server.
|
Add plugin for my server.
|
Python
|
mit
|
SunDwarf/curiosity
|
Add plugin for my server.
|
import curio
from curious.commands import command
from curious.commands.context import Context
from curious.commands.plugin import Plugin
from curious.dataclasses import Member
def has_admin(ctx: Context):
return ctx.channel.permissions(ctx.author).administrator
class Fuyu(Plugin):
"""
Commands for my server.
"""
async def plugin_check(self, ctx: Context):
return ctx.guild.id == 198101180180594688
@command(aliases=["shouting"], invokation_checks=[has_admin])
async def screaming(self, ctx: Context):
"""
Makes the server screaming.
"""
coros = []
for member in ctx.guild.members:
coros.append(await curio.spawn(member.change_nickname(member.name.upper())))
async with ctx.channel.typing:
results = await curio.gather(coros, return_exceptions=True)
exc = sum(1 for x in results if isinstance(x, Exception))
await ctx.channel.send("AAAAAAAAAAAAAAAA (`{}` changed, `{}` failed)".format(len(results) - exc, exc))
@command(aliases=["librarian"], invokation_checks=[has_admin])
async def whispering(self, ctx: Context):
"""
Makes the server quiet.
"""
coros = []
for member in ctx.guild.members:
coros.append(await curio.spawn(member.change_nickname(member.name.lower())))
async with ctx.channel.typing:
results = await curio.gather(coros, return_exceptions=True)
exc = sum(1 for x in results if isinstance(x, Exception))
await ctx.channel.send(":zzz: (`{}` changed, `{}` failed)".format(len(results) - exc, exc))
@command(aliases=["mute"], invokation_checks=[has_admin])
async def parental_control(self, ctx: Context, victim: Member, timeout: int):
"""
Mutes a member. Mention somebody and give a timeout in seconds.
"""
role = ctx.guild.get_role(248525039400517632)
if not role:
await ctx.channel.send("<@133139430495092737>")
return
await ctx.guild.add_roles(victim, role)
await ctx.channel.send("{} needs to sit out".format(victim.user.mention))
await curio.sleep(timeout)
await ctx.channel.send("{} is back in the arena".format(victim.user.mention))
await ctx.guild.remove_roles(victim, role)
|
<commit_before><commit_msg>Add plugin for my server.<commit_after>
|
import curio
from curious.commands import command
from curious.commands.context import Context
from curious.commands.plugin import Plugin
from curious.dataclasses import Member
def has_admin(ctx: Context):
return ctx.channel.permissions(ctx.author).administrator
class Fuyu(Plugin):
"""
Commands for my server.
"""
async def plugin_check(self, ctx: Context):
return ctx.guild.id == 198101180180594688
@command(aliases=["shouting"], invokation_checks=[has_admin])
async def screaming(self, ctx: Context):
"""
Makes the server screaming.
"""
coros = []
for member in ctx.guild.members:
coros.append(await curio.spawn(member.change_nickname(member.name.upper())))
async with ctx.channel.typing:
results = await curio.gather(coros, return_exceptions=True)
exc = sum(1 for x in results if isinstance(x, Exception))
await ctx.channel.send("AAAAAAAAAAAAAAAA (`{}` changed, `{}` failed)".format(len(results) - exc, exc))
@command(aliases=["librarian"], invokation_checks=[has_admin])
async def whispering(self, ctx: Context):
"""
Makes the server quiet.
"""
coros = []
for member in ctx.guild.members:
coros.append(await curio.spawn(member.change_nickname(member.name.lower())))
async with ctx.channel.typing:
results = await curio.gather(coros, return_exceptions=True)
exc = sum(1 for x in results if isinstance(x, Exception))
await ctx.channel.send(":zzz: (`{}` changed, `{}` failed)".format(len(results) - exc, exc))
@command(aliases=["mute"], invokation_checks=[has_admin])
async def parental_control(self, ctx: Context, victim: Member, timeout: int):
"""
Mutes a member. Mention somebody and give a timeout in seconds.
"""
role = ctx.guild.get_role(248525039400517632)
if not role:
await ctx.channel.send("<@133139430495092737>")
return
await ctx.guild.add_roles(victim, role)
await ctx.channel.send("{} needs to sit out".format(victim.user.mention))
await curio.sleep(timeout)
await ctx.channel.send("{} is back in the arena".format(victim.user.mention))
await ctx.guild.remove_roles(victim, role)
|
Add plugin for my server.import curio
from curious.commands import command
from curious.commands.context import Context
from curious.commands.plugin import Plugin
from curious.dataclasses import Member
def has_admin(ctx: Context):
return ctx.channel.permissions(ctx.author).administrator
class Fuyu(Plugin):
"""
Commands for my server.
"""
async def plugin_check(self, ctx: Context):
return ctx.guild.id == 198101180180594688
@command(aliases=["shouting"], invokation_checks=[has_admin])
async def screaming(self, ctx: Context):
"""
Makes the server screaming.
"""
coros = []
for member in ctx.guild.members:
coros.append(await curio.spawn(member.change_nickname(member.name.upper())))
async with ctx.channel.typing:
results = await curio.gather(coros, return_exceptions=True)
exc = sum(1 for x in results if isinstance(x, Exception))
await ctx.channel.send("AAAAAAAAAAAAAAAA (`{}` changed, `{}` failed)".format(len(results) - exc, exc))
@command(aliases=["librarian"], invokation_checks=[has_admin])
async def whispering(self, ctx: Context):
"""
Makes the server quiet.
"""
coros = []
for member in ctx.guild.members:
coros.append(await curio.spawn(member.change_nickname(member.name.lower())))
async with ctx.channel.typing:
results = await curio.gather(coros, return_exceptions=True)
exc = sum(1 for x in results if isinstance(x, Exception))
await ctx.channel.send(":zzz: (`{}` changed, `{}` failed)".format(len(results) - exc, exc))
@command(aliases=["mute"], invokation_checks=[has_admin])
async def parental_control(self, ctx: Context, victim: Member, timeout: int):
"""
Mutes a member. Mention somebody and give a timeout in seconds.
"""
role = ctx.guild.get_role(248525039400517632)
if not role:
await ctx.channel.send("<@133139430495092737>")
return
await ctx.guild.add_roles(victim, role)
await ctx.channel.send("{} needs to sit out".format(victim.user.mention))
await curio.sleep(timeout)
await ctx.channel.send("{} is back in the arena".format(victim.user.mention))
await ctx.guild.remove_roles(victim, role)
|
<commit_before><commit_msg>Add plugin for my server.<commit_after>import curio
from curious.commands import command
from curious.commands.context import Context
from curious.commands.plugin import Plugin
from curious.dataclasses import Member
def has_admin(ctx: Context):
return ctx.channel.permissions(ctx.author).administrator
class Fuyu(Plugin):
"""
Commands for my server.
"""
async def plugin_check(self, ctx: Context):
return ctx.guild.id == 198101180180594688
@command(aliases=["shouting"], invokation_checks=[has_admin])
async def screaming(self, ctx: Context):
"""
Makes the server screaming.
"""
coros = []
for member in ctx.guild.members:
coros.append(await curio.spawn(member.change_nickname(member.name.upper())))
async with ctx.channel.typing:
results = await curio.gather(coros, return_exceptions=True)
exc = sum(1 for x in results if isinstance(x, Exception))
await ctx.channel.send("AAAAAAAAAAAAAAAA (`{}` changed, `{}` failed)".format(len(results) - exc, exc))
@command(aliases=["librarian"], invokation_checks=[has_admin])
async def whispering(self, ctx: Context):
"""
Makes the server quiet.
"""
coros = []
for member in ctx.guild.members:
coros.append(await curio.spawn(member.change_nickname(member.name.lower())))
async with ctx.channel.typing:
results = await curio.gather(coros, return_exceptions=True)
exc = sum(1 for x in results if isinstance(x, Exception))
await ctx.channel.send(":zzz: (`{}` changed, `{}` failed)".format(len(results) - exc, exc))
@command(aliases=["mute"], invokation_checks=[has_admin])
async def parental_control(self, ctx: Context, victim: Member, timeout: int):
"""
Mutes a member. Mention somebody and give a timeout in seconds.
"""
role = ctx.guild.get_role(248525039400517632)
if not role:
await ctx.channel.send("<@133139430495092737>")
return
await ctx.guild.add_roles(victim, role)
await ctx.channel.send("{} needs to sit out".format(victim.user.mention))
await curio.sleep(timeout)
await ctx.channel.send("{} is back in the arena".format(victim.user.mention))
await ctx.guild.remove_roles(victim, role)
|
|
77a6da686e4def9548ac59de9eace929a4332fca
|
Python/prims_mst.py
|
Python/prims_mst.py
|
# Prim's algorithm is a greedy algorithm that
# finds a minimum spanning tree
# for a weighted undirected graph.
#
# Time complexity: O(m * n)
# Input Format:
# First line has two integers n and m, denoting the number of nodes and
# the number of edges in the graph.
# The next m lines each consist of three space separated integers u, v and c,
# where u and v denote the two nodes between which the undirected edge
# exists, and c denotes the length of the edge between those nodes.
# The last line gives the start node.
# Output Format:
# Single integer denoting the weight of MST
import heapq
from collections import defaultdict
g = defaultdict(list) # graph
n, m = map(int, raw_input().split(' ')) # number of vertexes and edges
weight = 0 # weight of MST
connected = set([]) # set containing connected vertexes
pq = [] # heap
for _ in range(m):
u, v, c = map(int, raw_input().split(' '))
g[u].append((c, v))
g[v].append((c, u))
start = int(raw_input())
connected.add(start)
for tup in g[start]:
heapq.heappush(pq, tup)
while pq:
w, b = heapq.heappop(pq)
if b not in connected:
weight += w
connected.add(b)
for tup in g[b]:
heapq.heappush(pq, tup)
print weight
|
Add Prim's algorithm for finding MST
|
Add Prim's algorithm for finding MST
|
Python
|
mit
|
saru95/DSA,saru95/DSA,saru95/DSA,saru95/DSA,saru95/DSA
|
Add Prim's algorithm for finding MST
|
# Prim's algorithm is a greedy algorithm that
# finds a minimum spanning tree
# for a weighted undirected graph.
#
# Time complexity: O(m * n)
# Input Format:
# First line has two integers n and m, denoting the number of nodes and
# the number of edges in the graph.
# The next m lines each consist of three space separated integers u, v and c,
# where u and v denote the two nodes between which the undirected edge
# exists, and c denotes the length of the edge between those nodes.
# The last line gives the start node.
# Output Format:
# Single integer denoting the weight of MST
import heapq
from collections import defaultdict
g = defaultdict(list) # graph
n, m = map(int, raw_input().split(' ')) # number of vertexes and edges
weight = 0 # weight of MST
connected = set([]) # set containing connected vertexes
pq = [] # heap
for _ in range(m):
u, v, c = map(int, raw_input().split(' '))
g[u].append((c, v))
g[v].append((c, u))
start = int(raw_input())
connected.add(start)
for tup in g[start]:
heapq.heappush(pq, tup)
while pq:
w, b = heapq.heappop(pq)
if b not in connected:
weight += w
connected.add(b)
for tup in g[b]:
heapq.heappush(pq, tup)
print weight
|
<commit_before><commit_msg>Add Prim's algorithm for finding MST<commit_after>
|
# Prim's algorithm is a greedy algorithm that
# finds a minimum spanning tree
# for a weighted undirected graph.
#
# Time complexity: O(m * n)
# Input Format:
# First line has two integers n and m, denoting the number of nodes and
# the number of edges in the graph.
# The next m lines each consist of three space separated integers u, v and c,
# where u and v denote the two nodes between which the undirected edge
# exists, and c denotes the length of the edge between those nodes.
# The last line gives the start node.
# Output Format:
# Single integer denoting the weight of MST
import heapq
from collections import defaultdict
g = defaultdict(list) # graph
n, m = map(int, raw_input().split(' ')) # number of vertexes and edges
weight = 0 # weight of MST
connected = set([]) # set containing connected vertexes
pq = [] # heap
for _ in range(m):
u, v, c = map(int, raw_input().split(' '))
g[u].append((c, v))
g[v].append((c, u))
start = int(raw_input())
connected.add(start)
for tup in g[start]:
heapq.heappush(pq, tup)
while pq:
w, b = heapq.heappop(pq)
if b not in connected:
weight += w
connected.add(b)
for tup in g[b]:
heapq.heappush(pq, tup)
print weight
|
Add Prim's algorithm for finding MST# Prim's algorithm is a greedy algorithm that
# finds a minimum spanning tree
# for a weighted undirected graph.
#
# Time complexity: O(m * n)
# Input Format:
# First line has two integers n and m, denoting the number of nodes and
# the number of edges in the graph.
# The next m lines each consist of three space separated integers u, v and c,
# where u and v denote the two nodes between which the undirected edge
# exists, and c denotes the length of the edge between those nodes.
# The last line gives the start node.
# Output Format:
# Single integer denoting the weight of MST
import heapq
from collections import defaultdict
g = defaultdict(list) # graph
n, m = map(int, raw_input().split(' ')) # number of vertexes and edges
weight = 0 # weight of MST
connected = set([]) # set containing connected vertexes
pq = [] # heap
for _ in range(m):
u, v, c = map(int, raw_input().split(' '))
g[u].append((c, v))
g[v].append((c, u))
start = int(raw_input())
connected.add(start)
for tup in g[start]:
heapq.heappush(pq, tup)
while pq:
w, b = heapq.heappop(pq)
if b not in connected:
weight += w
connected.add(b)
for tup in g[b]:
heapq.heappush(pq, tup)
print weight
|
<commit_before><commit_msg>Add Prim's algorithm for finding MST<commit_after># Prim's algorithm is a greedy algorithm that
# finds a minimum spanning tree
# for a weighted undirected graph.
#
# Time complexity: O(m * n)
# Input Format:
# First line has two integers n and m, denoting the number of nodes and
# the number of edges in the graph.
# The next m lines each consist of three space separated integers u, v and c,
# where u and v denote the two nodes between which the undirected edge
# exists, and c denotes the length of the edge between those nodes.
# The last line gives the start node.
# Output Format:
# Single integer denoting the weight of MST
import heapq
from collections import defaultdict
g = defaultdict(list) # graph
n, m = map(int, raw_input().split(' ')) # number of vertexes and edges
weight = 0 # weight of MST
connected = set([]) # set containing connected vertexes
pq = [] # heap
for _ in range(m):
u, v, c = map(int, raw_input().split(' '))
g[u].append((c, v))
g[v].append((c, u))
start = int(raw_input())
connected.add(start)
for tup in g[start]:
heapq.heappush(pq, tup)
while pq:
w, b = heapq.heappop(pq)
if b not in connected:
weight += w
connected.add(b)
for tup in g[b]:
heapq.heappush(pq, tup)
print weight
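With a binary heap the approach above runs in O(m log m) rather than the O(m * n) quoted in the header comment. A self-contained Python 3 sketch of the same algorithm over an in-memory edge list follows; the function and variable names are illustrative only, not part of the original submission.

import heapq
from collections import defaultdict

def prim_mst_weight(n, edges, start=1):
    # Build an undirected adjacency list keyed by vertex.
    graph = defaultdict(list)
    for u, v, c in edges:
        graph[u].append((c, v))
        graph[v].append((c, u))
    connected = {start}
    heap = list(graph[start])
    heapq.heapify(heap)
    weight = 0
    # Repeatedly take the lightest edge leaving the connected set.
    while heap and len(connected) < n:
        c, v = heapq.heappop(heap)
        if v not in connected:
            connected.add(v)
            weight += c
            for edge in graph[v]:
                heapq.heappush(heap, edge)
    return weight

# Triangle 1-2-3 plus a pendant vertex 4: MST weight is 1 + 2 + 5 = 8.
print(prim_mst_weight(4, [(1, 2, 1), (2, 3, 2), (1, 3, 3), (3, 4, 5)]))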
|
|
69b546df0fe93377e420e62b29e0666b558cde19
|
demo/producer_consumer.py
|
demo/producer_consumer.py
|
from sparts.tasks.periodic import PeriodicTask
from sparts.tasks.queue import QueueTask
from sparts.vservice import VService
import random
import threading
class Producer(PeriodicTask):
INTERVAL = 1.0
def initTask(self):
super(Producer, self).initTask()
self.consumer = self.service.requireTask('Consumer')
def execute(self, *args, **kwargs):
for i in xrange(5):
item = random.random()
self.consumer.queue.put(item)
self.logger.info("Producer put %s into queue", item)
class Consumer(QueueTask):
WORKERS = 10
def execute(self, item, context):
self.logger.info("[%s] Got %s", threading.current_thread().name, item)
class ProducerConsumer(VService):
TASKS=[Producer, Consumer]
if __name__ == '__main__':
ProducerConsumer.initFromCLI()
|
Add a "producer consumer" demo using QueueTask and PeriodicTask
|
Add a "producer consumer" demo using QueueTask and PeriodicTask
|
Python
|
bsd-3-clause
|
djipko/sparts,fmoo/sparts,pshuff/sparts,fmoo/sparts,bboozzoo/sparts,facebook/sparts,djipko/sparts,facebook/sparts,bboozzoo/sparts,pshuff/sparts
|
Add a "producer consumer" demo using QueueTask and PeriodicTask
|
from sparts.tasks.periodic import PeriodicTask
from sparts.tasks.queue import QueueTask
from sparts.vservice import VService
import random
import threading
class Producer(PeriodicTask):
INTERVAL = 1.0
def initTask(self):
super(Producer, self).initTask()
self.consumer = self.service.requireTask('Consumer')
def execute(self, *args, **kwargs):
for i in xrange(5):
item = random.random()
self.consumer.queue.put(item)
self.logger.info("Producer put %s into queue", item)
class Consumer(QueueTask):
WORKERS = 10
def execute(self, item, context):
self.logger.info("[%s] Got %s", threading.current_thread().name, item)
class ProducerConsumer(VService):
TASKS=[Producer, Consumer]
if __name__ == '__main__':
ProducerConsumer.initFromCLI()
|
<commit_before><commit_msg>Add a "producer consumer" demo using QueueTask and PeriodicTask<commit_after>
|
from sparts.tasks.periodic import PeriodicTask
from sparts.tasks.queue import QueueTask
from sparts.vservice import VService
import random
import threading
class Producer(PeriodicTask):
INTERVAL = 1.0
def initTask(self):
super(Producer, self).initTask()
self.consumer = self.service.requireTask('Consumer')
def execute(self, *args, **kwargs):
for i in xrange(5):
item = random.random()
self.consumer.queue.put(item)
self.logger.info("Producer put %s into queue", item)
class Consumer(QueueTask):
WORKERS = 10
def execute(self, item, context):
self.logger.info("[%s] Got %s", threading.current_thread().name, item)
class ProducerConsumer(VService):
TASKS=[Producer, Consumer]
if __name__ == '__main__':
ProducerConsumer.initFromCLI()
|
Add a "producer consumer" demo using QueueTask and PeriodicTaskfrom sparts.tasks.periodic import PeriodicTask
from sparts.tasks.queue import QueueTask
from sparts.vservice import VService
import random
import threading
class Producer(PeriodicTask):
INTERVAL = 1.0
def initTask(self):
super(Producer, self).initTask()
self.consumer = self.service.requireTask('Consumer')
def execute(self, *args, **kwargs):
for i in xrange(5):
item = random.random()
self.consumer.queue.put(item)
self.logger.info("Producer put %s into queue", item)
class Consumer(QueueTask):
WORKERS = 10
def execute(self, item, context):
self.logger.info("[%s] Got %s", threading.current_thread().name, item)
class ProducerConsumer(VService):
TASKS=[Producer, Consumer]
if __name__ == '__main__':
ProducerConsumer.initFromCLI()
|
<commit_before><commit_msg>Add a "producer consumer" demo using QueueTask and PeriodicTask<commit_after>from sparts.tasks.periodic import PeriodicTask
from sparts.tasks.queue import QueueTask
from sparts.vservice import VService
import random
import threading
class Producer(PeriodicTask):
INTERVAL = 1.0
def initTask(self):
super(Producer, self).initTask()
self.consumer = self.service.requireTask('Consumer')
def execute(self, *args, **kwargs):
for i in xrange(5):
item = random.random()
self.consumer.queue.put(item)
self.logger.info("Producer put %s into queue", item)
class Consumer(QueueTask):
WORKERS = 10
def execute(self, item, context):
self.logger.info("[%s] Got %s", threading.current_thread().name, item)
class ProducerConsumer(VService):
TASKS=[Producer, Consumer]
if __name__ == '__main__':
ProducerConsumer.initFromCLI()
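For readers without sparts installed, the same producer/consumer shape can be sketched with only the standard library; this illustrates the pattern, not the sparts API.

import queue
import random
import threading

work = queue.Queue()

def consumer():
    while True:
        item = work.get()
        if item is None:  # sentinel tells the worker to exit
            break
        print("[%s] Got %s" % (threading.current_thread().name, item))

workers = [threading.Thread(target=consumer) for _ in range(10)]
for w in workers:
    w.start()

# One "tick" of the producer: put five random items on the queue.
for _ in range(5):
    item = random.random()
    work.put(item)
    print("Producer put %s into queue" % item)

# Shut the workers down cleanly.
for _ in workers:
    work.put(None)
for w in workers:
    w.join()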
|
|
fbb93749870b1ba9a228afa7c1bc1791ca449dca
|
gittaggers.py
|
gittaggers.py
|
from setuptools.command.egg_info import egg_info
import subprocess
import time
class EggInfoFromGit(egg_info):
"""Tag the build with git commit timestamp.
If a build tag has already been set (e.g., "egg_info -b", building
from source package), leave it alone.
"""
def git_timestamp_tag(self):
gitinfo = subprocess.check_output(
['git', 'log', '--first-parent', '--max-count=1',
'--format=format:%ct', '..']).strip()
return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
def tags(self):
if self.tag_build is None:
self.tag_build = self.git_timestamp_tag()
return egg_info.tags(self)
|
from setuptools.command.egg_info import egg_info
import subprocess
import time
class EggInfoFromGit(egg_info):
"""Tag the build with git commit timestamp.
If a build tag has already been set (e.g., "egg_info -b", building
from source package), leave it alone.
"""
def git_timestamp_tag(self):
gitinfo = subprocess.check_output(
['git', 'log', '--first-parent', '--max-count=1',
'--format=format:%ct']).strip()
return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
def tags(self):
if self.tag_build is None:
self.tag_build = self.git_timestamp_tag()
return egg_info.tags(self)
|
Fix Python packaging to use correct git log for package time/version stamps (2nd try)
|
Fix Python packaging to use correct git log for package time/version stamps (2nd try)
|
Python
|
apache-2.0
|
chapmanb/cwltool,dleehr/cwltool,jeremiahsavage/cwltool,SciDAP/cwltool,chapmanb/cwltool,SciDAP/cwltool,dleehr/cwltool,dleehr/cwltool,dleehr/cwltool,common-workflow-language/cwltool,jeremiahsavage/cwltool,chapmanb/cwltool,jeremiahsavage/cwltool,SciDAP/cwltool,common-workflow-language/cwltool,jeremiahsavage/cwltool,chapmanb/cwltool,common-workflow-language/cwltool,SciDAP/cwltool
|
from setuptools.command.egg_info import egg_info
import subprocess
import time
class EggInfoFromGit(egg_info):
"""Tag the build with git commit timestamp.
If a build tag has already been set (e.g., "egg_info -b", building
from source package), leave it alone.
"""
def git_timestamp_tag(self):
gitinfo = subprocess.check_output(
['git', 'log', '--first-parent', '--max-count=1',
'--format=format:%ct', '..']).strip()
return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
def tags(self):
if self.tag_build is None:
self.tag_build = self.git_timestamp_tag()
return egg_info.tags(self)
Fix Python packaging to use correct git log for package time/version stamps (2nd try)
|
from setuptools.command.egg_info import egg_info
import subprocess
import time
class EggInfoFromGit(egg_info):
"""Tag the build with git commit timestamp.
If a build tag has already been set (e.g., "egg_info -b", building
from source package), leave it alone.
"""
def git_timestamp_tag(self):
gitinfo = subprocess.check_output(
['git', 'log', '--first-parent', '--max-count=1',
'--format=format:%ct']).strip()
return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
def tags(self):
if self.tag_build is None:
self.tag_build = self.git_timestamp_tag()
return egg_info.tags(self)
|
<commit_before>from setuptools.command.egg_info import egg_info
import subprocess
import time
class EggInfoFromGit(egg_info):
"""Tag the build with git commit timestamp.
If a build tag has already been set (e.g., "egg_info -b", building
from source package), leave it alone.
"""
def git_timestamp_tag(self):
gitinfo = subprocess.check_output(
['git', 'log', '--first-parent', '--max-count=1',
'--format=format:%ct', '..']).strip()
return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
def tags(self):
if self.tag_build is None:
self.tag_build = self.git_timestamp_tag()
return egg_info.tags(self)
<commit_msg>Fix Python packaging to use correct git log for package time/version stamps (2nd try)<commit_after>
|
from setuptools.command.egg_info import egg_info
import subprocess
import time
class EggInfoFromGit(egg_info):
"""Tag the build with git commit timestamp.
If a build tag has already been set (e.g., "egg_info -b", building
from source package), leave it alone.
"""
def git_timestamp_tag(self):
gitinfo = subprocess.check_output(
['git', 'log', '--first-parent', '--max-count=1',
'--format=format:%ct']).strip()
return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
def tags(self):
if self.tag_build is None:
self.tag_build = self.git_timestamp_tag()
return egg_info.tags(self)
|
from setuptools.command.egg_info import egg_info
import subprocess
import time
class EggInfoFromGit(egg_info):
"""Tag the build with git commit timestamp.
If a build tag has already been set (e.g., "egg_info -b", building
from source package), leave it alone.
"""
def git_timestamp_tag(self):
gitinfo = subprocess.check_output(
['git', 'log', '--first-parent', '--max-count=1',
'--format=format:%ct', '..']).strip()
return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
def tags(self):
if self.tag_build is None:
self.tag_build = self.git_timestamp_tag()
return egg_info.tags(self)
Fix Python packaging to use correct git log for package time/version stamps (2nd try)from setuptools.command.egg_info import egg_info
import subprocess
import time
class EggInfoFromGit(egg_info):
"""Tag the build with git commit timestamp.
If a build tag has already been set (e.g., "egg_info -b", building
from source package), leave it alone.
"""
def git_timestamp_tag(self):
gitinfo = subprocess.check_output(
['git', 'log', '--first-parent', '--max-count=1',
'--format=format:%ct']).strip()
return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
def tags(self):
if self.tag_build is None:
self.tag_build = self.git_timestamp_tag()
return egg_info.tags(self)
|
<commit_before>from setuptools.command.egg_info import egg_info
import subprocess
import time
class EggInfoFromGit(egg_info):
"""Tag the build with git commit timestamp.
If a build tag has already been set (e.g., "egg_info -b", building
from source package), leave it alone.
"""
def git_timestamp_tag(self):
gitinfo = subprocess.check_output(
['git', 'log', '--first-parent', '--max-count=1',
'--format=format:%ct', '..']).strip()
return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
def tags(self):
if self.tag_build is None:
self.tag_build = self.git_timestamp_tag()
return egg_info.tags(self)
<commit_msg>Fix Python packaging to use correct git log for package time/version stamps (2nd try)<commit_after>from setuptools.command.egg_info import egg_info
import subprocess
import time
class EggInfoFromGit(egg_info):
"""Tag the build with git commit timestamp.
If a build tag has already been set (e.g., "egg_info -b", building
from source package), leave it alone.
"""
def git_timestamp_tag(self):
gitinfo = subprocess.check_output(
['git', 'log', '--first-parent', '--max-count=1',
'--format=format:%ct']).strip()
return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
def tags(self):
if self.tag_build is None:
self.tag_build = self.git_timestamp_tag()
return egg_info.tags(self)
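The timestamp-to-tag step can be exercised on its own, without a git checkout; the epoch value below is arbitrary and only illustrates the tag format.

import time

# 1400000000 is 2014-05-13 16:53:20 UTC, so this prints ".20140513165320".
print(time.strftime('.%Y%m%d%H%M%S', time.gmtime(1400000000)))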
|
4fd470ad2aa5e6c075e2d34667908401f4284f70
|
openfda3/server_opnfda.py
|
openfda3/server_opnfda.py
|
import http.server
import json
import socketserver
PORT = 8000
# HTTPRequestHandler class
class testHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
# GET
def do_GET(self):
headers = {'User-Agent': 'http-client'}
conn = http.client.HTTPSConnection("api.fda.gov")
# Get a https://api.fda.gov/drug/label.json drug label from this URL and
# extract what is the id,
# the purpose of the drug and the manufacturer_name
conn.request("GET", "/drug/label.json", None, headers)
r1 = conn.getresponse()
print(r1.status, r1.reason)
drugs_raw = r1.read().decode("utf-8")
conn.close()
drugs = json.loads(drugs_raw)
drug = drugs['results'][0]
drug_id = drug['id']
drug_purpose = drug['purpose'][0]
drug_manufacturer_name = drug['openfda']['manufacturer_name'][0]
print(drug_id, drug_purpose, drug_manufacturer_name)
# Get 10 drugs and extract from all of them the id (tip: use the limit param for it)
conn.request("GET", "/drug/label.json?limit=10", None, headers)
r1 = conn.getresponse()
print(r1.status, r1.reason)
drugs_raw = r1.read().decode("utf-8")
conn.close()
drugs = json.loads(drugs_raw)['results']
for drug in drugs:
print(drug['id'])
# Send response status code
self.send_response(200)
# Send headers
self.send_header('Content-type','text/html')
self.end_headers()
# Send message back to client
message = drugs[0]['id']
# Write content as utf-8 data
self.wfile.write(bytes(message, "utf8"))
return
Handler = http.server.SimpleHTTPRequestHandler
Handler = testHTTPRequestHandler
httpd = socketserver.TCPServer(("", PORT), Handler)
print("serving at port", PORT)
httpd.serve_forever()
# https://github.com/joshmaker/simple-python-webserver/blob/master/server.py
|
Add first version of openfda3
|
Add first version of openfda3
|
Python
|
apache-2.0
|
acs-test/openfda,acs-test/openfda
|
Add first version of openfda3
|
import http.server
import json
import socketserver
PORT = 8000
# HTTPRequestHandler class
class testHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
# GET
def do_GET(self):
headers = {'User-Agent': 'http-client'}
conn = http.client.HTTPSConnection("api.fda.gov")
# Get a https://api.fda.gov/drug/label.json drug label from this URL and
# extract what is the id,
# the purpose of the drug and the manufacturer_name
conn.request("GET", "/drug/label.json", None, headers)
r1 = conn.getresponse()
print(r1.status, r1.reason)
drugs_raw = r1.read().decode("utf-8")
conn.close()
drugs = json.loads(drugs_raw)
drug = drugs['results'][0]
drug_id = drug['id']
drug_purpose = drug['purpose'][0]
drug_manufacturer_name = drug['openfda']['manufacturer_name'][0]
print(drug_id, drug_purpose, drug_manufacturer_name)
# Get 10 drugs and extract from all of them the id (tip: use the limit param for it)
conn.request("GET", "/drug/label.json?limit=10", None, headers)
r1 = conn.getresponse()
print(r1.status, r1.reason)
drugs_raw = r1.read().decode("utf-8")
conn.close()
drugs = json.loads(drugs_raw)['results']
for drug in drugs:
print(drug['id'])
# Send response status code
self.send_response(200)
# Send headers
self.send_header('Content-type','text/html')
self.end_headers()
# Send message back to client
message = drugs[0]['id']
# Write content as utf-8 data
self.wfile.write(bytes(message, "utf8"))
return
Handler = http.server.SimpleHTTPRequestHandler
Handler = testHTTPRequestHandler
httpd = socketserver.TCPServer(("", PORT), Handler)
print("serving at port", PORT)
httpd.serve_forever()
# https://github.com/joshmaker/simple-python-webserver/blob/master/server.py
|
<commit_before><commit_msg>Add first version of openfda3<commit_after>
|
import http.server
import json
import socketserver
PORT = 8000
# HTTPRequestHandler class
class testHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
# GET
def do_GET(self):
headers = {'User-Agent': 'http-client'}
conn = http.client.HTTPSConnection("api.fda.gov")
# Get a https://api.fda.gov/drug/label.json drug label from this URL and
# extract what is the id,
# the purpose of the drug and the manufacturer_name
conn.request("GET", "/drug/label.json", None, headers)
r1 = conn.getresponse()
print(r1.status, r1.reason)
drugs_raw = r1.read().decode("utf-8")
conn.close()
drugs = json.loads(drugs_raw)
drug = drugs['results'][0]
drug_id = drug['id']
drug_purpose = drug['purpose'][0]
drug_manufacturer_name = drug['openfda']['manufacturer_name'][0]
print(drug_id, drug_purpose, drug_manufacturer_name)
# Get 10 drugs and extract from all of them the id (tip: use the limit param for it)
conn.request("GET", "/drug/label.json?limit=10", None, headers)
r1 = conn.getresponse()
print(r1.status, r1.reason)
drugs_raw = r1.read().decode("utf-8")
conn.close()
drugs = json.loads(drugs_raw)['results']
for drug in drugs:
print(drug['id'])
# Send response status code
self.send_response(200)
# Send headers
self.send_header('Content-type','text/html')
self.end_headers()
# Send message back to client
message = drugs[0]['id']
# Write content as utf-8 data
self.wfile.write(bytes(message, "utf8"))
return
Handler = http.server.SimpleHTTPRequestHandler
Handler = testHTTPRequestHandler
httpd = socketserver.TCPServer(("", PORT), Handler)
print("serving at port", PORT)
httpd.serve_forever()
# https://github.com/joshmaker/simple-python-webserver/blob/master/server.py
|
Add first version of openfda3import http.server
import json
import socketserver
PORT = 8000
# HTTPRequestHandler class
class testHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
# GET
def do_GET(self):
headers = {'User-Agent': 'http-client'}
conn = http.client.HTTPSConnection("api.fda.gov")
# Get a https://api.fda.gov/drug/label.json drug label from this URL and
# extract what is the id,
# the purpose of the drug and the manufacturer_name
conn.request("GET", "/drug/label.json", None, headers)
r1 = conn.getresponse()
print(r1.status, r1.reason)
drugs_raw = r1.read().decode("utf-8")
conn.close()
drugs = json.loads(drugs_raw)
drug = drugs['results'][0]
drug_id = drug['id']
drug_purpose = drug['purpose'][0]
drug_manufacturer_name = drug['openfda']['manufacturer_name'][0]
print(drug_id, drug_purpose, drug_manufacturer_name)
# Get 10 drugs and extract from all of them the id (tip: use the limit param for it)
conn.request("GET", "/drug/label.json?limit=10", None, headers)
r1 = conn.getresponse()
print(r1.status, r1.reason)
drugs_raw = r1.read().decode("utf-8")
conn.close()
drugs = json.loads(drugs_raw)['results']
for drug in drugs:
print(drug['id'])
# Send response status code
self.send_response(200)
# Send headers
self.send_header('Content-type','text/html')
self.end_headers()
# Send message back to client
message = drugs[0]['id']
# Write content as utf-8 data
self.wfile.write(bytes(message, "utf8"))
return
Handler = http.server.SimpleHTTPRequestHandler
Handler = testHTTPRequestHandler
httpd = socketserver.TCPServer(("", PORT), Handler)
print("serving at port", PORT)
httpd.serve_forever()
# https://github.com/joshmaker/simple-python-webserver/blob/master/server.py
|
<commit_before><commit_msg>Add first version of openfda3<commit_after>import http.server
import json
import socketserver
PORT = 8000
# HTTPRequestHandler class
class testHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
# GET
def do_GET(self):
headers = {'User-Agent': 'http-client'}
conn = http.client.HTTPSConnection("api.fda.gov")
# Get a https://api.fda.gov/drug/label.json drug label from this URL and
# extract what is the id,
# the purpose of the drug and the manufacturer_name
conn.request("GET", "/drug/label.json", None, headers)
r1 = conn.getresponse()
print(r1.status, r1.reason)
drugs_raw = r1.read().decode("utf-8")
conn.close()
drugs = json.loads(drugs_raw)
drug = drugs['results'][0]
drug_id = drug['id']
drug_purpose = drug['purpose'][0]
drug_manufacturer_name = drug['openfda']['manufacturer_name'][0]
print(drug_id, drug_purpose, drug_manufacturer_name)
# Get 10 drugs and extract from all of them the id (tip: use the limit param for it)
conn.request("GET", "/drug/label.json?limit=10", None, headers)
r1 = conn.getresponse()
print(r1.status, r1.reason)
drugs_raw = r1.read().decode("utf-8")
conn.close()
drugs = json.loads(drugs_raw)['results']
for drug in drugs:
print(drug['id'])
# Send response status code
self.send_response(200)
# Send headers
self.send_header('Content-type','text/html')
self.end_headers()
# Send message back to client
message = drugs[0]['id']
# Write content as utf-8 data
self.wfile.write(bytes(message, "utf8"))
return
Handler = http.server.SimpleHTTPRequestHandler
Handler = testHTTPRequestHandler
httpd = socketserver.TCPServer(("", PORT), Handler)
print("serving at port", PORT)
httpd.serve_forever()
# https://github.com/joshmaker/simple-python-webserver/blob/master/server.py
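The handler calls http.client.HTTPSConnection without an explicit import of http.client; it appears to work only because http.server pulls that module in internally. A standalone sketch of the same label fetch with explicit imports, using the endpoint and fields from the handler above:

import http.client
import json

conn = http.client.HTTPSConnection("api.fda.gov")
conn.request("GET", "/drug/label.json?limit=10", None, {"User-Agent": "http-client"})
response = conn.getresponse()
drugs = json.loads(response.read().decode("utf-8"))["results"]
conn.close()
print([drug["id"] for drug in drugs])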
|
|
bc8a8686d601bb0d1890e3eacbf7430bbdb50b3c
|
CodeFights/digitDegree.py
|
CodeFights/digitDegree.py
|
#!/usr/local/bin/python
# Code Fights Digits Degree Problem
def digitsDegree(n):
degree = 0
if n < 10:
return 0
while n > 0:
dig = n % 10
n = n // 10
if dig > 0:
degree += 1
return degree
def main():
tests = [
[5, 0],
[100, 1],
[91, 2],
[99, 2]
]
for t in tests:
res = digitsDegree(t[0])
ans = t[1]
if ans == res:
print("PASSED: digitsDegree({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: digitsDegree({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights digits degree problem
|
Solve Code Fights digits degree problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights digits degree problem
|
#!/usr/local/bin/python
# Code Fights Digits Degree Problem
def digitsDegree(n):
degree = 0
if n < 10:
return 0
while n > 0:
dig = n % 10
n = n // 10
if dig > 0:
degree += 1
return degree
def main():
tests = [
[5, 0],
[100, 1],
[91, 2],
[99, 2]
]
for t in tests:
res = digitsDegree(t[0])
ans = t[1]
if ans == res:
print("PASSED: digitsDegree({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: digitsDegree({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights digits degree problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Digits Degree Problem
def digitsDegree(n):
degree = 0
if n < 10:
return 0
while n > 0:
dig = n % 10
n = n // 10
if dig > 0:
degree += 1
return degree
def main():
tests = [
[5, 0],
[100, 1],
[91, 2],
[99, 2]
]
for t in tests:
res = digitsDegree(t[0])
ans = t[1]
if ans == res:
print("PASSED: digitsDegree({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: digitsDegree({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights digits degree problem#!/usr/local/bin/python
# Code Fights Digits Degree Problem
def digitsDegree(n):
degree = 0
if n < 10:
return 0
while n > 0:
dig = n % 10
n = n // 10
if dig > 0:
degree += 1
return degree
def main():
tests = [
[5, 0],
[100, 1],
[91, 2],
[99, 2]
]
for t in tests:
res = digitsDegree(t[0])
ans = t[1]
if ans == res:
print("PASSED: digitsDegree({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: digitsDegree({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights digits degree problem<commit_after>#!/usr/local/bin/python
# Code Fights Digits Degree Problem
def digitsDegree(n):
degree = 0
if n < 10:
return 0
while n > 0:
dig = n % 10
n = n // 10
if dig > 0:
degree += 1
return degree
def main():
tests = [
[5, 0],
[100, 1],
[91, 2],
[99, 2]
]
for t in tests:
res = digitsDegree(t[0])
ans = t[1]
if ans == res:
print("PASSED: digitsDegree({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: digitsDegree({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
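Note that counting nonzero digits happens to satisfy the four listed tests but differs from the usual digit-degree definition (repeatedly replace the number with the sum of its digits until one digit remains), for example on 111111111. A sketch of that reading, assuming it is the intended one:

def digit_degree(n):
    steps = 0
    while n >= 10:
        n = sum(int(d) for d in str(n))
        steps += 1
    return steps

assert digit_degree(5) == 0
assert digit_degree(100) == 1
assert digit_degree(91) == 2
assert digit_degree(111111111) == 1  # a nonzero-digit count would give 9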
|
|
c1412a5aac7c0917917e058ff5e7dab11ab48e8e
|
pyeda/gvmagic.py
|
pyeda/gvmagic.py
|
"""
Graphviz IPython magic extensions
Magic methods:
%dot <dot graph>
%%dot <dot ...
... graph>
%dotstr "<dot graph>"
%dotobj obj.to_dot()
%dotobjs obj[0].to_dot(), obj[1].to_dot(), ...
Usage:
%load_ext gvmagic
"""
from subprocess import Popen, PIPE
from IPython.core.display import display_svg
from IPython.core.magic import (
Magics, magics_class,
line_magic, line_cell_magic
)
from IPython.utils.warn import info, error
def rundot(s):
"""Execute dot and return a raw SVG image, or None."""
dot = Popen(['dot', '-Tsvg'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdoutdata, stderrdata = dot.communicate(s.encode('utf-8'))
status = dot.wait()
if status == 0:
return stdoutdata
else:
fstr = "dot returned {}\n[==== stderr ====]\n{}"
error(fstr.format(status, stderrdata.decode('utf-8')))
return None
@magics_class
class GraphvizMagics(Magics):
@line_cell_magic
def dot(self, line, cell=None):
"""dot line magic"""
if cell is None:
s = line
else:
s = line + '\n' + cell
data = rundot(s)
if data:
display_svg(data, raw=True)
@line_magic
def dotstr(self, line):
"""dot string magic"""
s = self.shell.ev(line)
data = rundot(s)
if data:
display_svg(data, raw=True)
@line_magic
def dotobj(self, line):
"""dot object magic"""
obj = self.shell.ev(line)
data = rundot(obj.to_dot())
if data:
display_svg(data, raw=True)
@line_magic
def dotobjs(self, line):
"""dot objects magic"""
objs = self.shell.ev(line)
for i, obj in enumerate(objs):
data = rundot(obj.to_dot())
if data:
info("object {}:".format(i))
display_svg(data, raw=True)
def load_ipython_extension(ipython):
"""Load the extension in IPython."""
ipython.register_magics(GraphvizMagics)
|
Add Graphviz magic methods module
|
Add Graphviz magic methods module
|
Python
|
bsd-2-clause
|
pombredanne/pyeda,karissa/pyeda,cjdrake/pyeda,sschnug/pyeda,sschnug/pyeda,karissa/pyeda,cjdrake/pyeda,sschnug/pyeda,karissa/pyeda,pombredanne/pyeda,GtTmy/pyeda,cjdrake/pyeda,pombredanne/pyeda,GtTmy/pyeda,GtTmy/pyeda
|
Add Graphviz magic methods module
|
"""
Graphviz IPython magic extensions
Magic methods:
%dot <dot graph>
%%dot <dot ...
... graph>
%dotstr "<dot graph>"
%dotobj obj.to_dot()
%dotobjs obj[0].to_dot(), obj[1].to_dot(), ...
Usage:
%load_ext gvmagic
"""
from subprocess import Popen, PIPE
from IPython.core.display import display_svg
from IPython.core.magic import (
Magics, magics_class,
line_magic, line_cell_magic
)
from IPython.utils.warn import info, error
def rundot(s):
"""Execute dot and return a raw SVG image, or None."""
dot = Popen(['dot', '-Tsvg'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdoutdata, stderrdata = dot.communicate(s.encode('utf-8'))
status = dot.wait()
if status == 0:
return stdoutdata
else:
fstr = "dot returned {}\n[==== stderr ====]\n{}"
error(fstr.format(status, stderrdata.decode('utf-8')))
return None
@magics_class
class GraphvizMagics(Magics):
@line_cell_magic
def dot(self, line, cell=None):
"""dot line magic"""
if cell is None:
s = line
else:
s = line + '\n' + cell
data = rundot(s)
if data:
display_svg(data, raw=True)
@line_magic
def dotstr(self, line):
"""dot string magic"""
s = self.shell.ev(line)
data = rundot(s)
if data:
display_svg(data, raw=True)
@line_magic
def dotobj(self, line):
"""dot object magic"""
obj = self.shell.ev(line)
data = rundot(obj.to_dot())
if data:
display_svg(data, raw=True)
@line_magic
def dotobjs(self, line):
"""dot objects magic"""
objs = self.shell.ev(line)
for i, obj in enumerate(objs):
data = rundot(obj.to_dot())
if data:
info("object {}:".format(i))
display_svg(data, raw=True)
def load_ipython_extension(ipython):
"""Load the extension in IPython."""
ipython.register_magics(GraphvizMagics)
|
<commit_before><commit_msg>Add Graphviz magic methods module<commit_after>
|
"""
Graphviz IPython magic extensions
Magic methods:
%dot <dot graph>
%%dot <dot ...
... graph>
%dotstr "<dot graph>"
%dotobj obj.to_dot()
%dotobjs obj[0].to_dot(), obj[1].to_dot(), ...
Usage:
%load_ext gvmagic
"""
from subprocess import Popen, PIPE
from IPython.core.display import display_svg
from IPython.core.magic import (
Magics, magics_class,
line_magic, line_cell_magic
)
from IPython.utils.warn import info, error
def rundot(s):
"""Execute dot and return a raw SVG image, or None."""
dot = Popen(['dot', '-Tsvg'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdoutdata, stderrdata = dot.communicate(s.encode('utf-8'))
status = dot.wait()
if status == 0:
return stdoutdata
else:
fstr = "dot returned {}\n[==== stderr ====]\n{}"
error(fstr.format(status, stderrdata.decode('utf-8')))
return None
@magics_class
class GraphvizMagics(Magics):
@line_cell_magic
def dot(self, line, cell=None):
"""dot line magic"""
if cell is None:
s = line
else:
s = line + '\n' + cell
data = rundot(s)
if data:
display_svg(data, raw=True)
@line_magic
def dotstr(self, line):
"""dot string magic"""
s = self.shell.ev(line)
data = rundot(s)
if data:
display_svg(data, raw=True)
@line_magic
def dotobj(self, line):
"""dot object magic"""
obj = self.shell.ev(line)
data = rundot(obj.to_dot())
if data:
display_svg(data, raw=True)
@line_magic
def dotobjs(self, line):
"""dot objects magic"""
objs = self.shell.ev(line)
for i, obj in enumerate(objs):
data = rundot(obj.to_dot())
if data:
info("object {}:".format(i))
display_svg(data, raw=True)
def load_ipython_extension(ipython):
"""Load the extension in IPython."""
ipython.register_magics(GraphvizMagics)
|
Add Graphviz magic methods module"""
Graphviz IPython magic extensions
Magic methods:
%dot <dot graph>
%%dot <dot ...
... graph>
%dotstr "<dot graph>"
%dotobj obj.to_dot()
%dotobjs obj[0].to_dot(), obj[1].to_dot(), ...
Usage:
%load_ext gvmagic
"""
from subprocess import Popen, PIPE
from IPython.core.display import display_svg
from IPython.core.magic import (
Magics, magics_class,
line_magic, line_cell_magic
)
from IPython.utils.warn import info, error
def rundot(s):
"""Execute dot and return a raw SVG image, or None."""
dot = Popen(['dot', '-Tsvg'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdoutdata, stderrdata = dot.communicate(s.encode('utf-8'))
status = dot.wait()
if status == 0:
return stdoutdata
else:
fstr = "dot returned {}\n[==== stderr ====]\n{}"
error(fstr.format(status, stderrdata.decode('utf-8')))
return None
@magics_class
class GraphvizMagics(Magics):
@line_cell_magic
def dot(self, line, cell=None):
"""dot line magic"""
if cell is None:
s = line
else:
s = line + '\n' + cell
data = rundot(s)
if data:
display_svg(data, raw=True)
@line_magic
def dotstr(self, line):
"""dot string magic"""
s = self.shell.ev(line)
data = rundot(s)
if data:
display_svg(data, raw=True)
@line_magic
def dotobj(self, line):
"""dot object magic"""
obj = self.shell.ev(line)
data = rundot(obj.to_dot())
if data:
display_svg(data, raw=True)
@line_magic
def dotobjs(self, line):
"""dot objects magic"""
objs = self.shell.ev(line)
for i, obj in enumerate(objs):
data = rundot(obj.to_dot())
if data:
info("object {}:".format(i))
display_svg(data, raw=True)
def load_ipython_extension(ipython):
"""Load the extension in IPython."""
ipython.register_magics(GraphvizMagics)
|
<commit_before><commit_msg>Add Graphviz magic methods module<commit_after>"""
Graphviz IPython magic extensions
Magic methods:
%dot <dot graph>
%%dot <dot ...
... graph>
%dotstr "<dot graph>"
%dotobj obj.to_dot()
%dotobjs obj[0].to_dot(), obj[1].to_dot(), ...
Usage:
%load_ext gvmagic
"""
from subprocess import Popen, PIPE
from IPython.core.display import display_svg
from IPython.core.magic import (
Magics, magics_class,
line_magic, line_cell_magic
)
from IPython.utils.warn import info, error
def rundot(s):
"""Execute dot and return a raw SVG image, or None."""
dot = Popen(['dot', '-Tsvg'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdoutdata, stderrdata = dot.communicate(s.encode('utf-8'))
status = dot.wait()
if status == 0:
return stdoutdata
else:
fstr = "dot returned {}\n[==== stderr ====]\n{}"
error(fstr.format(status, stderrdata.decode('utf-8')))
return None
@magics_class
class GraphvizMagics(Magics):
@line_cell_magic
def dot(self, line, cell=None):
"""dot line magic"""
if cell is None:
s = line
else:
s = line + '\n' + cell
data = rundot(s)
if data:
display_svg(data, raw=True)
@line_magic
def dotstr(self, line):
"""dot string magic"""
s = self.shell.ev(line)
data = rundot(s)
if data:
display_svg(data, raw=True)
@line_magic
def dotobj(self, line):
"""dot object magic"""
obj = self.shell.ev(line)
data = rundot(obj.to_dot())
if data:
display_svg(data, raw=True)
@line_magic
def dotobjs(self, line):
"""dot objects magic"""
objs = self.shell.ev(line)
for i, obj in enumerate(objs):
data = rundot(obj.to_dot())
if data:
info("object {}:".format(i))
display_svg(data, raw=True)
def load_ipython_extension(ipython):
"""Load the extension in IPython."""
ipython.register_magics(GraphvizMagics)
|
|
37a42a6e344c0ff030353c13c6bf6a1717eefbaa
|
test/integration_tests/python/test_gears.py
|
test/integration_tests/python/test_gears.py
|
import json
import logging
log = logging.getLogger(__name__)
sh = logging.StreamHandler()
log.addHandler(sh)
def test_gear_add(as_admin):
r = as_admin.post('/gears/test-case-gear', json={
"category" : "converter",
"gear" : {
"inputs" : {
"wat" : {
"base" : "file",
"type" : {
"enum" : [
"wat"
]
}
}
},
"maintainer" : "Example",
"description" : "Example",
"license" : "BSD-2-Clause",
"author" : "Example",
"url" : "https://example.example",
"label" : "wat",
"flywheel" : "0",
"source" : "https://example.example",
"version" : "0.0.1",
"config" : {},
"name" : "test-case-gear"
},
"exchange" : {
"git-commit" : "aex",
"rootfs-hash" : "sha384:oy",
"rootfs-url" : "https://example.example"
}
})
assert r.ok
|
Add example test case in python
|
Add example test case in python
|
Python
|
mit
|
scitran/api,scitran/core,scitran/core,scitran/core,scitran/core,scitran/api
|
Add example test case in python
|
import json
import logging
log = logging.getLogger(__name__)
sh = logging.StreamHandler()
log.addHandler(sh)
def test_gear_add(as_admin):
r = as_admin.post('/gears/test-case-gear', json={
"category" : "converter",
"gear" : {
"inputs" : {
"wat" : {
"base" : "file",
"type" : {
"enum" : [
"wat"
]
}
}
},
"maintainer" : "Example",
"description" : "Example",
"license" : "BSD-2-Clause",
"author" : "Example",
"url" : "https://example.example",
"label" : "wat",
"flywheel" : "0",
"source" : "https://example.example",
"version" : "0.0.1",
"config" : {},
"name" : "test-case-gear"
},
"exchange" : {
"git-commit" : "aex",
"rootfs-hash" : "sha384:oy",
"rootfs-url" : "https://example.example"
}
})
assert r.ok
|
<commit_before><commit_msg>Add example test case in python<commit_after>
|
import json
import logging
log = logging.getLogger(__name__)
sh = logging.StreamHandler()
log.addHandler(sh)
def test_gear_add(as_admin):
r = as_admin.post('/gears/test-case-gear', json={
"category" : "converter",
"gear" : {
"inputs" : {
"wat" : {
"base" : "file",
"type" : {
"enum" : [
"wat"
]
}
}
},
"maintainer" : "Example",
"description" : "Example",
"license" : "BSD-2-Clause",
"author" : "Example",
"url" : "https://example.example",
"label" : "wat",
"flywheel" : "0",
"source" : "https://example.example",
"version" : "0.0.1",
"config" : {},
"name" : "test-case-gear"
},
"exchange" : {
"git-commit" : "aex",
"rootfs-hash" : "sha384:oy",
"rootfs-url" : "https://example.example"
}
})
assert r.ok
|
Add example test case in pythonimport json
import logging
log = logging.getLogger(__name__)
sh = logging.StreamHandler()
log.addHandler(sh)
def test_gear_add(as_admin):
r = as_admin.post('/gears/test-case-gear', json={
"category" : "converter",
"gear" : {
"inputs" : {
"wat" : {
"base" : "file",
"type" : {
"enum" : [
"wat"
]
}
}
},
"maintainer" : "Example",
"description" : "Example",
"license" : "BSD-2-Clause",
"author" : "Example",
"url" : "https://example.example",
"label" : "wat",
"flywheel" : "0",
"source" : "https://example.example",
"version" : "0.0.1",
"config" : {},
"name" : "test-case-gear"
},
"exchange" : {
"git-commit" : "aex",
"rootfs-hash" : "sha384:oy",
"rootfs-url" : "https://example.example"
}
})
assert r.ok
|
<commit_before><commit_msg>Add example test case in python<commit_after>import json
import logging
log = logging.getLogger(__name__)
sh = logging.StreamHandler()
log.addHandler(sh)
def test_gear_add(as_admin):
r = as_admin.post('/gears/test-case-gear', json={
"category" : "converter",
"gear" : {
"inputs" : {
"wat" : {
"base" : "file",
"type" : {
"enum" : [
"wat"
]
}
}
},
"maintainer" : "Example",
"description" : "Example",
"license" : "BSD-2-Clause",
"author" : "Example",
"url" : "https://example.example",
"label" : "wat",
"flywheel" : "0",
"source" : "https://example.example",
"version" : "0.0.1",
"config" : {},
"name" : "test-case-gear"
},
"exchange" : {
"git-commit" : "aex",
"rootfs-hash" : "sha384:oy",
"rootfs-url" : "https://example.example"
}
})
assert r.ok
|
|
060ee041b6f7b0cf7748b11869665183f6656357
|
fluent_contents/plugins/markup/migrations/0002_fix_polymorphic_ctype.py
|
fluent_contents/plugins/markup/migrations/0002_fix_polymorphic_ctype.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
from django.db import models, migrations
def _forwards(apps, schema_editor):
"""
Make sure that the MarkupItem model actually points
to the correct proxy model, that implements the given language.
"""
# Need to work on the actual models here.
from fluent_contents.plugins.markup.models import LANGUAGE_MODEL_CLASSES
from fluent_contents.plugins.markup.models import MarkupItem
from django.contrib.contenttypes.models import ContentType
ctype = ContentType.objects.get_for_model(MarkupItem)
for language, proxy_model in LANGUAGE_MODEL_CLASSES.items():
proxy_ctype = ContentType.objects.get_for_model(proxy_model, for_concrete_model=False)
MarkupItem.objects.filter(
polymorphic_ctype=ctype, language=language
).update(
polymorphic_ctype=proxy_ctype
)
def _backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('fluent_contents', '0001_initial'),
('markup', '0001_initial'),
]
operations = [
migrations.RunPython(_forwards, _backwards),
]
|
Make sure MarkupItem is stored under its proxy class type ID
|
Make sure MarkupItem is stored under its proxy class type ID
This is needed for proper formset saving/retrieval,
in combination with django-polymorphic 1.4-git
|
Python
|
apache-2.0
|
edoburu/django-fluent-contents,edoburu/django-fluent-contents,django-fluent/django-fluent-contents,edoburu/django-fluent-contents,django-fluent/django-fluent-contents,django-fluent/django-fluent-contents
|
Make sure MarkupItem is stored under its proxy class type ID
This is needed for proper formset saving/retrieval,
in combination with django-polymorphic 1.4-git
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
from django.db import models, migrations
def _forwards(apps, schema_editor):
"""
Make sure that the MarkupItem model actually points
to the correct proxy model, that implements the given language.
"""
# Need to work on the actual models here.
from fluent_contents.plugins.markup.models import LANGUAGE_MODEL_CLASSES
from fluent_contents.plugins.markup.models import MarkupItem
from django.contrib.contenttypes.models import ContentType
ctype = ContentType.objects.get_for_model(MarkupItem)
for language, proxy_model in LANGUAGE_MODEL_CLASSES.items():
proxy_ctype = ContentType.objects.get_for_model(proxy_model, for_concrete_model=False)
MarkupItem.objects.filter(
polymorphic_ctype=ctype, language=language
).update(
polymorphic_ctype=proxy_ctype
)
def _backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('fluent_contents', '0001_initial'),
('markup', '0001_initial'),
]
operations = [
migrations.RunPython(_forwards, _backwards),
]
|
<commit_before><commit_msg>Make sure MarkupItem is stored under its proxy class type ID
This is needed for proper formset saving/retrieval,
in combination with django-polymorphic 1.4-git<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
from django.db import models, migrations
def _forwards(apps, schema_editor):
"""
Make sure that the MarkupItem model actually points
to the correct proxy model, that implements the given language.
"""
# Need to work on the actual models here.
from fluent_contents.plugins.markup.models import LANGUAGE_MODEL_CLASSES
from fluent_contents.plugins.markup.models import MarkupItem
from django.contrib.contenttypes.models import ContentType
ctype = ContentType.objects.get_for_model(MarkupItem)
for language, proxy_model in LANGUAGE_MODEL_CLASSES.items():
proxy_ctype = ContentType.objects.get_for_model(proxy_model, for_concrete_model=False)
MarkupItem.objects.filter(
polymorphic_ctype=ctype, language=language
).update(
polymorphic_ctype=proxy_ctype
)
def _backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('fluent_contents', '0001_initial'),
('markup', '0001_initial'),
]
operations = [
migrations.RunPython(_forwards, _backwards),
]
|
Make sure MarkupItem is stored under its proxy class type ID
This is needed for proper formset saving/retrieval,
in combination with django-polymorphic 1.4-git# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
from django.db import models, migrations
def _forwards(apps, schema_editor):
"""
Make sure that the MarkupItem model actually points
to the correct proxy model, that implements the given language.
"""
# Need to work on the actual models here.
from fluent_contents.plugins.markup.models import LANGUAGE_MODEL_CLASSES
from fluent_contents.plugins.markup.models import MarkupItem
from django.contrib.contenttypes.models import ContentType
ctype = ContentType.objects.get_for_model(MarkupItem)
for language, proxy_model in LANGUAGE_MODEL_CLASSES.items():
proxy_ctype = ContentType.objects.get_for_model(proxy_model, for_concrete_model=False)
MarkupItem.objects.filter(
polymorphic_ctype=ctype, language=language
).update(
polymorphic_ctype=proxy_ctype
)
def _backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('fluent_contents', '0001_initial'),
('markup', '0001_initial'),
]
operations = [
migrations.RunPython(_forwards, _backwards),
]
|
<commit_before><commit_msg>Make sure MarkupItem is stored under its proxy class type ID
This is needed for proper formset saving/retrieval,
in combination with django-polymorphic 1.4-git<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
from django.db import models, migrations
def _forwards(apps, schema_editor):
"""
Make sure that the MarkupItem model actually points
to the correct proxy model, that implements the given language.
"""
# Need to work on the actual models here.
from fluent_contents.plugins.markup.models import LANGUAGE_MODEL_CLASSES
from fluent_contents.plugins.markup.models import MarkupItem
from django.contrib.contenttypes.models import ContentType
ctype = ContentType.objects.get_for_model(MarkupItem)
for language, proxy_model in LANGUAGE_MODEL_CLASSES.items():
proxy_ctype = ContentType.objects.get_for_model(proxy_model, for_concrete_model=False)
MarkupItem.objects.filter(
polymorphic_ctype=ctype, language=language
).update(
polymorphic_ctype=proxy_ctype
)
def _backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('fluent_contents', '0001_initial'),
('markup', '0001_initial'),
]
operations = [
migrations.RunPython(_forwards, _backwards),
]
|
|
1f4b525aa93421b890dde2181f02b4445683fd08
|
toolbox/stack_to_h5.py
|
toolbox/stack_to_h5.py
|
import vigra
import argparse
def convert_to_volume(options):
data = vigra.impex.readVolume(options.input_file)
print("Saving h5 volume of shape {}".format(data.shape))
vigra.writeHDF5(data, options.output_file, options.output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Compute TRA loss of a new labeling compared to ground truth')
# file paths
parser.add_argument('--input-file', type=str, dest='input_file', required=True,
help='Filename of the first image of the tiff stack')
parser.add_argument('--output-file', type=str, dest='output_file', required=True,
help='Filename for the resulting HDF5 file.')
parser.add_argument('--output-path', type=str, dest='output_path', default='exported_data',
help='Path inside the HDF5 file to the data')
# parse command line
args = parser.parse_args()
convert_to_volume(args)
|
Add short script to create a raw hdf5 file from a tiff stack
|
Add short script to create a raw hdf5 file from a tiff stack
|
Python
|
mit
|
chaubold/hytra,chaubold/hytra,chaubold/hytra
|
Add short script to create a raw hdf5 file from a tiff stack
|
import vigra
import argparse
def convert_to_volume(options):
data = vigra.impex.readVolume(options.input_file)
print("Saving h5 volume of shape {}".format(data.shape))
vigra.writeHDF5(data, options.output_file, options.output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Compute TRA loss of a new labeling compared to ground truth')
# file paths
parser.add_argument('--input-file', type=str, dest='input_file', required=True,
help='Filename of the first image of the tiff stack')
parser.add_argument('--output-file', type=str, dest='output_file', required=True,
help='Filename for the resulting HDF5 file.')
parser.add_argument('--output-path', type=str, dest='output_path', default='exported_data',
help='Path inside the HDF5 file to the data')
# parse command line
args = parser.parse_args()
convert_to_volume(args)
|
<commit_before><commit_msg>Add short script to create a raw hdf5 file from a tiff stack<commit_after>
|
import vigra
import argparse
def convert_to_volume(options):
data = vigra.impex.readVolume(options.input_file)
print("Saving h5 volume of shape {}".format(data.shape))
vigra.writeHDF5(data, options.output_file, options.output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Compute TRA loss of a new labeling compared to ground truth')
# file paths
parser.add_argument('--input-file', type=str, dest='input_file', required=True,
help='Filename of the first image of the tiff stack')
parser.add_argument('--output-file', type=str, dest='output_file', required=True,
help='Filename for the resulting HDF5 file.')
parser.add_argument('--output-path', type=str, dest='output_path', default='exported_data',
help='Path inside the HDF5 file to the data')
# parse command line
args = parser.parse_args()
convert_to_volume(args)
|
Add short script to create a raw hdf5 file from a tiff stackimport vigra
import argparse
def convert_to_volume(options):
data = vigra.impex.readVolume(options.input_file)
print("Saving h5 volume of shape {}".format(data.shape))
vigra.writeHDF5(data, options.output_file, options.output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Compute TRA loss of a new labeling compared to ground truth')
# file paths
parser.add_argument('--input-file', type=str, dest='input_file', required=True,
help='Filename of the first image of the tiff stack')
parser.add_argument('--output-file', type=str, dest='output_file', required=True,
help='Filename for the resulting HDF5 file.')
parser.add_argument('--output-path', type=str, dest='output_path', default='exported_data',
help='Path inside the HDF5 file to the data')
# parse command line
args = parser.parse_args()
convert_to_volume(args)
|
<commit_before><commit_msg>Add short script to create a raw hdf5 file from a tiff stack<commit_after>import vigra
import argparse
def convert_to_volume(options):
data = vigra.impex.readVolume(options.input_file)
print("Saving h5 volume of shape {}".format(data.shape))
vigra.writeHDF5(data, options.output_file, options.output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Compute TRA loss of a new labeling compared to ground truth')
# file paths
parser.add_argument('--input-file', type=str, dest='input_file', required=True,
help='Filename of the first image of the tiff stack')
parser.add_argument('--output-file', type=str, dest='output_file', required=True,
help='Filename for the resulting HDF5 file.')
parser.add_argument('--output-path', type=str, dest='output_path', default='exported_data',
help='Path inside the HDF5 file to the data')
# parse command line
args = parser.parse_args()
convert_to_volume(args)
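The argparse description above ("Compute TRA loss of a new labeling compared to ground truth") looks copy-pasted from a tracking-evaluation script; a description matching what the tool actually does might read as follows (wording assumed):

import argparse

parser = argparse.ArgumentParser(
    description='Convert a TIFF stack to a single HDF5 volume')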
|
|
e30a19d1b72acec4587ddfe85096a6db43b8c7ff
|
tools/rebuild-index.py
|
tools/rebuild-index.py
|
# Rebuild asset indices
# Usage: python3 rebuild-index.py in the project root
import io
import json
import pathlib
index = {}
try:
stream = open("assets/index.json")
index = json.loads(stream.readall())
except Exception:
pass
# An index is a dictionary from filename to a structure:
# "filename" : { "timestamp": 9583, "version": 2 }, ...
for path in pathlib.Path("assets").glob("**/*"):
if str(path) == "assets/index.json" or not path.is_file(): continue
entry = { "timestamp": 0, "version": -1 }
try: entry = index[str(path)]
except: pass
newtime = path.stat().st_mtime
if newtime != entry["timestamp"]:
newentry = { "timestamp": newtime, "version": entry["version"] + 1 }
index[str(path)] = newentry
stream = open("assets/index.json", mode="w")
stream.write(json.dumps(index))
|
Add script to update index
|
Add script to update index
|
Python
|
agpl-3.0
|
nagakawa/x801,nagakawa/x801,nagakawa/x801,nagakawa/x801,nagakawa/x801
|
Add script to update index
|
# Rebuild asset indices
# Usage: python3 rebuild-index.py in the project root
import io
import json
import pathlib
index = {}
try:
stream = open("assets/index.json")
index = json.loads(stream.readall())
except Exception:
pass
# An index is a dictionary from filename to a structure:
# "filename" : { "timestamp": 9583, "version": 2 }, ...
for path in pathlib.Path("assets").glob("**/*"):
if str(path) == "assets/index.json" or not path.is_file(): continue
entry = { "timestamp": 0, "version": -1 }
try: entry = index[str(path)]
except: pass
newtime = path.stat().st_mtime
if newtime != entry["timestamp"]:
newentry = { "timestamp": newtime, "version": entry["version"] + 1 }
index[str(path)] = newentry
stream = open("assets/index.json", mode="w")
stream.write(json.dumps(index))
|
<commit_before><commit_msg>Add script to update index<commit_after>
|
# Rebuild asset indices
# Usage: python3 rebuild-index.py in the project root
import io
import json
import pathlib
index = {}
try:
stream = open("assets/index.json")
index = json.loads(stream.readall())
except Exception:
pass
# An index is a dictionary from filename to a structure:
# "filename" : { "timestamp": 9583, "version": 2 }, ...
for path in pathlib.Path("assets").glob("**/*"):
if str(path) == "assets/index.json" or not path.is_file(): continue
entry = { "timestamp": 0, "version": -1 }
try: entry = index[str(path)]
except: pass
newtime = path.stat().st_mtime
if newtime != entry["timestamp"]:
newentry = { "timestamp": newtime, "version": entry["version"] + 1 }
index[str(path)] = newentry
stream = open("assets/index.json", mode="w")
stream.write(json.dumps(index))
|
Add script to update index# Rebuild asset indices
# Usage: python3 rebuild-index.py in the project root
import io
import json
import pathlib
index = {}
try:
stream = open("assets/index.json")
index = json.loads(stream.readall())
except Exception:
pass
# An index is a dictionary from filename to a structure:
# "filename" : { "timestamp": 9583, "version": 2 }, ...
for path in pathlib.Path("assets").glob("**/*"):
if str(path) == "assets/index.json" or not path.is_file(): continue
entry = { "timestamp": 0, "version": -1 }
try: entry = index[str(path)]
except: pass
newtime = path.stat().st_mtime
if newtime != entry["timestamp"]:
newentry = { "timestamp": newtime, "version": entry["version"] + 1 }
index[str(path)] = newentry
stream = open("assets/index.json", mode="w")
stream.write(json.dumps(index))
|
<commit_before><commit_msg>Add script to update index<commit_after># Rebuild asset indices
# Usage: python3 rebuild-index.py in the project root
import io
import json
import pathlib
index = {}
try:
stream = open("assets/index.json")
index = json.loads(stream.readall())
except Exception:
pass
# An index is a dictionary from filename to a structure:
# "filename" : { "timestamp": 9583, "version": 2 }, ...
for path in pathlib.Path("assets").glob("**/*"):
if str(path) == "assets/index.json" or not path.is_file(): continue
entry = { "timestamp": 0, "version": -1 }
try: entry = index[str(path)]
except: pass
newtime = path.stat().st_mtime
if newtime != entry["timestamp"]:
newentry = { "timestamp": newtime, "version": entry["version"] + 1 }
index[str(path)] = newentry
stream = open("assets/index.json", mode="w")
stream.write(json.dumps(index))
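The text-mode file object returned by open() has no readall() method (the call would be read(), or json.load on the stream), so the bare except above silently discards any existing index and versions never advance past 0. A sketch of the load step without that fallback, using the same path as the script:

import json
import pathlib

index_path = pathlib.Path("assets/index.json")
index = {}
if index_path.is_file():
    with index_path.open() as stream:
        index = json.load(stream)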
|
|
72ee5d6949f25158b6cbd1deead45ee1e939be5b
|
sympy/series/__init__.py
|
sympy/series/__init__.py
|
"""A module that handles series: find a limit, order the series etc.
"""
from order import Order
from limits import limit, Limit
from gruntz import gruntz
from series import series
O = Order
__all__ = [gruntz, limit, series, O, Order, Limit]
|
"""A module that handles series: find a limit, order the series etc.
"""
from order import Order
from limits import limit, Limit
from gruntz import gruntz
from series import series
O = Order
__all__ = ['gruntz', 'limit', 'series', 'O', 'Order', 'Limit']
|
Fix __all__ usage for sympy/series
|
Fix __all__ usage for sympy/series
|
Python
|
bsd-3-clause
|
hargup/sympy,Arafatk/sympy,chaffra/sympy,kaushik94/sympy,atreyv/sympy,jbbskinny/sympy,aktech/sympy,AunShiLord/sympy,AkademieOlympia/sympy,beni55/sympy,jerli/sympy,vipulroxx/sympy,Titan-C/sympy,yukoba/sympy,minrk/sympy,Shaswat27/sympy,abloomston/sympy,yashsharan/sympy,shipci/sympy,drufat/sympy,jerli/sympy,toolforger/sympy,Gadal/sympy,cswiercz/sympy,cswiercz/sympy,atsao72/sympy,MridulS/sympy,emon10005/sympy,shikil/sympy,aktech/sympy,iamutkarshtiwari/sympy,moble/sympy,Arafatk/sympy,VaibhavAgarwalVA/sympy,wanglongqi/sympy,kaichogami/sympy,Gadal/sympy,postvakje/sympy,ahhda/sympy,yukoba/sympy,sahmed95/sympy,hrashk/sympy,oliverlee/sympy,garvitr/sympy,farhaanbukhsh/sympy,jaimahajan1997/sympy,wanglongqi/sympy,kaushik94/sympy,shikil/sympy,grevutiu-gabriel/sympy,maniteja123/sympy,VaibhavAgarwalVA/sympy,drufat/sympy,abloomston/sympy,atsao72/sympy,Davidjohnwilson/sympy,jaimahajan1997/sympy,sahilshekhawat/sympy,moble/sympy,Davidjohnwilson/sympy,diofant/diofant,Sumith1896/sympy,meghana1995/sympy,AunShiLord/sympy,Vishluck/sympy,kmacinnis/sympy,toolforger/sympy,Davidjohnwilson/sympy,Curious72/sympy,grevutiu-gabriel/sympy,madan96/sympy,kumarkrishna/sympy,chaffra/sympy,souravsingh/sympy,MechCoder/sympy,jamesblunt/sympy,ga7g08/sympy,postvakje/sympy,jbbskinny/sympy,mcdaniel67/sympy,farhaanbukhsh/sympy,rahuldan/sympy,sunny94/temp,Gadal/sympy,hargup/sympy,garvitr/sympy,atreyv/sympy,lidavidm/sympy,abhiii5459/sympy,MechCoder/sympy,saurabhjn76/sympy,shipci/sympy,mcdaniel67/sympy,hrashk/sympy,yashsharan/sympy,bukzor/sympy,Shaswat27/sympy,sahmed95/sympy,Sumith1896/sympy,rahuldan/sympy,shipci/sympy,yukoba/sympy,cccfran/sympy,liangjiaxing/sympy,kaichogami/sympy,skidzo/sympy,bukzor/sympy,kaushik94/sympy,jerli/sympy,Designist/sympy,kmacinnis/sympy,asm666/sympy,Arafatk/sympy,yashsharan/sympy,sahilshekhawat/sympy,pandeyadarsh/sympy,Mitchkoens/sympy,jamesblunt/sympy,saurabhjn76/sympy,hargup/sympy,maniteja123/sympy,debugger22/sympy,saurabhjn76/sympy,wyom/sympy,kmacinnis/sympy,MridulS/sympy,cswiercz/sympy,debugger22/sympy,cccfran/sympy,cccfran/sympy,iamutkarshtiwari/sympy,oliverlee/sympy,maniteja123/sympy,MechCoder/sympy,skirpichev/omg,lindsayad/sympy,VaibhavAgarwalVA/sympy,rahuldan/sympy,madan96/sympy,iamutkarshtiwari/sympy,asm666/sympy,asm666/sympy,pernici/sympy,Designist/sympy,Mitchkoens/sympy,grevutiu-gabriel/sympy,AkademieOlympia/sympy,garvitr/sympy,sampadsaha5/sympy,lindsayad/sympy,atsao72/sympy,drufat/sympy,ga7g08/sympy,Vishluck/sympy,hrashk/sympy,bukzor/sympy,pandeyadarsh/sympy,flacjacket/sympy,sahmed95/sympy,skidzo/sympy,beni55/sympy,emon10005/sympy,emon10005/sympy,pbrady/sympy,jamesblunt/sympy,Designist/sympy,chaffra/sympy,kumarkrishna/sympy,madan96/sympy,moble/sympy,beni55/sympy,Sumith1896/sympy,dqnykamp/sympy,mafiya69/sympy,pbrady/sympy,ChristinaZografou/sympy,ChristinaZografou/sympy,kevalds51/sympy,liangjiaxing/sympy,Titan-C/sympy,amitjamadagni/sympy,minrk/sympy,lidavidm/sympy,dqnykamp/sympy,mafiya69/sympy,sampadsaha5/sympy,liangjiaxing/sympy,ahhda/sympy,sampadsaha5/sympy,Curious72/sympy,aktech/sympy,lindsayad/sympy,wyom/sympy,vipulroxx/sympy,atreyv/sympy,jbbskinny/sympy,mafiya69/sympy,sunny94/temp,srjoglekar246/sympy,pbrady/sympy,shikil/sympy,wyom/sympy,debugger22/sympy,Titan-C/sympy,toolforger/sympy,mcdaniel67/sympy,kevalds51/sympy,MridulS/sympy,Vishluck/sympy,sahilshekhawat/sympy,lidavidm/sympy,sunny94/temp,Shaswat27/sympy,oliverlee/sympy,abhiii5459/sympy,ChristinaZografou/sympy,abloomston/sympy,AunShiLord/sympy,dqnykamp/sympy,jaimahajan1997/sympy,meghana1995/sympy,postvakje/sympy,souravsingh/sympy,souravsingh/
sympy,AkademieOlympia/sympy,skidzo/sympy,wanglongqi/sympy,pandeyadarsh/sympy,kevalds51/sympy,farhaanbukhsh/sympy,kaichogami/sympy,meghana1995/sympy,amitjamadagni/sympy,ahhda/sympy,Curious72/sympy,abhiii5459/sympy,Mitchkoens/sympy,kumarkrishna/sympy,vipulroxx/sympy,ga7g08/sympy
|
"""A module that handles series: find a limit, order the series etc.
"""
from order import Order
from limits import limit, Limit
from gruntz import gruntz
from series import series
O = Order
__all__ = [gruntz, limit, series, O, Order, Limit]
Fix __all__ usage for sympy/series
|
"""A module that handles series: find a limit, order the series etc.
"""
from order import Order
from limits import limit, Limit
from gruntz import gruntz
from series import series
O = Order
__all__ = ['gruntz', 'limit', 'series', 'O', 'Order', 'Limit']
|
<commit_before>"""A module that handles series: find a limit, order the series etc.
"""
from order import Order
from limits import limit, Limit
from gruntz import gruntz
from series import series
O = Order
__all__ = [gruntz, limit, series, O, Order, Limit]
<commit_msg>Fix __all__ usage for sympy/series<commit_after>
|
"""A module that handles series: find a limit, order the series etc.
"""
from order import Order
from limits import limit, Limit
from gruntz import gruntz
from series import series
O = Order
__all__ = ['gruntz', 'limit', 'series', 'O', 'Order', 'Limit']
|
"""A module that handles series: find a limit, order the series etc.
"""
from order import Order
from limits import limit, Limit
from gruntz import gruntz
from series import series
O = Order
__all__ = [gruntz, limit, series, O, Order, Limit]
Fix __all__ usage for sympy/series"""A module that handles series: find a limit, order the series etc.
"""
from order import Order
from limits import limit, Limit
from gruntz import gruntz
from series import series
O = Order
__all__ = ['gruntz', 'limit', 'series', 'O', 'Order', 'Limit']
|
<commit_before>"""A module that handles series: find a limit, order the series etc.
"""
from order import Order
from limits import limit, Limit
from gruntz import gruntz
from series import series
O = Order
__all__ = [gruntz, limit, series, O, Order, Limit]
<commit_msg>Fix __all__ usage for sympy/series<commit_after>"""A module that handles series: find a limit, order the series etc.
"""
from order import Order
from limits import limit, Limit
from gruntz import gruntz
from series import series
O = Order
__all__ = ['gruntz', 'limit', 'series', 'O', 'Order', 'Limit']
|
3df9b3962dbade0175b2bdade04dd709fd69fef2
|
core/admin/migrations/versions/f1393877871d_.py
|
core/admin/migrations/versions/f1393877871d_.py
|
""" Add default columns to the configuration table
Revision ID: f1393877871d
Revises: 546b04c886f0
Create Date: 2018-12-09 16:15:42.317104
"""
# revision identifiers, used by Alembic.
revision = 'f1393877871d'
down_revision = '546b04c886f0'
from alembic import op
import sqlalchemy as sa
def upgrade():
with op.batch_alter_table('config') as batch_op:
batch_op.add_column(sa.Column('comment', sa.String(length=255), nullable=True))
batch_op.add_column(sa.Column('created_at', sa.Date(), nullable=False, server_default='1900-01-01'))
batch_op.add_column(sa.Column('updated_at', sa.Date(), nullable=True))
def downgrade():
with op.batch_alter_table('config') as batch_op:
batch_op.drop_column('updated_at')
batch_op.drop_column('created_at')
batch_op.drop_column('comment')
|
Add default columns to the configuration table
|
Add default columns to the configuration table
|
Python
|
mit
|
kaiyou/freeposte.io,kaiyou/freeposte.io,kaiyou/freeposte.io,kaiyou/freeposte.io
|
Add default columns to the configuration table
|
""" Add default columns to the configuration table
Revision ID: f1393877871d
Revises: 546b04c886f0
Create Date: 2018-12-09 16:15:42.317104
"""
# revision identifiers, used by Alembic.
revision = 'f1393877871d'
down_revision = '546b04c886f0'
from alembic import op
import sqlalchemy as sa
def upgrade():
with op.batch_alter_table('config') as batch_op:
batch_op.add_column(sa.Column('comment', sa.String(length=255), nullable=True))
batch_op.add_column(sa.Column('created_at', sa.Date(), nullable=False, server_default='1900-01-01'))
batch_op.add_column(sa.Column('updated_at', sa.Date(), nullable=True))
def downgrade():
with op.batch_alter_table('config') as batch_op:
batch_op.drop_column('updated_at')
batch_op.drop_column('created_at')
batch_op.drop_column('comment')
|
<commit_before><commit_msg>Add default columns to the configuration table<commit_after>
|
""" Add default columns to the configuration table
Revision ID: f1393877871d
Revises: 546b04c886f0
Create Date: 2018-12-09 16:15:42.317104
"""
# revision identifiers, used by Alembic.
revision = 'f1393877871d'
down_revision = '546b04c886f0'
from alembic import op
import sqlalchemy as sa
def upgrade():
with op.batch_alter_table('config') as batch_op:
batch_op.add_column(sa.Column('comment', sa.String(length=255), nullable=True))
batch_op.add_column(sa.Column('created_at', sa.Date(), nullable=False, server_default='1900-01-01'))
batch_op.add_column(sa.Column('updated_at', sa.Date(), nullable=True))
def downgrade():
with op.batch_alter_table('config') as batch_op:
batch_op.drop_column('updated_at')
batch_op.drop_column('created_at')
batch_op.drop_column('comment')
|
Add default columns to the configuration table""" Add default columns to the configuration table
Revision ID: f1393877871d
Revises: 546b04c886f0
Create Date: 2018-12-09 16:15:42.317104
"""
# revision identifiers, used by Alembic.
revision = 'f1393877871d'
down_revision = '546b04c886f0'
from alembic import op
import sqlalchemy as sa
def upgrade():
with op.batch_alter_table('config') as batch_op:
batch_op.add_column(sa.Column('comment', sa.String(length=255), nullable=True))
batch_op.add_column(sa.Column('created_at', sa.Date(), nullable=False, server_default='1900-01-01'))
batch_op.add_column(sa.Column('updated_at', sa.Date(), nullable=True))
def downgrade():
with op.batch_alter_table('config') as batch_op:
batch_op.drop_column('updated_at')
batch_op.drop_column('created_at')
batch_op.drop_column('comment')
|
<commit_before><commit_msg>Add default columns to the configuration table<commit_after>""" Add default columns to the configuration table
Revision ID: f1393877871d
Revises: 546b04c886f0
Create Date: 2018-12-09 16:15:42.317104
"""
# revision identifiers, used by Alembic.
revision = 'f1393877871d'
down_revision = '546b04c886f0'
from alembic import op
import sqlalchemy as sa
def upgrade():
with op.batch_alter_table('config') as batch_op:
batch_op.add_column(sa.Column('comment', sa.String(length=255), nullable=True))
batch_op.add_column(sa.Column('created_at', sa.Date(), nullable=False, server_default='1900-01-01'))
batch_op.add_column(sa.Column('updated_at', sa.Date(), nullable=True))
def downgrade():
with op.batch_alter_table('config') as batch_op:
batch_op.drop_column('updated_at')
batch_op.drop_column('created_at')
batch_op.drop_column('comment')
|
|
5766e412c18d8b049a48b54e1244a735845055b1
|
scripts/migrate_addons.py
|
scripts/migrate_addons.py
|
import logging
from nose.tools import *
from tests.base import OsfTestCase
from tests.factories import NodeFactory
from website.app import init_app
from website.project.model import Node
logger = logging.getLogger(__name__)
def main():
init_app()
migrate_nodes()
def migrate_addons(node):
ret = False
if not node.has_addon('wiki'):
node.add_addon('wiki', auth=node.creator, log=False)
ret = True
if not node.has_addon('osffiles'):
node.add_addon('osffiles', auth=node.creator, log=False)
ret = True
return ret
def migrate_nodes():
migrated_count = 0
nodes = []
for node in Node.find():
was_migrated = migrate_addons(node)
if was_migrated:
node.save()
nodes.append(node)
logger.info('Migrated {0}'.format(node._id))
migrated_count += 1
logger.info('Finished migrating {0} nodes.'.format(migrated_count))
return nodes
class TestMigratingAddons(OsfTestCase):
def test_migrate_wiki(self):
node = NodeFactory()
(node.get_addon('wiki')).delete(save=True)
assert_false(node.has_addon('wiki'))
was_migrated = migrate_addons(node)
assert_true(was_migrated)
node.save()
assert_true(node.has_addon('wiki'))
def test_migrate_osffiles(self):
node = NodeFactory()
(node.get_addon('osffiles')).delete(save=True)
assert_false(node.has_addon('osffiles'))
was_migrated = migrate_addons(node)
assert_true(was_migrated)
node.save()
assert_true(node.has_addon('osffiles'))
def test_no_migration_if_addon_exists(self):
node = NodeFactory()
assert_true(node.has_addon('wiki'))
assert_true(node.has_addon('osffiles'))
was_migrated = migrate_addons(node)
assert_false(was_migrated)
if __name__ == '__main__':
main()
|
Add migration for wiki and osffiles addons
|
Add migration for wiki and osffiles addons
|
Python
|
apache-2.0
|
himanshuo/osf.io,CenterForOpenScience/osf.io,caneruguz/osf.io,Ghalko/osf.io,laurenrevere/osf.io,monikagrabowska/osf.io,mattclark/osf.io,GageGaskins/osf.io,danielneis/osf.io,brianjgeiger/osf.io,kushG/osf.io,acshi/osf.io,himanshuo/osf.io,pattisdr/osf.io,njantrania/osf.io,baylee-d/osf.io,doublebits/osf.io,GageGaskins/osf.io,haoyuchen1992/osf.io,mluke93/osf.io,hmoco/osf.io,sbt9uc/osf.io,cosenal/osf.io,zachjanicki/osf.io,doublebits/osf.io,CenterForOpenScience/osf.io,jmcarp/osf.io,GageGaskins/osf.io,rdhyee/osf.io,ZobairAlijan/osf.io,njantrania/osf.io,njantrania/osf.io,felliott/osf.io,TomBaxter/osf.io,Ghalko/osf.io,kwierman/osf.io,adlius/osf.io,cwisecarver/osf.io,binoculars/osf.io,HalcyonChimera/osf.io,abought/osf.io,lyndsysimon/osf.io,bdyetton/prettychart,jinluyuan/osf.io,samanehsan/osf.io,ZobairAlijan/osf.io,lamdnhan/osf.io,TomHeatwole/osf.io,haoyuchen1992/osf.io,jnayak1/osf.io,sloria/osf.io,lyndsysimon/osf.io,dplorimer/osf,brianjgeiger/osf.io,brandonPurvis/osf.io,acshi/osf.io,hmoco/osf.io,saradbowman/osf.io,billyhunt/osf.io,chrisseto/osf.io,erinspace/osf.io,revanthkolli/osf.io,cldershem/osf.io,jmcarp/osf.io,jeffreyliu3230/osf.io,lamdnhan/osf.io,kwierman/osf.io,AndrewSallans/osf.io,brandonPurvis/osf.io,barbour-em/osf.io,bdyetton/prettychart,caseyrygt/osf.io,KAsante95/osf.io,chrisseto/osf.io,bdyetton/prettychart,emetsger/osf.io,sloria/osf.io,cslzchen/osf.io,chennan47/osf.io,CenterForOpenScience/osf.io,emetsger/osf.io,monikagrabowska/osf.io,HalcyonChimera/osf.io,cosenal/osf.io,Johnetordoff/osf.io,TomHeatwole/osf.io,caseyrygt/osf.io,kushG/osf.io,monikagrabowska/osf.io,caseyrollins/osf.io,asanfilippo7/osf.io,kch8qx/osf.io,mluo613/osf.io,amyshi188/osf.io,wearpants/osf.io,RomanZWang/osf.io,DanielSBrown/osf.io,billyhunt/osf.io,alexschiller/osf.io,samanehsan/osf.io,kushG/osf.io,adlius/osf.io,GageGaskins/osf.io,Nesiehr/osf.io,njantrania/osf.io,billyhunt/osf.io,ticklemepierce/osf.io,lyndsysimon/osf.io,zamattiac/osf.io,cldershem/osf.io,mluo613/osf.io,Nesiehr/osf.io,kch8qx/osf.io,acshi/osf.io,baylee-d/osf.io,SSJohns/osf.io,acshi/osf.io,haoyuchen1992/osf.io,alexschiller/osf.io,doublebits/osf.io,MerlinZhang/osf.io,reinaH/osf.io,samchrisinger/osf.io,monikagrabowska/osf.io,crcresearch/osf.io,GageGaskins/osf.io,caneruguz/osf.io,caseyrygt/osf.io,ZobairAlijan/osf.io,samchrisinger/osf.io,mattclark/osf.io,laurenrevere/osf.io,jinluyuan/osf.io,cldershem/osf.io,mluke93/osf.io,dplorimer/osf,erinspace/osf.io,cslzchen/osf.io,binoculars/osf.io,GaryKriebel/osf.io,jnayak1/osf.io,petermalcolm/osf.io,petermalcolm/osf.io,CenterForOpenScience/osf.io,samchrisinger/osf.io,fabianvf/osf.io,petermalcolm/osf.io,felliott/osf.io,icereval/osf.io,jmcarp/osf.io,samanehsan/osf.io,RomanZWang/osf.io,amyshi188/osf.io,zamattiac/osf.io,hmoco/osf.io,dplorimer/osf,revanthkolli/osf.io,ckc6cz/osf.io,jinluyuan/osf.io,brandonPurvis/osf.io,ckc6cz/osf.io,mfraezz/osf.io,ckc6cz/osf.io,kwierman/osf.io,HarryRybacki/osf.io,mluo613/osf.io,asanfilippo7/osf.io,GaryKriebel/osf.io,arpitar/osf.io,alexschiller/osf.io,zkraime/osf.io,felliott/osf.io,alexschiller/osf.io,lyndsysimon/osf.io,aaxelb/osf.io,haoyuchen1992/osf.io,jinluyuan/osf.io,baylee-d/osf.io,brianjgeiger/osf.io,ZobairAlijan/osf.io,erinspace/osf.io,mluo613/osf.io,caseyrollins/osf.io,doublebits/osf.io,RomanZWang/osf.io,MerlinZhang/osf.io,binoculars/osf.io,icereval/osf.io,jolene-esposito/osf.io,reinaH/osf.io,leb2dg/osf.io,mluo613/osf.io,dplorimer/osf,arpitar/osf.io,himanshuo/osf.io,zachjanicki/osf.io,HarryRybacki/osf.io,HarryRybacki/osf.io,kwierman/osf.io,caseyrygt/osf.io,jolene-esposito/osf.io,doublebits/o
sf.io,brandonPurvis/osf.io,cwisecarver/osf.io,jolene-esposito/osf.io,jeffreyliu3230/osf.io,GaryKriebel/osf.io,HarryRybacki/osf.io,chrisseto/osf.io,crcresearch/osf.io,RomanZWang/osf.io,pattisdr/osf.io,amyshi188/osf.io,chennan47/osf.io,SSJohns/osf.io,hmoco/osf.io,AndrewSallans/osf.io,revanthkolli/osf.io,mluke93/osf.io,reinaH/osf.io,lamdnhan/osf.io,caneruguz/osf.io,mfraezz/osf.io,Nesiehr/osf.io,Ghalko/osf.io,danielneis/osf.io,DanielSBrown/osf.io,Johnetordoff/osf.io,KAsante95/osf.io,zamattiac/osf.io,wearpants/osf.io,brianjgeiger/osf.io,revanthkolli/osf.io,leb2dg/osf.io,zkraime/osf.io,laurenrevere/osf.io,aaxelb/osf.io,mattclark/osf.io,caneruguz/osf.io,GaryKriebel/osf.io,zachjanicki/osf.io,rdhyee/osf.io,jolene-esposito/osf.io,samanehsan/osf.io,fabianvf/osf.io,pattisdr/osf.io,sbt9uc/osf.io,cwisecarver/osf.io,rdhyee/osf.io,abought/osf.io,jnayak1/osf.io,asanfilippo7/osf.io,Johnetordoff/osf.io,sbt9uc/osf.io,danielneis/osf.io,zkraime/osf.io,brandonPurvis/osf.io,DanielSBrown/osf.io,kch8qx/osf.io,amyshi188/osf.io,arpitar/osf.io,TomHeatwole/osf.io,crcresearch/osf.io,reinaH/osf.io,adlius/osf.io,rdhyee/osf.io,abought/osf.io,kushG/osf.io,mfraezz/osf.io,RomanZWang/osf.io,icereval/osf.io,MerlinZhang/osf.io,fabianvf/osf.io,KAsante95/osf.io,himanshuo/osf.io,caseyrollins/osf.io,fabianvf/osf.io,cosenal/osf.io,chrisseto/osf.io,cosenal/osf.io,jeffreyliu3230/osf.io,kch8qx/osf.io,zkraime/osf.io,lamdnhan/osf.io,cslzchen/osf.io,wearpants/osf.io,abought/osf.io,aaxelb/osf.io,danielneis/osf.io,wearpants/osf.io,MerlinZhang/osf.io,TomBaxter/osf.io,KAsante95/osf.io,jeffreyliu3230/osf.io,mluke93/osf.io,kch8qx/osf.io,cwisecarver/osf.io,arpitar/osf.io,Nesiehr/osf.io,SSJohns/osf.io,KAsante95/osf.io,acshi/osf.io,samchrisinger/osf.io,adlius/osf.io,cslzchen/osf.io,felliott/osf.io,DanielSBrown/osf.io,leb2dg/osf.io,saradbowman/osf.io,barbour-em/osf.io,ticklemepierce/osf.io,petermalcolm/osf.io,Johnetordoff/osf.io,jnayak1/osf.io,Ghalko/osf.io,leb2dg/osf.io,TomHeatwole/osf.io,alexschiller/osf.io,barbour-em/osf.io,monikagrabowska/osf.io,ckc6cz/osf.io,billyhunt/osf.io,sbt9uc/osf.io,billyhunt/osf.io,mfraezz/osf.io,emetsger/osf.io,TomBaxter/osf.io,bdyetton/prettychart,zachjanicki/osf.io,cldershem/osf.io,ticklemepierce/osf.io,zamattiac/osf.io,jmcarp/osf.io,aaxelb/osf.io,sloria/osf.io,HalcyonChimera/osf.io,emetsger/osf.io,SSJohns/osf.io,barbour-em/osf.io,chennan47/osf.io,HalcyonChimera/osf.io,asanfilippo7/osf.io,ticklemepierce/osf.io
|
Add migration for wiki and osffiles addons
|
import logging
from nose.tools import *
from tests.base import OsfTestCase
from tests.factories import NodeFactory
from website.app import init_app
from website.project.model import Node
logger = logging.getLogger(__name__)
def main():
init_app()
migrate_nodes()
def migrate_addons(node):
ret = False
if not node.has_addon('wiki'):
node.add_addon('wiki', auth=node.creator, log=False)
ret = True
if not node.has_addon('osffiles'):
node.add_addon('osffiles', auth=node.creator, log=False)
ret = True
return ret
def migrate_nodes():
migrated_count = 0
nodes = []
for node in Node.find():
was_migrated = migrate_addons(node)
if was_migrated:
node.save()
nodes.append(node)
logger.info('Migrated {0}'.format(node._id))
migrated_count += 1
logger.info('Finished migrating {0} nodes.'.format(migrated_count))
return nodes
class TestMigratingAddons(OsfTestCase):
def test_migrate_wiki(self):
node = NodeFactory()
(node.get_addon('wiki')).delete(save=True)
assert_false(node.has_addon('wiki'))
was_migrated = migrate_addons(node)
assert_true(was_migrated)
node.save()
assert_true(node.has_addon('wiki'))
def test_migrate_osffiles(self):
node = NodeFactory()
(node.get_addon('osffiles')).delete(save=True)
assert_false(node.has_addon('osffiles'))
was_migrated = migrate_addons(node)
assert_true(was_migrated)
node.save()
assert_true(node.has_addon('osffiles'))
def test_no_migration_if_addon_exists(self):
node = NodeFactory()
assert_true(node.has_addon('wiki'))
assert_true(node.has_addon('osffiles'))
was_migrated = migrate_addons(node)
assert_false(was_migrated)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add migration for wiki and osffiles addons<commit_after>
|
import logging
from nose.tools import *
from tests.base import OsfTestCase
from tests.factories import NodeFactory
from website.app import init_app
from website.project.model import Node
logger = logging.getLogger(__name__)
def main():
init_app()
migrate_nodes()
def migrate_addons(node):
ret = False
if not node.has_addon('wiki'):
node.add_addon('wiki', auth=node.creator, log=False)
ret = True
if not node.has_addon('osffiles'):
node.add_addon('osffiles', auth=node.creator, log=False)
ret = True
return ret
def migrate_nodes():
migrated_count = 0
nodes = []
for node in Node.find():
was_migrated = migrate_addons(node)
if was_migrated:
node.save()
nodes.append(node)
logger.info('Migrated {0}'.format(node._id))
migrated_count += 1
logger.info('Finished migrating {0} nodes.'.format(migrated_count))
return nodes
class TestMigratingAddons(OsfTestCase):
def test_migrate_wiki(self):
node = NodeFactory()
(node.get_addon('wiki')).delete(save=True)
assert_false(node.has_addon('wiki'))
was_migrated = migrate_addons(node)
assert_true(was_migrated)
node.save()
assert_true(node.has_addon('wiki'))
def test_migrate_osffiles(self):
node = NodeFactory()
(node.get_addon('osffiles')).delete(save=True)
assert_false(node.has_addon('osffiles'))
was_migrated = migrate_addons(node)
assert_true(was_migrated)
node.save()
assert_true(node.has_addon('osffiles'))
def test_no_migration_if_addon_exists(self):
node = NodeFactory()
assert_true(node.has_addon('wiki'))
assert_true(node.has_addon('osffiles'))
was_migrated = migrate_addons(node)
assert_false(was_migrated)
if __name__ == '__main__':
main()
|
Add migration for wiki and osffiles addonsimport logging
from nose.tools import *
from tests.base import OsfTestCase
from tests.factories import NodeFactory
from website.app import init_app
from website.project.model import Node
logger = logging.getLogger(__name__)
def main():
init_app()
migrate_nodes()
def migrate_addons(node):
ret = False
if not node.has_addon('wiki'):
node.add_addon('wiki', auth=node.creator, log=False)
ret = True
if not node.has_addon('osffiles'):
node.add_addon('osffiles', auth=node.creator, log=False)
ret = True
return ret
def migrate_nodes():
migrated_count = 0
nodes = []
for node in Node.find():
was_migrated = migrate_addons(node)
if was_migrated:
node.save()
nodes.append(node)
logger.info('Migrated {0}'.format(node._id))
migrated_count += 1
logger.info('Finished migrating {0} nodes.'.format(migrated_count))
return nodes
class TestMigratingAddons(OsfTestCase):
def test_migrate_wiki(self):
node = NodeFactory()
(node.get_addon('wiki')).delete(save=True)
assert_false(node.has_addon('wiki'))
was_migrated = migrate_addons(node)
assert_true(was_migrated)
node.save()
assert_true(node.has_addon('wiki'))
def test_migrate_osffiles(self):
node = NodeFactory()
(node.get_addon('osffiles')).delete(save=True)
assert_false(node.has_addon('osffiles'))
was_migrated = migrate_addons(node)
assert_true(was_migrated)
node.save()
assert_true(node.has_addon('osffiles'))
def test_no_migration_if_addon_exists(self):
node = NodeFactory()
assert_true(node.has_addon('wiki'))
assert_true(node.has_addon('osffiles'))
was_migrated = migrate_addons(node)
assert_false(was_migrated)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add migration for wiki and osffiles addons<commit_after>import logging
from nose.tools import *
from tests.base import OsfTestCase
from tests.factories import NodeFactory
from website.app import init_app
from website.project.model import Node
logger = logging.getLogger(__name__)
def main():
init_app()
migrate_nodes()
def migrate_addons(node):
ret = False
if not node.has_addon('wiki'):
node.add_addon('wiki', auth=node.creator, log=False)
ret = True
if not node.has_addon('osffiles'):
node.add_addon('osffiles', auth=node.creator, log=False)
ret = True
return ret
def migrate_nodes():
migrated_count = 0
nodes = []
for node in Node.find():
was_migrated = migrate_addons(node)
if was_migrated:
node.save()
nodes.append(node)
logger.info('Migrated {0}'.format(node._id))
migrated_count += 1
logger.info('Finished migrating {0} nodes.'.format(migrated_count))
return nodes
class TestMigratingAddons(OsfTestCase):
def test_migrate_wiki(self):
node = NodeFactory()
(node.get_addon('wiki')).delete(save=True)
assert_false(node.has_addon('wiki'))
was_migrated = migrate_addons(node)
assert_true(was_migrated)
node.save()
assert_true(node.has_addon('wiki'))
def test_migrate_osffiles(self):
node = NodeFactory()
(node.get_addon('osffiles')).delete(save=True)
assert_false(node.has_addon('osffiles'))
was_migrated = migrate_addons(node)
assert_true(was_migrated)
node.save()
assert_true(node.has_addon('osffiles'))
def test_no_migration_if_addon_exists(self):
node = NodeFactory()
assert_true(node.has_addon('wiki'))
assert_true(node.has_addon('osffiles'))
was_migrated = migrate_addons(node)
assert_false(was_migrated)
if __name__ == '__main__':
main()
|
|
8e285914db46c2acc67e455eb09100ed1d39b32e
|
rmq_utils/common.py
|
rmq_utils/common.py
|
from pyrabbit.api import Client
from exceptions import InvalidUser
def connect_to_management_api(host, user, password):
client = Client(host, user, password)
if not client.has_admin_rights:
raise InvalidUser('User must have admin rights')
return client
|
Connect to the management API
|
Connect to the management API
|
Python
|
mit
|
projectweekend/RMQ-Utils
|
Connect to the management API
|
from pyrabbit.api import Client
from exceptions import InvalidUser
def connect_to_management_api(host, user, password):
client = Client(host, user, password)
if not client.has_admin_rights:
raise InvalidUser('User must have admin rights')
return client
|
<commit_before><commit_msg>Connect to the management API<commit_after>
|
from pyrabbit.api import Client
from exceptions import InvalidUser
def connect_to_management_api(host, user, password):
client = Client(host, user, password)
if not client.has_admin_rights:
raise InvalidUser('User must have admin rights')
return client
|
Connect to the management APIfrom pyrabbit.api import Client
from exceptions import InvalidUser
def connect_to_management_api(host, user, password):
client = Client(host, user, password)
if not client.has_admin_rights:
raise InvalidUser('User must have admin rights')
return client
|
<commit_before><commit_msg>Connect to the management API<commit_after>from pyrabbit.api import Client
from exceptions import InvalidUser
def connect_to_management_api(host, user, password):
client = Client(host, user, password)
if not client.has_admin_rights:
raise InvalidUser('User must have admin rights')
return client
|
|
bfa56bf9ee0e66d397652db0750a0c99c4437082
|
hevector.py
|
hevector.py
|
INNER_PRODUCT = '*'
ADD = '+'
def tupleToVec(t):
if type(t) is int: return str(t)
return '[%s]' % ' '.join(map(tupleToVec,t))
def vecToTuple(v):
return tuple(map(int, v.strip('[]').split()))
def send(ops):
return '\n'.join(v if type(v) is str else tupleToVec(v) for v in ops)
def recv(output):
return tuple(vecToTuple(l) for l in map(str.strip, output.splitlines()))
def evaluate(operations):
import subprocess
output, error = subprocess.Popen(['iven'], shell=True).communicate(send(operations))
if error:
from sys import stderr
stderr.write(error + '\n')
stderr.flush()
return recv(output)
|
Add initial python vector interface.
|
Add initial python vector interface.
|
Python
|
mit
|
jamespayor/vector-homomorphic-encryption,jamespayor/vector-homomorphic-encryption,jamespayor/vector-homomorphic-encryption
|
Add initial python vector interface.
|
INNER_PRODUCT = '*'
ADD = '+'
def tupleToVec(t):
if type(t) is int: return str(t)
return '[%s]' % ' '.join(map(tupleToVec,t))
def vecToTuple(v):
return tuple(map(int, v.strip('[]').split()))
def send(ops):
return '\n'.join(v if type(v) is str else tupleToVec(v) for v in ops)
def recv(output):
return tuple(vecToTuple(l) for l in map(str.strip, output.splitlines()))
def evaluate(operations):
import subprocess
output, error = subprocess.Popen(['iven'], shell=True).communicate(send(operations))
if error:
from sys import stderr
stderr.write(error + '\n')
stderr.flush()
return recv(output)
|
<commit_before><commit_msg>Add initial python vector interface.<commit_after>
|
INNER_PRODUCT = '*'
ADD = '+'
def tupleToVec(t):
if type(t) is int: return str(t)
return '[%s]' % ' '.join(map(tupleToVec,t))
def vecToTuple(v):
return tuple(map(int, v.strip('[]').split()))
def send(ops):
return '\n'.join(v if type(v) is str else tupleToVec(v) for v in ops)
def recv(output):
return tuple(vecToTuple(l) for l in map(str.strip, output.splitlines()))
def evaluate(operations):
import subprocess
output, error = subprocess.Popen(['iven'], shell=True).communicate(send(operations))
if error:
from sys import stderr
stderr.write(error + '\n')
stderr.flush()
return recv(output)
|
Add initial python vector interface.
INNER_PRODUCT = '*'
ADD = '+'
def tupleToVec(t):
if type(t) is int: return str(t)
return '[%s]' % ' '.join(map(tupleToVec,t))
def vecToTuple(v):
return tuple(map(int, v.strip('[]').split()))
def send(ops):
return '\n'.join(v if type(v) is str else tupleToVec(v) for v in ops)
def recv(output):
return tuple(vecToTuple(l) for l in map(str.strip, output.splitlines()))
def evaluate(operations):
import subprocess
output, error = subprocess.Popen(['iven'], shell=True).communicate(send(operations))
if error:
from sys import stderr
stderr.write(error + '\n')
stderr.flush()
return recv(output)
|
<commit_before><commit_msg>Add initial python vector interface.<commit_after>
INNER_PRODUCT = '*'
ADD = '+'
def tupleToVec(t):
if type(t) is int: return str(t)
return '[%s]' % ' '.join(map(tupleToVec,t))
def vecToTuple(v):
return tuple(map(int, v.strip('[]').split()))
def send(ops):
return '\n'.join(v if type(v) is str else tupleToVec(v) for v in ops)
def recv(output):
return tuple(vecToTuple(l) for l in map(str.strip, output.splitlines()))
def evaluate(operations):
import subprocess
output, error = subprocess.Popen(['iven'], shell=True).communicate(send(operations))
if error:
from sys import stderr
stderr.write(error + '\n')
stderr.flush()
return recv(output)
|
|
138bb2b3e1188463d88edb176e26c1c2f633207d
|
entity_history/migrations/0003_update_triggers.py
|
entity_history/migrations/0003_update_triggers.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from entity_history.sql.triggers import EntityActivationTrigger, EntityRelationshipActivationTrigger
def refresh_entity_activation_trigger(*args, **kwargs):
EntityActivationTrigger().disable()
EntityActivationTrigger().enable()
def refresh_entity_relationship_activation_trigger(*args, **kwargs):
EntityRelationshipActivationTrigger().disable()
EntityRelationshipActivationTrigger().enable()
class Migration(migrations.Migration):
dependencies = [
('entity_history', '0002_auto_20150406_1605'),
]
operations = [
migrations.RunPython(
code=refresh_entity_activation_trigger,
reverse_code=refresh_entity_relationship_activation_trigger
)
]
|
Add a migration for updating the triggers
|
Add a migration for updating the triggers
|
Python
|
mit
|
ambitioninc/django-entity-history,jaredlewis/django-entity-history
|
Add a migration for updating the triggers
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from entity_history.sql.triggers import EntityActivationTrigger, EntityRelationshipActivationTrigger
def refresh_entity_activation_trigger(*args, **kwargs):
EntityActivationTrigger().disable()
EntityActivationTrigger().enable()
def refresh_entity_relationship_activation_trigger(*args, **kwargs):
EntityRelationshipActivationTrigger().disable()
EntityRelationshipActivationTrigger().enable()
class Migration(migrations.Migration):
dependencies = [
('entity_history', '0002_auto_20150406_1605'),
]
operations = [
migrations.RunPython(
code=refresh_entity_activation_trigger,
reverse_code=refresh_entity_relationship_activation_trigger
)
]
|
<commit_before><commit_msg>Add a migration for updating the triggers<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from entity_history.sql.triggers import EntityActivationTrigger, EntityRelationshipActivationTrigger
def refresh_entity_activation_trigger(*args, **kwargs):
EntityActivationTrigger().disable()
EntityActivationTrigger().enable()
def refresh_entity_relationship_activation_trigger(*args, **kwargs):
EntityRelationshipActivationTrigger().disable()
EntityRelationshipActivationTrigger().enable()
class Migration(migrations.Migration):
dependencies = [
('entity_history', '0002_auto_20150406_1605'),
]
operations = [
migrations.RunPython(
code=refresh_entity_activation_trigger,
reverse_code=refresh_entity_relationship_activation_trigger
)
]
|
Add a migration for updating the triggers# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from entity_history.sql.triggers import EntityActivationTrigger, EntityRelationshipActivationTrigger
def refresh_entity_activation_trigger(*args, **kwargs):
EntityActivationTrigger().disable()
EntityActivationTrigger().enable()
def refresh_entity_relationship_activation_trigger(*args, **kwargs):
EntityRelationshipActivationTrigger().disable()
EntityRelationshipActivationTrigger().enable()
class Migration(migrations.Migration):
dependencies = [
('entity_history', '0002_auto_20150406_1605'),
]
operations = [
migrations.RunPython(
code=refresh_entity_activation_trigger,
reverse_code=refresh_entity_relationship_activation_trigger
)
]
|
<commit_before><commit_msg>Add a migration for updating the triggers<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from entity_history.sql.triggers import EntityActivationTrigger, EntityRelationshipActivationTrigger
def refresh_entity_activation_trigger(*args, **kwargs):
EntityActivationTrigger().disable()
EntityActivationTrigger().enable()
def refresh_entity_relationship_activation_trigger(*args, **kwargs):
EntityRelationshipActivationTrigger().disable()
EntityRelationshipActivationTrigger().enable()
class Migration(migrations.Migration):
dependencies = [
('entity_history', '0002_auto_20150406_1605'),
]
operations = [
migrations.RunPython(
code=refresh_entity_activation_trigger,
reverse_code=refresh_entity_relationship_activation_trigger
)
]
|
|
11ae4f118f00e053d882340eb2c031234fce60bd
|
lin_reg_gradient_desc.py
|
lin_reg_gradient_desc.py
|
# Implementing Gradient Descent using TensorFlow
import numpy as np
import tensorflow as tf
from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler
# Get data
housing = fetch_california_housing()
m, n = housing.data.shape
# Learning parameters
n_epochs = 2500
learning_rate = 0.025
# Transform data into usable tensors, set up theta
scaled_X = StandardScaler().fit_transform(housing.data)
housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_X]
X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name='X')
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name='y')
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0), name='theta')
# Construct graph
y_pred = tf.matmul(X, theta, name='y_pred')
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name='mse')
# gradients = (2/m) * tf.matmul(tf.transpose(X), error)
# Use autodiff instead
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
# Alternate optimization
optimizer2 = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
training_op = optimizer.minimize(mse)
init = tf.global_variables_initializer()
# Computation
with tf.Session() as sess:
sess.run(init)
print("Learning rate: ", learning_rate)
print(theta.eval())
for epoch in range(n_epochs):
if epoch % 100 == 0:
print('Epoch ', epoch, 'MSE = ', mse.eval())
sess.run(training_op)
best_theta = theta.eval()
print(best_theta)
|
Complete linear regression using TF
|
Complete linear regression using TF
Linear regression using gradient descent
|
Python
|
mit
|
KT12/hands_on_machine_learning
|
Complete linear regression using TF
Linear regression using gradient descent
|
# Implementing Gradient Descent using TensorFlow
import numpy as np
import tensorflow as tf
from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler
# Get data
housing = fetch_california_housing()
m, n = housing.data.shape
# Learning parameters
n_epochs = 2500
learning_rate = 0.025
# Transform data into usable tensors, set up theta
scaled_X = StandardScaler().fit_transform(housing.data)
housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_X]
X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name='X')
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name='y')
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0), name='theta')
# Construct graph
y_pred = tf.matmul(X, theta, name='y_pred')
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name='mse')
# gradients = (2/m) * tf.matmul(tf.transpose(X), error)
# Use autodiff instead
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
# Alternate optimization
optimizer2 = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
training_op = optimizer.minimize(mse)
init = tf.global_variables_initializer()
# Computation
with tf.Session() as sess:
sess.run(init)
print("Learning rate: ", learning_rate)
print(theta.eval())
for epoch in range(n_epochs):
if epoch % 100 == 0:
print('Epoch ', epoch, 'MSE = ', mse.eval())
sess.run(training_op)
best_theta = theta.eval()
print(best_theta)
|
<commit_before><commit_msg>Complete linear regression using TF
Linear regression using gradient descent<commit_after>
|
# Implementing Gradient Descent using TensorFlow
import numpy as np
import tensorflow as tf
from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler
# Get data
housing = fetch_california_housing()
m, n = housing.data.shape
# Learning parameters
n_epochs = 2500
learning_rate = 0.025
# Transform data into usable tensors, set up theta
scaled_X = StandardScaler().fit_transform(housing.data)
housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_X]
X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name='X')
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name='y')
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0), name='theta')
# Construct graph
y_pred = tf.matmul(X, theta, name='y_pred')
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name='mse')
# gradients = (2/m) * tf.matmul(tf.transpose(X), error)
# Use autodiff instead
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
# Alternate optimization
optimizer2 = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
training_op = optimizer.minimize(mse)
init = tf.global_variables_initializer()
# Computation
with tf.Session() as sess:
sess.run(init)
print("Learning rate: ", learning_rate)
print(theta.eval())
for epoch in range(n_epochs):
if epoch % 100 == 0:
print('Epoch ', epoch, 'MSE = ', mse.eval())
sess.run(training_op)
best_theta = theta.eval()
print(best_theta)
|
Complete linear regression using TF
Linear regression using gradient descent# Implementing Gradient Descent using TensorFlow
import numpy as np
import tensorflow as tf
from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler
# Get data
housing = fetch_california_housing()
m, n = housing.data.shape
# Learning parameters
n_epochs = 2500
learning_rate = 0.025
# Transform data into usable tensors, set up theta
scaled_X = StandardScaler().fit_transform(housing.data)
housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_X]
X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name='X')
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name='y')
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0), name='theta')
# Construct graph
y_pred = tf.matmul(X, theta, name='y_pred')
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name='mse')
# gradients = (2/m) * tf.matmul(tf.transpose(X), error)
# Use autodiff instead
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
# Alternate optimization
optimizer2 = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
training_op = optimizer.minimize(mse)
init = tf.global_variables_initializer()
# Computation
with tf.Session() as sess:
sess.run(init)
print("Learning rate: ", learning_rate)
print(theta.eval())
for epoch in range(n_epochs):
if epoch % 100 == 0:
print('Epoch ', epoch, 'MSE = ', mse.eval())
sess.run(training_op)
best_theta = theta.eval()
print(best_theta)
|
<commit_before><commit_msg>Complete linear regression using TF
Linear regression using gradient descent<commit_after># Implementing Gradient Descent using TensorFlow
import numpy as np
import tensorflow as tf
from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler
# Get data
housing = fetch_california_housing()
m, n = housing.data.shape
# Learning parameters
n_epochs = 2500
learning_rate = 0.025
# Transform data into usable tensors, set up theta
scaled_X = StandardScaler().fit_transform(housing.data)
housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_X]
X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name='X')
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name='y')
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0), name='theta')
# Construct graph
y_pred = tf.matmul(X, theta, name='y_pred')
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name='mse')
# gradients = (2/m) * tf.matmul(tf.transpose(X), error)
# Use autodiff instead
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
# Alternate optimization
optimizer2 = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
training_op = optimizer.minimize(mse)
init = tf.global_variables_initializer()
# Computation
with tf.Session() as sess:
sess.run(init)
print("Learning rate: ", learning_rate)
print(theta.eval())
for epoch in range(n_epochs):
if epoch % 100 == 0:
print('Epoch ', epoch, 'MSE = ', mse.eval())
sess.run(training_op)
best_theta = theta.eval()
print(best_theta)
|
|
9773aea7e75fd4eeed5ff9e539abe749f96bdfbf
|
migrations/versions/56fbf79e705_add_account_id_to_li.py
|
migrations/versions/56fbf79e705_add_account_id_to_li.py
|
"""Add account_id to list
Revision ID: 56fbf79e705
Revises: 4d93f81e7c0
Create Date: 2013-09-07 16:54:04.163852
"""
# revision identifiers, used by Alembic.
revision = '56fbf79e705'
down_revision = '4d93f81e7c0'
from alembic import op
import sqlalchemy as sa
def upgrade():
# XXX(gmwils): Works if an account exists with id=1
op.add_column('list',
sa.Column('account_id',
sa.Integer,
sa.ForeignKey("account.id", onupdate="CASCADE", ondelete="CASCADE"),
nullable=False,
server_default='1'))
op.alter_column('list', 'account_id', server_default=None)
def downgrade():
op.drop_column('list', 'account_id')
|
Add lists to an account in data schema
|
Add lists to an account in data schema
|
Python
|
mit
|
gmwils/cihui
|
Add lists to an account in data schema
|
"""Add account_id to list
Revision ID: 56fbf79e705
Revises: 4d93f81e7c0
Create Date: 2013-09-07 16:54:04.163852
"""
# revision identifiers, used by Alembic.
revision = '56fbf79e705'
down_revision = '4d93f81e7c0'
from alembic import op
import sqlalchemy as sa
def upgrade():
# XXX(gmwils): Works if an account exists with id=1
op.add_column('list',
sa.Column('account_id',
sa.Integer,
sa.ForeignKey("account.id", onupdate="CASCADE", ondelete="CASCADE"),
nullable=False,
server_default='1'))
op.alter_column('list', 'account_id', server_default=None)
def downgrade():
op.drop_column('list', 'account_id')
|
<commit_before><commit_msg>Add lists to an account in data schema<commit_after>
|
"""Add account_id to list
Revision ID: 56fbf79e705
Revises: 4d93f81e7c0
Create Date: 2013-09-07 16:54:04.163852
"""
# revision identifiers, used by Alembic.
revision = '56fbf79e705'
down_revision = '4d93f81e7c0'
from alembic import op
import sqlalchemy as sa
def upgrade():
# XXX(gmwils): Works if an account exists with id=1
op.add_column('list',
sa.Column('account_id',
sa.Integer,
sa.ForeignKey("account.id", onupdate="CASCADE", ondelete="CASCADE"),
nullable=False,
server_default='1'))
op.alter_column('list', 'account_id', server_default=None)
def downgrade():
op.drop_column('list', 'account_id')
|
Add lists to an account in data schema"""Add account_id to list
Revision ID: 56fbf79e705
Revises: 4d93f81e7c0
Create Date: 2013-09-07 16:54:04.163852
"""
# revision identifiers, used by Alembic.
revision = '56fbf79e705'
down_revision = '4d93f81e7c0'
from alembic import op
import sqlalchemy as sa
def upgrade():
# XXX(gmwils): Works if an account exists with id=1
op.add_column('list',
sa.Column('account_id',
sa.Integer,
sa.ForeignKey("account.id", onupdate="CASCADE", ondelete="CASCADE"),
nullable=False,
server_default='1'))
op.alter_column('list', 'account_id', server_default=None)
def downgrade():
op.drop_column('list', 'account_id')
|
<commit_before><commit_msg>Add lists to an account in data schema<commit_after>"""Add account_id to list
Revision ID: 56fbf79e705
Revises: 4d93f81e7c0
Create Date: 2013-09-07 16:54:04.163852
"""
# revision identifiers, used by Alembic.
revision = '56fbf79e705'
down_revision = '4d93f81e7c0'
from alembic import op
import sqlalchemy as sa
def upgrade():
# XXX(gmwils): Works if an account exists with id=1
op.add_column('list',
sa.Column('account_id',
sa.Integer,
sa.ForeignKey("account.id", onupdate="CASCADE", ondelete="CASCADE"),
nullable=False,
server_default='1'))
op.alter_column('list', 'account_id', server_default=None)
def downgrade():
op.drop_column('list', 'account_id')
|
|
f24534ac03d77443e6dd3df37894c17f0999a03b
|
tests/simplehtmlparser.py
|
tests/simplehtmlparser.py
|
class SimpleHTMLParser(object):
"""
A simple HTML parser for testing.
Not suitable for harsh cases, and the time efficiency is not considered.
Examples:
<tag key1="val1" key2>text</tag>
<tag />
"""
def __init__(self):
pass
def parse(self, content, parent=None):
if parent is None:
_, children = self.parse(content, self.body)
self.body = {
'trunk': ('body'),
'leaves': []
}
return
if content == '':
return None
i = 0
while i < len(content):
first = i
while i < len(content) and content[i] != '<':
i += 1
if first != i:
text = content[first:i]
parent['leaves'].append(('text', text))
if i == len(content):
break
first = i
while content[i] != '>':
i += 1
parts = map(lambda x: len(x) > 0, content[first + 1:i].split(' '))
if parts[0] == '/':
return i + 1
attrs = {}
for part in parts[1:]:
if part != '/':
if '=' in part:
key, val = part.split('=')
attrs[key] = val
else:
attrs[part] = None
if parts[-1] == '/':
parent['leaves'].append(('tag', parts[0], attrs))
else:
leaf = {
'trunk': ('tag', parts[0], attrs),
'leaves': [],
}
i += self.parse(content[i + 1:], leaf)
parent['leaves'].append(leaf)
return len(content)
|
Add a simple HTML parser for testing.
|
Add a simple HTML parser for testing.
Signed-off-by: CyberZHG <9ba3659efa42549cffa53a0bc35ef0cabfb20593@gmail.com>
|
Python
|
agpl-3.0
|
fakepoet/markdown.py,fakepoet/markdown.py
|
Add a simple HTML parser for testing.
Signed-off-by: CyberZHG <9ba3659efa42549cffa53a0bc35ef0cabfb20593@gmail.com>
|
class SimpleHTMLParser(object):
"""
A simple HTML parser for testing.
Not suitable for harsh cases, and the time efficiency is not considered.
Examples:
<tag key1="val1" key2>text</tag>
<tag />
"""
def __init__(self):
pass
def parse(self, content, parent=None):
if parent is None:
_, children = self.parse(content, self.body)
self.body = {
'trunk': ('body'),
'leaves': []
}
return
if content == '':
return None
i = 0
while i < len(content):
first = i
while i < len(content) and content[i] != '<':
i += 1
if first != i:
text = content[first:i]
parent['leaves'].append(('text', text))
if i == len(content):
break
first = i
while content[i] != '>':
i += 1
parts = map(lambda x: len(x) > 0, content[first + 1:i].split(' '))
if parts[0] == '/':
return i + 1
attrs = {}
for part in parts[1:]:
if part != '/':
if '=' in part:
key, val = part.split('=')
attrs[key] = val
else:
attrs[part] = None
if parts[-1] == '/':
parent['leaves'].append(('tag', parts[0], attrs))
else:
leaf = {
'trunk': ('tag', parts[0], attrs),
'leaves': [],
}
i += self.parse(content[i + 1:], leaf)
parent['leaves'].append(leaf)
return len(content)
|
<commit_before><commit_msg>Add a simple HTML parser for testing.
Signed-off-by: CyberZHG <9ba3659efa42549cffa53a0bc35ef0cabfb20593@gmail.com><commit_after>
|
class SimpleHTMLParser(object):
"""
A simple HTML parser for testing.
Not suitable for harsh cases, and the time efficiency is not considered.
Examples:
<tag key1="val1" key2>text</tag>
<tag />
"""
def __init__(self):
pass
def parse(self, content, parent=None):
if parent is None:
_, children = self.parse(content, self.body)
self.body = {
'trunk': ('body'),
'leaves': []
}
return
if content == '':
return None
i = 0
while i < len(content):
first = i
while i < len(content) and content[i] != '<':
i += 1
if first != i:
text = content[first:i]
parent['leaves'].append(('text', text))
if i == len(content):
break
first = i
while content[i] != '>':
i += 1
parts = map(lambda x: len(x) > 0, content[first + 1:i].split(' '))
if parts[0] == '/':
return i + 1
attrs = {}
for part in parts[1:]:
if part != '/':
if '=' in part:
key, val = part.split('=')
attrs[key] = val
else:
attrs[part] = None
if parts[-1] == '/':
parent['leaves'].append(('tag', parts[0], attrs))
else:
leaf = {
'trunk': ('tag', parts[0], attrs),
'leaves': [],
}
i += self.parse(content[i + 1:], leaf)
parent['leaves'].append(leaf)
return len(content)
|
Add a simple HTML parser for testing.
Signed-off-by: CyberZHG <9ba3659efa42549cffa53a0bc35ef0cabfb20593@gmail.com>
class SimpleHTMLParser(object):
"""
A simple HTML parser for testing.
Not suitable for harsh cases, and the time efficiency is not considered.
Examples:
<tag key1="val1" key2>text</tag>
<tag />
"""
def __init__(self):
pass
def parse(self, content, parent=None):
if parent is None:
_, children = self.parse(content, self.body)
self.body = {
'trunk': ('body'),
'leaves': []
}
return
if content == '':
return None
i = 0
while i < len(content):
first = i
while i < len(content) and content[i] != '<':
i += 1
if first != i:
text = content[first:i]
parent['leaves'].append(('text', text))
if i == len(content):
break
first = i
while content[i] != '>':
i += 1
parts = map(lambda x: len(x) > 0, content[first + 1:i].split(' '))
if parts[0] == '/':
return i + 1
attrs = {}
for part in parts[1:]:
if part != '/':
if '=' in part:
key, val = part.split('=')
attrs[key] = val
else:
attrs[part] = None
if parts[-1] == '/':
parent['leaves'].append(('tag', parts[0], attrs))
else:
leaf = {
'trunk': ('tag', parts[0], attrs),
'leaves': [],
}
i += self.parse(content[i + 1:], leaf)
parent['leaves'].append(leaf)
return len(content)
|
<commit_before><commit_msg>Add a simple HTML parser for testing.
Signed-off-by: CyberZHG <9ba3659efa42549cffa53a0bc35ef0cabfb20593@gmail.com><commit_after>
class SimpleHTMLParser(object):
"""
A simple HTML parser for testing.
Not suitable for harsh cases, and the time efficiency is not considered.
Examples:
<tag key1="val1" key2>text</tag>
<tag />
"""
def __init__(self):
pass
def parse(self, content, parent=None):
if parent is None:
_, children = self.parse(content, self.body)
self.body = {
'trunk': ('body'),
'leaves': []
}
return
if content == '':
return None
i = 0
while i < len(content):
first = i
while i < len(content) and content[i] != '<':
i += 1
if first != i:
text = content[first:i]
parent['leaves'].append(('text', text))
if i == len(content):
break
first = i
while content[i] != '>':
i += 1
parts = map(lambda x: len(x) > 0, content[first + 1:i].split(' '))
if parts[0] == '/':
return i + 1
attrs = {}
for part in parts[1:]:
if part != '/':
if '=' in part:
key, val = part.split('=')
attrs[key] = val
else:
attrs[part] = None
if parts[-1] == '/':
parent['leaves'].append(('tag', parts[0], attrs))
else:
leaf = {
'trunk': ('tag', parts[0], attrs),
'leaves': [],
}
i += self.parse(content[i + 1:], leaf)
parent['leaves'].append(leaf)
return len(content)
|
|
bbe2f218fb738a32db9f12d308e729712146c18d
|
src/unittest/python/test_garbage_collection.py
|
src/unittest/python/test_garbage_collection.py
|
# coding=utf-8
#
# fysom - pYthOn Finite State Machine - this is a port of Jake
# Gordon's javascript-state-machine to python
# https://github.com/jakesgordon/javascript-state-machine
#
# Copyright (C) 2011 Mansour Behabadi <mansour@oxplot.com>, Jake Gordon
# and other contributors
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import unittest
import gc
from fysom import Fysom
class FysomGarbageCollectionTests(unittest.TestCase):
def test_should_not_create_circular_ref(self):
class MyTestObject(object):
def __init__(self):
self._states = []
self._fsm = Fysom({
'initial': 'green',
'events': [
{'name': 'warn', 'src': 'green', 'dst': 'yellow'},
{'name': 'panic', 'src': 'yellow', 'dst': 'red'},
{'name': 'calm', 'src': 'red', 'dst': 'yellow'},
{'name': 'clear', 'src': 'yellow', 'dst': 'green'}
],
'callbacks': {
'ongreen': self._on_green,
'onyellow': self._on_yellow,
'onred': self._on_red
}
})
def warn(self):
self._fsm.warn()
def panic(self):
self._fsm.panic()
def calm(self):
self._fsm.calm()
def clear(self):
self._fsm.clear()
def _on_green(self, *args, **kwargs):
self._states.append('green')
def _on_yellow(self, *args, **kwargs):
self._states.append('yellow')
def _on_red(self, *args, **kwargs):
self._states.append('red')
obj = MyTestObject()
obj.warn()
obj.clear()
del obj
self.assertEqual(filter(lambda o : isinstance(o, MyTestObject),
gc.get_objects()), [])
|
Add unit test for garbage collection
|
Add unit test for garbage collection
|
Python
|
mit
|
mriehl/fysom
|
Add unit test for garbage collection
|
# coding=utf-8
#
# fysom - pYthOn Finite State Machine - this is a port of Jake
# Gordon's javascript-state-machine to python
# https://github.com/jakesgordon/javascript-state-machine
#
# Copyright (C) 2011 Mansour Behabadi <mansour@oxplot.com>, Jake Gordon
# and other contributors
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import unittest
import gc
from fysom import Fysom
class FysomGarbageCollectionTests(unittest.TestCase):
def test_should_not_create_circular_ref(self):
class MyTestObject(object):
def __init__(self):
self._states = []
self._fsm = Fysom({
'initial': 'green',
'events': [
{'name': 'warn', 'src': 'green', 'dst': 'yellow'},
{'name': 'panic', 'src': 'yellow', 'dst': 'red'},
{'name': 'calm', 'src': 'red', 'dst': 'yellow'},
{'name': 'clear', 'src': 'yellow', 'dst': 'green'}
],
'callbacks': {
'ongreen': self._on_green,
'onyellow': self._on_yellow,
'onred': self._on_red
}
})
def warn(self):
self._fsm.warn()
def panic(self):
self._fsm.panic()
def calm(self):
self._fsm.calm()
def clear(self):
self._fsm.clear()
def _on_green(self, *args, **kwargs):
self._states.append('green')
def _on_yellow(self, *args, **kwargs):
self._states.append('yellow')
def _on_red(self, *args, **kwargs):
self._states.append('red')
obj = MyTestObject()
obj.warn()
obj.clear()
del obj
self.assertEqual(filter(lambda o : isinstance(o, MyTestObject),
gc.get_objects()), [])
|
<commit_before><commit_msg>Add unit test for garbage collection<commit_after>
|
# coding=utf-8
#
# fysom - pYthOn Finite State Machine - this is a port of Jake
# Gordon's javascript-state-machine to python
# https://github.com/jakesgordon/javascript-state-machine
#
# Copyright (C) 2011 Mansour Behabadi <mansour@oxplot.com>, Jake Gordon
# and other contributors
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import unittest
import gc
from fysom import Fysom
class FysomGarbageCollectionTests(unittest.TestCase):
def test_should_not_create_circular_ref(self):
class MyTestObject(object):
def __init__(self):
self._states = []
self._fsm = Fysom({
'initial': 'green',
'events': [
{'name': 'warn', 'src': 'green', 'dst': 'yellow'},
{'name': 'panic', 'src': 'yellow', 'dst': 'red'},
{'name': 'calm', 'src': 'red', 'dst': 'yellow'},
{'name': 'clear', 'src': 'yellow', 'dst': 'green'}
],
'callbacks': {
'ongreen': self._on_green,
'onyellow': self._on_yellow,
'onred': self._on_red
}
})
def warn(self):
self._fsm.warn()
def panic(self):
self._fsm.panic()
def calm(self):
self._fsm.calm()
def clear(self):
self._fsm.clear()
def _on_green(self, *args, **kwargs):
self._states.append('green')
def _on_yellow(self, *args, **kwargs):
self._states.append('yellow')
def _on_red(self, *args, **kwargs):
self._states.append('red')
obj = MyTestObject()
obj.warn()
obj.clear()
del obj
self.assertEqual(filter(lambda o : isinstance(o, MyTestObject),
gc.get_objects()), [])
|
Add unit test for garbage collection# coding=utf-8
#
# fysom - pYthOn Finite State Machine - this is a port of Jake
# Gordon's javascript-state-machine to python
# https://github.com/jakesgordon/javascript-state-machine
#
# Copyright (C) 2011 Mansour Behabadi <mansour@oxplot.com>, Jake Gordon
# and other contributors
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import unittest
import gc
from fysom import Fysom
class FysomGarbageCollectionTests(unittest.TestCase):
def test_should_not_create_circular_ref(self):
class MyTestObject(object):
def __init__(self):
self._states = []
self._fsm = Fysom({
'initial': 'green',
'events': [
{'name': 'warn', 'src': 'green', 'dst': 'yellow'},
{'name': 'panic', 'src': 'yellow', 'dst': 'red'},
{'name': 'calm', 'src': 'red', 'dst': 'yellow'},
{'name': 'clear', 'src': 'yellow', 'dst': 'green'}
],
'callbacks': {
'ongreen': self._on_green,
'onyellow': self._on_yellow,
'onred': self._on_red
}
})
def warn(self):
self._fsm.warn()
def panic(self):
self._fsm.panic()
def calm(self):
self._fsm.calm()
def clear(self):
self._fsm.clear()
def _on_green(self, *args, **kwargs):
self._states.append('green')
def _on_yellow(self, *args, **kwargs):
self._states.append('yellow')
def _on_red(self, *args, **kwargs):
self._states.append('red')
obj = MyTestObject()
obj.warn()
obj.clear()
del obj
self.assertEqual(filter(lambda o : isinstance(o, MyTestObject),
gc.get_objects()), [])
|
<commit_before><commit_msg>Add unit test for garbage collection<commit_after># coding=utf-8
#
# fysom - pYthOn Finite State Machine - this is a port of Jake
# Gordon's javascript-state-machine to python
# https://github.com/jakesgordon/javascript-state-machine
#
# Copyright (C) 2011 Mansour Behabadi <mansour@oxplot.com>, Jake Gordon
# and other contributors
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import unittest
import gc
from fysom import Fysom
class FysomGarbageCollectionTests(unittest.TestCase):
def test_should_not_create_circular_ref(self):
class MyTestObject(object):
def __init__(self):
self._states = []
self._fsm = Fysom({
'initial': 'green',
'events': [
{'name': 'warn', 'src': 'green', 'dst': 'yellow'},
{'name': 'panic', 'src': 'yellow', 'dst': 'red'},
{'name': 'calm', 'src': 'red', 'dst': 'yellow'},
{'name': 'clear', 'src': 'yellow', 'dst': 'green'}
],
'callbacks': {
'ongreen': self._on_green,
'onyellow': self._on_yellow,
'onred': self._on_red
}
})
def warn(self):
self._fsm.warn()
def panic(self):
self._fsm.panic()
def calm(self):
self._fsm.calm()
def clear(self):
self._fsm.clear()
def _on_green(self, *args, **kwargs):
self._states.append('green')
def _on_yellow(self, *args, **kwargs):
self._states.append('yellow')
def _on_red(self, *args, **kwargs):
self._states.append('red')
obj = MyTestObject()
obj.warn()
obj.clear()
del obj
self.assertEqual(filter(lambda o : isinstance(o, MyTestObject),
gc.get_objects()), [])
|
|
d407112debfadeba47742e157779c28d5dc82e0c
|
connect_to_postgres.py
|
connect_to_postgres.py
|
import os
import psycopg2
import urlparse
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse(os.environ["DATABASE_URL"])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
cur = conn.cursor()
cur.execute("SELECT * FROM pitches LIMIT 10;")
print cur.fetchone()
cur.close()
conn.close()
|
Write script to connect to postgres
|
Write script to connect to postgres
|
Python
|
mit
|
gsganden/pitcher-reports,gsganden/pitcher-reports
|
Write script to connect to postgres
|
import os
import psycopg2
import urlparse
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse(os.environ["DATABASE_URL"])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
cur = conn.cursor()
cur.execute("SELECT * FROM pitches LIMIT 10;")
print cur.fetchone()
cur.close()
conn.close()
|
<commit_before><commit_msg>Write script to connect to postgres<commit_after>
|
import os
import psycopg2
import urlparse
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse(os.environ["DATABASE_URL"])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
cur = conn.cursor()
cur.execute("SELECT * FROM pitches LIMIT 10;")
print cur.fetchone()
cur.close()
conn.close()
|
Write script to connect to postgresimport os
import psycopg2
import urlparse
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse(os.environ["DATABASE_URL"])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
cur = conn.cursor()
cur.execute("SELECT * FROM pitches LIMIT 10;")
print cur.fetchone()
cur.close()
conn.close()
|
<commit_before><commit_msg>Write script to connect to postgres<commit_after>import os
import psycopg2
import urlparse
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse(os.environ["DATABASE_URL"])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
cur = conn.cursor()
cur.execute("SELECT * FROM pitches LIMIT 10;")
print cur.fetchone()
cur.close()
conn.close()
|
|
59fa12f76b564d020196043431cd0551129fb834
|
localore/home/migrations/0014_auto_20160328_1654.py
|
localore/home/migrations/0014_auto_20160328_1654.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0013_auto_20160328_1515'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='view_more_page',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, null=True, verbose_name='Page to link to', to='wagtailcore.Page', related_name='+'),
),
migrations.AlterField(
model_name='homepage',
name='view_more_title',
field=models.CharField(verbose_name='"View more" link title', help_text='For example, "View more connections"', max_length=255),
),
]
|
Add missed migration for ddf77b1
|
Add missed migration for ddf77b1
|
Python
|
mpl-2.0
|
ghostwords/localore,ghostwords/localore,ghostwords/localore
|
Add missed migration for ddf77b1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0013_auto_20160328_1515'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='view_more_page',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, null=True, verbose_name='Page to link to', to='wagtailcore.Page', related_name='+'),
),
migrations.AlterField(
model_name='homepage',
name='view_more_title',
field=models.CharField(verbose_name='"View more" link title', help_text='For example, "View more connections"', max_length=255),
),
]
|
<commit_before><commit_msg>Add missed migration for ddf77b1<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0013_auto_20160328_1515'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='view_more_page',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, null=True, verbose_name='Page to link to', to='wagtailcore.Page', related_name='+'),
),
migrations.AlterField(
model_name='homepage',
name='view_more_title',
field=models.CharField(verbose_name='"View more" link title', help_text='For example, "View more connections"', max_length=255),
),
]
|
Add missed migration for ddf77b1# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0013_auto_20160328_1515'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='view_more_page',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, null=True, verbose_name='Page to link to', to='wagtailcore.Page', related_name='+'),
),
migrations.AlterField(
model_name='homepage',
name='view_more_title',
field=models.CharField(verbose_name='"View more" link title', help_text='For example, "View more connections"', max_length=255),
),
]
|
<commit_before><commit_msg>Add missed migration for ddf77b1<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0013_auto_20160328_1515'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='view_more_page',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, null=True, verbose_name='Page to link to', to='wagtailcore.Page', related_name='+'),
),
migrations.AlterField(
model_name='homepage',
name='view_more_title',
field=models.CharField(verbose_name='"View more" link title', help_text='For example, "View more connections"', max_length=255),
),
]
|
|
4c0c2289ee8deeb6ea7f8bb76db886329e7ae1b3
|
zinnia/migrations/0002_subtitle_and_caption.py
|
zinnia/migrations/0002_subtitle_and_caption.py
|
from django.db import models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('zinnia', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='entry',
name='caption',
field=models.TextField(
default='', help_text="Image's caption",
verbose_name='caption', blank=True),
preserve_default=False,
),
migrations.AddField(
model_name='entry',
name='subtitle',
field=models.TextField(
default='', verbose_name='subtitle', blank=True),
preserve_default=False,
),
]
|
Add migration for adding subtitle and caption fields
|
Add migration for adding subtitle and caption fields
|
Python
|
bsd-3-clause
|
Maplecroft/django-blog-zinnia,extertioner/django-blog-zinnia,marctc/django-blog-zinnia,aorzh/django-blog-zinnia,Maplecroft/django-blog-zinnia,ZuluPro/django-blog-zinnia,ghachey/django-blog-zinnia,dapeng0802/django-blog-zinnia,bywbilly/django-blog-zinnia,aorzh/django-blog-zinnia,Zopieux/django-blog-zinnia,petecummings/django-blog-zinnia,Fantomas42/django-blog-zinnia,bywbilly/django-blog-zinnia,Fantomas42/django-blog-zinnia,marctc/django-blog-zinnia,bywbilly/django-blog-zinnia,ZuluPro/django-blog-zinnia,Zopieux/django-blog-zinnia,extertioner/django-blog-zinnia,marctc/django-blog-zinnia,aorzh/django-blog-zinnia,dapeng0802/django-blog-zinnia,Maplecroft/django-blog-zinnia,dapeng0802/django-blog-zinnia,extertioner/django-blog-zinnia,Zopieux/django-blog-zinnia,ghachey/django-blog-zinnia,Fantomas42/django-blog-zinnia,petecummings/django-blog-zinnia,petecummings/django-blog-zinnia,ghachey/django-blog-zinnia,ZuluPro/django-blog-zinnia
|
Add migration for adding subtitle and caption fields
|
from django.db import models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('zinnia', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='entry',
name='caption',
field=models.TextField(
default='', help_text="Image's caption",
verbose_name='caption', blank=True),
preserve_default=False,
),
migrations.AddField(
model_name='entry',
name='subtitle',
field=models.TextField(
default='', verbose_name='subtitle', blank=True),
preserve_default=False,
),
]
|
<commit_before><commit_msg>Add migration for adding subtitle and caption fields<commit_after>
|
from django.db import models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('zinnia', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='entry',
name='caption',
field=models.TextField(
default='', help_text="Image's caption",
verbose_name='caption', blank=True),
preserve_default=False,
),
migrations.AddField(
model_name='entry',
name='subtitle',
field=models.TextField(
default='', verbose_name='subtitle', blank=True),
preserve_default=False,
),
]
|
Add migration for adding subtitle and caption fieldsfrom django.db import models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('zinnia', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='entry',
name='caption',
field=models.TextField(
default='', help_text="Image's caption",
verbose_name='caption', blank=True),
preserve_default=False,
),
migrations.AddField(
model_name='entry',
name='subtitle',
field=models.TextField(
default='', verbose_name='subtitle', blank=True),
preserve_default=False,
),
]
|
<commit_before><commit_msg>Add migration for adding subtitle and caption fields<commit_after>from django.db import models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('zinnia', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='entry',
name='caption',
field=models.TextField(
default='', help_text="Image's caption",
verbose_name='caption', blank=True),
preserve_default=False,
),
migrations.AddField(
model_name='entry',
name='subtitle',
field=models.TextField(
default='', verbose_name='subtitle', blank=True),
preserve_default=False,
),
]
|
|
692bf6df15410ac1d5deac81081526d6c2aa27ae
|
aggregate_if.py
|
aggregate_if.py
|
# coding: utf-8
'''
Implements conditional aggregates.
This code was based on the work of others found on the internet:
1. http://web.archive.org/web/20101115170804/http://www.voteruniverse.com/Members/jlantz/blog/conditional-aggregates-in-django
2. https://code.djangoproject.com/ticket/11305
3. https://groups.google.com/forum/?fromgroups=#!topic/django-users/cjzloTUwmS0
4. https://groups.google.com/forum/?fromgroups=#!topic/django-users/vVprMpsAnPo
'''
from django.db.models.aggregates import Aggregate as DjangoAggregate
from django.db.models.sql.aggregates import Aggregate as DjangoSqlAggregate
class SqlAggregate(DjangoSqlAggregate):
conditional_template = '%(function)s(CASE WHEN %(condition)s THEN %(field)s ELSE null END)'
def __init__(self, col, source=None, is_summary=False, condition=None, **extra):
super(SqlAggregate, self).__init__(col, source, is_summary, **extra)
self.condition = condition
def relabel_aliases(self, change_map):
super(SqlAggregate, self).relabel_aliases(change_map)
if self.has_condition:
self.condition.relabel_aliases(change_map)
def as_sql(self, qn, connection):
if self.has_condition:
self.sql_template = self.conditional_template
self.extra['condition'] = self._condition_as_sql(qn, connection)
return super(SqlAggregate, self).as_sql(qn, connection)
@property
def has_condition(self):
# Warning: bool(QuerySet) will hit the database
return self.condition is not None
def _condition_as_sql(self, qn, connection):
'''
Return sql for condition.
'''
escape = lambda p: qn(p) if isinstance(p, basestring) else p
sql, param = self.condition.query.where.as_sql(qn, connection)
param = map(escape, param)
return sql % tuple(param)
class SqlSum(SqlAggregate):
sql_function = 'SUM'
class Sum(DjangoAggregate):
name = 'Sum'
def __init__(self, lookup, only=None, **extra):
super(Sum, self).__init__(lookup, **extra)
self.only = only
self.condition = None
def add_to_query(self, query, alias, col, source, is_summary):
if self.only:
self.condition = query.model._default_manager.filter(self.only)
aggregate = SqlSum(col, source, is_summary, self.condition, **self.extra)
query.aggregates[alias] = aggregate
|
Add aggregate-if source with support for Sum
|
Add aggregate-if source with support for Sum
|
Python
|
mit
|
henriquebastos/django-aggregate-if
|
Add aggregate-if source with support for Sum
|
# coding: utf-8
'''
Implements conditional aggregates.
This code was based on the work of others found on the internet:
1. http://web.archive.org/web/20101115170804/http://www.voteruniverse.com/Members/jlantz/blog/conditional-aggregates-in-django
2. https://code.djangoproject.com/ticket/11305
3. https://groups.google.com/forum/?fromgroups=#!topic/django-users/cjzloTUwmS0
4. https://groups.google.com/forum/?fromgroups=#!topic/django-users/vVprMpsAnPo
'''
from django.db.models.aggregates import Aggregate as DjangoAggregate
from django.db.models.sql.aggregates import Aggregate as DjangoSqlAggregate
class SqlAggregate(DjangoSqlAggregate):
conditional_template = '%(function)s(CASE WHEN %(condition)s THEN %(field)s ELSE null END)'
def __init__(self, col, source=None, is_summary=False, condition=None, **extra):
super(SqlAggregate, self).__init__(col, source, is_summary, **extra)
self.condition = condition
def relabel_aliases(self, change_map):
super(SqlAggregate, self).relabel_aliases(change_map)
if self.has_condition:
self.condition.relabel_aliases(change_map)
def as_sql(self, qn, connection):
if self.has_condition:
self.sql_template = self.conditional_template
self.extra['condition'] = self._condition_as_sql(qn, connection)
return super(SqlAggregate, self).as_sql(qn, connection)
@property
def has_condition(self):
# Warning: bool(QuerySet) will hit the database
return self.condition is not None
def _condition_as_sql(self, qn, connection):
'''
Return sql for condition.
'''
escape = lambda p: qn(p) if isinstance(p, basestring) else p
sql, param = self.condition.query.where.as_sql(qn, connection)
param = map(escape, param)
return sql % tuple(param)
class SqlSum(SqlAggregate):
sql_function = 'SUM'
class Sum(DjangoAggregate):
name = 'Sum'
def __init__(self, lookup, only=None, **extra):
super(Sum, self).__init__(lookup, **extra)
self.only = only
self.condition = None
def add_to_query(self, query, alias, col, source, is_summary):
if self.only:
self.condition = query.model._default_manager.filter(self.only)
aggregate = SqlSum(col, source, is_summary, self.condition, **self.extra)
query.aggregates[alias] = aggregate
|
<commit_before><commit_msg>Add aggregate-if source with support for Sum<commit_after>
|
# coding: utf-8
'''
Implements conditional aggregates.
This code was based on the work of others found on the internet:
1. http://web.archive.org/web/20101115170804/http://www.voteruniverse.com/Members/jlantz/blog/conditional-aggregates-in-django
2. https://code.djangoproject.com/ticket/11305
3. https://groups.google.com/forum/?fromgroups=#!topic/django-users/cjzloTUwmS0
4. https://groups.google.com/forum/?fromgroups=#!topic/django-users/vVprMpsAnPo
'''
from django.db.models.aggregates import Aggregate as DjangoAggregate
from django.db.models.sql.aggregates import Aggregate as DjangoSqlAggregate
class SqlAggregate(DjangoSqlAggregate):
conditional_template = '%(function)s(CASE WHEN %(condition)s THEN %(field)s ELSE null END)'
def __init__(self, col, source=None, is_summary=False, condition=None, **extra):
super(SqlAggregate, self).__init__(col, source, is_summary, **extra)
self.condition = condition
def relabel_aliases(self, change_map):
super(SqlAggregate, self).relabel_aliases(change_map)
if self.has_condition:
self.condition.relabel_aliases(change_map)
def as_sql(self, qn, connection):
if self.has_condition:
self.sql_template = self.conditional_template
self.extra['condition'] = self._condition_as_sql(qn, connection)
return super(SqlAggregate, self).as_sql(qn, connection)
@property
def has_condition(self):
# Warning: bool(QuerySet) will hit the database
return self.condition is not None
def _condition_as_sql(self, qn, connection):
'''
Return sql for condition.
'''
escape = lambda p: qn(p) if isinstance(p, basestring) else p
sql, param = self.condition.query.where.as_sql(qn, connection)
param = map(escape, param)
return sql % tuple(param)
class SqlSum(SqlAggregate):
sql_function = 'SUM'
class Sum(DjangoAggregate):
name = 'Sum'
def __init__(self, lookup, only=None, **extra):
super(Sum, self).__init__(lookup, **extra)
self.only = only
self.condition = None
def add_to_query(self, query, alias, col, source, is_summary):
if self.only:
self.condition = query.model._default_manager.filter(self.only)
aggregate = SqlSum(col, source, is_summary, self.condition, **self.extra)
query.aggregates[alias] = aggregate
|
Add aggregate-if source with support for Sum# coding: utf-8
'''
Implements conditional aggregates.
This code was based on the work of others found on the internet:
1. http://web.archive.org/web/20101115170804/http://www.voteruniverse.com/Members/jlantz/blog/conditional-aggregates-in-django
2. https://code.djangoproject.com/ticket/11305
3. https://groups.google.com/forum/?fromgroups=#!topic/django-users/cjzloTUwmS0
4. https://groups.google.com/forum/?fromgroups=#!topic/django-users/vVprMpsAnPo
'''
from django.db.models.aggregates import Aggregate as DjangoAggregate
from django.db.models.sql.aggregates import Aggregate as DjangoSqlAggregate
class SqlAggregate(DjangoSqlAggregate):
conditional_template = '%(function)s(CASE WHEN %(condition)s THEN %(field)s ELSE null END)'
def __init__(self, col, source=None, is_summary=False, condition=None, **extra):
super(SqlAggregate, self).__init__(col, source, is_summary, **extra)
self.condition = condition
def relabel_aliases(self, change_map):
super(SqlAggregate, self).relabel_aliases(change_map)
if self.has_condition:
self.condition.relabel_aliases(change_map)
def as_sql(self, qn, connection):
if self.has_condition:
self.sql_template = self.conditional_template
self.extra['condition'] = self._condition_as_sql(qn, connection)
return super(SqlAggregate, self).as_sql(qn, connection)
@property
def has_condition(self):
# Warning: bool(QuerySet) will hit the database
return self.condition is not None
def _condition_as_sql(self, qn, connection):
'''
Return sql for condition.
'''
escape = lambda p: qn(p) if isinstance(p, basestring) else p
sql, param = self.condition.query.where.as_sql(qn, connection)
param = map(escape, param)
return sql % tuple(param)
class SqlSum(SqlAggregate):
sql_function = 'SUM'
class Sum(DjangoAggregate):
name = 'Sum'
def __init__(self, lookup, only=None, **extra):
super(Sum, self).__init__(lookup, **extra)
self.only = only
self.condition = None
def add_to_query(self, query, alias, col, source, is_summary):
if self.only:
self.condition = query.model._default_manager.filter(self.only)
aggregate = SqlSum(col, source, is_summary, self.condition, **self.extra)
query.aggregates[alias] = aggregate
|
<commit_before><commit_msg>Add aggregate-if source with support for Sum<commit_after># coding: utf-8
'''
Implements conditional aggregates.
This code was based on the work of others found on the internet:
1. http://web.archive.org/web/20101115170804/http://www.voteruniverse.com/Members/jlantz/blog/conditional-aggregates-in-django
2. https://code.djangoproject.com/ticket/11305
3. https://groups.google.com/forum/?fromgroups=#!topic/django-users/cjzloTUwmS0
4. https://groups.google.com/forum/?fromgroups=#!topic/django-users/vVprMpsAnPo
'''
from django.db.models.aggregates import Aggregate as DjangoAggregate
from django.db.models.sql.aggregates import Aggregate as DjangoSqlAggregate
class SqlAggregate(DjangoSqlAggregate):
conditional_template = '%(function)s(CASE WHEN %(condition)s THEN %(field)s ELSE null END)'
def __init__(self, col, source=None, is_summary=False, condition=None, **extra):
super(SqlAggregate, self).__init__(col, source, is_summary, **extra)
self.condition = condition
def relabel_aliases(self, change_map):
super(SqlAggregate, self).relabel_aliases(change_map)
if self.has_condition:
self.condition.relabel_aliases(change_map)
def as_sql(self, qn, connection):
if self.has_condition:
self.sql_template = self.conditional_template
self.extra['condition'] = self._condition_as_sql(qn, connection)
return super(SqlAggregate, self).as_sql(qn, connection)
@property
def has_condition(self):
# Warning: bool(QuerySet) will hit the database
return self.condition is not None
def _condition_as_sql(self, qn, connection):
'''
Return sql for condition.
'''
escape = lambda p: qn(p) if isinstance(p, basestring) else p
sql, param = self.condition.query.where.as_sql(qn, connection)
param = map(escape, param)
return sql % tuple(param)
class SqlSum(SqlAggregate):
sql_function = 'SUM'
class Sum(DjangoAggregate):
name = 'Sum'
def __init__(self, lookup, only=None, **extra):
super(Sum, self).__init__(lookup, **extra)
self.only = only
self.condition = None
def add_to_query(self, query, alias, col, source, is_summary):
if self.only:
self.condition = query.model._default_manager.filter(self.only)
aggregate = SqlSum(col, source, is_summary, self.condition, **self.extra)
query.aggregates[alias] = aggregate
|
|
73e5df0b277b25bdbe88acd31518106615e02cb4
|
indra/sources/eidos/make_eidos_ontology.py
|
indra/sources/eidos/make_eidos_ontology.py
|
import yaml
import requests
from os.path import join, dirname, abspath
from rdflib import Graph, Namespace, Literal
eidos_ont_url = 'https://raw.githubusercontent.com/clulab/eidos/master/' + \
'src/main/resources/org/clulab/wm/eidos/toy_ontology.yml'
eidos_ns = Namespace('http://github.com/clulab/eidos/')
indra_ns = 'http://sorger.med.harvard.edu/indra/'
indra_rel_ns = Namespace(indra_ns + 'relations/')
isa = indra_rel_ns.term('isa')
def save_hierarchy(g, path):
with open(path, 'wb') as out_file:
g_bytes = g.serialize(format='nt')
# Replace extra new lines in string and get rid of empty line at end
g_bytes = g_bytes.replace(b'\n\n', b'\n').strip()
# Split into rows and sort
rows = g_bytes.split(b'\n')
rows.sort()
g_bytes = b'\n'.join(rows)
out_file.write(g_bytes)
def get_term(node):
return eidos_ns.term(node)
def build_relations(G, node, tree):
print(node)
this_term = get_term(node)
for entry in tree:
if isinstance(entry, str):
continue
child = list(entry.keys())[0]
build_relations(G, child, entry[child])
child_term = get_term(child)
rel = (child_term, isa, this_term)
G.add(rel)
if __name__ == '__main__':
yml = requests.get(eidos_ont_url).content
root = yaml.load(yml)
G = Graph()
build_relations(G, 'root', root)
rdf_path = join(dirname(abspath(__file__)), 'eidos_ontology.rdf')
save_hierarchy(G, rdf_path)
|
Add script to make RDF Eidos ontology
|
Add script to make RDF Eidos ontology
|
Python
|
bsd-2-clause
|
bgyori/indra,johnbachman/indra,sorgerlab/indra,pvtodorov/indra,sorgerlab/belpy,johnbachman/indra,sorgerlab/indra,johnbachman/indra,pvtodorov/indra,johnbachman/belpy,sorgerlab/indra,sorgerlab/belpy,pvtodorov/indra,johnbachman/belpy,pvtodorov/indra,bgyori/indra,johnbachman/belpy,sorgerlab/belpy,bgyori/indra
|
Add script to make RDF Eidos ontology
|
import yaml
import requests
from os.path import join, dirname, abspath
from rdflib import Graph, Namespace, Literal
eidos_ont_url = 'https://raw.githubusercontent.com/clulab/eidos/master/' + \
'src/main/resources/org/clulab/wm/eidos/toy_ontology.yml'
eidos_ns = Namespace('http://github.com/clulab/eidos/')
indra_ns = 'http://sorger.med.harvard.edu/indra/'
indra_rel_ns = Namespace(indra_ns + 'relations/')
isa = indra_rel_ns.term('isa')
def save_hierarchy(g, path):
with open(path, 'wb') as out_file:
g_bytes = g.serialize(format='nt')
# Replace extra new lines in string and get rid of empty line at end
g_bytes = g_bytes.replace(b'\n\n', b'\n').strip()
# Split into rows and sort
rows = g_bytes.split(b'\n')
rows.sort()
g_bytes = b'\n'.join(rows)
out_file.write(g_bytes)
def get_term(node):
return eidos_ns.term(node)
def build_relations(G, node, tree):
print(node)
this_term = get_term(node)
for entry in tree:
if isinstance(entry, str):
continue
child = list(entry.keys())[0]
build_relations(G, child, entry[child])
child_term = get_term(child)
rel = (child_term, isa, this_term)
G.add(rel)
if __name__ == '__main__':
yml = requests.get(eidos_ont_url).content
root = yaml.load(yml)
G = Graph()
build_relations(G, 'root', root)
rdf_path = join(dirname(abspath(__file__)), 'eidos_ontology.rdf')
save_hierarchy(G, rdf_path)
|
<commit_before><commit_msg>Add script to make RDF Eidos ontology<commit_after>
|
import yaml
import requests
from os.path import join, dirname, abspath
from rdflib import Graph, Namespace, Literal
eidos_ont_url = 'https://raw.githubusercontent.com/clulab/eidos/master/' + \
'src/main/resources/org/clulab/wm/eidos/toy_ontology.yml'
eidos_ns = Namespace('http://github.com/clulab/eidos/')
indra_ns = 'http://sorger.med.harvard.edu/indra/'
indra_rel_ns = Namespace(indra_ns + 'relations/')
isa = indra_rel_ns.term('isa')
def save_hierarchy(g, path):
with open(path, 'wb') as out_file:
g_bytes = g.serialize(format='nt')
# Replace extra new lines in string and get rid of empty line at end
g_bytes = g_bytes.replace(b'\n\n', b'\n').strip()
# Split into rows and sort
rows = g_bytes.split(b'\n')
rows.sort()
g_bytes = b'\n'.join(rows)
out_file.write(g_bytes)
def get_term(node):
return eidos_ns.term(node)
def build_relations(G, node, tree):
print(node)
this_term = get_term(node)
for entry in tree:
if isinstance(entry, str):
continue
child = list(entry.keys())[0]
build_relations(G, child, entry[child])
child_term = get_term(child)
rel = (child_term, isa, this_term)
G.add(rel)
if __name__ == '__main__':
yml = requests.get(eidos_ont_url).content
root = yaml.load(yml)
G = Graph()
build_relations(G, 'root', root)
rdf_path = join(dirname(abspath(__file__)), 'eidos_ontology.rdf')
save_hierarchy(G, rdf_path)
|
Add script to make RDF Eidos ontologyimport yaml
import requests
from os.path import join, dirname, abspath
from rdflib import Graph, Namespace, Literal
eidos_ont_url = 'https://raw.githubusercontent.com/clulab/eidos/master/' + \
'src/main/resources/org/clulab/wm/eidos/toy_ontology.yml'
eidos_ns = Namespace('http://github.com/clulab/eidos/')
indra_ns = 'http://sorger.med.harvard.edu/indra/'
indra_rel_ns = Namespace(indra_ns + 'relations/')
isa = indra_rel_ns.term('isa')
def save_hierarchy(g, path):
with open(path, 'wb') as out_file:
g_bytes = g.serialize(format='nt')
# Replace extra new lines in string and get rid of empty line at end
g_bytes = g_bytes.replace(b'\n\n', b'\n').strip()
# Split into rows and sort
rows = g_bytes.split(b'\n')
rows.sort()
g_bytes = b'\n'.join(rows)
out_file.write(g_bytes)
def get_term(node):
return eidos_ns.term(node)
def build_relations(G, node, tree):
print(node)
this_term = get_term(node)
for entry in tree:
if isinstance(entry, str):
continue
child = list(entry.keys())[0]
build_relations(G, child, entry[child])
child_term = get_term(child)
rel = (child_term, isa, this_term)
G.add(rel)
if __name__ == '__main__':
yml = requests.get(eidos_ont_url).content
root = yaml.load(yml)
G = Graph()
build_relations(G, 'root', root)
rdf_path = join(dirname(abspath(__file__)), 'eidos_ontology.rdf')
save_hierarchy(G, rdf_path)
|
<commit_before><commit_msg>Add script to make RDF Eidos ontology<commit_after>import yaml
import requests
from os.path import join, dirname, abspath
from rdflib import Graph, Namespace, Literal
eidos_ont_url = 'https://raw.githubusercontent.com/clulab/eidos/master/' + \
'src/main/resources/org/clulab/wm/eidos/toy_ontology.yml'
eidos_ns = Namespace('http://github.com/clulab/eidos/')
indra_ns = 'http://sorger.med.harvard.edu/indra/'
indra_rel_ns = Namespace(indra_ns + 'relations/')
isa = indra_rel_ns.term('isa')
def save_hierarchy(g, path):
with open(path, 'wb') as out_file:
g_bytes = g.serialize(format='nt')
# Replace extra new lines in string and get rid of empty line at end
g_bytes = g_bytes.replace(b'\n\n', b'\n').strip()
# Split into rows and sort
rows = g_bytes.split(b'\n')
rows.sort()
g_bytes = b'\n'.join(rows)
out_file.write(g_bytes)
def get_term(node):
return eidos_ns.term(node)
def build_relations(G, node, tree):
print(node)
this_term = get_term(node)
for entry in tree:
if isinstance(entry, str):
continue
child = list(entry.keys())[0]
build_relations(G, child, entry[child])
child_term = get_term(child)
rel = (child_term, isa, this_term)
G.add(rel)
if __name__ == '__main__':
yml = requests.get(eidos_ont_url).content
root = yaml.load(yml)
G = Graph()
build_relations(G, 'root', root)
rdf_path = join(dirname(abspath(__file__)), 'eidos_ontology.rdf')
save_hierarchy(G, rdf_path)
|
|
793b1ad0dade4afcdb8ef4a3bc81c53fd1a47f6d
|
tests/test_pypi.py
|
tests/test_pypi.py
|
from tests.helper import ExternalVersionTestCase
class PyPITest(ExternalVersionTestCase):
def test_pypi(self):
self.assertEqual(self.sync_get_version("example", {"pypi": None}), "0.1.0")
|
Add a testcase for PyPI
|
Add a testcase for PyPI
|
Python
|
mit
|
lilydjwg/nvchecker
|
Add a testcase for PyPI
|
from tests.helper import ExternalVersionTestCase
class PyPITest(ExternalVersionTestCase):
def test_pypi(self):
self.assertEqual(self.sync_get_version("example", {"pypi": None}), "0.1.0")
|
<commit_before><commit_msg>Add a testcase for PyPI<commit_after>
|
from tests.helper import ExternalVersionTestCase
class PyPITest(ExternalVersionTestCase):
def test_pypi(self):
self.assertEqual(self.sync_get_version("example", {"pypi": None}), "0.1.0")
|
Add a testcase for PyPIfrom tests.helper import ExternalVersionTestCase
class PyPITest(ExternalVersionTestCase):
def test_pypi(self):
self.assertEqual(self.sync_get_version("example", {"pypi": None}), "0.1.0")
|
<commit_before><commit_msg>Add a testcase for PyPI<commit_after>from tests.helper import ExternalVersionTestCase
class PyPITest(ExternalVersionTestCase):
def test_pypi(self):
self.assertEqual(self.sync_get_version("example", {"pypi": None}), "0.1.0")
|
|
60c9874bcd085f808ddc676bbe019bbed91900cd
|
tests/test_util.py
|
tests/test_util.py
|
"""Tests for util submodule."""
from pathlib import Path
import requests
import xdg
from mccurse import util
def test_cache_use_default():
"""Use cache dir when no dir specified?"""
INPUT = None
EXPECT = Path(xdg.BaseDirectory.save_cache_path(util.RESOURCE_NAME))
assert util.default_cache_dir(INPUT) == EXPECT
def test_cache_use_input():
"""Use existing dir when specified?"""
INPUT = Path.home()
EXPECT = INPUT
assert util.default_cache_dir(INPUT) == EXPECT
def test_data_use_default():
"""Use data dir when no dir specified?"""
INPUT = None
EXPECT = Path(xdg.BaseDirectory.save_data_path(util.RESOURCE_NAME))
assert util.default_data_dir(INPUT) == EXPECT
def test_data_use_input():
"""Use existing dir when specified?"""
INPUT = Path.home()
EXPECT = INPUT
assert util.default_data_dir(INPUT) == EXPECT
def test_use_existing_session():
"""Use existing session when specified?"""
INPUT = requests.Session()
EXPECT = INPUT
assert util.default_new_session(INPUT) is EXPECT
def test_make_new_session():
"""Make new session when none provided?"""
INPUT = None
assert isinstance(util.default_new_session(INPUT), requests.Session)
|
Add tests for util submodule
|
Add tests for util submodule
|
Python
|
agpl-3.0
|
khardix/mccurse
|
Add tests for util submodule
|
"""Tests for util submodule."""
from pathlib import Path
import requests
import xdg
from mccurse import util
def test_cache_use_default():
"""Use cache dir when no dir specified?"""
INPUT = None
EXPECT = Path(xdg.BaseDirectory.save_cache_path(util.RESOURCE_NAME))
assert util.default_cache_dir(INPUT) == EXPECT
def test_cache_use_input():
"""Use existing dir when specified?"""
INPUT = Path.home()
EXPECT = INPUT
assert util.default_cache_dir(INPUT) == EXPECT
def test_data_use_default():
"""Use data dir when no dir specified?"""
INPUT = None
EXPECT = Path(xdg.BaseDirectory.save_data_path(util.RESOURCE_NAME))
assert util.default_data_dir(INPUT) == EXPECT
def test_data_use_input():
"""Use existing dir when specified?"""
INPUT = Path.home()
EXPECT = INPUT
assert util.default_data_dir(INPUT) == EXPECT
def test_use_existing_session():
"""Use existing session when specified?"""
INPUT = requests.Session()
EXPECT = INPUT
assert util.default_new_session(INPUT) is EXPECT
def test_make_new_session():
"""Make new session when none provided?"""
INPUT = None
assert isinstance(util.default_new_session(INPUT), requests.Session)
|
<commit_before><commit_msg>Add tests for util submodule<commit_after>
|
"""Tests for util submodule."""
from pathlib import Path
import requests
import xdg
from mccurse import util
def test_cache_use_default():
"""Use cache dir when no dir specified?"""
INPUT = None
EXPECT = Path(xdg.BaseDirectory.save_cache_path(util.RESOURCE_NAME))
assert util.default_cache_dir(INPUT) == EXPECT
def test_cache_use_input():
"""Use existing dir when specified?"""
INPUT = Path.home()
EXPECT = INPUT
assert util.default_cache_dir(INPUT) == EXPECT
def test_data_use_default():
"""Use data dir when no dir specified?"""
INPUT = None
EXPECT = Path(xdg.BaseDirectory.save_data_path(util.RESOURCE_NAME))
assert util.default_data_dir(INPUT) == EXPECT
def test_data_use_input():
"""Use existing dir when specified?"""
INPUT = Path.home()
EXPECT = INPUT
assert util.default_data_dir(INPUT) == EXPECT
def test_use_existing_session():
"""Use existing session when specified?"""
INPUT = requests.Session()
EXPECT = INPUT
assert util.default_new_session(INPUT) is EXPECT
def test_make_new_session():
"""Make new session when none provided?"""
INPUT = None
assert isinstance(util.default_new_session(INPUT), requests.Session)
|
Add tests for util submodule"""Tests for util submodule."""
from pathlib import Path
import requests
import xdg
from mccurse import util
def test_cache_use_default():
"""Use cache dir when no dir specified?"""
INPUT = None
EXPECT = Path(xdg.BaseDirectory.save_cache_path(util.RESOURCE_NAME))
assert util.default_cache_dir(INPUT) == EXPECT
def test_cache_use_input():
"""Use existing dir when specified?"""
INPUT = Path.home()
EXPECT = INPUT
assert util.default_cache_dir(INPUT) == EXPECT
def test_data_use_default():
"""Use data dir when no dir specified?"""
INPUT = None
EXPECT = Path(xdg.BaseDirectory.save_data_path(util.RESOURCE_NAME))
assert util.default_data_dir(INPUT) == EXPECT
def test_data_use_input():
"""Use existing dir when specified?"""
INPUT = Path.home()
EXPECT = INPUT
assert util.default_data_dir(INPUT) == EXPECT
def test_use_existing_session():
"""Use existing session when specified?"""
INPUT = requests.Session()
EXPECT = INPUT
assert util.default_new_session(INPUT) is EXPECT
def test_make_new_session():
"""Make new session when none provided?"""
INPUT = None
assert isinstance(util.default_new_session(INPUT), requests.Session)
|
<commit_before><commit_msg>Add tests for util submodule<commit_after>"""Tests for util submodule."""
from pathlib import Path
import requests
import xdg
from mccurse import util
def test_cache_use_default():
"""Use cache dir when no dir specified?"""
INPUT = None
EXPECT = Path(xdg.BaseDirectory.save_cache_path(util.RESOURCE_NAME))
assert util.default_cache_dir(INPUT) == EXPECT
def test_cache_use_input():
"""Use existing dir when specified?"""
INPUT = Path.home()
EXPECT = INPUT
assert util.default_cache_dir(INPUT) == EXPECT
def test_data_use_default():
"""Use data dir when no dir specified?"""
INPUT = None
EXPECT = Path(xdg.BaseDirectory.save_data_path(util.RESOURCE_NAME))
assert util.default_data_dir(INPUT) == EXPECT
def test_data_use_input():
"""Use existing dir when specified?"""
INPUT = Path.home()
EXPECT = INPUT
assert util.default_data_dir(INPUT) == EXPECT
def test_use_existing_session():
"""Use existing session when specified?"""
INPUT = requests.Session()
EXPECT = INPUT
assert util.default_new_session(INPUT) is EXPECT
def test_make_new_session():
"""Make new session when none provided?"""
INPUT = None
assert isinstance(util.default_new_session(INPUT), requests.Session)
|
|
3d82021cca499ecaee8f66e563d366268d0422e2
|
lemur/migrations/versions/4fe230f7a26e_.py
|
lemur/migrations/versions/4fe230f7a26e_.py
|
"""Add 'ports' column to certificate_associations table
Revision ID: 4fe230f7a26e
Revises: c301c59688d2
Create Date: 2021-05-07 10:57:16.964743
"""
# revision identifiers, used by Alembic.
revision = '4fe230f7a26e'
down_revision = 'c301c59688d2'
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
def upgrade():
# Add the "ports" column
op.add_column('certificate_associations', sa.Column('ports', postgresql.ARRAY(sa.Integer()), nullable=True))
# Make the existing foreign key columns non-nullable
op.alter_column('certificate_associations', 'domain_id',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('certificate_associations', 'certificate_id',
existing_type=sa.INTEGER(),
nullable=False)
def downgrade():
# Make the existing foreign key columns nullable
op.alter_column('certificate_associations', 'certificate_id',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('certificate_associations', 'domain_id',
existing_type=sa.INTEGER(),
nullable=True)
# Drop the "ports" column
op.drop_column('certificate_associations', 'ports')
|
Add ports column to certificate_associations - alembic migration only
|
Add ports column to certificate_associations - alembic migration only
|
Python
|
apache-2.0
|
Netflix/lemur,Netflix/lemur,Netflix/lemur,Netflix/lemur
|
Add ports column to certificate_associations - alembic migration only
|
"""Add 'ports' column to certificate_associations table
Revision ID: 4fe230f7a26e
Revises: c301c59688d2
Create Date: 2021-05-07 10:57:16.964743
"""
# revision identifiers, used by Alembic.
revision = '4fe230f7a26e'
down_revision = 'c301c59688d2'
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
def upgrade():
# Add the "ports" column
op.add_column('certificate_associations', sa.Column('ports', postgresql.ARRAY(sa.Integer()), nullable=True))
# Make the existing foreign key columns non-nullable
op.alter_column('certificate_associations', 'domain_id',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('certificate_associations', 'certificate_id',
existing_type=sa.INTEGER(),
nullable=False)
def downgrade():
# Make the existing foreign key columns nullable
op.alter_column('certificate_associations', 'certificate_id',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('certificate_associations', 'domain_id',
existing_type=sa.INTEGER(),
nullable=True)
# Drop the "ports" column
op.drop_column('certificate_associations', 'ports')
|
<commit_before><commit_msg>Add ports column to certificate_associations - alembic migration only<commit_after>
|
"""Add 'ports' column to certificate_associations table
Revision ID: 4fe230f7a26e
Revises: c301c59688d2
Create Date: 2021-05-07 10:57:16.964743
"""
# revision identifiers, used by Alembic.
revision = '4fe230f7a26e'
down_revision = 'c301c59688d2'
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
def upgrade():
# Add the "ports" column
op.add_column('certificate_associations', sa.Column('ports', postgresql.ARRAY(sa.Integer()), nullable=True))
# Make the existing foreign key columns non-nullable
op.alter_column('certificate_associations', 'domain_id',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('certificate_associations', 'certificate_id',
existing_type=sa.INTEGER(),
nullable=False)
def downgrade():
# Make the existing foreign key columns nullable
op.alter_column('certificate_associations', 'certificate_id',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('certificate_associations', 'domain_id',
existing_type=sa.INTEGER(),
nullable=True)
# Drop the "ports" column
op.drop_column('certificate_associations', 'ports')
|
Add ports column to certificate_associations - alembic migration only"""Add 'ports' column to certificate_associations table
Revision ID: 4fe230f7a26e
Revises: c301c59688d2
Create Date: 2021-05-07 10:57:16.964743
"""
# revision identifiers, used by Alembic.
revision = '4fe230f7a26e'
down_revision = 'c301c59688d2'
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
def upgrade():
# Add the "ports" column
op.add_column('certificate_associations', sa.Column('ports', postgresql.ARRAY(sa.Integer()), nullable=True))
# Make the existing foreign key columns non-nullable
op.alter_column('certificate_associations', 'domain_id',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('certificate_associations', 'certificate_id',
existing_type=sa.INTEGER(),
nullable=False)
def downgrade():
# Make the existing foreign key columns nullable
op.alter_column('certificate_associations', 'certificate_id',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('certificate_associations', 'domain_id',
existing_type=sa.INTEGER(),
nullable=True)
# Drop the "ports" column
op.drop_column('certificate_associations', 'ports')
|
<commit_before><commit_msg>Add ports column to certificate_associations - alembic migration only<commit_after>"""Add 'ports' column to certificate_associations table
Revision ID: 4fe230f7a26e
Revises: c301c59688d2
Create Date: 2021-05-07 10:57:16.964743
"""
# revision identifiers, used by Alembic.
revision = '4fe230f7a26e'
down_revision = 'c301c59688d2'
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
def upgrade():
# Add the "ports" column
op.add_column('certificate_associations', sa.Column('ports', postgresql.ARRAY(sa.Integer()), nullable=True))
# Make the existing foreign key columns non-nullable
op.alter_column('certificate_associations', 'domain_id',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('certificate_associations', 'certificate_id',
existing_type=sa.INTEGER(),
nullable=False)
def downgrade():
# Make the existing foreign key columns nullable
op.alter_column('certificate_associations', 'certificate_id',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('certificate_associations', 'domain_id',
existing_type=sa.INTEGER(),
nullable=True)
# Drop the "ports" column
op.drop_column('certificate_associations', 'ports')
|
|
430726e060f1d033f233f59f2c956ec6dd09b49f
|
Scripts/mode2-parser.py
|
Scripts/mode2-parser.py
|
#!/usr/bin/env python
# -*- encoding:utf8 -*-
from __future__ import print_function
import argparse
class Parser(object):
@classmethod
def parse(cls, target_file):
print("Target file : %r" % target_file)
if target_file is None or len(target_file) == 0:
return
with open(target_file, "r") as file:
raw = file.readlines()
if raw is None:
return
counter = 0
for line in raw[1:-1]:
code = line.split(" ")[1].strip()
if code is None or len(code) == 0:
continue
counter += 1
if counter % 8 == 0:
print(code)
else:
print(code, end=" ")
print(" ")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--file', required=True)
args = parser.parse_args()
Parser().parse(args.file)
|
Add mode2 raw code parser script.
|
Add mode2 raw code parser script.
|
Python
|
mit
|
supistar/PiAirRemote,supistar/PiAirRemote
|
Add mode2 raw code parser script.
|
#!/usr/bin/env python
# -*- encoding:utf8 -*-
from __future__ import print_function
import argparse
class Parser(object):
@classmethod
def parse(cls, target_file):
print("Target file : %r" % target_file)
if target_file is None or len(target_file) == 0:
return
with open(target_file, "r") as file:
raw = file.readlines()
if raw is None:
return
counter = 0
for line in raw[1:-1]:
code = line.split(" ")[1].strip()
if code is None or len(code) == 0:
continue
counter += 1
if counter % 8 == 0:
print(code)
else:
print(code, end=" ")
print(" ")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--file', required=True)
args = parser.parse_args()
Parser().parse(args.file)
|
<commit_before><commit_msg>Add mode2 raw code parser script.<commit_after>
|
#!/usr/bin/env python
# -*- encoding:utf8 -*-
from __future__ import print_function
import argparse
class Parser(object):
@classmethod
def parse(cls, target_file):
print("Target file : %r" % target_file)
if target_file is None or len(target_file) == 0:
return
with open(target_file, "r") as file:
raw = file.readlines()
if raw is None:
return
counter = 0
for line in raw[1:-1]:
code = line.split(" ")[1].strip()
if code is None or len(code) == 0:
continue
counter += 1
if counter % 8 == 0:
print(code)
else:
print(code, end=" ")
print(" ")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--file', required=True)
args = parser.parse_args()
Parser().parse(args.file)
|
Add mode2 raw code parser script.#!/usr/bin/env python
# -*- encoding:utf8 -*-
from __future__ import print_function
import argparse
class Parser(object):
@classmethod
def parse(cls, target_file):
print("Target file : %r" % target_file)
if target_file is None or len(target_file) == 0:
return
with open(target_file, "r") as file:
raw = file.readlines()
if raw is None:
return
counter = 0
for line in raw[1:-1]:
code = line.split(" ")[1].strip()
if code is None or len(code) == 0:
continue
counter += 1
if counter % 8 == 0:
print(code)
else:
print(code, end=" ")
print(" ")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--file', required=True)
args = parser.parse_args()
Parser().parse(args.file)
|
<commit_before><commit_msg>Add mode2 raw code parser script.<commit_after>#!/usr/bin/env python
# -*- encoding:utf8 -*-
from __future__ import print_function
import argparse
class Parser(object):
@classmethod
def parse(cls, target_file):
print("Target file : %r" % target_file)
if target_file is None or len(target_file) == 0:
return
with open(target_file, "r") as file:
raw = file.readlines()
if raw is None:
return
counter = 0
for line in raw[1:-1]:
code = line.split(" ")[1].strip()
if code is None or len(code) == 0:
continue
counter += 1
if counter % 8 == 0:
print(code)
else:
print(code, end=" ")
print(" ")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--file', required=True)
args = parser.parse_args()
Parser().parse(args.file)
|
|
0f6ff08896604e662c0912b0e8548e4c7f37c4e8
|
depends.py
|
depends.py
|
#!/usr/bin/env python
"""depends.py - print package dependencies
Usage: python depends.py [-h | --help]
"""
from __future__ import print_function
import os
import subprocess
import sys
def depends(home, pkgpath):
os.chdir(os.path.join(home, 'usr', 'pkgsrc'))
os.chdir(pkgpath)
p = subprocess.Popen(
['bmake', 'show-depends-pkgpaths'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = p.communicate()
assert p.returncode == 0, 'bmake show-depends-pkgpaths'
lines = out.split('\n')
deps = [line for line in lines if line]
return deps
def all_depends(home, pkgs):
if pkgs == []:
return []
else:
pkg = pkgs.pop(0)
deps = depends(home, pkg)
return [pkg] + all_depends(home, deps + pkgs)
if __name__ == '__main__':
if len(sys.argv) == 1:
pass
elif len(sys.argv) == 2 and sys.argv[1] in ('-h', '--help'):
print(__doc__)
sys.exit(os.EX_OK)
else:
print(__doc__)
sys.exit(os.EX_USAGE)
home = os.environ['HOME']
lines = sys.stdin.readlines()
pkgs = [line.rstrip('\n') for line in lines]
pkgs = [pkg for pkg in pkgs if pkg]
deps = []
for dep in reversed(all_depends(home, pkgs)):
if dep not in deps:
deps.append(dep)
print(dep)
|
Add a script to determine package dependencies.
|
Add a script to determine package dependencies.
|
Python
|
isc
|
eliteraspberries/minipkg,eliteraspberries/minipkg
|
Add a script to determine package dependencies.
|
#!/usr/bin/env python
"""depends.py - print package dependencies
Usage: python depends.py [-h | --help]
"""
from __future__ import print_function
import os
import subprocess
import sys
def depends(home, pkgpath):
os.chdir(os.path.join(home, 'usr', 'pkgsrc'))
os.chdir(pkgpath)
p = subprocess.Popen(
['bmake', 'show-depends-pkgpaths'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = p.communicate()
assert p.returncode == 0, 'bmake show-depends-pkgpaths'
lines = out.split('\n')
deps = [line for line in lines if line]
return deps
def all_depends(home, pkgs):
if pkgs == []:
return []
else:
pkg = pkgs.pop(0)
deps = depends(home, pkg)
return [pkg] + all_depends(home, deps + pkgs)
if __name__ == '__main__':
if len(sys.argv) == 1:
pass
elif len(sys.argv) == 2 and sys.argv[1] in ('-h', '--help'):
print(__doc__)
sys.exit(os.EX_OK)
else:
print(__doc__)
sys.exit(os.EX_USAGE)
home = os.environ['HOME']
lines = sys.stdin.readlines()
pkgs = [line.rstrip('\n') for line in lines]
pkgs = [pkg for pkg in pkgs if pkg]
deps = []
for dep in reversed(all_depends(home, pkgs)):
if dep not in deps:
deps.append(dep)
print(dep)
|
<commit_before><commit_msg>Add a script to determine package dependencies.<commit_after>
|
#!/usr/bin/env python
"""depends.py - print package dependencies
Usage: python depends.py [-h | --help]
"""
from __future__ import print_function
import os
import subprocess
import sys
def depends(home, pkgpath):
os.chdir(os.path.join(home, 'usr', 'pkgsrc'))
os.chdir(pkgpath)
p = subprocess.Popen(
['bmake', 'show-depends-pkgpaths'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = p.communicate()
assert p.returncode == 0, 'bmake show-depends-pkgpaths'
lines = out.split('\n')
deps = [line for line in lines if line]
return deps
def all_depends(home, pkgs):
if pkgs == []:
return []
else:
pkg = pkgs.pop(0)
deps = depends(home, pkg)
return [pkg] + all_depends(home, deps + pkgs)
if __name__ == '__main__':
if len(sys.argv) == 1:
pass
elif len(sys.argv) == 2 and sys.argv[1] in ('-h', '--help'):
print(__doc__)
sys.exit(os.EX_OK)
else:
print(__doc__)
sys.exit(os.EX_USAGE)
home = os.environ['HOME']
lines = sys.stdin.readlines()
pkgs = [line.rstrip('\n') for line in lines]
pkgs = [pkg for pkg in pkgs if pkg]
deps = []
for dep in reversed(all_depends(home, pkgs)):
if dep not in deps:
deps.append(dep)
print(dep)
|
Add a script to determine package dependencies.#!/usr/bin/env python
"""depends.py - print package dependencies
Usage: python depends.py [-h | --help]
"""
from __future__ import print_function
import os
import subprocess
import sys
def depends(home, pkgpath):
os.chdir(os.path.join(home, 'usr', 'pkgsrc'))
os.chdir(pkgpath)
p = subprocess.Popen(
['bmake', 'show-depends-pkgpaths'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = p.communicate()
assert p.returncode == 0, 'bmake show-depends-pkgpaths'
lines = out.split('\n')
deps = [line for line in lines if line]
return deps
def all_depends(home, pkgs):
if pkgs == []:
return []
else:
pkg = pkgs.pop(0)
deps = depends(home, pkg)
return [pkg] + all_depends(home, deps + pkgs)
if __name__ == '__main__':
if len(sys.argv) == 1:
pass
elif len(sys.argv) == 2 and sys.argv[1] in ('-h', '--help'):
print(__doc__)
sys.exit(os.EX_OK)
else:
print(__doc__)
sys.exit(os.EX_USAGE)
home = os.environ['HOME']
lines = sys.stdin.readlines()
pkgs = [line.rstrip('\n') for line in lines]
pkgs = [pkg for pkg in pkgs if pkg]
deps = []
for dep in reversed(all_depends(home, pkgs)):
if dep not in deps:
deps.append(dep)
print(dep)
|
<commit_before><commit_msg>Add a script to determine package dependencies.<commit_after>#!/usr/bin/env python
"""depends.py - print package dependencies
Usage: python depends.py [-h | --help]
"""
from __future__ import print_function
import os
import subprocess
import sys
def depends(home, pkgpath):
os.chdir(os.path.join(home, 'usr', 'pkgsrc'))
os.chdir(pkgpath)
p = subprocess.Popen(
['bmake', 'show-depends-pkgpaths'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = p.communicate()
assert p.returncode == 0, 'bmake show-depends-pkgpaths'
lines = out.split('\n')
deps = [line for line in lines if line]
return deps
def all_depends(home, pkgs):
if pkgs == []:
return []
else:
pkg = pkgs.pop(0)
deps = depends(home, pkg)
return [pkg] + all_depends(home, deps + pkgs)
if __name__ == '__main__':
if len(sys.argv) == 1:
pass
elif len(sys.argv) == 2 and sys.argv[1] in ('-h', '--help'):
print(__doc__)
sys.exit(os.EX_OK)
else:
print(__doc__)
sys.exit(os.EX_USAGE)
home = os.environ['HOME']
lines = sys.stdin.readlines()
pkgs = [line.rstrip('\n') for line in lines]
pkgs = [pkg for pkg in pkgs if pkg]
deps = []
for dep in reversed(all_depends(home, pkgs)):
if dep not in deps:
deps.append(dep)
print(dep)
|
|
2c0581c79bacf48f0408f55f1d44668d746d0ae6
|
analysis_bistability.py
|
analysis_bistability.py
|
"""
Analyse the distribution of peak distances to make appear the bistability.
To be used for two glomeruli simulations.
"""
import tables
import matplotlib.pyplot as plt
import analysis
import h5manager as hm
def histogram_peaks(simulation):
dists = get_peak_dists(simulation)
plt.figure()
plt.hist(dists)
def get_peak_dists(simulation):
# First, check that this is two glomeruli simulation
n_glom = simulation.paramset._v_attrs['Common']['N_subpop']
assert n_glom == 2, "Number of glomeruli must be 2."
# Get the interesting signals : s_syn_self
s_syn_self = simulation.results.s_syn_self.read()
# Return the peak distances, with distances as angles
return analysis.get_directional_distances(s_syn_self[0], s_syn_self[1])
def main(dbfile):
# Get the db
db = tables.openFile(dbfile)
simulations = hm.get_first_level_groups(db.root)
selected_simus = raw_input("Simulations #").split(' ')
selected_simus = [int(s) for s in selected_simus]
# Plot the histogram for each selected simulation
for i_sim in selected_simus:
histogram_peaks(simulations[i_sim])
plt.show()
db.close()
if __name__ == '__main__':
from sys import argv
main(argv[1]) # argument must be a HDF5 file with simulations
|
Add script to plot histogram of peak distances
|
Add script to plot histogram of peak distances
Useful to see if any bistability occurs.
|
Python
|
mit
|
neuro-lyon/multiglom-model,neuro-lyon/multiglom-model
|
Add script to plot histogram of peak distances
Useful to see if any bistability occurs.
|
"""
Analyse the distribution of peak distances to make the bistability apparent.
To be used for two glomeruli simulations.
"""
import tables
import matplotlib.pyplot as plt
import analysis
import h5manager as hm
def histogram_peaks(simulation):
dists = get_peak_dists(simulation)
plt.figure()
plt.hist(dists)
def get_peak_dists(simulation):
# First, check that this is two glomeruli simulation
n_glom = simulation.paramset._v_attrs['Common']['N_subpop']
assert n_glom == 2, "Number of glomeruli must be 2."
# Get the interesting signals : s_syn_self
s_syn_self = simulation.results.s_syn_self.read()
# Return the peak distances, with distances as angles
return analysis.get_directional_distances(s_syn_self[0], s_syn_self[1])
def main(dbfile):
# Get the db
db = tables.openFile(dbfile)
simulations = hm.get_first_level_groups(db.root)
selected_simus = raw_input("Simulations #").split(' ')
selected_simus = [int(s) for s in selected_simus]
# Plot the histogram for each selected simulation
for i_sim in selected_simus:
histogram_peaks(simulations[i_sim])
plt.show()
db.close()
if __name__ == '__main__':
from sys import argv
main(argv[1]) # argument must be a HDF5 file with simulations
|
<commit_before><commit_msg>Add script to plot histogram of peak distances
Useful to see if any bistability occurs.<commit_after>
|
"""
Analyse the distribution of peak distances to make the bistability apparent.
To be used for two glomeruli simulations.
"""
import tables
import matplotlib.pyplot as plt
import analysis
import h5manager as hm
def histogram_peaks(simulation):
dists = get_peak_dists(simulation)
plt.figure()
plt.hist(dists)
def get_peak_dists(simulation):
# First, check that this is two glomeruli simulation
n_glom = simulation.paramset._v_attrs['Common']['N_subpop']
assert n_glom == 2, "Number of glomeruli must be 2."
# Get the interesting signals : s_syn_self
s_syn_self = simulation.results.s_syn_self.read()
# Return the peak distances, with distances as angles
return analysis.get_directional_distances(s_syn_self[0], s_syn_self[1])
def main(dbfile):
# Get the db
db = tables.openFile(dbfile)
simulations = hm.get_first_level_groups(db.root)
selected_simus = raw_input("Simulations #").split(' ')
selected_simus = [int(s) for s in selected_simus]
# Plot the histogram for each selected simulation
for i_sim in selected_simus:
histogram_peaks(simulations[i_sim])
plt.show()
db.close()
if __name__ == '__main__':
from sys import argv
main(argv[1]) # argument must be a HDF5 file with simulations
|
Add script to plot histogram of peak distances
Useful to see if any bistability occurs."""
Analyse the distribution of peak distances to make the bistability apparent.
To be used for two glomeruli simulations.
"""
import tables
import matplotlib.pyplot as plt
import analysis
import h5manager as hm
def histogram_peaks(simulation):
dists = get_peak_dists(simulation)
plt.figure()
plt.hist(dists)
def get_peak_dists(simulation):
# First, check that this is two glomeruli simulation
n_glom = simulation.paramset._v_attrs['Common']['N_subpop']
assert n_glom == 2, "Number of glomeruli must be 2."
# Get the interesting signals : s_syn_self
s_syn_self = simulation.results.s_syn_self.read()
# Return the peak distances, with distances as angles
return analysis.get_directional_distances(s_syn_self[0], s_syn_self[1])
def main(dbfile):
# Get the db
db = tables.openFile(dbfile)
simulations = hm.get_first_level_groups(db.root)
selected_simus = raw_input("Simulations #").split(' ')
selected_simus = [int(s) for s in selected_simus]
# Plot the histogram for each selected simulation
for i_sim in selected_simus:
histogram_peaks(simulations[i_sim])
plt.show()
db.close()
if __name__ == '__main__':
from sys import argv
main(argv[1]) # argument must be a HDF5 file with simulations
|
<commit_before><commit_msg>Add script to plot histogram of peak distances
Useful to see if any bistability occurs.<commit_after>"""
Analyse the distribution of peak distances to make the bistability apparent.
To be used for two glomeruli simulations.
"""
import tables
import matplotlib.pyplot as plt
import analysis
import h5manager as hm
def histogram_peaks(simulation):
dists = get_peak_dists(simulation)
plt.figure()
plt.hist(dists)
def get_peak_dists(simulation):
# First, check that this is two glomeruli simulation
n_glom = simulation.paramset._v_attrs['Common']['N_subpop']
assert n_glom == 2, "Number of glomeruli must be 2."
# Get the interesting signals : s_syn_self
s_syn_self = simulation.results.s_syn_self.read()
# Return the peak distances, with distances as angles
return analysis.get_directional_distances(s_syn_self[0], s_syn_self[1])
def main(dbfile):
# Get the db
db = tables.openFile(dbfile)
simulations = hm.get_first_level_groups(db.root)
selected_simus = raw_input("Simulations #").split(' ')
selected_simus = [int(s) for s in selected_simus]
# Plot the histogram for each selected simulation
for i_sim in selected_simus:
histogram_peaks(simulations[i_sim])
plt.show()
db.close()
if __name__ == '__main__':
from sys import argv
main(argv[1]) # argument must be a HDF5 file with simulations
|
|
066555c0e6c5bfc11925ce9f60de11141e9a0d0e
|
runner/fix_hdf5_file_format.py
|
runner/fix_hdf5_file_format.py
|
#!/usr/bin/env python
#
# Fix HDF5 files and add mandatory attributes to support the PyTables file format
#
import h5py
import sys
# open HDF5 file
hdf5_filename = sys.argv[1]
hdf5_file = h5py.File(hdf5_filename, 'a')
# add mandatory attributes for a file
hdf5_file['/'].attrs['CLASS'] = 'GROUP'
hdf5_file['/'].attrs['PYTABLES_FORMAT_VERSION'] = '2.1'
hdf5_file['/'].attrs['TITLE'] = 'Root element'
hdf5_file['/'].attrs['VERSION'] = '1.0'
# add mandatory attributes for a group
groups = ('/series', '/series/tarom')
for group in groups:
hdf5_file[group].attrs['CLASS'] = 'GROUP'
hdf5_file[group].attrs['TITLE'] = 'groupnode'
hdf5_file[group].attrs['VERSION'] = '1.0'
# add mandatory attributes for leaves
for group in hdf5_file['/series/tarom'].keys():
node = '/series/tarom/' + group
hdf5_file[node].attrs['CLASS'] = 'GROUP'
hdf5_file[node].attrs['TITLE'] = 'groupnode'
hdf5_file[node].attrs['VERSION'] = '1.0'
hdf5_file[node + '/data'].attrs['CLASS'] = 'ARRAY'
hdf5_file[node + '/data'].attrs['TITLE'] = 'data array'
hdf5_file[node + '/data'].attrs['VERSION'] = '2.4'
hdf5_file[node + '/mask'].attrs['CLASS'] = 'ARRAY'
hdf5_file[node + '/mask'].attrs['TITLE'] = 'mask array'
hdf5_file[node + '/mask'].attrs['VERSION'] = '2.4'
# close HDF5 file
hdf5_file.close()
|
Add mandatory attributes to support PyTables HDF5 file format.
|
Add mandatory attributes to support PyTables HDF5 file format.
|
Python
|
mit
|
lo100/MyRaspiHome,lo100/MyRaspiHome
|
Add mandatory attributes to support PyTables HDF5 file format.
|
#!/usr/bin/env python
#
# Fix HDF5 files and add mandatory attributes to support the PyTables file format
#
import h5py
import sys
# open HDF5 file
hdf5_filename = sys.argv[1]
hdf5_file = h5py.File(hdf5_filename, 'a')
# add mandatory attributes for a file
hdf5_file['/'].attrs['CLASS'] = 'GROUP'
hdf5_file['/'].attrs['PYTABLES_FORMAT_VERSION'] = '2.1'
hdf5_file['/'].attrs['TITLE'] = 'Root element'
hdf5_file['/'].attrs['VERSION'] = '1.0'
# add mandatory attributes for a group
groups = ('/series', '/series/tarom')
for group in groups:
hdf5_file[group].attrs['CLASS'] = 'GROUP'
hdf5_file[group].attrs['TITLE'] = 'groupnode'
hdf5_file[group].attrs['VERSION'] = '1.0'
# add mandatory attributes for leaves
for group in hdf5_file['/series/tarom'].keys():
node = '/series/tarom/' + group
hdf5_file[node].attrs['CLASS'] = 'GROUP'
hdf5_file[node].attrs['TITLE'] = 'groupnode'
hdf5_file[node].attrs['VERSION'] = '1.0'
hdf5_file[node + '/data'].attrs['CLASS'] = 'ARRAY'
hdf5_file[node + '/data'].attrs['TITLE'] = 'data array'
hdf5_file[node + '/data'].attrs['VERSION'] = '2.4'
hdf5_file[node + '/mask'].attrs['CLASS'] = 'ARRAY'
hdf5_file[node + '/mask'].attrs['TITLE'] = 'mask array'
hdf5_file[node + '/mask'].attrs['VERSION'] = '2.4'
# close HDF5 file
hdf5_file.close()
|
<commit_before><commit_msg>Add mandatory attributes to support PyTables HDF5 file format.<commit_after>
|
#!/usr/bin/env python
#
# Fix HDF5 files and add mandatory attributes to support the PyTables file format
#
import h5py
import sys
# open HDF5 file
hdf5_filename = sys.argv[1]
hdf5_file = h5py.File(hdf5_filename, 'a')
# add mandatory attributes for a file
hdf5_file['/'].attrs['CLASS'] = 'GROUP'
hdf5_file['/'].attrs['PYTABLES_FORMAT_VERSION'] = '2.1'
hdf5_file['/'].attrs['TITLE'] = 'Root element'
hdf5_file['/'].attrs['VERSION'] = '1.0'
# add mandatory attributes for a group
groups = ('/series', '/series/tarom')
for group in groups:
hdf5_file[group].attrs['CLASS'] = 'GROUP'
hdf5_file[group].attrs['TITLE'] = 'groupnode'
hdf5_file[group].attrs['VERSION'] = '1.0'
# add mandatory attributes for leaves
for group in hdf5_file['/series/tarom'].keys():
node = '/series/tarom/' + group
hdf5_file[node].attrs['CLASS'] = 'GROUP'
hdf5_file[node].attrs['TITLE'] = 'groupnode'
hdf5_file[node].attrs['VERSION'] = '1.0'
hdf5_file[node + '/data'].attrs['CLASS'] = 'ARRAY'
hdf5_file[node + '/data'].attrs['TITLE'] = 'data array'
hdf5_file[node + '/data'].attrs['VERSION'] = '2.4'
hdf5_file[node + '/mask'].attrs['CLASS'] = 'ARRAY'
hdf5_file[node + '/mask'].attrs['TITLE'] = 'mask array'
hdf5_file[node + '/mask'].attrs['VERSION'] = '2.4'
# close HDF5 file
hdf5_file.close()
|
Add mandatory attributes to support PyTables HDF5 file format.#!/usr/bin/env python
#
# Fix HDF5 files and add mandatory attributes to support the PyTables file format
#
import h5py
import sys
# open HDF5 file
hdf5_filename = sys.argv[1]
hdf5_file = h5py.File(hdf5_filename, 'a')
# add mandatory attributes for a file
hdf5_file['/'].attrs['CLASS'] = 'GROUP'
hdf5_file['/'].attrs['PYTABLES_FORMAT_VERSION'] = '2.1'
hdf5_file['/'].attrs['TITLE'] = 'Root element'
hdf5_file['/'].attrs['VERSION'] = '1.0'
# add mandatory attributes for a group
groups = ('/series', '/series/tarom')
for group in groups:
hdf5_file[group].attrs['CLASS'] = 'GROUP'
hdf5_file[group].attrs['TITLE'] = 'groupnode'
hdf5_file[group].attrs['VERSION'] = '1.0'
# add mandatory attributes for leaves
for group in hdf5_file['/series/tarom'].keys():
node = '/series/tarom/' + group
hdf5_file[node].attrs['CLASS'] = 'GROUP'
hdf5_file[node].attrs['TITLE'] = 'groupnode'
hdf5_file[node].attrs['VERSION'] = '1.0'
hdf5_file[node + '/data'].attrs['CLASS'] = 'ARRAY'
hdf5_file[node + '/data'].attrs['TITLE'] = 'data array'
hdf5_file[node + '/data'].attrs['VERSION'] = '2.4'
hdf5_file[node + '/mask'].attrs['CLASS'] = 'ARRAY'
hdf5_file[node + '/mask'].attrs['TITLE'] = 'mask array'
hdf5_file[node + '/mask'].attrs['VERSION'] = '2.4'
# close HDF5 file
hdf5_file.close()
|
<commit_before><commit_msg>Add mandatory attributes to support PyTables HDF5 file format.<commit_after>#!/usr/bin/env python
#
# Fix HDF5 files and add mandatory attributes to support the PyTables file format
#
import h5py
import sys
# open HDF5 file
hdf5_filename = sys.argv[1]
hdf5_file = h5py.File(hdf5_filename, 'a')
# add mandatory attributes for a file
hdf5_file['/'].attrs['CLASS'] = 'GROUP'
hdf5_file['/'].attrs['PYTABLES_FORMAT_VERSION'] = '2.1'
hdf5_file['/'].attrs['TITLE'] = 'Root element'
hdf5_file['/'].attrs['VERSION'] = '1.0'
# add mandatory attributes for a group
groups = ('/series', '/series/tarom')
for group in groups:
hdf5_file[group].attrs['CLASS'] = 'GROUP'
hdf5_file[group].attrs['TITLE'] = 'groupnode'
hdf5_file[group].attrs['VERSION'] = '1.0'
# add mandatory attributes for leaves
for group in hdf5_file['/series/tarom'].keys():
node = '/series/tarom/' + group
hdf5_file[node].attrs['CLASS'] = 'GROUP'
hdf5_file[node].attrs['TITLE'] = 'groupnode'
hdf5_file[node].attrs['VERSION'] = '1.0'
hdf5_file[node + '/data'].attrs['CLASS'] = 'ARRAY'
hdf5_file[node + '/data'].attrs['TITLE'] = 'data array'
hdf5_file[node + '/data'].attrs['VERSION'] = '2.4'
hdf5_file[node + '/mask'].attrs['CLASS'] = 'ARRAY'
hdf5_file[node + '/mask'].attrs['TITLE'] = 'mask array'
hdf5_file[node + '/mask'].attrs['VERSION'] = '2.4'
# close HDF5 file
hdf5_file.close()
|
|
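A minimal check of the result, assuming h5py is available; the file name is an example only, not taken from the script:
import h5py
with h5py.File('example.h5', 'r') as f:
    print(dict(f['/'].attrs))        # expect CLASS, PYTABLES_FORMAT_VERSION, TITLE, VERSION
    print(dict(f['/series'].attrs))  # expect CLASS, TITLE, VERSION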
63f47104feec03cd529b80c654a2aa80e3e7d524
|
tests/cpydiff/builtin_next_arg2.py
|
tests/cpydiff/builtin_next_arg2.py
|
"""
categories: Modules,builtins
description: Second argument to next() is not implemented
cause: MicroPython is optimised for code space.
workaround: Instead of `val = next(it, deflt)` use::
try:
val = next(it)
except StopIteration:
val = deflt
"""
print(next(iter(range(0)), 42))
|
Add difference-test for second arg of builtin next().
|
tests/cpydiff: Add difference-test for second arg of builtin next().
|
Python
|
mit
|
MrSurly/micropython,blazewicz/micropython,torwag/micropython,dmazzella/micropython,MrSurly/micropython,torwag/micropython,pfalcon/micropython,swegener/micropython,ryannathans/micropython,MrSurly/micropython,blazewicz/micropython,infinnovation/micropython,trezor/micropython,pfalcon/micropython,adafruit/circuitpython,bvernoux/micropython,adafruit/circuitpython,lowRISC/micropython,MrSurly/micropython,blazewicz/micropython,ryannathans/micropython,ryannathans/micropython,adafruit/circuitpython,pozetroninc/micropython,pozetroninc/micropython,selste/micropython,selste/micropython,pfalcon/micropython,pramasoul/micropython,tralamazza/micropython,henriknelson/micropython,selste/micropython,adafruit/micropython,bvernoux/micropython,tralamazza/micropython,selste/micropython,swegener/micropython,lowRISC/micropython,dmazzella/micropython,bvernoux/micropython,henriknelson/micropython,blazewicz/micropython,trezor/micropython,tobbad/micropython,dmazzella/micropython,torwag/micropython,pramasoul/micropython,ryannathans/micropython,swegener/micropython,ryannathans/micropython,adafruit/micropython,pramasoul/micropython,pfalcon/micropython,infinnovation/micropython,henriknelson/micropython,tobbad/micropython,pozetroninc/micropython,kerneltask/micropython,pramasoul/micropython,trezor/micropython,trezor/micropython,adafruit/circuitpython,pramasoul/micropython,swegener/micropython,MrSurly/micropython,tobbad/micropython,tobbad/micropython,pozetroninc/micropython,tralamazza/micropython,bvernoux/micropython,pozetroninc/micropython,adafruit/circuitpython,lowRISC/micropython,infinnovation/micropython,lowRISC/micropython,bvernoux/micropython,tobbad/micropython,tralamazza/micropython,pfalcon/micropython,adafruit/circuitpython,lowRISC/micropython,infinnovation/micropython,kerneltask/micropython,henriknelson/micropython,adafruit/micropython,torwag/micropython,adafruit/micropython,torwag/micropython,selste/micropython,swegener/micropython,kerneltask/micropython,kerneltask/micropython,kerneltask/micropython,henriknelson/micropython,dmazzella/micropython,trezor/micropython,blazewicz/micropython,infinnovation/micropython,adafruit/micropython
|
tests/cpydiff: Add difference-test for second arg of builtin next().
|
"""
categories: Modules,builtins
description: Second argument to next() is not implemented
cause: MicroPython is optimised for code space.
workaround: Instead of `val = next(it, deflt)` use::
try:
val = next(it)
except StopIteration:
val = deflt
"""
print(next(iter(range(0)), 42))
|
<commit_before><commit_msg>tests/cpydiff: Add difference-test for second arg of builtin next().<commit_after>
|
"""
categories: Modules,builtins
description: Second argument to next() is not implemented
cause: MicroPython is optimised for code space.
workaround: Instead of `val = next(it, deflt)` use::
try:
val = next(it)
except StopIteration:
val = deflt
"""
print(next(iter(range(0)), 42))
|
tests/cpydiff: Add difference-test for second arg of builtin next()."""
categories: Modules,builtins
description: Second argument to next() is not implemented
cause: MicroPython is optimised for code space.
workaround: Instead of `val = next(it, deflt)` use::
try:
val = next(it)
except StopIteration:
val = deflt
"""
print(next(iter(range(0)), 42))
|
<commit_before><commit_msg>tests/cpydiff: Add difference-test for second arg of builtin next().<commit_after>"""
categories: Modules,builtins
description: Second argument to next() is not implemented
cause: MicroPython is optimised for code space.
workaround: Instead of `val = next(it, deflt)` use::
try:
val = next(it)
except StopIteration:
val = deflt
"""
print(next(iter(range(0)), 42))
|
|
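The workaround from the docstring can also be packaged once as a small helper so call sites stay close to the CPython spelling; the helper name below is arbitrary and this is only a sketch of the same pattern:
def next_or_default(it, default):
    # portable stand-in for the two-argument next(it, default)
    try:
        return next(it)
    except StopIteration:
        return default
print(next_or_default(iter(range(0)), 42))   # prints 42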
f06d2da6784dbc742cbe304ed1b078702a61c961
|
tests/app/soc/modules/gci/views/test_age_check.py
|
tests/app/soc/modules/gci/views/test_age_check.py
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for age check related views.
"""
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from tests.test_utils import GCIDjangoTestCase
class AgeCheckTest(GCIDjangoTestCase):
"""Tests age check page.
"""
def setUp(self):
self.init()
def assertProgramTemplatesUsed(self, response):
"""Asserts that all the templates were used.
"""
self.assertGCITemplatesUsed(response)
self.assertTemplateUsed(response, 'v2/modules/gci/age_check/base.html')
def testAgeCheckViewable(self):
url = '/gci/age_check/' + self.gci.key().name()
response = self.client.get(url)
self.assertTemplateUsed(response)
# TODO(ljvderijk): Add more tests that also check the cookies that are set
|
Add basic test case for the age check view.
|
Add basic test case for the age check view.
Only check if the page can actually be rendered; more info on how to deal with cookies and user authentication inside tests is needed to expand this.
|
Python
|
apache-2.0
|
rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son
|
Add basic test case for the age check view.
Only check if the page can actually be rendered; more info on how to deal with cookies and user authentication inside tests is needed to expand this.
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for age check related views.
"""
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from tests.test_utils import GCIDjangoTestCase
class AgeCheckTest(GCIDjangoTestCase):
"""Tests age check page.
"""
def setUp(self):
self.init()
def assertProgramTemplatesUsed(self, response):
"""Asserts that all the templates were used.
"""
self.assertGCITemplatesUsed(response)
self.assertTemplateUsed(response, 'v2/modules/gci/age_check/base.html')
def testAgeCheckViewable(self):
url = '/gci/age_check/' + self.gci.key().name()
response = self.client.get(url)
self.assertTemplateUsed(response)
# TODO(ljvderijk): Add more tests that also check the cookies that are set
|
<commit_before><commit_msg>Add basic test case for the age check view.
Only check if the page can actually be rendered; more info on how to deal with cookies and user authentication inside tests is needed to expand this.<commit_after>
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for age check related views.
"""
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from tests.test_utils import GCIDjangoTestCase
class AgeCheckTest(GCIDjangoTestCase):
"""Tests age check page.
"""
def setUp(self):
self.init()
def assertProgramTemplatesUsed(self, response):
"""Asserts that all the templates were used.
"""
self.assertGCITemplatesUsed(response)
self.assertTemplateUsed(response, 'v2/modules/gci/age_check/base.html')
def testAgeCheckViewable(self):
url = '/gci/age_check/' + self.gci.key().name()
response = self.client.get(url)
self.assertTemplateUsed(response)
# TODO(ljvderijk): Add more tests that also check the cookies that are set
|
Add basic test case for the age check view.
Only check if the page can actually be rendered; more info on how to deal with cookies and user authentication inside tests is needed to expand this.#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for age check related views.
"""
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from tests.test_utils import GCIDjangoTestCase
class AgeCheckTest(GCIDjangoTestCase):
"""Tests age check page.
"""
def setUp(self):
self.init()
def assertProgramTemplatesUsed(self, response):
"""Asserts that all the templates were used.
"""
self.assertGCITemplatesUsed(response)
self.assertTemplateUsed(response, 'v2/modules/gci/age_check/base.html')
def testAgeCheckViewable(self):
url = '/gci/age_check/' + self.gci.key().name()
response = self.client.get(url)
self.assertTemplateUsed(response)
# TODO(ljvderijk): Add more tests that also check the cookies that are set
|
<commit_before><commit_msg>Add basic test case for the age check view.
Only check if the page can actually be rendered; more info on how to deal with cookies and user authentication inside tests is needed to expand this.<commit_after>#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for age check related views.
"""
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from tests.test_utils import GCIDjangoTestCase
class AgeCheckTest(GCIDjangoTestCase):
"""Tests age check page.
"""
def setUp(self):
self.init()
def assertProgramTemplatesUsed(self, response):
"""Asserts that all the templates were used.
"""
self.assertGCITemplatesUsed(response)
self.assertTemplateUsed(response, 'v2/modules/gci/age_check/base.html')
def testAgeCheckViewable(self):
url = '/gci/age_check/' + self.gci.key().name()
response = self.client.get(url)
self.assertTemplateUsed(response)
# TODO(ljvderijk): Add more tests that also check the cookies that are set
|
|
5072478d2c89d5235aff20f8ccb365ab9ec2e297
|
experiments/python/gtk_drawing_area_pixels.py
|
experiments/python/gtk_drawing_area_pixels.py
|
__author__ = 'Joel Wright'
import pygtk
pygtk.require('2.0')
import gtk
import math
import cairo
import struct
class DrawingAreaExample(gtk.Window):
def __init__(self):
super(DrawingAreaExample, self).__init__()
self.set_title("Drawing Area Example")
self.resize(300,400)
self.set_position(gtk.WIN_POS_CENTER)
self.connect("destroy", gtk.main_quit)
self.surface = cairo.ImageSurface(cairo.FORMAT_RGB24, 300, 300)
self.draw_to_surface()
area = gtk.DrawingArea()
area.set_size_request(300, 300)
area.connect("expose-event", self.expose)
show_button = gtk.Button("dump")
show_button.connect("clicked", self.dump_pixmap)
fixed = gtk.Fixed()
fixed.put(area, 0, 0)
fixed.put(show_button, 20, 320)
self.add(fixed)
self.show_all()
def expose(self, widget, event):
cr = widget.window.cairo_create()
cr.set_source_surface(self.surface, 0, 0)
cr.paint()
def draw_to_surface(self):
cr = cairo.Context(self.surface)
cr.set_line_width(9)
cr.set_source_rgb(0.7, 0.2, 0.0)
w = self.surface.get_width()
h = self.surface.get_height()
cr.translate(w/2, h/2)
cr.arc(0, 0, 50, 0, 2*math.pi)
cr.stroke_preserve()
cr.set_source_rgb(0.3, 0.4, 0.6)
cr.fill()
def dump_pixmap(self, widget):
d = self.surface.get_data()
self.print_buffer(d)
def print_buffer(self, data):
coord = 0
while coord < len(data):
b = struct.unpack('B',data[coord])
g = struct.unpack('B',data[coord+1])
r = struct.unpack('B',data[coord+2])
coord += 4
print '(%d,%d,%d)' % (r[0], g[0], b[0])
DrawingAreaExample()
gtk.main()
|
Add experiment getting pixel values from gtk DrawingArea
|
Add experiment getting pixel values from gtk DrawingArea
|
Python
|
mit
|
joel-wright/DDRPi,fraz3alpha/DDRPi,fraz3alpha/led-disco-dancefloor
|
Add experiment getting pixel values from gtk DrawingArea
|
__author__ = 'Joel Wright'
import pygtk
pygtk.require('2.0')
import gtk
import math
import cairo
import struct
class DrawingAreaExample(gtk.Window):
def __init__(self):
super(DrawingAreaExample, self).__init__()
self.set_title("Drawing Area Example")
self.resize(300,400)
self.set_position(gtk.WIN_POS_CENTER)
self.connect("destroy", gtk.main_quit)
self.surface = cairo.ImageSurface(cairo.FORMAT_RGB24, 300, 300)
self.draw_to_surface()
area = gtk.DrawingArea()
area.set_size_request(300, 300)
area.connect("expose-event", self.expose)
show_button = gtk.Button("dump")
show_button.connect("clicked", self.dump_pixmap)
fixed = gtk.Fixed()
fixed.put(area, 0, 0)
fixed.put(show_button, 20, 320)
self.add(fixed)
self.show_all()
def expose(self, widget, event):
cr = widget.window.cairo_create()
cr.set_source_surface(self.surface, 0, 0)
cr.paint()
def draw_to_surface(self):
cr = cairo.Context(self.surface)
cr.set_line_width(9)
cr.set_source_rgb(0.7, 0.2, 0.0)
w = self.surface.get_width()
h = self.surface.get_height()
cr.translate(w/2, h/2)
cr.arc(0, 0, 50, 0, 2*math.pi)
cr.stroke_preserve()
cr.set_source_rgb(0.3, 0.4, 0.6)
cr.fill()
def dump_pixmap(self, widget):
d = self.surface.get_data()
self.print_buffer(d)
def print_buffer(self, data):
coord = 0
while coord < len(data):
b = struct.unpack('B',data[coord])
g = struct.unpack('B',data[coord+1])
r = struct.unpack('B',data[coord+2])
coord += 4
print '(%d,%d,%d)' % (r[0], g[0], b[0])
DrawingAreaExample()
gtk.main()
|
<commit_before><commit_msg>Add experiment getting pixel values from gtk DrawingArea<commit_after>
|
__author__ = 'Joel Wright'
import pygtk
pygtk.require('2.0')
import gtk
import math
import cairo
import struct
class DrawingAreaExample(gtk.Window):
def __init__(self):
super(DrawingAreaExample, self).__init__()
self.set_title("Drawing Area Example")
self.resize(300,400)
self.set_position(gtk.WIN_POS_CENTER)
self.connect("destroy", gtk.main_quit)
self.surface = cairo.ImageSurface(cairo.FORMAT_RGB24, 300, 300)
self.draw_to_surface()
area = gtk.DrawingArea()
area.set_size_request(300, 300)
area.connect("expose-event", self.expose)
show_button = gtk.Button("dump")
show_button.connect("clicked", self.dump_pixmap)
fixed = gtk.Fixed()
fixed.put(area, 0, 0)
fixed.put(show_button, 20, 320)
self.add(fixed)
self.show_all()
def expose(self, widget, event):
cr = widget.window.cairo_create()
cr.set_source_surface(self.surface, 0, 0)
cr.paint()
def draw_to_surface(self):
cr = cairo.Context(self.surface)
cr.set_line_width(9)
cr.set_source_rgb(0.7, 0.2, 0.0)
w = self.surface.get_width()
h = self.surface.get_height()
cr.translate(w/2, h/2)
cr.arc(0, 0, 50, 0, 2*math.pi)
cr.stroke_preserve()
cr.set_source_rgb(0.3, 0.4, 0.6)
cr.fill()
def dump_pixmap(self, widget):
d = self.surface.get_data()
self.print_buffer(d)
def print_buffer(self, data):
coord = 0
while coord < len(data):
b = struct.unpack('B',data[coord])
g = struct.unpack('B',data[coord+1])
r = struct.unpack('B',data[coord+2])
coord += 4
print '(%d,%d,%d)' % (r[0], g[0], b[0])
DrawingAreaExample()
gtk.main()
|
Add experiment getting pixel values from gtk DrawingArea__author__ = 'Joel Wright'
import pygtk
pygtk.require('2.0')
import gtk
import math
import cairo
import struct
class DrawingAreaExample(gtk.Window):
def __init__(self):
super(DrawingAreaExample, self).__init__()
self.set_title("Drawing Area Example")
self.resize(300,400)
self.set_position(gtk.WIN_POS_CENTER)
self.connect("destroy", gtk.main_quit)
self.surface = cairo.ImageSurface(cairo.FORMAT_RGB24, 300, 300)
self.draw_to_surface()
area = gtk.DrawingArea()
area.set_size_request(300, 300)
area.connect("expose-event", self.expose)
show_button = gtk.Button("dump")
show_button.connect("clicked", self.dump_pixmap)
fixed = gtk.Fixed()
fixed.put(area, 0, 0)
fixed.put(show_button, 20, 320)
self.add(fixed)
self.show_all()
def expose(self, widget, event):
cr = widget.window.cairo_create()
cr.set_source_surface(self.surface, 0, 0)
cr.paint()
def draw_to_surface(self):
cr = cairo.Context(self.surface)
cr.set_line_width(9)
cr.set_source_rgb(0.7, 0.2, 0.0)
w = self.surface.get_width()
h = self.surface.get_height()
cr.translate(w/2, h/2)
cr.arc(0, 0, 50, 0, 2*math.pi)
cr.stroke_preserve()
cr.set_source_rgb(0.3, 0.4, 0.6)
cr.fill()
def dump_pixmap(self, widget):
d = self.surface.get_data()
self.print_buffer(d)
def print_buffer(self, data):
coord = 0
while coord < len(data):
b = struct.unpack('B',data[coord])
g = struct.unpack('B',data[coord+1])
r = struct.unpack('B',data[coord+2])
coord += 4
print '(%d,%d,%d)' % (r[0], g[0], b[0])
DrawingAreaExample()
gtk.main()
|
<commit_before><commit_msg>Add experiment getting pixel values from gtk DrawingArea<commit_after>__author__ = 'Joel Wright'
import pygtk
pygtk.require('2.0')
import gtk
import math
import cairo
import struct
class DrawingAreaExample(gtk.Window):
def __init__(self):
super(DrawingAreaExample, self).__init__()
self.set_title("Drawing Area Example")
self.resize(300,400)
self.set_position(gtk.WIN_POS_CENTER)
self.connect("destroy", gtk.main_quit)
self.surface = cairo.ImageSurface(cairo.FORMAT_RGB24, 300, 300)
self.draw_to_surface()
area = gtk.DrawingArea()
area.set_size_request(300, 300)
area.connect("expose-event", self.expose)
show_button = gtk.Button("dump")
show_button.connect("clicked", self.dump_pixmap)
fixed = gtk.Fixed()
fixed.put(area, 0, 0)
fixed.put(show_button, 20, 320)
self.add(fixed)
self.show_all()
def expose(self, widget, event):
cr = widget.window.cairo_create()
cr.set_source_surface(self.surface, 0, 0)
cr.paint()
def draw_to_surface(self):
cr = cairo.Context(self.surface)
cr.set_line_width(9)
cr.set_source_rgb(0.7, 0.2, 0.0)
w = self.surface.get_width()
h = self.surface.get_height()
cr.translate(w/2, h/2)
cr.arc(0, 0, 50, 0, 2*math.pi)
cr.stroke_preserve()
cr.set_source_rgb(0.3, 0.4, 0.6)
cr.fill()
def dump_pixmap(self, widget):
d = self.surface.get_data()
self.print_buffer(d)
def print_buffer(self, data):
coord = 0
while coord < len(data):
b = struct.unpack('B',data[coord])
g = struct.unpack('B',data[coord+1])
r = struct.unpack('B',data[coord+2])
coord += 4
print '(%d,%d,%d)' % (r[0], g[0], b[0])
DrawingAreaExample()
gtk.main()
|
|
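The b, g, r, skip-one reads in print_buffer above follow cairo's FORMAT_RGB24 layout: each pixel is a native-endian 32-bit value 0x00RRGGBB, so on a little-endian host the bytes sit in memory as B, G, R, unused. A standalone sketch of that byte order:
import struct
pixel = struct.pack('<I', 0x00112233)          # R=0x11, G=0x22, B=0x33, top byte unused
b, g, r, _unused = struct.unpack('4B', pixel)
print('%d %d %d' % (r, g, b))                  # 17 34 51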
f76ccddca4864b2f2faf8dfadefa6ac15c930043
|
examples/tour_examples/driverjs_maps_tour.py
|
examples/tour_examples/driverjs_maps_tour.py
|
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_basic(self):
self.open("https://www.google.com/maps/@42.3598616,-71.0912631,15z")
self.wait_for_element("#searchboxinput")
self.wait_for_element("#minimap")
self.wait_for_element("#zoom")
# Create a website tour using the DriverJS library
# Same as: self.create_driverjs_tour()
self.create_tour(theme="driverjs")
self.add_tour_step("🗺️ Welcome to Google Maps 🗺️", "html",
title="✅ SeleniumBase Tours 🌎")
self.add_tour_step("You can type a location into this Search box.",
"#searchboxinput")
self.add_tour_step("Then click here to view it on the map.",
"#searchbox-searchbutton", alignment="bottom")
self.add_tour_step("Or click here to get driving directions.",
"#searchbox-directions", alignment="bottom")
self.add_tour_step("Use this button to get a Satellite view.",
"#minimap div.widget-minimap", alignment="right")
self.add_tour_step("Click here to zoom in.",
"#widget-zoom-in", alignment="left")
self.add_tour_step("Or click here to zoom out.",
"#widget-zoom-out", alignment="left")
self.add_tour_step("Use the Menu button for more options.",
".searchbox-hamburger-container", alignment="right")
self.add_tour_step("Or click here to see more Google apps.",
'[title="Google apps"]', alignment="left")
self.add_tour_step("Thanks for using SeleniumBase Tours", "html",
title="🚃 End of Guided Tour 🚃")
self.export_tour() # The default name for exports is "my_tour.js"
self.play_tour(interval=0) # If interval > 0, autoplay after N seconds
|
Add an example tour for DriverJS
|
Add an example tour for DriverJS
|
Python
|
mit
|
seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase
|
Add an example tour for DriverJS
|
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_basic(self):
self.open("https://www.google.com/maps/@42.3598616,-71.0912631,15z")
self.wait_for_element("#searchboxinput")
self.wait_for_element("#minimap")
self.wait_for_element("#zoom")
# Create a website tour using the DriverJS library
# Same as: self.create_driverjs_tour()
self.create_tour(theme="driverjs")
self.add_tour_step("🗺️ Welcome to Google Maps 🗺️", "html",
title="✅ SeleniumBase Tours 🌎")
self.add_tour_step("You can type a location into this Search box.",
"#searchboxinput")
self.add_tour_step("Then click here to view it on the map.",
"#searchbox-searchbutton", alignment="bottom")
self.add_tour_step("Or click here to get driving directions.",
"#searchbox-directions", alignment="bottom")
self.add_tour_step("Use this button to get a Satellite view.",
"#minimap div.widget-minimap", alignment="right")
self.add_tour_step("Click here to zoom in.",
"#widget-zoom-in", alignment="left")
self.add_tour_step("Or click here to zoom out.",
"#widget-zoom-out", alignment="left")
self.add_tour_step("Use the Menu button for more options.",
".searchbox-hamburger-container", alignment="right")
self.add_tour_step("Or click here to see more Google apps.",
'[title="Google apps"]', alignment="left")
self.add_tour_step("Thanks for using SeleniumBase Tours", "html",
title="🚃 End of Guided Tour 🚃")
self.export_tour() # The default name for exports is "my_tour.js"
self.play_tour(interval=0) # If interval > 0, autoplay after N seconds
|
<commit_before><commit_msg>Add an example tour for DriverJS<commit_after>
|
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_basic(self):
self.open("https://www.google.com/maps/@42.3598616,-71.0912631,15z")
self.wait_for_element("#searchboxinput")
self.wait_for_element("#minimap")
self.wait_for_element("#zoom")
# Create a website tour using the DriverJS library
# Same as: self.create_driverjs_tour()
self.create_tour(theme="driverjs")
self.add_tour_step("🗺️ Welcome to Google Maps 🗺️", "html",
title="✅ SeleniumBase Tours 🌎")
self.add_tour_step("You can type a location into this Search box.",
"#searchboxinput")
self.add_tour_step("Then click here to view it on the map.",
"#searchbox-searchbutton", alignment="bottom")
self.add_tour_step("Or click here to get driving directions.",
"#searchbox-directions", alignment="bottom")
self.add_tour_step("Use this button to get a Satellite view.",
"#minimap div.widget-minimap", alignment="right")
self.add_tour_step("Click here to zoom in.",
"#widget-zoom-in", alignment="left")
self.add_tour_step("Or click here to zoom out.",
"#widget-zoom-out", alignment="left")
self.add_tour_step("Use the Menu button for more options.",
".searchbox-hamburger-container", alignment="right")
self.add_tour_step("Or click here to see more Google apps.",
'[title="Google apps"]', alignment="left")
self.add_tour_step("Thanks for using SeleniumBase Tours", "html",
title="🚃 End of Guided Tour 🚃")
self.export_tour() # The default name for exports is "my_tour.js"
self.play_tour(interval=0) # If interval > 0, autoplay after N seconds
|
Add an example tour for DriverJSfrom seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_basic(self):
self.open("https://www.google.com/maps/@42.3598616,-71.0912631,15z")
self.wait_for_element("#searchboxinput")
self.wait_for_element("#minimap")
self.wait_for_element("#zoom")
# Create a website tour using the DriverJS library
# Same as: self.create_driverjs_tour()
self.create_tour(theme="driverjs")
self.add_tour_step("🗺️ Welcome to Google Maps 🗺️", "html",
title="✅ SeleniumBase Tours 🌎")
self.add_tour_step("You can type a location into this Search box.",
"#searchboxinput")
self.add_tour_step("Then click here to view it on the map.",
"#searchbox-searchbutton", alignment="bottom")
self.add_tour_step("Or click here to get driving directions.",
"#searchbox-directions", alignment="bottom")
self.add_tour_step("Use this button to get a Satellite view.",
"#minimap div.widget-minimap", alignment="right")
self.add_tour_step("Click here to zoom in.",
"#widget-zoom-in", alignment="left")
self.add_tour_step("Or click here to zoom out.",
"#widget-zoom-out", alignment="left")
self.add_tour_step("Use the Menu button for more options.",
".searchbox-hamburger-container", alignment="right")
self.add_tour_step("Or click here to see more Google apps.",
'[title="Google apps"]', alignment="left")
self.add_tour_step("Thanks for using SeleniumBase Tours", "html",
title="🚃 End of Guided Tour 🚃")
self.export_tour() # The default name for exports is "my_tour.js"
self.play_tour(interval=0) # If interval > 0, autoplay after N seconds
|
<commit_before><commit_msg>Add an example tour for DriverJS<commit_after>from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_basic(self):
self.open("https://www.google.com/maps/@42.3598616,-71.0912631,15z")
self.wait_for_element("#searchboxinput")
self.wait_for_element("#minimap")
self.wait_for_element("#zoom")
# Create a website tour using the DriverJS library
# Same as: self.create_driverjs_tour()
self.create_tour(theme="driverjs")
self.add_tour_step("🗺️ Welcome to Google Maps 🗺️", "html",
title="✅ SeleniumBase Tours 🌎")
self.add_tour_step("You can type a location into this Search box.",
"#searchboxinput")
self.add_tour_step("Then click here to view it on the map.",
"#searchbox-searchbutton", alignment="bottom")
self.add_tour_step("Or click here to get driving directions.",
"#searchbox-directions", alignment="bottom")
self.add_tour_step("Use this button to get a Satellite view.",
"#minimap div.widget-minimap", alignment="right")
self.add_tour_step("Click here to zoom in.",
"#widget-zoom-in", alignment="left")
self.add_tour_step("Or click here to zoom out.",
"#widget-zoom-out", alignment="left")
self.add_tour_step("Use the Menu button for more options.",
".searchbox-hamburger-container", alignment="right")
self.add_tour_step("Or click here to see more Google apps.",
'[title="Google apps"]', alignment="left")
self.add_tour_step("Thanks for using SeleniumBase Tours", "html",
title="🚃 End of Guided Tour 🚃")
self.export_tour() # The default name for exports is "my_tour.js"
self.play_tour(interval=0) # If interval > 0, autoplay after N seconds
|
|
b90af30b23015d2fbe93f401a87176c6441e5c0d
|
altair/examples/wheat_wages.py
|
altair/examples/wheat_wages.py
|
"""
Wheat and Wages
---------------
A recreation of William Playfair's classic chart visualizing
the price of wheat, the wages of a mechanic, and the reigning British monarch.
"""
# category: case studies
import altair as alt
from vega_datasets import data
base_wheat = alt.Chart(data.wheat.url).transform_calculate(
year_end="+datum.year + 5")
base_monarchs = alt.Chart(data.monarchs.url).transform_calculate(
offset="((!datum.commonwealth && datum.index % 2) ? -1: 1) * 2 + 95",
off2="((!datum.commonwealth && datum.index % 2) ? -1: 1) + 95",
y="95",
x="+datum.start + (+datum.end - +datum.start)/2"
)
bars = base_wheat.mark_bar(**{"fill": "#aaa", "stroke": "#999"}).encode(
x=alt.X("year:Q", axis=alt.Axis(format='d', tickCount=5)),
y=alt.Y("wheat:Q", axis=alt.Axis(zindex=1)),
x2=alt.X2("year_end")
)
area = base_wheat.mark_area(**{"color": "#a4cedb", "opacity": 0.7}).encode(
x=alt.X("year:Q"),
y=alt.Y("wages:Q")
)
area_line_1 = area.mark_line(**{"color": "#000", "opacity": 0.7})
area_line_2 = area.mark_line(**{"yOffset": -2, "color": "#EE8182"})
top_bars = base_monarchs.mark_bar(stroke="#000").encode(
x=alt.X("start:Q"),
x2=alt.X2("end"),
y=alt.Y("y:Q"),
y2=alt.Y2("offset"),
fill=alt.Fill("commonwealth:N", legend=None, scale=alt.Scale(range=["black", "white"]))
)
top_text = base_monarchs.mark_text(**{"yOffset": 14, "fontSize": 9, "fontStyle": "italic"}).encode(
x=alt.X("x:Q"),
y=alt.Y("off2:Q"),
text=alt.Text("name:N")
)
(bars + area + area_line_1 + area_line_2 + top_bars + top_text).properties(
width=900, height=400
).configure_axis(
title=None, gridColor="white", gridOpacity=0.25, domain=False
).configure_view(
stroke="transparent"
)
|
Add William Playfair Wheat and Wages example
|
ENH: Add William Playfair Wheat and Wages example
|
Python
|
bsd-3-clause
|
altair-viz/altair,jakevdp/altair
|
ENH: Add William Playfair Wheat and Wages example
|
"""
Wheat and Wages
---------------
A recreation of William Playfair's classic chart visualizing
the price of wheat, the wages of a mechanic, and the reigning British monarch.
"""
# category: case studies
import altair as alt
from vega_datasets import data
base_wheat = alt.Chart(data.wheat.url).transform_calculate(
year_end="+datum.year + 5")
base_monarchs = alt.Chart(data.monarchs.url).transform_calculate(
offset="((!datum.commonwealth && datum.index % 2) ? -1: 1) * 2 + 95",
off2="((!datum.commonwealth && datum.index % 2) ? -1: 1) + 95",
y="95",
x="+datum.start + (+datum.end - +datum.start)/2"
)
bars = base_wheat.mark_bar(**{"fill": "#aaa", "stroke": "#999"}).encode(
x=alt.X("year:Q", axis=alt.Axis(format='d', tickCount=5)),
y=alt.Y("wheat:Q", axis=alt.Axis(zindex=1)),
x2=alt.X2("year_end")
)
area = base_wheat.mark_area(**{"color": "#a4cedb", "opacity": 0.7}).encode(
x=alt.X("year:Q"),
y=alt.Y("wages:Q")
)
area_line_1 = area.mark_line(**{"color": "#000", "opacity": 0.7})
area_line_2 = area.mark_line(**{"yOffset": -2, "color": "#EE8182"})
top_bars = base_monarchs.mark_bar(stroke="#000").encode(
x=alt.X("start:Q"),
x2=alt.X2("end"),
y=alt.Y("y:Q"),
y2=alt.Y2("offset"),
fill=alt.Fill("commonwealth:N", legend=None, scale=alt.Scale(range=["black", "white"]))
)
top_text = base_monarchs.mark_text(**{"yOffset": 14, "fontSize": 9, "fontStyle": "italic"}).encode(
x=alt.X("x:Q"),
y=alt.Y("off2:Q"),
text=alt.Text("name:N")
)
(bars + area + area_line_1 + area_line_2 + top_bars + top_text).properties(
width=900, height=400
).configure_axis(
title=None, gridColor="white", gridOpacity=0.25, domain=False
).configure_view(
stroke="transparent"
)
|
<commit_before><commit_msg>ENH: Add William Playfair Wheat and Wages example<commit_after>
|
"""
Wheat and Wages
---------------
A recreation of William Playfair's classic chart visualizing
the price of wheat, the wages of a mechanic, and the reigning British monarch.
"""
# category: case studies
import altair as alt
from vega_datasets import data
base_wheat = alt.Chart(data.wheat.url).transform_calculate(
year_end="+datum.year + 5")
base_monarchs = alt.Chart(data.monarchs.url).transform_calculate(
offset="((!datum.commonwealth && datum.index % 2) ? -1: 1) * 2 + 95",
off2="((!datum.commonwealth && datum.index % 2) ? -1: 1) + 95",
y="95",
x="+datum.start + (+datum.end - +datum.start)/2"
)
bars = base_wheat.mark_bar(**{"fill": "#aaa", "stroke": "#999"}).encode(
x=alt.X("year:Q", axis=alt.Axis(format='d', tickCount=5)),
y=alt.Y("wheat:Q", axis=alt.Axis(zindex=1)),
x2=alt.X2("year_end")
)
area = base_wheat.mark_area(**{"color": "#a4cedb", "opacity": 0.7}).encode(
x=alt.X("year:Q"),
y=alt.Y("wages:Q")
)
area_line_1 = area.mark_line(**{"color": "#000", "opacity": 0.7})
area_line_2 = area.mark_line(**{"yOffset": -2, "color": "#EE8182"})
top_bars = base_monarchs.mark_bar(stroke="#000").encode(
x=alt.X("start:Q"),
x2=alt.X2("end"),
y=alt.Y("y:Q"),
y2=alt.Y2("offset"),
fill=alt.Fill("commonwealth:N", legend=None, scale=alt.Scale(range=["black", "white"]))
)
top_text = base_monarchs.mark_text(**{"yOffset": 14, "fontSize": 9, "fontStyle": "italic"}).encode(
x=alt.X("x:Q"),
y=alt.Y("off2:Q"),
text=alt.Text("name:N")
)
(bars + area + area_line_1 + area_line_2 + top_bars + top_text).properties(
width=900, height=400
).configure_axis(
title=None, gridColor="white", gridOpacity=0.25, domain=False
).configure_view(
stroke="transparent"
)
|
ENH: Add William Playfair Wheat and Wages example"""
Wheat and Wages
---------------
A recreation of William Playfair's classic chart visualizing
the price of wheat, the wages of a mechanic, and the reigning British monarch.
"""
# category: case studies
import altair as alt
from vega_datasets import data
base_wheat = alt.Chart(data.wheat.url).transform_calculate(
year_end="+datum.year + 5")
base_monarchs = alt.Chart(data.monarchs.url).transform_calculate(
offset="((!datum.commonwealth && datum.index % 2) ? -1: 1) * 2 + 95",
off2="((!datum.commonwealth && datum.index % 2) ? -1: 1) + 95",
y="95",
x="+datum.start + (+datum.end - +datum.start)/2"
)
bars = base_wheat.mark_bar(**{"fill": "#aaa", "stroke": "#999"}).encode(
x=alt.X("year:Q", axis=alt.Axis(format='d', tickCount=5)),
y=alt.Y("wheat:Q", axis=alt.Axis(zindex=1)),
x2=alt.X2("year_end")
)
area = base_wheat.mark_area(**{"color": "#a4cedb", "opacity": 0.7}).encode(
x=alt.X("year:Q"),
y=alt.Y("wages:Q")
)
area_line_1 = area.mark_line(**{"color": "#000", "opacity": 0.7})
area_line_2 = area.mark_line(**{"yOffset": -2, "color": "#EE8182"})
top_bars = base_monarchs.mark_bar(stroke="#000").encode(
x=alt.X("start:Q"),
x2=alt.X2("end"),
y=alt.Y("y:Q"),
y2=alt.Y2("offset"),
fill=alt.Fill("commonwealth:N", legend=None, scale=alt.Scale(range=["black", "white"]))
)
top_text = base_monarchs.mark_text(**{"yOffset": 14, "fontSize": 9, "fontStyle": "italic"}).encode(
x=alt.X("x:Q"),
y=alt.Y("off2:Q"),
text=alt.Text("name:N")
)
(bars + area + area_line_1 + area_line_2 + top_bars + top_text).properties(
width=900, height=400
).configure_axis(
title=None, gridColor="white", gridOpacity=0.25, domain=False
).configure_view(
stroke="transparent"
)
|
<commit_before><commit_msg>ENH: Add William Playfair Wheat and Wages example<commit_after>"""
Wheat and Wages
---------------
A recreation of William Playfair's classic chart visualizing
the price of wheat, the wages of a mechanic, and the reigning British monarch.
"""
# category: case studies
import altair as alt
from vega_datasets import data
base_wheat = alt.Chart(data.wheat.url).transform_calculate(
year_end="+datum.year + 5")
base_monarchs = alt.Chart(data.monarchs.url).transform_calculate(
offset="((!datum.commonwealth && datum.index % 2) ? -1: 1) * 2 + 95",
off2="((!datum.commonwealth && datum.index % 2) ? -1: 1) + 95",
y="95",
x="+datum.start + (+datum.end - +datum.start)/2"
)
bars = base_wheat.mark_bar(**{"fill": "#aaa", "stroke": "#999"}).encode(
x=alt.X("year:Q", axis=alt.Axis(format='d', tickCount=5)),
y=alt.Y("wheat:Q", axis=alt.Axis(zindex=1)),
x2=alt.X2("year_end")
)
area = base_wheat.mark_area(**{"color": "#a4cedb", "opacity": 0.7}).encode(
x=alt.X("year:Q"),
y=alt.Y("wages:Q")
)
area_line_1 = area.mark_line(**{"color": "#000", "opacity": 0.7})
area_line_2 = area.mark_line(**{"yOffset": -2, "color": "#EE8182"})
top_bars = base_monarchs.mark_bar(stroke="#000").encode(
x=alt.X("start:Q"),
x2=alt.X2("end"),
y=alt.Y("y:Q"),
y2=alt.Y2("offset"),
fill=alt.Fill("commonwealth:N", legend=None, scale=alt.Scale(range=["black", "white"]))
)
top_text = base_monarchs.mark_text(**{"yOffset": 14, "fontSize": 9, "fontStyle": "italic"}).encode(
x=alt.X("x:Q"),
y=alt.Y("off2:Q"),
text=alt.Text("name:N")
)
(bars + area + area_line_1 + area_line_2 + top_bars + top_text).properties(
width=900, height=400
).configure_axis(
title=None, gridColor="white", gridOpacity=0.25, domain=False
).configure_view(
stroke="transparent"
)
|
|
8e8c1b326c71ad1e2810bc806d443bf3e9e0a8ed
|
csunplugged/utils/errors/TextBoxDrawerErrors.py
|
csunplugged/utils/errors/TextBoxDrawerErrors.py
|
class TextBoxDrawerError(Exception):
pass
class MissingSVGFile(TextBoxDrawerError):
pass
class TextBoxNotFoundInSVG(TextBoxDrawerError):
pass
class MissingSVGViewBox(TextBoxDrawerError):
pass
|
Add custom exceptions for TextBoxDrawer
|
Add custom exceptions for TextBoxDrawer
This is a skeleton for now, with docstrings etc. to follow
|
Python
|
mit
|
uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged
|
Add custom exceptions for TextBoxDrawer
This is a skeleton for now, with docstrings etc. to follow
|
class TextBoxDrawerError(Exception):
pass
class MissingSVGFile(TextBoxDrawerError):
pass
class TextBoxNotFoundInSVG(TextBoxDrawerError):
pass
class MissingSVGViewBox(TextBoxDrawerError):
pass
|
<commit_before><commit_msg>Add custom exceptions for TextBoxDrawer
This is a skeleton for now, with docstrings etc. to follow<commit_after>
|
class TextBoxDrawerError(Exception):
pass
class MissingSVGFile(TextBoxDrawerError):
pass
class TextBoxNotFoundInSVG(TextBoxDrawerError):
pass
class MissingSVGViewBox(TextBoxDrawerError):
pass
|
Add custom exceptions for TextBoxDrawer
This is a skeleton for now, with docstrings etc. to followclass TextBoxDrawerError(Exception):
pass
class MissingSVGFile(TextBoxDrawerError):
pass
class TextBoxNotFoundInSVG(TextBoxDrawerError):
pass
class MissingSVGViewBox(TextBoxDrawerError):
pass
|
<commit_before><commit_msg>Add custom exceptions for TextBoxDrawer
This is a skeleton for now, with docstrings etc. to follow<commit_after>class TextBoxDrawerError(Exception):
pass
class MissingSVGFile(TextBoxDrawerError):
pass
class TextBoxNotFoundInSVG(TextBoxDrawerError):
pass
class MissingSVGViewBox(TextBoxDrawerError):
pass
|
|
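Because each specific error above derives from TextBoxDrawerError, callers can handle the whole family with one except clause; a minimal sketch, with an invented message:
try:
    raise MissingSVGFile('template.svg not found')
except TextBoxDrawerError as err:
    print(type(err).__name__, err)   # MissingSVGFile template.svg not found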
97fe29ff36438fb2e39b24d518bccada54371d6f
|
extra/vwtags.py
|
extra/vwtags.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import re
if len(sys.argv) < 3:
exit()
syntax = sys.argv[1]
filename = sys.argv[2]
rx_default_media = r"^\s*(={1,6})([^=].*[^=])\1\s*$"
rx_markdown = r"^\s*(#{1,6})([^#].*)$"
if syntax in ("default", "media"):
rx_header = re.compile(rx_default_media)
elif syntax == "markdown":
rx_header = re.compile(rx_markdown)
else:
rx_header = re.compile(rx_default_media + "|" + rx_markdown)
file_content = []
try:
with open(filename, "r") as vim_buffer:
file_content = vim_buffer.readlines()
except:
exit()
result = []
state = [""]*6
for lnum, line in enumerate(file_content):
match_header = rx_header.match(line)
if not match_header:
continue
match_lvl = match_header.group(1) or match_header.group(3)
match_tag = match_header.group(2) or match_header.group(4)
cur_lvl = len(match_lvl)
cur_tag = match_tag.split('|')[0].strip()
cur_searchterm = "^" + match_header.group(0).rstrip("\r\n") + "$"
cur_kind = "h" if not '|' in line else "v"
state[cur_lvl-1] = cur_tag
for i in range(cur_lvl, 6):
state[i] = ""
scope = "&&&".join(
[state[i] for i in range(0, cur_lvl-1) if state[i] != ""])
if scope:
scope = "\theader:" + scope
result.append([cur_tag, filename, cur_searchterm, cur_kind, str(lnum+1), scope])
for i in range(len(result)):
if i != len(result) - 1:
if len(result[i+1][5]) <= len(result[i][5]) and len(result[i][5]) != 0:
result[i][3] = 'i'
print('{0}\t{1}\t/{2}/;"\t{3}\tline:{4}{5}'.format(
result[i][0],
result[i][1],
result[i][2],
result[i][3],
result[i][4],
result[i][5],
))
|
Add script to generate tags, taken from vimwiki/utils
|
Add script to generate tags, taken from vimwiki/utils
|
Python
|
mit
|
phha/taskwiki,Spirotot/taskwiki
|
Add script to generate tags, taken from vimwiki/utils
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import re
if len(sys.argv) < 3:
exit()
syntax = sys.argv[1]
filename = sys.argv[2]
rx_default_media = r"^\s*(={1,6})([^=].*[^=])\1\s*$"
rx_markdown = r"^\s*(#{1,6})([^#].*)$"
if syntax in ("default", "media"):
rx_header = re.compile(rx_default_media)
elif syntax == "markdown":
rx_header = re.compile(rx_markdown)
else:
rx_header = re.compile(rx_default_media + "|" + rx_markdown)
file_content = []
try:
with open(filename, "r") as vim_buffer:
file_content = vim_buffer.readlines()
except:
exit()
result = []
state = [""]*6
for lnum, line in enumerate(file_content):
match_header = rx_header.match(line)
if not match_header:
continue
match_lvl = match_header.group(1) or match_header.group(3)
match_tag = match_header.group(2) or match_header.group(4)
cur_lvl = len(match_lvl)
cur_tag = match_tag.split('|')[0].strip()
cur_searchterm = "^" + match_header.group(0).rstrip("\r\n") + "$"
cur_kind = "h" if not '|' in line else "v"
state[cur_lvl-1] = cur_tag
for i in range(cur_lvl, 6):
state[i] = ""
scope = "&&&".join(
[state[i] for i in range(0, cur_lvl-1) if state[i] != ""])
if scope:
scope = "\theader:" + scope
result.append([cur_tag, filename, cur_searchterm, cur_kind, str(lnum+1), scope])
for i in range(len(result)):
if i != len(result) - 1:
if len(result[i+1][5]) <= len(result[i][5]) and len(result[i][5]) != 0:
result[i][3] = 'i'
print('{0}\t{1}\t/{2}/;"\t{3}\tline:{4}{5}'.format(
result[i][0],
result[i][1],
result[i][2],
result[i][3],
result[i][4],
result[i][5],
))
|
<commit_before><commit_msg>Add script to generate tags, taken from vimwiki/utils<commit_after>
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import re
if len(sys.argv) < 3:
exit()
syntax = sys.argv[1]
filename = sys.argv[2]
rx_default_media = r"^\s*(={1,6})([^=].*[^=])\1\s*$"
rx_markdown = r"^\s*(#{1,6})([^#].*)$"
if syntax in ("default", "media"):
rx_header = re.compile(rx_default_media)
elif syntax == "markdown":
rx_header = re.compile(rx_markdown)
else:
rx_header = re.compile(rx_default_media + "|" + rx_markdown)
file_content = []
try:
with open(filename, "r") as vim_buffer:
file_content = vim_buffer.readlines()
except:
exit()
result = []
state = [""]*6
for lnum, line in enumerate(file_content):
match_header = rx_header.match(line)
if not match_header:
continue
match_lvl = match_header.group(1) or match_header.group(3)
match_tag = match_header.group(2) or match_header.group(4)
cur_lvl = len(match_lvl)
cur_tag = match_tag.split('|')[0].strip()
cur_searchterm = "^" + match_header.group(0).rstrip("\r\n") + "$"
cur_kind = "h" if not '|' in line else "v"
state[cur_lvl-1] = cur_tag
for i in range(cur_lvl, 6):
state[i] = ""
scope = "&&&".join(
[state[i] for i in range(0, cur_lvl-1) if state[i] != ""])
if scope:
scope = "\theader:" + scope
result.append([cur_tag, filename, cur_searchterm, cur_kind, str(lnum+1), scope])
for i in range(len(result)):
if i != len(result) - 1:
if len(result[i+1][5]) <= len(result[i][5]) and len(result[i][5]) != 0:
result[i][3] = 'i'
print('{0}\t{1}\t/{2}/;"\t{3}\tline:{4}{5}'.format(
result[i][0],
result[i][1],
result[i][2],
result[i][3],
result[i][4],
result[i][5],
))
|
Add script to generate tags, taken from vimwiki/utils#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import re
if len(sys.argv) < 3:
exit()
syntax = sys.argv[1]
filename = sys.argv[2]
rx_default_media = r"^\s*(={1,6})([^=].*[^=])\1\s*$"
rx_markdown = r"^\s*(#{1,6})([^#].*)$"
if syntax in ("default", "media"):
rx_header = re.compile(rx_default_media)
elif syntax == "markdown":
rx_header = re.compile(rx_markdown)
else:
rx_header = re.compile(rx_default_media + "|" + rx_markdown)
file_content = []
try:
with open(filename, "r") as vim_buffer:
file_content = vim_buffer.readlines()
except:
exit()
result = []
state = [""]*6
for lnum, line in enumerate(file_content):
match_header = rx_header.match(line)
if not match_header:
continue
match_lvl = match_header.group(1) or match_header.group(3)
match_tag = match_header.group(2) or match_header.group(4)
cur_lvl = len(match_lvl)
cur_tag = match_tag.split('|')[0].strip()
cur_searchterm = "^" + match_header.group(0).rstrip("\r\n") + "$"
cur_kind = "h" if not '|' in line else "v"
state[cur_lvl-1] = cur_tag
for i in range(cur_lvl, 6):
state[i] = ""
scope = "&&&".join(
[state[i] for i in range(0, cur_lvl-1) if state[i] != ""])
if scope:
scope = "\theader:" + scope
result.append([cur_tag, filename, cur_searchterm, cur_kind, str(lnum+1), scope])
for i in range(len(result)):
if i != len(result) - 1:
if len(result[i+1][5]) <= len(result[i][5]) and len(result[i][5]) != 0:
result[i][3] = 'i'
print('{0}\t{1}\t/{2}/;"\t{3}\tline:{4}{5}'.format(
result[i][0],
result[i][1],
result[i][2],
result[i][3],
result[i][4],
result[i][5],
))
|
<commit_before><commit_msg>Add script to generate tags, taken from vimwiki/utils<commit_after>#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import re
if len(sys.argv) < 3:
exit()
syntax = sys.argv[1]
filename = sys.argv[2]
rx_default_media = r"^\s*(={1,6})([^=].*[^=])\1\s*$"
rx_markdown = r"^\s*(#{1,6})([^#].*)$"
if syntax in ("default", "media"):
rx_header = re.compile(rx_default_media)
elif syntax == "markdown":
rx_header = re.compile(rx_markdown)
else:
rx_header = re.compile(rx_default_media + "|" + rx_markdown)
file_content = []
try:
with open(filename, "r") as vim_buffer:
file_content = vim_buffer.readlines()
except:
exit()
result = []
state = [""]*6
for lnum, line in enumerate(file_content):
match_header = rx_header.match(line)
if not match_header:
continue
match_lvl = match_header.group(1) or match_header.group(3)
match_tag = match_header.group(2) or match_header.group(4)
cur_lvl = len(match_lvl)
cur_tag = match_tag.split('|')[0].strip()
cur_searchterm = "^" + match_header.group(0).rstrip("\r\n") + "$"
cur_kind = "h" if not '|' in line else "v"
state[cur_lvl-1] = cur_tag
for i in range(cur_lvl, 6):
state[i] = ""
scope = "&&&".join(
[state[i] for i in range(0, cur_lvl-1) if state[i] != ""])
if scope:
scope = "\theader:" + scope
result.append([cur_tag, filename, cur_searchterm, cur_kind, str(lnum+1), scope])
for i in range(len(result)):
if i != len(result) - 1:
if len(result[i+1][5]) <= len(result[i][5]) and len(result[i][5]) != 0:
result[i][3] = 'i'
print('{0}\t{1}\t/{2}/;"\t{3}\tline:{4}{5}'.format(
result[i][0],
result[i][1],
result[i][2],
result[i][3],
result[i][4],
result[i][5],
))
|
|
6b7ae3c74708a3ed666e76e6ba779be7423a022d
|
myhdl/test/conversion/numeric/test_numass.py
|
myhdl/test/conversion/numeric/test_numass.py
|
from __future__ import absolute_import, print_function
from random import randrange
from myhdl import Signal, uintba, sintba, instance, delay, conversion
def NumassBench():
p = Signal(uintba(1, 8))
q = Signal(uintba(1, 40))
r = Signal(sintba(1, 9))
s = Signal(sintba(1, 41))
PBIGINT = randrange(2**34, 2**40)
NBIGINT = -randrange(2**34, 2**40)
@instance
def check():
p.next = 0
q.next = 0
r.next = 0
s.next = 0
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 1
q.next = 1
r.next = 1
s.next = 1
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 2
q.next = 2
r.next = -2
s.next = -2
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 255
q.next = 246836311517
r.next = 255
s.next = -246836311517
yield delay(10)
print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r,
s[41:20], s[20:0]))
p.next = 254
q.next = PBIGINT
r.next = -256
s.next = NBIGINT
yield delay(10)
print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r,
s[41:20], s[20:0]))
return check
def test_numass():
assert conversion.verify(NumassBench) == 0
|
Revert "Revert "Added the number assignment test for numeric.""
|
Revert "Revert "Added the number assignment test for numeric.""
This reverts commit acbd86165ea6c5cd566928292c095ae570aa00ce.
|
Python
|
lgpl-2.1
|
jmgc/myhdl-numeric,jmgc/myhdl-numeric,jmgc/myhdl-numeric
|
Revert "Revert "Added the number assignment test for numeric.""
This reverts commit acbd86165ea6c5cd566928292c095ae570aa00ce.
|
from __future__ import absolute_import, print_function
from random import randrange
from myhdl import Signal, uintba, sintba, instance, delay, conversion
def NumassBench():
p = Signal(uintba(1, 8))
q = Signal(uintba(1, 40))
r = Signal(sintba(1, 9))
s = Signal(sintba(1, 41))
PBIGINT = randrange(2**34, 2**40)
NBIGINT = -randrange(2**34, 2**40)
@instance
def check():
p.next = 0
q.next = 0
r.next = 0
s.next = 0
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 1
q.next = 1
r.next = 1
s.next = 1
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 2
q.next = 2
r.next = -2
s.next = -2
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 255
q.next = 246836311517
r.next = 255
s.next = -246836311517
yield delay(10)
print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r,
s[41:20], s[20:0]))
p.next = 254
q.next = PBIGINT
r.next = -256
s.next = NBIGINT
yield delay(10)
print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r,
s[41:20], s[20:0]))
return check
def test_numass():
assert conversion.verify(NumassBench) == 0
|
<commit_before><commit_msg>Revert "Revert "Added the number assignment test for numeric.""
This reverts commit acbd86165ea6c5cd566928292c095ae570aa00ce.<commit_after>
|
from __future__ import absolute_import, print_function
from random import randrange
from myhdl import Signal, uintba, sintba, instance, delay, conversion
def NumassBench():
p = Signal(uintba(1, 8))
q = Signal(uintba(1, 40))
r = Signal(sintba(1, 9))
s = Signal(sintba(1, 41))
PBIGINT = randrange(2**34, 2**40)
NBIGINT = -randrange(2**34, 2**40)
@instance
def check():
p.next = 0
q.next = 0
r.next = 0
s.next = 0
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 1
q.next = 1
r.next = 1
s.next = 1
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 2
q.next = 2
r.next = -2
s.next = -2
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 255
q.next = 246836311517
r.next = 255
s.next = -246836311517
yield delay(10)
print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r,
s[41:20], s[20:0]))
p.next = 254
q.next = PBIGINT
r.next = -256
s.next = NBIGINT
yield delay(10)
print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r,
s[41:20], s[20:0]))
return check
def test_numass():
assert conversion.verify(NumassBench) == 0
|
Revert "Revert "Added the number assignment test for numeric.""
This reverts commit acbd86165ea6c5cd566928292c095ae570aa00ce.from __future__ import absolute_import, print_function
from random import randrange
from myhdl import Signal, uintba, sintba, instance, delay, conversion
def NumassBench():
p = Signal(uintba(1, 8))
q = Signal(uintba(1, 40))
r = Signal(sintba(1, 9))
s = Signal(sintba(1, 41))
PBIGINT = randrange(2**34, 2**40)
NBIGINT = -randrange(2**34, 2**40)
@instance
def check():
p.next = 0
q.next = 0
r.next = 0
s.next = 0
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 1
q.next = 1
r.next = 1
s.next = 1
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 2
q.next = 2
r.next = -2
s.next = -2
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 255
q.next = 246836311517
r.next = 255
s.next = -246836311517
yield delay(10)
print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r,
s[41:20], s[20:0]))
p.next = 254
q.next = PBIGINT
r.next = -256
s.next = NBIGINT
yield delay(10)
print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r,
s[41:20], s[20:0]))
return check
def test_numass():
assert conversion.verify(NumassBench) == 0
|
<commit_before><commit_msg>Revert "Revert "Added the number assignment test for numeric.""
This reverts commit acbd86165ea6c5cd566928292c095ae570aa00ce.<commit_after>from __future__ import absolute_import, print_function
from random import randrange
from myhdl import Signal, uintba, sintba, instance, delay, conversion
def NumassBench():
p = Signal(uintba(1, 8))
q = Signal(uintba(1, 40))
r = Signal(sintba(1, 9))
s = Signal(sintba(1, 41))
PBIGINT = randrange(2**34, 2**40)
NBIGINT = -randrange(2**34, 2**40)
@instance
def check():
p.next = 0
q.next = 0
r.next = 0
s.next = 0
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 1
q.next = 1
r.next = 1
s.next = 1
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 2
q.next = 2
r.next = -2
s.next = -2
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 255
q.next = 246836311517
r.next = 255
s.next = -246836311517
yield delay(10)
print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r,
s[41:20], s[20:0]))
p.next = 254
q.next = PBIGINT
r.next = -256
s.next = NBIGINT
yield delay(10)
print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r,
s[41:20], s[20:0]))
return check
def test_numass():
assert conversion.verify(NumassBench) == 0
|
|
ee907d14ab905ca347807d8787c9146cd28f6d7d
|
dit/algorithms/tests/test_lattice.py
|
dit/algorithms/tests/test_lattice.py
|
from nose.tools import *
from dit.algorithms.lattice import sigma_algebra_sort
def test_sigalg_sort():
sigalg = frozenset([
frozenset([]),
frozenset([1]),
frozenset([2]),
frozenset([1,2])
])
sigalg_ = [(), (1,), (2,), (1,2)]
assert_equal( sigalg_, sigma_algebra_sort(sigalg) )
|
Add a test for sorting sigma-algebras.
|
Add a test for sorting sigma-algebras.
|
Python
|
bsd-3-clause
|
Autoplectic/dit,dit/dit,Autoplectic/dit,Autoplectic/dit,dit/dit,dit/dit,chebee7i/dit,Autoplectic/dit,chebee7i/dit,chebee7i/dit,Autoplectic/dit,dit/dit,dit/dit,chebee7i/dit
|
Add a test for sorting sigma-algebras.
|
from nose.tools import *
from dit.algorithms.lattice import sigma_algebra_sort
def test_sigalg_sort():
sigalg = frozenset([
frozenset([]),
frozenset([1]),
frozenset([2]),
frozenset([1,2])
])
sigalg_ = [(), (1,), (2,), (1,2)]
assert_equal( sigalg_, sigma_algebra_sort(sigalg) )
|
<commit_before><commit_msg>Add a test for sorting sigma-algebras.<commit_after>
|
from nose.tools import *
from dit.algorithms.lattice import sigma_algebra_sort
def test_sigalg_sort():
sigalg = frozenset([
frozenset([]),
frozenset([1]),
frozenset([2]),
frozenset([1,2])
])
sigalg_ = [(), (1,), (2,), (1,2)]
assert_equal( sigalg_, sigma_algebra_sort(sigalg) )
|
Add a test for sorting sigma-algebras.
from nose.tools import *
from dit.algorithms.lattice import sigma_algebra_sort
def test_sigalg_sort():
sigalg = frozenset([
frozenset([]),
frozenset([1]),
frozenset([2]),
frozenset([1,2])
])
sigalg_ = [(), (1,), (2,), (1,2)]
assert_equal( sigalg_, sigma_algebra_sort(sigalg) )
|
<commit_before><commit_msg>Add a test for sorting sigma-algebras.<commit_after>
from nose.tools import *
from dit.algorithms.lattice import sigma_algebra_sort
def test_sigalg_sort():
sigalg = frozenset([
frozenset([]),
frozenset([1]),
frozenset([2]),
frozenset([1,2])
])
sigalg_ = [(), (1,), (2,), (1,2)]
assert_equal( sigalg_, sigma_algebra_sort(sigalg) )
|
|
ea7bbb23d8818fbc3c06467e500e6b9f9be85c84
|
dash/orgs/migrations/0026_fix_org_config_rapidpro.py
|
dash/orgs/migrations/0026_fix_org_config_rapidpro.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-27 12:04
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
def fix_org_config_rapidpro(apps, schema_editor):
Org = apps.get_model("orgs", "Org")
orgs = Org.objects.all()
for org in orgs:
if not org.config:
continue
config = org.config
rapidpro_config = config.get('rapidpro', dict())
del rapidpro_config['api_token']
del rapidpro_config['rapipro'] # remove the key mistakenly added by a typo in 0024_populate_org_backend
config['rapidpro'] = rapidpro_config
org.config = config
org.save()
def noop(apps, schema_editor):
pass
dependencies = [
('orgs', '0025_auto_20180322_1415'),
]
operations = [
migrations.RunPython(fix_org_config_rapidpro, noop)
]
|
Add migrations to clean up org config
|
Add migrations to clean up org config
|
Python
|
bsd-3-clause
|
rapidpro/dash,rapidpro/dash
|
Add migrations to clean up org config
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-27 12:04
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
def fix_org_config_rapidpro(apps, schema_editor):
Org = apps.get_model("orgs", "Org")
orgs = Org.objects.all()
for org in orgs:
if not org.config:
continue
config = org.config
rapidpro_config = config.get('rapidpro', dict())
del rapidpro_config['api_token']
del rapidpro_config['rapipro'] # remove the key mistakenly added by a typo in 0024_populate_org_backend
config['rapidpro'] = rapidpro_config
org.config = config
org.save()
def noop(apps, schema_editor):
pass
dependencies = [
('orgs', '0025_auto_20180322_1415'),
]
operations = [
migrations.RunPython(fix_org_config_rapidpro, noop)
]
|
<commit_before><commit_msg>Add migrations to clean up org config<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-27 12:04
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
def fix_org_config_rapidpro(apps, schema_editor):
Org = apps.get_model("orgs", "Org")
orgs = Org.objects.all()
for org in orgs:
if not org.config:
continue
config = org.config
rapidpro_config = config.get('rapidpro', dict())
del rapidpro_config['api_token']
del rapidpro_config['rapipro'] # remove the key mistakenly added by a typo in 0024_populate_org_backend
config['rapidpro'] = rapidpro_config
org.config = config
org.save()
def noop(apps, schema_editor):
pass
dependencies = [
('orgs', '0025_auto_20180322_1415'),
]
operations = [
migrations.RunPython(fix_org_config_rapidpro, noop)
]
|
Add migrations to clean up org config# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-27 12:04
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
def fix_org_config_rapidpro(apps, schema_editor):
Org = apps.get_model("orgs", "Org")
orgs = Org.objects.all()
for org in orgs:
if not org.config:
continue
config = org.config
rapidpro_config = config.get('rapidpro', dict())
del rapidpro_config['api_token']
del rapidpro_config['rapipro'] # remove the key mistakenly added by a typo in 0024_populate_org_backend
config['rapidpro'] = rapidpro_config
org.config = config
org.save()
def noop(apps, schema_editor):
pass
dependencies = [
('orgs', '0025_auto_20180322_1415'),
]
operations = [
migrations.RunPython(fix_org_config_rapidpro, noop)
]
|
<commit_before><commit_msg>Add migrations to clean up org config<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-27 12:04
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
def fix_org_config_rapidpro(apps, schema_editor):
Org = apps.get_model("orgs", "Org")
orgs = Org.objects.all()
for org in orgs:
if not org.config:
continue
config = org.config
rapidpro_config = config.get('rapidpro', dict())
del rapidpro_config['api_token']
del rapidpro_config['rapipro'] # remove the key mistakenly added by a typo in 0024_populate_org_backend
config['rapidpro'] = rapidpro_config
org.config = config
org.save()
def noop(apps, schema_editor):
pass
dependencies = [
('orgs', '0025_auto_20180322_1415'),
]
operations = [
migrations.RunPython(fix_org_config_rapidpro, noop)
]
|
|
f2fb791df5023a39c82561ceb79b92b0b584e5af
|
scripts/fetch_data.py
|
scripts/fetch_data.py
|
import json
import requests
import dataset
def fetch_builds_data(jenkins_url):
'''Get json data of all Jenkins builds
JENKINS_URL/api/json?
tree=jobs[name,builds[number,result,duration,builtOn,id,timestamp,fullDisplayName]]
Return builds_data dictionary with following schema::
[
{
"name" : <job_name>,
"builds" : [
{
"duration" : <build_duration>,
"timestamp" : <build_timestamp>,
"id" : <build_id>,
"number" : <build_number>,
"result" : <build_result>,
"builtOn" : <node>,
"fullDisplayName": <build_display_name>,
}
],
...
},
...
]
'''
params = {}
params['tree'] = 'jobs[name,builds[number,result,duration,builtOn,id,timestamp,fullDisplayName]]'
r = requests.get(
"%s/api/json" % jenkins_url,
params=params
)
builds_data = json.loads(r.text)["jobs"]
return builds_data
def store_builds_data(builds_data, dbname):
'''Saves builds_data in SQLite database
Database has one table `builds` with following columns::
index = 5
builtOn = slave01.example.com
name = check_shell
timestamp = 1432739306340
number = 113
id = 2015-05-27_15-08-26
result = SUCCESS
duration = 3796
fullDisplayName = check_shell #113
'''
db = dataset.connect('sqlite:///%s' % dbname)
table = db.get_table('builds', primary_id='index')
db.begin()
for job_entry in builds_data:
for build in job_entry['builds']:
# FIXME: Build DB is append-only, thus, for efficiency, we
# should process only new builds with timestamp later than
# the last one which already exists in db
build['name'] = job_entry['name']
table.upsert(build, ['name','number'])
db.commit()
return len(db['builds'])
def update_builds_db(dbname, source_file=None, source_url=None):
if source_file:
with open(source_file, 'r') as f:
builds_data = json.loads(f.readline())['jobs']
elif source_url:
builds_data = fetch_builds_data(source_url)
else:
raise ValueError("No URL and no source file specified")
return store_builds_data(builds_data, dbname)
if __name__ == '__main__':
update_builds_db(source_file='test.data', dbname='test.db')
|
Add script to fetch builds from Jenkins
|
Add script to fetch builds from Jenkins
- uses dataset library to store data in sqlite database,
- supports fetching data from Jenkins via JSON API or from file.
|
Python
|
mit
|
bookwar/jenkins-report,bookwar/jenkins-report
|
Add script to fetch builds from Jenkins
- uses dataset library to store data in sqlite database,
- supports fetching data from Jenkins via JSON API or from file.
|
import json
import requests
import dataset
def fetch_builds_data(jenkins_url):
'''Get json data of all Jenkins builds
JENKINS_URL/api/json?
tree=jobs[name,builds[number,result,duration,builtOn,id,timestamp,fullDisplayName]]
Return builds_data dictionary with following schema::
[
{
"name" : <job_name>,
"builds" : [
{
"duration" : <build_duration>,
"timestamp" : <build_timestamp>,
"id" : <build_id>,
"number" : <build_number>,
"result" : <build_result>,
"builtOn" : <node>,
"fullDisplayName": <build_display_name>,
}
],
...
},
...
]
'''
params = {}
params['tree'] = 'jobs[name,builds[number,result,duration,builtOn,id,timestamp,fullDisplayName]]'
r = requests.get(
"%s/api/json" % jenkins_url,
params=params
)
builds_data = json.loads(r.text)["jobs"]
return builds_data
def store_builds_data(builds_data, dbname):
'''Saves builds_data in SQLite database
Database has one table `builds` with following columns::
index = 5
builtOn = slave01.example.com
name = check_shell
timestamp = 1432739306340
number = 113
id = 2015-05-27_15-08-26
result = SUCCESS
duration = 3796
fullDisplayName = check_shell #113
'''
db = dataset.connect('sqlite:///%s' % dbname)
table = db.get_table('builds', primary_id='index')
db.begin()
for job_entry in builds_data:
for build in job_entry['builds']:
# FIXME: Build DB is append-only, thus, for efficiency, we
# should process only new builds with timestamp later than
# the last one which already exists in db
build['name'] = job_entry['name']
table.upsert(build, ['name','number'])
db.commit()
return len(db['builds'])
def update_builds_db(dbname, source_file=None, source_url=None):
if source_file:
with open(source_file, 'r') as f:
builds_data = json.loads(f.readline())['jobs']
elif source_url:
builds_data = fetch_builds_data(source_url)
else:
raise ValueError("No URL and no source file specified")
return store_builds_data(builds_data, dbname)
if __name__ == '__main__':
update_builds_db(source_file='test.data', dbname='test.db')
|
<commit_before><commit_msg>Add script to fetch builds from Jenkins
- uses dataset library to store data in sqlite database,
- supports fetching data from Jenkins via JSON API or from file.<commit_after>
|
import json
import requests
import dataset
def fetch_builds_data(jenkins_url):
'''Get json data of all Jenkins builds
JENKINS_URL/api/json?
tree=jobs[name,builds[number,result,duration,builtOn,id,timestamp,fullDisplayName]]
Return builds_data dictionary with following schema::
[
{
"name" : <job_name>,
"builds" : [
{
"duration" : <build_duration>,
"timestamp" : <build_timestamp>,
"id" : <build_id>,
"number" : <build_number>,
"result" : <build_result>,
"builtOn" : <node>,
"fullDisplayName": <build_display_name>,
}
],
...
},
...
]
'''
params = {}
params['tree'] = 'jobs[name,builds[number,result,duration,builtOn,id,timestamp,fullDisplayName]]'
r = requests.get(
"%s/api/json" % jenkins_url,
params=params
)
builds_data = json.loads(r.text)["jobs"]
return builds_data
def store_builds_data(builds_data, dbname):
'''Saves builds_data in SQLite database
Database has one table `builds` with following columns::
index = 5
builtOn = slave01.example.com
name = check_shell
timestamp = 1432739306340
number = 113
id = 2015-05-27_15-08-26
result = SUCCESS
duration = 3796
fullDisplayName = check_shell #113
'''
db = dataset.connect('sqlite:///%s' % dbname)
table = db.get_table('builds', primary_id='index')
db.begin()
for job_entry in builds_data:
for build in job_entry['builds']:
# FIXME: Build DB is append-only, thus, for efficiency, we
# should process only new builds with timestamp later than
# the last one which already exists in db
build['name'] = job_entry['name']
table.upsert(build, ['name','number'])
db.commit()
return len(db['builds'])
def update_builds_db(dbname, source_file=None, source_url=None):
if source_file:
with open(source_file, 'r') as f:
builds_data = json.loads(f.readline())['jobs']
elif source_url:
builds_data = fetch_builds_data(source_url)
else:
raise ValueError("No URL and no source file specified")
return store_builds_data(builds_data, dbname)
if __name__ == '__main__':
update_builds_db(source_file='test.data', dbname='test.db')
|
Add script to fetch builds from Jenkins
- uses dataset library to store data in sqlite database,
- supports fetching data from Jenkins via JSON API or from file.import json
import requests
import dataset
def fetch_builds_data(jenkins_url):
'''Get json data of all Jenkins builds
JENKINS_URL/api/json?
tree=jobs[name,builds[number,result,duration,builtOn,id,timestamp,fullDisplayName]]
Return builds_data dictionary with following schema::
[
{
"name" : <job_name>,
"builds" : [
{
"duration" : <build_duration>,
"timestamp" : <build_timestamp>,
"id" : <build_id>,
"number" : <build_number>,
"result" : <build_result>,
"builtOn" : <node>,
"fullDisplayName": <build_display_name>,
}
],
...
},
...
]
'''
params = {}
params['tree'] = 'jobs[name,builds[number,result,duration,builtOn,id,timestamp,fullDisplayName]]'
r = requests.get(
"%s/api/json" % jenkins_url,
params=params
)
builds_data = json.loads(r.text)["jobs"]
return builds_data
def store_builds_data(builds_data, dbname):
'''Saves builds_data in SQLite database
Database has one table `builds` with following columns::
index = 5
builtOn = slave01.example.com
name = check_shell
timestamp = 1432739306340
number = 113
id = 2015-05-27_15-08-26
result = SUCCESS
duration = 3796
fullDisplayName = check_shell #113
'''
db = dataset.connect('sqlite:///%s' % dbname)
table = db.get_table('builds', primary_id='index')
db.begin()
for job_entry in builds_data:
for build in job_entry['builds']:
# FIXME: Build DB is append-only, thus, for efficiency, we
# should process only new builds with timestamp later than
# the last one which already exists in db
build['name'] = job_entry['name']
table.upsert(build, ['name','number'])
db.commit()
return len(db['builds'])
def update_builds_db(dbname, source_file=None, source_url=None):
if source_file:
with open(source_file, 'r') as f:
builds_data = json.loads(f.readline())['jobs']
elif source_url:
builds_data = fetch_builds_data(source_url)
else:
raise ValueError("No URL and no source file specified")
return store_builds_data(builds_data, dbname)
if __name__ == '__main__':
update_builds_db(source_file='test.data', dbname='test.db')
|
<commit_before><commit_msg>Add script to fetch builds from Jenkins
- uses dataset library to store data in sqlite database,
- supports fetching data from Jenkins via JSON API or from file.<commit_after>import json
import requests
import dataset
def fetch_builds_data(jenkins_url):
'''Get json data of all Jenkins builds
JENKINS_URL/api/json?
tree=jobs[name,builds[number,result,duration,builtOn,id,timestamp,fullDisplayName]]
Return builds_data dictionary with following schema::
[
{
"name" : <job_name>,
"builds" : [
{
"duration" : <build_duration>,
"timestamp" : <build_timestamp>,
"id" : <build_id>,
"number" : <build_number>,
"result" : <build_result>,
"builtOn" : <node>,
"fullDisplayName": <build_display_name>,
}
],
...
},
...
]
'''
params = {}
params['tree'] = 'jobs[name,builds[number,result,duration,builtOn,id,timestamp,fullDisplayName]]'
r = requests.get(
"%s/api/json" % jenkins_url,
params=params
)
builds_data = json.loads(r.text)["jobs"]
return builds_data
def store_builds_data(builds_data, dbname):
'''Saves builds_data in SQLite database
Database has one table `builds` with following columns::
index = 5
builtOn = slave01.example.com
name = check_shell
timestamp = 1432739306340
number = 113
id = 2015-05-27_15-08-26
result = SUCCESS
duration = 3796
fullDisplayName = check_shell #113
'''
db = dataset.connect('sqlite:///%s' % dbname)
table = db.get_table('builds', primary_id='index')
db.begin()
for job_entry in builds_data:
for build in job_entry['builds']:
# FIXME: Build DB is append-only, thus, for efficiency, we
# should process only new builds with timestamp later than
# the last one which already exists in db
build['name'] = job_entry['name']
table.upsert(build, ['name','number'])
db.commit()
return len(db['builds'])
def update_builds_db(dbname, source_file=None, source_url=None):
if source_file:
with open(source_file, 'r') as f:
builds_data = json.loads(f.readline())['jobs']
elif source_url:
builds_data = fetch_builds_data(source_url)
else:
raise ValueError("No URL and no source file specified")
return store_builds_data(builds_data, dbname)
if __name__ == '__main__':
update_builds_db(source_file='test.data', dbname='test.db')
|
|
632f29f25e2e639ba3d41e0c12443a7e208cfd9c
|
web/ext/debug/__init__.py
|
web/ext/debug/__init__.py
|
# encoding: utf-8
"""Interactive tracebacks for WebCore."""
class DebuggingExtension(object):
provides = ['debug']
last = True
def __init__(self):
"""Executed to configure the extension."""
super(DebuggingExtension, self).__init__()
def __call__(self, context, app):
"""Executed to wrap the application in middleware."""
pass
|
Debug extension plan is once again an interactive debugger.
|
Debug extension plan is once again an interactive debugger.
|
Python
|
mit
|
marrow/WebCore,marrow/WebCore
|
Debug extension plan is once again an interactive debugger.
|
# encoding: utf-8
"""Interactive tracebacks for WebCore."""
class DebuggingExtension(object):
provides = ['debug']
last = True
def __init__(self):
"""Executed to configure the extension."""
super(DebuggingExtension, self).__init__()
def __call__(self, context, app):
"""Executed to wrap the application in middleware."""
pass
|
<commit_before><commit_msg>Debug extension plan is once again an interactive debugger.<commit_after>
|
# encoding: utf-8
"""Interactive tracebacks for WebCore."""
class DebuggingExtension(object):
provides = ['debug']
last = True
def __init__(self):
"""Executed to configure the extension."""
super(DebuggingExtension, self).__init__()
def __call__(self, context, app):
"""Executed to wrap the application in middleware."""
pass
|
Debug extension plan is once again an interactive debugger.# encoding: utf-8
"""Interactive tracebacks for WebCore."""
class DebuggingExtension(object):
provides = ['debug']
last = True
def __init__(self):
"""Executed to configure the extension."""
super(DebuggingExtension, self).__init__()
def __call__(self, context, app):
"""Executed to wrap the application in middleware."""
pass
|
<commit_before><commit_msg>Debug extension plan is once again an interactive debugger.<commit_after># encoding: utf-8
"""Interactive tracebacks for WebCore."""
class DebuggingExtension(object):
provides = ['debug']
last = True
def __init__(self):
"""Executed to configure the extension."""
super(DebuggingExtension, self).__init__()
def __call__(self, context, app):
"""Executed to wrap the application in middleware."""
pass
|
|
82574e953dcb2ff3bd47b7ae1a70d956a06633de
|
examples/demo/basic/scatter_alpha.py
|
examples/demo/basic/scatter_alpha.py
|
"""
Scatter plot with panning and zooming
Shows a scatter plot of a set of random points,
with basic Chaco panning and zooming.
Interacting with the plot:
- Left-mouse-drag pans the plot.
- Mouse wheel up and down zooms the plot in and out.
- Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular
region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow
and alt-right-arrow moves you forwards and backwards through the "zoom
history".
"""
# Major library imports
from numpy import sort
from numpy.random import random
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import DelegatesTo, HasTraits, Instance
from traitsui.api import Item, Group, View, RangeEditor
# Chaco imports
from chaco.api import ArrayPlotData, Plot
from chaco.tools.api import PanTool, ZoomTool
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
# Create some data
numpts = 5000
x = sort(random(numpts))
y = random(numpts)
# Create a plot data object and give it this data
pd = ArrayPlotData()
pd.set_data("index", x)
pd.set_data("value", y)
# Create the plot
plot = Plot(pd)
plot.plot(("index", "value"),
type="scatter",
marker="circle",
index_sort="ascending",
color="orange",
marker_size=3,
bgcolor="white")
# Tweak some of the plot properties
plot.title = "Scatter Plot"
plot.line_width = 0.5
plot.padding = 50
# Attach some tools to the plot
plot.tools.append(PanTool(plot, constrain_key="shift"))
zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
plot.overlays.append(zoom)
return plot
#===============================================================================
# Attributes to use for the plot view.
size = (650, 650)
title = "Basic scatter plot"
bg_color="lightgray"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
plot = Instance(Component)
scatter_renderer = Instance(Component)
alpha = DelegatesTo('scatter_renderer')
traits_view = View(
Group(
Item('plot', editor=ComponentEditor(size=size,
bgcolor=bg_color),
show_label=False),
Group(
Item('alpha', editor=RangeEditor(low=0.0, high=1.0)),
),
orientation = "vertical"),
resizable=True, title=title
)
def _plot_default(self):
return _create_plot_component()
def _scatter_renderer_default(self):
plot = self.plot
renderer = plot.plots.values()[0][0]
print renderer
return renderer
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
#--EOF---
|
Add example which demonstrates issue (works in this branch, fails otherwise).
|
Add example which demonstrates issue (works in this branch, fails otherwise).
|
Python
|
bsd-3-clause
|
tommy-u/chaco,tommy-u/chaco,tommy-u/chaco
|
Add example which demonstrates issue (works in this branch, fails otherwise).
|
"""
Scatter plot with panning and zooming
Shows a scatter plot of a set of random points,
with basic Chaco panning and zooming.
Interacting with the plot:
- Left-mouse-drag pans the plot.
- Mouse wheel up and down zooms the plot in and out.
- Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular
region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow
and alt-right-arrow moves you forwards and backwards through the "zoom
history".
"""
# Major library imports
from numpy import sort
from numpy.random import random
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import DelegatesTo, HasTraits, Instance
from traitsui.api import Item, Group, View, RangeEditor
# Chaco imports
from chaco.api import ArrayPlotData, Plot
from chaco.tools.api import PanTool, ZoomTool
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
# Create some data
numpts = 5000
x = sort(random(numpts))
y = random(numpts)
# Create a plot data object and give it this data
pd = ArrayPlotData()
pd.set_data("index", x)
pd.set_data("value", y)
# Create the plot
plot = Plot(pd)
plot.plot(("index", "value"),
type="scatter",
marker="circle",
index_sort="ascending",
color="orange",
marker_size=3,
bgcolor="white")
# Tweak some of the plot properties
plot.title = "Scatter Plot"
plot.line_width = 0.5
plot.padding = 50
# Attach some tools to the plot
plot.tools.append(PanTool(plot, constrain_key="shift"))
zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
plot.overlays.append(zoom)
return plot
#===============================================================================
# Attributes to use for the plot view.
size = (650, 650)
title = "Basic scatter plot"
bg_color="lightgray"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
plot = Instance(Component)
scatter_renderer = Instance(Component)
alpha = DelegatesTo('scatter_renderer')
traits_view = View(
Group(
Item('plot', editor=ComponentEditor(size=size,
bgcolor=bg_color),
show_label=False),
Group(
Item('alpha', editor=RangeEditor(low=0.0, high=1.0)),
),
orientation = "vertical"),
resizable=True, title=title
)
def _plot_default(self):
return _create_plot_component()
def _scatter_renderer_default(self):
plot = self.plot
renderer = plot.plots.values()[0][0]
print renderer
return renderer
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
#--EOF---
|
<commit_before><commit_msg>Add example which demonstrates issue (works in this branch, fails otherwise).<commit_after>
|
"""
Scatter plot with panning and zooming
Shows a scatter plot of a set of random points,
with basic Chaco panning and zooming.
Interacting with the plot:
- Left-mouse-drag pans the plot.
- Mouse wheel up and down zooms the plot in and out.
- Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular
region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow
and alt-right-arrow moves you forwards and backwards through the "zoom
history".
"""
# Major library imports
from numpy import sort
from numpy.random import random
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import DelegatesTo, HasTraits, Instance
from traitsui.api import Item, Group, View, RangeEditor
# Chaco imports
from chaco.api import ArrayPlotData, Plot
from chaco.tools.api import PanTool, ZoomTool
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
# Create some data
numpts = 5000
x = sort(random(numpts))
y = random(numpts)
# Create a plot data object and give it this data
pd = ArrayPlotData()
pd.set_data("index", x)
pd.set_data("value", y)
# Create the plot
plot = Plot(pd)
plot.plot(("index", "value"),
type="scatter",
marker="circle",
index_sort="ascending",
color="orange",
marker_size=3,
bgcolor="white")
# Tweak some of the plot properties
plot.title = "Scatter Plot"
plot.line_width = 0.5
plot.padding = 50
# Attach some tools to the plot
plot.tools.append(PanTool(plot, constrain_key="shift"))
zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
plot.overlays.append(zoom)
return plot
#===============================================================================
# Attributes to use for the plot view.
size = (650, 650)
title = "Basic scatter plot"
bg_color="lightgray"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
plot = Instance(Component)
scatter_renderer = Instance(Component)
alpha = DelegatesTo('scatter_renderer')
traits_view = View(
Group(
Item('plot', editor=ComponentEditor(size=size,
bgcolor=bg_color),
show_label=False),
Group(
Item('alpha', editor=RangeEditor(low=0.0, high=1.0)),
),
orientation = "vertical"),
resizable=True, title=title
)
def _plot_default(self):
return _create_plot_component()
def _scatter_renderer_default(self):
plot = self.plot
renderer = plot.plots.values()[0][0]
print renderer
return renderer
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
#--EOF---
|
Add example which demonstrates issue (works in this branch, fails otherwise)."""
Scatter plot with panning and zooming
Shows a scatter plot of a set of random points,
with basic Chaco panning and zooming.
Interacting with the plot:
- Left-mouse-drag pans the plot.
- Mouse wheel up and down zooms the plot in and out.
- Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular
region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow
and alt-right-arrow moves you forwards and backwards through the "zoom
history".
"""
# Major library imports
from numpy import sort
from numpy.random import random
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import DelegatesTo, HasTraits, Instance
from traitsui.api import Item, Group, View, RangeEditor
# Chaco imports
from chaco.api import ArrayPlotData, Plot
from chaco.tools.api import PanTool, ZoomTool
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
# Create some data
numpts = 5000
x = sort(random(numpts))
y = random(numpts)
# Create a plot data object and give it this data
pd = ArrayPlotData()
pd.set_data("index", x)
pd.set_data("value", y)
# Create the plot
plot = Plot(pd)
plot.plot(("index", "value"),
type="scatter",
marker="circle",
index_sort="ascending",
color="orange",
marker_size=3,
bgcolor="white")
# Tweak some of the plot properties
plot.title = "Scatter Plot"
plot.line_width = 0.5
plot.padding = 50
# Attach some tools to the plot
plot.tools.append(PanTool(plot, constrain_key="shift"))
zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
plot.overlays.append(zoom)
return plot
#===============================================================================
# Attributes to use for the plot view.
size = (650, 650)
title = "Basic scatter plot"
bg_color="lightgray"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
plot = Instance(Component)
scatter_renderer = Instance(Component)
alpha = DelegatesTo('scatter_renderer')
traits_view = View(
Group(
Item('plot', editor=ComponentEditor(size=size,
bgcolor=bg_color),
show_label=False),
Group(
Item('alpha', editor=RangeEditor(low=0.0, high=1.0)),
),
orientation = "vertical"),
resizable=True, title=title
)
def _plot_default(self):
return _create_plot_component()
def _scatter_renderer_default(self):
plot = self.plot
renderer = plot.plots.values()[0][0]
print renderer
return renderer
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
#--EOF---
|
<commit_before><commit_msg>Add example which demonstrates issue (works in this branch, fails otherwise).<commit_after>"""
Scatter plot with panning and zooming
Shows a scatter plot of a set of random points,
with basic Chaco panning and zooming.
Interacting with the plot:
- Left-mouse-drag pans the plot.
- Mouse wheel up and down zooms the plot in and out.
- Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular
region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow
and alt-right-arrow moves you forwards and backwards through the "zoom
history".
"""
# Major library imports
from numpy import sort
from numpy.random import random
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import DelegatesTo, HasTraits, Instance
from traitsui.api import Item, Group, View, RangeEditor
# Chaco imports
from chaco.api import ArrayPlotData, Plot
from chaco.tools.api import PanTool, ZoomTool
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
# Create some data
numpts = 5000
x = sort(random(numpts))
y = random(numpts)
# Create a plot data object and give it this data
pd = ArrayPlotData()
pd.set_data("index", x)
pd.set_data("value", y)
# Create the plot
plot = Plot(pd)
plot.plot(("index", "value"),
type="scatter",
marker="circle",
index_sort="ascending",
color="orange",
marker_size=3,
bgcolor="white")
# Tweak some of the plot properties
plot.title = "Scatter Plot"
plot.line_width = 0.5
plot.padding = 50
# Attach some tools to the plot
plot.tools.append(PanTool(plot, constrain_key="shift"))
zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
plot.overlays.append(zoom)
return plot
#===============================================================================
# Attributes to use for the plot view.
size = (650, 650)
title = "Basic scatter plot"
bg_color="lightgray"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
plot = Instance(Component)
scatter_renderer = Instance(Component)
alpha = DelegatesTo('scatter_renderer')
traits_view = View(
Group(
Item('plot', editor=ComponentEditor(size=size,
bgcolor=bg_color),
show_label=False),
Group(
Item('alpha', editor=RangeEditor(low=0.0, high=1.0)),
),
orientation = "vertical"),
resizable=True, title=title
)
def _plot_default(self):
return _create_plot_component()
def _scatter_renderer_default(self):
plot = self.plot
renderer = plot.plots.values()[0][0]
print renderer
return renderer
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
#--EOF---
|
|
3ab8796bef7900a8f8799d85cecbd37c7db259db
|
ghnames.py
|
ghnames.py
|
# Run this script with `python ghnames.py` until it is correct then
# run `python ghnames.py >> _config.yml` to add it its output to the
# end of our _config.yml. All students will be added as site authors.
# In Python """ starts a string that can span multiple lines
# Add student github names here.
names = """"""
# The data format our blog uses is called YAML. It's a common way of
# getting simple data into a program that processes text.
for name in sorted(names.split("\n"), key=str.lower): # the .split() method makes a list out of a string, split at a character. The sorted() function alphabetizes them.
print """ {0}:
name: {0}
prof: false
gravatar:
website:
github: {0}
twitter:
about: "Here's a little about {0}"
""".format(name) # The format method inserts the nth argument at {n} (starting with the zeroth)
|
Add student author names script
|
Add student author names script
|
Python
|
mit
|
silshack/summer2017,silshack/summer2017,silshack/summer2017,silshack/summer2017,silshack/summer2017
|
Add student author names script
|
# Run this script with `python ghnames.py` until it is correct then
# run `python ghnames.py >> _config.yml` to add its output to the
# end of our _config.yml. All students will be added as site authors.
# In Python """ starts a string that can span multiple lines
# Add student github names here.
names = """"""
# The data format our blog uses is called YAML. It's a common way of
# getting simple data into a program that processes text.
for name in sorted(names.split("\n"), key=str.lower): # the .split() method makes a list out of a string, split at a character. The sorted() function alphabetizes them.
print """ {0}:
name: {0}
prof: false
gravatar:
website:
github: {0}
twitter:
about: "Here's a little about {0}"
""".format(name) # The format method inserts the nth argument at {n} (starting with the zeroth)
|
<commit_before><commit_msg>Add student author names script<commit_after>
|
# Run this script with `python ghnames.py` until it is correct then
# run `python ghnames.py >> _config.yml` to add its output to the
# end of our _config.yml. All students will be added as site authors.
# In Python """ starts a string that can span multiple lines
# Add student github names here.
names = """"""
# The data format our blog uses is called YAML. It's a common way of
# getting simple data into a program that processes text.
for name in sorted(names.split("\n"), key=str.lower): # the .split() method makes a list out of a string, split at a character. The sorted() function alphabetizes them.
print """ {0}:
name: {0}
prof: false
gravatar:
website:
github: {0}
twitter:
about: "Here's a little about {0}"
""".format(name) # The format method inserts the nth argument at {n} (starting with the zeroth)
|
Add student author names script# Run this script with `python ghnames.py` until it is correct then
# run `python ghnames.py >> _config.yml` to add its output to the
# end of our _config.yml. All students will be added as site authors.
# In Python """ starts a string that can span multiple lines
# Add student github names here.
names = """"""
# The data format our blog uses is called YAML. It's a common way of
# getting simple data into a program that processes text.
for name in sorted(names.split("\n"), key=str.lower): # the .split() method makes a list out of a string, split at a character. The sorted() function alphabetizes them.
print """ {0}:
name: {0}
prof: false
gravatar:
website:
github: {0}
twitter:
about: "Here's a little about {0}"
""".format(name) # The format method inserts the nth argument at {n} (starting with the zeroth)
|
<commit_before><commit_msg>Add student author names script<commit_after># Run this script with `python ghnames.py` until it is correct then
# run `python ghnames.py >> _config.yml` to add its output to the
# end of our _config.yml. All students will be added as site authors.
# In Python """ starts a string that can span multiple lines
# Add student github names here.
names = """"""
# The data format our blog uses is called YAML. It's a common way of
# getting simple data into a program that processes text.
for name in sorted(names.split("\n"), key=str.lower): # the .split() method makes a list out of a string, split at a character. The sorted() function alphabetizes them.
print """ {0}:
name: {0}
prof: false
gravatar:
website:
github: {0}
twitter:
about: "Here's a little about {0}"
""".format(name) # The format method inserts the nth argument at {n} (starting with the zeroth)
|
|
e90d0c585dde96a780dbd1f4109d03dba651b9c2
|
extension/test/server/right/system_tests_preferred_masters.py
|
extension/test/server/right/system_tests_preferred_masters.py
|
"""
This file is part of Arakoon, a distributed key-value store. Copyright
(C) 2010 Incubaid BVBA
Licensees holding a valid Incubaid license may use this file in
accordance with Incubaid's Arakoon commercial license agreement. For
more information on how to enter into this agreement, please contact
Incubaid (contact details can be found on www.arakoon.org/licensing).
Alternatively, this file may be redistributed and/or modified under
the terms of the GNU Affero General Public License version 3, as
published by the Free Software Foundation. Under this license, this
file is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Affero General Public License for more details.
You should have received a copy of the
GNU Affero General Public License along with this program (file "COPYING").
If not, see <http://www.gnu.org/licenses/>.
"""
from .. import system_tests_common as Common
import nose.tools as NT
import time
import logging
@Common.with_custom_setup(Common.setup_3_nodes, Common.basic_teardown)
def test_prefered_master():
cluster = Common.q.manage.arakoon.getCluster(Common.cluster_id)
cluster.stop()
pm = Common.node_names[0]
cluster.preferredMasters([pm])
cluster.start()
Common.assert_running_nodes(3)
time.sleep(4.0 * Common.lease_duration)
logging.info("all should be well")
client = Common.get_client()
master = client.whoMaster()
NT.assert_equals(pm, master)
logging.info("master indeed is `%s`", pm)
client.dropConnections()
del client
cluster.stopOne(pm)
delay = 8.0 * Common.lease_duration
logging.info("stopped master, waiting for things to settle (%fs)", delay)
time.sleep(delay)
client = Common.get_client()
master2 = client.whoMaster()
logging.info("master2 = %s",master2)
NT.assert_equals(True, master2 != pm)
logging.info("node %s took over, now restarting preferred master", master2)
cluster.startOne(pm)
logging.info("waiting for things to settle")
time.sleep(4.0* Common.lease_duration)
client = Common.get_client()
master3 = client.whoMaster()
logging.info("master3 = %s", master3)
NT.assert_equals(master3,pm)
|
Add first test for 'preferred_masters'
|
Tests: Add first test for 'preferred_masters'
This commit adds a first test for 'preferred_masters', by copying the
existing 'system_tests_preferred' test, and changing it slightly to use
'preferredMasters' instead of 'forceMaster' with 'preferred=True'.
See: 98d74321f5d7c2f54d5faaba85f379b2a572c35c
See: d8a6072cbaf5ef66b5229035c6a60c97b3509347
See: ARAKOON-360
See: http://jira.incubaid.com/browse/ARAKOON-360
|
Python
|
apache-2.0
|
openvstorage/arakoon,openvstorage/arakoon,Incubaid/arakoon,Incubaid/arakoon,Incubaid/arakoon,openvstorage/arakoon
|
Tests: Add first test for 'preferred_masters'
This commit adds a first test for 'preferred_masters', by copying the
existing 'system_tests_preferred' test, and changing it slightly to use
'preferredMasters' instead of 'forceMaster' with 'preferred=True'.
See: 98d74321f5d7c2f54d5faaba85f379b2a572c35c
See: d8a6072cbaf5ef66b5229035c6a60c97b3509347
See: ARAKOON-360
See: http://jira.incubaid.com/browse/ARAKOON-360
|
"""
This file is part of Arakoon, a distributed key-value store. Copyright
(C) 2010 Incubaid BVBA
Licensees holding a valid Incubaid license may use this file in
accordance with Incubaid's Arakoon commercial license agreement. For
more information on how to enter into this agreement, please contact
Incubaid (contact details can be found on www.arakoon.org/licensing).
Alternatively, this file may be redistributed and/or modified under
the terms of the GNU Affero General Public License version 3, as
published by the Free Software Foundation. Under this license, this
file is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Affero General Public License for more details.
You should have received a copy of the
GNU Affero General Public License along with this program (file "COPYING").
If not, see <http://www.gnu.org/licenses/>.
"""
from .. import system_tests_common as Common
import nose.tools as NT
import time
import logging
@Common.with_custom_setup(Common.setup_3_nodes, Common.basic_teardown)
def test_prefered_master():
cluster = Common.q.manage.arakoon.getCluster(Common.cluster_id)
cluster.stop()
pm = Common.node_names[0]
cluster.preferredMasters([pm])
cluster.start()
Common.assert_running_nodes(3)
time.sleep(4.0 * Common.lease_duration)
logging.info("all should be well")
client = Common.get_client()
master = client.whoMaster()
NT.assert_equals(pm, master)
logging.info("master indeed is `%s`", pm)
client.dropConnections()
del client
cluster.stopOne(pm)
delay = 8.0 * Common.lease_duration
logging.info("stopped master, waiting for things to settle (%fs)", delay)
time.sleep(delay)
client = Common.get_client()
master2 = client.whoMaster()
logging.info("master2 = %s",master2)
NT.assert_equals(True, master2 != pm)
logging.info("node %s took over, now restarting preferred master", master2)
cluster.startOne(pm)
logging.info("waiting for things to settle")
time.sleep(4.0* Common.lease_duration)
client = Common.get_client()
master3 = client.whoMaster()
logging.info("master3 = %s", master3)
NT.assert_equals(master3,pm)
|
<commit_before><commit_msg>Tests: Add first test for 'preferred_masters'
This commit adds a first test for 'preferred_masters', by copying the
existing 'system_tests_preferred' test, and changing it slightly to use
'preferredMasters' instead of 'forceMaster' with 'preferred=True'.
See: 98d74321f5d7c2f54d5faaba85f379b2a572c35c
See: d8a6072cbaf5ef66b5229035c6a60c97b3509347
See: ARAKOON-360
See: http://jira.incubaid.com/browse/ARAKOON-360<commit_after>
|
"""
This file is part of Arakoon, a distributed key-value store. Copyright
(C) 2010 Incubaid BVBA
Licensees holding a valid Incubaid license may use this file in
accordance with Incubaid's Arakoon commercial license agreement. For
more information on how to enter into this agreement, please contact
Incubaid (contact details can be found on www.arakoon.org/licensing).
Alternatively, this file may be redistributed and/or modified under
the terms of the GNU Affero General Public License version 3, as
published by the Free Software Foundation. Under this license, this
file is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Affero General Public License for more details.
You should have received a copy of the
GNU Affero General Public License along with this program (file "COPYING").
If not, see <http://www.gnu.org/licenses/>.
"""
from .. import system_tests_common as Common
import nose.tools as NT
import time
import logging
@Common.with_custom_setup(Common.setup_3_nodes, Common.basic_teardown)
def test_prefered_master():
cluster = Common.q.manage.arakoon.getCluster(Common.cluster_id)
cluster.stop()
pm = Common.node_names[0]
cluster.preferredMasters([pm])
cluster.start()
Common.assert_running_nodes(3)
time.sleep(4.0 * Common.lease_duration)
logging.info("all should be well")
client = Common.get_client()
master = client.whoMaster()
NT.assert_equals(pm, master)
logging.info("master indeed is `%s`", pm)
client.dropConnections()
del client
cluster.stopOne(pm)
delay = 8.0 * Common.lease_duration
logging.info("stopped master, waiting for things to settle (%fs)", delay)
time.sleep(delay)
client = Common.get_client()
master2 = client.whoMaster()
logging.info("master2 = %s",master2)
NT.assert_equals(True, master2 != pm)
logging.info("node %s took over, now restarting preferred master", master2)
cluster.startOne(pm)
logging.info("waiting for things to settle")
time.sleep(4.0* Common.lease_duration)
client = Common.get_client()
master3 = client.whoMaster()
logging.info("master3 = %s", master3)
NT.assert_equals(master3,pm)
|
Tests: Add first test for 'preferred_masters'
This commit adds a first test for 'preferred_masters', by copying the
existing 'system_tests_preferred' test, and changing it slightly to use
'preferredMasters' instead of 'forceMaster' with 'preferred=True'.
See: 98d74321f5d7c2f54d5faaba85f379b2a572c35c
See: d8a6072cbaf5ef66b5229035c6a60c97b3509347
See: ARAKOON-360
See: http://jira.incubaid.com/browse/ARAKOON-360"""
This file is part of Arakoon, a distributed key-value store. Copyright
(C) 2010 Incubaid BVBA
Licensees holding a valid Incubaid license may use this file in
accordance with Incubaid's Arakoon commercial license agreement. For
more information on how to enter into this agreement, please contact
Incubaid (contact details can be found on www.arakoon.org/licensing).
Alternatively, this file may be redistributed and/or modified under
the terms of the GNU Affero General Public License version 3, as
published by the Free Software Foundation. Under this license, this
file is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Affero General Public License for more details.
You should have received a copy of the
GNU Affero General Public License along with this program (file "COPYING").
If not, see <http://www.gnu.org/licenses/>.
"""
from .. import system_tests_common as Common
import nose.tools as NT
import time
import logging
@Common.with_custom_setup(Common.setup_3_nodes, Common.basic_teardown)
def test_prefered_master():
cluster = Common.q.manage.arakoon.getCluster(Common.cluster_id)
cluster.stop()
pm = Common.node_names[0]
cluster.preferredMasters([pm])
cluster.start()
Common.assert_running_nodes(3)
time.sleep(4.0 * Common.lease_duration)
logging.info("all should be well")
client = Common.get_client()
master = client.whoMaster()
NT.assert_equals(pm, master)
logging.info("master indeed is `%s`", pm)
client.dropConnections()
del client
cluster.stopOne(pm)
delay = 8.0 * Common.lease_duration
logging.info("stopped master, waiting for things to settle (%fs)", delay)
time.sleep(delay)
client = Common.get_client()
master2 = client.whoMaster()
logging.info("master2 = %s",master2)
NT.assert_equals(True, master2 != pm)
logging.info("node %s took over, now restarting preferred master", master2)
cluster.startOne(pm)
logging.info("waiting for things to settle")
time.sleep(4.0* Common.lease_duration)
client = Common.get_client()
master3 = client.whoMaster()
logging.info("master3 = %s", master3)
NT.assert_equals(master3,pm)
|
<commit_before><commit_msg>Tests: Add first test for 'preferred_masters'
This commit adds a first test for 'preferred_masters', by copying the
existing 'system_tests_preferred' test, and changing it slightly to use
'preferredMasters' instead of 'forceMaster' with 'preferred=True'.
See: 98d74321f5d7c2f54d5faaba85f379b2a572c35c
See: d8a6072cbaf5ef66b5229035c6a60c97b3509347
See: ARAKOON-360
See: http://jira.incubaid.com/browse/ARAKOON-360<commit_after>"""
This file is part of Arakoon, a distributed key-value store. Copyright
(C) 2010 Incubaid BVBA
Licensees holding a valid Incubaid license may use this file in
accordance with Incubaid's Arakoon commercial license agreement. For
more information on how to enter into this agreement, please contact
Incubaid (contact details can be found on www.arakoon.org/licensing).
Alternatively, this file may be redistributed and/or modified under
the terms of the GNU Affero General Public License version 3, as
published by the Free Software Foundation. Under this license, this
file is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Affero General Public License for more details.
You should have received a copy of the
GNU Affero General Public License along with this program (file "COPYING").
If not, see <http://www.gnu.org/licenses/>.
"""
from .. import system_tests_common as Common
import nose.tools as NT
import time
import logging
@Common.with_custom_setup(Common.setup_3_nodes, Common.basic_teardown)
def test_prefered_master():
cluster = Common.q.manage.arakoon.getCluster(Common.cluster_id)
cluster.stop()
pm = Common.node_names[0]
cluster.preferredMasters([pm])
cluster.start()
Common.assert_running_nodes(3)
time.sleep(4.0 * Common.lease_duration)
logging.info("all should be well")
client = Common.get_client()
master = client.whoMaster()
NT.assert_equals(pm, master)
logging.info("master indeed is `%s`", pm)
client.dropConnections()
del client
cluster.stopOne(pm)
delay = 8.0 * Common.lease_duration
logging.info("stopped master, waiting for things to settle (%fs)", delay)
time.sleep(delay)
client = Common.get_client()
master2 = client.whoMaster()
logging.info("master2 = %s",master2)
NT.assert_equals(True, master2 != pm)
logging.info("node %s took over, now restarting preferred master", master2)
cluster.startOne(pm)
logging.info("waiting for things to settle")
time.sleep(4.0* Common.lease_duration)
client = Common.get_client()
master3 = client.whoMaster()
logging.info("master3 = %s", master3)
NT.assert_equals(master3,pm)
|
|
953278ec93184bad7586b69aa55ef1f087419edd
|
Orange/widgets/visualize/tests/test_owboxplot.py
|
Orange/widgets/visualize/tests/test_owboxplot.py
|
# Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
from unittest import skip
import numpy as np
from Orange.data import Table, ContinuousVariable
from Orange.widgets.visualize.owboxplot import OWBoxPlot
from Orange.widgets.tests.base import WidgetTest
class TestOWBoxPlot(WidgetTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.iris = Table("iris")
cls.zoo = Table("zoo")
cls.housing = Table("housing")
def setUp(self):
self.widget = self.create_widget(OWBoxPlot)
@skip("Known bug, FIXME!")
def test_input_data(self):
"""Check widget's data"""
self.send_signal("Data", self.iris)
self.assertGreater(len(self.widget.attrs), 0)
self.send_signal("Data", None)
self.assertEqual(len(self.widget.attrs), 0)
def test_input_data_missings_cont_group_var(self):
"""Check widget with continuous data with missing values and group variable"""
data = self.iris
data.X[:, 0] = np.nan
self.send_signal("Data", data)
# used to crash, see #1568
def test_input_data_missings_cont_no_group_var(self):
"""Check widget with continuous data with missing values and no group variable"""
data = self.housing
data.X[:, 0] = np.nan
self.send_signal("Data", data)
# used to crash, see #1568
def test_input_data_missings_disc_group_var(self):
"""Check widget with discrete data with missing values and group variable"""
data = self.zoo
data.X[:, 0] = np.nan
self.send_signal("Data", data)
def test_input_data_missings_disc_no_group_var(self):
"""Check widget discrete data with missing values and no group variable"""
data = self.zoo
data.domain.class_var = ContinuousVariable("cls")
data.X[:, 0] = np.nan
self.send_signal("Data", data)
|
Fix crash with missing values
|
OWBoxPlot: Fix crash with missing values
|
Python
|
bsd-2-clause
|
cheral/orange3,cheral/orange3,cheral/orange3,cheral/orange3,cheral/orange3,cheral/orange3
|
OWBoxPlot: Fix crash with missing values
|
# Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
from unittest import skip
import numpy as np
from Orange.data import Table, ContinuousVariable
from Orange.widgets.visualize.owboxplot import OWBoxPlot
from Orange.widgets.tests.base import WidgetTest
class TestOWBoxPlot(WidgetTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.iris = Table("iris")
cls.zoo = Table("zoo")
cls.housing = Table("housing")
def setUp(self):
self.widget = self.create_widget(OWBoxPlot)
@skip("Known bug, FIXME!")
def test_input_data(self):
"""Check widget's data"""
self.send_signal("Data", self.iris)
self.assertGreater(len(self.widget.attrs), 0)
self.send_signal("Data", None)
self.assertEqual(len(self.widget.attrs), 0)
def test_input_data_missings_cont_group_var(self):
"""Check widget with continuous data with missing values and group variable"""
data = self.iris
data.X[:, 0] = np.nan
self.send_signal("Data", data)
# used to crash, see #1568
def test_input_data_missings_cont_no_group_var(self):
"""Check widget with continuous data with missing values and no group variable"""
data = self.housing
data.X[:, 0] = np.nan
self.send_signal("Data", data)
# used to crash, see #1568
def test_input_data_missings_disc_group_var(self):
"""Check widget with discrete data with missing values and group variable"""
data = self.zoo
data.X[:, 0] = np.nan
self.send_signal("Data", data)
def test_input_data_missings_disc_no_group_var(self):
"""Check widget discrete data with missing values and no group variable"""
data = self.zoo
data.domain.class_var = ContinuousVariable("cls")
data.X[:, 0] = np.nan
self.send_signal("Data", data)
|
<commit_before><commit_msg>OWBoxPlot: Fix crash with missing values<commit_after>
|
# Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
from unittest import skip
import numpy as np
from Orange.data import Table, ContinuousVariable
from Orange.widgets.visualize.owboxplot import OWBoxPlot
from Orange.widgets.tests.base import WidgetTest
class TestOWBoxPlot(WidgetTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.iris = Table("iris")
cls.zoo = Table("zoo")
cls.housing = Table("housing")
def setUp(self):
self.widget = self.create_widget(OWBoxPlot)
@skip("Known bug, FIXME!")
def test_input_data(self):
"""Check widget's data"""
self.send_signal("Data", self.iris)
self.assertGreater(len(self.widget.attrs), 0)
self.send_signal("Data", None)
self.assertEqual(len(self.widget.attrs), 0)
def test_input_data_missings_cont_group_var(self):
"""Check widget with continuous data with missing values and group variable"""
data = self.iris
data.X[:, 0] = np.nan
self.send_signal("Data", data)
# used to crash, see #1568
def test_input_data_missings_cont_no_group_var(self):
"""Check widget with continuous data with missing values and no group variable"""
data = self.housing
data.X[:, 0] = np.nan
self.send_signal("Data", data)
# used to crash, see #1568
def test_input_data_missings_disc_group_var(self):
"""Check widget with discrete data with missing values and group variable"""
data = self.zoo
data.X[:, 0] = np.nan
self.send_signal("Data", data)
def test_input_data_missings_disc_no_group_var(self):
"""Check widget discrete data with missing values and no group variable"""
data = self.zoo
data.domain.class_var = ContinuousVariable("cls")
data.X[:, 0] = np.nan
self.send_signal("Data", data)
|
OWBoxPlot: Fix crash with missing values# Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
from unittest import skip
import numpy as np
from Orange.data import Table, ContinuousVariable
from Orange.widgets.visualize.owboxplot import OWBoxPlot
from Orange.widgets.tests.base import WidgetTest
class TestOWBoxPlot(WidgetTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.iris = Table("iris")
cls.zoo = Table("zoo")
cls.housing = Table("housing")
def setUp(self):
self.widget = self.create_widget(OWBoxPlot)
@skip("Known bug, FIXME!")
def test_input_data(self):
"""Check widget's data"""
self.send_signal("Data", self.iris)
self.assertGreater(len(self.widget.attrs), 0)
self.send_signal("Data", None)
self.assertEqual(len(self.widget.attrs), 0)
def test_input_data_missings_cont_group_var(self):
"""Check widget with continuous data with missing values and group variable"""
data = self.iris
data.X[:, 0] = np.nan
self.send_signal("Data", data)
# used to crash, see #1568
def test_input_data_missings_cont_no_group_var(self):
"""Check widget with continuous data with missing values and no group variable"""
data = self.housing
data.X[:, 0] = np.nan
self.send_signal("Data", data)
# used to crash, see #1568
def test_input_data_missings_disc_group_var(self):
"""Check widget with discrete data with missing values and group variable"""
data = self.zoo
data.X[:, 0] = np.nan
self.send_signal("Data", data)
def test_input_data_missings_disc_no_group_var(self):
"""Check widget discrete data with missing values and no group variable"""
data = self.zoo
data.domain.class_var = ContinuousVariable("cls")
data.X[:, 0] = np.nan
self.send_signal("Data", data)
|
<commit_before><commit_msg>OWBoxPlot: Fix crash with missing values<commit_after># Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
from unittest import skip
import numpy as np
from Orange.data import Table, ContinuousVariable
from Orange.widgets.visualize.owboxplot import OWBoxPlot
from Orange.widgets.tests.base import WidgetTest
class TestOWBoxPlot(WidgetTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.iris = Table("iris")
cls.zoo = Table("zoo")
cls.housing = Table("housing")
def setUp(self):
self.widget = self.create_widget(OWBoxPlot)
@skip("Known bug, FIXME!")
def test_input_data(self):
"""Check widget's data"""
self.send_signal("Data", self.iris)
self.assertGreater(len(self.widget.attrs), 0)
self.send_signal("Data", None)
self.assertEqual(len(self.widget.attrs), 0)
def test_input_data_missings_cont_group_var(self):
"""Check widget with continuous data with missing values and group variable"""
data = self.iris
data.X[:, 0] = np.nan
self.send_signal("Data", data)
# used to crash, see #1568
def test_input_data_missings_cont_no_group_var(self):
"""Check widget with continuous data with missing values and no group variable"""
data = self.housing
data.X[:, 0] = np.nan
self.send_signal("Data", data)
# used to crash, see #1568
def test_input_data_missings_disc_group_var(self):
"""Check widget with discrete data with missing values and group variable"""
data = self.zoo
data.X[:, 0] = np.nan
self.send_signal("Data", data)
def test_input_data_missings_disc_no_group_var(self):
"""Check widget discrete data with missing values and no group variable"""
data = self.zoo
data.domain.class_var = ContinuousVariable("cls")
data.X[:, 0] = np.nan
self.send_signal("Data", data)
|
|
3ea6f9f96606d267c98b89f8ab3853eaa026bad8
|
haas_rest_test/plugins/tests/test_test_parameters.py
|
haas_rest_test/plugins/tests/test_test_parameters.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Simon Jagoe
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license. See the LICENSE.txt file for details.
from __future__ import absolute_import, unicode_literals
from haas.testing import unittest
from haas_rest_test.config import Config
from haas_rest_test.exceptions import YamlParseError
from ..test_parameters import MethodTestParameter
class TestMethodTestParameter(unittest.TestCase):
def test_load(self):
# Given
config = Config.from_dict({'host': 'name.domain'}, __file__)
spec = {'method': 'POST'}
parameter = MethodTestParameter.from_dict(spec)
# When
loaded = parameter.load(config)
# Then
self.assertEqual(loaded, spec)
def test_invalid_method(self):
# Given
spec = {'method': 'OTHER'}
# When/Then
with self.assertRaises(YamlParseError):
MethodTestParameter.from_dict(spec)
|
Add test for method plugin
|
Add test for method plugin
|
Python
|
bsd-3-clause
|
sjagoe/usagi
|
Add test for method plugin
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Simon Jagoe
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license. See the LICENSE.txt file for details.
from __future__ import absolute_import, unicode_literals
from haas.testing import unittest
from haas_rest_test.config import Config
from haas_rest_test.exceptions import YamlParseError
from ..test_parameters import MethodTestParameter
class TestMethodTestParameter(unittest.TestCase):
def test_load(self):
# Given
config = Config.from_dict({'host': 'name.domain'}, __file__)
spec = {'method': 'POST'}
parameter = MethodTestParameter.from_dict(spec)
# When
loaded = parameter.load(config)
# Then
self.assertEqual(loaded, spec)
def test_invalid_method(self):
# Given
spec = {'method': 'OTHER'}
# When/Then
with self.assertRaises(YamlParseError):
MethodTestParameter.from_dict(spec)
|
<commit_before><commit_msg>Add test for method plugin<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Simon Jagoe
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license. See the LICENSE.txt file for details.
from __future__ import absolute_import, unicode_literals
from haas.testing import unittest
from haas_rest_test.config import Config
from haas_rest_test.exceptions import YamlParseError
from ..test_parameters import MethodTestParameter
class TestMethodTestParameter(unittest.TestCase):
def test_load(self):
# Given
config = Config.from_dict({'host': 'name.domain'}, __file__)
spec = {'method': 'POST'}
parameter = MethodTestParameter.from_dict(spec)
# When
loaded = parameter.load(config)
# Then
self.assertEqual(loaded, spec)
def test_invalid_method(self):
# Given
spec = {'method': 'OTHER'}
# When/Then
with self.assertRaises(YamlParseError):
MethodTestParameter.from_dict(spec)
|
Add test for method plugin# -*- coding: utf-8 -*-
# Copyright (c) 2014 Simon Jagoe
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license. See the LICENSE.txt file for details.
from __future__ import absolute_import, unicode_literals
from haas.testing import unittest
from haas_rest_test.config import Config
from haas_rest_test.exceptions import YamlParseError
from ..test_parameters import MethodTestParameter
class TestMethodTestParameter(unittest.TestCase):
def test_load(self):
# Given
config = Config.from_dict({'host': 'name.domain'}, __file__)
spec = {'method': 'POST'}
parameter = MethodTestParameter.from_dict(spec)
# When
loaded = parameter.load(config)
# Then
self.assertEqual(loaded, spec)
def test_invalid_method(self):
# Given
spec = {'method': 'OTHER'}
# When/Then
with self.assertRaises(YamlParseError):
MethodTestParameter.from_dict(spec)
|
<commit_before><commit_msg>Add test for method plugin<commit_after># -*- coding: utf-8 -*-
# Copyright (c) 2014 Simon Jagoe
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license. See the LICENSE.txt file for details.
from __future__ import absolute_import, unicode_literals
from haas.testing import unittest
from haas_rest_test.config import Config
from haas_rest_test.exceptions import YamlParseError
from ..test_parameters import MethodTestParameter
class TestMethodTestParameter(unittest.TestCase):
def test_load(self):
# Given
config = Config.from_dict({'host': 'name.domain'}, __file__)
spec = {'method': 'POST'}
parameter = MethodTestParameter.from_dict(spec)
# When
loaded = parameter.load(config)
# Then
self.assertEqual(loaded, spec)
def test_invalid_method(self):
# Given
spec = {'method': 'OTHER'}
# When/Then
with self.assertRaises(YamlParseError):
MethodTestParameter.from_dict(spec)
|
|
37d90e01e52fbb627f93d0dd2eb0ace3df6131b4
|
andalusian/migrations/0005_auto_20190709_1132.py
|
andalusian/migrations/0005_auto_20190709_1132.py
|
# Generated by Django 2.1.7 on 2019-07-09 09:32
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('andalusian', '0006_auto_20190709_1045'),
]
operations = [
migrations.RemoveField(
model_name='instrumentalias',
name='instrument',
),
migrations.AlterModelManagers(
name='instrument',
managers=[
],
),
migrations.AddField(
model_name='instrument',
name='mbid',
field=models.UUIDField(blank=True, null=True),
),
migrations.AddField(
model_name='instrument',
name='name',
field=models.CharField(default=django.utils.timezone.now, max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='instrument',
name='percussion',
field=models.BooleanField(default=False),
),
migrations.DeleteModel(
name='InstrumentAlias',
),
]
|
Add migrations file for adding andausian instruments
|
Add migrations file for adding andausian instruments
|
Python
|
agpl-3.0
|
MTG/dunya,MTG/dunya,MTG/dunya,MTG/dunya
|
Add migrations file for adding andausian instruments
|
# Generated by Django 2.1.7 on 2019-07-09 09:32
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('andalusian', '0006_auto_20190709_1045'),
]
operations = [
migrations.RemoveField(
model_name='instrumentalias',
name='instrument',
),
migrations.AlterModelManagers(
name='instrument',
managers=[
],
),
migrations.AddField(
model_name='instrument',
name='mbid',
field=models.UUIDField(blank=True, null=True),
),
migrations.AddField(
model_name='instrument',
name='name',
field=models.CharField(default=django.utils.timezone.now, max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='instrument',
name='percussion',
field=models.BooleanField(default=False),
),
migrations.DeleteModel(
name='InstrumentAlias',
),
]
|
<commit_before><commit_msg>Add migrations file for adding andausian instruments<commit_after>
|
# Generated by Django 2.1.7 on 2019-07-09 09:32
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('andalusian', '0006_auto_20190709_1045'),
]
operations = [
migrations.RemoveField(
model_name='instrumentalias',
name='instrument',
),
migrations.AlterModelManagers(
name='instrument',
managers=[
],
),
migrations.AddField(
model_name='instrument',
name='mbid',
field=models.UUIDField(blank=True, null=True),
),
migrations.AddField(
model_name='instrument',
name='name',
field=models.CharField(default=django.utils.timezone.now, max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='instrument',
name='percussion',
field=models.BooleanField(default=False),
),
migrations.DeleteModel(
name='InstrumentAlias',
),
]
|
Add migrations file for adding andausian instruments# Generated by Django 2.1.7 on 2019-07-09 09:32
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('andalusian', '0006_auto_20190709_1045'),
]
operations = [
migrations.RemoveField(
model_name='instrumentalias',
name='instrument',
),
migrations.AlterModelManagers(
name='instrument',
managers=[
],
),
migrations.AddField(
model_name='instrument',
name='mbid',
field=models.UUIDField(blank=True, null=True),
),
migrations.AddField(
model_name='instrument',
name='name',
field=models.CharField(default=django.utils.timezone.now, max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='instrument',
name='percussion',
field=models.BooleanField(default=False),
),
migrations.DeleteModel(
name='InstrumentAlias',
),
]
|
<commit_before><commit_msg>Add migrations file for adding andausian instruments<commit_after># Generated by Django 2.1.7 on 2019-07-09 09:32
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('andalusian', '0006_auto_20190709_1045'),
]
operations = [
migrations.RemoveField(
model_name='instrumentalias',
name='instrument',
),
migrations.AlterModelManagers(
name='instrument',
managers=[
],
),
migrations.AddField(
model_name='instrument',
name='mbid',
field=models.UUIDField(blank=True, null=True),
),
migrations.AddField(
model_name='instrument',
name='name',
field=models.CharField(default=django.utils.timezone.now, max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='instrument',
name='percussion',
field=models.BooleanField(default=False),
),
migrations.DeleteModel(
name='InstrumentAlias',
),
]
|
|
51187fd8c39a1a05aade20c416084e5b69973176
|
tests/test_curtailment.py
|
tests/test_curtailment.py
|
"""
This "test" runs curtailment for different curtailment requirements and
methods `voltage-based` and `feedin-proportional`.
It requires a ding0 grid called ding0_grid_example.pkl in the same directory.
"""
import pandas as pd
import numpy as np
from edisgo import EDisGo
def get_generator_feedins(edisgo_grid):
generator_feedins = {}
for i in edisgo_grid.network.mv_grid.graph.nodes_by_attribute(
'generator'):
generator_feedins[i] = i.timeseries['p']
for i in edisgo_grid.network.mv_grid.graph.nodes_by_attribute(
'generator_agg'):
generator_feedins[i] = i.timeseries['p']
for lvgd in edisgo_grid.network.mv_grid.lv_grids:
for i in lvgd.graph.nodes_by_attribute('generator'):
generator_feedins[i] = i.timeseries['p']
for i in lvgd.graph.nodes_by_attribute('generator_agg'):
generator_feedins[i] = i.timeseries['p']
return pd.DataFrame(generator_feedins)
timeindex = pd.date_range('2011-01-01 00:00', periods=8, freq='H')
feedin_pu = pd.DataFrame(
{'solar': np.array([0.0, 0.0, 0.5, 0.5, 0.8, 0.8, 1.0, 1.0]),
'wind': np.array([0.0, 1.0, 0.5, 1.0, 0.6, 1.0, 0.0, 1.0])},
index=timeindex)
gen_dispatchable_df = pd.DataFrame(
{'other': [0.0] * len(timeindex)},
index=timeindex)
edisgo = EDisGo(
ding0_grid="ding0_grid_example.pkl",
generator_scenario='ego100',
timeseries_generation_fluctuating=feedin_pu,
timeseries_generation_dispatchable=gen_dispatchable_df,
timeseries_load='demandlib',
timeindex=timeindex)
for cm in ['voltage-based', 'feedin-proportional']:
for curtailment_percent in [0, 50, 100]:
# curtail
feedin_gens = get_generator_feedins(edisgo)
curtailment = feedin_gens.sum(axis=1) * curtailment_percent / 100.0
edisgo.curtail(curtailment_timeseries=curtailment,
methodology=cm, voltage_threshold=0.0)
|
Test script for running curtailment
|
Test script for running curtailment
|
Python
|
agpl-3.0
|
openego/eDisGo,openego/eDisGo
|
Test script for running curtailment
|
"""
This "test" runs curtailment for different curtailment requirements and
methods `voltage-based` and `feedin-proportional`.
It requires a ding0 grid called ding0_grid_example.pkl in the same directory.
"""
import pandas as pd
import numpy as np
from edisgo import EDisGo
def get_generator_feedins(edisgo_grid):
generator_feedins = {}
for i in edisgo_grid.network.mv_grid.graph.nodes_by_attribute(
'generator'):
generator_feedins[i] = i.timeseries['p']
for i in edisgo_grid.network.mv_grid.graph.nodes_by_attribute(
'generator_agg'):
generator_feedins[i] = i.timeseries['p']
for lvgd in edisgo_grid.network.mv_grid.lv_grids:
for i in lvgd.graph.nodes_by_attribute('generator'):
generator_feedins[i] = i.timeseries['p']
for i in lvgd.graph.nodes_by_attribute('generator_agg'):
generator_feedins[i] = i.timeseries['p']
return pd.DataFrame(generator_feedins)
timeindex = pd.date_range('2011-01-01 00:00', periods=8, freq='H')
feedin_pu = pd.DataFrame(
{'solar': np.array([0.0, 0.0, 0.5, 0.5, 0.8, 0.8, 1.0, 1.0]),
'wind': np.array([0.0, 1.0, 0.5, 1.0, 0.6, 1.0, 0.0, 1.0])},
index=timeindex)
gen_dispatchable_df = pd.DataFrame(
{'other': [0.0] * len(timeindex)},
index=timeindex)
edisgo = EDisGo(
ding0_grid="ding0_grid_example.pkl",
generator_scenario='ego100',
timeseries_generation_fluctuating=feedin_pu,
timeseries_generation_dispatchable=gen_dispatchable_df,
timeseries_load='demandlib',
timeindex=timeindex)
for cm in ['voltage-based', 'feedin-proportional']:
for curtailment_percent in [0, 50, 100]:
# curtail
feedin_gens = get_generator_feedins(edisgo)
curtailment = feedin_gens.sum(axis=1) * curtailment_percent / 100.0
edisgo.curtail(curtailment_timeseries=curtailment,
methodology=cm, voltage_threshold=0.0)
|
<commit_before><commit_msg>Test script for running curtailment<commit_after>
|
"""
This "test" runs curtailment for different curtailment requirements and
methods `voltage-based` and `feedin-proportional`.
It requires a ding0 grid called ding0_grid_example.pkl in the same directory.
"""
import pandas as pd
import numpy as np
from edisgo import EDisGo
def get_generator_feedins(edisgo_grid):
generator_feedins = {}
for i in edisgo_grid.network.mv_grid.graph.nodes_by_attribute(
'generator'):
generator_feedins[i] = i.timeseries['p']
for i in edisgo_grid.network.mv_grid.graph.nodes_by_attribute(
'generator_agg'):
generator_feedins[i] = i.timeseries['p']
for lvgd in edisgo_grid.network.mv_grid.lv_grids:
for i in lvgd.graph.nodes_by_attribute('generator'):
generator_feedins[i] = i.timeseries['p']
for i in lvgd.graph.nodes_by_attribute('generator_agg'):
generator_feedins[i] = i.timeseries['p']
return pd.DataFrame(generator_feedins)
timeindex = pd.date_range('2011-01-01 00:00', periods=8, freq='H')
feedin_pu = pd.DataFrame(
{'solar': np.array([0.0, 0.0, 0.5, 0.5, 0.8, 0.8, 1.0, 1.0]),
'wind': np.array([0.0, 1.0, 0.5, 1.0, 0.6, 1.0, 0.0, 1.0])},
index=timeindex)
gen_dispatchable_df = pd.DataFrame(
{'other': [0.0] * len(timeindex)},
index=timeindex)
edisgo = EDisGo(
ding0_grid="ding0_grid_example.pkl",
generator_scenario='ego100',
timeseries_generation_fluctuating=feedin_pu,
timeseries_generation_dispatchable=gen_dispatchable_df,
timeseries_load='demandlib',
timeindex=timeindex)
for cm in ['voltage-based', 'feedin-proportional']:
for curtailment_percent in [0, 50, 100]:
# curtail
feedin_gens = get_generator_feedins(edisgo)
curtailment = feedin_gens.sum(axis=1) * curtailment_percent / 100.0
edisgo.curtail(curtailment_timeseries=curtailment,
methodology=cm, voltage_threshold=0.0)
|
Test script for running curtailment"""
This "test" runs curtailment for different curtailment requirements and
methods `voltage-based` and `feedin-proportional`.
It requires a ding0 grid called ding0_grid_example.pkl in the same directory.
"""
import pandas as pd
import numpy as np
from edisgo import EDisGo
def get_generator_feedins(edisgo_grid):
generator_feedins = {}
for i in edisgo_grid.network.mv_grid.graph.nodes_by_attribute(
'generator'):
generator_feedins[i] = i.timeseries['p']
for i in edisgo_grid.network.mv_grid.graph.nodes_by_attribute(
'generator_agg'):
generator_feedins[i] = i.timeseries['p']
for lvgd in edisgo_grid.network.mv_grid.lv_grids:
for i in lvgd.graph.nodes_by_attribute('generator'):
generator_feedins[i] = i.timeseries['p']
for i in lvgd.graph.nodes_by_attribute('generator_agg'):
generator_feedins[i] = i.timeseries['p']
return pd.DataFrame(generator_feedins)
timeindex = pd.date_range('2011-01-01 00:00', periods=8, freq='H')
feedin_pu = pd.DataFrame(
{'solar': np.array([0.0, 0.0, 0.5, 0.5, 0.8, 0.8, 1.0, 1.0]),
'wind': np.array([0.0, 1.0, 0.5, 1.0, 0.6, 1.0, 0.0, 1.0])},
index=timeindex)
gen_dispatchable_df = pd.DataFrame(
{'other': [0.0] * len(timeindex)},
index=timeindex)
edisgo = EDisGo(
ding0_grid="ding0_grid_example.pkl",
generator_scenario='ego100',
timeseries_generation_fluctuating=feedin_pu,
timeseries_generation_dispatchable=gen_dispatchable_df,
timeseries_load='demandlib',
timeindex=timeindex)
for cm in ['voltage-based', 'feedin-proportional']:
for curtailment_percent in [0, 50, 100]:
# curtail
feedin_gens = get_generator_feedins(edisgo)
curtailment = feedin_gens.sum(axis=1) * curtailment_percent / 100.0
edisgo.curtail(curtailment_timeseries=curtailment,
methodology=cm, voltage_threshold=0.0)
|
<commit_before><commit_msg>Test script for running curtailment<commit_after>"""
This "test" runs curtailment for different curtailment requirements and
methods `voltage-based` and `feedin-proportional`.
It requires a ding0 grid called ding0_grid_example.pkl in the same directory.
"""
import pandas as pd
import numpy as np
from edisgo import EDisGo
def get_generator_feedins(edisgo_grid):
generator_feedins = {}
for i in edisgo_grid.network.mv_grid.graph.nodes_by_attribute(
'generator'):
generator_feedins[i] = i.timeseries['p']
for i in edisgo_grid.network.mv_grid.graph.nodes_by_attribute(
'generator_agg'):
generator_feedins[i] = i.timeseries['p']
for lvgd in edisgo_grid.network.mv_grid.lv_grids:
for i in lvgd.graph.nodes_by_attribute('generator'):
generator_feedins[i] = i.timeseries['p']
for i in lvgd.graph.nodes_by_attribute('generator_agg'):
generator_feedins[i] = i.timeseries['p']
return pd.DataFrame(generator_feedins)
timeindex = pd.date_range('2011-01-01 00:00', periods=8, freq='H')
feedin_pu = pd.DataFrame(
{'solar': np.array([0.0, 0.0, 0.5, 0.5, 0.8, 0.8, 1.0, 1.0]),
'wind': np.array([0.0, 1.0, 0.5, 1.0, 0.6, 1.0, 0.0, 1.0])},
index=timeindex)
gen_dispatchable_df = pd.DataFrame(
{'other': [0.0] * len(timeindex)},
index=timeindex)
edisgo = EDisGo(
ding0_grid="ding0_grid_example.pkl",
generator_scenario='ego100',
timeseries_generation_fluctuating=feedin_pu,
timeseries_generation_dispatchable=gen_dispatchable_df,
timeseries_load='demandlib',
timeindex=timeindex)
for cm in ['voltage-based', 'feedin-proportional']:
for curtailment_percent in [0, 50, 100]:
# curtail
feedin_gens = get_generator_feedins(edisgo)
curtailment = feedin_gens.sum(axis=1) * curtailment_percent / 100.0
edisgo.curtail(curtailment_timeseries=curtailment,
methodology=cm, voltage_threshold=0.0)
|
|
197fb886cba673b385189809a1a90032032f5c26
|
keras/legacy/models.py
|
keras/legacy/models.py
|
from .layers import Merge
def needs_legacy_support(model):
return isinstance(model.layers[0], Merge)
def legacy_sequential_layers(model):
layers = []
if model.layers:
if isinstance(model.layers[0], Merge):
merge = model.layers[0]
for layer in merge.layers:
if hasattr(layer, 'layers'):
for sublayer in layer.layers:
if sublayer not in layers:
layers.append(sublayer)
else:
if layer not in layers:
layers.append(layer)
else:
if model.layers[0] not in layers:
layers.append(model.layers[0])
for layer in model.layers[1:]:
if layer not in layers:
layers.append(layer)
return layers
|
Add missing legacy support file.
|
Add missing legacy support file.
|
Python
|
apache-2.0
|
keras-team/keras,keras-team/keras
|
Add missing legacy support file.
|
from .layers import Merge
def needs_legacy_support(model):
return isinstance(model.layers[0], Merge)
def legacy_sequential_layers(model):
layers = []
if model.layers:
if isinstance(model.layers[0], Merge):
merge = model.layers[0]
for layer in merge.layers:
if hasattr(layer, 'layers'):
for sublayer in layer.layers:
if sublayer not in layers:
layers.append(sublayer)
else:
if layer not in layers:
layers.append(layer)
else:
if model.layers[0] not in layers:
layers.append(model.layers[0])
for layer in model.layers[1:]:
if layer not in layers:
layers.append(layer)
return layers
|
<commit_before><commit_msg>Add missing legacy support file.<commit_after>
|
from .layers import Merge
def needs_legacy_support(model):
return isinstance(model.layers[0], Merge)
def legacy_sequential_layers(model):
layers = []
if model.layers:
if isinstance(model.layers[0], Merge):
merge = model.layers[0]
for layer in merge.layers:
if hasattr(layer, 'layers'):
for sublayer in layer.layers:
if sublayer not in layers:
layers.append(sublayer)
else:
if layer not in layers:
layers.append(layer)
else:
if model.layers[0] not in layers:
layers.append(model.layers[0])
for layer in model.layers[1:]:
if layer not in layers:
layers.append(layer)
return layers
|
Add missing legacy support file.from .layers import Merge
def needs_legacy_support(model):
return isinstance(model.layers[0], Merge)
def legacy_sequential_layers(model):
layers = []
if model.layers:
if isinstance(model.layers[0], Merge):
merge = model.layers[0]
for layer in merge.layers:
if hasattr(layer, 'layers'):
for sublayer in layer.layers:
if sublayer not in layers:
layers.append(sublayer)
else:
if layer not in layers:
layers.append(layer)
else:
if model.layers[0] not in layers:
layers.append(model.layers[0])
for layer in model.layers[1:]:
if layer not in layers:
layers.append(layer)
return layers
|
<commit_before><commit_msg>Add missing legacy support file.<commit_after>from .layers import Merge
def needs_legacy_support(model):
return isinstance(model.layers[0], Merge)
def legacy_sequential_layers(model):
layers = []
if model.layers:
if isinstance(model.layers[0], Merge):
merge = model.layers[0]
for layer in merge.layers:
if hasattr(layer, 'layers'):
for sublayer in layer.layers:
if sublayer not in layers:
layers.append(sublayer)
else:
if layer not in layers:
layers.append(layer)
else:
if model.layers[0] not in layers:
layers.append(model.layers[0])
for layer in model.layers[1:]:
if layer not in layers:
layers.append(layer)
return layers
|
|
324f9ca2728614567d038a0ad3c7354655099b59
|
tests/test_connect_cells.py
|
tests/test_connect_cells.py
|
"""
This unit test tests the cells connected in series or parallel
"""
import unittest
import numpy as np
from pypvcell.solarcell import ResistorCell, SeriesConnect, ParallelConnect
class SPTestCase(unittest.TestCase):
def setUp(self):
self.r1 = 1.0
self.r2 = 2.0
self.r1cell = ResistorCell(self.r1)
self.r2cell = ResistorCell(self.r2)
def test_parallel(self):
pc = ParallelConnect([self.r1cell, self.r2cell])
test_c = np.linspace(0.1, 2, 10)
test_v = pc.get_v_from_j(test_c)
expected_r = 1 / (1 / self.r1 + 1 / self.r2)
calc_r = np.mean(test_v / test_c)
self.assertTrue(np.isclose(expected_r, calc_r))
def test_series(self):
sc = SeriesConnect([self.r1cell, self.r2cell])
test_c = np.linspace(0.1, 2, 10)
test_v = sc.get_v_from_j(test_c)
expected_r = self.r1 + self.r2
calc_r = np.mean(test_v / test_c)
self.assertTrue(np.isclose(expected_r, calc_r))
if __name__ == '__main__':
unittest.main()
|
Add unit test of SeriesConnect() and ParallelConnect()
|
Add unit test of SeriesConnect() and ParallelConnect()
|
Python
|
apache-2.0
|
kanhua/pypvcell
|
Add unit test of SeriesConnect() and ParallelConnect()
|
"""
This unit test tests the cells connected in series or parallel
"""
import unittest
import numpy as np
from pypvcell.solarcell import ResistorCell, SeriesConnect, ParallelConnect
class SPTestCase(unittest.TestCase):
def setUp(self):
self.r1 = 1.0
self.r2 = 2.0
self.r1cell = ResistorCell(self.r1)
self.r2cell = ResistorCell(self.r2)
def test_parallel(self):
pc = ParallelConnect([self.r1cell, self.r2cell])
test_c = np.linspace(0.1, 2, 10)
test_v = pc.get_v_from_j(test_c)
expected_r = 1 / (1 / self.r1 + 1 / self.r2)
calc_r = np.mean(test_v / test_c)
self.assertTrue(np.isclose(expected_r, calc_r))
def test_series(self):
sc = SeriesConnect([self.r1cell, self.r2cell])
test_c = np.linspace(0.1, 2, 10)
test_v = sc.get_v_from_j(test_c)
expected_r = self.r1 + self.r2
calc_r = np.mean(test_v / test_c)
self.assertTrue(np.isclose(expected_r, calc_r))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test of SeriesConnect() and ParallelConnect()<commit_after>
|
"""
This unit test tests the cells connected in series or parallel
"""
import unittest
import numpy as np
from pypvcell.solarcell import ResistorCell, SeriesConnect, ParallelConnect
class SPTestCase(unittest.TestCase):
def setUp(self):
self.r1 = 1.0
self.r2 = 2.0
self.r1cell = ResistorCell(self.r1)
self.r2cell = ResistorCell(self.r2)
def test_parallel(self):
pc = ParallelConnect([self.r1cell, self.r2cell])
test_c = np.linspace(0.1, 2, 10)
test_v = pc.get_v_from_j(test_c)
expected_r = 1 / (1 / self.r1 + 1 / self.r2)
calc_r = np.mean(test_v / test_c)
self.assertTrue(np.isclose(expected_r, calc_r))
def test_series(self):
sc = SeriesConnect([self.r1cell, self.r2cell])
test_c = np.linspace(0.1, 2, 10)
test_v = sc.get_v_from_j(test_c)
expected_r = self.r1 + self.r2
calc_r = np.mean(test_v / test_c)
self.assertTrue(np.isclose(expected_r, calc_r))
if __name__ == '__main__':
unittest.main()
|
Add unit test of SeriesConnect() and ParallelConnect()"""
This unit test tests the cells connected in series or parallel
"""
import unittest
import numpy as np
from pypvcell.solarcell import ResistorCell, SeriesConnect, ParallelConnect
class SPTestCase(unittest.TestCase):
def setUp(self):
self.r1 = 1.0
self.r2 = 2.0
self.r1cell = ResistorCell(self.r1)
self.r2cell = ResistorCell(self.r2)
def test_parallel(self):
pc = ParallelConnect([self.r1cell, self.r2cell])
test_c = np.linspace(0.1, 2, 10)
test_v = pc.get_v_from_j(test_c)
expected_r = 1 / (1 / self.r1 + 1 / self.r2)
calc_r = np.mean(test_v / test_c)
self.assertTrue(np.isclose(expected_r, calc_r))
def test_series(self):
sc = SeriesConnect([self.r1cell, self.r2cell])
test_c = np.linspace(0.1, 2, 10)
test_v = sc.get_v_from_j(test_c)
expected_r = self.r1 + self.r2
calc_r = np.mean(test_v / test_c)
self.assertTrue(np.isclose(expected_r, calc_r))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test of SeriesConnect() and ParallelConnect()<commit_after>"""
This unit test tests the cells connected in series or parallel
"""
import unittest
import numpy as np
from pypvcell.solarcell import ResistorCell, SeriesConnect, ParallelConnect
class SPTestCase(unittest.TestCase):
def setUp(self):
self.r1 = 1.0
self.r2 = 2.0
self.r1cell = ResistorCell(self.r1)
self.r2cell = ResistorCell(self.r2)
def test_parallel(self):
pc = ParallelConnect([self.r1cell, self.r2cell])
test_c = np.linspace(0.1, 2, 10)
test_v = pc.get_v_from_j(test_c)
expected_r = 1 / (1 / self.r1 + 1 / self.r2)
calc_r = np.mean(test_v / test_c)
self.assertTrue(np.isclose(expected_r, calc_r))
def test_series(self):
sc = SeriesConnect([self.r1cell, self.r2cell])
test_c = np.linspace(0.1, 2, 10)
test_v = sc.get_v_from_j(test_c)
expected_r = self.r1 + self.r2
calc_r = np.mean(test_v / test_c)
self.assertTrue(np.isclose(expected_r, calc_r))
if __name__ == '__main__':
unittest.main()
|
|
5b3835b3ed49833d3792e50ef54f23cd50c1b907
|
oedb_datamodels/versions/b4e662a73272_nullable_message.py
|
oedb_datamodels/versions/b4e662a73272_nullable_message.py
|
"""Make message nullable
Revision ID: b4e662a73272
Revises: 1a73867b1e79
Create Date: 2019-04-30 09:04:34.330485
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b4e662a73272'
down_revision = '1a73867b1e79'
branch_labels = None
depends_on = None
def upgrade():
op.alter_column('_edit_base', '_message',
existing_type=sa.VARCHAR(length=500),
nullable=False)
def downgrade():
op.alter_column('_edit_base', '_message',
existing_type=sa.VARCHAR(length=500),
nullable=True)
|
Add migration for nullable messages
|
Add migration for nullable messages
|
Python
|
agpl-3.0
|
openego/oeplatform,openego/oeplatform,openego/oeplatform,openego/oeplatform
|
Add migration for nullable messages
|
"""Make message nullable
Revision ID: b4e662a73272
Revises: 1a73867b1e79
Create Date: 2019-04-30 09:04:34.330485
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b4e662a73272'
down_revision = '1a73867b1e79'
branch_labels = None
depends_on = None
def upgrade():
op.alter_column('_edit_base', '_message',
existing_type=sa.VARCHAR(length=500),
nullable=False)
def downgrade():
op.alter_column('_edit_base', '_message',
existing_type=sa.VARCHAR(length=500),
nullable=True)
|
<commit_before><commit_msg>Add migration for nullable messages<commit_after>
|
"""Make message nullable
Revision ID: b4e662a73272
Revises: 1a73867b1e79
Create Date: 2019-04-30 09:04:34.330485
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b4e662a73272'
down_revision = '1a73867b1e79'
branch_labels = None
depends_on = None
def upgrade():
op.alter_column('_edit_base', '_message',
existing_type=sa.VARCHAR(length=500),
nullable=False)
def downgrade():
op.alter_column('_edit_base', '_message',
existing_type=sa.VARCHAR(length=500),
nullable=True)
|
Add migration for nullable messages"""Make message nullable
Revision ID: b4e662a73272
Revises: 1a73867b1e79
Create Date: 2019-04-30 09:04:34.330485
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b4e662a73272'
down_revision = '1a73867b1e79'
branch_labels = None
depends_on = None
def upgrade():
op.alter_column('_edit_base', '_message',
existing_type=sa.VARCHAR(length=500),
nullable=False)
def downgrade():
op.alter_column('_edit_base', '_message',
existing_type=sa.VARCHAR(length=500),
nullable=True)
|
<commit_before><commit_msg>Add migration for nullable messages<commit_after>"""Make message nullable
Revision ID: b4e662a73272
Revises: 1a73867b1e79
Create Date: 2019-04-30 09:04:34.330485
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b4e662a73272'
down_revision = '1a73867b1e79'
branch_labels = None
depends_on = None
def upgrade():
op.alter_column('_edit_base', '_message',
existing_type=sa.VARCHAR(length=500),
nullable=False)
def downgrade():
op.alter_column('_edit_base', '_message',
existing_type=sa.VARCHAR(length=500),
nullable=True)
|
|
5a1f481a57d356d8995ccce9ad29cd6e83765b10
|
scripts/tag_manylinux.py
|
scripts/tag_manylinux.py
|
from auditwheel.wheeltools import InWheelCtx, add_platforms
import click
import os
@click.command()
@click.argument('wheel', type=click.Path(exists=True))
def main(wheel):
dir = os.path.dirname(os.path.abspath(wheel))
with InWheelCtx(wheel) as ctx:
try:
new_wheel = add_platforms(ctx, ['manylinux1_x86_64'], remove_platforms=('linux_x86_64',))
except WheelToolsError as e:
click.echo(str(e), err=True)
raise
if new_wheel:
ctx.out_wheel = os.path.normpath(os.path.join(dir, new_wheel))
click.echo('Updated wheel written to %s' % ctx.out_wheel)
if __name__ == "__main__":
main()
|
Add script to fix up wheel tag to manylinux
|
Add script to fix up wheel tag to manylinux
We use this instead of auditwheel directly because we have taken
care of the RPATH and auditwheel get confused.
Signed-off-by: Chris Harris <a361e89d1eba6c570561222d75facbbf7aaeeafe@kitware.com>
|
Python
|
bsd-3-clause
|
ghutchis/avogadrolibs,OpenChemistry/avogadrolibs,OpenChemistry/avogadrolibs,ghutchis/avogadrolibs,ghutchis/avogadrolibs,ghutchis/avogadrolibs,OpenChemistry/avogadrolibs,ghutchis/avogadrolibs,OpenChemistry/avogadrolibs,OpenChemistry/avogadrolibs
|
Add script to fix up wheel tag to manylinux
We use this instead of auditwheel directly because we have taken
care of the RPATH and auditwheel get confused.
Signed-off-by: Chris Harris <a361e89d1eba6c570561222d75facbbf7aaeeafe@kitware.com>
|
from auditwheel.wheeltools import InWheelCtx, add_platforms
import click
import os
@click.command()
@click.argument('wheel', type=click.Path(exists=True))
def main(wheel):
dir = os.path.dirname(os.path.abspath(wheel))
with InWheelCtx(wheel) as ctx:
try:
new_wheel = add_platforms(ctx, ['manylinux1_x86_64'], remove_platforms=('linux_x86_64',))
except WheelToolsError as e:
click.echo(str(e), err=True)
raise
if new_wheel:
ctx.out_wheel = os.path.normpath(os.path.join(dir, new_wheel))
click.echo('Updated wheel written to %s' % ctx.out_wheel)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to fix up wheel tag to manylinux
We use this instead of auditwheel directly because we have taken
care of the RPATH and auditwheel get confused.
Signed-off-by: Chris Harris <a361e89d1eba6c570561222d75facbbf7aaeeafe@kitware.com><commit_after>
|
from auditwheel.wheeltools import InWheelCtx, add_platforms
import click
import os
@click.command()
@click.argument('wheel', type=click.Path(exists=True))
def main(wheel):
dir = os.path.dirname(os.path.abspath(wheel))
with InWheelCtx(wheel) as ctx:
try:
new_wheel = add_platforms(ctx, ['manylinux1_x86_64'], remove_platforms=('linux_x86_64',))
except WheelToolsError as e:
click.echo(str(e), err=True)
raise
if new_wheel:
ctx.out_wheel = os.path.normpath(os.path.join(dir, new_wheel))
click.echo('Updated wheel written to %s' % ctx.out_wheel)
if __name__ == "__main__":
main()
|
Add script to fix up wheel tag to manylinux
We use this instead of auditwheel directly because we have taken
care of the RPATH and auditwheel get confused.
Signed-off-by: Chris Harris <a361e89d1eba6c570561222d75facbbf7aaeeafe@kitware.com>from auditwheel.wheeltools import InWheelCtx, add_platforms
import click
import os
@click.command()
@click.argument('wheel', type=click.Path(exists=True))
def main(wheel):
dir = os.path.dirname(os.path.abspath(wheel))
with InWheelCtx(wheel) as ctx:
try:
new_wheel = add_platforms(ctx, ['manylinux1_x86_64'], remove_platforms=('linux_x86_64',))
except WheelToolsError as e:
click.echo(str(e), err=True)
raise
if new_wheel:
ctx.out_wheel = os.path.normpath(os.path.join(dir, new_wheel))
click.echo('Updated wheel written to %s' % ctx.out_wheel)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to fix up wheel tag to manylinux
We use this instead of auditwheel directly because we have taken
care of the RPATH and auditwheel get confused.
Signed-off-by: Chris Harris <a361e89d1eba6c570561222d75facbbf7aaeeafe@kitware.com><commit_after>from auditwheel.wheeltools import InWheelCtx, add_platforms
import click
import os
@click.command()
@click.argument('wheel', type=click.Path(exists=True))
def main(wheel):
dir = os.path.dirname(os.path.abspath(wheel))
with InWheelCtx(wheel) as ctx:
try:
new_wheel = add_platforms(ctx, ['manylinux1_x86_64'], remove_platforms=('linux_x86_64',))
except WheelToolsError as e:
click.echo(str(e), err=True)
raise
if new_wheel:
ctx.out_wheel = os.path.normpath(os.path.join(dir, new_wheel))
click.echo('Updated wheel written to %s' % ctx.out_wheel)
if __name__ == "__main__":
main()
|