| column | type | length / classes |
|---|---|---|
| commit | string | 40–40 |
| old_file | string | 4–118 |
| new_file | string | 4–118 |
| old_contents | string | 0–2.94k |
| new_contents | string | 1–4.43k |
| subject | string | 15–444 |
| message | string | 16–3.45k |
| lang | class label | 1 value |
| license | class label | 13 values |
| repos | string | 5–43.2k |
| prompt | string | 17–4.58k |
| response | string | 1–4.43k |
| prompt_tagged | string | 58–4.62k |
| response_tagged | string | 1–4.43k |
| text | string | 132–7.29k |
| text_tagged | string | 173–7.33k |
ebd42aabb39495f5ba22726073eeca5f9159ac7f
|
test/unit/sorting/test_merge_sort.py
|
test/unit/sorting/test_merge_sort.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
from helper.read_data_file import read_int_array
from sorting.merge_sort import sort
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class InsertionSortTester(unittest.TestCase):
# Test sort in default order, i.e., in ascending order.
def test_sort_default(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array)
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in ascending order.
def test_sort_ascending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'asc')
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in descending order.
def test_sort_descending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'desc')
expect = [969, 864, 702, 647, 637, 589, 567, 445, 444, 417, 140, 113, 86, 76, 65]
self.assertEqual(expect, array)
if __name__ == '__main__':
unittest.main()
|
Add unit test for merge sort implementation.
|
Add unit test for merge sort implementation.
|
Python
|
mit
|
weichen2046/algorithm-study,weichen2046/algorithm-study
|
Add unit test for merge sort implementation.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
from helper.read_data_file import read_int_array
from sorting.merge_sort import sort
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class InsertionSortTester(unittest.TestCase):
# Test sort in default order, i.e., in ascending order.
def test_sort_default(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array)
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in ascending order.
def test_sort_ascending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'asc')
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in descending order.
def test_sort_descending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'desc')
expect = [969, 864, 702, 647, 637, 589, 567, 445, 444, 417, 140, 113, 86, 76, 65]
self.assertEqual(expect, array)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for merge sort implementation.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
from helper.read_data_file import read_int_array
from sorting.merge_sort import sort
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class InsertionSortTester(unittest.TestCase):
# Test sort in default order, i.e., in ascending order.
def test_sort_default(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array)
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in ascending order.
def test_sort_ascending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'asc')
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in descending order.
def test_sort_descending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'desc')
expect = [969, 864, 702, 647, 637, 589, 567, 445, 444, 417, 140, 113, 86, 76, 65]
self.assertEqual(expect, array)
if __name__ == '__main__':
unittest.main()
|
Add unit test for merge sort implementation.#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
from helper.read_data_file import read_int_array
from sorting.merge_sort import sort
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class InsertionSortTester(unittest.TestCase):
# Test sort in default order, i.e., in ascending order.
def test_sort_default(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array)
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in ascending order.
def test_sort_ascending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'asc')
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in descending order.
def test_sort_descending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'desc')
expect = [969, 864, 702, 647, 637, 589, 567, 445, 444, 417, 140, 113, 86, 76, 65]
self.assertEqual(expect, array)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for merge sort implementation.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
from helper.read_data_file import read_int_array
from sorting.merge_sort import sort
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class InsertionSortTester(unittest.TestCase):
# Test sort in default order, i.e., in ascending order.
def test_sort_default(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array)
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in ascending order.
def test_sort_ascending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'asc')
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in descending order.
def test_sort_descending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'desc')
expect = [969, 864, 702, 647, 637, 589, 567, 445, 444, 417, 140, 113, 86, 76, 65]
self.assertEqual(expect, array)
if __name__ == '__main__':
unittest.main()
|
|
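The row above tests a `sort` function imported from `sorting.merge_sort` that takes an optional `'asc'`/`'desc'` order flag; the tester class keeps the name `InsertionSortTester`, apparently carried over from an earlier test file. The implementation itself is not part of the row, so the following is only a minimal sketch of a merge sort matching the tested interface:

```python
# Sketch only: the real sorting.merge_sort module is not shown in this row.
def sort(array, order='asc'):
    """Merge-sort a list; order is 'asc' (default) or 'desc'."""
    if len(array) <= 1:
        return list(array)
    mid = len(array) // 2
    left, right = sort(array[:mid], order), sort(array[mid:], order)
    merged, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        # Take from the left run when it wins under the requested order.
        if (left[i] <= right[j]) == (order == 'asc'):
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    return merged + left[i:] + right[j:]
```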
a9f3a9213268fe41489b58e6ce61bf8d726b9e6f
|
shopcart.py
|
shopcart.py
|
class Shopcart(object):
"""
This is the model for the shop carts
Assumptions:
- In memory persistence
- The fields of a shopcart include:
- user id
- list of products (product id, quantity of product )
- The total price of the shopcart will be dynamically calculated
- Model should only initially include `init` method and shopcart fields
"""
__data = []
__index = 0
__products = dict()
def __init__(self, uid=0, products=None):
"""
:param uid: user id
:param products: dict of products <products id, quantity of product>
"""
self.uid = int(uid)
self.products = products
|
Add the data model of Shopcart
|
Add the data model of Shopcart
|
Python
|
apache-2.0
|
nyu-devops-echo/shopcarts,nyu-devops-echo/shopcarts,nyu-devops-echo/shopcarts,nyu-devops-echo/shopcarts
|
Add the data model of Shopcart
|
class Shopcart(object):
"""
This is the model for the shop carts
Assumptions:
- In memory persistence
- The fields of a shopcart include:
- user id
- list of products (product id, quantity of product )
- The total price of the shopcart will be dynamically calculated
- Model should only initially include `init` method and shopcart fields
"""
__data = []
__index = 0
__products = dict()
def __init__(self, uid=0, products=None):
"""
:param uid: user id
:param products: dict of products <products id, quantity of product>
"""
self.uid = int(uid)
self.products = products
|
<commit_before><commit_msg>Add the data model of Shopcart<commit_after>
|
class Shopcart(object):
"""
This is the model for the shop carts
Assumptions:
- In memory persistence
- The fields of a shopcart include:
- user id
- list of products (product id, quantity of product )
- The total price of the shopcart will be dynamically calculated
- Model should only initially include `init` method and shopcart fields
"""
__data = []
__index = 0
__products = dict()
def __init__(self, uid=0, products=None):
"""
:param uid: user id
:param products: dict of products <products id, quantity of product>
"""
self.uid = int(uid)
self.products = products
|
Add the data model of Shopcartclass Shopcart(object):
"""
This is the model for the shop carts
Assumptions:
- In memory persistence
- The fields of a shopcart include:
- user id
- list of products (product id, quantity of product )
- The total price of the shopcart will be dynamically calculated
- Model should only initially include `init` method and shopcart fields
"""
__data = []
__index = 0
__products = dict()
def __init__(self, uid=0, products=None):
"""
:param uid: user id
:param products: dict of products <products id, quantity of product>
"""
self.uid = int(uid)
self.products = products
|
<commit_before><commit_msg>Add the data model of Shopcart<commit_after>class Shopcart(object):
"""
This is the model for the shop carts
Assumptions:
- In memory persistence
- The fields of a shopcart include:
- user id
- list of products (product id, quantity of product )
- The total price of the shopcart will be dynamically calculated
- Model should only initially include `init` method and shopcart fields
"""
__data = []
__index = 0
__products = dict()
def __init__(self, uid=0, products=None):
"""
:param uid: user id
:param products: dict of products <products id, quantity of product>
"""
self.uid = int(uid)
self.products = products
|
|
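Because the model keeps products as an id-to-quantity mapping and leaves the total to be computed dynamically, a usage sketch follows; the price table is an assumption, since no price data appears in the commit:

```python
# Hypothetical usage of the Shopcart model above; `prices` is assumed.
cart = Shopcart(uid=1, products={101: 2, 205: 1})
prices = {101: 9.99, 205: 24.50}  # assumed external price lookup
total = sum(prices[pid] * qty for pid, qty in cart.products.items())
print(total)  # 44.48, up to float rounding
```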
2d2d186002a9b7bcb8f0b5c1e9325c9bc9354f26
|
Ex03/chebNodesWeights.py
|
Ex03/chebNodesWeights.py
|
import numpy as np
from scipy.interpolate import lagrange
def chebNodes(N):
"Returns N Chebyshev nodes in (-1,1)."
return np.array([np.cos(
np.pi * (2*k-1)/(2*N)) for k in range(1,N+1)])
def l(j,q):
"Returns the j-th lagrange basis polynomial on the nodes q."
myf = len(q)*[0]
myf[j] = 1
return lagrange(q, myf)
def intL(j,q):
"""Returns the definite integral between -1, 1 of the the
j-th lagrange basis polynomial on the nodes q."""
return l(j,q).integ()(1) - l(j,q).integ()(-1)
def getWeights(q):
"Returns weights associated with nodes q, using lagrange interp."
return [intL(j,q) for j in range(len(q))]
# Documentation states that Lagrange polynomials become numerically unstable
# after 20 nodes; we try up to 30.
for n in range(30):
for x in getWeights(chebNodes(n)):
if x <= 0:
print(n, x)
raise Exception("Found negative weight.")
print("No negative weights found for n<30.\n")
# Actually for 37 nodes we get the first negative weights.
print("Negative weights for 37 Cheb nodes:")
cheb37 = getWeights(chebNodes(37))
for x in cheb37:
if x <= 0:
print(x)
# Is this due to numerical errors? To answer this question, notice that Cheb
# weights, as they were defined, should be symmetric with respect to 0.
# However for n=37:
print("\nThe following pairs should be equal:")
for j in range(18):
print(cheb37[j],cheb37[36-j])
|
Add minimal standalone code to test Cheb weights negativity
|
Add minimal standalone code to test Cheb weights negativity
|
Python
|
mit
|
adabrow/NumAnEx2014
|
Add minimal standalone code to test Cheb weights negativity
|
import numpy as np
from scipy.interpolate import lagrange
def chebNodes(N):
"Returns N Chebyshev nodes in (-1,1)."
return np.array([np.cos(
np.pi * (2*k-1)/(2*N)) for k in range(1,N+1)])
def l(j,q):
"Returns the j-th lagrange basis polynomial on the nodes q."
myf = len(q)*[0]
myf[j] = 1
return lagrange(q, myf)
def intL(j,q):
"""Returns the definite integral between -1, 1 of the the
j-th lagrange basis polynomial on the nodes q."""
return l(j,q).integ()(1) - l(j,q).integ()(-1)
def getWeights(q):
"Returns weights associated with nodes q, using lagrange interp."
return [intL(j,q) for j in range(len(q))]
# Documentation states that Lagrange polynomials become numerically unstable
# after 20 nodes; we try up to 30.
for n in range(30):
for x in getWeights(chebNodes(n)):
if x <= 0:
print(n, x)
raise Exception("Found negative weight.")
print("No negative weights found for n<30.\n")
# Actually for 37 nodes we get the first negative weights.
print("Negative weights for 37 Cheb nodes:")
cheb37 = getWeights(chebNodes(37))
for x in cheb37:
if x <= 0:
print(x)
# Is this due to numerical errors? To answer this question, notice that Cheb
# weights, as they were defined, should be symmetric with respect to 0.
# However for n=37:
print("\nThe following pairs should be equal:")
for j in range(18):
print(cheb37[j],cheb37[36-j])
|
<commit_before><commit_msg>Add minimal standalone code to test Cheb weights negativity<commit_after>
|
import numpy as np
from scipy.interpolate import lagrange
def chebNodes(N):
"Returns N Chebyshev nodes in (-1,1)."
return np.array([np.cos(
np.pi * (2*k-1)/(2*N)) for k in range(1,N+1)])
def l(j,q):
"Returns the j-th lagrange basis polynomial on the nodes q."
myf = len(q)*[0]
myf[j] = 1
return lagrange(q, myf)
def intL(j,q):
"""Returns the definite integral between -1, 1 of the the
j-th lagrange basis polynomial on the nodes q."""
return l(j,q).integ()(1) - l(j,q).integ()(-1)
def getWeights(q):
"Returns weights associated with nodes q, using lagrange interp."
return [intL(j,q) for j in range(len(q))]
# Documentation states that Lagrange polynomials become numerically unstable
# after 20 nodes; we try up to 30.
for n in range(30):
for x in getWeights(chebNodes(n)):
if x <= 0:
print(n, x)
raise Exception("Found negative weight.")
print("No negative weights found for n<30.\n")
# Actually for 37 nodes we get the first negative weights.
print("Negative weights for 37 Cheb nodes:")
cheb37 = getWeights(chebNodes(37))
for x in cheb37:
if x <= 0:
print(x)
# Is this due to numerical errors? To answer this question, notice that Cheb
# weights, as they were defined, should be symmetric with respect to 0.
# However for n=37:
print("\nThe following pairs should be equal:")
for j in range(18):
print(cheb37[j],cheb37[36-j])
|
Add minimal standalone code to test Cheb weights negativityimport numpy as np
from scipy.interpolate import lagrange
def chebNodes(N):
"Returns N Chebyshev nodes in (-1,1)."
return np.array([np.cos(
np.pi * (2*k-1)/(2*N)) for k in range(1,N+1)])
def l(j,q):
"Returns the j-th lagrange basis polynomial on the nodes q."
myf = len(q)*[0]
myf[j] = 1
return lagrange(q, myf)
def intL(j,q):
"""Returns the definite integral between -1, 1 of the the
j-th lagrange basis polynomial on the nodes q."""
return l(j,q).integ()(1) - l(j,q).integ()(-1)
def getWeights(q):
"Returns weights associated with nodes q, using lagrange interp."
return [intL(j,q) for j in range(len(q))]
# Documentation states that Lagrange polynomials become numerically unstable
# after 20 nodes; we try up to 30.
for n in range(30):
for x in getWeights(chebNodes(n)):
if x <= 0:
print(n, x)
raise Exception("Found negative weight.")
print("No negative weights found for n<30.\n")
# Actually for 37 nodes we get the first negative weights.
print("Negative weights for 37 Cheb nodes:")
cheb37 = getWeights(chebNodes(37))
for x in cheb37:
if x <= 0:
print(x)
# Is this due to numerical errors? To answer this question, notice that Cheb
# weights, as they were defined, should be symmetric with respect to 0.
# However for n=37:
print("\nThe following pairs should be equal:")
for j in range(18):
print(cheb37[j],cheb37[36-j])
|
<commit_before><commit_msg>Add minimal standalone code to test Cheb weights negativity<commit_after>import numpy as np
from scipy.interpolate import lagrange
def chebNodes(N):
"Returns N Chebyshev nodes in (-1,1)."
return np.array([np.cos(
np.pi * (2*k-1)/(2*N)) for k in range(1,N+1)])
def l(j,q):
"Returns the j-th lagrange basis polynomial on the nodes q."
myf = len(q)*[0]
myf[j] = 1
return lagrange(q, myf)
def intL(j,q):
"""Returns the definite integral between -1, 1 of the the
j-th lagrange basis polynomial on the nodes q."""
return l(j,q).integ()(1) - l(j,q).integ()(-1)
def getWeights(q):
"Returns weights associated with nodes q, using lagrange interp."
return [intL(j,q) for j in range(len(q))]
# Documentation states that Lagrange polynomials become numerically unstable
# after 20 nodes; we try up to 30.
for n in range(30):
for x in getWeights(chebNodes(n)):
if x <= 0:
print(n, x)
raise Exception("Found negative weight.")
print("No negative weights found for n<30.\n")
# Actually for 37 nodes we get the first negative weights.
print("Negative weights for 37 Cheb nodes:")
cheb37 = getWeights(chebNodes(37))
for x in cheb37:
if x <= 0:
print(x)
# Is this due to numerical errors? To answer this question, notice that Cheb
# weights, as they were defined, should be symmetric with respect to 0.
# However for n=37:
print("\nThe following pairs should be equal:")
for j in range(18):
print(cheb37[j],cheb37[36-j])
|
|
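The symmetry check above suggests the negative weights are a floating-point artifact, and a closed form confirms it: Fejér's first quadrature rule uses exactly these Chebyshev nodes and its weights are provably positive. A cross-check sketch, assuming the standard Fejér-I weight formula (not part of the commit):

```python
import numpy as np

def fejer1_weights(N):
    """Closed-form Fejer-I weights on the N Chebyshev nodes used above."""
    k = np.arange(1, N + 1)
    theta = np.pi * (2 * k - 1) / (2 * N)
    m = np.arange(1, N // 2 + 1)
    # w_k = (2/N) * (1 - 2 * sum_m cos(2*m*theta_k) / (4*m^2 - 1))
    return (2.0 / N) * (1 - 2 * np.cos(2 * np.outer(theta, m)).dot(1.0 / (4 * m ** 2 - 1)))

print(np.all(fejer1_weights(37) > 0))  # True: the exact weights stay positive
```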
8cbc27c7c3af16c53fe1033530e69c359606d5d6
|
scripts/find_pubchem_removables.py
|
scripts/find_pubchem_removables.py
|
"""This script helps identify entries in PubChem.tsv that systematically
lead to incorrect groundings and should therefore be removed."""
import os
import re
from indra.databases import chebi_client
if __name__ == '__main__':
# Basic positioning
here = os.path.dirname(os.path.abspath(__file__))
kb_dir = os.path.join(here, os.pardir, 'src', 'main', 'resources', 'org',
'clulab', 'reach', 'kb')
resource_fname = os.path.join(kb_dir, 'PubChem.tsv')
keep_rows = []
with open(resource_fname, 'r') as fh:
for row in fh.readlines():
if '\t' not in row:
continue
txt, id = [x.strip() for x in row.split('\t')]
if re.match(r'^[A-Z][A-Z]$', txt):
chebi_id = chebi_client.get_chebi_id_from_pubchem(id)
name = chebi_client.get_chebi_name_from_id(chebi_id)
if name and '-' in name and len(name) == 7:
continue
keep_rows.append(row)
with open(resource_fname, 'w') as fh:
for row in keep_rows:
fh.write(row)
|
Add script to find removables from PubChem
|
Add script to find removables from PubChem
|
Python
|
apache-2.0
|
clulab/bioresources
|
Add script to find removables from PubChem
|
"""This script helps identify entries in PubChem.tsv that systematically
lead to incorrect groundings and should therefore be removed."""
import os
import re
from indra.databases import chebi_client
if __name__ == '__main__':
# Basic positioning
here = os.path.dirname(os.path.abspath(__file__))
kb_dir = os.path.join(here, os.pardir, 'src', 'main', 'resources', 'org',
'clulab', 'reach', 'kb')
resource_fname = os.path.join(kb_dir, 'PubChem.tsv')
keep_rows = []
with open(resource_fname, 'r') as fh:
for row in fh.readlines():
if '\t' not in row:
continue
txt, id = [x.strip() for x in row.split('\t')]
if re.match(r'^[A-Z][A-Z]$', txt):
chebi_id = chebi_client.get_chebi_id_from_pubchem(id)
name = chebi_client.get_chebi_name_from_id(chebi_id)
if name and '-' in name and len(name) == 7:
continue
keep_rows.append(row)
with open(resource_fname, 'w') as fh:
for row in keep_rows:
fh.write(row)
|
<commit_before><commit_msg>Add script to find removables from PubChem<commit_after>
|
"""This script helps identify entries in PubChem.tsv that systematically
lead to incorrect groundings and should therefore be removed."""
import os
import re
from indra.databases import chebi_client
if __name__ == '__main__':
# Basic positioning
here = os.path.dirname(os.path.abspath(__file__))
kb_dir = os.path.join(here, os.pardir, 'src', 'main', 'resources', 'org',
'clulab', 'reach', 'kb')
resource_fname = os.path.join(kb_dir, 'PubChem.tsv')
keep_rows = []
with open(resource_fname, 'r') as fh:
for row in fh.readlines():
if '\t' not in row:
continue
txt, id = [x.strip() for x in row.split('\t')]
if re.match(r'^[A-Z][A-Z]$', txt):
chebi_id = chebi_client.get_chebi_id_from_pubchem(id)
name = chebi_client.get_chebi_name_from_id(chebi_id)
if name and '-' in name and len(name) == 7:
continue
keep_rows.append(row)
with open(resource_fname, 'w') as fh:
for row in keep_rows:
fh.write(row)
|
Add script to find removables from PubChem"""This script helps identify entries in PubChem.tsv that systematically
lead to incorrect groundings and should therefore be removed."""
import os
import re
from indra.databases import chebi_client
if __name__ == '__main__':
# Basic positioning
here = os.path.dirname(os.path.abspath(__file__))
kb_dir = os.path.join(here, os.pardir, 'src', 'main', 'resources', 'org',
'clulab', 'reach', 'kb')
resource_fname = os.path.join(kb_dir, 'PubChem.tsv')
keep_rows = []
with open(resource_fname, 'r') as fh:
for row in fh.readlines():
if '\t' not in row:
continue
txt, id = [x.strip() for x in row.split('\t')]
if re.match(r'^[A-Z][A-Z]$', txt):
chebi_id = chebi_client.get_chebi_id_from_pubchem(id)
name = chebi_client.get_chebi_name_from_id(chebi_id)
if name and '-' in name and len(name) == 7:
continue
keep_rows.append(row)
with open(resource_fname, 'w') as fh:
for row in keep_rows:
fh.write(row)
|
<commit_before><commit_msg>Add script to find removables from PubChem<commit_after>"""This script helps identify entries in PubChem.tsv that systematically
lead to incorrect groundings and should therefore be removed."""
import os
import re
from indra.databases import chebi_client
if __name__ == '__main__':
# Basic positioning
here = os.path.dirname(os.path.abspath(__file__))
kb_dir = os.path.join(here, os.pardir, 'src', 'main', 'resources', 'org',
'clulab', 'reach', 'kb')
resource_fname = os.path.join(kb_dir, 'PubChem.tsv')
keep_rows = []
with open(resource_fname, 'r') as fh:
for row in fh.readlines():
if '\t' not in row:
continue
txt, id = [x.strip() for x in row.split('\t')]
if re.match(r'^[A-Z][A-Z]$', txt):
chebi_id = chebi_client.get_chebi_id_from_pubchem(id)
name = chebi_client.get_chebi_name_from_id(chebi_id)
if name and '-' in name and len(name) == 7:
continue
keep_rows.append(row)
with open(resource_fname, 'w') as fh:
for row in keep_rows:
fh.write(row)
|
|
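The filter removes two-capital-letter synonyms whose ChEBI grounding is a seven-character hyphenated name, i.e. dipeptide-style abbreviations that ground incorrectly. A self-contained illustration of the pattern; the synonym/name pair is hypothetical:

```python
import re

txt, name = 'AG', 'Ala-Gly'  # hypothetical synonym / ChEBI-name pair
is_two_caps = bool(re.match(r'^[A-Z][A-Z]$', txt))
looks_like_dipeptide = '-' in name and len(name) == 7
print(is_two_caps and looks_like_dipeptide)  # True -> the row would be dropped
```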
a9f11f6c7ba8f9d91e388bb8dd93e600f521a814
|
app/api/cruds/event_crud.py
|
app/api/cruds/event_crud.py
|
import graphene
from graphene_django import DjangoObjectType
from django_filters import OrderingFilter, FilterSet
from app.timetables.models import Event
from .timetable_crud import TimetableNode
class EventNode(DjangoObjectType):
original_id = graphene.Int()
class Meta:
model = Event
interfaces = (graphene.relay.Node, )
def resolve_original_id(self, args, context, info):
return self.id
class EventFilter(FilterSet):
order_by = OrderingFilter(fields=[('name', 'name'),
('timetable', 'timetable'),
('action', 'action'),
('start_date', 'start_date'),
('end_date', 'end_date')])
class Meta:
fields = {
'name': ['icontains']
}
model = Event
class EventNode(DjangoObjectType):
original_id = graphene.Int()
timetable = DjangoConnectionField(lambda: TimetableNode)
class Meta:
model = Event
interfaces = (graphene.relay.Node, )
def resolve_original_id(self, args, context, info):
return self.id
|
Add Event filter and Node
|
Add Event filter and Node
|
Python
|
mit
|
teamtaverna/core
|
Add Event filter and Node
|
import graphene
from graphene_django import DjangoObjectType
from django_filters import OrderingFilter, FilterSet
from app.timetables.models import Event
from .timetable_crud import TimetableNode
class EventNode(DjangoObjectType):
original_id = graphene.Int()
class Meta:
model = Event
interfaces = (graphene.relay.Node, )
def resolve_original_id(self, args, context, info):
return self.id
class EventFilter(FilterSet):
order_by = OrderingFilter(fields=[('name', 'name'),
('timetable', 'timetable'),
('action', 'action'),
('start_date', 'start_date'),
('end_date', 'end_date')])
class Meta:
fields = {
'name': ['icontains']
}
model = Event
class EventNode(DjangoObjectType):
original_id = graphene.Int()
timetable = DjangoConnectionField(lambda: TimetableNode)
class Meta:
model = Event
interfaces = (graphene.relay.Node, )
def resolve_original_id(self, args, context, info):
return self.id
|
<commit_before><commit_msg>Add Event filter and Node<commit_after>
|
import graphene
from graphene_django import DjangoObjectType
from django_filters import OrderingFilter, FilterSet
from app.timetables.models import Event
from .timetable_crud import TimetableNode
class EventNode(DjangoObjectType):
original_id = graphene.Int()
class Meta:
model = Event
interfaces = (graphene.relay.Node, )
def resolve_original_id(self, args, context, info):
return self.id
class EventFilter(FilterSet):
order_by = OrderingFilter(fields=[('name', 'name'),
('timetable', 'timetable'),
('action', 'action'),
('start_date', 'start_date'),
('end_date', 'end_date')])
class Meta:
fields = {
'name': ['icontains']
}
model = Event
class EventNode(DjangoObjectType):
original_id = graphene.Int()
timetable = DjangoConnectionField(lambda: TimetableNode)
class Meta:
model = Event
interfaces = (graphene.relay.Node, )
def resolve_original_id(self, args, context, info):
return self.id
|
Add Event filter and Nodeimport graphene
from graphene_django import DjangoObjectType
from django_filters import OrderingFilter, FilterSet
from app.timetables.models import Event
from .timetable_crud import TimetableNode
class EventNode(DjangoObjectType):
original_id = graphene.Int()
class Meta:
model = Event
interfaces = (graphene.relay.Node, )
def resolve_original_id(self, args, context, info):
return self.id
class EventFilter(FilterSet):
order_by = OrderingFilter(fields=[('name', 'name'),
('timetable', 'timetable'),
('action', 'action'),
('start_date', 'start_date'),
('end_date', 'end_date')])
class Meta:
fields = {
'name': ['icontains']
}
model = Event
class EventNode(DjangoObjectType):
original_id = graphene.Int()
timetable = DjangoConnectionField(lambda: TimetableNode)
class Meta:
model = Event
interfaces = (graphene.relay.Node, )
def resolve_original_id(self, args, context, info):
return self.id
|
<commit_before><commit_msg>Add Event filter and Node<commit_after>import graphene
from graphene_django import DjangoObjectType
from django_filters import OrderingFilter, FilterSet
from app.timetables.models import Event
from .timetable_crud import TimetableNode
class EventNode(DjangoObjectType):
original_id = graphene.Int()
class Meta:
model = Event
interfaces = (graphene.relay.Node, )
def resolve_original_id(self, args, context, info):
return self.id
class EventFilter(FilterSet):
order_by = OrderingFilter(fields=[('name', 'name'),
('timetable', 'timetable'),
('action', 'action'),
('start_date', 'start_date'),
('end_date', 'end_date')])
class Meta:
fields = {
'name': ['icontains']
}
model = Event
class EventNode(DjangoObjectType):
original_id = graphene.Int()
timetable = DjangoConnectionField(lambda: TimetableNode)
class Meta:
model = Event
interfaces = (graphene.relay.Node, )
def resolve_original_id(self, args, context, info):
return self.id
|
|
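As committed, the module defines `EventNode` twice (the second definition shadows the first) and never imports `DjangoConnectionField`, so importing the module raises `NameError`. A sketch of the presumable intent, assuming graphene-django's connection field and reusing the commit's own `Event` and `TimetableNode` imports:

```python
import graphene
from graphene_django import DjangoObjectType
from graphene_django.fields import DjangoConnectionField  # missing in the commit

class EventNode(DjangoObjectType):  # single, merged definition
    original_id = graphene.Int()
    timetable = DjangoConnectionField(lambda: TimetableNode)

    class Meta:
        model = Event
        interfaces = (graphene.relay.Node, )

    def resolve_original_id(self, args, context, info):
        return self.id
```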
7d60c4af17f59a63a2d861bc20d3eecececea1b1
|
pymanopt/tools/autodiff/_pytorch.py
|
pymanopt/tools/autodiff/_pytorch.py
|
"""
Module containing functions to differentiate functions using pytorch.
"""
try:
import torch
except ImportError:
torch = None
else:
from torch import autograd
from ._backend import Backend, assert_backend_available
class PyTorchBackend(Backend):
def __str__(self):
return "pytorch"
@staticmethod
def is_available():
# XXX: PyTorch 0.4 unified the Tensor and Variable API. Higher-order
# derivatives to compute Hessian-vector products were introduced
# in 0.2 so we should make that the first supported version.
# However, supporting both Tensor and Variable requires a bit more
# work that we'll skip for now.
return torch is not None and torch.__version__ >= "0.4"
@assert_backend_available
def is_compatible(self, objective, argument):
return callable(objective)
@assert_backend_available
def compile_function(self, objective, argument):
def func(x):
# PyTorch unboxes scalars automatically, but we still need to get a
# numpy view of the data when "compiling" gradients or Hessians.
f = objective(torch.from_numpy(x))
try:
return f.numpy()
except AttributeError:
pass
return f
return func
@assert_backend_available
def compute_gradient(self, objective, argument):
def grad(x):
x = torch.from_numpy(x)
x.requires_grad_(True)
objective(x).backward()
g = x.grad
# See above.
try:
return g.numpy()
except AttributeError:
pass
return g
return grad
@assert_backend_available
def compute_hessian(self, objective, argument):
def hess(x, v):
x = torch.from_numpy(x)
v = torch.from_numpy(v)
x.requires_grad_(True)
fx = objective(x)
grad_fx, *_ = autograd.grad(fx, x, create_graph=True)
grad_fx.matmul(v).backward()
g = x.grad
# See above.
try:
return g.numpy()
except AttributeError:
pass
return g
return hess
|
Add first implementation of pytorch backend
|
Add first implementation of pytorch backend
Signed-off-by: Niklas Koep <342d5290239d9c5264c8f98185afedb99596601a@gmail.com>
|
Python
|
bsd-3-clause
|
pymanopt/pymanopt,pymanopt/pymanopt,nkoep/pymanopt,nkoep/pymanopt,nkoep/pymanopt
|
Add first implementation of pytorch backend
Signed-off-by: Niklas Koep <342d5290239d9c5264c8f98185afedb99596601a@gmail.com>
|
"""
Module containing functions to differentiate functions using pytorch.
"""
try:
import torch
except ImportError:
torch = None
else:
from torch import autograd
from ._backend import Backend, assert_backend_available
class PyTorchBackend(Backend):
def __str__(self):
return "pytorch"
@staticmethod
def is_available():
# XXX: PyTorch 0.4 unified the Tensor and Variable API. Higher-order
# derivatives to compute Hessian-vector products were introduced
# in 0.2 so we should make that the first supported version.
# However, supporting both Tensor and Variable requires a bit more
# work that we'll skip for now.
return torch is not None and torch.__version__ >= "0.4"
@assert_backend_available
def is_compatible(self, objective, argument):
return callable(objective)
@assert_backend_available
def compile_function(self, objective, argument):
def func(x):
# PyTorch unboxes scalars automatically, but we still need to get a
# numpy view of the data when "compiling" gradients or Hessians.
f = objective(torch.from_numpy(x))
try:
return f.numpy()
except AttributeError:
pass
return f
return func
@assert_backend_available
def compute_gradient(self, objective, argument):
def grad(x):
x = torch.from_numpy(x)
x.requires_grad_(True)
objective(x).backward()
g = x.grad
# See above.
try:
return g.numpy()
except AttributeError:
pass
return g
return grad
@assert_backend_available
def compute_hessian(self, objective, argument):
def hess(x, v):
x = torch.from_numpy(x)
v = torch.from_numpy(v)
x.requires_grad_(True)
fx = objective(x)
grad_fx, *_ = autograd.grad(fx, x, create_graph=True)
grad_fx.matmul(v).backward()
g = x.grad
# See above.
try:
return g.numpy()
except AttributeError:
pass
return g
return hess
|
<commit_before><commit_msg>Add first implementation of pytorch backend
Signed-off-by: Niklas Koep <342d5290239d9c5264c8f98185afedb99596601a@gmail.com><commit_after>
|
"""
Module containing functions to differentiate functions using pytorch.
"""
try:
import torch
except ImportError:
torch = None
else:
from torch import autograd
from ._backend import Backend, assert_backend_available
class PyTorchBackend(Backend):
def __str__(self):
return "pytorch"
@staticmethod
def is_available():
# XXX: PyTorch 0.4 unified the Tensor and Variable API. Higher-order
# derivatives to compute Hessian-vector products were introduced
# in 0.2 so we should make that the first supported version.
# However, supporting both Tensor and Variable requires a bit more
# work that we'll skip for now.
return torch is not None and torch.__version__ >= "0.4"
@assert_backend_available
def is_compatible(self, objective, argument):
return callable(objective)
@assert_backend_available
def compile_function(self, objective, argument):
def func(x):
# PyTorch unboxes scalars automatically, but we still need to get a
# numpy view of the data when "compiling" gradients or Hessians.
f = objective(torch.from_numpy(x))
try:
return f.numpy()
except AttributeError:
pass
return f
return func
@assert_backend_available
def compute_gradient(self, objective, argument):
def grad(x):
x = torch.from_numpy(x)
x.requires_grad_(True)
objective(x).backward()
g = x.grad
# See above.
try:
return g.numpy()
except AttributeError:
pass
return g
return grad
@assert_backend_available
def compute_hessian(self, objective, argument):
def hess(x, v):
x = torch.from_numpy(x)
v = torch.from_numpy(v)
x.requires_grad_(True)
fx = objective(x)
grad_fx, *_ = autograd.grad(fx, x, create_graph=True)
grad_fx.matmul(v).backward()
g = x.grad
# See above.
try:
return g.numpy()
except AttributeError:
pass
return g
return hess
|
Add first implementation of pytorch backend
Signed-off-by: Niklas Koep <342d5290239d9c5264c8f98185afedb99596601a@gmail.com>"""
Module containing functions to differentiate functions using pytorch.
"""
try:
import torch
except ImportError:
torch = None
else:
from torch import autograd
from ._backend import Backend, assert_backend_available
class PyTorchBackend(Backend):
def __str__(self):
return "pytorch"
@staticmethod
def is_available():
# XXX: PyTorch 0.4 unified the Tensor and Variable API. Higher-order
# derivatives to compute Hessian-vector products were introduced
# in 0.2 so we should make that the first supported version.
# However, supporting both Tensor and Variable requires a bit more
# work that we'll skip for now.
return torch is not None and torch.__version__ >= "0.4"
@assert_backend_available
def is_compatible(self, objective, argument):
return callable(objective)
@assert_backend_available
def compile_function(self, objective, argument):
def func(x):
# PyTorch unboxes scalars automatically, but we still need to get a
# numpy view of the data when "compiling" gradients or Hessians.
f = objective(torch.from_numpy(x))
try:
return f.numpy()
except AttributeError:
pass
return f
return func
@assert_backend_available
def compute_gradient(self, objective, argument):
def grad(x):
x = torch.from_numpy(x)
x.requires_grad_(True)
objective(x).backward()
g = x.grad
# See above.
try:
return g.numpy()
except AttributeError:
pass
return g
return grad
@assert_backend_available
def compute_hessian(self, objective, argument):
def hess(x, v):
x = torch.from_numpy(x)
v = torch.from_numpy(v)
x.requires_grad_(True)
fx = objective(x)
grad_fx, *_ = autograd.grad(fx, x, create_graph=True)
grad_fx.matmul(v).backward()
g = x.grad
# See above.
try:
return g.numpy()
except AttributeError:
pass
return g
return hess
|
<commit_before><commit_msg>Add first implementation of pytorch backend
Signed-off-by: Niklas Koep <342d5290239d9c5264c8f98185afedb99596601a@gmail.com><commit_after>"""
Module containing functions to differentiate functions using pytorch.
"""
try:
import torch
except ImportError:
torch = None
else:
from torch import autograd
from ._backend import Backend, assert_backend_available
class PyTorchBackend(Backend):
def __str__(self):
return "pytorch"
@staticmethod
def is_available():
# XXX: PyTorch 0.4 unified the Tensor and Variable API. Higher-order
# derivatives to compute Hessian-vector products were introduced
# in 0.2 so we should make that the first supported version.
# However, supporting both Tensor and Variable requires a bit more
# work that we'll skip for now.
return torch is not None and torch.__version__ >= "0.4"
@assert_backend_available
def is_compatible(self, objective, argument):
return callable(objective)
@assert_backend_available
def compile_function(self, objective, argument):
def func(x):
# PyTorch unboxes scalars automatically, but we still need to get a
# numpy view of the data when "compiling" gradients or Hessians.
f = objective(torch.from_numpy(x))
try:
return f.numpy()
except AttributeError:
pass
return f
return func
@assert_backend_available
def compute_gradient(self, objective, argument):
def grad(x):
x = torch.from_numpy(x)
x.requires_grad_(True)
objective(x).backward()
g = x.grad
# See above.
try:
return g.numpy()
except AttributeError:
pass
return g
return grad
@assert_backend_available
def compute_hessian(self, objective, argument):
def hess(x, v):
x = torch.from_numpy(x)
v = torch.from_numpy(v)
x.requires_grad_(True)
fx = objective(x)
grad_fx, *_ = autograd.grad(fx, x, create_graph=True)
grad_fx.matmul(v).backward()
g = x.grad
# See above.
try:
return g.numpy()
except AttributeError:
pass
return g
return hess
|
|
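The double-backward trick in `compute_hessian` above can be checked in isolation: differentiating the scalar `<grad f(x), v>` with respect to `x` yields the Hessian-vector product. A minimal sketch, assuming PyTorch >= 0.4 as the backend itself requires:

```python
import torch
from torch import autograd

x = torch.tensor([1.0, 2.0], requires_grad=True)
v = torch.tensor([1.0, 0.0])
fx = (x ** 3).sum()                                  # f(x) = sum(x_i^3)
grad_fx, = autograd.grad(fx, x, create_graph=True)   # grad f = 3 x^2
grad_fx.matmul(v).backward()                         # backprop through <grad f, v>
print(x.grad)  # tensor([6., 0.]) = H(x) v, since H = diag(6 x)
```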
30a0787e8ec184aa1d274a11234ed37d41807df5
|
django_split/templatetags/django_split.py
|
django_split/templatetags/django_split.py
|
from django import template
from ..base import EXPERIMENTS
register = template.Library()
@register.filter
def experiment(user, experiment, group='experiment'):
return EXPERIMENTS[experiment].in_group(user, group)
|
Add template tag for experiment group membership
|
Add template tag for experiment group membership
|
Python
|
mit
|
prophile/django_split
|
Add template tag for experiment group membership
|
from django import template
from ..base import EXPERIMENTS
register = template.Library()
@register.filter
def experiment(user, experiment, group='experiment'):
return EXPERIMENTS[experiment].in_group(user, group)
|
<commit_before><commit_msg>Add template tag for experiment group membership<commit_after>
|
from django import template
from ..base import EXPERIMENTS
register = template.Library()
@register.filter
def experiment(user, experiment, group='experiment'):
return EXPERIMENTS[experiment].in_group(user, group)
|
Add template tag for experiment group membershipfrom django import template
from ..base import EXPERIMENTS
register = template.Library()
@register.filter
def experiment(user, experiment, group='experiment'):
return EXPERIMENTS[experiment].in_group(user, group)
|
<commit_before><commit_msg>Add template tag for experiment group membership<commit_after>from django import template
from ..base import EXPERIMENTS
register = template.Library()
@register.filter
def experiment(user, experiment, group='experiment'):
return EXPERIMENTS[experiment].in_group(user, group)
|
|
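A usage note for the filter above: Django's `|filter:arg` syntax passes at most one argument, so in template use the `group` parameter keeps its `'experiment'` default. The experiment name below is hypothetical:

```python
# Template side:
#   {% load django_split %}
#   {% if user|experiment:"new_layout" %} ... {% endif %}
# which evaluates the same membership test as the direct call:
#   EXPERIMENTS['new_layout'].in_group(user, 'experiment')
```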
1466e8227b847c5e926eb6b745da5d7192cd31f7
|
scripts/read_fk.py
|
scripts/read_fk.py
|
import poppy_inverse_kinematics.creature as model_creature
import numpy as np
import poppy_inverse_kinematics.meta_creature as meta_creature
import time
# parameters
activate_follow = True
interface_type = "vrep"
plot = True
waiting_time = 5
# Create creatures
right_arm = model_creature.creature("torso_right_arm")
left_arm = model_creature.creature("torso_left_arm")
torso = meta_creature.MetaCreature(interface_type=interface_type, creature_type="torso")
torso.add_model(right_arm)
torso.add_model(left_arm)
# Init pypot robot
if interface_type == "robot":
print("Initializing robot")
for m in torso.pypot_object.motors:
m.compliant = False
m.goal_position = 0
print("Waiting 10 seconds")
time.sleep(10)
# The left arm is now compliant, so it can be moved
left_arm.set_compliance(compliance=True)
# Choose right arm target
if activate_follow:
try:
while True:
print("Waiting %s seconds" % waiting_time)
time.sleep(waiting_time)
left_arm.sync_current_joints()
print(left_arm.forward_kinematic())
except KeyboardInterrupt:
# Plot result
if plot:
torso.plot_meta_model()
if interface_type == "robot":
torso.pypot_object.close()
|
Add helper script for hand_follow
|
Add helper script for hand_follow
|
Python
|
apache-2.0
|
Phylliade/ikpy
|
Add helper script for hand_follow
|
import poppy_inverse_kinematics.creature as model_creature
import numpy as np
import poppy_inverse_kinematics.meta_creature as meta_creature
import time
# parameters
activate_follow = True
interface_type = "vrep"
plot = True
waiting_time = 5
# Create creatures
right_arm = model_creature.creature("torso_right_arm")
left_arm = model_creature.creature("torso_left_arm")
torso = meta_creature.MetaCreature(interface_type=interface_type, creature_type="torso")
torso.add_model(right_arm)
torso.add_model(left_arm)
# Init pypot robot
if interface_type == "robot":
print("Initializing robot")
for m in torso.pypot_object.motors:
m.compliant = False
m.goal_position = 0
print("Waiting 10 seconds")
time.sleep(10)
# The left arm is now compliant, so it can be moved
left_arm.set_compliance(compliance=True)
# Choose right arm target
if activate_follow:
try:
while True:
print("Waiting %s seconds" % waiting_time)
time.sleep(waiting_time)
left_arm.sync_current_joints()
print(left_arm.forward_kinematic())
except KeyboardInterrupt:
# Plot result
if plot:
torso.plot_meta_model()
if interface_type == "robot":
torso.pypot_object.close()
|
<commit_before><commit_msg>Add helper script for hand_follow<commit_after>
|
import poppy_inverse_kinematics.creature as model_creature
import numpy as np
import poppy_inverse_kinematics.meta_creature as meta_creature
import time
# parameters
activate_follow = True
interface_type = "vrep"
plot = True
waiting_time = 5
# Create creatures
right_arm = model_creature.creature("torso_right_arm")
left_arm = model_creature.creature("torso_left_arm")
torso = meta_creature.MetaCreature(interface_type=interface_type, creature_type="torso")
torso.add_model(right_arm)
torso.add_model(left_arm)
# Init pypot robot
if interface_type == "robot":
print("Initializing robot")
for m in torso.pypot_object.motors:
m.compliant = False
m.goal_position = 0
print("Waiting 10 seconds")
time.sleep(10)
# The left arm is now compliant, so it can be moved
left_arm.set_compliance(compliance=True)
# Choose right arm target
if activate_follow:
try:
while True:
print("Waiting %s seconds" % waiting_time)
time.sleep(waiting_time)
left_arm.sync_current_joints()
print(left_arm.forward_kinematic())
except KeyboardInterrupt:
# Plot result
if plot:
torso.plot_meta_model()
if interface_type == "robot":
torso.pypot_object.close()
|
Add helper script for hand_followimport poppy_inverse_kinematics.creature as model_creature
import numpy as np
import poppy_inverse_kinematics.meta_creature as meta_creature
import time
# parameters
activate_follow = True
interface_type = "vrep"
plot = True
waiting_time = 5
# Create creatures
right_arm = model_creature.creature("torso_right_arm")
left_arm = model_creature.creature("torso_left_arm")
torso = meta_creature.MetaCreature(interface_type=interface_type, creature_type="torso")
torso.add_model(right_arm)
torso.add_model(left_arm)
# Init pypot robot
if interface_type == "robot":
print("Initializing robot")
for m in torso.pypot_object.motors:
m.compliant = False
m.goal_position = 0
print("Waiting 10 seconds")
time.sleep(10)
# The left arm is now compliant, so it can be moved
left_arm.set_compliance(compliance=True)
# Choose right arm target
if activate_follow:
try:
while True:
print("Waiting %s seconds" % waiting_time)
time.sleep(waiting_time)
left_arm.sync_current_joints()
print(left_arm.forward_kinematic())
except KeyboardInterrupt:
# Plot result
if plot:
torso.plot_meta_model()
if interface_type == "robot":
torso.pypot_object.close()
|
<commit_before><commit_msg>Add helper script for hand_follow<commit_after>import poppy_inverse_kinematics.creature as model_creature
import numpy as np
import poppy_inverse_kinematics.meta_creature as meta_creature
import time
# parameters
activate_follow = True
interface_type = "vrep"
plot = True
waiting_time = 5
# Create creatures
right_arm = model_creature.creature("torso_right_arm")
left_arm = model_creature.creature("torso_left_arm")
torso = meta_creature.MetaCreature(interface_type=interface_type, creature_type="torso")
torso.add_model(right_arm)
torso.add_model(left_arm)
# Init pypot robot
if interface_type == "robot":
print("Initializing robot")
for m in torso.pypot_object.motors:
m.compliant = False
m.goal_position = 0
print("Waiting 10 seconds")
time.sleep(10)
# The left arm is now compliant, so it can be moved
left_arm.set_compliance(compliance=True)
# Choose right arm target
if activate_follow:
try:
while True:
print("Waiting %s seconds" % waiting_time)
time.sleep(waiting_time)
left_arm.sync_current_joints()
print(left_arm.forward_kinematic())
except KeyboardInterrupt:
# Plot result
if plot:
torso.plot_meta_model()
if interface_type == "robot":
torso.pypot_object.close()
|
|
a078868a80c8cc48740c8caea0fb6f4906e490a5
|
test/downhill_test.py
|
test/downhill_test.py
|
import downhill
import numpy as np
import theano
class TestMinimize:
def test_minimize(self):
x = theano.shared(-3 + np.zeros((2, ), 'f'), name='x')
data = downhill.Dataset(np.zeros((1, 1)), batch_size=1)
data._batches = [[]]
downhill.minimize(
(100 * (x[1:] - x[:-1] ** 2) ** 2 + (1 - x[:-1]) ** 2).sum(),
data,
algo='nag',
learning_rate=0.001,
momentum=0.9,
patience=1,
min_improvement=0.1,
max_gradient_norm=1,
)
assert np.allclose(x.get_value(), [1, 1]), x.get_value()
|
Add a test for global minimize wrapper.
|
Add a test for global minimize wrapper.
|
Python
|
mit
|
lmjohns3/downhill,rodrigob/downhill
|
Add a test for global minimize wrapper.
|
import downhill
import numpy as np
import theano
class TestMinimize:
def test_minimize(self):
x = theano.shared(-3 + np.zeros((2, ), 'f'), name='x')
data = downhill.Dataset(np.zeros((1, 1)), batch_size=1)
data._batches = [[]]
downhill.minimize(
(100 * (x[1:] - x[:-1] ** 2) ** 2 + (1 - x[:-1]) ** 2).sum(),
data,
algo='nag',
learning_rate=0.001,
momentum=0.9,
patience=1,
min_improvement=0.1,
max_gradient_norm=1,
)
assert np.allclose(x.get_value(), [1, 1]), x.get_value()
|
<commit_before><commit_msg>Add a test for global minimize wrapper.<commit_after>
|
import downhill
import numpy as np
import theano
class TestMinimize:
def test_minimize(self):
x = theano.shared(-3 + np.zeros((2, ), 'f'), name='x')
data = downhill.Dataset(np.zeros((1, 1)), batch_size=1)
data._batches = [[]]
downhill.minimize(
(100 * (x[1:] - x[:-1] ** 2) ** 2 + (1 - x[:-1]) ** 2).sum(),
data,
algo='nag',
learning_rate=0.001,
momentum=0.9,
patience=1,
min_improvement=0.1,
max_gradient_norm=1,
)
assert np.allclose(x.get_value(), [1, 1]), x.get_value()
|
Add a test for global minimize wrapper.import downhill
import numpy as np
import theano
class TestMinimize:
def test_minimize(self):
x = theano.shared(-3 + np.zeros((2, ), 'f'), name='x')
data = downhill.Dataset(np.zeros((1, 1)), batch_size=1)
data._batches = [[]]
downhill.minimize(
(100 * (x[1:] - x[:-1] ** 2) ** 2 + (1 - x[:-1]) ** 2).sum(),
data,
algo='nag',
learning_rate=0.001,
momentum=0.9,
patience=1,
min_improvement=0.1,
max_gradient_norm=1,
)
assert np.allclose(x.get_value(), [1, 1]), x.get_value()
|
<commit_before><commit_msg>Add a test for global minimize wrapper.<commit_after>import downhill
import numpy as np
import theano
class TestMinimize:
def test_minimize(self):
x = theano.shared(-3 + np.zeros((2, ), 'f'), name='x')
data = downhill.Dataset(np.zeros((1, 1)), batch_size=1)
data._batches = [[]]
downhill.minimize(
(100 * (x[1:] - x[:-1] ** 2) ** 2 + (1 - x[:-1]) ** 2).sum(),
data,
algo='nag',
learning_rate=0.001,
momentum=0.9,
patience=1,
min_improvement=0.1,
max_gradient_norm=1,
)
assert np.allclose(x.get_value(), [1, 1]), x.get_value()
|
|
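The expression minimized in the test above is the two-dimensional Rosenbrock function, whose unique global minimum sits at (1, 1), which is exactly what the final assertion checks. Restated in plain NumPy for reference:

```python
import numpy as np

def rosenbrock(x):
    # 100 * (x2 - x1^2)^2 + (1 - x1)^2, minimized at x = (1, 1)
    return 100 * (x[1] - x[0] ** 2) ** 2 + (1 - x[0]) ** 2

print(rosenbrock(np.array([1.0, 1.0])))  # 0.0
```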
484ca8e8e8ab53b0fe62062758a3f74d4194d3c0
|
examples/compass_high_angle_hist.py
|
examples/compass_high_angle_hist.py
|
#!/usr/bin/env python
"""
Prints a histogram of inclinations to show what proportion are high-angle.
usage: compass_high_angle_hist.py DATFILE...
"""
import sys
from davies import compass
def compass_stats(datfiles, bin_size=5, display_scale=3):
histogram = [0 for bin in range(0, 91, bin_size)]
for datfile in datfiles:
for survey in compass.CompassDatParser(datfile).parse():
for shot in survey:
histogram[int(abs(shot.inc) // bin_size)] += 1
n = sum(histogram)
high_n = sum(histogram[60/5:-1])
high_n_percent = high_n / float(n) * 100.0
for bin, count in enumerate(histogram):
percent = count / float(n) * 100.0
print '%02d\t%4d\t%5.1f%%\t%s' % (bin * bin_size, count, percent, '#' * int(round(percent * display_scale)))
print '\t%d\t100.0%%' % n
print 'Summary: %d (%0.1f%%) shots are high-angle 60-deg or greater' % (high_n, high_n_percent)
if __name__ == '__main__':
if len(sys.argv) == 1:
print >> sys.stderr, "usage: compass_high_angle_hist.py DATFILE..."
sys.exit(2)
compass_stats(sys.argv[1:])
|
Add example script that prints a histogram of inclinations
|
Add example script that prints a histogram of inclinations
|
Python
|
mit
|
riggsd/davies
|
Add example script that prints a histogram of inclinations
|
#!/usr/bin/env python
"""
Prints a histogram of inclinations to show what proportion are high-angle.
usage: compass_high_angle_hist.py DATFILE...
"""
import sys
from davies import compass
def compass_stats(datfiles, bin_size=5, display_scale=3):
histogram = [0 for bin in range(0, 91, bin_size)]
for datfile in datfiles:
for survey in compass.CompassDatParser(datfile).parse():
for shot in survey:
histogram[int(abs(shot.inc) // bin_size)] += 1
n = sum(histogram)
high_n = sum(histogram[60/5:-1])
high_n_percent = high_n / float(n) * 100.0
for bin, count in enumerate(histogram):
percent = count / float(n) * 100.0
print '%02d\t%4d\t%5.1f%%\t%s' % (bin * bin_size, count, percent, '#' * int(round(percent * display_scale)))
print '\t%d\t100.0%%' % n
print 'Summary: %d (%0.1f%%) shots are high-angle 60-deg or greater' % (high_n, high_n_percent)
if __name__ == '__main__':
if len(sys.argv) == 1:
print >> sys.stderr, "usage: compass_high_angle_hist.py DATFILE..."
sys.exit(2)
compass_stats(sys.argv[1:])
|
<commit_before><commit_msg>Add example script that prints a histogram of inclinations<commit_after>
|
#!/usr/bin/env python
"""
Prints a histogram of inclinations to show what proportion are high-angle.
usage: compass_high_angle_hist.py DATFILE...
"""
import sys
from davies import compass
def compass_stats(datfiles, bin_size=5, display_scale=3):
histogram = [0 for bin in range(0, 91, bin_size)]
for datfile in datfiles:
for survey in compass.CompassDatParser(datfile).parse():
for shot in survey:
histogram[int(abs(shot.inc) // bin_size)] += 1
n = sum(histogram)
high_n = sum(histogram[60/5:-1])
high_n_percent = high_n / float(n) * 100.0
for bin, count in enumerate(histogram):
percent = count / float(n) * 100.0
print '%02d\t%4d\t%5.1f%%\t%s' % (bin * bin_size, count, percent, '#' * int(round(percent * display_scale)))
print '\t%d\t100.0%%' % n
print 'Summary: %d (%0.1f%%) shots are high-angle 60-deg or greater' % (high_n, high_n_percent)
if __name__ == '__main__':
if len(sys.argv) == 1:
print >> sys.stderr, "usage: compass_high_angle_hist.py DATFILE..."
sys.exit(2)
compass_stats(sys.argv[1:])
|
Add example script that prints a histogram of inclinations#!/usr/bin/env python
"""
Prints a histogram of inclinations to show what proportion are high-angle.
usage: compass_high_angle_hist.py DATFILE...
"""
import sys
from davies import compass
def compass_stats(datfiles, bin_size=5, display_scale=3):
histogram = [0 for bin in range(0, 91, bin_size)]
for datfile in datfiles:
for survey in compass.CompassDatParser(datfile).parse():
for shot in survey:
histogram[int(abs(shot.inc) // bin_size)] += 1
n = sum(histogram)
high_n = sum(histogram[60/5:-1])
high_n_percent = high_n / float(n) * 100.0
for bin, count in enumerate(histogram):
percent = count / float(n) * 100.0
print '%02d\t%4d\t%5.1f%%\t%s' % (bin * bin_size, count, percent, '#' * int(round(percent * display_scale)))
print '\t%d\t100.0%%' % n
print 'Summary: %d (%0.1f%%) shots are high-angle 60-deg or greater' % (high_n, high_n_percent)
if __name__ == '__main__':
if len(sys.argv) == 1:
print >> sys.stderr, "usage: compass_high_angle_hist.py DATFILE..."
sys.exit(2)
compass_stats(sys.argv[1:])
|
<commit_before><commit_msg>Add example script that prints a histogram of inclinations<commit_after>#!/usr/bin/env python
"""
Prints a histogram of inclinations to show what proportion are high-angle.
usage: compass_high_angle_hist.py DATFILE...
"""
import sys
from davies import compass
def compass_stats(datfiles, bin_size=5, display_scale=3):
histogram = [0 for bin in range(0, 91, bin_size)]
for datfile in datfiles:
for survey in compass.CompassDatParser(datfile).parse():
for shot in survey:
histogram[int(abs(shot.inc) // bin_size)] += 1
n = sum(histogram)
high_n = sum(histogram[60/5:-1])
high_n_percent = high_n / float(n) * 100.0
for bin, count in enumerate(histogram):
percent = count / float(n) * 100.0
print '%02d\t%4d\t%5.1f%%\t%s' % (bin * bin_size, count, percent, '#' * int(round(percent * display_scale)))
print '\t%d\t100.0%%' % n
print 'Summary: %d (%0.1f%%) shots are high-angle 60-deg or greater' % (high_n, high_n_percent)
if __name__ == '__main__':
if len(sys.argv) == 1:
print >> sys.stderr, "usage: compass_high_angle_hist.py DATFILE..."
sys.exit(2)
compass_stats(sys.argv[1:])
|
|
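Two details of the script above are worth flagging: it is Python 2 code (bare `print` statements, integer division in `60/5`), and the slice `histogram[60/5:-1]` excludes the final bin, so shots at exactly 90 degrees are never counted as high-angle. A Python 3 restatement of the tally with the last bin included, using illustrative counts:

```python
histogram = [0] * 19               # bins 0-4, 5-9, ..., 85-89, 90 (bin_size=5)
histogram[13] = 3                  # illustrative: three shots in the 65-69 bin
histogram[18] = 1                  # illustrative: one shot at exactly 90 degrees
high_n = sum(histogram[60 // 5:])  # [12:] keeps the 90-degree bin
print(high_n)                      # 4; the original [60/5:-1] would report 3
```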
a5fea9b538a309422f7503ff689f0dde932585f6
|
samples/migrations/0013_auto_20170526_1718.py
|
samples/migrations/0013_auto_20170526_1718.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-26 20:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('samples', '0012_auto_20170512_1138'),
]
operations = [
migrations.AlterField(
model_name='collectedsample',
name='collection_date',
field=models.DateField(null=True, verbose_name='Data de coleta'),
),
migrations.AlterField(
model_name='collectedsample',
name='collection_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='samples.CollectionType', verbose_name='Método de coleta'),
),
]
|
Add migration file for samples app
|
:memo: Add migration file for samples app
|
Python
|
mit
|
gems-uff/labsys,gems-uff/labsys,gems-uff/labsys
|
:memo: Add migration file for samples app
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-26 20:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('samples', '0012_auto_20170512_1138'),
]
operations = [
migrations.AlterField(
model_name='collectedsample',
name='collection_date',
field=models.DateField(null=True, verbose_name='Data de coleta'),
),
migrations.AlterField(
model_name='collectedsample',
name='collection_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='samples.CollectionType', verbose_name='Método de coleta'),
),
]
|
<commit_before><commit_msg>:memo: Add migration file for samples app<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-26 20:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('samples', '0012_auto_20170512_1138'),
]
operations = [
migrations.AlterField(
model_name='collectedsample',
name='collection_date',
field=models.DateField(null=True, verbose_name='Data de coleta'),
),
migrations.AlterField(
model_name='collectedsample',
name='collection_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='samples.CollectionType', verbose_name='Método de coleta'),
),
]
|
:memo: Add migration file for samples app# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-26 20:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('samples', '0012_auto_20170512_1138'),
]
operations = [
migrations.AlterField(
model_name='collectedsample',
name='collection_date',
field=models.DateField(null=True, verbose_name='Data de coleta'),
),
migrations.AlterField(
model_name='collectedsample',
name='collection_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='samples.CollectionType', verbose_name='Método de coleta'),
),
]
|
<commit_before><commit_msg>:memo: Add migration file for samples app<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-26 20:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('samples', '0012_auto_20170512_1138'),
]
operations = [
migrations.AlterField(
model_name='collectedsample',
name='collection_date',
field=models.DateField(null=True, verbose_name='Data de coleta'),
),
migrations.AlterField(
model_name='collectedsample',
name='collection_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='samples.CollectionType', verbose_name='Método de coleta'),
),
]
|
|
3e25b09882fa57f7063cecd2e223741132f83854
|
muspelheim/src/test/python/newlinejson.py
|
muspelheim/src/test/python/newlinejson.py
|
#!/usr/bin/env python
import json
import sys
def newline_json(in_file, out_file):
for line in json.load(in_file):
json.dump(line, out_file)
out_file.write('\n')
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Usage: python newlinejson.py [path to ordinary json file]"
sys.exit(1)
f = open(sys.argv[1], 'r')
try:
newline_json(f, sys.stdout)
finally:
f.close()
|
Add script to convert JSON arrays to newline separated JSON
|
Add script to convert JSON arrays to newline separated JSON
[Finished #37633897]
|
Python
|
agpl-3.0
|
precog/platform,precog/platform,precog/platform,precog/platform
|
Add script to convert JSON arrays to newline separated JSON
[Finished #37633897]
|
#!/usr/bin/env python
import json
import sys
def newline_json(in_file, out_file):
for line in json.load(in_file):
json.dump(line, out_file)
out_file.write('\n')
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Usage: python newlinejson.py [path to ordinary json file]"
sys.exit(1)
f = open(sys.argv[1], 'r')
try:
newline_json(f, sys.stdout)
finally:
f.close()
|
<commit_before><commit_msg>Add script to convert JSON arrays to newline separated JSON
[Finished #37633897]<commit_after>
|
#!/usr/bin/env python
import json
import sys
def newline_json(in_file, out_file):
for line in json.load(in_file):
json.dump(line, out_file)
out_file.write('\n')
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Usage: python newlinejson.py [path to ordinary json file]"
sys.exit(1)
f = open(sys.argv[1], 'r')
try:
newline_json(f, sys.stdout)
finally:
f.close()
|
Add script to convert JSON arrays to newline separated JSON
[Finished #37633897]#!/usr/bin/env python
import json
import sys
def newline_json(in_file, out_file):
for line in json.load(in_file):
json.dump(line, out_file)
out_file.write('\n')
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Usage: python newlinejson.py [path to ordinary json file]"
sys.exit(1)
f = open(sys.argv[1], 'r')
try:
newline_json(f, sys.stdout)
finally:
f.close()
|
<commit_before><commit_msg>Add script to convert JSON arrays to newline separated JSON
[Finished #37633897]<commit_after>#!/usr/bin/env python
import json
import sys
def newline_json(in_file, out_file):
for line in json.load(in_file):
json.dump(line, out_file)
out_file.write('\n')
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Usage: python newlinejson.py [path to ordinary json file]"
sys.exit(1)
f = open(sys.argv[1], 'r')
try:
newline_json(f, sys.stdout)
finally:
f.close()
|
|
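A minimal, self-contained illustration of the conversion the script above performs, using in-memory streams instead of files (io.StringIO here is purely for demonstration):

import io
import json

def newline_json(in_file, out_file):
    # One JSON document per output line, as in the script above.
    for item in json.load(in_file):
        json.dump(item, out_file)
        out_file.write('\n')

src = io.StringIO('[{"a": 1}, {"a": 2}]')
dst = io.StringIO()
newline_json(src, dst)
print(dst.getvalue())  # {"a": 1}\n{"a": 2}\n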
a64b5896b6bd8da36b3e2d2d5bd8b48c4c355935
|
poyo/__init__.py
|
poyo/__init__.py
|
# -*- coding: utf-8 -*-
__author__ = 'Raphael Pierzina'
__email__ = 'raphael@hackebrot.de'
__version__ = '0.1.0'
from .parser import parse_string
__all__ = ['parse_string']
|
Create an init file with meta information
|
Create an init file with meta information
|
Python
|
mit
|
hackebrot/poyo
|
Create an init file with meta information
|
# -*- coding: utf-8 -*-
__author__ = 'Raphael Pierzina'
__email__ = 'raphael@hackebrot.de'
__version__ = '0.1.0'
from .parser import parse_string
__all__ = ['parse_string']
|
<commit_before><commit_msg>Create an init file with meta information<commit_after>
|
# -*- coding: utf-8 -*-
__author__ = 'Raphael Pierzina'
__email__ = 'raphael@hackebrot.de'
__version__ = '0.1.0'
from .parser import parse_string
__all__ = ['parse_string']
|
Create an init file with meta information# -*- coding: utf-8 -*-
__author__ = 'Raphael Pierzina'
__email__ = 'raphael@hackebrot.de'
__version__ = '0.1.0'
from .parser import parse_string
__all__ = ['parse_string']
|
<commit_before><commit_msg>Create an init file with meta information<commit_after># -*- coding: utf-8 -*-
__author__ = 'Raphael Pierzina'
__email__ = 'raphael@hackebrot.de'
__version__ = '0.1.0'
from .parser import parse_string
__all__ = ['parse_string']
|
|
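A hypothetical usage of the API exported above; that parse_string accepts a YAML string and returns the parsed mapping is an assumption based on the name, not on behaviour documented in the record:

from poyo import parse_string

# Assumed behaviour: YAML string in, parsed mapping out.
config = parse_string("default_context:\n    greeting: Hello\n")
print(config)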
de026ce0b3070c7f17f37c4a1db039b834dc1ac8
|
commitlog_archiving_test.py
|
commitlog_archiving_test.py
|
from dtest import Tester, debug
from pytools import since, replace_in_file
import tempfile, shutil, glob, os, time
import distutils.dir_util
class CommitLogArchivingTest(Tester):
def __init__(self, *args, **kwargs):
Tester.__init__(self, *args, **kwargs)
def insert_rows(self, cursor, start, end):
for r in range(start, end):
cursor.execute("INSERT INTO ks.cf (key, val) VALUES ({r}, 'asdf');".format(r=r))
@since('2.0')
def test_active_commitlog_segments_archived_at_startup(self):
"""Test archive commit log segments are automatically archived at node startup"""
cluster = self.cluster
cluster.populate(1)
(node1,) = cluster.nodelist()
# Create a temp directory for storing commitlog archives:
tmp_commitlog = tempfile.mkdtemp()
debug("tmp_commitlog: " + tmp_commitlog)
# Edit commitlog_archiving.properties and set an archive
# command:
replace_in_file(os.path.join(node1.get_path(),'conf','commitlog_archiving.properties'),
[(r'^archive_command=.*$', 'archive_command=cp %path {tmp_commitlog}/%name'.format(
tmp_commitlog=tmp_commitlog))])
cluster.start()
cursor = self.patient_cql_connection(node1)
self.create_ks(cursor, 'ks', 1)
cursor.execute('CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')
debug("Writing 30,000 rows...")
self.insert_rows(cursor, 0, 30000)
node1.stop()
# assert that we didn't yet archive the active commitlog segments
commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
active_segments = set(os.listdir(commitlog_dir))
self.assertFalse(set(os.listdir(tmp_commitlog)).issuperset(active_segments))
# start up the node & verify that the active commitlog segments were archived
node1.start()
self.assertTrue(set(os.listdir(tmp_commitlog)).issuperset(active_segments))
|
Add test for commitlog archiving
|
Add test for commitlog archiving
|
Python
|
apache-2.0
|
pcmanus/cassandra-dtest,spodkowinski/cassandra-dtest,bdeggleston/cassandra-dtest,krummas/cassandra-dtest,yukim/cassandra-dtest,aweisberg/cassandra-dtest,beobal/cassandra-dtest,mambocab/cassandra-dtest,mambocab/cassandra-dtest,blerer/cassandra-dtest,bdeggleston/cassandra-dtest,carlyeks/cassandra-dtest,aweisberg/cassandra-dtest,stef1927/cassandra-dtest,thobbs/cassandra-dtest,beobal/cassandra-dtest,blerer/cassandra-dtest,riptano/cassandra-dtest,snazy/cassandra-dtest,riptano/cassandra-dtest,iamaleksey/cassandra-dtest,thobbs/cassandra-dtest,tjake/cassandra-dtest,spodkowinski/cassandra-dtest,snazy/cassandra-dtest,krummas/cassandra-dtest,pauloricardomg/cassandra-dtest,stef1927/cassandra-dtest,pauloricardomg/cassandra-dtest,iamaleksey/cassandra-dtest,carlyeks/cassandra-dtest
|
Add test for commitlog archiving
|
from dtest import Tester, debug
from pytools import since, replace_in_file
import tempfile, shutil, glob, os, time
import distutils.dir_util
class CommitLogArchivingTest(Tester):
def __init__(self, *args, **kwargs):
Tester.__init__(self, *args, **kwargs)
def insert_rows(self, cursor, start, end):
for r in range(start, end):
cursor.execute("INSERT INTO ks.cf (key, val) VALUES ({r}, 'asdf');".format(r=r))
@since('2.0')
def test_active_commitlog_segments_archived_at_startup(self):
"""Test archive commit log segments are automatically archived at node startup"""
cluster = self.cluster
cluster.populate(1)
(node1,) = cluster.nodelist()
# Create a temp directory for storing commitlog archives:
tmp_commitlog = tempfile.mkdtemp()
debug("tmp_commitlog: " + tmp_commitlog)
# Edit commitlog_archiving.properties and set an archive
# command:
replace_in_file(os.path.join(node1.get_path(),'conf','commitlog_archiving.properties'),
[(r'^archive_command=.*$', 'archive_command=cp %path {tmp_commitlog}/%name'.format(
tmp_commitlog=tmp_commitlog))])
cluster.start()
cursor = self.patient_cql_connection(node1)
self.create_ks(cursor, 'ks', 1)
cursor.execute('CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')
debug("Writing 30,000 rows...")
self.insert_rows(cursor, 0, 30000)
node1.stop()
# assert that we didn't yet archive the active commitlog segments
commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
active_segments = set(os.listdir(commitlog_dir))
self.assertFalse(set(os.listdir(tmp_commitlog)).issuperset(active_segments))
# start up the node & verify that the active commitlog segments were archived
node1.start()
self.assertTrue(set(os.listdir(tmp_commitlog)).issuperset(active_segments))
|
<commit_before><commit_msg>Add test for commitlog archiving<commit_after>
|
from dtest import Tester, debug
from pytools import since, replace_in_file
import tempfile, shutil, glob, os, time
import distutils.dir_util
class CommitLogArchivingTest(Tester):
def __init__(self, *args, **kwargs):
Tester.__init__(self, *args, **kwargs)
def insert_rows(self, cursor, start, end):
for r in range(start, end):
cursor.execute("INSERT INTO ks.cf (key, val) VALUES ({r}, 'asdf');".format(r=r))
@since('2.0')
def test_active_commitlog_segments_archived_at_startup(self):
"""Test archive commit log segments are automatically archived at node startup"""
cluster = self.cluster
cluster.populate(1)
(node1,) = cluster.nodelist()
# Create a temp directory for storing commitlog archives:
tmp_commitlog = tempfile.mkdtemp()
debug("tmp_commitlog: " + tmp_commitlog)
# Edit commitlog_archiving.properties and set an archive
# command:
replace_in_file(os.path.join(node1.get_path(),'conf','commitlog_archiving.properties'),
[(r'^archive_command=.*$', 'archive_command=cp %path {tmp_commitlog}/%name'.format(
tmp_commitlog=tmp_commitlog))])
cluster.start()
cursor = self.patient_cql_connection(node1)
self.create_ks(cursor, 'ks', 1)
cursor.execute('CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')
debug("Writing 30,000 rows...")
self.insert_rows(cursor, 0, 30000)
node1.stop()
# assert that we didn't yet archive the active commitlog segments
commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
active_segments = set(os.listdir(commitlog_dir))
self.assertFalse(set(os.listdir(tmp_commitlog)).issuperset(active_segments))
# start up the node & verify that the active commitlog segments were archived
node1.start()
self.assertTrue(set(os.listdir(tmp_commitlog)).issuperset(active_segments))
|
Add test for commitlog archivingfrom dtest import Tester, debug
from pytools import since, replace_in_file
import tempfile, shutil, glob, os, time
import distutils.dir_util
class CommitLogArchivingTest(Tester):
def __init__(self, *args, **kwargs):
Tester.__init__(self, *args, **kwargs)
def insert_rows(self, cursor, start, end):
for r in range(start, end):
cursor.execute("INSERT INTO ks.cf (key, val) VALUES ({r}, 'asdf');".format(r=r))
@since('2.0')
def test_active_commitlog_segments_archived_at_startup(self):
"""Test archive commit log segments are automatically archived at node startup"""
cluster = self.cluster
cluster.populate(1)
(node1,) = cluster.nodelist()
# Create a temp directory for storing commitlog archives:
tmp_commitlog = tempfile.mkdtemp()
debug("tmp_commitlog: " + tmp_commitlog)
# Edit commitlog_archiving.properties and set an archive
# command:
replace_in_file(os.path.join(node1.get_path(),'conf','commitlog_archiving.properties'),
[(r'^archive_command=.*$', 'archive_command=cp %path {tmp_commitlog}/%name'.format(
tmp_commitlog=tmp_commitlog))])
cluster.start()
cursor = self.patient_cql_connection(node1)
self.create_ks(cursor, 'ks', 1)
cursor.execute('CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')
debug("Writing 30,000 rows...")
self.insert_rows(cursor, 0, 30000)
node1.stop()
# assert that we didn't yet archive the active commitlog segments
commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
active_segments = set(os.listdir(commitlog_dir))
self.assertFalse(set(os.listdir(tmp_commitlog)).issuperset(active_segments))
# start up the node & verify that the active commitlog segments were archived
node1.start()
self.assertTrue(set(os.listdir(tmp_commitlog)).issuperset(active_segments))
|
<commit_before><commit_msg>Add test for commitlog archiving<commit_after>from dtest import Tester, debug
from pytools import since, replace_in_file
import tempfile, shutil, glob, os, time
import distutils.dir_util
class CommitLogArchivingTest(Tester):
def __init__(self, *args, **kwargs):
Tester.__init__(self, *args, **kwargs)
def insert_rows(self, cursor, start, end):
for r in range(start, end):
cursor.execute("INSERT INTO ks.cf (key, val) VALUES ({r}, 'asdf');".format(r=r))
@since('2.0')
def test_active_commitlog_segments_archived_at_startup(self):
"""Test archive commit log segments are automatically archived at node startup"""
cluster = self.cluster
cluster.populate(1)
(node1,) = cluster.nodelist()
# Create a temp directory for storing commitlog archives:
tmp_commitlog = tempfile.mkdtemp()
debug("tmp_commitlog: " + tmp_commitlog)
# Edit commitlog_archiving.properties and set an archive
# command:
replace_in_file(os.path.join(node1.get_path(),'conf','commitlog_archiving.properties'),
[(r'^archive_command=.*$', 'archive_command=cp %path {tmp_commitlog}/%name'.format(
tmp_commitlog=tmp_commitlog))])
cluster.start()
cursor = self.patient_cql_connection(node1)
self.create_ks(cursor, 'ks', 1)
cursor.execute('CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')
debug("Writing 30,000 rows...")
self.insert_rows(cursor, 0, 30000)
node1.stop()
# assert that we didn't yet archive the active commitlog segments
commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
active_segments = set(os.listdir(commitlog_dir))
self.assertFalse(set(os.listdir(tmp_commitlog)).issuperset(active_segments))
# start up the node & verify that the active commitlog segments were archived
node1.start()
self.assertTrue(set(os.listdir(tmp_commitlog)).issuperset(active_segments))
|
|
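The test above leans on a replace_in_file helper imported from pytools, whose implementation is not shown in the record. A rough sketch of what such a helper might do — treat it as an approximation, not the dtest code:

import re

def replace_in_file(path, replacements):
    # Rewrite a file, substituting whole lines that match a regex with the
    # given replacement string.
    with open(path) as f:
        lines = f.readlines()
    with open(path, 'w') as f:
        for line in lines:
            for pattern, replacement in replacements:
                if re.match(pattern, line):
                    line = replacement + '\n'
            f.write(line)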
141d3f90dd52f89eb5846618b067c22db83a64a1
|
api/management/commands/verifyuser.py
|
api/management/commands/verifyuser.py
|
from django.core.management.base import BaseCommand, CommandError
from api.models import Account
import fileinput
import sys
class Command(BaseCommand):
can_import_settings = True
def handle(self, *args, **options):
if len(args) < 1:
print 'Specify account'
return
account_id = int(args[0])
account = Account.objects.get(pk=account_id)
print "Set verification level for the account " + account.nickname + " (user is " + account.owner.username + ")"
while True:
print 'Specify verification level (Gold = 2, Silver = 1, Bronze = 3): '
line = sys.stdin.readline()
try:
choice = int(line)
except ValueError:
continue
if choice != 1 and choice != 2 and choice != 3:
continue
else:
account.verified = choice
account.save()
break
return
|
Add a command to verify users
|
Add a command to verify users
|
Python
|
apache-2.0
|
SchoolIdolTomodachi/SchoolIdolAPI,rdsathene/SchoolIdolAPI,laurenor/SchoolIdolAPI,dburr/SchoolIdolAPI,laurenor/SchoolIdolAPI,SchoolIdolTomodachi/SchoolIdolAPI,dburr/SchoolIdolAPI,rdsathene/SchoolIdolAPI,dburr/SchoolIdolAPI,SchoolIdolTomodachi/SchoolIdolAPI,rdsathene/SchoolIdolAPI,laurenor/SchoolIdolAPI
|
Add a command to verify users
|
from django.core.management.base import BaseCommand, CommandError
from api.models import Account
import fileinput
import sys
class Command(BaseCommand):
can_import_settings = True
def handle(self, *args, **options):
if len(args) < 1:
print 'Specify account'
return
account_id = int(args[0])
account = Account.objects.get(pk=account_id)
print "Set verification level for the account " + account.nickname + " (user is " + account.owner.username + ")"
while True:
print 'Specify verification level (Gold = 2, Silver = 1, Bronze = 3): '
line = sys.stdin.readline()
try:
choice = int(line)
except ValueError:
continue
if choice != 1 and choice != 2 and choice != 3:
continue
else:
account.verified = choice
account.save()
break
return
|
<commit_before><commit_msg>Add a command to verify users<commit_after>
|
from django.core.management.base import BaseCommand, CommandError
from api.models import Account
import fileinput
import sys
class Command(BaseCommand):
can_import_settings = True
def handle(self, *args, **options):
if len(args) < 1:
print 'Specify account'
return
account_id = int(args[0])
account = Account.objects.get(pk=account_id)
print "Set verification level for the account " + account.nickname + " (user is " + account.owner.username + ")"
while True:
print 'Specify verification level (Gold = 2, Silver = 1, Bronze = 3): '
line = sys.stdin.readline()
try:
choice = int(line)
except ValueError:
continue
if choice != 1 and choice != 2 and choice != 3:
continue
else:
account.verified = choice
account.save()
break
return
|
Add a command to verify usersfrom django.core.management.base import BaseCommand, CommandError
from api.models import Account
import fileinput
import sys
class Command(BaseCommand):
can_import_settings = True
def handle(self, *args, **options):
if len(args) < 1:
print 'Specify account'
return
account_id = int(args[0])
account = Account.objects.get(pk=account_id)
print "Set verification level for the account " + account.nickname + " (user is " + account.owner.username + ")"
while True:
print 'Specify verification level (Gold = 2, Silver = 1, Bronze = 3): '
line = sys.stdin.readline()
try:
choice = int(line)
except ValueError:
continue
if choice != 1 and choice != 2 and choice != 3:
continue
else:
account.verified = choice
account.save()
break
return
|
<commit_before><commit_msg>Add a command to verify users<commit_after>from django.core.management.base import BaseCommand, CommandError
from api.models import Account
import fileinput
import sys
class Command(BaseCommand):
can_import_settings = True
def handle(self, *args, **options):
if len(args) < 1:
print 'Specify account'
return
account_id = int(args[0])
account = Account.objects.get(pk=account_id)
print "Set verification level for the account " + account.nickname + " (user is " + account.owner.username + ")"
while True:
print 'Specify verification level (Gold = 2, Silver = 1, Bronze = 3): '
line = sys.stdin.readline()
try:
choice = int(line)
except ValueError:
continue
if choice != 1 and choice != 2 and choice != 3:
continue
else:
account.verified = choice
account.save()
break
return
|
|
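The command's stdin loop can be isolated from Django for clarity; this sketch keeps the same accept-only-1/2/3 validation and is runnable as-is:

import io

def read_level(stream):
    # Keep reading until the input parses as one of the accepted levels.
    for line in stream:
        try:
            choice = int(line)
        except ValueError:
            continue
        if choice in (1, 2, 3):
            return choice

print(read_level(io.StringIO("abc\n5\n2\n")))  # -> 2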
22924945638989ebf620c262ae8de37a8b4508c6
|
mistral/api/wsgi.py
|
mistral/api/wsgi.py
|
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mistral.api import app
from mistral import config
from mistral.engine1 import rpc
config.parse_args()
transport = rpc.get_transport()
application = app.setup_app(transport=transport)
|
Add WSGI script for API server
|
Add WSGI script for API server
Include a WSGI script for the API server so it can be used with Apache
or Nginx deployment.
Change-Id: I16166f78b5975dad0480a7ee4ccde47f234f5802
Implements: blueprint mistral-api-wsgi-script
|
Python
|
apache-2.0
|
StackStorm/mistral,dennybaa/mistral,dennybaa/mistral,openstack/mistral,StackStorm/mistral,openstack/mistral
|
Add WSGI script for API server
Include a WSGI script for the API server so it can be used with Apache
or Nginx deployment.
Change-Id: I16166f78b5975dad0480a7ee4ccde47f234f5802
Implements: blueprint mistral-api-wsgi-script
|
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mistral.api import app
from mistral import config
from mistral.engine1 import rpc
config.parse_args()
transport = rpc.get_transport()
application = app.setup_app(transport=transport)
|
<commit_before><commit_msg>Add WSGI script for API server
Include a WSGI script for the API server so it can be used with Apache
or Nginx deployment.
Change-Id: I16166f78b5975dad0480a7ee4ccde47f234f5802
Implements: blueprint mistral-api-wsgi-script<commit_after>
|
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mistral.api import app
from mistral import config
from mistral.engine1 import rpc
config.parse_args()
transport = rpc.get_transport()
application = app.setup_app(transport=transport)
|
Add WSGI script for API server
Include a WSGI script for the API server so it can be used with Apache
or Nginx deployment.
Change-Id: I16166f78b5975dad0480a7ee4ccde47f234f5802
Implements: blueprint mistral-api-wsgi-script# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mistral.api import app
from mistral import config
from mistral.engine1 import rpc
config.parse_args()
transport = rpc.get_transport()
application = app.setup_app(transport=transport)
|
<commit_before><commit_msg>Add WSGI script for API server
Include a WSGI script for the API server so it can be used with Apache
or Nginx deployment.
Change-Id: I16166f78b5975dad0480a7ee4ccde47f234f5802
Implements: blueprint mistral-api-wsgi-script<commit_after># Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mistral.api import app
from mistral import config
from mistral.engine1 import rpc
config.parse_args()
transport = rpc.get_transport()
application = app.setup_app(transport=transport)
|
|
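For a quick local check of a WSGI application object like the one above, the standard library suffices. A sketch for illustration only — the commit message targets Apache or Nginx deployments, not this:

from wsgiref.simple_server import make_server

def serve_locally(application, port=8080):
    # Development-only smoke test; not a production server.
    httpd = make_server('', port, application)
    httpd.serve_forever()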
9b5dbc51c9d20fdbdaf63aa4cce0ac683c22013c
|
contrib/filter_redirects.py
|
contrib/filter_redirects.py
|
"""
A simple script to filter the output of the redirects spreadsheet we made into
a CSV suitable for loading into the redirects app with
python manage.py import_redirects_csv ...
Usage:
python filter_redirects.py <spreadsheet.csv >redirects.csv
"""
from __future__ import absolute_import, print_function
import csv
import sys
ARCHIVE_ROOT = 'http://webarchive.okfn.org/okfn.org/201404'
def main():
reader = csv.DictReader(sys.stdin,
['from',
'action',
'to'])
# Discard header
next(reader)
print('from,to')
for row in reader:
if row['action'] == 'keep':
continue
elif row['action'] == 'redirect':
if not row['to'].strip():
raise RuntimeError('to cannot be blank if action=redirect')
print('%s,%s' % (_from(row['from']), row['to']))
elif row['action'] == 'redirect to archive':
print('%s,%s' % (_from(row['from']), ARCHIVE_ROOT + row['from']))
elif row['action'] == 'gone':
print('%s,' % _from(row['from']))
else:
raise RuntimeError('unrecognised action: %s' % row['action'])
def _from(url):
if url.endswith('/'):
return url
else:
return url + '/'
if __name__ == '__main__':
main()
|
Add script for munging redirects
|
Add script for munging redirects
|
Python
|
mit
|
okfn/website,okfn/foundation,MjAbuz/foundation,okfn/website,okfn/foundation,okfn/foundation,MjAbuz/foundation,okfn/website,MjAbuz/foundation,okfn/website,okfn/foundation,MjAbuz/foundation
|
Add script for munging redirects
|
"""
A simple script to filter the output of the redirects spreadsheet we made into
a CSV suitable for loading into the redirects app with
python manage.py import_redirects_csv ...
Usage:
python filter_redirects.py <spreadsheet.csv >redirects.csv
"""
from __future__ import absolute_import, print_function
import csv
import sys
ARCHIVE_ROOT = 'http://webarchive.okfn.org/okfn.org/201404'
def main():
reader = csv.DictReader(sys.stdin,
['from',
'action',
'to'])
# Discard header
next(reader)
print('from,to')
for row in reader:
if row['action'] == 'keep':
continue
elif row['action'] == 'redirect':
if not row['to'].strip():
raise RuntimeError('to cannot be blank if action=redirect')
print('%s,%s' % (_from(row['from']), row['to']))
elif row['action'] == 'redirect to archive':
print('%s,%s' % (_from(row['from']), ARCHIVE_ROOT + row['from']))
elif row['action'] == 'gone':
print('%s,' % _from(row['from']))
else:
raise RuntimeError('unrecognised action: %s' % row['action'])
def _from(url):
if url.endswith('/'):
return url
else:
return url + '/'
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for munging redirects<commit_after>
|
"""
A simple script to filter the output of the redirects spreadsheet we made into
a CSV suitable for loading into the redirects app with
python manage.py import_redirects_csv ...
Usage:
python filter_redirects.py <spreadsheet.csv >redirects.csv
"""
from __future__ import absolute_import, print_function
import csv
import sys
ARCHIVE_ROOT = 'http://webarchive.okfn.org/okfn.org/201404'
def main():
reader = csv.DictReader(sys.stdin,
['from',
'action',
'to'])
# Discard header
next(reader)
print('from,to')
for row in reader:
if row['action'] == 'keep':
continue
elif row['action'] == 'redirect':
if not row['to'].strip():
raise RuntimeError('to cannot be blank if action=redirect')
print('%s,%s' % (_from(row['from']), row['to']))
elif row['action'] == 'redirect to archive':
print('%s,%s' % (_from(row['from']), ARCHIVE_ROOT + row['from']))
elif row['action'] == 'gone':
print('%s,' % _from(row['from']))
else:
raise RuntimeError('unrecognised action: %s' % row['action'])
def _from(url):
if url.endswith('/'):
return url
else:
return url + '/'
if __name__ == '__main__':
main()
|
Add script for munging redirects"""
A simple script to filter the output of the redirects spreadsheet we made into
a CSV suitable for loading into the redirects app with
python manage.py import_redirects_csv ...
Usage:
python filter_redirects.py <spreadsheet.csv >redirects.csv
"""
from __future__ import absolute_import, print_function
import csv
import sys
ARCHIVE_ROOT = 'http://webarchive.okfn.org/okfn.org/201404'
def main():
reader = csv.DictReader(sys.stdin,
['from',
'action',
'to'])
# Discard header
next(reader)
print('from,to')
for row in reader:
if row['action'] == 'keep':
continue
elif row['action'] == 'redirect':
if not row['to'].strip():
raise RuntimeError('to cannot be blank if action=redirect')
print('%s,%s' % (_from(row['from']), row['to']))
elif row['action'] == 'redirect to archive':
print('%s,%s' % (_from(row['from']), ARCHIVE_ROOT + row['from']))
elif row['action'] == 'gone':
print('%s,' % _from(row['from']))
else:
raise RuntimeError('unrecognised action: %s' % row['action'])
def _from(url):
if url.endswith('/'):
return url
else:
return url + '/'
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for munging redirects<commit_after>"""
A simple script to filter the output of the redirects spreadsheet we made into
a CSV suitable for loading into the redirects app with
python manage.py import_redirects_csv ...
Usage:
python filter_redirects.py <spreadsheet.csv >redirects.csv
"""
from __future__ import absolute_import, print_function
import csv
import sys
ARCHIVE_ROOT = 'http://webarchive.okfn.org/okfn.org/201404'
def main():
reader = csv.DictReader(sys.stdin,
['from',
'action',
'to'])
# Discard header
next(reader)
print('from,to')
for row in reader:
if row['action'] == 'keep':
continue
elif row['action'] == 'redirect':
if not row['to'].strip():
raise RuntimeError('to cannot be blank if action=redirect')
print('%s,%s' % (_from(row['from']), row['to']))
elif row['action'] == 'redirect to archive':
print('%s,%s' % (_from(row['from']), ARCHIVE_ROOT + row['from']))
elif row['action'] == 'gone':
print('%s,' % _from(row['from']))
else:
raise RuntimeError('unrecognised action: %s' % row['action'])
def _from(url):
if url.endswith('/'):
return url
else:
return url + '/'
if __name__ == '__main__':
main()
|
|
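The _from helper in the script only normalises trailing slashes; an equivalent one-line form, with a couple of quick checks:

def _from(url):
    # Ensure exactly one trailing slash, as the script above does.
    return url if url.endswith('/') else url + '/'

assert _from('/about') == '/about/'
assert _from('/about/') == '/about/'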
9835b069ea931cbadae92a9e10bcdff513ae425d
|
readthedocs/core/management/commands/build_files.py
|
readthedocs/core/management/commands/build_files.py
|
import logging
from django.core.management.base import BaseCommand
from django.conf import settings
from projects import tasks
from projects.models import ImportedFile
from builds.models import Version
log = logging.getLogger(__name__)
class Command(BaseCommand):
help = '''\
Delete and re-create ImportedFile objects for all latest Versions, such
that they can be added to the search index. This is accomplished by walking the
filesystem for each project.
'''
def handle(self, *args, **kwargs):
'''
Build/index all versions or a single project's version
'''
# Delete all existing as a cleanup for any deleted projects.
ImportedFile.objects.all().delete()
if getattr(settings, 'INDEX_ONLY_LATEST', False):
queryset = Version.objects.filter(slug='latst')
else:
queryset = Version.objects.public()
for v in queryset:
log.info("Building files for %s" % v)
try:
tasks.fileify(v)
except Exception:
log.error('Build failed for %s' % v, exc_info=True)
|
import logging
from django.core.management.base import BaseCommand
from django.conf import settings
from projects import tasks
from projects.models import ImportedFile
from builds.models import Version
log = logging.getLogger(__name__)
class Command(BaseCommand):
help = '''\
Delete and re-create ImportedFile objects for all latest Versions, such
that they can be added to the search index. This is accomplished by walking the
filesystem for each project.
'''
def handle(self, *args, **kwargs):
'''
Build/index all versions or a single project's version
'''
# Delete all existing as a cleanup for any deleted projects.
ImportedFile.objects.all().delete()
if getattr(settings, 'INDEX_ONLY_LATEST', True):
queryset = Version.objects.filter(slug='latst')
else:
queryset = Version.objects.public()
for v in queryset:
log.info("Building files for %s" % v)
try:
tasks.fileify(v)
except Exception:
log.error('Build failed for %s' % v, exc_info=True)
|
Index only latest by default.
|
Index only latest by default.
|
Python
|
mit
|
singingwolfboy/readthedocs.org,VishvajitP/readthedocs.org,SteveViss/readthedocs.org,GovReady/readthedocs.org,CedarLogic/readthedocs.org,kenshinthebattosai/readthedocs.org,istresearch/readthedocs.org,laplaceliu/readthedocs.org,rtfd/readthedocs.org,kenshinthebattosai/readthedocs.org,fujita-shintaro/readthedocs.org,jerel/readthedocs.org,wanghaven/readthedocs.org,dirn/readthedocs.org,kenwang76/readthedocs.org,mhils/readthedocs.org,mhils/readthedocs.org,tddv/readthedocs.org,GovReady/readthedocs.org,tddv/readthedocs.org,mhils/readthedocs.org,soulshake/readthedocs.org,stevepiercy/readthedocs.org,sils1297/readthedocs.org,Carreau/readthedocs.org,kenwang76/readthedocs.org,clarkperkins/readthedocs.org,royalwang/readthedocs.org,laplaceliu/readthedocs.org,stevepiercy/readthedocs.org,sunnyzwh/readthedocs.org,atsuyim/readthedocs.org,takluyver/readthedocs.org,espdev/readthedocs.org,KamranMackey/readthedocs.org,espdev/readthedocs.org,gjtorikian/readthedocs.org,SteveViss/readthedocs.org,hach-que/readthedocs.org,dirn/readthedocs.org,VishvajitP/readthedocs.org,wanghaven/readthedocs.org,KamranMackey/readthedocs.org,LukasBoersma/readthedocs.org,michaelmcandrew/readthedocs.org,takluyver/readthedocs.org,CedarLogic/readthedocs.org,espdev/readthedocs.org,nikolas/readthedocs.org,gjtorikian/readthedocs.org,jerel/readthedocs.org,kdkeyser/readthedocs.org,hach-que/readthedocs.org,stevepiercy/readthedocs.org,safwanrahman/readthedocs.org,royalwang/readthedocs.org,safwanrahman/readthedocs.org,singingwolfboy/readthedocs.org,kdkeyser/readthedocs.org,michaelmcandrew/readthedocs.org,titiushko/readthedocs.org,davidfischer/readthedocs.org,agjohnson/readthedocs.org,raven47git/readthedocs.org,wanghaven/readthedocs.org,Tazer/readthedocs.org,LukasBoersma/readthedocs.org,davidfischer/readthedocs.org,Tazer/readthedocs.org,ojii/readthedocs.org,atsuyim/readthedocs.org,cgourlay/readthedocs.org,sunnyzwh/readthedocs.org,soulshake/readthedocs.org,rtfd/readthedocs.org,istresearch/readthedocs.org,asampat3090/readthedocs.org,michaelmcandrew/readthedocs.org,espdev/readthedocs.org,kenshinthebattosai/readthedocs.org,cgourlay/readthedocs.org,d0ugal/readthedocs.org,jerel/readthedocs.org,nikolas/readthedocs.org,royalwang/readthedocs.org,dirn/readthedocs.org,mhils/readthedocs.org,asampat3090/readthedocs.org,soulshake/readthedocs.org,takluyver/readthedocs.org,sunnyzwh/readthedocs.org,clarkperkins/readthedocs.org,raven47git/readthedocs.org,wijerasa/readthedocs.org,d0ugal/readthedocs.org,SteveViss/readthedocs.org,davidfischer/readthedocs.org,pombredanne/readthedocs.org,cgourlay/readthedocs.org,techtonik/readthedocs.org,mrshoki/readthedocs.org,espdev/readthedocs.org,techtonik/readthedocs.org,emawind84/readthedocs.org,michaelmcandrew/readthedocs.org,attakei/readthedocs-oauth,nyergler/pythonslides,laplaceliu/readthedocs.org,sid-kap/readthedocs.org,hach-que/readthedocs.org,Tazer/readthedocs.org,wanghaven/readthedocs.org,mrshoki/readthedocs.org,royalwang/readthedocs.org,VishvajitP/readthedocs.org,gjtorikian/readthedocs.org,KamranMackey/readthedocs.org,stevepiercy/readthedocs.org,techtonik/readthedocs.org,VishvajitP/readthedocs.org,fujita-shintaro/readthedocs.org,cgourlay/readthedocs.org,singingwolfboy/readthedocs.org,kdkeyser/readthedocs.org,safwanrahman/readthedocs.org,sils1297/readthedocs.org,LukasBoersma/readthedocs.org,asampat3090/readthedocs.org,mrshoki/readthedocs.org,asampat3090/readthedocs.org,kenshinthebattosai/readthedocs.org,hach-que/readthedocs.org,d0ugal/readthedocs.org,nyergler/pythonslides,Tazer/readthedocs.org,agjohnson/readthedocs.org,nyergle
r/pythonslides,mrshoki/readthedocs.org,emawind84/readthedocs.org,istresearch/readthedocs.org,d0ugal/readthedocs.org,fujita-shintaro/readthedocs.org,agjohnson/readthedocs.org,atsuyim/readthedocs.org,CedarLogic/readthedocs.org,sid-kap/readthedocs.org,agjohnson/readthedocs.org,jerel/readthedocs.org,wijerasa/readthedocs.org,soulshake/readthedocs.org,LukasBoersma/readthedocs.org,raven47git/readthedocs.org,sid-kap/readthedocs.org,titiushko/readthedocs.org,ojii/readthedocs.org,pombredanne/readthedocs.org,tddv/readthedocs.org,rtfd/readthedocs.org,techtonik/readthedocs.org,kdkeyser/readthedocs.org,safwanrahman/readthedocs.org,takluyver/readthedocs.org,sils1297/readthedocs.org,emawind84/readthedocs.org,sunnyzwh/readthedocs.org,Carreau/readthedocs.org,nyergler/pythonslides,dirn/readthedocs.org,sils1297/readthedocs.org,KamranMackey/readthedocs.org,emawind84/readthedocs.org,wijerasa/readthedocs.org,nikolas/readthedocs.org,rtfd/readthedocs.org,gjtorikian/readthedocs.org,pombredanne/readthedocs.org,CedarLogic/readthedocs.org,GovReady/readthedocs.org,kenwang76/readthedocs.org,wijerasa/readthedocs.org,Carreau/readthedocs.org,raven47git/readthedocs.org,ojii/readthedocs.org,istresearch/readthedocs.org,clarkperkins/readthedocs.org,clarkperkins/readthedocs.org,davidfischer/readthedocs.org,nikolas/readthedocs.org,sid-kap/readthedocs.org,attakei/readthedocs-oauth,laplaceliu/readthedocs.org,attakei/readthedocs-oauth,GovReady/readthedocs.org,atsuyim/readthedocs.org,singingwolfboy/readthedocs.org,SteveViss/readthedocs.org,Carreau/readthedocs.org,titiushko/readthedocs.org,ojii/readthedocs.org,attakei/readthedocs-oauth,fujita-shintaro/readthedocs.org,titiushko/readthedocs.org,kenwang76/readthedocs.org
|
import logging
from django.core.management.base import BaseCommand
from django.conf import settings
from projects import tasks
from projects.models import ImportedFile
from builds.models import Version
log = logging.getLogger(__name__)
class Command(BaseCommand):
help = '''\
Delete and re-create ImportedFile objects for all latest Versions, such
that they can be added to the search index. This is accomplished by walking the
filesystem for each project.
'''
def handle(self, *args, **kwargs):
'''
Build/index all versions or a single project's version
'''
# Delete all existing as a cleanup for any deleted projects.
ImportedFile.objects.all().delete()
if getattr(settings, 'INDEX_ONLY_LATEST', False):
queryset = Version.objects.filter(slug='latst')
else:
queryset = Version.objects.public()
for v in queryset:
log.info("Building files for %s" % v)
try:
tasks.fileify(v)
except Exception:
log.error('Build failed for %s' % v, exc_info=True)
Index only latest by default.
|
import logging
from django.core.management.base import BaseCommand
from django.conf import settings
from projects import tasks
from projects.models import ImportedFile
from builds.models import Version
log = logging.getLogger(__name__)
class Command(BaseCommand):
help = '''\
Delete and re-create ImportedFile objects for all latest Versions, such
that they can be added to the search index. This is accomplished by walking the
filesystem for each project.
'''
def handle(self, *args, **kwargs):
'''
Build/index all versions or a single project's version
'''
# Delete all existing as a cleanup for any deleted projects.
ImportedFile.objects.all().delete()
if getattr(settings, 'INDEX_ONLY_LATEST', True):
queryset = Version.objects.filter(slug='latst')
else:
queryset = Version.objects.public()
for v in queryset:
log.info("Building files for %s" % v)
try:
tasks.fileify(v)
except Exception:
log.error('Build failed for %s' % v, exc_info=True)
|
<commit_before>import logging
from django.core.management.base import BaseCommand
from django.conf import settings
from projects import tasks
from projects.models import ImportedFile
from builds.models import Version
log = logging.getLogger(__name__)
class Command(BaseCommand):
help = '''\
Delete and re-create ImportedFile objects for all latest Versions, such
that they can be added to the search index. This is accomplished by walking the
filesystem for each project.
'''
def handle(self, *args, **kwargs):
'''
Build/index all versions or a single project's version
'''
# Delete all existing as a cleanup for any deleted projects.
ImportedFile.objects.all().delete()
if getattr(settings, 'INDEX_ONLY_LATEST', False):
queryset = Version.objects.filter(slug='latst')
else:
queryset = Version.objects.public()
for v in queryset:
log.info("Building files for %s" % v)
try:
tasks.fileify(v)
except Exception:
log.error('Build failed for %s' % v, exc_info=True)
<commit_msg>Index only latest by default.<commit_after>
|
import logging
from django.core.management.base import BaseCommand
from django.conf import settings
from projects import tasks
from projects.models import ImportedFile
from builds.models import Version
log = logging.getLogger(__name__)
class Command(BaseCommand):
help = '''\
Delete and re-create ImportedFile objects for all latest Versions, such
that they can be added to the search index. This is accomplished by walking the
filesystem for each project.
'''
def handle(self, *args, **kwargs):
'''
Build/index all versions or a single project's version
'''
# Delete all existing as a cleanup for any deleted projects.
ImportedFile.objects.all().delete()
if getattr(settings, 'INDEX_ONLY_LATEST', True):
queryset = Version.objects.filter(slug='latst')
else:
queryset = Version.objects.public()
for v in queryset:
log.info("Building files for %s" % v)
try:
tasks.fileify(v)
except Exception:
log.error('Build failed for %s' % v, exc_info=True)
|
import logging
from django.core.management.base import BaseCommand
from django.conf import settings
from projects import tasks
from projects.models import ImportedFile
from builds.models import Version
log = logging.getLogger(__name__)
class Command(BaseCommand):
help = '''\
Delete and re-create ImportedFile objects for all latest Versions, such
that they can be added to the search index. This is accomplished by walking the
filesystem for each project.
'''
def handle(self, *args, **kwargs):
'''
Build/index all versions or a single project's version
'''
# Delete all existing as a cleanup for any deleted projects.
ImportedFile.objects.all().delete()
if getattr(settings, 'INDEX_ONLY_LATEST', False):
queryset = Version.objects.filter(slug='latst')
else:
queryset = Version.objects.public()
for v in queryset:
log.info("Building files for %s" % v)
try:
tasks.fileify(v)
except Exception:
log.error('Build failed for %s' % v, exc_info=True)
Index only latest by default.import logging
from django.core.management.base import BaseCommand
from django.conf import settings
from projects import tasks
from projects.models import ImportedFile
from builds.models import Version
log = logging.getLogger(__name__)
class Command(BaseCommand):
help = '''\
Delete and re-create ImportedFile objects for all latest Versions, such
that they can be added to the search index. This is accomplished by walking the
filesystem for each project.
'''
def handle(self, *args, **kwargs):
'''
Build/index all versions or a single project's version
'''
# Delete all existing as a cleanup for any deleted projects.
ImportedFile.objects.all().delete()
if getattr(settings, 'INDEX_ONLY_LATEST', True):
queryset = Version.objects.filter(slug='latst')
else:
queryset = Version.objects.public()
for v in queryset:
log.info("Building files for %s" % v)
try:
tasks.fileify(v)
except Exception:
log.error('Build failed for %s' % v, exc_info=True)
|
<commit_before>import logging
from django.core.management.base import BaseCommand
from django.conf import settings
from projects import tasks
from projects.models import ImportedFile
from builds.models import Version
log = logging.getLogger(__name__)
class Command(BaseCommand):
help = '''\
Delete and re-create ImportedFile objects for all latest Versions, such
that they can be added to the search index. This is accomplished by walking the
filesystem for each project.
'''
def handle(self, *args, **kwargs):
'''
Build/index all versions or a single project's version
'''
# Delete all existing as a cleanup for any deleted projects.
ImportedFile.objects.all().delete()
if getattr(settings, 'INDEX_ONLY_LATEST', False):
queryset = Version.objects.filter(slug='latst')
else:
queryset = Version.objects.public()
for v in queryset:
log.info("Building files for %s" % v)
try:
tasks.fileify(v)
except Exception:
log.error('Build failed for %s' % v, exc_info=True)
<commit_msg>Index only latest by default.<commit_after>import logging
from django.core.management.base import BaseCommand
from django.conf import settings
from projects import tasks
from projects.models import ImportedFile
from builds.models import Version
log = logging.getLogger(__name__)
class Command(BaseCommand):
help = '''\
Delete and re-create ImportedFile objects for all latest Versions, such
that they can be added to the search index. This is accomplished by walking the
filesystem for each project.
'''
def handle(self, *args, **kwargs):
'''
Build/index all versions or a single project's version
'''
# Delete all existing as a cleanup for any deleted projects.
ImportedFile.objects.all().delete()
if getattr(settings, 'INDEX_ONLY_LATEST', True):
queryset = Version.objects.filter(slug='latst')
else:
queryset = Version.objects.public()
for v in queryset:
log.info("Building files for %s" % v)
try:
tasks.fileify(v)
except Exception:
log.error('Build failed for %s' % v, exc_info=True)
|
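The entire diff in this record is the getattr fallback (False to True): deployments that never define INDEX_ONLY_LATEST now index only the latest version, matching the commit message. (The slug 'latst' looks like a typo for 'latest', but it appears on both sides of the recorded commit, so it is preserved here as recorded.) A small demonstration of the fallback semantics:

class FakeSettings:
    pass  # INDEX_ONLY_LATEST deliberately not defined

# The default only matters when the attribute is absent.
assert getattr(FakeSettings, 'INDEX_ONLY_LATEST', True) is True
assert getattr(FakeSettings, 'INDEX_ONLY_LATEST', False) is False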
b6d253aac72abcbee53460b56e76aac7f33dd199
|
scripts/get_bank_registry_at.py
|
scripts/get_bank_registry_at.py
|
import json
import csv
import requests
URL = "https://www.oenb.at/docroot/downloads_observ/sepa-zv-vz_gesamt.csv"
def process():
registry = []
with requests.get(URL, stream=True) as csvfile:
count = 0
for row in csvfile.iter_lines():
if count != 6:
count += 1
elif len(row.decode("latin1").split(";")) != 21:
continue
else:
registry.append(
{
"country_code": "AT",
"primary": True,
"bic": row.decode("latin1").split(";")[18].strip().upper(),
"bank_code": row.decode("latin1").split(";")[2].strip(),
"name": row.decode("latin1").split(";")[6].strip(),
"short_name": row.decode("latin1").split(";")[6].strip(),
}
)
return registry
if __name__ == "__main__":
with open("schwifty/bank_registry/generated_at.json", "w") as fp:
json.dump(process(), fp, indent=2)
|
Add script to generate AT bank registry
|
Add script to generate AT bank registry
|
Python
|
mit
|
figo-connect/schwifty
|
Add script to generate AT bank registry
|
import json
import csv
import requests
URL = "https://www.oenb.at/docroot/downloads_observ/sepa-zv-vz_gesamt.csv"
def process():
registry = []
with requests.get(URL, stream=True) as csvfile:
count = 0
for row in csvfile.iter_lines():
if count != 6:
count += 1
elif len(row.decode("latin1").split(";")) != 21:
continue
else:
registry.append(
{
"country_code": "AT",
"primary": True,
"bic": row.decode("latin1").split(";")[18].strip().upper(),
"bank_code": row.decode("latin1").split(";")[2].strip(),
"name": row.decode("latin1").split(";")[6].strip(),
"short_name": row.decode("latin1").split(";")[6].strip(),
}
)
return registry
if __name__ == "__main__":
with open("schwifty/bank_registry/generated_at.json", "w") as fp:
json.dump(process(), fp, indent=2)
|
<commit_before><commit_msg>Add script to generate AT bank registry<commit_after>
|
import json
import csv
import requests
URL = "https://www.oenb.at/docroot/downloads_observ/sepa-zv-vz_gesamt.csv"
def process():
registry = []
with requests.get(URL, stream=True) as csvfile:
count = 0
for row in csvfile.iter_lines():
if count != 6:
count += 1
elif len(row.decode("latin1").split(";")) != 21:
continue
else:
registry.append(
{
"country_code": "AT",
"primary": True,
"bic": row.decode("latin1").split(";")[18].strip().upper(),
"bank_code": row.decode("latin1").split(";")[2].strip(),
"name": row.decode("latin1").split(";")[6].strip(),
"short_name": row.decode("latin1").split(";")[6].strip(),
}
)
return registry
if __name__ == "__main__":
with open("schwifty/bank_registry/generated_at.json", "w") as fp:
json.dump(process(), fp, indent=2)
|
Add script to generate AT bank registryimport json
import csv
import requests
URL = "https://www.oenb.at/docroot/downloads_observ/sepa-zv-vz_gesamt.csv"
def process():
registry = []
with requests.get(URL, stream=True) as csvfile:
count = 0
for row in csvfile.iter_lines():
if count != 6:
count += 1
elif len(row.decode("latin1").split(";")) != 21:
continue
else:
registry.append(
{
"country_code": "AT",
"primary": True,
"bic": row.decode("latin1").split(";")[18].strip().upper(),
"bank_code": row.decode("latin1").split(";")[2].strip(),
"name": row.decode("latin1").split(";")[6].strip(),
"short_name": row.decode("latin1").split(";")[6].strip(),
}
)
return registry
if __name__ == "__main__":
with open("schwifty/bank_registry/generated_at.json", "w") as fp:
json.dump(process(), fp, indent=2)
|
<commit_before><commit_msg>Add script to generate AT bank registry<commit_after>import json
import csv
import requests
URL = "https://www.oenb.at/docroot/downloads_observ/sepa-zv-vz_gesamt.csv"
def process():
registry = []
with requests.get(URL, stream=True) as csvfile:
count = 0
for row in csvfile.iter_lines():
if count != 6:
count += 1
elif len(row.decode("latin1").split(";")) != 21:
continue
else:
registry.append(
{
"country_code": "AT",
"primary": True,
"bic": row.decode("latin1").split(";")[18].strip().upper(),
"bank_code": row.decode("latin1").split(";")[2].strip(),
"name": row.decode("latin1").split(";")[6].strip(),
"short_name": row.decode("latin1").split(";")[6].strip(),
}
)
return registry
if __name__ == "__main__":
with open("schwifty/bank_registry/generated_at.json", "w") as fp:
json.dump(process(), fp, indent=2)
|
|
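The script above decodes and splits each row several times; a tidier, behaviour-equivalent sketch splits once. The field indices (2, 6, 18) mirror the script and are taken on trust from the OeNB CSV layout:

def parse_row(raw):
    # Decode once, split once; None signals a row with the wrong field count.
    fields = raw.decode("latin1").split(";")
    if len(fields) != 21:
        return None
    return {
        "country_code": "AT",
        "primary": True,
        "bic": fields[18].strip().upper(),
        "bank_code": fields[2].strip(),
        "name": fields[6].strip(),
        "short_name": fields[6].strip(),
    }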
e6a8bd61ed82a1de862d8eadfdb31dda5577b1a8
|
cnxepub/tests/scripts/test_collated_single_html.py
|
cnxepub/tests/scripts/test_collated_single_html.py
|
# -*- coding: utf-8 -*-
# ###
# Copyright (c) 2016, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
import mimetypes
import os.path
import tempfile
import unittest
try:
from unittest import mock
except ImportError:
import mock
from lxml import etree
from ...html_parsers import HTML_DOCUMENT_NAMESPACES
from ...testing import TEST_DATA_DIR, captured_output
class CollatedSingleHTMLTestCase(unittest.TestCase):
maxDiff = None
@property
def target(self):
from ...scripts.collated_single_html.main import main
return main
@property
def path_to_html(self):
return os.path.join(TEST_DATA_DIR, 'desserts-single-page.html')
def test_valid(self):
return_code = self.target([self.path_to_html])
self.assertEqual(return_code, 0)
|
Add a test for the validate-collated script
|
Add a test for the validate-collated script
|
Python
|
agpl-3.0
|
Connexions/cnx-epub,Connexions/cnx-epub,Connexions/cnx-epub
|
Add a test for the validate-collated script
|
# -*- coding: utf-8 -*-
# ###
# Copyright (c) 2016, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
import mimetypes
import os.path
import tempfile
import unittest
try:
from unittest import mock
except ImportError:
import mock
from lxml import etree
from ...html_parsers import HTML_DOCUMENT_NAMESPACES
from ...testing import TEST_DATA_DIR, captured_output
class CollatedSingleHTMLTestCase(unittest.TestCase):
maxDiff = None
@property
def target(self):
from ...scripts.collated_single_html.main import main
return main
@property
def path_to_html(self):
return os.path.join(TEST_DATA_DIR, 'desserts-single-page.html')
def test_valid(self):
return_code = self.target([self.path_to_html])
self.assertEqual(return_code, 0)
|
<commit_before><commit_msg>Add a test for the validate-collated script<commit_after>
|
# -*- coding: utf-8 -*-
# ###
# Copyright (c) 2016, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
import mimetypes
import os.path
import tempfile
import unittest
try:
from unittest import mock
except ImportError:
import mock
from lxml import etree
from ...html_parsers import HTML_DOCUMENT_NAMESPACES
from ...testing import TEST_DATA_DIR, captured_output
class CollatedSingleHTMLTestCase(unittest.TestCase):
maxDiff = None
@property
def target(self):
from ...scripts.collated_single_html.main import main
return main
@property
def path_to_html(self):
return os.path.join(TEST_DATA_DIR, 'desserts-single-page.html')
def test_valid(self):
return_code = self.target([self.path_to_html])
self.assertEqual(return_code, 0)
|
Add a test for the validate-collated script# -*- coding: utf-8 -*-
# ###
# Copyright (c) 2016, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
import mimetypes
import os.path
import tempfile
import unittest
try:
from unittest import mock
except ImportError:
import mock
from lxml import etree
from ...html_parsers import HTML_DOCUMENT_NAMESPACES
from ...testing import TEST_DATA_DIR, captured_output
class CollatedSingleHTMLTestCase(unittest.TestCase):
maxDiff = None
@property
def target(self):
from ...scripts.collated_single_html.main import main
return main
@property
def path_to_html(self):
return os.path.join(TEST_DATA_DIR, 'desserts-single-page.html')
def test_valid(self):
return_code = self.target([self.path_to_html])
self.assertEqual(return_code, 0)
|
<commit_before><commit_msg>Add a test for the validate-collated script<commit_after># -*- coding: utf-8 -*-
# ###
# Copyright (c) 2016, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
import mimetypes
import os.path
import tempfile
import unittest
try:
from unittest import mock
except ImportError:
import mock
from lxml import etree
from ...html_parsers import HTML_DOCUMENT_NAMESPACES
from ...testing import TEST_DATA_DIR, captured_output
class CollatedSingleHTMLTestCase(unittest.TestCase):
maxDiff = None
@property
def target(self):
from ...scripts.collated_single_html.main import main
return main
@property
def path_to_html(self):
return os.path.join(TEST_DATA_DIR, 'desserts-single-page.html')
def test_valid(self):
return_code = self.target([self.path_to_html])
self.assertEqual(return_code, 0)
|
|
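The target property above imports main() only when accessed, so collecting the test suite does not pay the import cost or trigger import-time side effects. A minimal, runnable sketch of the same pattern (the imported name here is purely illustrative):

import unittest

class LazyTargetTestCase(unittest.TestCase):
    @property
    def target(self):
        # Deferred import: resolved at test run time, not collection time.
        from json import loads
        return loads

    def test_target(self):
        self.assertEqual(self.target('{"ok": true}'), {'ok': True})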
aa777fd643f2eccebde94188d6179e820e84cc49
|
test/test_SimExample.py
|
test/test_SimExample.py
|
from os.path import dirname, realpath, sep, pardir
import sys
sys.path.append(dirname(realpath(__file__)) + sep + pardir + sep +
"examples")
import simExample
def test_sim():
simExample.run_simulation()
|
Add a test runner for simExample.
|
Add a test runner for simExample.
|
Python
|
mit
|
Tech-XCorp/ultracold-ions,hosseinsadeghi/ultracold-ions,Tech-XCorp/ultracold-ions,hosseinsadeghi/ultracold-ions
|
Add a test runner for simExample.
|
from os.path import dirname, realpath, sep, pardir
import sys
sys.path.append(dirname(realpath(__file__)) + sep + pardir + sep +
"examples")
import simExample
def test_sim():
simExample.run_simulation()
|
<commit_before><commit_msg>Add a test runner for simExample.<commit_after>
|
from os.path import dirname, realpath, sep, pardir
import sys
sys.path.append(dirname(realpath(__file__)) + sep + pardir + sep +
"examples")
import simExample
def test_sim():
simExample.run_simulation()
|
Add a test runner for simExample.from os.path import dirname, realpath, sep, pardir
import sys
sys.path.append(dirname(realpath(__file__)) + sep + pardir + sep +
"examples")
import simExample
def test_sim():
simExample.run_simulation()
|
<commit_before><commit_msg>Add a test runner for simExample.<commit_after>from os.path import dirname, realpath, sep, pardir
import sys
sys.path.append(dirname(realpath(__file__)) + sep + pardir + sep +
"examples")
import simExample
def test_sim():
simExample.run_simulation()
|
|
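The sys.path manipulation in the test is string-heavy; on Python 3 the same setup reads more cleanly with pathlib (a sketch, equivalent in effect):

import sys
from pathlib import Path

# Append <repo>/examples, i.e. the sibling of the test file's directory.
sys.path.append(str(Path(__file__).resolve().parent.parent / "examples"))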
f115257cfc51b621554ac1d8de984a329f6a1942
|
tests/test_api_views.py
|
tests/test_api_views.py
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "api.api.settings")
import django
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from api.webview.views import DocumentList
django.setup()
class APIViewTests(TestCase):
def setUp(self):
self.factory = APIRequestFactory()
def test_document_view(self):
view = DocumentList.as_view()
request = self.factory.get(
'/documents/'
)
response = view(request)
self.assertEqual(response.status_code, 200)
|
Move api tests to test dir
|
Move api tests to test dir
|
Python
|
apache-2.0
|
CenterForOpenScience/scrapi,mehanig/scrapi,mehanig/scrapi,felliott/scrapi,erinspace/scrapi,fabianvf/scrapi,fabianvf/scrapi,felliott/scrapi,erinspace/scrapi,CenterForOpenScience/scrapi
|
Move api tests to test dir
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "api.api.settings")
import django
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from api.webview.views import DocumentList
django.setup()
class APIViewTests(TestCase):
def setUp(self):
self.factory = APIRequestFactory()
def test_document_view(self):
view = DocumentList.as_view()
request = self.factory.get(
'/documents/'
)
response = view(request)
self.assertEqual(response.status_code, 200)
|
<commit_before><commit_msg>Move api tests to test dir<commit_after>
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "api.api.settings")
import django
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from api.webview.views import DocumentList
django.setup()
class APIViewTests(TestCase):
def setUp(self):
self.factory = APIRequestFactory()
def test_document_view(self):
view = DocumentList.as_view()
request = self.factory.get(
'/documents/'
)
response = view(request)
self.assertEqual(response.status_code, 200)
|
Move api tests to test dirimport os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "api.api.settings")
import django
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from api.webview.views import DocumentList
django.setup()
class APIViewTests(TestCase):
def setUp(self):
self.factory = APIRequestFactory()
def test_document_view(self):
view = DocumentList.as_view()
request = self.factory.get(
'/documents/'
)
response = view(request)
self.assertEqual(response.status_code, 200)
|
<commit_before><commit_msg>Move api tests to test dir<commit_after>import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "api.api.settings")
import django
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from api.webview.views import DocumentList
django.setup()
class APIViewTests(TestCase):
def setUp(self):
self.factory = APIRequestFactory()
def test_document_view(self):
view = DocumentList.as_view()
request = self.factory.get(
'/documents/'
)
response = view(request)
self.assertEqual(response.status_code, 200)
|
|
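The `APIRequestFactory` pattern in the commit above extends naturally to views that take URL keyword arguments. A hedged sketch, where `DocumentDetail` is a hypothetical retrieve view (its name and URL are illustrative, not part of the commit):

from rest_framework.test import APIRequestFactory

def get_detail_status(pk=1):
    # DocumentDetail is hypothetical; the extra kwargs passed to the view
    # mirror what URLconf captures would supply.
    from api.webview.views import DocumentDetail
    factory = APIRequestFactory()
    request = factory.get('/documents/%d/' % pk)
    response = DocumentDetail.as_view()(request, pk=pk)
    return response.status_code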
5a28eca2039bce9b6e8102c9330c3087ece9484a
|
cura/ProfileReader.py
|
cura/ProfileReader.py
|
# Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from UM.PluginObject import PluginObject
## A type of plug-in that reads profiles from a file.
#
# The profile is then stored as an instance container of the "user profile" type.
class ProfileReader(PluginObject):
def __init__(self):
super().__init__()
## Read profile data from a file and return a filled profile.
#
# \return \type{Profile} The profile that was obtained from the file.
def read(self, file_name):
raise NotImplementedError("Profile reader plug-in was not correctly implemented. The read function was not implemented.")
|
Add profile reader plug-in type
|
Add profile reader plug-in type
This type of plug-in will load a file as an instance container of the user profile type.
Contributes to issue CURA-1278.
|
Python
|
agpl-3.0
|
fieldOfView/Cura,Curahelper/Cura,ynotstartups/Wanhao,fieldOfView/Cura,totalretribution/Cura,Curahelper/Cura,hmflash/Cura,hmflash/Cura,senttech/Cura,totalretribution/Cura,senttech/Cura,ynotstartups/Wanhao
|
Add profile reader plug-in type
This type of plug-in will load a file as an instance container of the user profile type.
Contributes to issue CURA-1278.
|
# Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from UM.PluginObject import PluginObject
## A type of plug-in that reads profiles from a file.
#
# The profile is then stored as an instance container of the "user profile" type.
class ProfileReader(PluginObject):
def __init__(self):
super().__init__()
## Read profile data from a file and return a filled profile.
#
# \return \type{Profile} The profile that was obtained from the file.
def read(self, file_name):
raise NotImplementedError("Profile reader plug-in was not correctly implemented. The read function was not implemented.")
|
<commit_before><commit_msg>Add profile reader plug-in type
This type of plug-in will load a file as an instance container of the user profile type.
Contributes to issue CURA-1278.<commit_after>
|
# Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from UM.PluginObject import PluginObject
## A type of plug-in that reads profiles from a file.
#
# The profile is then stored as an instance container of the "user profile" type.
class ProfileReader(PluginObject):
def __init__(self):
super().__init__()
## Read profile data from a file and return a filled profile.
#
# \return \type{Profile} The profile that was obtained from the file.
def read(self, file_name):
raise NotImplementedError("Profile reader plug-in was not correctly implemented. The read function was not implemented.")
|
Add profile reader plug-in type
This type of plug-in will load a file as an instance container of the user profile type.
Contributes to issue CURA-1278.# Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from UM.PluginObject import PluginObject
## A type of plug-in that reads profiles from a file.
#
# The profile is then stored as an instance container of the "user profile" type.
class ProfileReader(PluginObject):
def __init__(self):
super().__init__()
## Read profile data from a file and return a filled profile.
#
# \return \type{Profile} The profile that was obtained from the file.
def read(self, file_name):
raise NotImplementedError("Profile reader plug-in was not correctly implemented. The read function was not implemented.")
|
<commit_before><commit_msg>Add profile reader plug-in type
This type of plug-in will load a file as an instance container of the user profile type.
Contributes to issue CURA-1278.<commit_after># Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from UM.PluginObject import PluginObject
## A type of plug-in that reads profiles from a file.
#
# The profile is then stored as an instance container of the "user profile" type.
class ProfileReader(PluginObject):
def __init__(self):
super().__init__()
## Read profile data from a file and return a filled profile.
#
# \return \type{Profile} The profile that was obtained from the file.
def read(self, file_name):
raise NotImplementedError("Profile reader plug-in was not correctly implemented. The read function was not implemented.")
|
|
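A concrete plug-in built on this base overrides `read()` and returns the loaded profile. A minimal illustrative subclass, assuming a hypothetical line-oriented `key=value` profile format (the class name, the format, and the dict return value are all assumptions, not Cura's real reader contract):

class KeyValueProfileReader(ProfileReader):
    """ Hypothetical reader for a plain 'key=value' profile file. """

    def read(self, file_name):
        profile = {}
        with open(file_name) as f:
            for line in f:
                line = line.strip()
                # Skip blank lines and comments.
                if line and not line.startswith("#"):
                    key, _, value = line.partition("=")
                    profile[key.strip()] = value.strip()
        return profile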
fbca1fe9e8df6cde95c7082bfc465f75ad0380b1
|
main.py
|
main.py
|
import subprocess
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import re
import math
from scipy.interpolate import griddata
class FEMMans:
def __init__(self, points, preamble):
self.points = points
self.preamble = preamble
self.x = np.zeros(points)
self.y = np.zeros(points)
self.B = np.zeros(points, dtype=np.complex64)
class FEMM:
def __init__(self):
pass
def readans(self, path):
with open(path, "r") as f:
firstline = f.readline()
match = re.search("\[Format\]\s*=\s*([\d\.]+)", firstline)
if match:
if match.group(1) == "4.0":
return self.readans40(f)
def readans40(self, f):
preamble = "" # Everything before the [Solution] tag
points = None # Number of datapoints to expect
ans = None
index = 0
dataregex = re.compile(r"^([\d\.e-]+)\s+([\d\.e-]+)\s+([\d\.e-]+)\s+([\d\.e-]+)\s+([\d\.e-]+)\s+$")
aftersolution = False
for line in f:
if not aftersolution:
preamble += line
if line == ("[Solution]\n"):
aftersolution = True
elif points is None: # First line after [Solution] gives the number of points in the solution
points = int(line)
ans = FEMMans(points, preamble)
else: # Read data point and add to dataset
match = dataregex.search(line)
if match:
ans.x[index] = float(match.group(1))
ans.y[index] = float(match.group(2))
ans.B[index] = float(match.group(3)) + float(match.group(4)) * 1j
index += 1
return ans
def plotans(self, ans):
grid_x, grid_y = np.mgrid[math.floor(ans.x.min()):math.ceil(ans.x.max()):1000j, math.floor(ans.y.min()):math.ceil(ans.y.max()):1000j]
grid = griddata(np.vstack((ans.x, ans.y)).T, np.absolute(ans.B), (grid_x, grid_y), method='cubic')
plt.imshow(grid.T, extent=(math.floor(ans.x.min()), math.ceil(ans.x.max()), math.floor(ans.y.min()), math.ceil(ans.y.max())), cmap=plt.get_cmap("jet"))
plt.contour(grid_x, grid_y, grid)
plt.show()
#/home/drluke/.wine/drive_c/femm42/examples/test.ans
if __name__ == "__main__":
a = FEMM()
ansr = a.readans("/home/drluke/.wine/drive_c/femm42/examples/test.ans")
a.plotans(ansr)
|
Add reading and plotting of .ans files
|
Add reading and plotting of .ans files
|
Python
|
mit
|
DrLuke/FEMM-bode
|
Add reading and plotting of .ans files
|
import subprocess
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import re
import math
from scipy.interpolate import griddata
class FEMMans:
def __init__(self, points, preamble):
self.points = points
self.preamble = preamble
self.x = np.zeros(points)
self.y = np.zeros(points)
self.B = np.zeros(points, dtype=np.complex64)
class FEMM:
def __init__(self):
pass
def readans(self, path):
with open(path, "r") as f:
firstline = f.readline()
match = re.search("\[Format\]\s*=\s*([\d\.]+)", firstline)
if match:
if match.group(1) == "4.0":
return self.readans40(f)
def readans40(self, f):
preamble = "" # Everything before the [Solution] tag
points = None # Number of datapoints to expect
ans = None
index = 0
dataregex = re.compile(r"^([\d\.e-]+)\s+([\d\.e-]+)\s+([\d\.e-]+)\s+([\d\.e-]+)\s+([\d\.e-]+)\s+$")
aftersolution = False
for line in f:
if not aftersolution:
preamble += line
if line == ("[Solution]\n"):
aftersolution = True
elif points is None: # First line after [Solution] gives the number of points in the solution
points = int(line)
ans = FEMMans(points, preamble)
else: # Read data point and add to dataset
match = dataregex.search(line)
if match:
ans.x[index] = float(match.group(1))
ans.y[index] = float(match.group(2))
ans.B[index] = float(match.group(3)) + float(match.group(4)) * 1j
index += 1
return ans
def plotans(self, ans):
grid_x, grid_y = np.mgrid[math.floor(ans.x.min()):math.ceil(ans.x.max()):1000j, math.floor(ans.y.min()):math.ceil(ans.y.max()):1000j]
grid = griddata(np.vstack((ans.x, ans.y)).T, np.absolute(ans.B), (grid_x, grid_y), method='cubic')
plt.imshow(grid.T, extent=(math.floor(ans.x.min()), math.ceil(ans.x.max()), math.floor(ans.y.min()), math.ceil(ans.y.max())), cmap=plt.get_cmap("jet"))
plt.contour(grid_x, grid_y, grid)
plt.show()
#/home/drluke/.wine/drive_c/femm42/examples/test.ans
if __name__ == "__main__":
a = FEMM()
ansr = a.readans("/home/drluke/.wine/drive_c/femm42/examples/test.ans")
a.plotans(ansr)
|
<commit_before><commit_msg>Add reading and plotting of .ans files<commit_after>
|
import subprocess
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import re
import math
from scipy.interpolate import griddata
class FEMMans:
def __init__(self, points, preamble):
self.points = points
self.preamble = preamble
self.x = np.zeros(points)
self.y = np.zeros(points)
self.B = np.zeros(points, dtype=np.complex64)
class FEMM:
def __init__(self):
pass
def readans(self, path):
with open(path, "r") as f:
firstline = f.readline()
match = re.search("\[Format\]\s*=\s*([\d\.]+)", firstline)
if match:
if match.group(1) == "4.0":
return self.readans40(f)
def readans40(self, f):
preamble = "" # Everything before the [Solution] tag
points = None # Number of datapoints to expect
ans = None
index = 0
dataregex = re.compile(r"^([\d\.e-]+)\s+([\d\.e-]+)\s+([\d\.e-]+)\s+([\d\.e-]+)\s+([\d\.e-]+)\s+$")
aftersolution = False
for line in f:
if not aftersolution:
preamble += line
if line == ("[Solution]\n"):
aftersolution = True
elif points is None: # First line after [Solution] gives the number of points in the solution
points = int(line)
ans = FEMMans(points, preamble)
else: # Read data point and add to dataset
match = dataregex.search(line)
if match:
ans.x[index] = float(match.group(1))
ans.y[index] = float(match.group(2))
ans.B[index] = float(match.group(3)) + float(match.group(4)) * 1j
index += 1
return ans
def plotans(self, ans):
grid_x, grid_y = np.mgrid[math.floor(ans.x.min()):math.ceil(ans.x.max()):1000j, math.floor(ans.y.min()):math.ceil(ans.y.max()):1000j]
grid = griddata(np.vstack((ans.x, ans.y)).T, np.absolute(ans.B), (grid_x, grid_y), method='cubic')
plt.imshow(grid.T, extent=(math.floor(ans.x.min()), math.ceil(ans.x.max()), math.floor(ans.y.min()), math.ceil(ans.y.max())), cmap=plt.get_cmap("jet"))
plt.contour(grid_x, grid_y, grid)
plt.show()
#/home/drluke/.wine/drive_c/femm42/examples/test.ans
if __name__ == "__main__":
a = FEMM()
ansr = a.readans("/home/drluke/.wine/drive_c/femm42/examples/test.ans")
a.plotans(ansr)
|
Add reading and plotting of .ans filesimport subprocess
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import re
import math
from scipy.interpolate import griddata
class FEMMans:
def __init__(self, points, preamble):
self.points = points
self.preamble = preamble
self.x = np.zeros(points)
self.y = np.zeros(points)
self.B = np.zeros(points, dtype=np.complex64)
class FEMM:
def __init__(self):
pass
def readans(self, path):
with open(path, "r") as f:
firstline = f.readline()
match = re.search("\[Format\]\s*=\s*([\d\.]+)", firstline)
if match:
if match.group(1) == "4.0":
return self.readans40(f)
def readans40(self, f):
preamble = "" # Everything before the [Solution] tag
points = None # Number of datapoints to expect
ans = None
index = 0
dataregex = re.compile(r"^([\d\.e-]+)\s+([\d\.e-]+)\s+([\d\.e-]+)\s+([\d\.e-]+)\s+([\d\.e-]+)\s+$")
aftersolution = False
for line in f:
if not aftersolution:
preamble += line
if line == ("[Solution]\n"):
aftersolution = True
elif points is None: # First line after [Solution] gives the number of points in the solution
points = int(line)
ans = FEMMans(points, preamble)
else: # Read data point and add to dataset
match = dataregex.search(line)
if match:
ans.x[index] = float(match.group(1))
ans.y[index] = float(match.group(2))
ans.B[index] = float(match.group(3)) + float(match.group(4)) * 1j
index += 1
return ans
def plotans(self, ans):
grid_x, grid_y = np.mgrid[math.floor(ans.x.min()):math.ceil(ans.x.max()):1000j, math.floor(ans.y.min()):math.ceil(ans.y.max()):1000j]
grid = griddata(np.vstack((ans.x, ans.y)).T, np.absolute(ans.B), (grid_x, grid_y), method='cubic')
plt.imshow(grid.T, extent=(math.floor(ans.x.min()), math.ceil(ans.x.max()), math.floor(ans.y.min()), math.ceil(ans.y.max())), cmap=plt.get_cmap("jet"))
plt.contour(grid_x, grid_y, grid)
plt.show()
#/home/drluke/.wine/drive_c/femm42/examples/test.ans
if __name__ == "__main__":
a = FEMM()
ansr = a.readans("/home/drluke/.wine/drive_c/femm42/examples/test.ans")
a.plotans(ansr)
|
<commit_before><commit_msg>Add reading and plotting of .ans files<commit_after>import subprocess
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import re
import math
from scipy.interpolate import griddata
class FEMMans:
def __init__(self, points, preamble):
self.points = points
self.preamble = preamble
self.x = np.zeros(points)
self.y = np.zeros(points)
self.B = np.zeros(points, dtype=np.complex64)
class FEMM:
def __init__(self):
pass
def readans(self, path):
with open(path, "r") as f:
firstline = f.readline()
match = re.search("\[Format\]\s*=\s*([\d\.]+)", firstline)
if match:
if match.group(1) == "4.0":
return self.readans40(f)
def readans40(self, f):
preamble = "" # Everything before the [Solution] tag
points = None # Number of datapoints to expect
ans = None
index = 0
dataregex = re.compile(r"^([\d\.e-]+)\s+([\d\.e-]+)\s+([\d\.e-]+)\s+([\d\.e-]+)\s+([\d\.e-]+)\s+$")
aftersolution = False
for line in f:
if not aftersolution:
preamble += line
if line == ("[Solution]\n"):
aftersolution = True
elif points is None: # First line after [Solution] gives the number of points in the solution
points = int(line)
ans = FEMMans(points, preamble)
else: # Read data point and add to dataset
match = dataregex.search(line)
if match:
ans.x[index] = float(match.group(1))
ans.y[index] = float(match.group(2))
ans.B[index] = float(match.group(3)) + float(match.group(4)) * 1j
index += 1
return ans
def plotans(self, ans):
grid_x, grid_y = np.mgrid[math.floor(ans.x.min()):math.ceil(ans.x.max()):1000j, math.floor(ans.y.min()):math.ceil(ans.y.max()):1000j]
grid = griddata(np.vstack((ans.x, ans.y)).T, np.absolute(ans.B), (grid_x, grid_y), method='cubic')
plt.imshow(grid.T, extent=(math.floor(ans.x.min()), math.ceil(ans.x.max()), math.floor(ans.y.min()), math.ceil(ans.y.max())), cmap=plt.get_cmap("jet"))
plt.contour(grid_x, grid_y, grid)
plt.show()
#/home/drluke/.wine/drive_c/femm42/examples/test.ans
if __name__ == "__main__":
a = FEMM()
ansr = a.readans("/home/drluke/.wine/drive_c/femm42/examples/test.ans")
a.plotans(ansr)
|
|
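The plotting method in the commit above is the standard SciPy recipe for turning scattered solver samples into an image: interpolate onto a regular grid with `griddata`, then `imshow` the grid. A self-contained sketch of the same pattern on synthetic points, with no .ans file required:

import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import griddata

# Synthetic scattered field samples standing in for solver output.
rng = np.random.default_rng(0)
x = rng.uniform(0.0, 1.0, 200)
y = rng.uniform(0.0, 1.0, 200)
values = np.sin(4 * x) * np.cos(4 * y)

# Interpolate the scattered samples onto a 200x200 regular grid.
grid_x, grid_y = np.mgrid[0:1:200j, 0:1:200j]
grid = griddata(np.vstack((x, y)).T, values, (grid_x, grid_y), method='cubic')

plt.imshow(grid.T, extent=(0, 1, 0, 1), origin='lower', cmap=plt.get_cmap('jet'))
plt.colorbar()
plt.show()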
228173482c273a49aa4df00a1bd524ec52be086b
|
examples/alert.py
|
examples/alert.py
|
import gtk
from sugar.graphics.alert import TimeoutAlert
window = gtk.Window()
box = gtk.VBox()
window.add(box)
def _timeout_alert_response_cb(alert, response_id):
if response_id is gtk.RESPONSE_OK:
print 'Ok or Timeout'
elif response_id is gtk.RESPONSE_CANCEL:
print 'Cancel'
box.remove(alert)
gtk.main_quit()
alert = TimeoutAlert(10)
alert.props.title='Title of TimeoutAlert'
alert.props.msg = 'Text of timeout alert, either button will quit'
alert.connect('response', _timeout_alert_response_cb)
box.add(alert)
window.show_all()
gtk.main()
|
Save As - add TimeoutAlert example
|
Save As - add TimeoutAlert example
|
Python
|
lgpl-2.1
|
sugarlabs/sugar-toolkit,sugarlabs/sugar-toolkit,sugarlabs/sugar-toolkit,sugarlabs/sugar-toolkit
|
Save As - add TimeoutAlert example
|
import gtk
from sugar.graphics.alert import TimeoutAlert
window = gtk.Window()
box = gtk.VBox()
window.add(box)
def _timeout_alert_response_cb(alert, response_id):
if response_id is gtk.RESPONSE_OK:
print 'Ok or Timeout'
elif response_id is gtk.RESPONSE_CANCEL:
print 'Cancel'
box.remove(alert)
gtk.main_quit()
alert = TimeoutAlert(10)
alert.props.title='Title of TimeoutAlert'
alert.props.msg = 'Text of timeout alert, either button will quit'
alert.connect('response', _timeout_alert_response_cb)
box.add(alert)
window.show_all()
gtk.main()
|
<commit_before><commit_msg>Save As - add TimeoutAlert example<commit_after>
|
import gtk
from sugar.graphics.alert import TimeoutAlert
window = gtk.Window()
box = gtk.VBox()
window.add(box)
def _timeout_alert_response_cb(alert, response_id):
if response_id is gtk.RESPONSE_OK:
print 'Ok or Timeout'
elif response_id is gtk.RESPONSE_CANCEL:
print 'Cancel'
box.remove(alert)
gtk.main_quit()
alert = TimeoutAlert(10)
alert.props.title='Title of TimeoutAlert'
alert.props.msg = 'Text of timeout alert, either button will quit'
alert.connect('response', _timeout_alert_response_cb)
box.add(alert)
window.show_all()
gtk.main()
|
Save As - add TimeoutAlert exampleimport gtk
from sugar.graphics.alert import TimeoutAlert
window = gtk.Window()
box = gtk.VBox()
window.add(box)
def _timeout_alert_response_cb(alert, response_id):
if response_id is gtk.RESPONSE_OK:
print 'Ok or Timeout'
elif response_id is gtk.RESPONSE_CANCEL:
print 'Cancel'
box.remove(alert)
gtk.main_quit()
alert = TimeoutAlert(10)
alert.props.title='Title of TimeoutAlert'
alert.props.msg = 'Text of timeout alert, either button will quit'
alert.connect('response', _timeout_alert_response_cb)
box.add(alert)
window.show_all()
gtk.main()
|
<commit_before><commit_msg>Save As - add TimeoutAlert example<commit_after>import gtk
from sugar.graphics.alert import TimeoutAlert
window = gtk.Window()
box = gtk.VBox()
window.add(box)
def _timeout_alert_response_cb(alert, response_id):
if response_id is gtk.RESPONSE_OK:
print 'Ok or Timeout'
elif response_id is gtk.RESPONSE_CANCEL:
print 'Cancel'
box.remove(alert)
gtk.main_quit()
alert = TimeoutAlert(10)
alert.props.title='Title of TimeoutAlert'
alert.props.msg = 'Text of timeout alert, either button will quit'
alert.connect('response', _timeout_alert_response_cb)
box.add(alert)
window.show_all()
gtk.main()
|
|
9f17c05916ed43d40e4fa21e156c8c0a9aac2b19
|
test/os_win7.py
|
test/os_win7.py
|
#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from mbed_lstools.lstools_win7 import MbedLsToolsWin7
class Win7TestCase(unittest.TestCase):
""" Basic test cases checking trivial asserts
"""
def setUp(self):
self.win7 = MbedLsToolsWin7()
def test_os_supported(self):
self.assertIn("Windows7", self.win7.os_supported)
if __name__ == '__main__':
unittest.main()
|
Add basic unit tests for Win7 specific platform
|
Add basic unit tests for Win7 specific platform
|
Python
|
apache-2.0
|
jjones646/mbed-ls,mazimkhan/mbed-ls,mtmtech/mbed-ls,jupe/mbed-ls,mazimkhan/mbed-ls,mtmtech/mbed-ls,jupe/mbed-ls,jjones646/mbed-ls
|
Add basic unit tests for Win7 specific platform
|
#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from mbed_lstools.lstools_win7 import MbedLsToolsWin7
class Win7TestCase(unittest.TestCase):
""" Basic test cases checking trivial asserts
"""
def setUp(self):
self.win7 = MbedLsToolsWin7()
def test_os_supported(self):
self.assertIn("Windows7", self.win7.os_supported)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add basic unit tests for Win7 specific platform<commit_after>
|
#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from mbed_lstools.lstools_win7 import MbedLsToolsWin7
class Win7TestCase(unittest.TestCase):
""" Basic test cases checking trivial asserts
"""
def setUp(self):
self.win7 = MbedLsToolsWin7()
def test_os_supported(self):
self.assertIn("Windows7", self.win7.os_supported)
if __name__ == '__main__':
unittest.main()
|
Add basic unit tests for Win7 specific platform#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from mbed_lstools.lstools_win7 import MbedLsToolsWin7
class Win7TestCase(unittest.TestCase):
""" Basic test cases checking trivial asserts
"""
def setUp(self):
self.win7 = MbedLsToolsWin7()
def test_os_supported(self):
self.assertIn("Windows7", self.win7.os_supported)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add basic unit tests for Win7 specific platform<commit_after>#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from mbed_lstools.lstools_win7 import MbedLsToolsWin7
class Win7TestCase(unittest.TestCase):
""" Basic test cases checking trivial asserts
"""
def setUp(self):
self.win7 = MbedLsToolsWin7()
def test_os_supported(self):
self.assertIn("Windows7", self.win7.os_supported)
if __name__ == '__main__':
unittest.main()
|
|
6e3d64ca0769732506fe4cb188b53e719645f641
|
ArcToolbox/Scripts/change_datasource_paths.py
|
ArcToolbox/Scripts/change_datasource_paths.py
|
'''
Tool: Change Datasource Paths
Source: change_datasource_paths.py
Author: Matt.Wilkie@gov.yk.ca
License: X/MIT, (c) 2011 Environment Yukon
When data has moved to a different workspace AND feature dataset the regular
`findAndReplaceWorkspacePath` does not work. This script rectifies that.
Required Arguments:
- layer file to re-path
- new path to workspace containing the feature class
- where to save the layer files
More information:
http://gis.stackexchange.com/questions/6884/change-data-source-path-in-lyr-files-in-arcgis-10
'''
import arcpy, os
# layer file to re-path
fname = arcpy.GetParameterAsText(0)
# new path to workspace containing the feature class
target_wspace = arcpy.GetParameterAsText(1)
# where to save the layer files
savedir = arcpy.GetParameterAsText(2)
lyr = arcpy.mapping.Layer(fname)
fixed_fname = os.path.join(savedir, lyr.longName)
print '\nOld layer properties (%s)' % (fname)
print 'workspace:\t', lyr.workspacePath
print 'full path:\t', lyr.dataSource
try:
lyr.replaceDataSource(target_wspace, 'FILEGDB_WORKSPACE', lyr.datasetName, True)
lyr.saveACopy(fixed_fname)
except:
print arcpy.GetMessages()
print '\nNew layer properties (%s)' % (fixed_fname)
print 'workspace:\t', lyr.workspacePath
print 'full path:\t', lyr.dataSource
del lyr
|
Fix path where findAndReplaceWorkspacePath doesn't
|
Fix path where findAndReplaceWorkspacePath doesn't
When data has moved to a different workspace AND feature dataset the
regular
`findAndReplaceWorkspacePath` does not work. This script rectifies that.
|
Python
|
mit
|
DougFirErickson/arcplus,maphew/arcplus,maphew/arcplus
|
Fix path where findAndReplaceWorkspacePath doesn't
When data has moved to a different workspace AND feature dataset the
regular
`findAndReplaceWorkspacePath` does not work. This script rectifies that.
|
'''
Tool: Change Datasource Paths
Source: change_datasource_paths.py
Author: Matt.Wilkie@gov.yk.ca
License: X/MIT, (c) 2011 Environment Yukon
When data has moved to a different workspace AND feature dataset the regular
`findAndReplaceWorkspacePath` does not work. This script rectifies that.
Required Arguments:
- layer file to re-path
- new path to workspace containing the feature class
- where to save the layer files
More information:
http://gis.stackexchange.com/questions/6884/change-data-source-path-in-lyr-files-in-arcgis-10
'''
import arcpy, os
# layer file to re-path
fname = arcpy.GetParameterAsText(0)
# new path to workspace containing the feature class
target_wspace = arcpy.GetParameterAsText(1)
# where to save the layer files
savedir = arcpy.GetParameterAsText(2)
lyr = arcpy.mapping.Layer(fname)
fixed_fname = os.path.join(savedir, lyr.longName)
print '\nOld layer properties (%s)' % (fname)
print 'workspace:\t', lyr.workspacePath
print 'full path:\t', lyr.dataSource
try:
lyr.replaceDataSource(target_wspace, 'FILEGDB_WORKSPACE', lyr.datasetName, True)
lyr.saveACopy(fixed_fname)
except:
print arcpy.GetMessages()
print '\nNew layer properties (%s)' % (fixed_fname)
print 'workspace:\t', lyr.workspacePath
print 'full path:\t', lyr.dataSource
del lyr
|
<commit_before><commit_msg>Fix path where findAndReplaceWorkspacePath doesn't
When data has moved to a different workspace AND feature dataset the
regular
`findAndReplaceWorkspacePath` does not work. This script rectifies that.<commit_after>
|
'''
Tool: Change Datasource Paths
Source: change_datasource_paths.py
Author: Matt.Wilkie@gov.yk.ca
License: X/MIT, (c) 2011 Environment Yukon
When data has moved to a different workspace AND feature dataset the regular
`findAndReplaceWorkspacePath` does not work. This script rectifies that.
Required Arguments:
- layer file to re-path
- new path to workspace containing the feature class
- where to save the layer files
More information:
http://gis.stackexchange.com/questions/6884/change-data-source-path-in-lyr-files-in-arcgis-10
'''
import arcpy, os
# layer file to re-path
fname = arcpy.GetParameterAsText(0)
# new path to workspace containing the feature class
target_wspace = arcpy.GetParameterAsText(1)
# where to save the layer files
savedir = arcpy.GetParameterAsText(2)
lyr = arcpy.mapping.Layer(fname)
fixed_fname = os.path.join(savedir, lyr.longName)
print '\nOld layer properties (%s)' % (fname)
print 'workspace:\t', lyr.workspacePath
print 'full path:\t', lyr.dataSource
try:
lyr.replaceDataSource(target_wspace, 'FILEGDB_WORKSPACE', lyr.datasetName, True)
lyr.saveACopy(fixed_fname)
except:
print arcpy.GetMessages()
print '\nNew layer properties (%s)' % (fixed_fname)
print 'workspace:\t', lyr.workspacePath
print 'full path:\t', lyr.dataSource
del lyr
|
Fix path where findAndReplaceWorkspacePath doesn't
When data has moved to a different workspace AND feature dataset the
regular
`findAndReplaceWorkspacePath` does not work. This script rectifies that.'''
Tool: Change Datasource Paths
Source: change_datasource_paths.py
Author: Matt.Wilkie@gov.yk.ca
License: X/MIT, (c) 2011 Environment Yukon
When data has moved to a different workspace AND feature dataset the regular
`findAndReplaceWorkspacePath` does not work. This script rectifies that.
Required Arguments:
- layer file to re-path
- new path to workspace containing the feature class
- where to save the layer files
More information:
http://gis.stackexchange.com/questions/6884/change-data-source-path-in-lyr-files-in-arcgis-10
'''
import arcpy, os
# layer file to re-path
fname = arcpy.GetParameterAsText(0)
# new path to workspace containing the feature class
target_wspace = arcpy.GetParameterAsText(1)
# where to save the layer files
savedir = arcpy.GetParameterAsText(2)
lyr = arcpy.mapping.Layer(fname)
fixed_fname = os.path.join(savedir, lyr.longName)
print '\nOld layer properties (%s)' % (fname)
print 'workspace:\t', lyr.workspacePath
print 'full path:\t', lyr.dataSource
try:
lyr.replaceDataSource(target_wspace, 'FILEGDB_WORKSPACE', lyr.datasetName, True)
lyr.saveACopy(fixed_fname)
except:
print arcpy.GetMessages()
print '\nNew layer properties (%s)' % (fixed_fname)
print 'workspace:\t', lyr.workspacePath
print 'full path:\t', lyr.dataSource
del lyr
|
<commit_before><commit_msg>Fix path where findAndReplaceWorkspacePath doesn't
When data has moved to a different workspace AND feature dataset the
regular
`findAndReplaceWorkspacePath` does not work. This script rectifies that.<commit_after>'''
Tool: Change Datasource Paths
Source: change_datasource_paths.py
Author: Matt.Wilkie@gov.yk.ca
License: X/MIT, (c) 2011 Environment Yukon
When data has moved to a different workspace AND feature dataset the regular
`findAndReplaceWorkspacePath` does not work. This script rectifies that.
Required Arguments:
- layer file to re-path
- new path to workspace containing the feature class
- where to save the layer files
More information:
http://gis.stackexchange.com/questions/6884/change-data-source-path-in-lyr-files-in-arcgis-10
'''
import arcpy, os
# layer file to re-path
fname = arcpy.GetParameterAsText(0)
# new path to workspace containing the feature class
target_wspace = arcpy.GetParameterAsText(1)
# where to save the layer files
savedir = arcpy.GetParameterAsText(2)
lyr = arcpy.mapping.Layer(fname)
fixed_fname = os.path.join(savedir, lyr.longName)
print '\nOld layer properties (%s)' % (fname)
print 'workspace:\t', lyr.workspacePath
print 'full path:\t', lyr.dataSource
try:
lyr.replaceDataSource(target_wspace, 'FILEGDB_WORKSPACE', lyr.datasetName, True)
lyr.saveACopy(fixed_fname)
except:
print arcpy.GetMessages()
print '\nNew layer properties (%s)' % (fixed_fname)
print 'workspace:\t', lyr.workspacePath
print 'full path:\t', lyr.dataSource
del lyr
|
|
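The tool above re-paths one layer file per run; in practice the same three `arcpy.mapping` calls are often looped over a directory tree. A hedged sketch of that batch variant, reusing only the calls shown in the commit (the directory arguments are placeholders):

import os
import arcpy

def repath_layers(src_dir, target_wspace, savedir):
    # Re-point every .lyr file under src_dir at the new file geodatabase.
    for root, dirs, files in os.walk(src_dir):
        for name in files:
            if not name.lower().endswith('.lyr'):
                continue
            lyr = arcpy.mapping.Layer(os.path.join(root, name))
            lyr.replaceDataSource(target_wspace, 'FILEGDB_WORKSPACE',
                                  lyr.datasetName, True)
            lyr.saveACopy(os.path.join(savedir, lyr.longName))
            del lyr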
58fdd69ae5c493e3ae26278528fbff09c95c48cd
|
examples/client_auth.py
|
examples/client_auth.py
|
import aiohttp
import asyncio
@asyncio.coroutine
def go(session):
print('Query http://httpbin.org/basic-auth/andrew/password')
resp = yield from session.get(
'http://httpbin.org/basic-auth/andrew/password')
print(resp.status)
try:
body = yield from resp.text()
print(body)
finally:
yield from resp.release()
loop = asyncio.get_event_loop()
session = aiohttp.ClientSession(auth=aiohttp.BasicAuth('andrew',
'password'),
loop=loop)
loop.run_until_complete(go(session))
session.close()
# run loop iteration for actual session closing
loop.stop()
loop.run_forever()
loop.close()
|
Add example for basic auth
|
Add example for basic auth
|
Python
|
apache-2.0
|
arthurdarcet/aiohttp,pathcl/aiohttp,elastic-coders/aiohttp,z2v/aiohttp,morgan-del/aiohttp,elastic-coders/aiohttp,moden-py/aiohttp,mind1master/aiohttp,danielnelson/aiohttp,rutsky/aiohttp,AlexLisovoy/aiohttp,panda73111/aiohttp,pfreixes/aiohttp,mind1master/aiohttp,AlexLisovoy/aiohttp,moden-py/aiohttp,AraHaanOrg/aiohttp,jashandeep-sohi/aiohttp,iksteen/aiohttp,mind1master/aiohttp,Srogozins/aiohttp,jettify/aiohttp,arthurdarcet/aiohttp,panda73111/aiohttp,vedun/aiohttp,KeepSafe/aiohttp,KeepSafe/aiohttp,flying-sheep/aiohttp,juliatem/aiohttp,pfreixes/aiohttp,sterwill/aiohttp,z2v/aiohttp,alex-eri/aiohttp-1,panda73111/aiohttp,vaskalas/aiohttp,Insoleet/aiohttp,KeepSafe/aiohttp,jettify/aiohttp,singulared/aiohttp,esaezgil/aiohttp,vaskalas/aiohttp,rutsky/aiohttp,esaezgil/aiohttp,jashandeep-sohi/aiohttp,vasylbo/aiohttp,jashandeep-sohi/aiohttp,noodle-learns-programming/aiohttp,vaskalas/aiohttp,moden-py/aiohttp,esaezgil/aiohttp,hellysmile/aiohttp,decentfox/aiohttp,alex-eri/aiohttp-1,hellysmile/aiohttp,iksteen/aiohttp,jettify/aiohttp,z2v/aiohttp,decentfox/aiohttp,rutsky/aiohttp,elastic-coders/aiohttp,andyaguiar/aiohttp,singulared/aiohttp,avanov/aiohttp,juliatem/aiohttp,Eyepea/aiohttp,arthurdarcet/aiohttp,playpauseandstop/aiohttp,singulared/aiohttp,alex-eri/aiohttp-1,AraHaanOrg/aiohttp,decentfox/aiohttp,alunduil/aiohttp
|
Add example for basic auth
|
import aiohttp
import asyncio
@asyncio.coroutine
def go(session):
print('Query http://httpbin.org/basic-auth/andrew/password')
resp = yield from session.get(
'http://httpbin.org/basic-auth/andrew/password')
print(resp.status)
try:
body = yield from resp.text()
print(body)
finally:
yield from resp.release()
loop = asyncio.get_event_loop()
session = aiohttp.ClientSession(auth=aiohttp.BasicAuth('andrew',
'password'),
loop=loop)
loop.run_until_complete(go(session))
session.close()
# run loop iteration for actual session closing
loop.stop()
loop.run_forever()
loop.close()
|
<commit_before><commit_msg>Add example for basic auth<commit_after>
|
import aiohttp
import asyncio
@asyncio.coroutine
def go(session):
print('Query http://httpbin.org/basic-auth/andrew/password')
resp = yield from session.get(
'http://httpbin.org/basic-auth/andrew/password')
print(resp.status)
try:
body = yield from resp.text()
print(body)
finally:
yield from resp.release()
loop = asyncio.get_event_loop()
session = aiohttp.ClientSession(auth=aiohttp.BasicAuth('andrew',
'password'),
loop=loop)
loop.run_until_complete(go(session))
session.close()
# run loop iteration for actual session closing
loop.stop()
loop.run_forever()
loop.close()
|
Add example for basic authimport aiohttp
import asyncio
@asyncio.coroutine
def go(session):
print('Query http://httpbin.org/basic-auth/andrew/password')
resp = yield from session.get(
'http://httpbin.org/basic-auth/andrew/password')
print(resp.status)
try:
body = yield from resp.text()
print(body)
finally:
yield from resp.release()
loop = asyncio.get_event_loop()
session = aiohttp.ClientSession(auth=aiohttp.BasicAuth('andrew',
'password'),
loop=loop)
loop.run_until_complete(go(session))
session.close()
# run loop iteration for actual session closing
loop.stop()
loop.run_forever()
loop.close()
|
<commit_before><commit_msg>Add example for basic auth<commit_after>import aiohttp
import asyncio
@asyncio.coroutine
def go(session):
print('Query http://httpbin.org/basic-auth/andrew/password')
resp = yield from session.get(
'http://httpbin.org/basic-auth/andrew/password')
print(resp.status)
try:
body = yield from resp.text()
print(body)
finally:
yield from resp.release()
loop = asyncio.get_event_loop()
session = aiohttp.ClientSession(auth=aiohttp.BasicAuth('andrew',
'password'),
loop=loop)
loop.run_until_complete(go(session))
session.close()
# run loop iteration for actual session closing
loop.stop()
loop.run_forever()
loop.close()
|
|
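The example above is written in the old `yield from` coroutine style. On Python 3.7+ with aiohttp 3.x, the same request is usually written with `async`/`await` and context managers, which also take care of releasing the response and closing the session; a sketch:

import asyncio
import aiohttp

async def main():
    auth = aiohttp.BasicAuth('andrew', 'password')
    async with aiohttp.ClientSession(auth=auth) as session:
        async with session.get(
                'http://httpbin.org/basic-auth/andrew/password') as resp:
            print(resp.status)
            print(await resp.text())

asyncio.run(main())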
2c5fb5a0bcf47e49c9862891730615f6c180462f
|
crmapp/subscribers/forms.py
|
crmapp/subscribers/forms.py
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
class SubscriberForm(UserCreationForm):
email = forms.EmailField(
required=True, widget=forms.TextInput(attrs={'class':'form-control'})
)
username = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control'})
)
password1 = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control', 'type':'password'})
)
password2 = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control', 'type':'password'})
)
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from .models import Subscriber
class AddressMixin(forms.ModelForm):
class Meta:
model = Subscriber
fields = ('address_one', 'address_two', 'city', 'state',)
widgets = {
'address_one': forms.TextInput(attrs={'class':'form-control'}),
'address_two': forms.TextInput(attrs={'class':'form-control'}),
'city': forms.TextInput(attrs={'class':'form-control'}),
'state': forms.TextInput(attrs={'class':'form-control'}),
}
class SubscriberForm(AddressMixin, UserCreationForm):
first_name = forms.CharField(
required=True, widget=forms.TextInput(attrs={'class':'form-control'})
)
last_name = forms.CharField(
required=True, widget=forms.TextInput(attrs={'class':'form-control'})
)
email = forms.EmailField(
required=True, widget=forms.TextInput(attrs={'class':'form-control'})
)
username = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control'})
)
password1 = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control', 'type':'password'})
)
password2 = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control', 'type':'password'})
)
|
Create the Subscriber Form - Part II > Update the Form
|
Create the Subscriber Form - Part II > Update the Form
|
Python
|
mit
|
tabdon/crmeasyapp,tabdon/crmeasyapp,deenaariff/Django
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
class SubscriberForm(UserCreationForm):
email = forms.EmailField(
required=True, widget=forms.TextInput(attrs={'class':'form-control'})
)
username = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control'})
)
password1 = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control', 'type':'password'})
)
password2 = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control', 'type':'password'})
)
Create the Subscriber Form - Part II > Update the Form
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from .models import Subscriber
class AddressMixin(forms.ModelForm):
class Meta:
model = Subscriber
fields = ('address_one', 'address_two', 'city', 'state',)
widgets = {
'address_one': forms.TextInput(attrs={'class':'form-control'}),
'address_two': forms.TextInput(attrs={'class':'form-control'}),
'city': forms.TextInput(attrs={'class':'form-control'}),
'state': forms.TextInput(attrs={'class':'form-control'}),
}
class SubscriberForm(AddressMixin, UserCreationForm):
first_name = forms.CharField(
required=True, widget=forms.TextInput(attrs={'class':'form-control'})
)
last_name = forms.CharField(
required=True, widget=forms.TextInput(attrs={'class':'form-control'})
)
email = forms.EmailField(
required=True, widget=forms.TextInput(attrs={'class':'form-control'})
)
username = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control'})
)
password1 = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control', 'type':'password'})
)
password2 = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control', 'type':'password'})
)
|
<commit_before>from django import forms
from django.contrib.auth.forms import UserCreationForm
class SubscriberForm(UserCreationForm):
email = forms.EmailField(
required=True, widget=forms.TextInput(attrs={'class':'form-control'})
)
username = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control'})
)
password1 = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control', 'type':'password'})
)
password2 = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control', 'type':'password'})
)
<commit_msg>Create the Subscriber Form - Part II > Update the Form<commit_after>
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from .models import Subscriber
class AddressMixin(forms.ModelForm):
class Meta:
model = Subscriber
fields = ('address_one', 'address_two', 'city', 'state',)
widgets = {
'address_one': forms.TextInput(attrs={'class':'form-control'}),
'address_two': forms.TextInput(attrs={'class':'form-control'}),
'city': forms.TextInput(attrs={'class':'form-control'}),
'state': forms.TextInput(attrs={'class':'form-control'}),
}
class SubscriberForm(AddressMixin, UserCreationForm):
first_name = forms.CharField(
required=True, widget=forms.TextInput(attrs={'class':'form-control'})
)
last_name = forms.CharField(
required=True, widget=forms.TextInput(attrs={'class':'form-control'})
)
email = forms.EmailField(
required=True, widget=forms.TextInput(attrs={'class':'form-control'})
)
username = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control'})
)
password1 = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control', 'type':'password'})
)
password2 = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control', 'type':'password'})
)
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
class SubscriberForm(UserCreationForm):
email = forms.EmailField(
required=True, widget=forms.TextInput(attrs={'class':'form-control'})
)
username = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control'})
)
password1 = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control', 'type':'password'})
)
password2 = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control', 'type':'password'})
)
Create the Subscriber Form - Part II > Update the Formfrom django import forms
from django.contrib.auth.forms import UserCreationForm
from .models import Subscriber
class AddressMixin(forms.ModelForm):
class Meta:
model = Subscriber
fields = ('address_one', 'address_two', 'city', 'state',)
widgets = {
'address_one': forms.TextInput(attrs={'class':'form-control'}),
'address_two': forms.TextInput(attrs={'class':'form-control'}),
'city': forms.TextInput(attrs={'class':'form-control'}),
'state': forms.TextInput(attrs={'class':'form-control'}),
}
class SubscriberForm(AddressMixin, UserCreationForm):
first_name = forms.CharField(
required=True, widget=forms.TextInput(attrs={'class':'form-control'})
)
last_name = forms.CharField(
required=True, widget=forms.TextInput(attrs={'class':'form-control'})
)
email = forms.EmailField(
required=True, widget=forms.TextInput(attrs={'class':'form-control'})
)
username = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control'})
)
password1 = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control', 'type':'password'})
)
password2 = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control', 'type':'password'})
)
|
<commit_before>from django import forms
from django.contrib.auth.forms import UserCreationForm
class SubscriberForm(UserCreationForm):
email = forms.EmailField(
required=True, widget=forms.TextInput(attrs={'class':'form-control'})
)
username = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control'})
)
password1 = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control', 'type':'password'})
)
password2 = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control', 'type':'password'})
)
<commit_msg>Create the Subscriber Form - Part II > Update the Form<commit_after>from django import forms
from django.contrib.auth.forms import UserCreationForm
from .models import Subscriber
class AddressMixin(forms.ModelForm):
class Meta:
model = Subscriber
fields = ('address_one', 'address_two', 'city', 'state',)
widgets = {
'address_one': forms.TextInput(attrs={'class':'form-control'}),
'address_two': forms.TextInput(attrs={'class':'form-control'}),
'city': forms.TextInput(attrs={'class':'form-control'}),
'state': forms.TextInput(attrs={'class':'form-control'}),
}
class SubscriberForm(AddressMixin, UserCreationForm):
first_name = forms.CharField(
required=True, widget=forms.TextInput(attrs={'class':'form-control'})
)
last_name = forms.CharField(
required=True, widget=forms.TextInput(attrs={'class':'form-control'})
)
email = forms.EmailField(
required=True, widget=forms.TextInput(attrs={'class':'form-control'})
)
username = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control'})
)
password1 = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control', 'type':'password'})
)
password2 = forms.CharField(
widget=forms.TextInput(attrs={'class':'form-control', 'type':'password'})
)
|
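For context, a combined form like this is typically consumed from a signup view. A hedged sketch of a minimal function-based view (the URL name, template path, and the bare `form.save()` are illustrative; the real app may override `save()` to create the User and Subscriber together):

from django.shortcuts import redirect, render

def subscriber_signup(request):
    # Bind POST data on submit; otherwise render an empty form.
    if request.method == 'POST':
        form = SubscriberForm(request.POST)
        if form.is_valid():
            form.save()  # illustrative; real code may split User/Subscriber creation
            return redirect('home')
    else:
        form = SubscriberForm()
    return render(request, 'subscribers/signup.html', {'form': form})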
def34fb7055824f57c3b9f93a40d9bc68b72f39d
|
pysingcells/abcstep.py
|
pysingcells/abcstep.py
|
# -*- coding: utf-8 -*-
# std import
from enum import Enum
from abc import ABCMeta, abstractmethod
class StepStat(Enum):
nostat = 1
load = 2
no_ready = 3
ready = 4
succes = 5
failled = 6
class AbcStep(metaclass=ABCMeta):
""" Abstract class for mapper """
def __init__(self):
self.state = StepStat.nostat
@abstractmethod
def read_configuration(self, configuration):
""" Read the configuration object and prepare object to run """
pass
@abstractmethod
def check_configuration(self):
""" Check if file, binary, other ressource is avaible for run mapper """
pass
@abstractmethod
def run(self):
""" Run the mapper effectively """
pass
|
Add abstract class for step management
|
Add abstract class for step management
|
Python
|
mit
|
Fougere87/pysingcells
|
Add abstract class for step management
|
# -*- coding: utf-8 -*-
# std import
from enum import Enum
from abc import ABCMeta, abstractmethod
class StepStat(Enum):
nostat = 1
load = 2
no_ready = 3
ready = 4
succes = 5
failled = 6
class AbcStep(metaclass=ABCMeta):
""" Abstract class for mapper """
def __init__(self):
self.state = StepStat.nostat
@abstractmethod
def read_configuration(self, configuration):
""" Read the configuration object and prepare object to run """
pass
@abstractmethod
def check_configuration(self):
""" Check if file, binary, other ressource is avaible for run mapper """
pass
@abstractmethod
def run(self):
""" Run the mapper effectively """
pass
|
<commit_before><commit_msg>Add abstract class for step management<commit_after>
|
# -*- coding: utf-8 -*-
# std import
from enum import Enum
from abc import ABCMeta, abstractmethod
class StepStat(Enum):
nostat = 1
load = 2
no_ready = 3
ready = 4
succes = 5
failled = 6
class AbcStep(metaclass=ABCMeta):
""" Abstract class for mapper """
def __init__(self):
self.state = StepStat.nostat
@abstractmethod
def read_configuration(self, configuration):
""" Read the configuration object and prepare object to run """
pass
@abstractmethod
def check_configuration(self):
""" Check if file, binary, other ressource is avaible for run mapper """
pass
@abstractmethod
def run(self):
""" Run the mapper effectively """
pass
|
Add abstract class for step management
# -*- coding: utf-8 -*-
# std import
from enum import Enum
from abc import ABCMeta, abstractmethod
class StepStat(Enum):
nostat = 1
load = 2
no_ready = 3
ready = 4
succes = 5
failled = 6
class AbcStep(metaclass=ABCMeta):
""" Abstract class for mapper """
def __init__(self):
self.state = StepStat.nostat
@abstractmethod
def read_configuration(self, configuration):
""" Read the configuration object and prepare object to run """
pass
@abstractmethod
def check_configuration(self):
""" Check if file, binary, other ressource is avaible for run mapper """
pass
@abstractmethod
def run(self):
""" Run the mapper effectively """
pass
|
<commit_before><commit_msg>Add abstract class for step management<commit_after>
# -*- coding: utf-8 -*-
# std import
from enum import Enum
from abc import ABCMeta, abstractmethod
class StepStat(Enum):
nostat = 1
load = 2
no_ready = 3
ready = 4
succes = 5
failled = 6
class AbcStep(metaclass=ABCMeta):
""" Abstract class for mapper """
def __init__(self):
self.state = StepStat.nostat
@abstractmethod
def read_configuration(self, configuration):
""" Read the configuration object and prepare object to run """
pass
@abstractmethod
def check_configuration(self):
""" Check if file, binary, other ressource is avaible for run mapper """
pass
@abstractmethod
def run(self):
""" Run the mapper effectively """
pass
|
|
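A concrete step plugs into this base by implementing the three abstract methods and moving `self.state` through the `StepStat` values (member names follow the definitions above). A minimal illustrative subclass; the step itself is hypothetical:

class EchoStep(AbcStep):
    """ Hypothetical no-op step used to illustrate the contract """

    def read_configuration(self, configuration):
        # Assumes a dict-like configuration object for this sketch.
        self.message = configuration.get("message", "")
        self.state = StepStat.load

    def check_configuration(self):
        self.state = StepStat.ready if self.message else StepStat.no_ready

    def run(self):
        print(self.message)
        self.state = StepStat.succes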
e91682aee85d33b859f6f5be29ec6e518bd14544
|
pytac/enabled_bpm_x.py
|
pytac/enabled_bpm_x.py
|
import pytac.load_csv
import pytac.epics
def main():
lattice = pytac.load_csv.load('VMX', pytac.epics.EpicsControlSystem())
bpms = lattice.get_elements('BPM')
disabled_devices = 0
for bpm in bpms:
if not bpm._devices['x'].is_enabled():
disabled_devices += 1
if not bpm._devices['y'].is_enabled():
disabled_devices += 1
print 'There are {0} disabled bpm devices.'.format(disabled_devices)
if __name__=='__main__':
main()
|
Print to the screen how many bpm devices are disabled
|
Print to the screen how many bpm devices are disabled
|
Python
|
apache-2.0
|
razvanvasile/Work-Mini-Projects,razvanvasile/Work-Mini-Projects,razvanvasile/Work-Mini-Projects
|
Print to the screen how many bpm devices are disabled
|
import pytac.load_csv
import pytac.epics
def main():
lattice = pytac.load_csv.load('VMX', pytac.epics.EpicsControlSystem())
bpms = lattice.get_elements('BPM')
disabled_devices = 0
for bpm in bpms:
if not bpm._devices['x'].is_enabled():
disabled_devices += 1
if not bpm._devices['y'].is_enabled():
disabled_devices += 1
print 'There are {0} disabled bpm devices.'.format(disabled_devices)
if __name__=='__main__':
main()
|
<commit_before><commit_msg>Print to the screen how many bpm devices are disabled<commit_after>
|
import pytac.load_csv
import pytac.epics
def main():
lattice = pytac.load_csv.load('VMX', pytac.epics.EpicsControlSystem())
bpms = lattice.get_elements('BPM')
disabled_devices = 0
for bpm in bpms:
if not bpm._devices['x'].is_enabled():
disabled_devices += 1
if not bpm._devices['y'].is_enabled():
disabled_devices += 1
print 'There are {0} disabled bpm devices.'.format(disabled_devices)
if __name__=='__main__':
main()
|
Print to the screen how many bpm devices are disabledimport pytac.load_csv
import pytac.epics
def main():
lattice = pytac.load_csv.load('VMX', pytac.epics.EpicsControlSystem())
bpms = lattice.get_elements('BPM')
disabled_devices = 0
for bpm in bpms:
if not bpm._devices['x'].is_enabled():
disabled_devices += 1
if not bpm._devices['y'].is_enabled():
disabled_devices += 1
print 'There are {0} disabled bpm devices.'.format(disabled_devices)
if __name__=='__main__':
main()
|
<commit_before><commit_msg>Print to the screen how many bpm devices are disabled<commit_after>import pytac.load_csv
import pytac.epics
def main():
lattice = pytac.load_csv.load('VMX', pytac.epics.EpicsControlSystem())
bpms = lattice.get_elements('BPM')
disabled_devices = 0
for bpm in bpms:
if not bpm._devices['x'].is_enabled():
disabled_devices += 1
if not bpm._devices['y'].is_enabled():
disabled_devices += 1
print 'There are {0} disabled bpm devices.'.format(disabled_devices)
if __name__=='__main__':
main()
|
|
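The duplicated `if` blocks in the loop above can be collapsed into one generator expression over both planes; a sketch, assuming `bpms` comes from `lattice.get_elements('BPM')` as in the script:

disabled_devices = sum(
    1
    for bpm in bpms
    for plane in ('x', 'y')
    if not bpm._devices[plane].is_enabled()
)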
6afb3c7f8efbc8623c8af3c2de763f09ac15536c
|
polygraph/types/tests/test_field.py
|
polygraph/types/tests/test_field.py
|
from unittest import TestCase
from polygraph.exceptions import PolygraphSchemaError
from polygraph.types.field import (
field,
validate_field_types,
validate_method_annotations,
)
from polygraph.types.lazy_type import LazyType
from polygraph.types.object_type import ObjectType
from polygraph.types.scalar import Int, String
from polygraph.types.tests.helper import Person
lazy_string = LazyType("String", "polygraph.types.scalar")
lazy_animal = LazyType("Animal", "polygraph.types.tests.helper")
class FieldTest(TestCase):
def test_validate_field_types(self):
class Test(ObjectType):
@field()
def valid_field(self, arg1: Int) -> String:
pass
@field()
def also_valid_field(self, arg1: lazy_string) -> lazy_animal:
pass
@field()
def bad_argument_type(self, arg1: Person) -> String: # ObjectType is not valid input
pass
@field()
def bad_lazy_argument_type(self, arg1: lazy_animal) -> String:
pass
@field()
def bad_return_type(self, arg1: String) -> str:
pass
self.assertIsNone(validate_field_types(Test.valid_field))
self.assertIsNone(validate_field_types(Test.also_valid_field))
with self.assertRaises(PolygraphSchemaError):
validate_field_types(Test.bad_argument_type)
with self.assertRaises(PolygraphSchemaError):
validate_field_types(Test.bad_lazy_argument_type)
with self.assertRaises(PolygraphSchemaError):
validate_field_types(Test.bad_return_type)
|
Add test case around field type validation
|
Add test case around field type validation
|
Python
|
mit
|
polygraph-python/polygraph
|
Add test case around field type validation
|
from unittest import TestCase
from polygraph.exceptions import PolygraphSchemaError
from polygraph.types.field import (
field,
validate_field_types,
validate_method_annotations,
)
from polygraph.types.lazy_type import LazyType
from polygraph.types.object_type import ObjectType
from polygraph.types.scalar import Int, String
from polygraph.types.tests.helper import Person
lazy_string = LazyType("String", "polygraph.types.scalar")
lazy_animal = LazyType("Animal", "polygraph.types.tests.helper")
class FieldTest(TestCase):
def test_validate_field_types(self):
class Test(ObjectType):
@field()
def valid_field(self, arg1: Int) -> String:
pass
@field()
def also_valid_field(self, arg1: lazy_string) -> lazy_animal:
pass
@field()
def bad_argument_type(self, arg1: Person) -> String: # ObjectType is not valid input
pass
@field()
def bad_lazy_argument_type(self, arg1: lazy_animal) -> String:
pass
@field()
def bad_return_type(self, arg1: String) -> str:
pass
self.assertIsNone(validate_field_types(Test.valid_field))
self.assertIsNone(validate_field_types(Test.also_valid_field))
with self.assertRaises(PolygraphSchemaError):
validate_field_types(Test.bad_argument_type)
with self.assertRaises(PolygraphSchemaError):
validate_field_types(Test.bad_lazy_argument_type)
with self.assertRaises(PolygraphSchemaError):
validate_field_types(Test.bad_return_type)
|
<commit_before><commit_msg>Add test case around field type validation<commit_after>
|
from unittest import TestCase
from polygraph.exceptions import PolygraphSchemaError
from polygraph.types.field import (
field,
validate_field_types,
validate_method_annotations,
)
from polygraph.types.lazy_type import LazyType
from polygraph.types.object_type import ObjectType
from polygraph.types.scalar import Int, String
from polygraph.types.tests.helper import Person
lazy_string = LazyType("String", "polygraph.types.scalar")
lazy_animal = LazyType("Animal", "polygraph.types.tests.helper")
class FieldTest(TestCase):
def test_validate_field_types(self):
class Test(ObjectType):
@field()
def valid_field(self, arg1: Int) -> String:
pass
@field()
def also_valid_field(self, arg1: lazy_string) -> lazy_animal:
pass
@field()
def bad_argument_type(self, arg1: Person) -> String: # ObjectType is not valid input
pass
@field()
def bad_lazy_argument_type(self, arg1: lazy_animal) -> String:
pass
@field()
def bad_return_type(self, arg1: String) -> str:
pass
self.assertIsNone(validate_field_types(Test.valid_field))
self.assertIsNone(validate_field_types(Test.also_valid_field))
with self.assertRaises(PolygraphSchemaError):
validate_field_types(Test.bad_argument_type)
with self.assertRaises(PolygraphSchemaError):
validate_field_types(Test.bad_lazy_argument_type)
with self.assertRaises(PolygraphSchemaError):
validate_field_types(Test.bad_return_type)
|
Add test case around field type validationfrom unittest import TestCase
from polygraph.exceptions import PolygraphSchemaError
from polygraph.types.field import (
field,
validate_field_types,
validate_method_annotations,
)
from polygraph.types.lazy_type import LazyType
from polygraph.types.object_type import ObjectType
from polygraph.types.scalar import Int, String
from polygraph.types.tests.helper import Person
lazy_string = LazyType("String", "polygraph.types.scalar")
lazy_animal = LazyType("Animal", "polygraph.types.tests.helper")
class FieldTest(TestCase):
def test_validate_field_types(self):
class Test(ObjectType):
@field()
def valid_field(self, arg1: Int) -> String:
pass
@field()
def also_valid_field(self, arg1: lazy_string) -> lazy_animal:
pass
@field()
def bad_argument_type(self, arg1: Person) -> String: # ObjectType is not valid input
pass
@field()
def bad_lazy_argument_type(self, arg1: lazy_animal) -> String:
pass
@field()
def bad_return_type(self, arg1: String) -> str:
pass
self.assertIsNone(validate_field_types(Test.valid_field))
self.assertIsNone(validate_field_types(Test.also_valid_field))
with self.assertRaises(PolygraphSchemaError):
validate_field_types(Test.bad_argument_type)
with self.assertRaises(PolygraphSchemaError):
validate_field_types(Test.bad_lazy_argument_type)
with self.assertRaises(PolygraphSchemaError):
validate_field_types(Test.bad_return_type)
|
<commit_before><commit_msg>Add test case around field type validation<commit_after>from unittest import TestCase
from polygraph.exceptions import PolygraphSchemaError
from polygraph.types.field import (
field,
validate_field_types,
validate_method_annotations,
)
from polygraph.types.lazy_type import LazyType
from polygraph.types.object_type import ObjectType
from polygraph.types.scalar import Int, String
from polygraph.types.tests.helper import Person
lazy_string = LazyType("String", "polygraph.types.scalar")
lazy_animal = LazyType("Animal", "polygraph.types.tests.helper")
class FieldTest(TestCase):
def test_validate_field_types(self):
class Test(ObjectType):
@field()
def valid_field(self, arg1: Int) -> String:
pass
@field()
def also_valid_field(self, arg1: lazy_string) -> lazy_animal:
pass
@field()
def bad_argument_type(self, arg1: Person) -> String: # ObjectType is not valid input
pass
@field()
def bad_lazy_argument_type(self, arg1: lazy_animal) -> String:
pass
@field()
def bad_return_type(self, arg1: String) -> str:
pass
self.assertIsNone(validate_field_types(Test.valid_field))
self.assertIsNone(validate_field_types(Test.also_valid_field))
with self.assertRaises(PolygraphSchemaError):
validate_field_types(Test.bad_argument_type)
with self.assertRaises(PolygraphSchemaError):
validate_field_types(Test.bad_lazy_argument_type)
with self.assertRaises(PolygraphSchemaError):
validate_field_types(Test.bad_return_type)
|
|
795681f19f80eca114c05d1e2c4e73a48529f27a
|
edit_distance/python/edit_distance.py
|
edit_distance/python/edit_distance.py
|
# Helpful tutorial: https://www.youtube.com/watch?v=We3YDTzNXEk
# Useful link: https://en.wikipedia.org/wiki/Edit_distance
def get_edit_distance(s1, s2):
l1 = len(s1) + 1
l2 = len(s2) + 1
edit_table = {}
for i in range(l1):
edit_table[i, 0] = i
for j in range(l2):
edit_table[0, j] = j
for i in range(1, l1):
for j in range(1, l2):
            deletion = edit_table[i - 1, j] + 1
            insertion = edit_table[i, j - 1] + 1
            substitution = edit_table[i - 1, j - 1] + (1 if s1[i - 1] != s2[j - 1] else 0)
            edit_table[i, j] = min(deletion, insertion, substitution)
    return edit_table[l1 - 1, l2 - 1]
if __name__ == '__main__':
    # returns 1, as inserting 'a' in the 2nd position of
    # 'hello' makes it 'haello'
    print(get_edit_distance('hello', 'haello'))
    # returns 2, as replacing 'o' in 'redor' and adding 'e' at the end makes
    # 'redare'
    print(get_edit_distance('redor', 'redare'))
|
Add edit distance algorithm in python
|
Add edit distance algorithm in python
|
Python
|
apache-2.0
|
churrizo/Algorithms_Example,xiroV/Algorithms_Example,churrizo/Algorithms_Example,churrizo/Algorithms_Example,AtoMc/Algorithms_Example,pranjalrai/Algorithms_Example,Anat-Port/Algorithms_Example,pranjalrai/Algorithms_Example,maazsq/Algorithms_Example,alok760/Algorithms_Example,churrizo/Algorithms_Example,churrizo/Algorithms_Example,Anat-Port/Algorithms_Example,Thuva4/Algorithms_Example,maazsq/Algorithms_Example,Thuva4/Algorithms_Example,pranjalrai/Algorithms_Example,maazsq/Algorithms_Example,maazsq/Algorithms_Example,maazsq/Algorithms_Example,xiroV/Algorithms_Example,xiroV/Algorithms_Example,AtoMc/Algorithms_Example,churrizo/Algorithms_Example,Thuva4/Algorithms_Example,Anat-Port/Algorithms_Example,Astrophilic/Algorithms_Example,xiroV/Algorithms_Example,alok760/Algorithms_Example,pranjalrai/Algorithms_Example,Anat-Port/Algorithms_Example,AtoMc/Algorithms_Example,alok760/Algorithms_Example,xiroV/Algorithms_Example,maazsq/Algorithms_Example,Anat-Port/Algorithms_Example,AtoMc/Algorithms_Example,Astrophilic/Algorithms_Example,AtoMc/Algorithms_Example,xiroV/Algorithms_Example,churrizo/Algorithms_Example,AtoMc/Algorithms_Example,alok760/Algorithms_Example,Astrophilic/Algorithms_Example,pranjalrai/Algorithms_Example,alok760/Algorithms_Example,Astrophilic/Algorithms_Example,AtoMc/Algorithms_Example,pranjalrai/Algorithms_Example,pranjalrai/Algorithms_Example,churrizo/Algorithms_Example,xiroV/Algorithms_Example,maazsq/Algorithms_Example,Thuva4/Algorithms_Example,xiroV/Algorithms_Example,alok760/Algorithms_Example,Anat-Port/Algorithms_Example,Astrophilic/Algorithms_Example,maazsq/Algorithms_Example,pranjalrai/Algorithms_Example,Astrophilic/Algorithms_Example,Anat-Port/Algorithms_Example,xiroV/Algorithms_Example,Thuva4/Algorithms_Example,Thuva4/Algorithms_Example,Thuva4/Algorithms_Example,alok760/Algorithms_Example,xiroV/Algorithms_Example,Thuva4/Algorithms_Example,Astrophilic/Algorithms_Example,alok760/Algorithms_Example,AtoMc/Algorithms_Example,Anat-Port/Algorithms_Example,Astrophilic/Algorithms_Example,AtoMc/Algorithms_Example,Anat-Port/Algorithms_Example,churrizo/Algorithms_Example,maazsq/Algorithms_Example,churrizo/Algorithms_Example,alok760/Algorithms_Example,Thuva4/Algorithms_Example,alok760/Algorithms_Example,Astrophilic/Algorithms_Example,AtoMc/Algorithms_Example,Anat-Port/Algorithms_Example,pranjalrai/Algorithms_Example,Astrophilic/Algorithms_Example,maazsq/Algorithms_Example,Thuva4/Algorithms_Example,AtoMc/Algorithms_Example,AtoMc/Algorithms_Example,pranjalrai/Algorithms_Example,maazsq/Algorithms_Example,Thuva4/Algorithms_Example,alok760/Algorithms_Example,xiroV/Algorithms_Example,churrizo/Algorithms_Example,Thuva4/Algorithms_Example,maazsq/Algorithms_Example,churrizo/Algorithms_Example,alok760/Algorithms_Example,Anat-Port/Algorithms_Example,pranjalrai/Algorithms_Example,Astrophilic/Algorithms_Example
|
Add edit distance algorithm in python
|
# Helpful tutorial: https://www.youtube.com/watch?v=We3YDTzNXEk
# Useful link: https://en.wikipedia.org/wiki/Edit_distance
def get_edit_distance(s1, s2):
l1 = len(s1) + 1
l2 = len(s2) + 1
edit_table = {}
for i in range(l1):
edit_table[i, 0] = i
for j in range(l2):
edit_table[0, j] = j
for i in range(1, l1):
for j in range(1, l2):
            deletion = edit_table[i - 1, j] + 1
            insertion = edit_table[i, j - 1] + 1
            substitution = edit_table[i - 1, j - 1] + (1 if s1[i - 1] != s2[j - 1] else 0)
            edit_table[i, j] = min(deletion, insertion, substitution)
    return edit_table[l1 - 1, l2 - 1]
if __name__ == '__main__':
    # returns 1, as inserting 'a' in the 2nd position of
    # 'hello' makes it 'haello'
    print(get_edit_distance('hello', 'haello'))
    # returns 2, as replacing 'o' in 'redor' and adding 'e' at the end makes
    # 'redare'
    print(get_edit_distance('redor', 'redare'))
|
<commit_before><commit_msg>Add edit distance algorithm in python<commit_after>
|
# Helpful tutorial: https://www.youtube.com/watch?v=We3YDTzNXEk
# Useful link: https://en.wikipedia.org/wiki/Edit_distance
def get_edit_distance(s1, s2):
l1 = len(s1) + 1
l2 = len(s2) + 1
edit_table = {}
for i in range(l1):
edit_table[i, 0] = i
for j in range(l2):
edit_table[0, j] = j
for i in range(1, l1):
for j in range(1, l2):
            deletion = edit_table[i - 1, j] + 1
            insertion = edit_table[i, j - 1] + 1
            substitution = edit_table[i - 1, j - 1] + (1 if s1[i - 1] != s2[j - 1] else 0)
            edit_table[i, j] = min(deletion, insertion, substitution)
    return edit_table[l1 - 1, l2 - 1]
if __name__ == '__main__':
    # returns 1, as inserting 'a' in the 2nd position of
    # 'hello' makes it 'haello'
    print(get_edit_distance('hello', 'haello'))
    # returns 2, as replacing 'o' in 'redor' and adding 'e' at the end makes
    # 'redare'
    print(get_edit_distance('redor', 'redare'))
|
Add edit distance algorithm in python
# Helpful tutorial: https://www.youtube.com/watch?v=We3YDTzNXEk
# Useful link: https://en.wikipedia.org/wiki/Edit_distance
def get_edit_distance(s1, s2):
l1 = len(s1) + 1
l2 = len(s2) + 1
edit_table = {}
for i in range(l1):
edit_table[i, 0] = i
for j in range(l2):
edit_table[0, j] = j
for i in range(1, l1):
for j in range(1, l2):
            deletion = edit_table[i - 1, j] + 1
            insertion = edit_table[i, j - 1] + 1
            substitution = edit_table[i - 1, j - 1] + (1 if s1[i - 1] != s2[j - 1] else 0)
            edit_table[i, j] = min(deletion, insertion, substitution)
    return edit_table[l1 - 1, l2 - 1]
if __name__ == '__main__':
    # returns 1, as inserting 'a' in the 2nd position of
    # 'hello' makes it 'haello'
    print(get_edit_distance('hello', 'haello'))
    # returns 2, as replacing 'o' in 'redor' and adding 'e' at the end makes
    # 'redare'
    print(get_edit_distance('redor', 'redare'))
|
<commit_before><commit_msg>Add edit distance algorithm in python<commit_after># Helpful tutorial: https://www.youtube.com/watch?v=We3YDTzNXEk
# Useful link: https://en.wikipedia.org/wiki/Edit_distance
def get_edit_distance(s1, s2):
l1 = len(s1) + 1
l2 = len(s2) + 1
edit_table = {}
for i in range(l1):
edit_table[i, 0] = i
for j in range(l2):
edit_table[0, j] = j
for i in range(1, l1):
for j in range(1, l2):
            deletion = edit_table[i - 1, j] + 1
            insertion = edit_table[i, j - 1] + 1
            substitution = edit_table[i - 1, j - 1] + (1 if s1[i - 1] != s2[j - 1] else 0)
            edit_table[i, j] = min(deletion, insertion, substitution)
    return edit_table[l1 - 1, l2 - 1]
if __name__ == '__main__':
    # returns 1, as inserting 'a' in the 2nd position of
    # 'hello' makes it 'haello'
    print(get_edit_distance('hello', 'haello'))
    # returns 2, as replacing 'o' in 'redor' and adding 'e' at the end makes
    # 'redare'
    print(get_edit_distance('redor', 'redare'))
|
|
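A minimal usage sketch for the edit-distance record above (illustrative only; assumes the corrected get_edit_distance from the record is in scope):

# Each expected value follows from unit-cost insert/delete/substitute edits.
assert get_edit_distance('kitten', 'sitting') == 3  # k->s, e->i, insert 'g'
assert get_edit_distance('abc', 'abc') == 0  # identical strings need no edits
assert get_edit_distance('', 'abc') == 3  # three insertions from the empty string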
e832f29697dc0308a99b60008a538f21f59a44f5
|
documentation/html.py
|
documentation/html.py
|
"""Documentation of the JSON representing HTML."""
# an html element
ELEMENT = {
"type": "element",
"name": "", # name of HTML tag
"attrs": {}, # dictionary of type dice[str,str] for tag attrbutes
"children": [] # list of children (also HTML elements)
}
# Attributes of an HTML element
ATTRIBUTES = {
"type": "attrs",
"...": "..."
}
|
Add docs for structure of JSON for HTML
|
doc: Add docs for structure of JSON for HTML
|
Python
|
apache-2.0
|
Lodifice/mfnf-pdf-export,Lodifice/mfnf-pdf-export,Lodifice/mfnf-pdf-export
|
doc: Add docs for structure of JSON for HTML
|
"""Documentation of the JSON representing HTML."""
# an html element
ELEMENT = {
"type": "element",
"name": "", # name of HTML tag
"attrs": {}, # dictionary of type dice[str,str] for tag attrbutes
"children": [] # list of children (also HTML elements)
}
# Attributes of an HTML element
ATTRIBUTES = {
"type": "attrs",
"...": "..."
}
|
<commit_before><commit_msg>doc: Add docs for structure of JSON for HTML<commit_after>
|
"""Documentation of the JSON representing HTML."""
# an html element
ELEMENT = {
"type": "element",
"name": "", # name of HTML tag
"attrs": {}, # dictionary of type dice[str,str] for tag attrbutes
"children": [] # list of children (also HTML elements)
}
# Attributes of an HTML element
ATTRIBUTES = {
"type": "attrs",
"...": "..."
}
|
doc: Add docs for structure of JSON for HTML
"""Documentation of the JSON representing HTML."""
# an html element
ELEMENT = {
"type": "element",
"name": "", # name of HTML tag
"attrs": {}, # dictionary of type dice[str,str] for tag attrbutes
"children": [] # list of children (also HTML elements)
}
# Attributes of an HTML element
ATTRIBUTES = {
"type": "attrs",
"...": "..."
}
|
<commit_before><commit_msg>doc: Add docs for structure of JSON for HTML<commit_after>"""Documentation of the JSON representing HTML."""
# an html element
ELEMENT = {
"type": "element",
"name": "", # name of HTML tag
"attrs": {}, # dictionary of type dice[str,str] for tag attrbutes
"children": [] # list of children (also HTML elements)
}
# Attributes of an HTML element
ATTRIBUTES = {
"type": "attrs",
"...": "..."
}
|
|
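A hypothetical instance of the ELEMENT shape documented in the record above (the tag name and attribute values are invented for illustration):

ANCHOR_EXAMPLE = {
    "type": "element",
    "name": "a",
    "attrs": {"href": "https://example.org"},
    "children": []  # a leaf node; nested ELEMENT dicts would go here
}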
64fc8cbccd753e0d56157a588902ace934a0a600
|
setup.py
|
setup.py
|
#!/usr/bin/env python
import os
from setuptools import setup
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as fp:
return fp.read()
setup(
name='django-decorator-include',
version='1.3',
license='BSD',
description='Include Django URL patterns with decorators.',
long_description=read('README.rst'),
author='Jeff Kistler',
author_email='jeff@jeffkistler.com',
url='https://github.com/twidi/django-decorator-include/',
py_modules=['decorator_include'],
install_requires=['Django>=1.8'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
)
|
#!/usr/bin/env python
import os
from setuptools import setup
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as fp:
return fp.read()
setup(
name='django-decorator-include',
version='1.3',
license='BSD',
description='Include Django URL patterns with decorators.',
long_description=read('README.rst'),
author='Jeff Kistler',
author_email='jeff@jeffkistler.com',
url='https://github.com/twidi/django-decorator-include/',
py_modules=['decorator_include'],
install_requires=['Django>=1.8'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
]
)
|
Remove duplicate trove classifier for Python
|
Remove duplicate trove classifier for Python
Alphabetize list to help catch potential future duplication.
|
Python
|
bsd-2-clause
|
jdufresne/django-decorator-include,twidi/django-decorator-include,jdufresne/django-decorator-include,twidi/django-decorator-include
|
#!/usr/bin/env python
import os
from setuptools import setup
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as fp:
return fp.read()
setup(
name='django-decorator-include',
version='1.3',
license='BSD',
description='Include Django URL patterns with decorators.',
long_description=read('README.rst'),
author='Jeff Kistler',
author_email='jeff@jeffkistler.com',
url='https://github.com/twidi/django-decorator-include/',
py_modules=['decorator_include'],
install_requires=['Django>=1.8'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
)
Remove duplicate trove classifier for Python
Alphabetize list to help catch potential future duplication.
|
#!/usr/bin/env python
import os
from setuptools import setup
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as fp:
return fp.read()
setup(
name='django-decorator-include',
version='1.3',
license='BSD',
description='Include Django URL patterns with decorators.',
long_description=read('README.rst'),
author='Jeff Kistler',
author_email='jeff@jeffkistler.com',
url='https://github.com/twidi/django-decorator-include/',
py_modules=['decorator_include'],
install_requires=['Django>=1.8'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
]
)
|
<commit_before>#!/usr/bin/env python
import os
from setuptools import setup
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as fp:
return fp.read()
setup(
name='django-decorator-include',
version='1.3',
license='BSD',
description='Include Django URL patterns with decorators.',
long_description=read('README.rst'),
author='Jeff Kistler',
author_email='jeff@jeffkistler.com',
url='https://github.com/twidi/django-decorator-include/',
py_modules=['decorator_include'],
install_requires=['Django>=1.8'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
)
<commit_msg>Remove duplicate trove classifier for Python
Alphabetize list to help catch potential future duplication.<commit_after>
|
#!/usr/bin/env python
import os
from setuptools import setup
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as fp:
return fp.read()
setup(
name='django-decorator-include',
version='1.3',
license='BSD',
description='Include Django URL patterns with decorators.',
long_description=read('README.rst'),
author='Jeff Kistler',
author_email='jeff@jeffkistler.com',
url='https://github.com/twidi/django-decorator-include/',
py_modules=['decorator_include'],
install_requires=['Django>=1.8'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
]
)
|
#!/usr/bin/env python
import os
from setuptools import setup
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as fp:
return fp.read()
setup(
name='django-decorator-include',
version='1.3',
license='BSD',
description='Include Django URL patterns with decorators.',
long_description=read('README.rst'),
author='Jeff Kistler',
author_email='jeff@jeffkistler.com',
url='https://github.com/twidi/django-decorator-include/',
py_modules=['decorator_include'],
install_requires=['Django>=1.8'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
)
Remove duplicate trove classifier for Python
Alphabetize list to help catch potential future duplication.
#!/usr/bin/env python
import os
from setuptools import setup
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as fp:
return fp.read()
setup(
name='django-decorator-include',
version='1.3',
license='BSD',
description='Include Django URL patterns with decorators.',
long_description=read('README.rst'),
author='Jeff Kistler',
author_email='jeff@jeffkistler.com',
url='https://github.com/twidi/django-decorator-include/',
py_modules=['decorator_include'],
install_requires=['Django>=1.8'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
]
)
|
<commit_before>#!/usr/bin/env python
import os
from setuptools import setup
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as fp:
return fp.read()
setup(
name='django-decorator-include',
version='1.3',
license='BSD',
description='Include Django URL patterns with decorators.',
long_description=read('README.rst'),
author='Jeff Kistler',
author_email='jeff@jeffkistler.com',
url='https://github.com/twidi/django-decorator-include/',
py_modules=['decorator_include'],
install_requires=['Django>=1.8'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
)
<commit_msg>Remove duplicate trove classifier for Python
Alphabetize list to help catch potential future duplication.<commit_after>#!/usr/bin/env python
import os
from setuptools import setup
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as fp:
return fp.read()
setup(
name='django-decorator-include',
version='1.3',
license='BSD',
description='Include Django URL patterns with decorators.',
long_description=read('README.rst'),
author='Jeff Kistler',
author_email='jeff@jeffkistler.com',
url='https://github.com/twidi/django-decorator-include/',
py_modules=['decorator_include'],
install_requires=['Django>=1.8'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
]
)
|
fe7d96c9182831613f4f44bc6c4f5903c7e02858
|
setup.py
|
setup.py
|
from setuptools import setup
def fread(fn):
return open(fn, 'rb').read().decode('utf-8')
setup(
name='tox-travis',
description='Seamless integration of Tox into Travis CI',
long_description=fread('README.rst') + '\n\n' + fread('HISTORY.rst'),
author='Ryan Hiebert',
author_email='ryan@ryanhiebert.com',
url='https://github.com/ryanhiebert/tox-travis',
license='MIT',
version='0.1',
py_modules=['tox_travis'],
entry_points={
'tox': ['travis = tox_travis'],
},
install_requires=['tox>=2.0'],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
|
from setuptools import setup
def fread(fn):
return open(fn, 'rb').read().decode('utf-8')
setup(
name='tox-travis',
description='Seamless integration of Tox into Travis CI',
long_description=fread('README.rst') + '\n\n' + fread('HISTORY.rst'),
author='Ryan Hiebert',
author_email='ryan@ryanhiebert.com',
url='https://github.com/ryanhiebert/tox-travis',
license='MIT',
version='0.1',
py_modules=['tox_travis'],
entry_points={
'tox': ['travis = tox_travis'],
},
install_requires=['tox>=2.0'],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
|
Add Python 3.5 trove classifier
|
Add Python 3.5 trove classifier
|
Python
|
mit
|
rpkilby/tox-travis,ryanhiebert/tox-travis,tox-dev/tox-travis
|
from setuptools import setup
def fread(fn):
return open(fn, 'rb').read().decode('utf-8')
setup(
name='tox-travis',
description='Seamless integration of Tox into Travis CI',
long_description=fread('README.rst') + '\n\n' + fread('HISTORY.rst'),
author='Ryan Hiebert',
author_email='ryan@ryanhiebert.com',
url='https://github.com/ryanhiebert/tox-travis',
license='MIT',
version='0.1',
py_modules=['tox_travis'],
entry_points={
'tox': ['travis = tox_travis'],
},
install_requires=['tox>=2.0'],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
Add Python 3.5 trove classifier
|
from setuptools import setup
def fread(fn):
return open(fn, 'rb').read().decode('utf-8')
setup(
name='tox-travis',
description='Seamless integration of Tox into Travis CI',
long_description=fread('README.rst') + '\n\n' + fread('HISTORY.rst'),
author='Ryan Hiebert',
author_email='ryan@ryanhiebert.com',
url='https://github.com/ryanhiebert/tox-travis',
license='MIT',
version='0.1',
py_modules=['tox_travis'],
entry_points={
'tox': ['travis = tox_travis'],
},
install_requires=['tox>=2.0'],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
|
<commit_before>from setuptools import setup
def fread(fn):
return open(fn, 'rb').read().decode('utf-8')
setup(
name='tox-travis',
description='Seamless integration of Tox into Travis CI',
long_description=fread('README.rst') + '\n\n' + fread('HISTORY.rst'),
author='Ryan Hiebert',
author_email='ryan@ryanhiebert.com',
url='https://github.com/ryanhiebert/tox-travis',
license='MIT',
version='0.1',
py_modules=['tox_travis'],
entry_points={
'tox': ['travis = tox_travis'],
},
install_requires=['tox>=2.0'],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
<commit_msg>Add Python 3.5 trove classifier<commit_after>
|
from setuptools import setup
def fread(fn):
return open(fn, 'rb').read().decode('utf-8')
setup(
name='tox-travis',
description='Seamless integration of Tox into Travis CI',
long_description=fread('README.rst') + '\n\n' + fread('HISTORY.rst'),
author='Ryan Hiebert',
author_email='ryan@ryanhiebert.com',
url='https://github.com/ryanhiebert/tox-travis',
license='MIT',
version='0.1',
py_modules=['tox_travis'],
entry_points={
'tox': ['travis = tox_travis'],
},
install_requires=['tox>=2.0'],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
|
from setuptools import setup
def fread(fn):
return open(fn, 'rb').read().decode('utf-8')
setup(
name='tox-travis',
description='Seamless integration of Tox into Travis CI',
long_description=fread('README.rst') + '\n\n' + fread('HISTORY.rst'),
author='Ryan Hiebert',
author_email='ryan@ryanhiebert.com',
url='https://github.com/ryanhiebert/tox-travis',
license='MIT',
version='0.1',
py_modules=['tox_travis'],
entry_points={
'tox': ['travis = tox_travis'],
},
install_requires=['tox>=2.0'],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
Add Python 3.5 trove classifier
from setuptools import setup
def fread(fn):
return open(fn, 'rb').read().decode('utf-8')
setup(
name='tox-travis',
description='Seamless integration of Tox into Travis CI',
long_description=fread('README.rst') + '\n\n' + fread('HISTORY.rst'),
author='Ryan Hiebert',
author_email='ryan@ryanhiebert.com',
url='https://github.com/ryanhiebert/tox-travis',
license='MIT',
version='0.1',
py_modules=['tox_travis'],
entry_points={
'tox': ['travis = tox_travis'],
},
install_requires=['tox>=2.0'],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
|
<commit_before>from setuptools import setup
def fread(fn):
return open(fn, 'rb').read().decode('utf-8')
setup(
name='tox-travis',
description='Seamless integration of Tox into Travis CI',
long_description=fread('README.rst') + '\n\n' + fread('HISTORY.rst'),
author='Ryan Hiebert',
author_email='ryan@ryanhiebert.com',
url='https://github.com/ryanhiebert/tox-travis',
license='MIT',
version='0.1',
py_modules=['tox_travis'],
entry_points={
'tox': ['travis = tox_travis'],
},
install_requires=['tox>=2.0'],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
<commit_msg>Add Python 3.5 trove classifier<commit_after>from setuptools import setup
def fread(fn):
return open(fn, 'rb').read().decode('utf-8')
setup(
name='tox-travis',
description='Seamless integration of Tox into Travis CI',
long_description=fread('README.rst') + '\n\n' + fread('HISTORY.rst'),
author='Ryan Hiebert',
author_email='ryan@ryanhiebert.com',
url='https://github.com/ryanhiebert/tox-travis',
license='MIT',
version='0.1',
py_modules=['tox_travis'],
entry_points={
'tox': ['travis = tox_travis'],
},
install_requires=['tox>=2.0'],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
|
fb9df7b3e02d605c3888f1b30cb799f1c95928dc
|
examples/keras/mnist_mlp.py
|
examples/keras/mnist_mlp.py
|
'''Trains a simple deep NN on the MNIST dataset.
Gets to 98.40% test accuracy after 20 epochs
(there is *a lot* of margin for parameter tuning).
2 seconds per epoch on a K520 GPU.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
batch_size = 128
nb_classes = 10
nb_epoch = 20
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
history = model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
|
Add some example model - simple network on MNIST.
|
Add some example model - simple network on MNIST.
A basic example from Keras - https://github.com/fchollet/keras/tree/master/examples
|
Python
|
mit
|
bzamecnik/sanctuary
|
Add some example model - simple network on MNIST.
A basic example from Keras - https://github.com/fchollet/keras/tree/master/examples
|
'''Trains a simple deep NN on the MNIST dataset.
Gets to 98.40% test accuracy after 20 epochs
(there is *a lot* of margin for parameter tuning).
2 seconds per epoch on a K520 GPU.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
batch_size = 128
nb_classes = 10
nb_epoch = 20
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
history = model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
|
<commit_before><commit_msg>Add some example model - simple network on MNIST.
A basic example from Keras - https://github.com/fchollet/keras/tree/master/examples<commit_after>
|
'''Trains a simple deep NN on the MNIST dataset.
Gets to 98.40% test accuracy after 20 epochs
(there is *a lot* of margin for parameter tuning).
2 seconds per epoch on a K520 GPU.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
batch_size = 128
nb_classes = 10
nb_epoch = 20
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
history = model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
|
Add some example model - simple network on MNIST.
A basic example from Keras - https://github.com/fchollet/keras/tree/master/examples
'''Trains a simple deep NN on the MNIST dataset.
Gets to 98.40% test accuracy after 20 epochs
(there is *a lot* of margin for parameter tuning).
2 seconds per epoch on a K520 GPU.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
batch_size = 128
nb_classes = 10
nb_epoch = 20
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
history = model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
|
<commit_before><commit_msg>Add some example model - simple network on MNIST.
A basic example from Keras - https://github.com/fchollet/keras/tree/master/examples<commit_after>'''Trains a simple deep NN on the MNIST dataset.
Gets to 98.40% test accuracy after 20 epochs
(there is *a lot* of margin for parameter tuning).
2 seconds per epoch on a K520 GPU.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
batch_size = 128
nb_classes = 10
nb_epoch = 20
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
history = model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
|
|
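A short, hypothetical follow-up to the MNIST record above showing how the fitted model could be queried (assumes the model and X_test variables from the script; predict and argmax are standard Keras/NumPy calls):

# Class probabilities over the 10 digits for the first five test images.
probs = model.predict(X_test[:5])
print(probs.argmax(axis=1))  # predicted digit for each sample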
0bea26ee40261f5f7da0f6acb74dce8aabfd5856
|
photutils/utils/tests/test_parameters.py
|
photutils/utils/tests/test_parameters.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the parameters module.
"""
import numpy as np
from numpy.testing import assert_equal
import pytest
from .._parameters import as_pair
def test_as_pair():
assert_equal(as_pair('myparam', 4), (4, 4))
assert_equal(as_pair('myparam', (3, 4)), (3, 4))
assert_equal(as_pair('myparam', 0), (0, 0))
with pytest.raises(ValueError):
as_pair('myparam', 0, lower_bound=(0, 1))
with pytest.raises(ValueError):
as_pair('myparam', (1, np.nan))
with pytest.raises(ValueError):
as_pair('myparam', (1, np.inf))
with pytest.raises(ValueError):
as_pair('myparam', (3, 4), check_odd=True)
with pytest.raises(ValueError):
as_pair('myparam', 4, check_odd=True)
|
Add unit tests for as_pair
|
Add unit tests for as_pair
|
Python
|
bsd-3-clause
|
astropy/photutils,larrybradley/photutils
|
Add unit tests for as_pair
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the parameters module.
"""
import numpy as np
from numpy.testing import assert_equal
import pytest
from .._parameters import as_pair
def test_as_pair():
assert_equal(as_pair('myparam', 4), (4, 4))
assert_equal(as_pair('myparam', (3, 4)), (3, 4))
assert_equal(as_pair('myparam', 0), (0, 0))
with pytest.raises(ValueError):
as_pair('myparam', 0, lower_bound=(0, 1))
with pytest.raises(ValueError):
as_pair('myparam', (1, np.nan))
with pytest.raises(ValueError):
as_pair('myparam', (1, np.inf))
with pytest.raises(ValueError):
as_pair('myparam', (3, 4), check_odd=True)
with pytest.raises(ValueError):
as_pair('myparam', 4, check_odd=True)
|
<commit_before><commit_msg>Add unit tests for as_pair<commit_after>
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the parameters module.
"""
import numpy as np
from numpy.testing import assert_equal
import pytest
from .._parameters import as_pair
def test_as_pair():
assert_equal(as_pair('myparam', 4), (4, 4))
assert_equal(as_pair('myparam', (3, 4)), (3, 4))
assert_equal(as_pair('myparam', 0), (0, 0))
with pytest.raises(ValueError):
as_pair('myparam', 0, lower_bound=(0, 1))
with pytest.raises(ValueError):
as_pair('myparam', (1, np.nan))
with pytest.raises(ValueError):
as_pair('myparam', (1, np.inf))
with pytest.raises(ValueError):
as_pair('myparam', (3, 4), check_odd=True)
with pytest.raises(ValueError):
as_pair('myparam', 4, check_odd=True)
|
Add unit tests for as_pair
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the parameters module.
"""
import numpy as np
from numpy.testing import assert_equal
import pytest
from .._parameters import as_pair
def test_as_pair():
assert_equal(as_pair('myparam', 4), (4, 4))
assert_equal(as_pair('myparam', (3, 4)), (3, 4))
assert_equal(as_pair('myparam', 0), (0, 0))
with pytest.raises(ValueError):
as_pair('myparam', 0, lower_bound=(0, 1))
with pytest.raises(ValueError):
as_pair('myparam', (1, np.nan))
with pytest.raises(ValueError):
as_pair('myparam', (1, np.inf))
with pytest.raises(ValueError):
as_pair('myparam', (3, 4), check_odd=True)
with pytest.raises(ValueError):
as_pair('myparam', 4, check_odd=True)
|
<commit_before><commit_msg>Add unit tests for as_pair<commit_after># Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the parameters module.
"""
import numpy as np
from numpy.testing import assert_equal
import pytest
from .._parameters import as_pair
def test_as_pair():
assert_equal(as_pair('myparam', 4), (4, 4))
assert_equal(as_pair('myparam', (3, 4)), (3, 4))
assert_equal(as_pair('myparam', 0), (0, 0))
with pytest.raises(ValueError):
as_pair('myparam', 0, lower_bound=(0, 1))
with pytest.raises(ValueError):
as_pair('myparam', (1, np.nan))
with pytest.raises(ValueError):
as_pair('myparam', (1, np.inf))
with pytest.raises(ValueError):
as_pair('myparam', (3, 4), check_odd=True)
with pytest.raises(ValueError):
as_pair('myparam', 4, check_odd=True)
|
|
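From the assertions in the record above one can infer the as_pair contract; below is a rough behavioral sketch consistent with those tests, not the actual photutils implementation (the lower_bound convention is omitted because it is not inferable from the tests alone):

import numpy as np

def as_pair_like(name, value, check_odd=False):
    # Scalars broadcast to (value, value); 2-sequences pass through.
    pair = np.atleast_1d(value)
    if pair.size == 1:
        pair = np.repeat(pair, 2)
    if not np.all(np.isfinite(pair)):
        raise ValueError(f'{name} must contain only finite values')
    if check_odd and np.any(pair % 2 == 0):
        raise ValueError(f'{name} must contain only odd values')
    return tuple(pair)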
6b81d4f79112347262f0b43965531f300db0ef26
|
jarviscli/tests/test_cryptotracker.py
|
jarviscli/tests/test_cryptotracker.py
|
import unittest
from colorama import Fore
from tests import PluginTest
from mock import call, patch
from plugins import cryptotracker
class TestCryptotracker(PluginTest):
"""
A test class that contains test cases for the methods of
the cryptotracker plugin.
"""
def setUp(self):
self.module = self.load_plugin(cryptotracker.main)
def test_print_in_color_red(self):
change = -1.54
colored_text = Fore.RED + str(change) + "%" + Fore.RESET
self.assertEqual(cryptotracker.print_in_color(change),
colored_text)
def test_print_in_color_green(self):
change = 1.54
colored_text = Fore.GREEN + str(change) + "%" + Fore.RESET
self.assertEqual(cryptotracker.print_in_color(change),
colored_text)
@patch.object(cryptotracker, 'check_prices')
def test_main_specific_pair(self, mock_check_prices):
s = 'BTC/USD'
base_expected = 'BTC'
target_expected = 'USD'
self.module.run(s)
mock_check_prices.assert_called_with(base_expected, target_expected)
@patch.object(cryptotracker, 'check_prices')
def test_main_default_list(self, mock_check_prices):
s = ''
base_expected = ['BTC', 'ETH', 'LTC', 'XRP', 'ADA']
target_expected = 'USD'
self.module.run(s)
        expected_calls = [call(i, target_expected) for i in base_expected]
        self.assertEqual(mock_check_prices.call_args_list, expected_calls)
@patch('builtins.print')
def test_main_exception_message(self, mock_print):
s = 'wrong argument'
self.module.run(s)
mock_print.assert_called_with(
"{WARNING}Wrong format!{COLOR_RESET} "
"Try {ADVICE}cryptotracker base_currency/target_currency{COLOR_RESET} OR "
"{ADVICE}cryptotracker{COLOR_RESET}".format(
WARNING=Fore.RED, ADVICE=Fore.BLUE, COLOR_RESET=Fore.RESET))
@patch('builtins.print')
def test_check_prices_exception_message(self, mock_print):
target = "wrong_currency"
base = "wrong_currency"
cryptotracker.check_prices(target, base)
mock_print.assert_called_with(
"{WARNING}Wrong pair {}/{}!{COLOR_RESET} "
"\nFull list of symbols is here: "
"https://coinmarketcap.com/all/views/all/"
"\n".format(
base,
target,
WARNING=Fore.RED,
COLOR_RESET=Fore.RESET))
if __name__ == '__main__':
unittest.main()
|
Create test cases for the cryptotracker plugin
|
Create test cases for the cryptotracker plugin
|
Python
|
mit
|
sukeesh/Jarvis,sukeesh/Jarvis,sukeesh/Jarvis,sukeesh/Jarvis
|
Create test cases for the cryptotracker plugin
|
import unittest
from colorama import Fore
from tests import PluginTest
from mock import call, patch
from plugins import cryptotracker
class TestCryptotracker(PluginTest):
"""
A test class that contains test cases for the methods of
the cryptotracker plugin.
"""
def setUp(self):
self.module = self.load_plugin(cryptotracker.main)
def test_print_in_color_red(self):
change = -1.54
colored_text = Fore.RED + str(change) + "%" + Fore.RESET
self.assertEqual(cryptotracker.print_in_color(change),
colored_text)
def test_print_in_color_green(self):
change = 1.54
colored_text = Fore.GREEN + str(change) + "%" + Fore.RESET
self.assertEqual(cryptotracker.print_in_color(change),
colored_text)
@patch.object(cryptotracker, 'check_prices')
def test_main_specific_pair(self, mock_check_prices):
s = 'BTC/USD'
base_expected = 'BTC'
target_expected = 'USD'
self.module.run(s)
mock_check_prices.assert_called_with(base_expected, target_expected)
@patch.object(cryptotracker, 'check_prices')
def test_main_default_list(self, mock_check_prices):
s = ''
base_expected = ['BTC', 'ETH', 'LTC', 'XRP', 'ADA']
target_expected = 'USD'
self.module.run(s)
        expected_calls = [call(i, target_expected) for i in base_expected]
        self.assertEqual(mock_check_prices.call_args_list, expected_calls)
@patch('builtins.print')
def test_main_exception_message(self, mock_print):
s = 'wrong argument'
self.module.run(s)
mock_print.assert_called_with(
"{WARNING}Wrong format!{COLOR_RESET} "
"Try {ADVICE}cryptotracker base_currency/target_currency{COLOR_RESET} OR "
"{ADVICE}cryptotracker{COLOR_RESET}".format(
WARNING=Fore.RED, ADVICE=Fore.BLUE, COLOR_RESET=Fore.RESET))
@patch('builtins.print')
def test_check_prices_exception_message(self, mock_print):
target = "wrong_currency"
base = "wrong_currency"
cryptotracker.check_prices(target, base)
mock_print.assert_called_with(
"{WARNING}Wrong pair {}/{}!{COLOR_RESET} "
"\nFull list of symbols is here: "
"https://coinmarketcap.com/all/views/all/"
"\n".format(
base,
target,
WARNING=Fore.RED,
COLOR_RESET=Fore.RESET))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Create test cases for the cryptotracker plugin<commit_after>
|
import unittest
from colorama import Fore
from tests import PluginTest
from mock import call, patch
from plugins import cryptotracker
class TestCryptotracker(PluginTest):
"""
A test class that contains test cases for the methods of
the cryptotracker plugin.
"""
def setUp(self):
self.module = self.load_plugin(cryptotracker.main)
def test_print_in_color_red(self):
change = -1.54
colored_text = Fore.RED + str(change) + "%" + Fore.RESET
self.assertEqual(cryptotracker.print_in_color(change),
colored_text)
def test_print_in_color_green(self):
change = 1.54
colored_text = Fore.GREEN + str(change) + "%" + Fore.RESET
self.assertEqual(cryptotracker.print_in_color(change),
colored_text)
@patch.object(cryptotracker, 'check_prices')
def test_main_specific_pair(self, mock_check_prices):
s = 'BTC/USD'
base_expected = 'BTC'
target_expected = 'USD'
self.module.run(s)
mock_check_prices.assert_called_with(base_expected, target_expected)
@patch.object(cryptotracker, 'check_prices')
def test_main_default_list(self, mock_check_prices):
s = ''
base_expected = ['BTC', 'ETH', 'LTC', 'XRP', 'ADA']
target_expected = 'USD'
self.module.run(s)
        expected_calls = [call(i, target_expected) for i in base_expected]
        self.assertEqual(mock_check_prices.call_args_list, expected_calls)
@patch('builtins.print')
def test_main_exception_message(self, mock_print):
s = 'wrong argument'
self.module.run(s)
mock_print.assert_called_with(
"{WARNING}Wrong format!{COLOR_RESET} "
"Try {ADVICE}cryptotracker base_currency/target_currency{COLOR_RESET} OR "
"{ADVICE}cryptotracker{COLOR_RESET}".format(
WARNING=Fore.RED, ADVICE=Fore.BLUE, COLOR_RESET=Fore.RESET))
@patch('builtins.print')
def test_check_prices_exception_message(self, mock_print):
target = "wrong_currency"
base = "wrong_currency"
cryptotracker.check_prices(target, base)
mock_print.assert_called_with(
"{WARNING}Wrong pair {}/{}!{COLOR_RESET} "
"\nFull list of symbols is here: "
"https://coinmarketcap.com/all/views/all/"
"\n".format(
base,
target,
WARNING=Fore.RED,
COLOR_RESET=Fore.RESET))
if __name__ == '__main__':
unittest.main()
|
Create test cases for the cryptotracker plugin
import unittest
from colorama import Fore
from tests import PluginTest
from mock import call, patch
from plugins import cryptotracker
class TestCryptotracker(PluginTest):
"""
A test class that contains test cases for the methods of
the cryptotracker plugin.
"""
def setUp(self):
self.module = self.load_plugin(cryptotracker.main)
def test_print_in_color_red(self):
change = -1.54
colored_text = Fore.RED + str(change) + "%" + Fore.RESET
self.assertEqual(cryptotracker.print_in_color(change),
colored_text)
def test_print_in_color_green(self):
change = 1.54
colored_text = Fore.GREEN + str(change) + "%" + Fore.RESET
self.assertEqual(cryptotracker.print_in_color(change),
colored_text)
@patch.object(cryptotracker, 'check_prices')
def test_main_specific_pair(self, mock_check_prices):
s = 'BTC/USD'
base_expected = 'BTC'
target_expected = 'USD'
self.module.run(s)
mock_check_prices.assert_called_with(base_expected, target_expected)
@patch.object(cryptotracker, 'check_prices')
def test_main_default_list(self, mock_check_prices):
s = ''
base_expected = ['BTC', 'ETH', 'LTC', 'XRP', 'ADA']
target_expected = 'USD'
self.module.run(s)
        expected_calls = [call(i, target_expected) for i in base_expected]
        self.assertEqual(mock_check_prices.call_args_list, expected_calls)
@patch('builtins.print')
def test_main_exception_message(self, mock_print):
s = 'wrong argument'
self.module.run(s)
mock_print.assert_called_with(
"{WARNING}Wrong format!{COLOR_RESET} "
"Try {ADVICE}cryptotracker base_currency/target_currency{COLOR_RESET} OR "
"{ADVICE}cryptotracker{COLOR_RESET}".format(
WARNING=Fore.RED, ADVICE=Fore.BLUE, COLOR_RESET=Fore.RESET))
@patch('builtins.print')
def test_check_prices_exception_message(self, mock_print):
target = "wrong_currency"
base = "wrong_currency"
cryptotracker.check_prices(target, base)
mock_print.assert_called_with(
"{WARNING}Wrong pair {}/{}!{COLOR_RESET} "
"\nFull list of symbols is here: "
"https://coinmarketcap.com/all/views/all/"
"\n".format(
base,
target,
WARNING=Fore.RED,
COLOR_RESET=Fore.RESET))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Create test cases for the cryptotracker plugin<commit_after>import unittest
from colorama import Fore
from tests import PluginTest
from mock import call, patch
from plugins import cryptotracker
class TestCryptotracker(PluginTest):
"""
A test class that contains test cases for the methods of
the cryptotracker plugin.
"""
def setUp(self):
self.module = self.load_plugin(cryptotracker.main)
def test_print_in_color_red(self):
change = -1.54
colored_text = Fore.RED + str(change) + "%" + Fore.RESET
self.assertEqual(cryptotracker.print_in_color(change),
colored_text)
def test_print_in_color_green(self):
change = 1.54
colored_text = Fore.GREEN + str(change) + "%" + Fore.RESET
self.assertEqual(cryptotracker.print_in_color(change),
colored_text)
@patch.object(cryptotracker, 'check_prices')
def test_main_specific_pair(self, mock_check_prices):
s = 'BTC/USD'
base_expected = 'BTC'
target_expected = 'USD'
self.module.run(s)
mock_check_prices.assert_called_with(base_expected, target_expected)
@patch.object(cryptotracker, 'check_prices')
def test_main_default_list(self, mock_check_prices):
s = ''
base_expected = ['BTC', 'ETH', 'LTC', 'XRP', 'ADA']
target_expected = 'USD'
self.module.run(s)
        expected_calls = [call(i, target_expected) for i in base_expected]
        self.assertEqual(mock_check_prices.call_args_list, expected_calls)
@patch('builtins.print')
def test_main_exception_message(self, mock_print):
s = 'wrong argument'
self.module.run(s)
mock_print.assert_called_with(
"{WARNING}Wrong format!{COLOR_RESET} "
"Try {ADVICE}cryptotracker base_currency/target_currency{COLOR_RESET} OR "
"{ADVICE}cryptotracker{COLOR_RESET}".format(
WARNING=Fore.RED, ADVICE=Fore.BLUE, COLOR_RESET=Fore.RESET))
@patch('builtins.print')
def test_check_prices_exception_message(self, mock_print):
target = "wrong_currency"
base = "wrong_currency"
cryptotracker.check_prices(target, base)
mock_print.assert_called_with(
"{WARNING}Wrong pair {}/{}!{COLOR_RESET} "
"\nFull list of symbols is here: "
"https://coinmarketcap.com/all/views/all/"
"\n".format(
base,
target,
WARNING=Fore.RED,
COLOR_RESET=Fore.RESET))
if __name__ == '__main__':
unittest.main()
|
|
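A minimal sketch of the colorized-output contract the tests above pin down, assuming a hypothetical helper named format_change (the real plugin's print_in_color may differ); colorama's Fore constants are the only real API used:
import unittest
from colorama import Fore
def format_change(change):
    # negative changes render red, non-negative render green
    color = Fore.RED if change < 0 else Fore.GREEN
    return color + str(change) + "%" + Fore.RESET
class TestFormatChange(unittest.TestCase):
    def test_negative_is_red(self):
        self.assertEqual(format_change(-1.5), Fore.RED + "-1.5%" + Fore.RESET)
    def test_positive_is_green(self):
        self.assertEqual(format_change(2.0), Fore.GREEN + "2.0%" + Fore.RESET)
if __name__ == '__main__':
    unittest.main()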
28b4fa36a7ae022324fb89d69c14ebf9201ad116
|
lib/GenomeFileUtil/core/exceptions.py
|
lib/GenomeFileUtil/core/exceptions.py
|
import json
class RENotFound(RuntimeError):
"""A resource was not found on the relation engine."""
def __init__(self, coll, key, val, resp_json):
"""
`key` - the key we used to try to find something.
`val` - the val we used to try to look up the above key.
"""
        self.coll = coll
        self.key = key
self.val = val
self.resp_json = resp_json
def __repr__(self):
# Eg: "Relation engine API error fetching taxon document by taxonomy ID
# using the value 123. The server response was:
# etc.
return (f"Relation engine API error fetching a {self.coll} document by"
f"{self.key} using the value '{self.val}'. The server response "
f"was: \n{json.dumps(self.resp_json, indent=2)}")
|
Add an RENotFound exception class to be raised when we cannot locate a required resource on RE
|
Add an RENotFound exception class to be raised when we cannot locate a required resource on RE
|
Python
|
mit
|
kbaseapps/GenomeFileUtil,kbaseapps/GenomeFileUtil,kbaseapps/GenomeFileUtil,kbaseapps/GenomeFileUtil
|
Add an RENotFound exception class to be raised when we cannot locate a required resource on RE
|
import json
class RENotFound(RuntimeError):
"""A resource was not found on the relation engine."""
def __init__(self, coll, key, val, resp_json):
"""
`key` - the key we used to try to find something.
`val` - the val we used to try to look up the above key.
"""
        self.coll = coll
        self.key = key
self.val = val
self.resp_json = resp_json
def __repr__(self):
# Eg: "Relation engine API error fetching taxon document by taxonomy ID
# using the value 123. The server response was:
# etc.
return (f"Relation engine API error fetching a {self.coll} document by"
f"{self.key} using the value '{self.val}'. The server response "
f"was: \n{json.dumps(self.resp_json, indent=2)}")
|
<commit_before><commit_msg>Add an RENotFound exception class to be raised when we cannot locate a required resource on RE<commit_after>
|
import json
class RENotFound(RuntimeError):
"""A resource was not found on the relation engine."""
def __init__(self, coll, key, val, resp_json):
"""
`key` - the key we used to try to find something.
`val` - the val we used to try to look up the above key.
"""
        self.coll = coll
        self.key = key
self.val = val
self.resp_json = resp_json
def __repr__(self):
# Eg: "Relation engine API error fetching taxon document by taxonomy ID
# using the value 123. The server response was:
# etc.
return (f"Relation engine API error fetching a {self.coll} document by"
f"{self.key} using the value '{self.val}'. The server response "
f"was: \n{json.dumps(self.resp_json, indent=2)}")
|
Add an RENotFound exception class to be raised when we cannot locate a required resource on REimport json
class RENotFound(RuntimeError):
"""A resource was not found on the relation engine."""
def __init__(self, coll, key, val, resp_json):
"""
`key` - the key we used to try to find something.
`val` - the val we used to try to look up the above key.
"""
        self.coll = coll
        self.key = key
self.val = val
self.resp_json = resp_json
def __repr__(self):
# Eg: "Relation engine API error fetching taxon document by taxonomy ID
# using the value 123. The server response was:
# etc.
return (f"Relation engine API error fetching a {self.coll} document by"
f"{self.key} using the value '{self.val}'. The server response "
f"was: \n{json.dumps(self.resp_json, indent=2)}")
|
<commit_before><commit_msg>Add an RENotFound exception class to be raised when we cannot locate a required resource on RE<commit_after>import json
class RENotFound(RuntimeError):
"""A resource was not found on the relation engine."""
def __init__(self, coll, key, val, resp_json):
"""
`key` - the key we used to try to find something.
`val` - the val we used to try to look up the above key.
"""
        self.coll = coll
        self.key = key
self.val = val
self.resp_json = resp_json
def __repr__(self):
# Eg: "Relation engine API error fetching taxon document by taxonomy ID
# using the value 123. The server response was:
# etc.
return (f"Relation engine API error fetching a {self.coll} document by"
f"{self.key} using the value '{self.val}'. The server response "
f"was: \n{json.dumps(self.resp_json, indent=2)}")
|
|
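A short usage sketch for the exception defined above; the collection name, lookup key and response payload are invented for illustration, and the class itself is assumed to be in scope:
# reuses the RENotFound class from the record above
try:
    raise RENotFound('taxon', 'taxonomy ID', 123, {'results': []})
except RENotFound as err:
    print(repr(err))  # formatted relation-engine error, including the payload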
1ed276fceba0a4d8a105bc2b60986d815326dcca
|
tests/test_utils.py
|
tests/test_utils.py
|
import datetime
import unittest
import hiro
from hiro.errors import InvalidTypeError
from hiro.utils import timedelta_to_seconds, time_in_seconds, chained
class TestTimeDeltaToSeconds(unittest.TestCase):
def test_fractional(self):
delta = datetime.timedelta(seconds=1, microseconds=1000)
self.assertAlmostEqual(timedelta_to_seconds(delta), 1.001)
def test_days(self):
delta = datetime.timedelta(days=10)
self.assertEqual(timedelta_to_seconds(delta),
delta.days * 24 * 60 * 60)
class TestTimeInSeconds(unittest.TestCase):
def test_passthrough(self):
self.assertEqual(time_in_seconds(1), 1)
self.assertEqual(time_in_seconds(1.0), 1.0)
def test_date(self):
d = datetime.date(1970, 1, 1)
self.assertEqual(time_in_seconds(d), 0)
def test_datetime(self):
d = datetime.datetime(1970, 1, 1, 0, 0, 0)
self.assertEqual(time_in_seconds(d), 0)
def test_invalid_type(self):
with self.assertRaises(InvalidTypeError):
time_in_seconds("this is a string")
class TestChained(unittest.TestCase):
class Foo(object):
@chained
def return_value(self, value=None):
return value
def setUp(self):
self.obj = self.Foo()
def test_no_return(self):
self.assertIs(self.obj.return_value(), self.obj)
def test_with_return(self):
o = object()
self.assertIs(self.obj.return_value(o), o)
def test_kwargs(self):
o = object()
self.assertIs(self.obj.return_value(value=o), o)
|
Add test suite for hiro.utils
|
Add test suite for hiro.utils
|
Python
|
mit
|
alisaifee/hiro,alisaifee/hiro
|
Add test suite for hiro.utils
|
import datetime
import unittest
import hiro
from hiro.errors import InvalidTypeError
from hiro.utils import timedelta_to_seconds, time_in_seconds, chained
class TestTimeDeltaToSeconds(unittest.TestCase):
def test_fractional(self):
delta = datetime.timedelta(seconds=1, microseconds=1000)
self.assertAlmostEqual(timedelta_to_seconds(delta), 1.001)
def test_days(self):
delta = datetime.timedelta(days=10)
self.assertEqual(timedelta_to_seconds(delta),
delta.days * 24 * 60 * 60)
class TestTimeInSeconds(unittest.TestCase):
def test_passthrough(self):
self.assertEqual(time_in_seconds(1), 1)
self.assertEqual(time_in_seconds(1.0), 1.0)
def test_date(self):
d = datetime.date(1970, 1, 1)
self.assertEqual(time_in_seconds(d), 0)
def test_datetime(self):
d = datetime.datetime(1970, 1, 1, 0, 0, 0)
self.assertEqual(time_in_seconds(d), 0)
def test_invalid_type(self):
with self.assertRaises(InvalidTypeError):
time_in_seconds("this is a string")
class TestChained(unittest.TestCase):
class Foo(object):
@chained
def return_value(self, value=None):
return value
def setUp(self):
self.obj = self.Foo()
def test_no_return(self):
self.assertIs(self.obj.return_value(), self.obj)
def test_with_return(self):
o = object()
self.assertIs(self.obj.return_value(o), o)
def test_kwargs(self):
o = object()
self.assertIs(self.obj.return_value(value=o), o)
|
<commit_before><commit_msg>Add test suite for hiro.utils<commit_after>
|
import datetime
import unittest
import hiro
from hiro.errors import InvalidTypeError
from hiro.utils import timedelta_to_seconds, time_in_seconds, chained
class TestTimeDeltaToSeconds(unittest.TestCase):
def test_fractional(self):
delta = datetime.timedelta(seconds=1, microseconds=1000)
self.assertAlmostEqual(timedelta_to_seconds(delta), 1.001)
def test_days(self):
delta = datetime.timedelta(days=10)
self.assertEqual(timedelta_to_seconds(delta),
delta.days * 24 * 60 * 60)
class TestTimeInSeconds(unittest.TestCase):
def test_passthrough(self):
self.assertEqual(time_in_seconds(1), 1)
self.assertEqual(time_in_seconds(1.0), 1.0)
def test_date(self):
d = datetime.date(1970, 1, 1)
self.assertEqual(time_in_seconds(d), 0)
def test_datetime(self):
d = datetime.datetime(1970, 1, 1, 0, 0, 0)
self.assertEqual(time_in_seconds(d), 0)
def test_invalid_type(self):
with self.assertRaises(InvalidTypeError):
time_in_seconds("this is a string")
class TestChained(unittest.TestCase):
class Foo(object):
@chained
def return_value(self, value=None):
return value
def setUp(self):
self.obj = self.Foo()
def test_no_return(self):
self.assertIs(self.obj.return_value(), self.obj)
def test_with_return(self):
o = object()
self.assertIs(self.obj.return_value(o), o)
def test_kwargs(self):
o = object()
self.assertIs(self.obj.return_value(value=o), o)
|
Add test suite for hiro.utilsimport datetime
import unittest
import hiro
from hiro.errors import InvalidTypeError
from hiro.utils import timedelta_to_seconds, time_in_seconds, chained
class TestTimeDeltaToSeconds(unittest.TestCase):
def test_fractional(self):
delta = datetime.timedelta(seconds=1, microseconds=1000)
self.assertAlmostEqual(timedelta_to_seconds(delta), 1.001)
def test_days(self):
delta = datetime.timedelta(days=10)
self.assertEqual(timedelta_to_seconds(delta),
delta.days * 24 * 60 * 60)
class TestTimeInSeconds(unittest.TestCase):
def test_passthrough(self):
self.assertEqual(time_in_seconds(1), 1)
self.assertEqual(time_in_seconds(1.0), 1.0)
def test_date(self):
d = datetime.date(1970, 1, 1)
self.assertEqual(time_in_seconds(d), 0)
def test_datetime(self):
d = datetime.datetime(1970, 1, 1, 0, 0, 0)
self.assertEqual(time_in_seconds(d), 0)
def test_invalid_type(self):
with self.assertRaises(InvalidTypeError):
time_in_seconds("this is a string")
class TestChained(unittest.TestCase):
class Foo(object):
@chained
def return_value(self, value=None):
return value
def setUp(self):
self.obj = self.Foo()
def test_no_return(self):
self.assertIs(self.obj.return_value(), self.obj)
def test_with_return(self):
o = object()
self.assertIs(self.obj.return_value(o), o)
def test_kwargs(self):
o = object()
self.assertIs(self.obj.return_value(value=o), o)
|
<commit_before><commit_msg>Add test suite for hiro.utils<commit_after>import datetime
import unittest
import hiro
from hiro.errors import InvalidTypeError
from hiro.utils import timedelta_to_seconds, time_in_seconds, chained
class TestTimeDeltaToSeconds(unittest.TestCase):
def test_fractional(self):
delta = datetime.timedelta(seconds=1, microseconds=1000)
self.assertAlmostEqual(timedelta_to_seconds(delta), 1.001)
def test_days(self):
delta = datetime.timedelta(days=10)
self.assertEqual(timedelta_to_seconds(delta),
delta.days * 24 * 60 * 60)
class TestTimeInSeconds(unittest.TestCase):
def test_passthrough(self):
self.assertEqual(time_in_seconds(1), 1)
self.assertEqual(time_in_seconds(1.0), 1.0)
def test_date(self):
d = datetime.date(1970, 1, 1)
self.assertEqual(time_in_seconds(d), 0)
def test_datetime(self):
d = datetime.datetime(1970, 1, 1, 0, 0, 0)
self.assertEqual(time_in_seconds(d), 0)
def test_invalid_type(self):
with self.assertRaises(InvalidTypeError):
time_in_seconds("this is a string")
class TestChained(unittest.TestCase):
class Foo(object):
@chained
def return_value(self, value=None):
return value
def setUp(self):
self.obj = self.Foo()
def test_no_return(self):
self.assertIs(self.obj.return_value(), self.obj)
def test_with_return(self):
o = object()
self.assertIs(self.obj.return_value(o), o)
def test_kwargs(self):
o = object()
self.assertIs(self.obj.return_value(value=o), o)
|
|
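One plausible implementation of the `chained` decorator those tests pin down, reconstructed from the observable behaviour (hiro's actual code may differ): return the wrapped method's result, falling back to `self` when it returns None, so calls can be chained fluently:
import functools
def chained(method):
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        result = method(self, *args, **kwargs)
        # None means "nothing to return", so hand back the instance
        return self if result is None else result
    return wrapper
class Counter(object):
    def __init__(self):
        self.n = 0
    @chained
    def bump(self):
        self.n += 1  # returns None, so the wrapper returns self
c = Counter()
c.bump().bump()  # chaining works because each call yields the instance
assert c.n == 2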
be4388302069e59996438f083ab738470a68860c
|
jenkinsapi/utils/krb_requester.py
|
jenkinsapi/utils/krb_requester.py
|
from jenkinsapi.utils.requester import Requester
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
class KrbRequester(Requester):
"""
A class which carries out HTTP requests with Kerberos/GSSAPI authentication.
"""
def __init__(self, ssl_verify=None, baseurl=None, mutual_auth=OPTIONAL):
"""
:param ssl_verify: flag indicating if server certificate in HTTPS requests should be verified
:param baseurl: Jenkins' base URL
:param mutual_auth: type of mutual authentication, use one of REQUIRED, OPTIONAL or DISABLED
from requests_kerberos package
"""
args = {}
if ssl_verify:
args["ssl_verify"] = ssl_verify
if baseurl:
args["baseurl"] = baseurl
super(KrbRequester, self).__init__(**args)
self.mutual_auth = mutual_auth
def get_request_dict(self, url, params, data, headers):
req_dict = super(KrbRequester, self).get_request_dict(url=url, params=params,
data=data, headers=headers)
if self.mutual_auth:
auth = HTTPKerberosAuth(self.mutual_auth)
else:
auth = HTTPKerberosAuth()
req_dict['auth'] = auth
return req_dict
|
Add kerberos authentication requester using requests_kerberos
|
Add kerberos authentication requester using requests_kerberos
|
Python
|
mit
|
salimfadhley/jenkinsapi,JohnLZeller/jenkinsapi,imsardine/jenkinsapi,jduan/jenkinsapi,aerickson/jenkinsapi,mistermocha/jenkinsapi,imsardine/jenkinsapi,jduan/jenkinsapi,mistermocha/jenkinsapi,JohnLZeller/jenkinsapi,zaro0508/jenkinsapi,zaro0508/jenkinsapi,aerickson/jenkinsapi,mistermocha/jenkinsapi,zaro0508/jenkinsapi,salimfadhley/jenkinsapi,imsardine/jenkinsapi,JohnLZeller/jenkinsapi,domenkozar/jenkinsapi,domenkozar/jenkinsapi
|
Add kerberos authentication requester using requests_kerberos
|
from jenkinsapi.utils.requester import Requester
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
class KrbRequester(Requester):
"""
A class which carries out HTTP requests with Kerberos/GSSAPI authentication.
"""
def __init__(self, ssl_verify=None, baseurl=None, mutual_auth=OPTIONAL):
"""
:param ssl_verify: flag indicating if server certificate in HTTPS requests should be verified
:param baseurl: Jenkins' base URL
:param mutual_auth: type of mutual authentication, use one of REQUIRED, OPTIONAL or DISABLED
from requests_kerberos package
"""
args = {}
if ssl_verify:
args["ssl_verify"] = ssl_verify
if baseurl:
args["baseurl"] = baseurl
super(KrbRequester, self).__init__(**args)
self.mutual_auth = mutual_auth
def get_request_dict(self, url, params, data, headers):
req_dict = super(KrbRequester, self).get_request_dict(url=url, params=params,
data=data, headers=headers)
if self.mutual_auth:
auth = HTTPKerberosAuth(self.mutual_auth)
else:
auth = HTTPKerberosAuth()
req_dict['auth'] = auth
return req_dict
|
<commit_before><commit_msg>Add kerberos authentication requester using requests_kerberos<commit_after>
|
from jenkinsapi.utils.requester import Requester
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
class KrbRequester(Requester):
"""
A class which carries out HTTP requests with Kerberos/GSSAPI authentication.
"""
def __init__(self, ssl_verify=None, baseurl=None, mutual_auth=OPTIONAL):
"""
:param ssl_verify: flag indicating if server certificate in HTTPS requests should be verified
:param baseurl: Jenkins' base URL
:param mutual_auth: type of mutual authentication, use one of REQUIRED, OPTIONAL or DISABLED
from requests_kerberos package
"""
args = {}
if ssl_verify:
args["ssl_verify"] = ssl_verify
if baseurl:
args["baseurl"] = baseurl
super(KrbRequester, self).__init__(**args)
self.mutual_auth = mutual_auth
def get_request_dict(self, url, params, data, headers):
req_dict = super(KrbRequester, self).get_request_dict(url=url, params=params,
data=data, headers=headers)
if self.mutual_auth:
auth = HTTPKerberosAuth(self.mutual_auth)
else:
auth = HTTPKerberosAuth()
req_dict['auth'] = auth
return req_dict
|
Add kerberos authentication requester using requests_kerberosfrom jenkinsapi.utils.requester import Requester
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
class KrbRequester(Requester):
"""
A class which carries out HTTP requests with Kerberos/GSSAPI authentication.
"""
def __init__(self, ssl_verify=None, baseurl=None, mutual_auth=OPTIONAL):
"""
:param ssl_verify: flag indicating if server certificate in HTTPS requests should be verified
:param baseurl: Jenkins' base URL
:param mutual_auth: type of mutual authentication, use one of REQUIRED, OPTIONAL or DISABLED
from requests_kerberos package
"""
args = {}
if ssl_verify:
args["ssl_verify"] = ssl_verify
if baseurl:
args["baseurl"] = baseurl
super(KrbRequester, self).__init__(**args)
self.mutual_auth = mutual_auth
def get_request_dict(self, url, params, data, headers):
req_dict = super(KrbRequester, self).get_request_dict(url=url, params=params,
data=data, headers=headers)
if self.mutual_auth:
auth = HTTPKerberosAuth(self.mutual_auth)
else:
auth = HTTPKerberosAuth()
req_dict['auth'] = auth
return req_dict
|
<commit_before><commit_msg>Add kerberos authentication requester using requests_kerberos<commit_after>from jenkinsapi.utils.requester import Requester
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
class KrbRequester(Requester):
"""
A class which carries out HTTP requests with Kerberos/GSSAPI authentication.
"""
def __init__(self, ssl_verify=None, baseurl=None, mutual_auth=OPTIONAL):
"""
:param ssl_verify: flag indicating if server certificate in HTTPS requests should be verified
:param baseurl: Jenkins' base URL
:param mutual_auth: type of mutual authentication, use one of REQUIRED, OPTIONAL or DISABLED
from requests_kerberos package
"""
args = {}
if ssl_verify:
args["ssl_verify"] = ssl_verify
if baseurl:
args["baseurl"] = baseurl
super(KrbRequester, self).__init__(**args)
self.mutual_auth = mutual_auth
def get_request_dict(self, url, params, data, headers):
req_dict = super(KrbRequester, self).get_request_dict(url=url, params=params,
data=data, headers=headers)
if self.mutual_auth:
auth = HTTPKerberosAuth(self.mutual_auth)
else:
auth = HTTPKerberosAuth()
req_dict['auth'] = auth
return req_dict
|
|
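A hypothetical wiring of the requester above into a jenkinsapi client (the URL is a placeholder, and KrbRequester is assumed to be importable from the module above); jenkinsapi's Jenkins accepts a custom requester object, which is the integration point this class targets:
from jenkinsapi.jenkins import Jenkins
from requests_kerberos import REQUIRED
# KrbRequester is the class defined in the module above
requester = KrbRequester(baseurl='https://jenkins.example.com',
                         mutual_auth=REQUIRED)
server = Jenkins('https://jenkins.example.com', requester=requester)
print(server.version)  # any authenticated call works once the ticket is valid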
5e99e33df59ebb40c1c49f66b6b88244292d7d1b
|
data/tm_prediction/parallel_tmhmm.py
|
data/tm_prediction/parallel_tmhmm.py
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
"""
Script to process tmhmm in parallel
"""
from Bio import SeqIO
import argparse
import os
import sys
import multiprocessing
def parse_and_validate(args):
"""
Parse input args
"""
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_file",
help="input fast file",
type=str,
action="store")
parser.add_argument("-o", "--output_dir",
help="output directory",
type=str,
action="store")
parser.add_argument("-p", "--cpu",
help="number of cores",
type=int,
action="store")
args = parser.parse_args()
if not os.path.isfile(args.input_file):
print("Input file {} does not exist".format(args.input_file))
sys.exit(1)
if not os.path.isdir(args.output_dir):
print("Output directory {} does not exist".format(args.output_dir))
sys.exit(1)
return parser.parse_args()
|
Add arg parsing for parallel tmhmm script
|
Add arg parsing for parallel tmhmm script
|
Python
|
apache-2.0
|
fmaguire/volution,fmaguire/volution,fmaguire/volution
|
Add arg parsing for parallel tmhmm script
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
"""
Script to process tmhmm in parallel
"""
from Bio import SeqIO
import argparse
import os
import sys
import multiprocessing
def parse_and_validate(args):
"""
Parse input args
"""
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_file",
help="input fast file",
type=str,
action="store")
parser.add_argument("-o", "--output_dir",
help="output directory",
type=str,
action="store")
parser.add_argument("-p", "--cpu",
help="number of cores",
type=int,
action="store")
args = parser.parse_args()
if not os.path.isfile(args.input_file):
print("Input file {} does not exist".format(args.input_file))
sys.exit(1)
if not os.path.isdir(args.output_dir):
print("Output directory {} does not exist".format(args.output_dir))
sys.exit(1)
return parser.parse_args()
|
<commit_before><commit_msg>Add arg parsing for parallel tmhmm script<commit_after>
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
"""
Script to process tmhmm in parallel
"""
from Bio import SeqIO
import argparse
import os
import sys
import multiprocessing
def parse_and_validate(args):
"""
Parse input args
"""
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_file",
help="input fast file",
type=str,
action="store")
parser.add_argument("-o", "--output_dir",
help="output directory",
type=str,
action="store")
parser.add_argument("-p", "--cpu",
help="number of cores",
type=int,
action="store")
args = parser.parse_args()
if not os.path.isfile(args.input_file):
print("Input file {} does not exist".format(args.input_file))
sys.exit(1)
if not os.path.isdir(args.output_dir):
print("Output directory {} does not exist".format(args.output_dir))
sys.exit(1)
return parser.parse_args()
|
Add arg parsing for parallel tmhmm script#!/usr/bin/env python3
#-*- coding: utf-8 -*-
"""
Script to process tmhmm in parallel
"""
from Bio import SeqIO
import argparse
import os
import sys
import multiprocessing
def parse_and_validate(args):
"""
Parse input args
"""
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_file",
help="input fast file",
type=str,
action="store")
parser.add_argument("-o", "--output_dir",
help="output directory",
type=str,
action="store")
parser.add_argument("-p", "--cpu",
help="number of cores",
type=int,
action="store")
args = parser.parse_args()
if not os.path.isfile(args.input_file):
print("Input file {} does not exist".format(args.input_file))
sys.exit(1)
if not os.path.isdir(args.output_dir):
print("Output directory {} does not exist".format(args.output_dir))
sys.exit(1)
return parser.parse_args()
|
<commit_before><commit_msg>Add arg parsing for parallel tmhmm script<commit_after>#!/usr/bin/env python3
#-*- coding: utf-8 -*-
"""
Script to process tmhmm in parallel
"""
from Bio import SeqIO
import argparse
import os
import sys
import multiprocessing
def parse_and_validate(args):
"""
Parse input args
"""
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_file",
help="input fast file",
type=str,
action="store")
parser.add_argument("-o", "--output_dir",
help="output directory",
type=str,
action="store")
parser.add_argument("-p", "--cpu",
help="number of cores",
type=int,
action="store")
args = parser.parse_args()
if not os.path.isfile(args.input_file):
print("Input file {} does not exist".format(args.input_file))
sys.exit(1)
if not os.path.isdir(args.output_dir):
print("Output directory {} does not exist".format(args.output_dir))
sys.exit(1)
return parser.parse_args()
|
|
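A sketch of the parallel step the script is building toward, under the assumption that each FASTA record is handed to an independent worker; run_tmhmm is a placeholder, not a real tmhmm invocation:
import multiprocessing
from Bio import SeqIO
def run_tmhmm(record):
    # stand-in worker: a real one would invoke tmhmm on this sequence
    return (record.id, len(record.seq))
def process_fasta(input_file, cpu):
    records = list(SeqIO.parse(input_file, 'fasta'))
    # fan the records out over `cpu` worker processes
    with multiprocessing.Pool(processes=cpu) as pool:
        return pool.map(run_tmhmm, records)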
2ebbb9dcf32ec1bee38f59e6be24c17529402042
|
money_rounding.py
|
money_rounding.py
|
def get_price_without_vat(price_to_show, vat_percent):
raise NotImplementedError()
def get_price_without_vat_from_other_valuta(conversion_rate, origin_price,
origin_vat, other_vat):
raise NotImplementedError()
|
Add task for rounding of money
|
Add task for rounding of money
|
Python
|
mit
|
coolshop-com/coolshop-application-assignment
|
Add task for rounding of money
|
def get_price_without_vat(price_to_show, vat_percent):
raise NotImplementedError()
def get_price_without_vat_from_other_valuta(conversion_rate, origin_price,
origin_vat, other_vat):
raise NotImplementedError()
|
<commit_before><commit_msg>Add task for rounding of money<commit_after>
|
def get_price_without_vat(price_to_show, vat_percent):
raise NotImplementedError()
def get_price_without_vat_from_other_valuta(conversion_rate, origin_price,
origin_vat, other_vat):
raise NotImplementedError()
|
Add task for rounding of moneydef get_price_without_vat(price_to_show, vat_percent):
raise NotImplementedError()
def get_price_without_vat_from_other_valuta(conversion_rate, origin_price,
origin_vat, other_vat):
raise NotImplementedError()
|
<commit_before><commit_msg>Add task for rounding of money<commit_after>def get_price_without_vat(price_to_show, vat_percent):
raise NotImplementedError()
def get_price_without_vat_from_other_valuta(conversion_rate, origin_price,
origin_vat, other_vat):
raise NotImplementedError()
|
|
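One plausible reading of the first stub above (the assignment may intend different rounding rules): a VAT-inclusive display price divided by (1 + rate) gives the net price, rounded half-up to cents:
from decimal import Decimal, ROUND_HALF_UP
def price_without_vat(price_to_show, vat_percent):
    price = Decimal(str(price_to_show))
    rate = Decimal(str(vat_percent)) / Decimal('100')
    net = price / (Decimal('1') + rate)  # strip the VAT component
    return net.quantize(Decimal('0.01'), rounding=ROUND_HALF_UP)
print(price_without_vat(125, 25))  # Decimal('100.00')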
859a13523da2c06dd031451d3d458cc3665ee295
|
dtda_module.py
|
dtda_module.py
|
from jnius import autoclass, cast
from TripsModule import trips_module
KQMLPerformative = autoclass('TRIPS.KQML.KQMLPerformative')
KQMLList = autoclass('TRIPS.KQML.KQMLList')
KQMLObject = autoclass('TRIPS.KQML.KQMLObject')
from bioagents.dtda import DTDA
class DTDA_Module(trips_module.TripsModule):
def __init__(self, argv):
super(DTDA_Module, self).__init__(argv)
def init(self):
super(DTDA_Module, self).init()
self.send(KQMLPerformative.fromString(
'(subscribe :content (request &key :content (hello . *)))'))
self.ready()
def receive_request(self, msg, content):
content_list = cast(KQMLList, content)
task = content_list.get(0).toString().lower()
if task == 'is-drug-target':
reply_content = self.respond_is_drug_target(content_list)
elif task == 'find-target-drug':
reply_content = self.respond_find_target_drug(content_list)
elif task == 'find-disease-targets':
reply_content = self.respond_find_disease_targets(content_list)
elif task == 'find-treatment':
reply_content = self.respond_find_treatment(content_list)
else:
self.error_reply(msg, 'unknown request task ' + task)
return
reply_msg = KQMLPerformative('tell')
reply_msg.setParameter(':content', cast(KQMLObject, reply_content))
self.reply(msg, reply_msg)
def respond_is_drug_target(self, content_list):
ddta = DTDA()
# TODO: get parameters from content
is_target = ddta.is_nominal_drug_target('Vemurafenib', 'BRAF')
reply_content = KQMLList()
if is_target:
msg_str = 'TRUE'
else:
msg_str = 'FALSE'
reply_content.add(msg_str)
return reply_content
def respond_find_target_drug(self, content_list):
# TODO: implement
reply_content = KQMLList()
reply_content.add('')
return reply_content
def respond_find_disease_targets(self, content_list):
# TODO: implement
reply_content = KQMLList()
reply_content.add('')
return reply_content
def respond_find_treatment(self, content_list):
# TODO: implement
reply_content = KQMLList()
reply_content.add('')
return reply_content
if __name__ == "__main__":
import sys
DTDA_Module(sys.argv[1:]).run()
|
Implement DTDA as TRIPS module
|
Implement DTDA as TRIPS module
|
Python
|
bsd-2-clause
|
bgyori/bioagents,sorgerlab/bioagents
|
Implement DTDA as TRIPS module
|
from jnius import autoclass, cast
from TripsModule import trips_module
KQMLPerformative = autoclass('TRIPS.KQML.KQMLPerformative')
KQMLList = autoclass('TRIPS.KQML.KQMLList')
KQMLObject = autoclass('TRIPS.KQML.KQMLObject')
from bioagents.dtda import DTDA
class DTDA_Module(trips_module.TripsModule):
def __init__(self, argv):
super(DTDA_Module, self).__init__(argv)
def init(self):
super(DTDA_Module, self).init()
self.send(KQMLPerformative.fromString(
'(subscribe :content (request &key :content (hello . *)))'))
self.ready()
def receive_request(self, msg, content):
content_list = cast(KQMLList, content)
task = content_list.get(0).toString().lower()
if task == 'is-drug-target':
reply_content = self.respond_is_drug_target(content_list)
elif task == 'find-target-drug':
reply_content = self.respond_find_target_drug(content_list)
elif task == 'find-disease-targets':
reply_content = self.respond_find_disease_targets(content_list)
elif task == 'find-treatment':
reply_content = self.respond_find_treatment(content_list)
else:
self.error_reply(msg, 'unknown request task ' + task)
return
reply_msg = KQMLPerformative('tell')
reply_msg.setParameter(':content', cast(KQMLObject, reply_content))
self.reply(msg, reply_msg)
def respond_is_drug_target(self, content_list):
ddta = DTDA()
# TODO: get parameters from content
is_target = ddta.is_nominal_drug_target('Vemurafenib', 'BRAF')
reply_content = KQMLList()
if is_target:
msg_str = 'TRUE'
else:
msg_str = 'FALSE'
reply_content.add(msg_str)
return reply_content
def respond_find_target_drug(self, content_list):
# TODO: implement
reply_content = KQMLList()
reply_content.add('')
return reply_content
def respond_find_disease_targets(self, content_list):
# TODO: implement
reply_content = KQMLList()
reply_content.add('')
return reply_content
def respond_find_treatment(self, content_list):
# TODO: implement
reply_content = KQMLList()
reply_content.add('')
return reply_content
if __name__ == "__main__":
import sys
DTDA_Module(sys.argv[1:]).run()
|
<commit_before><commit_msg>Implement DTDA as TRIPS module<commit_after>
|
from jnius import autoclass, cast
from TripsModule import trips_module
KQMLPerformative = autoclass('TRIPS.KQML.KQMLPerformative')
KQMLList = autoclass('TRIPS.KQML.KQMLList')
KQMLObject = autoclass('TRIPS.KQML.KQMLObject')
from bioagents.dtda import DTDA
class DTDA_Module(trips_module.TripsModule):
def __init__(self, argv):
super(DTDA_Module, self).__init__(argv)
def init(self):
super(DTDA_Module, self).init()
self.send(KQMLPerformative.fromString(
'(subscribe :content (request &key :content (hello . *)))'))
self.ready()
def receive_request(self, msg, content):
content_list = cast(KQMLList, content)
task = content_list.get(0).toString().lower()
if task == 'is-drug-target':
reply_content = self.respond_is_drug_target(content_list)
elif task == 'find-target-drug':
reply_content = self.respond_find_target_drug(content_list)
elif task == 'find-disease-targets':
reply_content = self.respond_find_disease_targets(content_list)
elif task == 'find-treatment':
reply_content = self.respond_find_treatment(content_list)
else:
self.error_reply(msg, 'unknown request task ' + task)
return
reply_msg = KQMLPerformative('tell')
reply_msg.setParameter(':content', cast(KQMLObject, reply_content))
self.reply(msg, reply_msg)
def respond_is_drug_target(self, content_list):
ddta = DTDA()
# TODO: get parameters from content
is_target = ddta.is_nominal_drug_target('Vemurafenib', 'BRAF')
reply_content = KQMLList()
if is_target:
msg_str = 'TRUE'
else:
msg_str = 'FALSE'
reply_content.add(msg_str)
return reply_content
def respond_find_target_drug(self, content_list):
# TODO: implement
reply_content = KQMLList()
reply_content.add('')
return reply_content
def respond_find_disease_targets(self, content_list):
# TODO: implement
reply_content = KQMLList()
reply_content.add('')
return reply_content
def respond_find_treatment(self, content_list):
# TODO: implement
reply_content = KQMLList()
reply_content.add('')
return reply_content
if __name__ == "__main__":
import sys
DTDA_Module(sys.argv[1:]).run()
|
Implement DTDA as TRIPS modulefrom jnius import autoclass, cast
from TripsModule import trips_module
KQMLPerformative = autoclass('TRIPS.KQML.KQMLPerformative')
KQMLList = autoclass('TRIPS.KQML.KQMLList')
KQMLObject = autoclass('TRIPS.KQML.KQMLObject')
from bioagents.dtda import DTDA
class DTDA_Module(trips_module.TripsModule):
def __init__(self, argv):
super(DTDA_Module, self).__init__(argv)
def init(self):
super(DTDA_Module, self).init()
self.send(KQMLPerformative.fromString(
'(subscribe :content (request &key :content (hello . *)))'))
self.ready()
def receive_request(self, msg, content):
content_list = cast(KQMLList, content)
task = content_list.get(0).toString().lower()
if task == 'is-drug-target':
reply_content = self.respond_is_drug_target(content_list)
elif task == 'find-target-drug':
reply_content = self.respond_find_target_drug(content_list)
elif task == 'find-disease-targets':
reply_content = self.respond_find_disease_targets(content_list)
elif task == 'find-treatment':
reply_content = self.respond_find_treatment(content_list)
else:
self.error_reply(msg, 'unknown request task ' + task)
return
reply_msg = KQMLPerformative('tell')
reply_msg.setParameter(':content', cast(KQMLObject, reply_content))
self.reply(msg, reply_msg)
def respond_is_drug_target(self, content_list):
ddta = DTDA()
# TODO: get parameters from content
is_target = ddta.is_nominal_drug_target('Vemurafenib', 'BRAF')
reply_content = KQMLList()
if is_target:
msg_str = 'TRUE'
else:
msg_str = 'FALSE'
reply_content.add(msg_str)
return reply_content
def respond_find_target_drug(self, content_list):
# TODO: implement
reply_content = KQMLList()
reply_content.add('')
return reply_content
def respond_find_disease_targets(self, content_list):
# TODO: implement
reply_content = KQMLList()
reply_content.add('')
return reply_content
def respond_find_treatment(self, content_list):
# TODO: implement
reply_content = KQMLList()
reply_content.add('')
return reply_content
if __name__ == "__main__":
import sys
DTDA_Module(sys.argv[1:]).run()
|
<commit_before><commit_msg>Implement DTDA as TRIPS module<commit_after>from jnius import autoclass, cast
from TripsModule import trips_module
KQMLPerformative = autoclass('TRIPS.KQML.KQMLPerformative')
KQMLList = autoclass('TRIPS.KQML.KQMLList')
KQMLObject = autoclass('TRIPS.KQML.KQMLObject')
from bioagents.dtda import DTDA
class DTDA_Module(trips_module.TripsModule):
def __init__(self, argv):
super(DTDA_Module, self).__init__(argv)
def init(self):
super(DTDA_Module, self).init()
self.send(KQMLPerformative.fromString(
'(subscribe :content (request &key :content (hello . *)))'))
self.ready()
def receive_request(self, msg, content):
content_list = cast(KQMLList, content)
task = content_list.get(0).toString().lower()
if task == 'is-drug-target':
reply_content = self.respond_is_drug_target(content_list)
elif task == 'find-target-drug':
reply_content = self.respond_find_target_drug(content_list)
elif task == 'find-disease-targets':
reply_content = self.respond_find_disease_targets(content_list)
elif task == 'find-treatment':
reply_content = self.respond_find_treatment(content_list)
else:
self.error_reply(msg, 'unknown request task ' + task)
return
reply_msg = KQMLPerformative('tell')
reply_msg.setParameter(':content', cast(KQMLObject, reply_content))
self.reply(msg, reply_msg)
def respond_is_drug_target(self, content_list):
ddta = DTDA()
# TODO: get parameters from content
is_target = ddta.is_nominal_drug_target('Vemurafenib', 'BRAF')
reply_content = KQMLList()
if is_target:
msg_str = 'TRUE'
else:
msg_str = 'FALSE'
reply_content.add(msg_str)
return reply_content
def respond_find_target_drug(self, content_list):
# TODO: implement
reply_content = KQMLList()
reply_content.add('')
return reply_content
def respond_find_disease_targets(self, content_list):
# TODO: implement
reply_content = KQMLList()
reply_content.add('')
return reply_content
def respond_find_treatment(self, content_list):
# TODO: implement
reply_content = KQMLList()
reply_content.add('')
return reply_content
if __name__ == "__main__":
import sys
DTDA_Module(sys.argv[1:]).run()
|
|
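The receive_request method above dispatches with an if/elif chain; a name-based dispatch table is an equivalent shape that needs no edits per new task (a sketch only, not the module's actual code):
class RequestDispatcher(object):
    def handle(self, task, content):
        # map 'is-drug-target' onto respond_is_drug_target, and so on
        handler = getattr(self, 'respond_' + task.replace('-', '_'), None)
        if handler is None:
            raise ValueError('unknown request task ' + task)
        return handler(content)
    def respond_is_drug_target(self, content):
        return ['TRUE']  # toy handler standing in for the DTDA lookup
print(RequestDispatcher().handle('is-drug-target', []))  # ['TRUE']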
a4b6d4c7880222901ddcd53485100a8ee3a784f2
|
backend/django/apps/accounts/serializers.py
|
backend/django/apps/accounts/serializers.py
|
from rest_framework import serializers
from .models import AbstractAccount
class WholeAccountSerializer(serializers.ModelSerializer):
password = serializers.CharField(write_only=True, required=False)
class Meta:
model = AbstractAccount
fields = ('id', 'first_name', 'last_name', 'email', 'password',
                  'telephone', 'address', 'last_activity_at',)
def create(self, validated_data):
return AbstractAccount.objects.create(**validated_data)
def update(self, instance, validated_data):
return AbstractAccount.objects.update_user(
instance=instance, validated_data=validated_data)
|
Create the serializer for the AbstractAccount
|
Create the serializer for the AbstractAccount
|
Python
|
mit
|
slavpetroff/sweetshop,slavpetroff/sweetshop
|
Create the serializer for the AbstractAccount
|
from rest_framework import serializers
from .models import AbstractAccount
class WholeAccountSerializer(serializers.ModelSerializer):
password = serializers.CharField(write_only=True, required=False)
class Meta:
model = AbstractAccount
fields = ('id', 'first_name', 'last_name', 'email', 'password',
                  'telephone', 'address', 'last_activity_at',)
def create(self, validated_data):
return AbstractAccount.objects.create(**validated_data)
def update(self, instance, validated_data):
return AbstractAccount.objects.update_user(
instance=instance, validated_data=validated_data)
|
<commit_before><commit_msg>Create the serializer for the AbstractAccount<commit_after>
|
from rest_framework import serializers
from .models import AbstractAccount
class WholeAccountSerializer(serializers.ModelSerializer):
password = serializers.CharField(write_only=True, required=False)
class Meta:
model = AbstractAccount
fields = ('id', 'first_name', 'last_name', 'email', 'password',
                  'telephone', 'address', 'last_activity_at',)
def create(self, validated_data):
return AbstractAccount.objects.create(**validated_data)
def update(self, instance, validated_data):
return AbstractAccount.objects.update_user(
instance=instance, validated_data=validated_data)
|
Create the serializer for the AbstractAccountfrom rest_framework import serializers
from .models import AbstractAccount
class WholeAccountSerializer(serializers.ModelSerializer):
password = serializers.CharField(write_only=True, required=False)
class Meta:
model = AbstractAccount
fields = ('id', 'first_name', 'last_name', 'email', 'password',
                  'telephone', 'address', 'last_activity_at',)
def create(self, validated_data):
return AbstractAccount.objects.create(**validated_data)
def update(self, instance, validated_data):
return AbstractAccount.objects.update_user(
instance=instance, validated_data=validated_data)
|
<commit_before><commit_msg>Create the serializer for the AbstractAccount<commit_after>from rest_framework import serializers
from .models import AbstractAccount
class WholeAccountSerializer(serializers.ModelSerializer):
password = serializers.CharField(write_only=True, required=False)
class Meta:
model = AbstractAccount
fields = ('id', 'first_name', 'last_name', 'email', 'password',
                  'telephone', 'address', 'last_activity_at',)
def create(self, validated_data):
return AbstractAccount.objects.create(**validated_data)
def update(self, instance, validated_data):
return AbstractAccount.objects.update_user(
instance=instance, validated_data=validated_data)
|
|
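A dependency-free sketch of the write_only=True contract used above: the password is accepted on input but never echoed back in output (names here are illustrative, not DRF API):
def serialize_account(account):
    # output representation deliberately omits the password
    return {'id': account['id'], 'email': account['email']}
def deserialize_account(payload):
    # input representation is the only place the password is read
    return {'email': payload['email'], 'password': payload.get('password')}
acct = {'id': 1, 'email': 'a@example.com', 'password': 'secret'}
assert 'password' not in serialize_account(acct)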
282477965c07124f4cba7b647b2f9ab1b54d9903
|
scripts/occupy_seat_group.py
|
scripts/occupy_seat_group.py
|
#!/usr/bin/env python
"""Occupy a seat group with a ticket bundle.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.seating import seat_group_service
from byceps.services.ticketing import ticket_bundle_service
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
def get_seat_group(ctx, param, seat_group_id):
seat_group = seat_group_service.find_seat_group(seat_group_id)
if not seat_group:
raise click.BadParameter(
'Unknown seat group ID "{}".'.format(seat_group_id))
return seat_group
def get_ticket_bundle(ctx, param, ticket_bundle_id):
ticket_bundle = ticket_bundle_service.find_bundle(ticket_bundle_id)
if not ticket_bundle:
raise click.BadParameter(
'Unknown ticket bundle ID "{}".'.format(ticket_bundle_id))
return ticket_bundle
@click.command()
@click.argument('seat_group', callback=get_seat_group)
@click.argument('ticket_bundle', callback=get_ticket_bundle)
def execute(seat_group, ticket_bundle):
seat_group_service.occupy_seat_group(seat_group, ticket_bundle)
click.secho('Done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
Add script to occupy a seat group with a ticket bundle
|
Add script to occupy a seat group with a ticket bundle
|
Python
|
bsd-3-clause
|
homeworkprod/byceps,homeworkprod/byceps,homeworkprod/byceps,m-ober/byceps,m-ober/byceps,m-ober/byceps
|
Add script to occupy a seat group with a ticket bundle
|
#!/usr/bin/env python
"""Occupy a seat group with a ticket bundle.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.seating import seat_group_service
from byceps.services.ticketing import ticket_bundle_service
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
def get_seat_group(ctx, param, seat_group_id):
seat_group = seat_group_service.find_seat_group(seat_group_id)
if not seat_group:
raise click.BadParameter(
'Unknown seat group ID "{}".'.format(seat_group_id))
return seat_group
def get_ticket_bundle(ctx, param, ticket_bundle_id):
ticket_bundle = ticket_bundle_service.find_bundle(ticket_bundle_id)
if not ticket_bundle:
raise click.BadParameter(
'Unknown ticket bundle ID "{}".'.format(ticket_bundle_id))
return ticket_bundle
@click.command()
@click.argument('seat_group', callback=get_seat_group)
@click.argument('ticket_bundle', callback=get_ticket_bundle)
def execute(seat_group, ticket_bundle):
seat_group_service.occupy_seat_group(seat_group, ticket_bundle)
click.secho('Done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
<commit_before><commit_msg>Add script to occupy a seat group with a ticket bundle<commit_after>
|
#!/usr/bin/env python
"""Occupy a seat group with a ticket bundle.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.seating import seat_group_service
from byceps.services.ticketing import ticket_bundle_service
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
def get_seat_group(ctx, param, seat_group_id):
seat_group = seat_group_service.find_seat_group(seat_group_id)
if not seat_group:
raise click.BadParameter(
'Unknown seat group ID "{}".'.format(seat_group_id))
return seat_group
def get_ticket_bundle(ctx, param, ticket_bundle_id):
ticket_bundle = ticket_bundle_service.find_bundle(ticket_bundle_id)
if not ticket_bundle:
raise click.BadParameter(
'Unknown ticket bundle ID "{}".'.format(ticket_bundle_id))
return ticket_bundle
@click.command()
@click.argument('seat_group', callback=get_seat_group)
@click.argument('ticket_bundle', callback=get_ticket_bundle)
def execute(seat_group, ticket_bundle):
seat_group_service.occupy_seat_group(seat_group, ticket_bundle)
click.secho('Done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
Add script to occupy a seat group with a ticket bundle#!/usr/bin/env python
"""Occupy a seat group with a ticket bundle.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.seating import seat_group_service
from byceps.services.ticketing import ticket_bundle_service
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
def get_seat_group(ctx, param, seat_group_id):
seat_group = seat_group_service.find_seat_group(seat_group_id)
if not seat_group:
raise click.BadParameter(
'Unknown seat group ID "{}".'.format(seat_group_id))
return seat_group
def get_ticket_bundle(ctx, param, ticket_bundle_id):
ticket_bundle = ticket_bundle_service.find_bundle(ticket_bundle_id)
if not ticket_bundle:
raise click.BadParameter(
'Unknown ticket bundle ID "{}".'.format(ticket_bundle_id))
return ticket_bundle
@click.command()
@click.argument('seat_group', callback=get_seat_group)
@click.argument('ticket_bundle', callback=get_ticket_bundle)
def execute(seat_group, ticket_bundle):
seat_group_service.occupy_seat_group(seat_group, ticket_bundle)
click.secho('Done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
<commit_before><commit_msg>Add script to occupy a seat group with a ticket bundle<commit_after>#!/usr/bin/env python
"""Occupy a seat group with a ticket bundle.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.seating import seat_group_service
from byceps.services.ticketing import ticket_bundle_service
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
def get_seat_group(ctx, param, seat_group_id):
seat_group = seat_group_service.find_seat_group(seat_group_id)
if not seat_group:
raise click.BadParameter(
'Unknown seat group ID "{}".'.format(seat_group_id))
return seat_group
def get_ticket_bundle(ctx, param, ticket_bundle_id):
ticket_bundle = ticket_bundle_service.find_bundle(ticket_bundle_id)
if not ticket_bundle:
raise click.BadParameter(
'Unknown ticket bundle ID "{}".'.format(ticket_bundle_id))
return ticket_bundle
@click.command()
@click.argument('seat_group', callback=get_seat_group)
@click.argument('ticket_bundle', callback=get_ticket_bundle)
def execute(seat_group, ticket_bundle):
seat_group_service.occupy_seat_group(seat_group, ticket_bundle)
click.secho('Done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
|
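The validate-and-convert callback pattern from that script, in isolation; REGISTRY and lookup are illustrative stand-ins for the service-layer finders:
import click
REGISTRY = {'g1': 'seat group one'}  # invented data for the sketch
def lookup(ctx, param, value):
    try:
        return REGISTRY[value]  # convert the ID into the domain object
    except KeyError:
        raise click.BadParameter('Unknown ID "{}".'.format(value))
@click.command()
@click.argument('group', callback=lookup)
def show(group):
    click.secho(group, fg='green')  # receives the converted object
if __name__ == '__main__':
    show()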
1cd21ad4538cd71e93fec3a7efac29646503bde7
|
white_balance.py
|
white_balance.py
|
import cv2
import numpy as np
import plantcv as pcv
def white_balance(img, roi=None):
"""Corrects the exposure of an image based on its histogram.
Inputs:
img - A grayscale image on which to perform the correction
roi - A list of 4 points (x, y, width, height) that form the rectangular ROI of the white color standard.
If a list of 4 points is not given, whole image will be used.
Returns:
img - Image after exposure correction
"""
# Finds histogram of roi if valid roi is given. Otherwise, finds histogram of entire image
if len(roi) != 4:
hist = cv2.calcHist(tuple(img), [0], None, [256], [0, 256]) # Creates histogram of original image
else:
hist = cv2.calcHist(tuple(img[roi[1]:roi[1]+roi[3], roi[0]:roi[0]+roi[2]]), [0], None, [256], [0, 256])
# Calculates index of maximum of histogram and finds alpha based on the peak
hmax = np.argmax(hist)
alpha = 255 / float(hmax)
# Converts values greater than hmax to 255 and scales all others by alpha
img = np.asarray(np.where(img <= hmax, np.multiply(alpha, img), 255), np.uint8)
return img
|
import cv2
import numpy as np
import plantcv as pcv
def white_balance(img, roi=None):
"""Corrects the exposure of an image based on its histogram.
Inputs:
img - A grayscale image on which to perform the correction
roi - A list of 4 points (x, y, width, height) that form the rectangular ROI of the white color standard.
If a list of 4 points is not given, whole image will be used.
Returns:
img - Image after exposure correction
"""
# Finds histogram of roi if valid roi is given. Otherwise, finds histogram of entire image
if roi is not None and len(roi) == 4:
hist = cv2.calcHist(tuple(img[roi[1]:roi[1]+roi[3], roi[0]:roi[0]+roi[2]]), [0], None, [256], [0, 256])
else:
hist = cv2.calcHist(tuple(img), [0], None, [256], [0, 256]) # Creates histogram of original image
# Calculates index of maximum of histogram and finds alpha based on the peak
hmax = np.argmax(hist)
alpha = 255 / float(hmax)
# Converts values greater than hmax to 255 and scales all others by alpha
img = np.asarray(np.where(img <= hmax, np.multiply(alpha, img), 255), np.uint8)
return img
|
Fix Bug for Nonetype ROI
|
Fix Bug for Nonetype ROI
|
Python
|
mit
|
danforthcenter/plantcv-seeds
|
import cv2
import numpy as np
import plantcv as pcv
def white_balance(img, roi=None):
"""Corrects the exposure of an image based on its histogram.
Inputs:
img - A grayscale image on which to perform the correction
roi - A list of 4 points (x, y, width, height) that form the rectangular ROI of the white color standard.
If a list of 4 points is not given, whole image will be used.
Returns:
img - Image after exposure correction
"""
# Finds histogram of roi if valid roi is given. Otherwise, finds histogram of entire image
if len(roi) != 4:
hist = cv2.calcHist(tuple(img), [0], None, [256], [0, 256]) # Creates histogram of original image
else:
hist = cv2.calcHist(tuple(img[roi[1]:roi[1]+roi[3], roi[0]:roi[0]+roi[2]]), [0], None, [256], [0, 256])
# Calculates index of maximum of histogram and finds alpha based on the peak
hmax = np.argmax(hist)
alpha = 255 / float(hmax)
# Converts values greater than hmax to 255 and scales all others by alpha
img = np.asarray(np.where(img <= hmax, np.multiply(alpha, img), 255), np.uint8)
return img
Fix Bug for Nonetype ROI
|
import cv2
import numpy as np
import plantcv as pcv
def white_balance(img, roi=None):
"""Corrects the exposure of an image based on its histogram.
Inputs:
img - A grayscale image on which to perform the correction
roi - A list of 4 points (x, y, width, height) that form the rectangular ROI of the white color standard.
If a list of 4 points is not given, whole image will be used.
Returns:
img - Image after exposure correction
"""
# Finds histogram of roi if valid roi is given. Otherwise, finds histogram of entire image
if roi is not None and len(roi) == 4:
hist = cv2.calcHist(tuple(img[roi[1]:roi[1]+roi[3], roi[0]:roi[0]+roi[2]]), [0], None, [256], [0, 256])
else:
hist = cv2.calcHist(tuple(img), [0], None, [256], [0, 256]) # Creates histogram of original image
# Calculates index of maximum of histogram and finds alpha based on the peak
hmax = np.argmax(hist)
alpha = 255 / float(hmax)
# Converts values greater than hmax to 255 and scales all others by alpha
img = np.asarray(np.where(img <= hmax, np.multiply(alpha, img), 255), np.uint8)
return img
|
<commit_before>import cv2
import numpy as np
import plantcv as pcv
def white_balance(img, roi=None):
"""Corrects the exposure of an image based on its histogram.
Inputs:
img - A grayscale image on which to perform the correction
roi - A list of 4 points (x, y, width, height) that form the rectangular ROI of the white color standard.
If a list of 4 points is not given, whole image will be used.
Returns:
img - Image after exposure correction
"""
# Finds histogram of roi if valid roi is given. Otherwise, finds histogram of entire image
if len(roi) != 4:
hist = cv2.calcHist(tuple(img), [0], None, [256], [0, 256]) # Creates histogram of original image
else:
hist = cv2.calcHist(tuple(img[roi[1]:roi[1]+roi[3], roi[0]:roi[0]+roi[2]]), [0], None, [256], [0, 256])
# Calculates index of maximum of histogram and finds alpha based on the peak
hmax = np.argmax(hist)
alpha = 255 / float(hmax)
# Converts values greater than hmax to 255 and scales all others by alpha
img = np.asarray(np.where(img <= hmax, np.multiply(alpha, img), 255), np.uint8)
return img
<commit_msg>Fix Bug for Nonetype ROI<commit_after>
|
import cv2
import numpy as np
import plantcv as pcv
def white_balance(img, roi=None):
"""Corrects the exposure of an image based on its histogram.
Inputs:
img - A grayscale image on which to perform the correction
roi - A list of 4 points (x, y, width, height) that form the rectangular ROI of the white color standard.
If a list of 4 points is not given, whole image will be used.
Returns:
img - Image after exposure correction
"""
# Finds histogram of roi if valid roi is given. Otherwise, finds histogram of entire image
if roi is not None and len(roi) == 4:
hist = cv2.calcHist(tuple(img[roi[1]:roi[1]+roi[3], roi[0]:roi[0]+roi[2]]), [0], None, [256], [0, 256])
else:
hist = cv2.calcHist(tuple(img), [0], None, [256], [0, 256]) # Creates histogram of original image
# Calculates index of maximum of histogram and finds alpha based on the peak
hmax = np.argmax(hist)
alpha = 255 / float(hmax)
# Converts values greater than hmax to 255 and scales all others by alpha
img = np.asarray(np.where(img <= hmax, np.multiply(alpha, img), 255), np.uint8)
return img
|
import cv2
import numpy as np
import plantcv as pcv
def white_balance(img, roi=None):
"""Corrects the exposure of an image based on its histogram.
Inputs:
img - A grayscale image on which to perform the correction
roi - A list of 4 points (x, y, width, height) that form the rectangular ROI of the white color standard.
If a list of 4 points is not given, whole image will be used.
Returns:
img - Image after exposure correction
"""
# Finds histogram of roi if valid roi is given. Otherwise, finds histogram of entire image
if len(roi) != 4:
hist = cv2.calcHist(tuple(img), [0], None, [256], [0, 256]) # Creates histogram of original image
else:
hist = cv2.calcHist(tuple(img[roi[1]:roi[1]+roi[3], roi[0]:roi[0]+roi[2]]), [0], None, [256], [0, 256])
# Calculates index of maximum of histogram and finds alpha based on the peak
hmax = np.argmax(hist)
alpha = 255 / float(hmax)
# Converts values greater than hmax to 255 and scales all others by alpha
img = np.asarray(np.where(img <= hmax, np.multiply(alpha, img), 255), np.uint8)
return img
Fix Bug for Nonetype ROIimport cv2
import numpy as np
import plantcv as pcv
def white_balance(img, roi=None):
"""Corrects the exposure of an image based on its histogram.
Inputs:
img - A grayscale image on which to perform the correction
roi - A list of 4 points (x, y, width, height) that form the rectangular ROI of the white color standard.
If a list of 4 points is not given, whole image will be used.
Returns:
img - Image after exposure correction
"""
# Finds histogram of roi if valid roi is given. Otherwise, finds histogram of entire image
if roi is not None and len(roi) == 4:
hist = cv2.calcHist(tuple(img[roi[1]:roi[1]+roi[3], roi[0]:roi[0]+roi[2]]), [0], None, [256], [0, 256])
else:
hist = cv2.calcHist(tuple(img), [0], None, [256], [0, 256]) # Creates histogram of original image
# Calculates index of maximum of histogram and finds alpha based on the peak
hmax = np.argmax(hist)
alpha = 255 / float(hmax)
# Converts values greater than hmax to 255 and scales all others by alpha
img = np.asarray(np.where(img <= hmax, np.multiply(alpha, img), 255), np.uint8)
return img
|
<commit_before>import cv2
import numpy as np
import plantcv as pcv
def white_balance(img, roi=None):
"""Corrects the exposure of an image based on its histogram.
Inputs:
img - A grayscale image on which to perform the correction
roi - A list of 4 points (x, y, width, height) that form the rectangular ROI of the white color standard.
If a list of 4 points is not given, whole image will be used.
Returns:
img - Image after exposure correction
"""
# Finds histogram of roi if valid roi is given. Otherwise, finds histogram of entire image
if len(roi) != 4:
hist = cv2.calcHist(tuple(img), [0], None, [256], [0, 256]) # Creates histogram of original image
else:
hist = cv2.calcHist(tuple(img[roi[1]:roi[1]+roi[3], roi[0]:roi[0]+roi[2]]), [0], None, [256], [0, 256])
# Calculates index of maximum of histogram and finds alpha based on the peak
hmax = np.argmax(hist)
alpha = 255 / float(hmax)
# Converts values greater than hmax to 255 and scales all others by alpha
img = np.asarray(np.where(img <= hmax, np.multiply(alpha, img), 255), np.uint8)
return img
<commit_msg>Fix Bug for Nonetype ROI<commit_after>import cv2
import numpy as np
import plantcv as pcv
def white_balance(img, roi=None):
"""Corrects the exposure of an image based on its histogram.
Inputs:
img - A grayscale image on which to perform the correction
roi - A list of 4 points (x, y, width, height) that form the rectangular ROI of the white color standard.
If a list of 4 points is not given, whole image will be used.
Returns:
img - Image after exposure correction
"""
# Finds histogram of roi if valid roi is given. Otherwise, finds histogram of entire image
if roi is not None and len(roi) == 4:
hist = cv2.calcHist(tuple(img[roi[1]:roi[1]+roi[3], roi[0]:roi[0]+roi[2]]), [0], None, [256], [0, 256])
else:
hist = cv2.calcHist(tuple(img), [0], None, [256], [0, 256]) # Creates histogram of original image
# Calculates index of maximum of histogram and finds alpha based on the peak
hmax = np.argmax(hist)
alpha = 255 / float(hmax)
# Converts values greater than hmax to 255 and scales all others by alpha
img = np.asarray(np.where(img <= hmax, np.multiply(alpha, img), 255), np.uint8)
return img
|
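The exposure correction itself, isolated on toy one-dimensional data: find the histogram peak and scale so that intensity maps to 255, clipping anything brighter:
import numpy as np
img = np.array([10, 100, 200, 200, 230], dtype=np.uint8)
hist, _ = np.histogram(img, bins=256, range=(0, 256))
hmax = int(np.argmax(hist))            # most frequent intensity (200 here)
alpha = 255 / float(hmax)
out = np.where(img <= hmax, np.multiply(alpha, img), 255).astype(np.uint8)
print(out)  # pixels at or below the peak scale up; brighter ones clip to 255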
cc6aad9324373d4dd7860ec2e5e807ae4ec03028
|
test/option--.py
|
test/option--.py
|
#!/usr/bin/env python
__revision__ = "test/option-n.py __REVISION__ __DATE__ __DEVELOPER__"
import TestCmd
import os.path
import string
import sys
test = TestCmd.TestCmd(program = 'scons.py',
workdir = '',
interpreter = 'python')
test.write('build.py', r"""
import sys
file = open(sys.argv[1], 'w')
file.write("build.py: %s\n" % sys.argv[1])
file.close()
""")
test.write('SConstruct', """
MyBuild = Builder(name = "MyBuild",
action = "python build.py %(target)s")
env = Environment(BUILDERS = [MyBuild])
env.MyBuild(target = '-f1.out', source = 'f1.in')
env.MyBuild(target = '-f2.out', source = 'f2.in')
""")
expect = "python build.py -f1.out\npython build.py -f2.out\n"
test.run(chdir = '.', arguments = '-- -f1.out -f2.out')
test.fail_test(test.stdout() != expect)
test.fail_test(test.stderr() != "")
test.fail_test(not os.path.exists(test.workpath('-f1.out')))
test.fail_test(not os.path.exists(test.workpath('-f2.out')))
test.pass_test()
|
Add a test for -- terminating option processing.
|
Add a test for -- terminating option processing.
git-svn-id: 7892167f69f80ee5d3024affce49f20c74bcb41d@48 fdb21ef1-2011-0410-befe-b5e4ea1792b1
|
Python
|
mit
|
azverkan/scons,datalogics-robb/scons,azverkan/scons,azverkan/scons,datalogics-robb/scons,datalogics/scons,datalogics/scons,datalogics-robb/scons,datalogics-robb/scons,datalogics/scons,azverkan/scons,azverkan/scons,datalogics/scons
|
Add a test for -- terminating option processing.
git-svn-id: 7892167f69f80ee5d3024affce49f20c74bcb41d@48 fdb21ef1-2011-0410-befe-b5e4ea1792b1
|
#!/usr/bin/env python
__revision__ = "test/option-n.py __REVISION__ __DATE__ __DEVELOPER__"
import TestCmd
import os.path
import string
import sys
test = TestCmd.TestCmd(program = 'scons.py',
workdir = '',
interpreter = 'python')
test.write('build.py', r"""
import sys
file = open(sys.argv[1], 'w')
file.write("build.py: %s\n" % sys.argv[1])
file.close()
""")
test.write('SConstruct', """
MyBuild = Builder(name = "MyBuild",
action = "python build.py %(target)s")
env = Environment(BUILDERS = [MyBuild])
env.MyBuild(target = '-f1.out', source = 'f1.in')
env.MyBuild(target = '-f2.out', source = 'f2.in')
""")
expect = "python build.py -f1.out\npython build.py -f2.out\n"
test.run(chdir = '.', arguments = '-- -f1.out -f2.out')
test.fail_test(test.stdout() != expect)
test.fail_test(test.stderr() != "")
test.fail_test(not os.path.exists(test.workpath('-f1.out')))
test.fail_test(not os.path.exists(test.workpath('-f2.out')))
test.pass_test()
|
<commit_before><commit_msg>Add a test for -- terminating option processing.
git-svn-id: 7892167f69f80ee5d3024affce49f20c74bcb41d@48 fdb21ef1-2011-0410-befe-b5e4ea1792b1<commit_after>
|
#!/usr/bin/env python
__revision__ = "test/option-n.py __REVISION__ __DATE__ __DEVELOPER__"
import TestCmd
import os.path
import string
import sys
test = TestCmd.TestCmd(program = 'scons.py',
workdir = '',
interpreter = 'python')
test.write('build.py', r"""
import sys
file = open(sys.argv[1], 'w')
file.write("build.py: %s\n" % sys.argv[1])
file.close()
""")
test.write('SConstruct', """
MyBuild = Builder(name = "MyBuild",
action = "python build.py %(target)s")
env = Environment(BUILDERS = [MyBuild])
env.MyBuild(target = '-f1.out', source = 'f1.in')
env.MyBuild(target = '-f2.out', source = 'f2.in')
""")
expect = "python build.py -f1.out\npython build.py -f2.out\n"
test.run(chdir = '.', arguments = '-- -f1.out -f2.out')
test.fail_test(test.stdout() != expect)
test.fail_test(test.stderr() != "")
test.fail_test(not os.path.exists(test.workpath('-f1.out')))
test.fail_test(not os.path.exists(test.workpath('-f2.out')))
test.pass_test()
|
Add a test for -- terminating option processing.
git-svn-id: 7892167f69f80ee5d3024affce49f20c74bcb41d@48 fdb21ef1-2011-0410-befe-b5e4ea1792b1
#!/usr/bin/env python
__revision__ = "test/option-n.py __REVISION__ __DATE__ __DEVELOPER__"
import TestCmd
import os.path
import string
import sys
test = TestCmd.TestCmd(program = 'scons.py',
workdir = '',
interpreter = 'python')
test.write('build.py', r"""
import sys
file = open(sys.argv[1], 'w')
file.write("build.py: %s\n" % sys.argv[1])
file.close()
""")
test.write('SConstruct', """
MyBuild = Builder(name = "MyBuild",
action = "python build.py %(target)s")
env = Environment(BUILDERS = [MyBuild])
env.MyBuild(target = '-f1.out', source = 'f1.in')
env.MyBuild(target = '-f2.out', source = 'f2.in')
""")
expect = "python build.py -f1.out\npython build.py -f2.out\n"
test.run(chdir = '.', arguments = '-- -f1.out -f2.out')
test.fail_test(test.stdout() != expect)
test.fail_test(test.stderr() != "")
test.fail_test(not os.path.exists(test.workpath('-f1.out')))
test.fail_test(not os.path.exists(test.workpath('-f2.out')))
test.pass_test()
|
<commit_before><commit_msg>Add a test for -- terminating option processing.
git-svn-id: 7892167f69f80ee5d3024affce49f20c74bcb41d@48 fdb21ef1-2011-0410-befe-b5e4ea1792b1<commit_after>#!/usr/bin/env python
__revision__ = "test/option-n.py __REVISION__ __DATE__ __DEVELOPER__"
import TestCmd
import os.path
import string
import sys
test = TestCmd.TestCmd(program = 'scons.py',
workdir = '',
interpreter = 'python')
test.write('build.py', r"""
import sys
file = open(sys.argv[1], 'w')
file.write("build.py: %s\n" % sys.argv[1])
file.close()
""")
test.write('SConstruct', """
MyBuild = Builder(name = "MyBuild",
action = "python build.py %(target)s")
env = Environment(BUILDERS = [MyBuild])
env.MyBuild(target = '-f1.out', source = 'f1.in')
env.MyBuild(target = '-f2.out', source = 'f2.in')
""")
expect = "python build.py -f1.out\npython build.py -f2.out\n"
test.run(chdir = '.', arguments = '-- -f1.out -f2.out')
test.fail_test(test.stdout() != expect)
test.fail_test(test.stderr() != "")
test.fail_test(not os.path.exists(test.workpath('-f1.out')))
test.fail_test(not os.path.exists(test.workpath('-f2.out')))
test.pass_test()
|
|
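For context, the `--` token the test exercises is the conventional end-of-options marker. A small argparse sketch (purely illustrative, unrelated to scons internals) shows the same semantics:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('targets', nargs='*')
# After '--', tokens that look like flags are still taken as positionals.
args = parser.parse_args(['--', '-f1.out', '-f2.out'])
print(args.targets)  # ['-f1.out', '-f2.out']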
1d0faecd1f8897e4b9e68cb62cc49125250ff59f
|
k8s_snapshots/rule.py
|
k8s_snapshots/rule.py
|
from typing import Dict, Any
import attr
@attr.s(slots=True)
class Rule:
"""
A rule describes how and when to make backups.
"""
volume_name = attr.ib()
namespace = attr.ib()
deltas = attr.ib()
gce_disk = attr.ib()
gce_disk_zone = attr.ib()
claim_name = attr.ib()
@property
def pretty_name(self):
return self.claim_name or self.name
def to_dict(self) -> Dict[str, Any]:
""" Helper, returns attr.asdict(self) """
return attr.asdict(self)
def __str__ (self):
return self.name
|
from typing import Dict, Any
import attr
@attr.s(slots=True)
class Rule:
"""
A rule describes how and when to make backups.
"""
name = attr.ib()
namespace = attr.ib()
deltas = attr.ib()
gce_disk = attr.ib()
gce_disk_zone = attr.ib()
claim_name = attr.ib()
@property
def pretty_name(self):
return self.claim_name or self.name
def to_dict(self) -> Dict[str, Any]:
""" Helper, returns attr.asdict(self) """
return attr.asdict(self)
def __str__ (self):
return self.name
|
Fix accidentally committed attribute name change
|
Fix accidentally committed attribute name change
|
Python
|
bsd-2-clause
|
miracle2k/k8s-snapshots,EQTPartners/k8s-snapshots
|
from typing import Dict, Any
import attr
@attr.s(slots=True)
class Rule:
"""
A rule describes how and when to make backups.
"""
volume_name = attr.ib()
namespace = attr.ib()
deltas = attr.ib()
gce_disk = attr.ib()
gce_disk_zone = attr.ib()
claim_name = attr.ib()
@property
def pretty_name(self):
return self.claim_name or self.name
def to_dict(self) -> Dict[str, Any]:
""" Helper, returns attr.asdict(self) """
return attr.asdict(self)
def __str__ (self):
return self.name
Fix accidentally committed attribute name change
|
from typing import Dict, Any
import attr
@attr.s(slots=True)
class Rule:
"""
A rule describes how and when to make backups.
"""
name = attr.ib()
namespace = attr.ib()
deltas = attr.ib()
gce_disk = attr.ib()
gce_disk_zone = attr.ib()
claim_name = attr.ib()
@property
def pretty_name(self):
return self.claim_name or self.name
def to_dict(self) -> Dict[str, Any]:
""" Helper, returns attr.asdict(self) """
return attr.asdict(self)
def __str__ (self):
return self.name
|
<commit_before>from typing import Dict, Any
import attr
@attr.s(slots=True)
class Rule:
"""
A rule describes how and when to make backups.
"""
volume_name = attr.ib()
namespace = attr.ib()
deltas = attr.ib()
gce_disk = attr.ib()
gce_disk_zone = attr.ib()
claim_name = attr.ib()
@property
def pretty_name(self):
return self.claim_name or self.name
def to_dict(self) -> Dict[str, Any]:
""" Helper, returns attr.asdict(self) """
return attr.asdict(self)
def __str__ (self):
return self.name
<commit_msg>Fix accidentally committed attribute name change<commit_after>
|
from typing import Dict, Any
import attr
@attr.s(slots=True)
class Rule:
"""
A rule describes how and when to make backups.
"""
name = attr.ib()
namespace = attr.ib()
deltas = attr.ib()
gce_disk = attr.ib()
gce_disk_zone = attr.ib()
claim_name = attr.ib()
@property
def pretty_name(self):
return self.claim_name or self.name
def to_dict(self) -> Dict[str, Any]:
""" Helper, returns attr.asdict(self) """
return attr.asdict(self)
def __str__ (self):
return self.name
|
from typing import Dict, Any
import attr
@attr.s(slots=True)
class Rule:
"""
A rule describes how and when to make backups.
"""
volume_name = attr.ib()
namespace = attr.ib()
deltas = attr.ib()
gce_disk = attr.ib()
gce_disk_zone = attr.ib()
claim_name = attr.ib()
@property
def pretty_name(self):
return self.claim_name or self.name
def to_dict(self) -> Dict[str, Any]:
""" Helper, returns attr.asdict(self) """
return attr.asdict(self)
def __str__ (self):
return self.name
Fix accidentally committed attribute name change
from typing import Dict, Any
import attr
@attr.s(slots=True)
class Rule:
"""
A rule describes how and when to make backups.
"""
name = attr.ib()
namespace = attr.ib()
deltas = attr.ib()
gce_disk = attr.ib()
gce_disk_zone = attr.ib()
claim_name = attr.ib()
@property
def pretty_name(self):
return self.claim_name or self.name
def to_dict(self) -> Dict[str, Any]:
""" Helper, returns attr.asdict(self) """
return attr.asdict(self)
def __str__ (self):
return self.name
|
<commit_before>from typing import Dict, Any
import attr
@attr.s(slots=True)
class Rule:
"""
A rule describes how and when to make backups.
"""
volume_name = attr.ib()
namespace = attr.ib()
deltas = attr.ib()
gce_disk = attr.ib()
gce_disk_zone = attr.ib()
claim_name = attr.ib()
@property
def pretty_name(self):
return self.claim_name or self.name
def to_dict(self) -> Dict[str, Any]:
""" Helper, returns attr.asdict(self) """
return attr.asdict(self)
def __str__ (self):
return self.name
<commit_msg>Fix accidentally committed attribute name change<commit_after>from typing import Dict, Any
import attr
@attr.s(slots=True)
class Rule:
"""
A rule describes how and when to make backups.
"""
name = attr.ib()
namespace = attr.ib()
deltas = attr.ib()
gce_disk = attr.ib()
gce_disk_zone = attr.ib()
claim_name = attr.ib()
@property
def pretty_name(self):
return self.claim_name or self.name
def to_dict(self) -> Dict[str, Any]:
""" Helper, returns attr.asdict(self) """
return attr.asdict(self)
def __str__ (self):
return self.name
|
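A quick sketch of the restored `name` attribute in use; every field value below is invented for illustration:

rule = Rule(name='pv-data', namespace='default', deltas='PT1H P1D',
            gce_disk='disk-1', gce_disk_zone='europe-west1-b',
            claim_name='data-claim')
print(rule.pretty_name)  # 'data-claim', falling back to rule.name when unset
print(rule.to_dict())    # plain dict via attr.asdict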
6444674ced35019c3139dfeb7af69dfe985b3fe1
|
choosealicense/test/test_context.py
|
choosealicense/test/test_context.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the `license context` function
"""
from click.testing import CliRunner
from choosealicense.main import (context, LICENSE_WITH_CONTEXT,
get_default_context)
def test_show_license_context():
    all_the_licenses = ('agpl-3.0, apache-2.0, artistic-2.0, bsd-2-clause, '
                        'bsd-3-clause, cc0-1.0, epl-1.0, gpl-2.0, gpl-3.0, '
                        'isc, lgpl-2.1, lgpl-3.0, mit, mpl-2.0, unlicense').split(', ')
runner = CliRunner()
for license in all_the_licenses:
result = runner.invoke(context, license)
output, exit_code = result.output, result.exit_code
assert exit_code == 0
if license not in LICENSE_WITH_CONTEXT:
assert output == ("Just use it, there's no context for "
"the license.\n")
else:
defaults = get_default_context()
if license in ['mit', 'artistic-2.0', 'bsd-2-clause']:
assert output == (
"The template has following defaults:\n"
"\tyear: {0}\n"
"\tfullname: {1}\n"
"You can overwrite them at your ease.\n"
).format(defaults['year'], defaults['fullname'])
if license == 'isc':
assert output == (
"The template has following defaults:\n"
"\tyear: {0}\n"
"\tfullname: {1}\n"
"\temail: {2}\n"
"You can overwrite them at your ease.\n"
).format(defaults['year'], defaults['fullname'],
defaults['email'])
if license == 'bsd-3-clause':
assert output == (
"The template has following defaults:\n"
"\tyear: {0}\n"
"\tfullname: {1}\n"
"\tproject: {2}\n"
"You can overwrite them at your ease.\n"
).format(defaults['year'], defaults['fullname'],
defaults['project'])
|
Add test for `license context` function
|
Add test for `license context` function
|
Python
|
mit
|
lord63/choosealicense-cli
|
Add test for `license context` function
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the `license context` function
"""
from click.testing import CliRunner
from choosealicense.main import (context, LICENSE_WITH_CONTEXT,
get_default_context)
def test_show_license_context():
    all_the_licenses = ('agpl-3.0, apache-2.0, artistic-2.0, bsd-2-clause, '
                        'bsd-3-clause, cc0-1.0, epl-1.0, gpl-2.0, gpl-3.0, '
                        'isc, lgpl-2.1, lgpl-3.0, mit, mpl-2.0, unlicense').split(', ')
runner = CliRunner()
for license in all_the_licenses:
result = runner.invoke(context, license)
output, exit_code = result.output, result.exit_code
assert exit_code == 0
if license not in LICENSE_WITH_CONTEXT:
assert output == ("Just use it, there's no context for "
"the license.\n")
else:
defaults = get_default_context()
if license in ['mit', 'artistic-2.0', 'bsd-2-clause']:
assert output == (
"The template has following defaults:\n"
"\tyear: {0}\n"
"\tfullname: {1}\n"
"You can overwrite them at your ease.\n"
).format(defaults['year'], defaults['fullname'])
if license == 'isc':
assert output == (
"The template has following defaults:\n"
"\tyear: {0}\n"
"\tfullname: {1}\n"
"\temail: {2}\n"
"You can overwrite them at your ease.\n"
).format(defaults['year'], defaults['fullname'],
defaults['email'])
if license == 'bsd-3-clause':
assert output == (
"The template has following defaults:\n"
"\tyear: {0}\n"
"\tfullname: {1}\n"
"\tproject: {2}\n"
"You can overwrite them at your ease.\n"
).format(defaults['year'], defaults['fullname'],
defaults['project'])
|
<commit_before><commit_msg>Add test for `license context` function<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the `license context` function
"""
from click.testing import CliRunner
from choosealicense.main import (context, LICENSE_WITH_CONTEXT,
get_default_context)
def test_show_license_context():
    all_the_licenses = ('agpl-3.0, apache-2.0, artistic-2.0, bsd-2-clause, '
                        'bsd-3-clause, cc0-1.0, epl-1.0, gpl-2.0, gpl-3.0, '
                        'isc, lgpl-2.1, lgpl-3.0, mit, mpl-2.0, unlicense').split(', ')
runner = CliRunner()
for license in all_the_licenses:
result = runner.invoke(context, license)
output, exit_code = result.output, result.exit_code
assert exit_code == 0
if license not in LICENSE_WITH_CONTEXT:
assert output == ("Just use it, there's no context for "
"the license.\n")
else:
defaults = get_default_context()
if license in ['mit', 'artistic-2.0', 'bsd-2-clause']:
assert output == (
"The template has following defaults:\n"
"\tyear: {0}\n"
"\tfullname: {1}\n"
"You can overwrite them at your ease.\n"
).format(defaults['year'], defaults['fullname'])
if license == 'isc':
assert output == (
"The template has following defaults:\n"
"\tyear: {0}\n"
"\tfullname: {1}\n"
"\temail: {2}\n"
"You can overwrite them at your ease.\n"
).format(defaults['year'], defaults['fullname'],
defaults['email'])
if license == 'bsd-3-clause':
assert output == (
"The template has following defaults:\n"
"\tyear: {0}\n"
"\tfullname: {1}\n"
"\tproject: {2}\n"
"You can overwrite them at your ease.\n"
).format(defaults['year'], defaults['fullname'],
defaults['project'])
|
Add test for `license context` function
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the `license context` function
"""
from click.testing import CliRunner
from choosealicense.main import (context, LICENSE_WITH_CONTEXT,
get_default_context)
def test_show_license_context():
    all_the_licenses = ('agpl-3.0, apache-2.0, artistic-2.0, bsd-2-clause, '
                        'bsd-3-clause, cc0-1.0, epl-1.0, gpl-2.0, gpl-3.0, '
                        'isc, lgpl-2.1, lgpl-3.0, mit, mpl-2.0, unlicense').split(', ')
runner = CliRunner()
for license in all_the_licenses:
result = runner.invoke(context, license)
output, exit_code = result.output, result.exit_code
assert exit_code == 0
if license not in LICENSE_WITH_CONTEXT:
assert output == ("Just use it, there's no context for "
"the license.\n")
else:
defaults = get_default_context()
if license in ['mit', 'artistic-2.0', 'bsd-2-clause']:
assert output == (
"The template has following defaults:\n"
"\tyear: {0}\n"
"\tfullname: {1}\n"
"You can overwrite them at your ease.\n"
).format(defaults['year'], defaults['fullname'])
if license == 'isc':
assert output == (
"The template has following defaults:\n"
"\tyear: {0}\n"
"\tfullname: {1}\n"
"\temail: {2}\n"
"You can overwrite them at your ease.\n"
).format(defaults['year'], defaults['fullname'],
defaults['email'])
if license == 'bsd-3-clause':
assert output == (
"The template has following defaults:\n"
"\tyear: {0}\n"
"\tfullname: {1}\n"
"\tproject: {2}\n"
"You can overwrite them at your ease.\n"
).format(defaults['year'], defaults['fullname'],
defaults['project'])
|
<commit_before><commit_msg>Add test for `license context` function<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the `license context` function
"""
from click.testing import CliRunner
from choosealicense.main import (context, LICENSE_WITH_CONTEXT,
get_default_context)
def test_show_license_context():
    all_the_licenses = ('agpl-3.0, apache-2.0, artistic-2.0, bsd-2-clause, '
                        'bsd-3-clause, cc0-1.0, epl-1.0, gpl-2.0, gpl-3.0, '
                        'isc, lgpl-2.1, lgpl-3.0, mit, mpl-2.0, unlicense').split(', ')
runner = CliRunner()
for license in all_the_licenses:
result = runner.invoke(context, license)
output, exit_code = result.output, result.exit_code
assert exit_code == 0
if license not in LICENSE_WITH_CONTEXT:
assert output == ("Just use it, there's no context for "
"the license.\n")
else:
defaults = get_default_context()
if license in ['mit', 'artistic-2.0', 'bsd-2-clause']:
assert output == (
"The template has following defaults:\n"
"\tyear: {0}\n"
"\tfullname: {1}\n"
"You can overwrite them at your ease.\n"
).format(defaults['year'], defaults['fullname'])
if license == 'isc':
assert output == (
"The template has following defaults:\n"
"\tyear: {0}\n"
"\tfullname: {1}\n"
"\temail: {2}\n"
"You can overwrite them at your ease.\n"
).format(defaults['year'], defaults['fullname'],
defaults['email'])
if license == 'bsd-3-clause':
assert output == (
"The template has following defaults:\n"
"\tyear: {0}\n"
"\tfullname: {1}\n"
"\tproject: {2}\n"
"You can overwrite them at your ease.\n"
).format(defaults['year'], defaults['fullname'],
defaults['project'])
|
|
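A note on the `.split(', ')` fix applied in the test above: iterating the original parenthesised string would have yielded single characters, not license names. A minimal CliRunner sketch of the intended per-license call (imports reused from the test itself):

from click.testing import CliRunner
from choosealicense.main import context

runner = CliRunner()
result = runner.invoke(context, ['mit'])  # args as a list of strings is unambiguous
assert result.exit_code == 0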
a90b6fb3b03fb177b07814873f1fcfe01c7cab6a
|
labonneboite/alembic/versions/c519ecaf1fa6_deduplicate_users.py
|
labonneboite/alembic/versions/c519ecaf1fa6_deduplicate_users.py
|
"""
deduplicate users
Revision ID: c519ecaf1fa6
Revises: a6ff4a27b063
Create Date: 2018-09-26 16:45:13.810694
"""
# from alembic import op
import sqlalchemy as sa
# Revision identifiers, used by Alembic.
revision = 'c519ecaf1fa6'
down_revision = 'a6ff4a27b063'
branch_labels = None
depends_on = None
def upgrade():
try:
deduplicate_users()
except KeyboardInterrupt:
pass
def downgrade():
# This migration can be run as many times as we need: just rollback
# (alembic downgrade -1) and re-apply (alembic upgrade HEAD).
pass
def deduplicate_users():
# We import the app to initialize the social models
import labonneboite.web.app # pylint: disable=unused-import,unused-variable
from labonneboite.common.database import db_session
from labonneboite.common.models import auth
from labonneboite.common.models.user_favorite_offices import UserFavoriteOffice
# Iterate on duplicated users
for user in auth.User.query.group_by('external_id').having(sa.func.count(auth.User.external_id) > 1):
duplicate_user_ids = []
favorite_count = 0
# Create favorites, if necessary
for duplicate_user in auth.User.query.filter(auth.User.external_id == user.external_id, auth.User.id != user.id):
duplicate_user_ids.append(duplicate_user.id)
for favorite in duplicate_user.favorite_offices:
_, created = UserFavoriteOffice.get_or_create(user_id=user.id, office_siret=favorite.office_siret)
if created:
favorite_count += 1
print("Removing {} duplicates for user #{} ({} favorite added to original user)".format(len(duplicate_user_ids), user.id, favorite_count))
# Remove duplicate social user
db_session.query(auth.UserSocialAuth).filter(auth.UserSocialAuth.user_id.in_(duplicate_user_ids)).delete(synchronize_session=False)
# Remove duplicate user
auth.User.query.filter(auth.User.id.in_(duplicate_user_ids)).delete(synchronize_session=False)
|
Add data migration to deduplicate users
|
Add data migration to deduplicate users
|
Python
|
agpl-3.0
|
StartupsPoleEmploi/labonneboite,StartupsPoleEmploi/labonneboite,StartupsPoleEmploi/labonneboite,StartupsPoleEmploi/labonneboite
|
Add data migration to deduplicate users
|
"""
deduplicate users
Revision ID: c519ecaf1fa6
Revises: a6ff4a27b063
Create Date: 2018-09-26 16:45:13.810694
"""
# from alembic import op
import sqlalchemy as sa
# Revision identifiers, used by Alembic.
revision = 'c519ecaf1fa6'
down_revision = 'a6ff4a27b063'
branch_labels = None
depends_on = None
def upgrade():
try:
deduplicate_users()
except KeyboardInterrupt:
pass
def downgrade():
# This migration can be run as many times as we need: just rollback
# (alembic downgrade -1) and re-apply (alembic upgrade HEAD).
pass
def deduplicate_users():
# We import the app to initialize the social models
import labonneboite.web.app # pylint: disable=unused-import,unused-variable
from labonneboite.common.database import db_session
from labonneboite.common.models import auth
from labonneboite.common.models.user_favorite_offices import UserFavoriteOffice
# Iterate on duplicated users
for user in auth.User.query.group_by('external_id').having(sa.func.count(auth.User.external_id) > 1):
duplicate_user_ids = []
favorite_count = 0
# Create favorites, if necessary
for duplicate_user in auth.User.query.filter(auth.User.external_id == user.external_id, auth.User.id != user.id):
duplicate_user_ids.append(duplicate_user.id)
for favorite in duplicate_user.favorite_offices:
_, created = UserFavoriteOffice.get_or_create(user_id=user.id, office_siret=favorite.office_siret)
if created:
favorite_count += 1
print("Removing {} duplicates for user #{} ({} favorite added to original user)".format(len(duplicate_user_ids), user.id, favorite_count))
# Remove duplicate social user
db_session.query(auth.UserSocialAuth).filter(auth.UserSocialAuth.user_id.in_(duplicate_user_ids)).delete(synchronize_session=False)
# Remove duplicate user
auth.User.query.filter(auth.User.id.in_(duplicate_user_ids)).delete(synchronize_session=False)
|
<commit_before><commit_msg>Add data migration to deduplicate users<commit_after>
|
"""
deduplicate users
Revision ID: c519ecaf1fa6
Revises: a6ff4a27b063
Create Date: 2018-09-26 16:45:13.810694
"""
# from alembic import op
import sqlalchemy as sa
# Revision identifiers, used by Alembic.
revision = 'c519ecaf1fa6'
down_revision = 'a6ff4a27b063'
branch_labels = None
depends_on = None
def upgrade():
try:
deduplicate_users()
except KeyboardInterrupt:
pass
def downgrade():
# This migration can be run as many times as we need: just rollback
# (alembic downgrade -1) and re-apply (alembic upgrade HEAD).
pass
def deduplicate_users():
# We import the app to initialize the social models
import labonneboite.web.app # pylint: disable=unused-import,unused-variable
from labonneboite.common.database import db_session
from labonneboite.common.models import auth
from labonneboite.common.models.user_favorite_offices import UserFavoriteOffice
# Iterate on duplicated users
for user in auth.User.query.group_by('external_id').having(sa.func.count(auth.User.external_id) > 1):
duplicate_user_ids = []
favorite_count = 0
# Create favorites, if necessary
for duplicate_user in auth.User.query.filter(auth.User.external_id == user.external_id, auth.User.id != user.id):
duplicate_user_ids.append(duplicate_user.id)
for favorite in duplicate_user.favorite_offices:
_, created = UserFavoriteOffice.get_or_create(user_id=user.id, office_siret=favorite.office_siret)
if created:
favorite_count += 1
print("Removing {} duplicates for user #{} ({} favorite added to original user)".format(len(duplicate_user_ids), user.id, favorite_count))
# Remove duplicate social user
db_session.query(auth.UserSocialAuth).filter(auth.UserSocialAuth.user_id.in_(duplicate_user_ids)).delete(synchronize_session=False)
# Remove duplicate user
auth.User.query.filter(auth.User.id.in_(duplicate_user_ids)).delete(synchronize_session=False)
|
Add data migration to deduplicate users"""
deduplicate users
Revision ID: c519ecaf1fa6
Revises: a6ff4a27b063
Create Date: 2018-09-26 16:45:13.810694
"""
# from alembic import op
import sqlalchemy as sa
# Revision identifiers, used by Alembic.
revision = 'c519ecaf1fa6'
down_revision = 'a6ff4a27b063'
branch_labels = None
depends_on = None
def upgrade():
try:
deduplicate_users()
except KeyboardInterrupt:
pass
def downgrade():
# This migration can be run as many times as we need: just rollback
# (alembic downgrade -1) and re-apply (alembic upgrade HEAD).
pass
def deduplicate_users():
# We import the app to initialize the social models
import labonneboite.web.app # pylint: disable=unused-import,unused-variable
from labonneboite.common.database import db_session
from labonneboite.common.models import auth
from labonneboite.common.models.user_favorite_offices import UserFavoriteOffice
# Iterate on duplicated users
for user in auth.User.query.group_by('external_id').having(sa.func.count(auth.User.external_id) > 1):
duplicate_user_ids = []
favorite_count = 0
# Create favorites, if necessary
for duplicate_user in auth.User.query.filter(auth.User.external_id == user.external_id, auth.User.id != user.id):
duplicate_user_ids.append(duplicate_user.id)
for favorite in duplicate_user.favorite_offices:
_, created = UserFavoriteOffice.get_or_create(user_id=user.id, office_siret=favorite.office_siret)
if created:
favorite_count += 1
print("Removing {} duplicates for user #{} ({} favorite added to original user)".format(len(duplicate_user_ids), user.id, favorite_count))
# Remove duplicate social user
db_session.query(auth.UserSocialAuth).filter(auth.UserSocialAuth.user_id.in_(duplicate_user_ids)).delete(synchronize_session=False)
# Remove duplicate user
auth.User.query.filter(auth.User.id.in_(duplicate_user_ids)).delete(synchronize_session=False)
|
<commit_before><commit_msg>Add data migration to deduplicate users<commit_after>"""
deduplicate users
Revision ID: c519ecaf1fa6
Revises: a6ff4a27b063
Create Date: 2018-09-26 16:45:13.810694
"""
# from alembic import op
import sqlalchemy as sa
# Revision identifiers, used by Alembic.
revision = 'c519ecaf1fa6'
down_revision = 'a6ff4a27b063'
branch_labels = None
depends_on = None
def upgrade():
try:
deduplicate_users()
except KeyboardInterrupt:
pass
def downgrade():
# This migration can be run as many times as we need: just rollback
# (alembic downgrade -1) and re-apply (alembic upgrade HEAD).
pass
def deduplicate_users():
# We import the app to initialize the social models
import labonneboite.web.app # pylint: disable=unused-import,unused-variable
from labonneboite.common.database import db_session
from labonneboite.common.models import auth
from labonneboite.common.models.user_favorite_offices import UserFavoriteOffice
# Iterate on duplicated users
for user in auth.User.query.group_by('external_id').having(sa.func.count(auth.User.external_id) > 1):
duplicate_user_ids = []
favorite_count = 0
# Create favorites, if necessary
for duplicate_user in auth.User.query.filter(auth.User.external_id == user.external_id, auth.User.id != user.id):
duplicate_user_ids.append(duplicate_user.id)
for favorite in duplicate_user.favorite_offices:
_, created = UserFavoriteOffice.get_or_create(user_id=user.id, office_siret=favorite.office_siret)
if created:
favorite_count += 1
print("Removing {} duplicates for user #{} ({} favorite added to original user)".format(len(duplicate_user_ids), user.id, favorite_count))
# Remove duplicate social user
db_session.query(auth.UserSocialAuth).filter(auth.UserSocialAuth.user_id.in_(duplicate_user_ids)).delete(synchronize_session=False)
# Remove duplicate user
auth.User.query.filter(auth.User.id.in_(duplicate_user_ids)).delete(synchronize_session=False)
|
|
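The migration's duplicate detection is a plain GROUP BY / HAVING pattern; stripped down to its core (model and session setup assumed, as in the migration itself):

import sqlalchemy as sa

# One representative row per external_id that occurs more than once.
duplicated = (auth.User.query
              .group_by(auth.User.external_id)
              .having(sa.func.count(auth.User.external_id) > 1))
for keeper in duplicated:
    print(keeper.id, keeper.external_id)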
8b80caed94f4bed7e970f6b7c730b71de2133da8
|
django_adaptive/cached.py
|
django_adaptive/cached.py
|
"""
Provide support for Cached loader.
Generate key for cached templates based on template names
and device type (desktop or mobile or tablet)
"""
from django.template.loaders.cached import Loader as CachedLoader
from django_adaptive.loader_utils import get_template_suffix
class AdaptiveTemplateCache(dict):
def get_device(self):
return get_template_suffix()
def get_key(self, key):
return key + self.get_device()
def __getitem__(self, key):
return super(AdaptiveTemplateCache, self).__getitem__(
self.get_key(key))
def __setitem__(self, key, value):
return super(AdaptiveTemplateCache, self).__setitem__(
self.get_key(key), value)
class Loader(CachedLoader):
def __init__(self, loaders):
super(Loader, self).__init__(loaders)
self.template_cache = AdaptiveTemplateCache()
|
Support for Cached template loader with django adaptive
|
Support for Cached template loader with django adaptive
|
Python
|
bsd-3-clause
|
RevSquare/django-adaptive
|
Support for Cached template loader with django adaptive
|
"""
Provide support for Cached loader.
Generate key for cached templates based on template names
and device type (desktop or mobile or tablet)
"""
from django.template.loaders.cached import Loader as CachedLoader
from django_adaptive.loader_utils import get_template_suffix
class AdaptiveTemplateCache(dict):
def get_device(self):
return get_template_suffix()
def get_key(self, key):
return key + self.get_device()
def __getitem__(self, key):
return super(AdaptiveTemplateCache, self).__getitem__(
self.get_key(key))
def __setitem__(self, key, value):
return super(AdaptiveTemplateCache, self).__setitem__(
self.get_key(key), value)
class Loader(CachedLoader):
def __init__(self, loaders):
super(Loader, self).__init__(loaders)
self.template_cache = AdaptiveTemplateCache()
|
<commit_before><commit_msg>Support for Cached template loader with django adaptive<commit_after>
|
"""
Provide support for Cached loader.
Generate key for cached templates based on template names
and device type (desktop or mobile or tablet)
"""
from django.template.loaders.cached import Loader as CachedLoader
from django_adaptive.loader_utils import get_template_suffix
class AdaptiveTemplateCache(dict):
def get_device(self):
return get_template_suffix()
def get_key(self, key):
return key + self.get_device()
def __getitem__(self, key):
return super(AdaptiveTemplateCache, self).__getitem__(
self.get_key(key))
def __setitem__(self, key, value):
return super(AdaptiveTemplateCache, self).__setitem__(
self.get_key(key), value)
class Loader(CachedLoader):
def __init__(self, loaders):
super(Loader, self).__init__(loaders)
self.template_cache = AdaptiveTemplateCache()
|
Support for Cached template loader with django adaptive"""
Provide support for Cached loader.
Generate key for cached templates based on template names
and device type (desktop or mobile or tablet)
"""
from django.template.loaders.cached import Loader as CachedLoader
from django_adaptive.loader_utils import get_template_suffix
class AdaptiveTemplateCache(dict):
def get_device(self):
return get_template_suffix()
def get_key(self, key):
return key + self.get_device()
def __getitem__(self, key):
return super(AdaptiveTemplateCache, self).__getitem__(
self.get_key(key))
def __setitem__(self, key, value):
return super(AdaptiveTemplateCache, self).__setitem__(
self.get_key(key), value)
class Loader(CachedLoader):
def __init__(self, loaders):
super(Loader, self).__init__(loaders)
self.template_cache = AdaptiveTemplateCache()
|
<commit_before><commit_msg>Support for Cached template loader with django adaptive<commit_after>"""
Provide support for Cached loader.
Generate key for cached templates based on template names
and device type (desktop or mobile or tablet)
"""
from django.template.loaders.cached import Loader as CachedLoader
from django_adaptive.loader_utils import get_template_suffix
class AdaptiveTemplateCache(dict):
def get_device(self):
return get_template_suffix()
def get_key(self, key):
return key + self.get_device()
def __getitem__(self, key):
return super(AdaptiveTemplateCache, self).__getitem__(
self.get_key(key))
def __setitem__(self, key, value):
return super(AdaptiveTemplateCache, self).__setitem__(
self.get_key(key), value)
class Loader(CachedLoader):
def __init__(self, loaders):
super(Loader, self).__init__(loaders)
self.template_cache = AdaptiveTemplateCache()
|
|
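To take effect, the loader above still has to be referenced from Django's settings; a plausible sketch for the pre-1.8 TEMPLATE_LOADERS style this code targets (the exact project settings are an assumption):

TEMPLATE_LOADERS = (
    ('django_adaptive.cached.Loader', (
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    )),
)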
89570d098dd15b11e8787e44c353e27dfc1debff
|
dashboard_app/migrations/0002_auto_20140917_1935.py
|
dashboard_app/migrations/0002_auto_20140917_1935.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('dashboard_app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='bundle',
name='is_deserialized',
field=models.BooleanField(default=False, help_text='Set when document has been analyzed and loaded into the database', verbose_name='Is deserialized', editable=False),
),
migrations.AlterField(
model_name='bundlestream',
name='is_anonymous',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='testrun',
name='time_check_performed',
field=models.BooleanField(default=False, help_text="Indicator on wether timestamps in the log file (and any data derived from them) should be trusted.<br/>Many pre-production or development devices do not have a battery-powered RTC and it's not common for development images not to synchronize time with internet time servers.<br/>This field allows us to track tests results that <em>certainly</em> have correct time if we ever end up with lots of tests results from 1972", verbose_name='Time check performed'),
),
]
|
Add automatic migration for dashboard changes.
|
Add automatic migration for dashboard changes.
Migrations for 'dashboard_app':
0002_auto_20140917_1935.py:
- Alter field is_deserialized on bundle
- Alter field is_anonymous on bundlestream
- Alter field time_check_performed on testrun
Change-Id: I2627c0b48512ceb0c50bb8d29ea67827a6f73c80
|
Python
|
agpl-3.0
|
Linaro/lava-server,Linaro/lava-server,Linaro/lava-server,Linaro/lava-server
|
Add automatic migration for dashboard changes.
Migrations for 'dashboard_app':
0002_auto_20140917_1935.py:
- Alter field is_deserialized on bundle
- Alter field is_anonymous on bundlestream
- Alter field time_check_performed on testrun
Change-Id: I2627c0b48512ceb0c50bb8d29ea67827a6f73c80
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('dashboard_app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='bundle',
name='is_deserialized',
field=models.BooleanField(default=False, help_text='Set when document has been analyzed and loaded into the database', verbose_name='Is deserialized', editable=False),
),
migrations.AlterField(
model_name='bundlestream',
name='is_anonymous',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='testrun',
name='time_check_performed',
field=models.BooleanField(default=False, help_text="Indicator on wether timestamps in the log file (and any data derived from them) should be trusted.<br/>Many pre-production or development devices do not have a battery-powered RTC and it's not common for development images not to synchronize time with internet time servers.<br/>This field allows us to track tests results that <em>certainly</em> have correct time if we ever end up with lots of tests results from 1972", verbose_name='Time check performed'),
),
]
|
<commit_before><commit_msg>Add automatic migration for dashboard changes.
Migrations for 'dashboard_app':
0002_auto_20140917_1935.py:
- Alter field is_deserialized on bundle
- Alter field is_anonymous on bundlestream
- Alter field time_check_performed on testrun
Change-Id: I2627c0b48512ceb0c50bb8d29ea67827a6f73c80<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('dashboard_app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='bundle',
name='is_deserialized',
field=models.BooleanField(default=False, help_text='Set when document has been analyzed and loaded into the database', verbose_name='Is deserialized', editable=False),
),
migrations.AlterField(
model_name='bundlestream',
name='is_anonymous',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='testrun',
name='time_check_performed',
field=models.BooleanField(default=False, help_text="Indicator on wether timestamps in the log file (and any data derived from them) should be trusted.<br/>Many pre-production or development devices do not have a battery-powered RTC and it's not common for development images not to synchronize time with internet time servers.<br/>This field allows us to track tests results that <em>certainly</em> have correct time if we ever end up with lots of tests results from 1972", verbose_name='Time check performed'),
),
]
|
Add automatic migration for dashboard changes.
Migrations for 'dashboard_app':
0002_auto_20140917_1935.py:
- Alter field is_deserialized on bundle
- Alter field is_anonymous on bundlestream
- Alter field time_check_performed on testrun
Change-Id: I2627c0b48512ceb0c50bb8d29ea67827a6f73c80
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('dashboard_app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='bundle',
name='is_deserialized',
field=models.BooleanField(default=False, help_text='Set when document has been analyzed and loaded into the database', verbose_name='Is deserialized', editable=False),
),
migrations.AlterField(
model_name='bundlestream',
name='is_anonymous',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='testrun',
name='time_check_performed',
field=models.BooleanField(default=False, help_text="Indicator on wether timestamps in the log file (and any data derived from them) should be trusted.<br/>Many pre-production or development devices do not have a battery-powered RTC and it's not common for development images not to synchronize time with internet time servers.<br/>This field allows us to track tests results that <em>certainly</em> have correct time if we ever end up with lots of tests results from 1972", verbose_name='Time check performed'),
),
]
|
<commit_before><commit_msg>Add automatic migration for dashboard changes.
Migrations for 'dashboard_app':
0002_auto_20140917_1935.py:
- Alter field is_deserialized on bundle
- Alter field is_anonymous on bundlestream
- Alter field time_check_performed on testrun
Change-Id: I2627c0b48512ceb0c50bb8d29ea67827a6f73c80<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('dashboard_app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='bundle',
name='is_deserialized',
field=models.BooleanField(default=False, help_text='Set when document has been analyzed and loaded into the database', verbose_name='Is deserialized', editable=False),
),
migrations.AlterField(
model_name='bundlestream',
name='is_anonymous',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='testrun',
name='time_check_performed',
field=models.BooleanField(default=False, help_text="Indicator on wether timestamps in the log file (and any data derived from them) should be trusted.<br/>Many pre-production or development devices do not have a battery-powered RTC and it's not common for development images not to synchronize time with internet time servers.<br/>This field allows us to track tests results that <em>certainly</em> have correct time if we ever end up with lots of tests results from 1972", verbose_name='Time check performed'),
),
]
|
|
ab0ecd4defaffc2e342d435ca490dd28e0314316
|
tests/test_simpleflow/test_exceptions.py
|
tests/test_simpleflow/test_exceptions.py
|
import unittest
from sure import expect
from simpleflow.exceptions import TaskFailed
class TestTaskFailed(unittest.TestCase):
def test_task_failed_representation(self):
failure = TaskFailed("message", None, None)
expect(str(failure)).to.equal("('message', None, None)")
expect(repr(failure)).to.equal('TaskFailed (message, "None")')
failure = TaskFailed("message", "reason", "detail")
expect(str(failure)).to.equal("('message', 'reason', 'detail')")
expect(repr(failure)).to.equal('TaskFailed (message, "reason")')
|
Add basic tests for simpleflow.exceptions.TaskFailed
|
Add basic tests for simpleflow.exceptions.TaskFailed
|
Python
|
mit
|
botify-labs/simpleflow,botify-labs/simpleflow
|
Add basic tests for simpleflow.exceptions.TaskFailed
|
import unittest
from sure import expect
from simpleflow.exceptions import TaskFailed
class TestTaskFailed(unittest.TestCase):
def test_task_failed_representation(self):
failure = TaskFailed("message", None, None)
expect(str(failure)).to.equal("('message', None, None)")
expect(repr(failure)).to.equal('TaskFailed (message, "None")')
failure = TaskFailed("message", "reason", "detail")
expect(str(failure)).to.equal("('message', 'reason', 'detail')")
expect(repr(failure)).to.equal('TaskFailed (message, "reason")')
|
<commit_before><commit_msg>Add basic tests for simpleflow.exceptions.TaskFailed<commit_after>
|
import unittest
from sure import expect
from simpleflow.exceptions import TaskFailed
class TestTaskFailed(unittest.TestCase):
def test_task_failed_representation(self):
failure = TaskFailed("message", None, None)
expect(str(failure)).to.equal("('message', None, None)")
expect(repr(failure)).to.equal('TaskFailed (message, "None")')
failure = TaskFailed("message", "reason", "detail")
expect(str(failure)).to.equal("('message', 'reason', 'detail')")
expect(repr(failure)).to.equal('TaskFailed (message, "reason")')
|
Add basic tests for simpleflow.exceptions.TaskFailed
import unittest
from sure import expect
from simpleflow.exceptions import TaskFailed
class TestTaskFailed(unittest.TestCase):
def test_task_failed_representation(self):
failure = TaskFailed("message", None, None)
expect(str(failure)).to.equal("('message', None, None)")
expect(repr(failure)).to.equal('TaskFailed (message, "None")')
failure = TaskFailed("message", "reason", "detail")
expect(str(failure)).to.equal("('message', 'reason', 'detail')")
expect(repr(failure)).to.equal('TaskFailed (message, "reason")')
|
<commit_before><commit_msg>Add basic tests for simpleflow.exceptions.TaskFailed<commit_after>import unittest
from sure import expect
from simpleflow.exceptions import TaskFailed
class TestTaskFailed(unittest.TestCase):
def test_task_failed_representation(self):
failure = TaskFailed("message", None, None)
expect(str(failure)).to.equal("('message', None, None)")
expect(repr(failure)).to.equal('TaskFailed (message, "None")')
failure = TaskFailed("message", "reason", "detail")
expect(str(failure)).to.equal("('message', 'reason', 'detail')")
expect(repr(failure)).to.equal('TaskFailed (message, "reason")')
|
|
ab55836a2ff1ab5ec1ee62a5119b2dbadf8944a9
|
tests/rules_tests/isValid_tests/EpsilonTest.py
|
tests/rules_tests/isValid_tests/EpsilonTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule, EPS
from .grammar import *
class EpsilonTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Add file to epsilon tests
|
Add file to epsilon tests
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add file to epsilon tests
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule, EPS
from .grammar import *
class EpsilonTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file to epsilon tests<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule, EPS
from .grammar import *
class EpsilonTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Add file to epsilon tests
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule, EPS
from .grammar import *
class EpsilonTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file to epsilon tests<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule, EPS
from .grammar import *
class EpsilonTest(TestCase):
pass
if __name__ == '__main__':
main()
|
|
31d2686555a93ddadd3713c3c880b75641d98d89
|
scripts/read_reldist.py
|
scripts/read_reldist.py
|
import os
import yaml
from thermof.parameters import plot_parameters
from thermof.read import read_framework_distance
# --------------------------------------------------------------------------------------------------
main = ''
results_file = '%s-reldist-results.yaml' % os.path.basename(main)
run_list_file = '%s-run-list.yaml' % os.path.basename(main)
# --------------------------------------------------------------------------------------------------
run_list = [os.path.join(main, i, 'Run1') for i in os.listdir(main) if os.path.isdir(os.path.join(main, i))]
dist_data = read_framework_distance(run_list, plot_parameters['f_dist'])
with open(results_file, 'w') as rfile:
yaml.dump(dist_data, rfile)
with open(run_list_file, 'w') as rlfile:
yaml.dump(run_list, rlfile)
|
Add script for reading reldist for multiple trials
|
Add script for reading reldist for multiple trials
|
Python
|
mit
|
kbsezginel/tee_mof,kbsezginel/tee_mof
|
Add script for reading reldist for multiple trials
|
import os
import yaml
from thermof.parameters import plot_parameters
from thermof.read import read_framework_distance
# --------------------------------------------------------------------------------------------------
main = ''
results_file = '%s-reldist-results.yaml' % os.path.basename(main)
run_list_file = '%s-run-list.yaml' % os.path.basename(main)
# --------------------------------------------------------------------------------------------------
run_list = [os.path.join(main, i, 'Run1') for i in os.listdir(main) if os.path.isdir(os.path.join(main, i))]
dist_data = read_framework_distance(run_list, plot_parameters['f_dist'])
with open(results_file, 'w') as rfile:
yaml.dump(dist_data, rfile)
with open(run_list_file, 'w') as rlfile:
yaml.dump(run_list, rlfile)
|
<commit_before><commit_msg>Add script for reading reldist for multiple trials<commit_after>
|
import os
import yaml
from thermof.parameters import plot_parameters
from thermof.read import read_framework_distance
# --------------------------------------------------------------------------------------------------
main = ''
results_file = '%s-reldist-results.yaml' % os.path.basename(main)
run_list_file = '%s-run-list.yaml' % os.path.basename(main)
# --------------------------------------------------------------------------------------------------
run_list = [os.path.join(main, i, 'Run1') for i in os.listdir(main) if os.path.isdir(os.path.join(main, i))]
dist_data = read_framework_distance(run_list, plot_parameters['f_dist'])
with open(results_file, 'w') as rfile:
yaml.dump(dist_data, rfile)
with open(run_list_file, 'w') as rlfile:
yaml.dump(run_list, rlfile)
|
Add script for reading reldist for multiple trials
import os
import yaml
from thermof.parameters import plot_parameters
from thermof.read import read_framework_distance
# --------------------------------------------------------------------------------------------------
main = ''
results_file = '%s-reldist-results.yaml' % os.path.basename(main)
run_list_file = '%s-run-list.yaml' % os.path.basename(main)
# --------------------------------------------------------------------------------------------------
run_list = [os.path.join(main, i, 'Run1') for i in os.listdir(main) if os.path.isdir(os.path.join(main, i))]
dist_data = read_framework_distance(run_list, plot_parameters['f_dist'])
with open(results_file, 'w') as rfile:
yaml.dump(dist_data, rfile)
with open(run_list_file, 'w') as rlfile:
yaml.dump(run_list, rlfile)
|
<commit_before><commit_msg>Add script for reading reldist for multiple trials<commit_after>import os
import yaml
from thermof.parameters import plot_parameters
from thermof.read import read_framework_distance
# --------------------------------------------------------------------------------------------------
main = ''
results_file = '%s-reldist-results.yaml' % os.path.basename(main)
run_list_file = '%s-run-list.yaml' % os.path.basename(main)
# --------------------------------------------------------------------------------------------------
run_list = [os.path.join(main, i, 'Run1') for i in os.listdir(main) if os.path.isdir(os.path.join(main, i))]
dist_data = read_framework_distance(run_list, plot_parameters['f_dist'])
with open(results_file, 'w') as rfile:
yaml.dump(dist_data, rfile)
with open(run_list_file, 'w') as rlfile:
yaml.dump(run_list, rlfile)
|
|
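Reading the dumped files back is symmetric; a short sketch reusing the script's own file-name variables:

import yaml

with open(results_file) as rfile:
    dist_data = yaml.safe_load(rfile)   # safe_load suffices for plain data
with open(run_list_file) as rlfile:
    run_list = yaml.safe_load(rlfile)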
9ceb3373b2e812662e402d797207b3cadd74b034
|
tests/functional/test_06_jenkins.py
|
tests/functional/test_06_jenkins.py
|
#!/usr/bin/python
#
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import config
import requests as http
from utils import Base
class TestJenkinsBasic(Base):
""" Functional tests to validate config repo bootstrap
"""
def test_config_jobs_exist(self):
""" Test if jenkins config-update and config-check are created
"""
r = http.get('%s/job/config-check' % config.JENKINS_SERVER)
self.assertEquals(r.status_code, 200)
r = http.get('%s/job/config-update' % config.JENKINS_SERVER)
self.assertEquals(r.status_code, 200)
|
Add a simple functional test for jenkins: check config-* jobs
|
Add a simple functional test for jenkins: check config-* jobs
Change-Id: I41f880e55fe380273ed614944bc4523278501252
|
Python
|
apache-2.0
|
invenfantasy/software-factory,enovance/software-factory,invenfantasy/software-factory,invenfantasy/software-factory,invenfantasy/software-factory,enovance/software-factory,enovance/software-factory,enovance/software-factory,invenfantasy/software-factory,enovance/software-factory
|
Add a simple functional test for jenkins: check config-* jobs
Change-Id: I41f880e55fe380273ed614944bc4523278501252
|
#!/usr/bin/python
#
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import config
import requests as http
from utils import Base
class TestJenkinsBasic(Base):
""" Functional tests to validate config repo bootstrap
"""
def test_config_jobs_exist(self):
""" Test if jenkins config-update and config-check are created
"""
r = http.get('%s/job/config-check' % config.JENKINS_SERVER)
self.assertEquals(r.status_code, 200)
r = http.get('%s/job/config-update' % config.JENKINS_SERVER)
self.assertEquals(r.status_code, 200)
|
<commit_before><commit_msg>Add a simple functional test for jenkins: check config-* jobs
Change-Id: I41f880e55fe380273ed614944bc4523278501252<commit_after>
|
#!/usr/bin/python
#
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import config
import requests as http
from utils import Base
class TestJenkinsBasic(Base):
""" Functional tests to validate config repo bootstrap
"""
def test_config_jobs_exist(self):
""" Test if jenkins config-update and config-check are created
"""
r = http.get('%s/job/config-check' % config.JENKINS_SERVER)
self.assertEquals(r.status_code, 200)
r = http.get('%s/job/config-update' % config.JENKINS_SERVER)
self.assertEquals(r.status_code, 200)
|
Add a simple functional test for jenkins: check config-* jobs
Change-Id: I41f880e55fe380273ed614944bc4523278501252
#!/usr/bin/python
#
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import config
import requests as http
from utils import Base
class TestJenkinsBasic(Base):
""" Functional tests to validate config repo bootstrap
"""
def test_config_jobs_exist(self):
""" Test if jenkins config-update and config-check are created
"""
r = http.get('%s/job/config-check' % config.JENKINS_SERVER)
self.assertEquals(r.status_code, 200)
r = http.get('%s/job/config-update' % config.JENKINS_SERVER)
self.assertEquals(r.status_code, 200)
|
<commit_before><commit_msg>Add a simple functional test for jenkins: check config-* jobs
Change-Id: I41f880e55fe380273ed614944bc4523278501252<commit_after>#!/usr/bin/python
#
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import config
import requests as http
from utils import Base
class TestJenkinsBasic(Base):
""" Functional tests to validate config repo bootstrap
"""
def test_config_jobs_exist(self):
""" Test if jenkins config-update and config-check are created
"""
r = http.get('%s/job/config-check' % config.JENKINS_SERVER)
self.assertEquals(r.status_code, 200)
r = http.get('%s/job/config-update' % config.JENKINS_SERVER)
self.assertEquals(r.status_code, 200)
|
|
99b680ee8d610e1c0d02c53e465a510e3c5a50d3
|
thinc/tests/unit/test_exceptions.py
|
thinc/tests/unit/test_exceptions.py
|
import pytest
from .. import exceptions as e
def test_shape_error():
raise_if(e.ShapeError.dimensions_mismatch(10, 20, 'inside test'))
def raise_if(e):
if e is not None:
        raise e
|
Add unit test for exceptions
|
Add unit test for exceptions
|
Python
|
mit
|
explosion/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc,spacy-io/thinc
|
Add unit test for exceptions
|
import pytest
from .. import exceptions as e
def test_shape_error():
raise_if(e.ShapeError.dimensions_mismatch(10, 20, 'inside test'))
def raise_if(e):
if e is not None:
        raise e
|
<commit_before><commit_msg>Add unit test for exceptions<commit_after>
|
import pytest
from .. import exceptions as e
def test_shape_error():
raise_if(e.ShapeError.dimensions_mismatch(10, 20, 'inside test'))
def raise_if(e):
if e is not None:
raise e
|
Add unit test for exceptionsimport pytest
from .. import exceptions as e
def test_shape_error():
raise_if(e.ShapeError.dimensions_mismatch(10, 20, 'inside test'))
def raise_if(e):
if e is not None:
raise e
|
<commit_before><commit_msg>Add unit test for exceptions<commit_after>import pytest
from .. import exceptions as e
def test_shape_error():
raise_if(e.ShapeError.dimensions_mismatch(10, 20, 'inside test'))
def raise_if(e):
if e is not None:
raise e
|
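A sketch of how a raise-if helper like the one above might be exercised with pytest so the test passes when the error is produced; the absolute import path is an assumption (the record uses a relative import) and the ShapeError API is taken from the snippet itself:

import pytest
from thinc import exceptions as e  # assumed import path

def raise_if(err):
    # Re-raise only when the check returned an exception object.
    if err is not None:
        raise err

def test_shape_error():
    with pytest.raises(e.ShapeError):
        raise_if(e.ShapeError.dimensions_mismatch(10, 20, 'inside test'))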
|
4cb92cbfd79117b81ac8b4fa9533c01933eb5770
|
shuup/admin/settings.py
|
shuup/admin/settings.py
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
"""
Settings of Shuup Admin.
See :ref:`apps-settings` (in :obj:`shuup.apps`) for general information
about the Shuup settings system. Especially, when inventing settings of
your own, the :ref:`apps-naming-settings` section is an important read.
"""
#: Spec which defines a list of Wizard Panes to be shown in Shuup Admin
#: for Shuup initialization and configuration.
#:
#: Panes must be subclasses of `shuup.admin.views.WizardPane`.
#:
SHUUP_SETUP_WIZARD_PANE_SPEC = []
|
Add missed wizard spec setting definition
|
Admin: Add missed wizard spec setting definition
|
Python
|
agpl-3.0
|
shoopio/shoop,shoopio/shoop,suutari-ai/shoop,suutari-ai/shoop,shoopio/shoop,suutari-ai/shoop
|
Admin: Add missed wizard spec setting definition
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
"""
Settings of Shuup Admin.
See :ref:`apps-settings` (in :obj:`shuup.apps`) for general information
about the Shuup settings system. Especially, when inventing settings of
your own, the :ref:`apps-naming-settings` section is an important read.
"""
#: Spec which defines a list of Wizard Panes to be shown in Shuup Admin
#: for Shuup initialization and configuration.
#:
#: Panes must be subclasses of `shuup.admin.views.WizardPane`.
#:
SHUUP_SETUP_WIZARD_PANE_SPEC = []
|
<commit_before><commit_msg>Admin: Add missed wizard spec setting definition<commit_after>
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
"""
Settings of Shuup Admin.
See :ref:`apps-settings` (in :obj:`shuup.apps`) for general information
about the Shuup settings system. Especially, when inventing settings of
your own, the :ref:`apps-naming-settings` section is an important read.
"""
#: Spec which defines a list of Wizard Panes to be shown in Shuup Admin
#: for Shuup initialization and configuration.
#:
#: Panes must be subclasses of `shuup.admin.views.WizardPane`.
#:
SHUUP_SETUP_WIZARD_PANE_SPEC = []
|
Admin: Add missed wizard spec setting definition# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
"""
Settings of Shuup Admin.
See :ref:`apps-settings` (in :obj:`shuup.apps`) for general information
about the Shuup settings system. Especially, when inventing settings of
your own, the :ref:`apps-naming-settings` section is an important read.
"""
#: Spec which defines a list of Wizard Panes to be shown in Shuup Admin
#: for Shuup initialization and configuration.
#:
#: Panes must be subclasses of `shuup.admin.views.WizardPane`.
#:
SHUUP_SETUP_WIZARD_PANE_SPEC = []
|
<commit_before><commit_msg>Admin: Add missed wizard spec setting definition<commit_after># -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
"""
Settings of Shuup Admin.
See :ref:`apps-settings` (in :obj:`shuup.apps`) for general information
about the Shuup settings system. Especially, when inventing settings of
your own, the :ref:`apps-naming-settings` section is an important read.
"""
#: Spec which defines a list of Wizard Panes to be shown in Shuup Admin
#: for Shuup initialization and configuration.
#:
#: Panes must be subclasses of `shuup.admin.views.WizardPane`.
#:
SHUUP_SETUP_WIZARD_PANE_SPEC = []
|
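SHUUP_SETUP_WIZARD_PANE_SPEC is documented as a list of pane classes, and spec settings like this are commonly expressed as dotted import paths. A hypothetical sketch of resolving such a spec; the pane path below is made up:

import importlib

SHUUP_SETUP_WIZARD_PANE_SPEC = [
    'myshop.wizard.ShopDetailsPane',  # hypothetical dotted path
]

def load_panes(spec):
    # Resolve each 'package.module.ClassName' string to the class object.
    panes = []
    for path in spec:
        module_name, _, class_name = path.rpartition('.')
        module = importlib.import_module(module_name)
        panes.append(getattr(module, class_name))
    return panes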
|
1b310904b641dda7ad74e98a24f62d573bbd81f8
|
chrome/browser/policy/PRESUBMIT.py
|
chrome/browser/policy/PRESUBMIT.py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for chrome/browser/policy.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
return [
'linux_chromeos',
'linux_chromeos_clang:compile',
]
|
Send try jobs that touch policy code to the linux_chromeos bot too.
|
Send try jobs that touch policy code to the linux_chromeos bot too.
Review URL: https://chromiumcodereview.appspot.com/10473015
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@140268 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
Jonekee/chromium.src,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,anirudhSK/chromium,Jonekee/chromium.src,nacl-webkit/chrome_deps,M4sse/chromium.src,keishi/chromium,dednal/chromium.src,axinging/chromium-crosswalk,zcbenz/cefode-chromium,nacl-webkit/chrome_deps,keishi/chromium,pozdnyakov/chromium-crosswalk,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,PeterWangIntel/chromium-crosswalk,hujiajie/pa-chromium,crosswalk-project/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,Pluto-tv/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,M4sse/chromium.src,junmin-zhu/chromium-rivertrail,ChromiumWebApps/chromium,ltilve/chromium,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,anirudhSK/chromium,chuan9/chromium-crosswalk,Pluto-tv/chromium-crosswalk,timopulkkinen/BubbleFish,Chilledheart/chromium,mogoweb/chromium-crosswalk,dushu1203/chromium.src,jaruba/chromium.src,hujiajie/pa-chromium,crosswalk-project/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,ChromiumWebApps/chromium,fujunwei/chromium-crosswalk,chuan9/chromium-crosswalk,timopulkkinen/BubbleFish,Chilledheart/chromium,mogoweb/chromium-crosswalk,dushu1203/chromium.src,pozdnyakov/chromium-crosswalk,markYoungH/chromium.src,anirudhSK/chromium,chuan9/chromium-crosswalk,Pluto-tv/chromium-crosswalk,timopulkkinen/BubbleFish,anirudhSK/chromium,axinging/chromium-crosswalk,zcbenz/cefode-chromium,nacl-webkit/chrome_deps,keishi/chromium,pozdnyakov/chromium-crosswalk,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,keishi/chromium,ondra-novak/chromium.src,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,timopulkkinen/BubbleFish,anirudhSK/chromium,axinging/chromium-crosswalk,timopulkkinen/BubbleFish,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk,bright-sparks/chromium-spacewalk,crosswalk-project/chromium-crosswalk-efl,junmin-zhu/chromium-rivertrail,hujiajie/pa-chromium,dushu1203/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,dushu1203/chromium.src,ltilve/chromium,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,zcbenz/cefode-chromium,M4sse/chromium.src,crosswalk-project/chromium-crosswalk-efl,ltilve/chromium,jaruba/chromium.src,crosswalk-project/chromium-crosswalk-efl,timopulkkinen/BubbleFish,axinging/chromium-crosswalk,hujiajie/pa-chromium,krieger-od/nwjs_chromium.src,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,keishi/chromium,anirudhSK/chromium,fujunwei/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,hujiajie/pa-chromium,TheTypoMaster/chromium-crosswalk,fujunwei/chromium-crosswalk,ondra-novak/chromium.src,anirudhSK/chromium,hgl888/chromium-crosswalk-efl,ltilve/chromium,TheTypoMaster/chromium-crosswalk,fujunwei/chromium-crosswalk,chuan9/chromium-crosswalk,markYoungH/chromium.src,hgl888/chromium-crosswalk,junmin-zhu/chromium-rivertrail,axinging/chromium-crosswalk,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,ltilve/chromium,Pluto-tv/chromium-crosswalk,nacl-webkit/chrome_deps,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,Jonekee/chromium.src,zcbenz/cefode-chromium,krieger-od/nwjs_chromium.src,Chilledheart/chromium,axinging/chromium-crosswalk,keishi/chromium,Fireblend/chromium-crosswalk,M4sse/chromium.src,dushu1203/chromium.src,chuan9/chromium-crosswalk,pozdnyakov/chromium-crosswalk,bright-sparks/chromium-spacewalk,ChromiumWebApps/chromium,dednal/chromium.src,ChromiumWebApps/chromium,nacl-webkit/chrome_deps,Chilledheart/chromium,littlstar/chromium.src,ChromiumWebApps/chromium,mogoweb/chromium-crosswalk,timopulkkinen/BubbleFish,jaruba/chromium.src,mogoweb/chromium-crosswalk,hgl888/chromium-crosswalk,krieger-od/nwjs_chromium.src,littlstar/chromium.src,dednal/chromium.src,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,zcbenz/cefode-chromium,PeterWangIntel/chromium-crosswalk,chuan9/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,ChromiumWebApps/chromium,markYoungH/chromium.src,patrickm/chromium.src,Just-D/chromium-1,dednal/chromium.src,bright-sparks/chromium-spacewalk,dednal/chromium.src,Pluto-tv/chromium-crosswalk,bright-sparks/chromium-spacewalk,zcbenz/cefode-chromium,hujiajie/pa-chromium,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,mogoweb/chromium-crosswalk,hgl888/chromium-crosswalk-efl,hujiajie/pa-chromium,crosswalk-project/chromium-crosswalk-efl,ondra-novak/chromium.src,timopulkkinen/BubbleFish,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,markYoungH/chromium.src,hgl888/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,ondra-novak/chromium.src,Just-D/chromium-1,hgl888/chromium-crosswalk,Chilledheart/chromium,bright-sparks/chromium-spacewalk,nacl-webkit/chrome_deps,junmin-zhu/chromium-rivertrail,mohamed--abdel-maksoud/chromium.src,junmin-zhu/chromium-rivertrail,mohamed--abdel-maksoud/chromium.src,mogoweb/chromium-crosswalk,axinging/chromium-crosswalk,dushu1203/chromium.src,TheTypoMaster/chromium-crosswalk,hujiajie/pa-chromium,timopulkkinen/BubbleFish,ondra-novak/chromium.src,zcbenz/cefode-chromium,hujiajie/pa-chromium,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,zcbenz/cefode-chromium,dushu1203/chromium.src,anirudhSK/chromium,zcbenz/cefode-chromium,Jonekee/chromium.src,PeterWangIntel/chromium-crosswalk,markYoungH/chromium.src,axinging/chromium-crosswalk,junmin-zhu/chromium-rivertrail,keishi/chromium,Pluto-tv/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,zcbenz/cefode-chromium,markYoungH/chromium.src,Jonekee/chromium.src,mogoweb/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,pozdnyakov/chromium-crosswalk,markYoungH/chromium.src,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Jonekee/chromium.src,ChromiumWebApps/chromium,jaruba/chromium.src,Chilledheart/chromium,Chilledheart/chromium,M4sse/chromium.src,nacl-webkit/chrome_deps,M4sse/chromium.src,pozdnyakov/chromium-crosswalk,jaruba/chromium.src,timopulkkinen/BubbleFish,mohamed--abdel-maksoud/chromium.src,fujunwei/chromium-crosswalk,patrickm/chromium.src,Jonekee/chromium.src,pozdnyakov/chromium-crosswalk,axinging/chromium-crosswalk,dushu1203/chromium.src,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,ltilve/chromium,nacl-webkit/chrome_deps,M4sse/chromium.src,ltilve/chromium,Just-D/chromium-1,pozdnyakov/chromium-crosswalk,M4sse/chromium.src,junmin-zhu/chromium-rivertrail,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,Fireblend/chromium-crosswalk,dednal/chromium.src,ChromiumWebApps/chromium,Pluto-tv/chromium-crosswalk,ltilve/chromium,patrickm/chromium.src,Chilledheart/chromium,timopulkkinen/BubbleFish,anirudhSK/chromium,hujiajie/pa-chromium,nacl-webkit/chrome_deps
|
Send try jobs that touch policy code to the linux_chromeos bot too.
Review URL: https://chromiumcodereview.appspot.com/10473015
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@140268 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for chrome/browser/policy.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
return [
'linux_chromeos',
'linux_chromeos_clang:compile',
]
|
<commit_before><commit_msg>Send try jobs that touch policy code to the linux_chromeos bot too.
Review URL: https://chromiumcodereview.appspot.com/10473015
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@140268 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for chrome/browser/policy.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
return [
'linux_chromeos',
'linux_chromeos_clang:compile',
]
|
Send try jobs that touch policy code to the linux_chromeos bot too.
Review URL: https://chromiumcodereview.appspot.com/10473015
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@140268 0039d316-1c4b-4281-b951-d872f2087c98# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for chrome/browser/policy.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
return [
'linux_chromeos',
'linux_chromeos_clang:compile',
]
|
<commit_before><commit_msg>Send try jobs that touch policy code to the linux_chromeos bot too.
Review URL: https://chromiumcodereview.appspot.com/10473015
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@140268 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for chrome/browser/policy.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
return [
'linux_chromeos',
'linux_chromeos_clang:compile',
]
|
|
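GetPreferredTrySlaves routes every change under chrome/browser/policy to the chromeos bots. A self-contained sketch of that routing idea in plain Python; the default bot name and the helper signature are made up and this is not the real presubmit API:

POLICY_BOTS = ['linux_chromeos', 'linux_chromeos_clang:compile']

def preferred_try_slaves(changed_files):
    # Hypothetical helper: add the chromeos bots when policy code is touched.
    bots = ['linux_rel']  # made-up default try bot
    if any(f.startswith('chrome/browser/policy/') for f in changed_files):
        bots.extend(POLICY_BOTS)
    return bots

print(preferred_try_slaves(['chrome/browser/policy/PRESUBMIT.py']))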
e6a589d9a1a81e32de6f2e50ea7aaee4f5f6a6c3
|
migrations/versions/820_untweak_g9_lots.py
|
migrations/versions/820_untweak_g9_lots.py
|
"""no lots are products for G-Cloud 9 - they are all services
Revision ID: 820
Revises: 810
Create Date: 2017-02-01 11:20:00.000000
"""
# revision identifiers, used by Alembic.
revision = '820'
down_revision = '810'
from alembic import op
def upgrade():
# Update G-Cloud 9 lot records
op.execute("""
UPDATE lots SET data = '{"unitSingular": "service", "unitPlural": "services"}'
WHERE slug in ('cloud-hosting', 'cloud-software');
""")
def downgrade():
op.execute("""
UPDATE lots SET data = '{"unitSingular": "product", "unitPlural": "products"}'
WHERE slug in ('cloud-hosting', 'cloud-software');
""")
|
Revert change from services to products for G9 lots
|
Revert change from services to products for G9 lots
Just when I thought I was out, they pull me back in.
|
Python
|
mit
|
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
|
Revert change from services to products for G9 lots
Just when I thought I was out, they pull me back in.
|
"""no lots are products for G-Cloud 9 - they are all services
Revision ID: 820
Revises: 810
Create Date: 2017-02-01 11:20:00.000000
"""
# revision identifiers, used by Alembic.
revision = '820'
down_revision = '810'
from alembic import op
def upgrade():
# Update G-Cloud 9 lot records
op.execute("""
UPDATE lots SET data = '{"unitSingular": "service", "unitPlural": "services"}'
WHERE slug in ('cloud-hosting', 'cloud-software');
""")
def downgrade():
op.execute("""
UPDATE lots SET data = '{"unitSingular": "product", "unitPlural": "products"}'
WHERE slug in ('cloud-hosting', 'cloud-software');
""")
|
<commit_before><commit_msg>Revert change from services to products for G9 lots
Just when I thought I was out, they pull me back in.<commit_after>
|
"""no lots are products for G-Cloud 9 - they are all services
Revision ID: 820
Revises: 810
Create Date: 2017-02-01 11:20:00.000000
"""
# revision identifiers, used by Alembic.
revision = '820'
down_revision = '810'
from alembic import op
def upgrade():
# Update G-Cloud 9 lot records
op.execute("""
UPDATE lots SET data = '{"unitSingular": "service", "unitPlural": "services"}'
WHERE slug in ('cloud-hosting', 'cloud-software');
""")
def downgrade():
op.execute("""
UPDATE lots SET data = '{"unitSingular": "product", "unitPlural": "products"}'
WHERE slug in ('cloud-hosting', 'cloud-software');
""")
|
Revert change from services to products for G9 lots
Just when I thought I was out, they pull me back in."""no lots are products for G-Cloud 9 - they are all services
Revision ID: 820
Revises: 810
Create Date: 2017-02-01 11:20:00.000000
"""
# revision identifiers, used by Alembic.
revision = '820'
down_revision = '810'
from alembic import op
def upgrade():
# Update G-Cloud 9 lot records
op.execute("""
UPDATE lots SET data = '{"unitSingular": "service", "unitPlural": "services"}'
WHERE slug in ('cloud-hosting', 'cloud-software');
""")
def downgrade():
op.execute("""
UPDATE lots SET data = '{"unitSingular": "product", "unitPlural": "products"}'
WHERE slug in ('cloud-hosting', 'cloud-software');
""")
|
<commit_before><commit_msg>Revert change from services to products for G9 lots
Just when I thought I was out, they pull me back in.<commit_after>"""no lots are products for G-Cloud 9 - they are all services
Revision ID: 820
Revises: 810
Create Date: 2017-02-01 11:20:00.000000
"""
# revision identifiers, used by Alembic.
revision = '820'
down_revision = '810'
from alembic import op
def upgrade():
# Update G-Cloud 9 lot records
op.execute("""
UPDATE lots SET data = '{"unitSingular": "service", "unitPlural": "services"}'
WHERE slug in ('cloud-hosting', 'cloud-software');
""")
def downgrade():
op.execute("""
UPDATE lots SET data = '{"unitSingular": "product", "unitPlural": "products"}'
WHERE slug in ('cloud-hosting', 'cloud-software');
""")
|
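The migration issues raw SQL strings; an equivalent sketch using SQLAlchemy's lightweight table construct, so the values are bound as parameters (this assumes lots.data is a plain text column and shows only the two operations):

from alembic import op
import sqlalchemy as sa

lots = sa.table('lots', sa.column('slug', sa.String), sa.column('data', sa.Text))

def upgrade():
    op.execute(
        lots.update()
        .where(lots.c.slug.in_(['cloud-hosting', 'cloud-software']))
        .values(data='{"unitSingular": "service", "unitPlural": "services"}')
    )

def downgrade():
    op.execute(
        lots.update()
        .where(lots.c.slug.in_(['cloud-hosting', 'cloud-software']))
        .values(data='{"unitSingular": "product", "unitPlural": "products"}')
    )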
|
deaa7d58271fab03bd403f068ef0e261bc6a6577
|
mosqito/sound_synthesis/read_spectro_xls.py
|
mosqito/sound_synthesis/read_spectro_xls.py
|
# -*- coding: utf-8 -*-
from pandas import ExcelFile, read_excel
from numpy import squeeze, zeros, transpose
def read_spectro_xls(file_name):
"""Read spectrogram as an xls file, format: A3:AN = time, B3:BN = speed,
C1:ZZ1 = DC, C2:ZZ2 = orders, C3:ZZN = spectrum
and compute the frequencies
Parameters
----------
file_name : str
name of the xls file
Outputs
-------
spectrum : ndarray
2D array of the spectrum
freqs : array
1D array of the computed frequencies
time : array
1D array of the time
"""
xls_file = ExcelFile(file_name)
# Read excel file
spectrum = transpose(read_excel(xls_file, sheet_name="Sheet1", header=None, skiprows=2, usecols = 'C:ZZ', squeeze=True).to_numpy())
time = squeeze(read_excel(xls_file, sheet_name="Sheet1", header=None, skiprows=2, usecols = 'A', squeeze=True).to_numpy())
speed = squeeze(read_excel(xls_file, sheet_name="Sheet1", header=None, skiprows=2, usecols = 'B', squeeze=True).to_numpy())
DC = squeeze(read_excel(xls_file, sheet_name="Sheet1", header=None, nrows=1, usecols = 'C:ZZ', squeeze=True).to_numpy())
orders = squeeze(read_excel(xls_file, sheet_name="Sheet1", header=None, skiprows=1, nrows=1, usecols = 'C:ZZ', squeeze=True).to_numpy())
# Compute frequencies
freqs = zeros(spectrum.shape)
for i in range(len(DC)):
freqs[i,:] = DC[i] + orders[i] * speed / 60
return spectrum, freqs, time
|
Read spectro from excel file with orders as input
|
[NF] Read spectro from excel file with orders as input
|
Python
|
apache-2.0
|
Eomys/MoSQITo
|
[NF] Read spectro from excel file with orders as input
|
# -*- coding: utf-8 -*-
from pandas import ExcelFile, read_excel
from numpy import squeeze, zeros, transpose
def read_spectro_xls(file_name):
"""Read spectrogram as an xls file, format: A3:AN = time, B3:BN = speed,
C1:ZZ1 = DC, C2:ZZ2 = orders, C3:ZZN = spectrum
and compute the frequencies
Parameters
----------
file_name : str
name of the xls file
Outputs
-------
spectrum : ndarray
2D array of the spectrum
freqs : array
1D array of the computed frequencies
time : array
1D array of the time
"""
xls_file = ExcelFile(file_name)
# Read excel file
spectrum = transpose(read_excel(xls_file, sheet_name="Sheet1", header=None, skiprows=2, usecols = 'C:ZZ', squeeze=True).to_numpy())
time = squeeze(read_excel(xls_file, sheet_name="Sheet1", header=None, skiprows=2, usecols = 'A', squeeze=True).to_numpy())
speed = squeeze(read_excel(xls_file, sheet_name="Sheet1", header=None, skiprows=2, usecols = 'B', squeeze=True).to_numpy())
DC = squeeze(read_excel(xls_file, sheet_name="Sheet1", header=None, nrows=1, usecols = 'C:ZZ', squeeze=True).to_numpy())
orders = squeeze(read_excel(xls_file, sheet_name="Sheet1", header=None, skiprows=1, nrows=1, usecols = 'C:ZZ', squeeze=True).to_numpy())
# Compute frequencies
freqs = zeros(spectrum.shape)
for i in range(len(DC)):
freqs[i,:] = DC[i] + orders[i] * speed / 60
return spectrum, freqs, time
|
<commit_before><commit_msg>[NF] Read spectro from excel file with orders as input<commit_after>
|
# -*- coding: utf-8 -*-
from pandas import ExcelFile, read_excel
from numpy import squeeze, zeros, transpose
def read_spectro_xls(file_name):
"""Read spectrogram as an xls file, format: A3:AN = time, B3:BN = speed,
C1:ZZ1 = DC, C2:ZZ2 = orders, C3:ZZN = spectrum
and compute the frequencies
Parameters
----------
file_name : str
name of the xls file
Outputs
-------
spectrum : ndarray
2D array of the spectrum
freqs : array
1D array of the computed frequencies
time : array
1D array of the time
"""
xls_file = ExcelFile(file_name)
# Read excel file
spectrum = transpose(read_excel(xls_file, sheet_name="Sheet1", header=None, skiprows=2, usecols = 'C:ZZ', squeeze=True).to_numpy())
time = squeeze(read_excel(xls_file, sheet_name="Sheet1", header=None, skiprows=2, usecols = 'A', squeeze=True).to_numpy())
speed = squeeze(read_excel(xls_file, sheet_name="Sheet1", header=None, skiprows=2, usecols = 'B', squeeze=True).to_numpy())
DC = squeeze(read_excel(xls_file, sheet_name="Sheet1", header=None, nrows=1, usecols = 'C:ZZ', squeeze=True).to_numpy())
orders = squeeze(read_excel(xls_file, sheet_name="Sheet1", header=None, skiprows=1, nrows=1, usecols = 'C:ZZ', squeeze=True).to_numpy())
# Compute frequencies
freqs = zeros(spectrum.shape)
for i in range(len(DC)):
freqs[i,:] = DC[i] + orders[i] * speed / 60
return spectrum, freqs, time
|
[NF] Read spectro from excel file with orders as input# -*- coding: utf-8 -*-
from pandas import ExcelFile, read_excel
from numpy import squeeze, zeros, transpose
def read_spectro_xls(file_name):
"""Read spectrogram as an xls file, format: A3:AN = time, B3:BN = speed,
C1:ZZ1 = DC, C2:ZZ2 = orders, C3:ZZN = spectrum
and compute the frequencies
Parameters
----------
file_name : str
name of the xls file
Outputs
-------
spectrum : ndarray
2D array of the spectrum
freqs : array
1D array of the computed frequencies
time : array
1D array of the time
"""
xls_file = ExcelFile(file_name)
# Read excel file
spectrum = transpose(read_excel(xls_file, sheet_name="Sheet1", header=None, skiprows=2, usecols = 'C:ZZ', squeeze=True).to_numpy())
time = squeeze(read_excel(xls_file, sheet_name="Sheet1", header=None, skiprows=2, usecols = 'A', squeeze=True).to_numpy())
speed = squeeze(read_excel(xls_file, sheet_name="Sheet1", header=None, skiprows=2, usecols = 'B', squeeze=True).to_numpy())
DC = squeeze(read_excel(xls_file, sheet_name="Sheet1", header=None, nrows=1, usecols = 'C:ZZ', squeeze=True).to_numpy())
orders = squeeze(read_excel(xls_file, sheet_name="Sheet1", header=None, skiprows=1, nrows=1, usecols = 'C:ZZ', squeeze=True).to_numpy())
# Compute frequencies
freqs = zeros(spectrum.shape)
for i in range(len(DC)):
freqs[i,:] = DC[i] + orders[i] * speed / 60
return spectrum, freqs, time
|
<commit_before><commit_msg>[NF] Read spectro from excel file with orders as input<commit_after># -*- coding: utf-8 -*-
from pandas import ExcelFile, read_excel
from numpy import squeeze, zeros, transpose
def read_spectro_xls(file_name):
"""Read spectrogram as an xls file, format: A3:AN = time, B3:BN = speed,
C1:ZZ1 = DC, C2:ZZ2 = orders, C3:ZZN = spectrum
and compute the frequencies
Parameters
----------
file_name : str
name of the xls file
Outputs
-------
spectrum : ndarray
2D array of the spectrum
freqs : array
1D array of the computed frequencies
time : array
1D array of the time
"""
xls_file = ExcelFile(file_name)
# Read excel file
spectrum = transpose(read_excel(xls_file, sheet_name="Sheet1", header=None, skiprows=2, usecols = 'C:ZZ', squeeze=True).to_numpy())
time = squeeze(read_excel(xls_file, sheet_name="Sheet1", header=None, skiprows=2, usecols = 'A', squeeze=True).to_numpy())
speed = squeeze(read_excel(xls_file, sheet_name="Sheet1", header=None, skiprows=2, usecols = 'B', squeeze=True).to_numpy())
DC = squeeze(read_excel(xls_file, sheet_name="Sheet1", header=None, nrows=1, usecols = 'C:ZZ', squeeze=True).to_numpy())
orders = squeeze(read_excel(xls_file, sheet_name="Sheet1", header=None, skiprows=1, nrows=1, usecols = 'C:ZZ', squeeze=True).to_numpy())
# Compute frequencies
freqs = zeros(spectrum.shape)
for i in range(len(DC)):
freqs[i,:] = DC[i] + orders[i] * speed / 60
return spectrum, freqs, time
|
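The loop fills freqs one order at a time; the same computation can be written with NumPy broadcasting, assuming DC and orders hold one entry per order row and speed one entry per time step:

import numpy as np

def compute_freqs(DC, orders, speed):
    # freqs[i, j] = DC[i] + orders[i] * speed[j] / 60, without a Python loop.
    DC = np.asarray(DC, dtype=float)
    orders = np.asarray(orders, dtype=float)
    speed = np.asarray(speed, dtype=float)
    return DC[:, None] + orders[:, None] * speed[None, :] / 60.0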
|
8782775bceeb01a1985627e1203359b6d67d5272
|
numba/typesystem/exttypes/attributestype.py
|
numba/typesystem/exttypes/attributestype.py
|
from numba.typesystem import *
#------------------------------------------------------------------------
# Extension Attributes Type
#------------------------------------------------------------------------
class ExtensionAttributesTableType(NumbaType):
"""
Type for extension type attributes.
"""
def __init__(self, parents):
# List of parent extension attribute table types
self.parents = parents
# attribute_name -> attribute_type
self.attributedict = {}
def need_tp_dealloc(self):
"""
Returns whether this extension type needs a tp_dealloc, tp_traverse
and tp_clear filled out.
"""
if self.parent_type is not None and self.parent_type.need_tp_dealloc:
result = False
else:
field_types = self.attribute_struct.fielddict.itervalues()
result = any(map(is_obj, field_types))
self._need_tp_dealloc = result
return result
|
Add extension attribute table type
|
Add extension attribute table type
|
Python
|
bsd-2-clause
|
numba/numba,stuartarchibald/numba,jriehl/numba,GaZ3ll3/numba,pombredanne/numba,stonebig/numba,stuartarchibald/numba,stefanseefeld/numba,shiquanwang/numba,gdementen/numba,pitrou/numba,GaZ3ll3/numba,stefanseefeld/numba,pitrou/numba,sklam/numba,pombredanne/numba,pitrou/numba,cpcloud/numba,gdementen/numba,IntelLabs/numba,jriehl/numba,cpcloud/numba,stuartarchibald/numba,stuartarchibald/numba,shiquanwang/numba,pombredanne/numba,numba/numba,gdementen/numba,seibert/numba,pitrou/numba,pitrou/numba,numba/numba,stonebig/numba,cpcloud/numba,ssarangi/numba,sklam/numba,gmarkall/numba,GaZ3ll3/numba,jriehl/numba,ssarangi/numba,numba/numba,seibert/numba,stonebig/numba,gmarkall/numba,gdementen/numba,gmarkall/numba,stefanseefeld/numba,IntelLabs/numba,seibert/numba,stefanseefeld/numba,gmarkall/numba,jriehl/numba,cpcloud/numba,IntelLabs/numba,stonebig/numba,pombredanne/numba,pombredanne/numba,ssarangi/numba,gmarkall/numba,shiquanwang/numba,GaZ3ll3/numba,numba/numba,IntelLabs/numba,jriehl/numba,ssarangi/numba,GaZ3ll3/numba,stefanseefeld/numba,gdementen/numba,stonebig/numba,stuartarchibald/numba,ssarangi/numba,sklam/numba,seibert/numba,seibert/numba,sklam/numba,sklam/numba,cpcloud/numba,IntelLabs/numba
|
Add extension attribute table type
|
from numba.typesystem import *
#------------------------------------------------------------------------
# Extension Attributes Type
#------------------------------------------------------------------------
class ExtensionAttributesTableType(NumbaType):
"""
Type for extension type attributes.
"""
def __init__(self, parents):
# List of parent extension attribute table types
self.parents = parents
# attribute_name -> attribute_type
self.attributedict = {}
def need_tp_dealloc(self):
"""
Returns whether this extension type needs a tp_dealloc, tp_traverse
and tp_clear filled out.
"""
if self.parent_type is not None and self.parent_type.need_tp_dealloc:
result = False
else:
field_types = self.attribute_struct.fielddict.itervalues()
result = any(map(is_obj, field_types))
self._need_tp_dealloc = result
return result
|
<commit_before><commit_msg>Add extension attribute table type<commit_after>
|
from numba.typesystem import *
#------------------------------------------------------------------------
# Extension Attributes Type
#------------------------------------------------------------------------
class ExtensionAttributesTableType(NumbaType):
"""
Type for extension type attributes.
"""
def __init__(self, parents):
# List of parent extension attribute table types
self.parents = parents
# attribute_name -> attribute_type
self.attributedict = {}
def need_tp_dealloc(self):
"""
Returns whether this extension type needs a tp_dealloc, tp_traverse
and tp_clear filled out.
"""
if self.parent_type is not None and self.parent_type.need_tp_dealloc:
result = False
else:
field_types = self.attribute_struct.fielddict.itervalues()
result = any(map(is_obj, field_types))
self._need_tp_dealloc = result
return result
|
Add extension attribute table typefrom numba.typesystem import *
#------------------------------------------------------------------------
# Extension Attributes Type
#------------------------------------------------------------------------
class ExtensionAttributesTableType(NumbaType):
"""
Type for extension type attributes.
"""
def __init__(self, parents):
# List of parent extension attribute table types
self.parents = parents
# attribute_name -> attribute_type
self.attributedict = {}
def need_tp_dealloc(self):
"""
Returns whether this extension type needs a tp_dealloc, tp_traverse
and tp_clear filled out.
"""
if self.parent_type is not None and self.parent_type.need_tp_dealloc:
result = False
else:
field_types = self.attribute_struct.fielddict.itervalues()
result = any(map(is_obj, field_types))
self._need_tp_dealloc = result
return result
|
<commit_before><commit_msg>Add extension attribute table type<commit_after>from numba.typesystem import *
#------------------------------------------------------------------------
# Extension Attributes Type
#------------------------------------------------------------------------
class ExtensionAttributesTableType(NumbaType):
"""
Type for extension type attributes.
"""
def __init__(self, parents):
# List of parent extension attribute table types
self.parents = parents
# attribute_name -> attribute_type
self.attributedict = {}
def need_tp_dealloc(self):
"""
Returns whether this extension type needs a tp_dealloc, tp_traverse
and tp_clear filled out.
"""
if self.parent_type is not None and self.parent_type.need_tp_dealloc:
result = False
else:
field_types = self.attribute_struct.fielddict.itervalues()
result = any(map(is_obj, field_types))
self._need_tp_dealloc = result
return result
|
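need_tp_dealloc references self.parent_type and self.attribute_struct, which the snippet never defines, so it is not runnable as recorded. A toy sketch of the underlying idea, an attribute table that falls back to its parent tables; all names here are illustrative, not numba's API:

class AttributeTable(object):
    def __init__(self, parents, attributedict=None):
        self.parents = parents                    # parent tables, nearest first
        self.attributedict = attributedict or {}  # attribute_name -> type

    def lookup(self, name):
        # Check this table first, then each parent chain depth-first.
        if name in self.attributedict:
            return self.attributedict[name]
        for parent in self.parents:
            found = parent.lookup(name)
            if found is not None:
                return found
        return None

base = AttributeTable([], {'x': 'int'})
derived = AttributeTable([base], {'y': 'float'})
assert derived.lookup('x') == 'int'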
|
6cc2a8c748d50799d97ab8096e3fc2a82ebf674c
|
edb/tools/__main__.py
|
edb/tools/__main__.py
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub to allow invoking `edb` as `python -m edb.tools`."""
import sys
from edb.tools import edbcommands
if __name__ == '__main__':
sys.exit(edbcommands(prog_name='edb'))
|
Allow running the `edb` command as `python -m edb.tools`
|
Allow running the `edb` command as `python -m edb.tools`
|
Python
|
apache-2.0
|
edgedb/edgedb,edgedb/edgedb,edgedb/edgedb
|
Allow running the `edb` command as `python -m edb.tools`
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub to allow invoking `edb` as `python -m edb.tools`."""
import sys
from edb.tools import edbcommands
if __name__ == '__main__':
sys.exit(edbcommands(prog_name='edb'))
|
<commit_before><commit_msg>Allow running the `edb` command as `python -m edb.tools`<commit_after>
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub to allow invoking `edb` as `python -m edb.tools`."""
import sys
from edb.tools import edbcommands
if __name__ == '__main__':
sys.exit(edbcommands(prog_name='edb'))
|
Allow running the `edb` command as `python -m edb.tools`#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub to allow invoking `edb` as `python -m edb.tools`."""
import sys
from edb.tools import edbcommands
if __name__ == '__main__':
sys.exit(edbcommands(prog_name='edb'))
|
<commit_before><commit_msg>Allow running the `edb` command as `python -m edb.tools`<commit_after>#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub to allow invoking `edb` as `python -m edb.tools`."""
import sys
from edb.tools import edbcommands
if __name__ == '__main__':
sys.exit(edbcommands(prog_name='edb'))
|
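The prog_name keyword suggests edbcommands is a click command group; a generic sketch of the same python -m stub for a hypothetical click-based package (package and command names are made up):

# mypkg/__main__.py, hypothetical package using the same stub pattern
import sys

import click

@click.group()
def cli():
    """Top-level command group."""

@cli.command()
def hello():
    click.echo('hello from mypkg')

if __name__ == '__main__':
    sys.exit(cli(prog_name='mypkg'))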
|
2cddde6dcee901021d449bf956bf144617ab3705
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
PACKAGES = [
'lib',
'lib.scripts',
'lib.scripts.biosql',
'lib.scripts.blast',
'lib.scripts.ftp',
'lib.scripts.genbank',
'lib.scripts.manager',
'lib.scripts.multiprocessing',
'lib.scripts.phylogenetic_analyses'
]
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='orthologs',
description="A project that will help to analyze orthologous gense.",
version='0.1.0',
long_description=long_description,
url='https://github.com/robear22890/Orthologs-Project',
license='?',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Visualization',
'Programming Language :: Python :: 3',
'Operating System :: Unix',
'Natural Language :: English'
],
packages=PACKAGES,
)
|
Develop here. Belongs in top level Orthologs Project.
|
Develop here. Belongs in top level Orthologs Project.
|
Python
|
mit
|
datasnakes/Datasnakes-Scripts,datasnakes/Datasnakes-Scripts,datasnakes/Datasnakes-Scripts,datasnakes/Datasnakes-Scripts,datasnakes/Datasnakes-Scripts,datasnakes/Datasnakes-Scripts
|
Develop here. Belongs in top level Orthologs Project.
|
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
PACKAGES = [
'lib',
'lib.scripts',
'lib.scripts.biosql',
'lib.scripts.blast',
'lib.scripts.ftp',
'lib.scripts.genbank',
'lib.scripts.manager',
'lib.scripts.multiprocessing',
'lib.scripts.phylogenetic_analyses'
]
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='orthologs',
description="A project that will help to analyze orthologous gense.",
version='0.1.0',
long_description=long_description,
url='https://github.com/robear22890/Orthologs-Project',
license='?',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Visualization',
'Programming Language :: Python :: 3',
'Operating System :: Unix',
'Natural Language :: English'
],
packages=PACKAGES,
)
|
<commit_before><commit_msg>Develop here. Belongs in top level Orthologs Project.<commit_after>
|
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
PACKAGES = [
'lib',
'lib.scripts',
'lib.scripts.biosql',
'lib.scripts.blast',
'lib.scripts.ftp',
'lib.scripts.genbank',
'lib.scripts.manager',
'lib.scripts.multiprocessing',
'lib.scripts.phylogenetic_analyses'
]
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='orthologs',
description="A project that will help to analyze orthologous gense.",
version='0.1.0',
long_description=long_description,
url='https://github.com/robear22890/Orthologs-Project',
license='?',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Visualization',
'Programming Language :: Python :: 3',
'Operating System :: Unix',
'Natural Language :: English'
],
packages=PACKAGES,
)
|
Develop here. Belongs in top level Orthologs Project.
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
PACKAGES = [
'lib',
'lib.scripts',
'lib.scripts.biosql',
'lib.scripts.blast',
'lib.scripts.ftp',
'lib.scripts.genbank',
'lib.scripts.manager',
'lib.scripts.multiprocessing',
'lib.scripts.phylogenetic_analyses'
]
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='orthologs',
description="A project that will help to analyze orthologous gense.",
version='0.1.0',
long_description=long_description,
url='https://github.com/robear22890/Orthologs-Project',
license='?',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Visualization',
'Programming Language :: Python :: 3',
'Operating System :: Unix',
'Natural Language :: English'
],
packages=PACKAGES,
)
|
<commit_before><commit_msg>Develop here. Belongs in top level Orthologs Project.<commit_after>
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
PACKAGES = [
'lib',
'lib.scripts',
'lib.scripts.biosql',
'lib.scripts.blast',
'lib.scripts.ftp',
'lib.scripts.genbank',
'lib.scripts.manager',
'lib.scripts.multiprocessing',
'lib.scripts.phylogenetic_analyses'
]
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='orthologs',
description="A project that will help to analyze orthologous gense.",
version='0.1.0',
long_description=long_description,
url='https://github.com/robear22890/Orthologs-Project',
license='?',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Visualization',
'Programming Language :: Python :: 3',
'Operating System :: Unix',
'Natural Language :: English'
],
packages=PACKAGES,
)
|
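find_packages is imported in the record but never used; a sketch of the alternative that discovers the lib packages automatically instead of maintaining PACKAGES by hand (the include patterns are inferred from the PACKAGES list):

from setuptools import setup, find_packages

setup(
    name='orthologs',
    version='0.1.0',
    # Discover lib and all of its subpackages instead of listing them by hand.
    packages=find_packages(include=['lib', 'lib.*']),
)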
|
e583cb2baba822863cda6caf80dd5ebd0ce042c8
|
helevents/migrations/0003_auto_20170915_1529.py
|
helevents/migrations/0003_auto_20170915_1529.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-15 12:29
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('helevents', '0002_auto_20151231_1111'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
),
]
|
Add django 1.11 user migration to helevents
|
Add django 1.11 user migration to helevents
|
Python
|
mit
|
City-of-Helsinki/linkedevents,City-of-Helsinki/linkedevents,City-of-Helsinki/linkedevents
|
Add django 1.11 user migration to helevents
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-15 12:29
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('helevents', '0002_auto_20151231_1111'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
),
]
|
<commit_before><commit_msg>Add django 1.11 user migration to helevents<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-15 12:29
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('helevents', '0002_auto_20151231_1111'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
),
]
|
Add django 1.11 user migration to helevents# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-15 12:29
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('helevents', '0002_auto_20151231_1111'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
),
]
|
<commit_before><commit_msg>Add django 1.11 user migration to helevents<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-15 12:29
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('helevents', '0002_auto_20151231_1111'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
),
]
|
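AlterField operations like this are what makemigrations emits after a Django upgrade; the username definition corresponding to the recorded operation, reconstructed from the migration itself, looks roughly like:

from django.contrib.auth.validators import UnicodeUsernameValidator
from django.db import models

username = models.CharField(
    'username',
    max_length=150,
    unique=True,
    help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.',
    validators=[UnicodeUsernameValidator()],
    error_messages={'unique': 'A user with that username already exists.'},
)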
|
16a5b7897a76c9a53e60d78baf7fb94fcc59b220
|
powerline/segments/shell.py
|
powerline/segments/shell.py
|
# -*- coding: utf-8 -*-
from powerline.theme import requires_segment_info
@requires_segment_info
def last_status(segment_info):
'''Return last exit code.'''
return str(segment_info.last_exit_code) if segment_info.last_exit_code else None
@requires_segment_info
def last_pipe_status(segment_info):
'''Return last pipe status.'''
if any(segment_info.last_pipe_status):
return [{"contents": str(status), "highlight_group": "exit_fail" if status else "exit_success"}
for status in segment_info.last_pipe_status]
else:
return None
|
# -*- coding: utf-8 -*-
from powerline.theme import requires_segment_info
@requires_segment_info
def last_status(segment_info):
'''Return last exit code.'''
if not segment_info.last_exit_code:
return None
return [{'contents': str(segment_info.last_exit_code), 'highlight_group': 'exit_fail'}]
@requires_segment_info
def last_pipe_status(segment_info):
'''Return last pipe status.'''
if any(segment_info.last_pipe_status):
return [{"contents": str(status), "highlight_group": "exit_fail" if status else "exit_success"}
for status in segment_info.last_pipe_status]
else:
return None
|
Use exit_fail hl group for last_status segment
|
Use exit_fail hl group for last_status segment
Fixes #270
|
Python
|
mit
|
bezhermoso/powerline,darac/powerline,magus424/powerline,wfscheper/powerline,dragon788/powerline,prvnkumar/powerline,xxxhycl2010/powerline,QuLogic/powerline,magus424/powerline,russellb/powerline,seanfisk/powerline,cyrixhero/powerline,blindFS/powerline,bezhermoso/powerline,kenrachynski/powerline,prvnkumar/powerline,junix/powerline,darac/powerline,S0lll0s/powerline,xxxhycl2010/powerline,EricSB/powerline,QuLogic/powerline,s0undt3ch/powerline,wfscheper/powerline,areteix/powerline,junix/powerline,IvanAli/powerline,blindFS/powerline,IvanAli/powerline,dragon788/powerline,s0undt3ch/powerline,S0lll0s/powerline,areteix/powerline,Luffin/powerline,bartvm/powerline,xfumihiro/powerline,kenrachynski/powerline,xfumihiro/powerline,cyrixhero/powerline,S0lll0s/powerline,EricSB/powerline,blindFS/powerline,russellb/powerline,Liangjianghao/powerline,firebitsbr/powerline,junix/powerline,darac/powerline,Liangjianghao/powerline,keelerm84/powerline,firebitsbr/powerline,bartvm/powerline,IvanAli/powerline,areteix/powerline,xxxhycl2010/powerline,EricSB/powerline,dragon788/powerline,xfumihiro/powerline,s0undt3ch/powerline,lukw00/powerline,prvnkumar/powerline,bezhermoso/powerline,firebitsbr/powerline,seanfisk/powerline,Luffin/powerline,bartvm/powerline,Luffin/powerline,wfscheper/powerline,lukw00/powerline,DoctorJellyface/powerline,DoctorJellyface/powerline,QuLogic/powerline,cyrixhero/powerline,russellb/powerline,lukw00/powerline,kenrachynski/powerline,keelerm84/powerline,DoctorJellyface/powerline,Liangjianghao/powerline,seanfisk/powerline,magus424/powerline
|
# -*- coding: utf-8 -*-
from powerline.theme import requires_segment_info
@requires_segment_info
def last_status(segment_info):
'''Return last exit code.'''
return str(segment_info.last_exit_code) if segment_info.last_exit_code else None
@requires_segment_info
def last_pipe_status(segment_info):
'''Return last pipe status.'''
if any(segment_info.last_pipe_status):
return [{"contents": str(status), "highlight_group": "exit_fail" if status else "exit_success"}
for status in segment_info.last_pipe_status]
else:
return None
Use exit_fail hl group for last_status segment
Fixes #270
|
# -*- coding: utf-8 -*-
from powerline.theme import requires_segment_info
@requires_segment_info
def last_status(segment_info):
'''Return last exit code.'''
if not segment_info.last_exit_code:
return None
return [{'contents': str(segment_info.last_exit_code), 'highlight_group': 'exit_fail'}]
@requires_segment_info
def last_pipe_status(segment_info):
'''Return last pipe status.'''
if any(segment_info.last_pipe_status):
return [{"contents": str(status), "highlight_group": "exit_fail" if status else "exit_success"}
for status in segment_info.last_pipe_status]
else:
return None
|
<commit_before># -*- coding: utf-8 -*-
from powerline.theme import requires_segment_info
@requires_segment_info
def last_status(segment_info):
'''Return last exit code.'''
return str(segment_info.last_exit_code) if segment_info.last_exit_code else None
@requires_segment_info
def last_pipe_status(segment_info):
'''Return last pipe status.'''
if any(segment_info.last_pipe_status):
return [{"contents": str(status), "highlight_group": "exit_fail" if status else "exit_success"}
for status in segment_info.last_pipe_status]
else:
return None
<commit_msg>Use exit_fail hl group for last_status segment
Fixes #270<commit_after>
|
# -*- coding: utf-8 -*-
from powerline.theme import requires_segment_info
@requires_segment_info
def last_status(segment_info):
'''Return last exit code.'''
if not segment_info.last_exit_code:
return None
return [{'contents': str(segment_info.last_exit_code), 'highlight_group': 'exit_fail'}]
@requires_segment_info
def last_pipe_status(segment_info):
'''Return last pipe status.'''
if any(segment_info.last_pipe_status):
return [{"contents": str(status), "highlight_group": "exit_fail" if status else "exit_success"}
for status in segment_info.last_pipe_status]
else:
return None
|
# -*- coding: utf-8 -*-
from powerline.theme import requires_segment_info
@requires_segment_info
def last_status(segment_info):
'''Return last exit code.'''
return str(segment_info.last_exit_code) if segment_info.last_exit_code else None
@requires_segment_info
def last_pipe_status(segment_info):
'''Return last pipe status.'''
if any(segment_info.last_pipe_status):
return [{"contents": str(status), "highlight_group": "exit_fail" if status else "exit_success"}
for status in segment_info.last_pipe_status]
else:
return None
Use exit_fail hl group for last_status segment
Fixes #270# -*- coding: utf-8 -*-
from powerline.theme import requires_segment_info
@requires_segment_info
def last_status(segment_info):
'''Return last exit code.'''
if not segment_info.last_exit_code:
return None
return [{'contents': str(segment_info.last_exit_code), 'highlight_group': 'exit_fail'}]
@requires_segment_info
def last_pipe_status(segment_info):
'''Return last pipe status.'''
if any(segment_info.last_pipe_status):
return [{"contents": str(status), "highlight_group": "exit_fail" if status else "exit_success"}
for status in segment_info.last_pipe_status]
else:
return None
|
<commit_before># -*- coding: utf-8 -*-
from powerline.theme import requires_segment_info
@requires_segment_info
def last_status(segment_info):
'''Return last exit code.'''
return str(segment_info.last_exit_code) if segment_info.last_exit_code else None
@requires_segment_info
def last_pipe_status(segment_info):
'''Return last pipe status.'''
if any(segment_info.last_pipe_status):
return [{"contents": str(status), "highlight_group": "exit_fail" if status else "exit_success"}
for status in segment_info.last_pipe_status]
else:
return None
<commit_msg>Use exit_fail hl group for last_status segment
Fixes #270<commit_after># -*- coding: utf-8 -*-
from powerline.theme import requires_segment_info
@requires_segment_info
def last_status(segment_info):
'''Return last exit code.'''
if not segment_info.last_exit_code:
return None
return [{'contents': str(segment_info.last_exit_code), 'highlight_group': 'exit_fail'}]
@requires_segment_info
def last_pipe_status(segment_info):
'''Return last pipe status.'''
if any(segment_info.last_pipe_status):
return [{"contents": str(status), "highlight_group": "exit_fail" if status else "exit_success"}
for status in segment_info.last_pipe_status]
else:
return None
|
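Editorial aside on the record above: the fix swaps a bare string return for a list of segment dicts so the renderer can apply the exit_fail highlight group. A minimal self-contained sketch of that pattern, using a hypothetical stand-in for powerline's segment_info object:

from collections import namedtuple

# Hypothetical stand-in for powerline's segment_info; the real object
# carries far more state than just the exit code.
SegmentInfo = namedtuple('SegmentInfo', ['last_exit_code'])

def last_status(segment_info):
    """Return the exit code as a segment list, or None on success."""
    if not segment_info.last_exit_code:
        return None
    # A dict (rather than a bare string) lets the renderer pick the
    # 'exit_fail' highlight group for non-zero exit codes.
    return [{'contents': str(segment_info.last_exit_code),
             'highlight_group': 'exit_fail'}]

print(last_status(SegmentInfo(0)))    # None
print(last_status(SegmentInfo(127)))  # [{'contents': '127', 'highlight_group': 'exit_fail'}]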
fd8c2f45bd686a0f1e03891faf80f8c95d41f633
|
services/management/commands/empty_search_columns.py
|
services/management/commands/empty_search_columns.py
|
import logging
from django.core.management.base import BaseCommand
from munigeo.models import Address, AdministrativeDivision
from services.models import Service, ServiceNode, Unit
logger = logging.getLogger("search")
MODELS = [Address, AdministrativeDivision, Unit, Service, ServiceNode]
class Command(BaseCommand):
def handle(self, *args, **kwargs):
logger.info("Emptying search columns...")
for model in MODELS:
for lang in ["fi", "sv", "en"]:
logger.info(
f"Emptying search columns for model: {model.__name__} and language {lang}."
)
key = "search_column_%s" % lang
model.objects.update(**{key: None})
|
Add management command that empty search columns
|
Add management command that empty search columns
|
Python
|
agpl-3.0
|
City-of-Helsinki/smbackend,City-of-Helsinki/smbackend
|
Add management command that empty search columns
|
import logging
from django.core.management.base import BaseCommand
from munigeo.models import Address, AdministrativeDivision
from services.models import Service, ServiceNode, Unit
logger = logging.getLogger("search")
MODELS = [Address, AdministrativeDivision, Unit, Service, ServiceNode]
class Command(BaseCommand):
def handle(self, *args, **kwargs):
logger.info("Emptying search columns...")
for model in MODELS:
for lang in ["fi", "sv", "en"]:
logger.info(
f"Emptying search columns for model: {model.__name__} and language {lang}."
)
key = "search_column_%s" % lang
model.objects.update(**{key: None})
|
<commit_before><commit_msg>Add management command that empty search columns<commit_after>
|
import logging
from django.core.management.base import BaseCommand
from munigeo.models import Address, AdministrativeDivision
from services.models import Service, ServiceNode, Unit
logger = logging.getLogger("search")
MODELS = [Address, AdministrativeDivision, Unit, Service, ServiceNode]
class Command(BaseCommand):
def handle(self, *args, **kwargs):
logger.info("Emptying search columns...")
for model in MODELS:
for lang in ["fi", "sv", "en"]:
logger.info(
f"Emptying search columns for model: {model.__name__} and language {lang}."
)
key = "search_column_%s" % lang
model.objects.update(**{key: None})
|
Add management command that empty search columns
import logging
from django.core.management.base import BaseCommand
from munigeo.models import Address, AdministrativeDivision
from services.models import Service, ServiceNode, Unit
logger = logging.getLogger("search")
MODELS = [Address, AdministrativeDivision, Unit, Service, ServiceNode]
class Command(BaseCommand):
def handle(self, *args, **kwargs):
logger.info("Emptying search columns...")
for model in MODELS:
for lang in ["fi", "sv", "en"]:
logger.info(
f"Emptying search columns for model: {model.__name__} and language {lang}."
)
key = "search_column_%s" % lang
model.objects.update(**{key: None})
|
<commit_before><commit_msg>Add management command that empty search columns<commit_after>import logging
from django.core.management.base import BaseCommand
from munigeo.models import Address, AdministrativeDivision
from services.models import Service, ServiceNode, Unit
logger = logging.getLogger("search")
MODELS = [Address, AdministrativeDivision, Unit, Service, ServiceNode]
class Command(BaseCommand):
def handle(self, *args, **kwargs):
logger.info("Emptying search columns...")
for model in MODELS:
for lang in ["fi", "sv", "en"]:
logger.info(
f"Emptying search columns for model: {model.__name__} and language {lang}."
)
key = "search_column_%s" % lang
model.objects.update(**{key: None})
|
|
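The command above relies on building keyword argument names at runtime; a minimal Django-free sketch of that **{key: value} pattern:

# Stand-in for Model.objects.update(**fields); in Django each call
# issues a single UPDATE statement over the queryset.
def update(**fields):
    return fields

for lang in ("fi", "sv", "en"):
    key = "search_column_%s" % lang
    print(update(**{key: None}))
# -> {'search_column_fi': None}, {'search_column_sv': None}, {'search_column_en': None}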
455192b34b00e16b3fa0b2a45388de4327ca0c7b
|
notes/managers.py
|
notes/managers.py
|
#
# Copyright (c) 2009 Brad Taylor <brad@getcoded.net>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db import models
class NoteManager(models.Manager):
def user_viewable(self, request_user, author):
notes = self.filter(author=author)
if request_user != author:
# Public notes only
notes.filter(permissions=1)
return notes
|
#
# Copyright (c) 2009 Brad Taylor <brad@getcoded.net>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db import models
class NoteManager(models.Manager):
def user_viewable(self, request_user, author):
notes = self.filter(author=author)
if request_user != author:
# Public notes only
notes = notes.filter(permissions=1)
return notes
|
Fix typo that made private notes viewable by any other user.
|
Fix typo that made private notes viewable by any other user.
|
Python
|
agpl-3.0
|
leonhandreke/snowy,syskill/snowy,jaredjennings/snowy,jaredjennings/snowy,sandyarmstrong/snowy,leonhandreke/snowy,widox/snowy,NoUsername/PrivateNotesExperimental,nekohayo/snowy,widox/snowy,GNOME/snowy,nekohayo/snowy,syskill/snowy,jaredjennings/snowy,GNOME/snowy,sandyarmstrong/snowy,NoUsername/PrivateNotesExperimental,jaredjennings/snowy
|
#
# Copyright (c) 2009 Brad Taylor <brad@getcoded.net>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db import models
class NoteManager(models.Manager):
def user_viewable(self, request_user, author):
notes = self.filter(author=author)
if request_user != author:
# Public notes only
notes.filter(permissions=1)
return notes
Fix typo that made private notes viewable by any other user.
|
#
# Copyright (c) 2009 Brad Taylor <brad@getcoded.net>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db import models
class NoteManager(models.Manager):
def user_viewable(self, request_user, author):
notes = self.filter(author=author)
if request_user != author:
# Public notes only
notes = notes.filter(permissions=1)
return notes
|
<commit_before>#
# Copyright (c) 2009 Brad Taylor <brad@getcoded.net>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db import models
class NoteManager(models.Manager):
def user_viewable(self, request_user, author):
notes = self.filter(author=author)
if request_user != author:
# Public notes only
notes.filter(permissions=1)
return notes
<commit_msg>Fix typo that made private notes viewable by any other user.<commit_after>
|
#
# Copyright (c) 2009 Brad Taylor <brad@getcoded.net>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db import models
class NoteManager(models.Manager):
def user_viewable(self, request_user, author):
notes = self.filter(author=author)
if request_user != author:
# Public notes only
notes = notes.filter(permissions=1)
return notes
|
#
# Copyright (c) 2009 Brad Taylor <brad@getcoded.net>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db import models
class NoteManager(models.Manager):
def user_viewable(self, request_user, author):
notes = self.filter(author=author)
if request_user != author:
# Public notes only
notes.filter(permissions=1)
return notes
Fix typo that made private notes viewable by any other user.
#
# Copyright (c) 2009 Brad Taylor <brad@getcoded.net>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db import models
class NoteManager(models.Manager):
def user_viewable(self, request_user, author):
notes = self.filter(author=author)
if request_user != author:
# Public notes only
notes = notes.filter(permissions=1)
return notes
|
<commit_before>#
# Copyright (c) 2009 Brad Taylor <brad@getcoded.net>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db import models
class NoteManager(models.Manager):
def user_viewable(self, request_user, author):
notes = self.filter(author=author)
if request_user != author:
# Public notes only
notes.filter(permissions=1)
return notes
<commit_msg>Fix typo that made private notes viewable by any other user.<commit_after>#
# Copyright (c) 2009 Brad Taylor <brad@getcoded.net>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db import models
class NoteManager(models.Manager):
def user_viewable(self, request_user, author):
notes = self.filter(author=author)
if request_user != author:
# Public notes only
notes = notes.filter(permissions=1)
return notes
|
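The fix above hinges on Django QuerySets being immutable: filter() returns a new queryset and never mutates the receiver, so the unassigned call was a silent no-op that leaked private notes. A minimal stand-in illustrating why the reassignment matters (no Django required):

class FakeQuerySet:
    """Tiny immutable stand-in for Django's QuerySet."""
    def __init__(self, rows):
        self._rows = tuple(rows)

    def filter(self, **criteria):
        # Returns a NEW object, exactly like QuerySet.filter().
        keep = [r for r in self._rows
                if all(r.get(k) == v for k, v in criteria.items())]
        return FakeQuerySet(keep)

    def __len__(self):
        return len(self._rows)

notes = FakeQuerySet([{'permissions': 0}, {'permissions': 1}])
notes.filter(permissions=1)          # buggy form: result discarded
assert len(notes) == 2               # private note still visible
notes = notes.filter(permissions=1)  # fixed form: reassign
assert len(notes) == 1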
07b5e63ba44b76158f7129720616199d6eb8401a
|
code2html/tests/unit/test_vim.py
|
code2html/tests/unit/test_vim.py
|
# -*- coding: utf-8 -*-
import unittest
from code2html.vim import vim_command
class VimCommandTest(unittest.TestCase):
def test_vim_command(self):
vimrc_file = '/tmp/temporary-vimrc'
expected = 'vim -u /tmp/temporary-vimrc -c TOhtml -c wqa'
self.assertEqual(expected, ' '.join(vim_command(vimrc_file)))
|
Add an unit test for vim.py
|
Add an unit test for vim.py
|
Python
|
mit
|
kfei/code2html
|
Add an unit test for vim.py
|
# -*- coding: utf-8 -*-
import unittest
from code2html.vim import vim_command
class VimCommandTest(unittest.TestCase):
def test_vim_command(self):
vimrc_file = '/tmp/temporary-vimrc'
expected = 'vim -u /tmp/temporary-vimrc -c TOhtml -c wqa'
self.assertEqual(expected, ' '.join(vim_command(vimrc_file)))
|
<commit_before><commit_msg>Add an unit test for vim.py<commit_after>
|
# -*- coding: utf-8 -*-
import unittest
from code2html.vim import vim_command
class VimCommandTest(unittest.TestCase):
def test_vim_command(self):
vimrc_file = '/tmp/temporary-vimrc'
expected = 'vim -u /tmp/temporary-vimrc -c TOhtml -c wqa'
self.assertEqual(expected, ' '.join(vim_command(vimrc_file)))
|
Add an unit test for vim.py
# -*- coding: utf-8 -*-
import unittest
from code2html.vim import vim_command
class VimCommandTest(unittest.TestCase):
def test_vim_command(self):
vimrc_file = '/tmp/temporary-vimrc'
expected = 'vim -u /tmp/temporary-vimrc -c TOhtml -c wqa'
self.assertEqual(expected, ' '.join(vim_command(vimrc_file)))
|
<commit_before><commit_msg>Add an unit test for vim.py<commit_after># -*- coding: utf-8 -*-
import unittest
from code2html.vim import vim_command
class VimCommandTest(unittest.TestCase):
def test_vim_command(self):
vimrc_file = '/tmp/temporary-vimrc'
expected = 'vim -u /tmp/temporary-vimrc -c TOhtml -c wqa'
self.assertEqual(expected, ' '.join(vim_command(vimrc_file)))
|
|
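For context, a hypothetical vim_command implementation that would satisfy the test above; the actual code2html.vim module may differ:

def vim_command(vimrc_file):
    # Build the argv list: load only the generated vimrc, convert the
    # buffer to HTML, then write all buffers and quit.
    return ['vim', '-u', vimrc_file, '-c', 'TOhtml', '-c', 'wqa']

assert (' '.join(vim_command('/tmp/temporary-vimrc'))
        == 'vim -u /tmp/temporary-vimrc -c TOhtml -c wqa')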
7bedccd6f6288c123f8dafb660417b6f7f4bde9c
|
tests/unit/test_vendor_tornado.py
|
tests/unit/test_vendor_tornado.py
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, unicode_literals
import os
import re
import logging
# Import Salt Testing libs
from tests.support.unit import TestCase, skipIf
from tests.support.runtests import RUNTIME_VARS
# Import Salt libs
import salt.modules.cmdmod
import salt.utils.platform
log = logging.getLogger(__name__)
@skipIf(not salt.utils.path.which('bash'), 'Bash needed for this test')
class VendorTornadoTest(TestCase):
'''
Ensure we are not using any non vendor'ed tornado
'''
def test_vendored_tornado_import(self):
grep_call = salt.modules.cmdmod.run_stdout(
cmd='bash -c \'grep -r "import tornado" ./salt/*\'',
cwd=RUNTIME_VARS.CODE_DIR,
ignore_retcode=True,
).split('\n')
valid_lines = []
for line in grep_call:
if line == '':
continue
# Skip salt/ext/tornado/.. since there are a bunch of imports like
# this in docstrings.
if 'salt/ext/tornado/' in line:
continue
log.error("Test found bad line: %s", line)
valid_lines.append(line)
assert valid_lines == [], len(valid_lines)
def test_vendored_tornado_import_from(self):
grep_call = salt.modules.cmdmod.run_stdout(
cmd='bash -c \'grep -r "from tornado" ./salt/*\'',
cwd=RUNTIME_VARS.CODE_DIR,
ignore_retcode=True,
).split('\n')
valid_lines = []
for line in grep_call:
if line == '':
continue
log.error("Test found bad line: %s", line)
valid_lines.append(line)
assert valid_lines == [], len(valid_lines)
|
Add tests to validate vendor tornado usage
|
Add tests to validate vendor tornado usage
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add tests to validate vendor tornado usage
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, unicode_literals
import os
import re
import logging
# Import Salt Testing libs
from tests.support.unit import TestCase, skipIf
from tests.support.runtests import RUNTIME_VARS
# Import Salt libs
import salt.modules.cmdmod
import salt.utils.platform
log = logging.getLogger(__name__)
@skipIf(not salt.utils.path.which('bash'), 'Bash needed for this test')
class VendorTornadoTest(TestCase):
'''
Ensure we are not using any non vendor'ed tornado
'''
def test_vendored_tornado_import(self):
grep_call = salt.modules.cmdmod.run_stdout(
cmd='bash -c \'grep -r "import tornado" ./salt/*\'',
cwd=RUNTIME_VARS.CODE_DIR,
ignore_retcode=True,
).split('\n')
valid_lines = []
for line in grep_call:
if line == '':
continue
# Skip salt/ext/tornado/.. since there are a bunch of imports like
# this in docstrings.
if 'salt/ext/tornado/' in line:
continue
log.error("Test found bad line: %s", line)
valid_lines.append(line)
assert valid_lines == [], len(valid_lines)
def test_vendored_tornado_import_from(self):
grep_call = salt.modules.cmdmod.run_stdout(
cmd='bash -c \'grep -r "from tornado" ./salt/*\'',
cwd=RUNTIME_VARS.CODE_DIR,
ignore_retcode=True,
).split('\n')
valid_lines = []
for line in grep_call:
if line == '':
continue
log.error("Test found bad line: %s", line)
valid_lines.append(line)
assert valid_lines == [], len(valid_lines)
|
<commit_before><commit_msg>Add tests to validate vendor tornado usage<commit_after>
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, unicode_literals
import os
import re
import logging
# Import Salt Testing libs
from tests.support.unit import TestCase, skipIf
from tests.support.runtests import RUNTIME_VARS
# Import Salt libs
import salt.modules.cmdmod
import salt.utils.platform
log = logging.getLogger(__name__)
@skipIf(not salt.utils.path.which('bash'), 'Bash needed for this test')
class VendorTornadoTest(TestCase):
'''
Ensure we are not using any non vendor'ed tornado
'''
def test_vendored_tornado_import(self):
grep_call = salt.modules.cmdmod.run_stdout(
cmd='bash -c \'grep -r "import tornado" ./salt/*\'',
cwd=RUNTIME_VARS.CODE_DIR,
ignore_retcode=True,
).split('\n')
valid_lines = []
for line in grep_call:
if line == '':
continue
# Skip salt/ext/tornado/.. since there are a bunch of imports like
# this in docstrings.
if 'salt/ext/tornado/' in line:
continue
log.error("Test found bad line: %s", line)
valid_lines.append(line)
assert valid_lines == [], len(valid_lines)
def test_vendored_tornado_import_from(self):
grep_call = salt.modules.cmdmod.run_stdout(
cmd='bash -c \'grep -r "from tornado" ./salt/*\'',
cwd=RUNTIME_VARS.CODE_DIR,
ignore_retcode=True,
).split('\n')
valid_lines = []
for line in grep_call:
if line == '':
continue
log.error("Test found bad line: %s", line)
valid_lines.append(line)
assert valid_lines == [], len(valid_lines)
|
Add tests to validate vendor tornado usage
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, unicode_literals
import os
import re
import logging
# Import Salt Testing libs
from tests.support.unit import TestCase, skipIf
from tests.support.runtests import RUNTIME_VARS
# Import Salt libs
import salt.modules.cmdmod
import salt.utils.platform
log = logging.getLogger(__name__)
@skipIf(not salt.utils.path.which('bash'), 'Bash needed for this test')
class VendorTornadoTest(TestCase):
'''
Ensure we are not using any non vendor'ed tornado
'''
def test_vendored_tornado_import(self):
grep_call = salt.modules.cmdmod.run_stdout(
cmd='bash -c \'grep -r "import tornado" ./salt/*\'',
cwd=RUNTIME_VARS.CODE_DIR,
ignore_retcode=True,
).split('\n')
valid_lines = []
for line in grep_call:
if line == '':
continue
# Skip salt/ext/tornado/.. since there are a bunch of imports like
# this in docstrings.
if 'salt/ext/tornado/' in line:
continue
log.error("Test found bad line: %s", line)
valid_lines.append(line)
assert valid_lines == [], len(valid_lines)
def test_vendored_tornado_import_from(self):
grep_call = salt.modules.cmdmod.run_stdout(
cmd='bash -c \'grep -r "from tornado" ./salt/*\'',
cwd=RUNTIME_VARS.CODE_DIR,
ignore_retcode=True,
).split('\n')
valid_lines = []
for line in grep_call:
if line == '':
continue
log.error("Test found bad line: %s", line)
valid_lines.append(line)
assert valid_lines == [], len(valid_lines)
|
<commit_before><commit_msg>Add tests to validate vendor tornado usage<commit_after># -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, unicode_literals
import os
import re
import logging
# Import Salt Testing libs
from tests.support.unit import TestCase, skipIf
from tests.support.runtests import RUNTIME_VARS
# Import Salt libs
import salt.modules.cmdmod
import salt.utils.platform
log = logging.getLogger(__name__)
@skipIf(not salt.utils.path.which('bash'), 'Bash needed for this test')
class VendorTornadoTest(TestCase):
'''
Ensure we are not using any non vendor'ed tornado
'''
def test_vendored_tornado_import(self):
grep_call = salt.modules.cmdmod.run_stdout(
cmd='bash -c \'grep -r "import tornado" ./salt/*\'',
cwd=RUNTIME_VARS.CODE_DIR,
ignore_retcode=True,
).split('\n')
valid_lines = []
for line in grep_call:
if line == '':
continue
# Skip salt/ext/tornado/.. since there are a bunch of imports like
# this in docstrings.
if 'salt/ext/tornado/' in line:
continue
log.error("Test found bad line: %s", line)
valid_lines.append(line)
assert valid_lines == [], len(valid_lines)
def test_vendored_tornado_import_from(self):
grep_call = salt.modules.cmdmod.run_stdout(
cmd='bash -c \'grep -r "from tornado" ./salt/*\'',
cwd=RUNTIME_VARS.CODE_DIR,
ignore_retcode=True,
).split('\n')
valid_lines = []
for line in grep_call:
if line == '':
continue
log.error("Test found bad line: %s", line)
valid_lines.append(line)
assert valid_lines == [], len(valid_lines)
|
|
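The test above shells out to bash and grep (hence the skipIf guard); a Python 3 sketch of the same scan done portably in pure Python, with illustrative paths:

import os
import re

PATTERN = re.compile(r'^\s*(?:import tornado|from tornado)', re.MULTILINE)

def bad_tornado_imports(root='./salt'):
    """Return .py files importing tornado outside the vendored copy."""
    hits = []
    for dirpath, _dirs, files in os.walk(root):
        if 'salt/ext/tornado' in dirpath.replace(os.sep, '/'):
            continue  # the vendored copy may reference itself
        for name in files:
            if name.endswith('.py'):
                path = os.path.join(dirpath, name)
                with open(path, encoding='utf-8', errors='ignore') as fh:
                    if PATTERN.search(fh.read()):
                        hits.append(path)
    return hits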
f524a951286b4cef5689abe5a76bc88e13e24e22
|
mindbender/maya/plugins/validate_frame_range.py
|
mindbender/maya/plugins/validate_frame_range.py
|
import pyblish.api
class ValidateMindbenderFrameRange(pyblish.api.InstancePlugin):
"""Animation should normally be published with the range for a shot"""
label = "Validate Frame Range"
order = pyblish.api.ValidatorOrder
hosts = ["maya"]
optional = True
families = [
"mindbender.animation",
]
def process(self, instance):
import os
instance_in = str(int(instance.data["startFrame"]))
instance_out = str(int(instance.data["endFrame"]))
global_in = os.environ["MINDBENDER_EDIT_IN"]
global_out = os.environ["MINDBENDER_EDIT_OUT"]
instance_range = "-".join([instance_in, instance_out])
global_range = "-".join([global_in, global_out])
assert instance_range == global_range, (
"%s != %s - Animation range may be invalid" % (
instance_range, global_range
)
)
|
Implement validate frame range, as optional
|
Implement validate frame range, as optional
|
Python
|
mit
|
getavalon/core,mindbender-studio/core,MoonShineVFX/core,MoonShineVFX/core,getavalon/core,mindbender-studio/core
|
Implement validate frame range, as optional
|
import pyblish.api
class ValidateMindbenderFrameRange(pyblish.api.InstancePlugin):
"""Animation should normally be published with the range for a shot"""
label = "Validate Frame Range"
order = pyblish.api.ValidatorOrder
hosts = ["maya"]
optional = True
families = [
"mindbender.animation",
]
def process(self, instance):
import os
instance_in = str(int(instance.data["startFrame"]))
instance_out = str(int(instance.data["endFrame"]))
global_in = os.environ["MINDBENDER_EDIT_IN"]
global_out = os.environ["MINDBENDER_EDIT_OUT"]
instance_range = "-".join([instance_in, instance_out])
global_range = "-".join([global_in, global_out])
assert instance_range == global_range, (
"%s != %s - Animation range may be invalid" % (
instance_range, global_range
)
)
|
<commit_before><commit_msg>Implement validate frame range, as optional<commit_after>
|
import pyblish.api
class ValidateMindbenderFrameRange(pyblish.api.InstancePlugin):
"""Animation should normally be published with the range for a shot"""
label = "Validate Frame Range"
order = pyblish.api.ValidatorOrder
hosts = ["maya"]
optional = True
families = [
"mindbender.animation",
]
def process(self, instance):
import os
instance_in = str(int(instance.data["startFrame"]))
instance_out = str(int(instance.data["endFrame"]))
global_in = os.environ["MINDBENDER_EDIT_IN"]
global_out = os.environ["MINDBENDER_EDIT_OUT"]
instance_range = "-".join([instance_in, instance_out])
global_range = "-".join([global_in, global_out])
assert instance_range == global_range, (
"%s != %s - Animation range may be invalid" % (
instance_range, global_range
)
)
|
Implement validate frame range, as optional
import pyblish.api
class ValidateMindbenderFrameRange(pyblish.api.InstancePlugin):
"""Animation should normally be published with the range for a shot"""
label = "Validate Frame Range"
order = pyblish.api.ValidatorOrder
hosts = ["maya"]
optional = True
families = [
"mindbender.animation",
]
def process(self, instance):
import os
instance_in = str(int(instance.data["startFrame"]))
instance_out = str(int(instance.data["endFrame"]))
global_in = os.environ["MINDBENDER_EDIT_IN"]
global_out = os.environ["MINDBENDER_EDIT_OUT"]
instance_range = "-".join([instance_in, instance_out])
global_range = "-".join([global_in, global_out])
assert instance_range == global_range, (
"%s != %s - Animation range may be invalid" % (
instance_range, global_range
)
)
|
<commit_before><commit_msg>Implement validate frame range, as optional<commit_after>import pyblish.api
class ValidateMindbenderFrameRange(pyblish.api.InstancePlugin):
"""Animation should normally be published with the range for a shot"""
label = "Validate Frame Range"
order = pyblish.api.ValidatorOrder
hosts = ["maya"]
optional = True
families = [
"mindbender.animation",
]
def process(self, instance):
import os
instance_in = str(int(instance.data["startFrame"]))
instance_out = str(int(instance.data["endFrame"]))
global_in = os.environ["MINDBENDER_EDIT_IN"]
global_out = os.environ["MINDBENDER_EDIT_OUT"]
instance_range = "-".join([instance_in, instance_out])
global_range = "-".join([global_in, global_out])
assert instance_range == global_range, (
"%s != %s - Animation range may be invalid" % (
instance_range, global_range
)
)
|
|
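A standalone sketch of the range comparison above, minus the Maya/pyblish plumbing; the environment values here are illustrative:

import os

def check_range(start, end):
    # Compare "start-end" strings, as the validator above does.
    instance_range = '%d-%d' % (int(start), int(end))
    global_range = '%s-%s' % (os.environ['MINDBENDER_EDIT_IN'],
                              os.environ['MINDBENDER_EDIT_OUT'])
    assert instance_range == global_range, (
        '%s != %s - Animation range may be invalid'
        % (instance_range, global_range))

os.environ['MINDBENDER_EDIT_IN'] = '1001'
os.environ['MINDBENDER_EDIT_OUT'] = '1120'
check_range(1001.0, 1120.0)   # passes
# check_range(1001, 1121)     # would raise AssertionError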
8bc4471b95884f00d58d1067ad90c6a3220ce61e
|
katagawa/sql/operators.py
|
katagawa/sql/operators.py
|
"""
Operators - stuff like `column = 'value'`, and similar.
"""
import abc
from katagawa.sql import Token
from katagawa.sql.dialects.common import Field
class Operator(Token):
"""
The base class for an operator.
An operator has three attributes - the field, the other value, and the actual operator itself.
The field is, obviously, a field object. The value can be either a field or another column to compare along,
useful for relationships (WHERE table1.field1 = table2.field2), etc.
This base class implements the actual SQL emitting for you; you only need to define the operator and it will
autogenerate the SQL.
"""
def __init__(self, field: Field, value):
self.field = field
self.value = value
@abc.abstractproperty
def operator(self):
"""
:return: The SQL operator that this represents; for example, the Eq() class will return `=` here.
"""
def generate_sql(self):
"""
Generates the SQL for this interaction.
"""
# Check the type of the field object.
# It should be a field.
if not isinstance(self.field, Field):
raise TypeError("Field in an operator must be a field")
# Use the alias for the field, because it should be specified.
# We don't want it to generate the SQL directly as it will want to do `name AS alias`, whereas we only want
# the alias itself.
# Alternatively, use `field.identifier` to use the raw identifier, as sometimes a field won't have an alias.
field = self.field.alias or self.field.identifier
# Next, check if the value is a string or a field object.
if isinstance(self.value, Field):
value = self.value.alias or self.field.identifier
elif isinstance(self.value, str):
value = self.value
else:
raise TypeError("Value in an operator must be a field or a string")
# Format the string.
built = '"{f}" {op} "{v}"'.format(f=field, op=self.operator, v=value)
# Return the built string.
return built
|
Add operator module, which contains the base class for an Operator token.
|
[sql] Add operator module, which contains the base class for an Operator token.
|
Python
|
mit
|
SunDwarf/asyncqlio
|
[sql] Add operator module, which contains the base class for an Operator token.
|
"""
Operators - stuff like `column = 'value'`, and similar.
"""
import abc
from katagawa.sql import Token
from katagawa.sql.dialects.common import Field
class Operator(Token):
"""
The base class for an operator.
An operator has three attributes - the field, the other value, and the actual operator itself.
The field is, obviously, a field object. The value can be either a field or another column to compare along,
useful for relationships (WHERE table1.field1 = table2.field2), etc.
This base class implements the actual SQL emitting for you; you only need to define the operator and it will
autogenerate the SQL.
"""
def __init__(self, field: Field, value):
self.field = field
self.value = value
@abc.abstractproperty
def operator(self):
"""
:return: The SQL operator that this represents; for example, the Eq() class will return `=` here.
"""
def generate_sql(self):
"""
Generates the SQL for this interaction.
"""
# Check the type of the field object.
# It should be a field.
if not isinstance(self.field, Field):
raise TypeError("Field in an operator must be a field")
# Use the alias for the field, because it should be specified.
# We don't want it to generate the SQL directly as it will want to do `name AS alias`, whereas we only want
# the alias itself.
# Alternatively, use `field.identifier` to use the raw identifier, as sometimes a field won't have an alias.
field = self.field.alias or self.field.identifier
# Next, check if the value is a string or a field object.
if isinstance(self.value, Field):
value = self.value.alias or self.field.identifier
elif isinstance(self.value, str):
value = self.value
else:
raise TypeError("Value in an operator must be a field or a string")
# Format the string.
built = '"{f}" {op} "{v}"'.format(f=field, op=self.operator, v=value)
# Return the built string.
return built
|
<commit_before><commit_msg>[sql] Add operator module, which contains the base class for an Operator token.<commit_after>
|
"""
Operators - stuff like `column = 'value'`, and similar.
"""
import abc
from katagawa.sql import Token
from katagawa.sql.dialects.common import Field
class Operator(Token):
"""
The base class for an operator.
An operator has three attributes - the field, the other value, and the actual operator itself.
The field is, obviously, a field object. The value can be either a field or another column to compare along,
useful for relationships (WHERE table1.field1 = table2.field2), etc.
This base class implements the actual SQL emitting for you; you only need to define the operator and it will
autogenerate the SQL.
"""
def __init__(self, field: Field, value):
self.field = field
self.value = value
@abc.abstractproperty
def operator(self):
"""
:return: The SQL operator that this represents; for example, the Eq() class will return `=` here.
"""
def generate_sql(self):
"""
Generates the SQL for this interaction.
"""
# Check the type of the field object.
# It should be a field.
if not isinstance(self.field, Field):
raise TypeError("Field in an operator must be a field")
# Use the alias for the field, because it should be specified.
# We don't want it to generate the SQL directly as it will want to do `name AS alias`, whereas we only want
# the alias itself.
# Alternatively, use `field.identifier` to use the raw identifier, as sometimes a field won't have an alias.
field = self.field.alias or self.field.identifier
# Next, check if the value is a string or a field object.
if isinstance(self.value, Field):
value = self.value.alias or self.field.identifier
elif isinstance(self.value, str):
value = self.value
else:
raise TypeError("Value in an operator must be a field or a string")
# Format the string.
built = '"{f}" {op} "{v}"'.format(f=field, op=self.operator, v=value)
# Return the built string.
return built
|
[sql] Add operator module, which contains the base class for an Operator token.
"""
Operators - stuff like `column = 'value'`, and similar.
"""
import abc
from katagawa.sql import Token
from katagawa.sql.dialects.common import Field
class Operator(Token):
"""
The base class for an operator.
An operator has three attributes - the field, the other value, and the actual operator itself.
The field is, obviously, a field object. The value can be either a field or another column to compare along,
useful for relationships (WHERE table1.field1 = table2.field2), etc.
This base class implements the actual SQL emitting for you; you only need to define the operator and it will
autogenerate the SQL.
"""
def __init__(self, field: Field, value):
self.field = field
self.value = value
@abc.abstractproperty
def operator(self):
"""
:return: The SQL operator that this represents; for example, the Eq() class will return `=` here.
"""
def generate_sql(self):
"""
Generates the SQL for this interaction.
"""
# Check the type of the field object.
# It should be a field.
if not isinstance(self.field, Field):
raise TypeError("Field in an operator must be a field")
# Use the alias for the field, because it should be specified.
# We don't want it to generate the SQL directly as it will want to do `name AS alias`, whereas we only want
# the alias itself.
# Alternatively, use `field.identifier` to use the raw identifier, as sometimes a field won't have an alias.
field = self.field.alias or self.field.identifier
# Next, check if the value is a string or a field object.
if isinstance(self.value, Field):
value = self.value.alias or self.field.identifier
elif isinstance(self.value, str):
value = self.value
else:
raise TypeError("Value in an operator must be a field or a string")
# Format the string.
built = '"{f}" {op} "{v}"'.format(f=field, op=self.operator, v=value)
# Return the built string.
return built
|
<commit_before><commit_msg>[sql] Add operator module, which contains the base class for an Operator token.<commit_after>"""
Operators - stuff like `column = 'value'`, and similar.
"""
import abc
from katagawa.sql import Token
from katagawa.sql.dialects.common import Field
class Operator(Token):
"""
The base class for an operator.
An operator has three attributes - the field, the other value, and the actual operator itself.
The field is, obviously, a field object. The value can be either a field or another column to compare along,
useful for relationships (WHERE table1.field1 = table2.field2), etc.
This base class implements the actual SQL emitting for you; you only need to define the operator and it will
autogenerate the SQL.
"""
def __init__(self, field: Field, value):
self.field = field
self.value = value
@abc.abstractproperty
def operator(self):
"""
:return: The SQL operator that this represents; for example, the Eq() class will return `=` here.
"""
def generate_sql(self):
"""
Generates the SQL for this interaction.
"""
# Check the type of the field object.
# It should be a field.
if not isinstance(self.field, Field):
raise TypeError("Field in an operator must be a field")
# Use the alias for the field, because it should be specified.
# We don't want it to generate the SQL directly as it will want to do `name AS alias`, whereas we only want
# the alias itself.
# Alternatively, use `field.identifier` to use the raw identifier, as sometimes a field won't have an alias.
field = self.field.alias or self.field.identifier
# Next, check if the value is a string or a field object.
if isinstance(self.value, Field):
value = self.value.alias or self.field.identifier
elif isinstance(self.value, str):
value = self.value
else:
raise TypeError("Value in an operator must be a field or a string")
# Format the string.
built = '"{f}" {op} "{v}"'.format(f=field, op=self.operator, v=value)
# Return the built string.
return built
|
|
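Two editorial notes on the record above: abc.abstractproperty has been deprecated since Python 3.3 in favor of stacking @property with @abc.abstractmethod, and the value branch reads self.field.identifier where self.value.identifier was presumably intended. A self-contained mini version of the pattern with a concrete subclass (Field here is a small stand-in, not katagawa's real class):

import abc

class Field:
    def __init__(self, identifier, alias=None):
        self.identifier = identifier
        self.alias = alias

class Operator(abc.ABC):
    def __init__(self, field, value):
        self.field = field
        self.value = value

    @property
    @abc.abstractmethod
    def operator(self):
        """The SQL symbol this operator renders, e.g. '=' for Eq."""

    def generate_sql(self):
        field = self.field.alias or self.field.identifier
        if isinstance(self.value, Field):
            value = self.value.alias or self.value.identifier
        else:
            value = str(self.value)
        return '"{f}" {op} "{v}"'.format(f=field, op=self.operator, v=value)

class Eq(Operator):
    @property
    def operator(self):
        return '='

print(Eq(Field('users.name'), 'alice').generate_sql())  # "users.name" = "alice"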
19a9d49fe84f0ba89de04001d2a5d0c8cc3f135a
|
tools/print-zk.py
|
tools/print-zk.py
|
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import logging
import nodepool.config
import nodepool.zk
# A script to print the zookeeper tree given a nodepool config file.
logging.basicConfig()
parser = argparse.ArgumentParser(description='Print the zookeeper tree')
parser.add_argument('-c', dest='config',
default='/etc/nodepool/nodepool.yaml',
help='path to config file')
args = parser.parse_args()
config = nodepool.config.loadConfig(args.config)
zk = nodepool.zk.ZooKeeper()
zk.connect(config.zookeeper_servers.values())
def join(a, b):
if a.endswith('/'):
return a+b
return a+'/'+b
def print_tree(node):
data, stat = zk.client.get(node)
print "Node: %s %s" % (node, stat)
if data:
print data
for child in zk.client.get_children(node):
print
print_tree(join(node, child))
print_tree('/')
zk.disconnect()
|
Add a script to print the ZK tree
|
Add a script to print the ZK tree
This script uses a nodepool config file to connect to ZK and print
the entire contents of the ZK tree for debugging purposes.
Change-Id: I31566e15d915e701639325f757d1b917ad93c780
|
Python
|
apache-2.0
|
Tesora/tesora-nodepool,Tesora/tesora-nodepool
|
Add a script to print the ZK tree
This script uses a nodepool config file to connect to ZK and print
the entire contents of the ZK tree for debugging purposes.
Change-Id: I31566e15d915e701639325f757d1b917ad93c780
|
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import logging
import nodepool.config
import nodepool.zk
# A script to print the zookeeper tree given a nodepool config file.
logging.basicConfig()
parser = argparse.ArgumentParser(description='Print the zookeeper tree')
parser.add_argument('-c', dest='config',
default='/etc/nodepool/nodepool.yaml',
help='path to config file')
args = parser.parse_args()
config = nodepool.config.loadConfig(args.config)
zk = nodepool.zk.ZooKeeper()
zk.connect(config.zookeeper_servers.values())
def join(a, b):
if a.endswith('/'):
return a+b
return a+'/'+b
def print_tree(node):
data, stat = zk.client.get(node)
print "Node: %s %s" % (node, stat)
if data:
print data
for child in zk.client.get_children(node):
print
print_tree(join(node, child))
print_tree('/')
zk.disconnect()
|
<commit_before><commit_msg>Add a script to print the ZK tree
This script uses a nodepool config file to connect to ZK and print
the entire contents of the ZK tree for debugging purposes.
Change-Id: I31566e15d915e701639325f757d1b917ad93c780<commit_after>
|
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import logging
import nodepool.config
import nodepool.zk
# A script to print the zookeeper tree given a nodepool config file.
logging.basicConfig()
parser = argparse.ArgumentParser(description='Print the zookeeper tree')
parser.add_argument('-c', dest='config',
default='/etc/nodepool/nodepool.yaml',
help='path to config file')
args = parser.parse_args()
config = nodepool.config.loadConfig(args.config)
zk = nodepool.zk.ZooKeeper()
zk.connect(config.zookeeper_servers.values())
def join(a, b):
if a.endswith('/'):
return a+b
return a+'/'+b
def print_tree(node):
data, stat = zk.client.get(node)
print "Node: %s %s" % (node, stat)
if data:
print data
for child in zk.client.get_children(node):
print
print_tree(join(node, child))
print_tree('/')
zk.disconnect()
|
Add a script to print the ZK tree
This script uses a nodepool config file to connect to ZK and print
the entire contents of the ZK tree for debugging purposes.
Change-Id: I31566e15d915e701639325f757d1b917ad93c780
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import logging
import nodepool.config
import nodepool.zk
# A script to print the zookeeper tree given a nodepool config file.
logging.basicConfig()
parser = argparse.ArgumentParser(description='Print the zookeeper tree')
parser.add_argument('-c', dest='config',
default='/etc/nodepool/nodepool.yaml',
help='path to config file')
args = parser.parse_args()
config = nodepool.config.loadConfig(args.config)
zk = nodepool.zk.ZooKeeper()
zk.connect(config.zookeeper_servers.values())
def join(a, b):
if a.endswith('/'):
return a+b
return a+'/'+b
def print_tree(node):
data, stat = zk.client.get(node)
print "Node: %s %s" % (node, stat)
if data:
print data
for child in zk.client.get_children(node):
print
print_tree(join(node, child))
print_tree('/')
zk.disconnect()
|
<commit_before><commit_msg>Add a script to print the ZK tree
This script uses a nodepool config file to connect to ZK and print
the entire contents of the ZK tree for debugging purposes.
Change-Id: I31566e15d915e701639325f757d1b917ad93c780<commit_after>#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import logging
import nodepool.config
import nodepool.zk
# A script to print the zookeeper tree given a nodepool config file.
logging.basicConfig()
parser = argparse.ArgumentParser(description='Print the zookeeper tree')
parser.add_argument('-c', dest='config',
default='/etc/nodepool/nodepool.yaml',
help='path to config file')
args = parser.parse_args()
config = nodepool.config.loadConfig(args.config)
zk = nodepool.zk.ZooKeeper()
zk.connect(config.zookeeper_servers.values())
def join(a, b):
if a.endswith('/'):
return a+b
return a+'/'+b
def print_tree(node):
data, stat = zk.client.get(node)
print "Node: %s %s" % (node, stat)
if data:
print data
for child in zk.client.get_children(node):
print
print_tree(join(node, child))
print_tree('/')
zk.disconnect()
|
|
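A Python 3 sketch of the same recursive dump talking to ZooKeeper through kazoo directly (the record above is Python 2 and goes through nodepool's wrapper; the host address is illustrative):

from kazoo.client import KazooClient

def join(a, b):
    return a + b if a.endswith('/') else a + '/' + b

def print_tree(zk, node='/'):
    # kazoo's get() returns (data_bytes, ZnodeStat).
    data, stat = zk.get(node)
    print('Node: %s %s' % (node, stat))
    if data:
        print(data.decode('utf-8', 'replace'))
    for child in zk.get_children(node):
        print()
        print_tree(zk, join(node, child))

zk = KazooClient(hosts='127.0.0.1:2181')  # illustrative address
zk.start()
print_tree(zk)
zk.stop()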
4a7fb72b496f036e33a8d48b120717f328828756
|
corehq/apps/accounting/tests/test_enterprise_mode.py
|
corehq/apps/accounting/tests/test_enterprise_mode.py
|
from django.test import override_settings
from corehq import privileges
from corehq.apps.accounting.models import SoftwarePlanEdition
from corehq.apps.accounting.tests.base_tests import BaseAccountingTest
from corehq.apps.accounting.tests.utils import DomainSubscriptionMixin
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.domain.shortcuts import create_domain
ADVANCED_PRIVILEGE = privileges.CUSTOM_BRANDING # chosen arbitrarily, feel free to change
class TestEnterpriseMode(DomainSubscriptionMixin, BaseAccountingTest):
def setUp(self):
self.domain_obj = create_domain('test_enterprise_mode')
def tearDown(self):
self.domain_obj.delete()
domain_has_privilege.clear(self.domain_obj.name, ADVANCED_PRIVILEGE)
def test_standard_cant_access_advanced(self):
self.setup_subscription(self.domain_obj.name, SoftwarePlanEdition.STANDARD)
self.addCleanup(self.teardown_subscriptions)
self.assertFalse(self.domain_obj.has_privilege(ADVANCED_PRIVILEGE))
def test_no_plan_cant_access_anything(self):
self.assertFalse(self.domain_obj.has_privilege(ADVANCED_PRIVILEGE))
def test_enterprise_can_access_anything(self):
with override_settings(ENTERPRISE_MODE=True):
self.assertTrue(self.domain_obj.has_privilege(ADVANCED_PRIVILEGE))
|
Add failing test for desired behavior
|
Add failing test for desired behavior
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add failing test for desired behavior
|
from django.test import override_settings
from corehq import privileges
from corehq.apps.accounting.models import SoftwarePlanEdition
from corehq.apps.accounting.tests.base_tests import BaseAccountingTest
from corehq.apps.accounting.tests.utils import DomainSubscriptionMixin
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.domain.shortcuts import create_domain
ADVANCED_PRIVILEGE = privileges.CUSTOM_BRANDING # chosen arbitrarily, feel free to change
class TestEnterpriseMode(DomainSubscriptionMixin, BaseAccountingTest):
def setUp(self):
self.domain_obj = create_domain('test_enterprise_mode')
def tearDown(self):
self.domain_obj.delete()
domain_has_privilege.clear(self.domain_obj.name, ADVANCED_PRIVILEGE)
def test_standard_cant_access_advanced(self):
self.setup_subscription(self.domain_obj.name, SoftwarePlanEdition.STANDARD)
self.addCleanup(self.teardown_subscriptions)
self.assertFalse(self.domain_obj.has_privilege(ADVANCED_PRIVILEGE))
def test_no_plan_cant_access_anything(self):
self.assertFalse(self.domain_obj.has_privilege(ADVANCED_PRIVILEGE))
def test_enterprise_can_access_anything(self):
with override_settings(ENTERPRISE_MODE=True):
self.assertTrue(self.domain_obj.has_privilege(ADVANCED_PRIVILEGE))
|
<commit_before><commit_msg>Add failing test for desired behavior<commit_after>
|
from django.test import override_settings
from corehq import privileges
from corehq.apps.accounting.models import SoftwarePlanEdition
from corehq.apps.accounting.tests.base_tests import BaseAccountingTest
from corehq.apps.accounting.tests.utils import DomainSubscriptionMixin
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.domain.shortcuts import create_domain
ADVANCED_PRIVILEGE = privileges.CUSTOM_BRANDING # chosen arbitrarily, feel free to change
class TestEnterpriseMode(DomainSubscriptionMixin, BaseAccountingTest):
def setUp(self):
self.domain_obj = create_domain('test_enterprise_mode')
def tearDown(self):
self.domain_obj.delete()
domain_has_privilege.clear(self.domain_obj.name, ADVANCED_PRIVILEGE)
def test_standard_cant_access_advanced(self):
self.setup_subscription(self.domain_obj.name, SoftwarePlanEdition.STANDARD)
self.addCleanup(self.teardown_subscriptions)
self.assertFalse(self.domain_obj.has_privilege(ADVANCED_PRIVILEGE))
def test_no_plan_cant_access_anything(self):
self.assertFalse(self.domain_obj.has_privilege(ADVANCED_PRIVILEGE))
def test_enterprise_can_access_anything(self):
with override_settings(ENTERPRISE_MODE=True):
self.assertTrue(self.domain_obj.has_privilege(ADVANCED_PRIVILEGE))
|
Add failing test for desired behavior
from django.test import override_settings
from corehq import privileges
from corehq.apps.accounting.models import SoftwarePlanEdition
from corehq.apps.accounting.tests.base_tests import BaseAccountingTest
from corehq.apps.accounting.tests.utils import DomainSubscriptionMixin
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.domain.shortcuts import create_domain
ADVANCED_PRIVILEGE = privileges.CUSTOM_BRANDING # chosen arbitrarily, feel free to change
class TestEnterpriseMode(DomainSubscriptionMixin, BaseAccountingTest):
def setUp(self):
self.domain_obj = create_domain('test_enterprise_mode')
def tearDown(self):
self.domain_obj.delete()
domain_has_privilege.clear(self.domain_obj.name, ADVANCED_PRIVILEGE)
def test_standard_cant_access_advanced(self):
self.setup_subscription(self.domain_obj.name, SoftwarePlanEdition.STANDARD)
self.addCleanup(self.teardown_subscriptions)
self.assertFalse(self.domain_obj.has_privilege(ADVANCED_PRIVILEGE))
def test_no_plan_cant_access_anything(self):
self.assertFalse(self.domain_obj.has_privilege(ADVANCED_PRIVILEGE))
def test_enterprise_can_access_anything(self):
with override_settings(ENTERPRISE_MODE=True):
self.assertTrue(self.domain_obj.has_privilege(ADVANCED_PRIVILEGE))
|
<commit_before><commit_msg>Add failing test for desired behavior<commit_after>from django.test import override_settings
from corehq import privileges
from corehq.apps.accounting.models import SoftwarePlanEdition
from corehq.apps.accounting.tests.base_tests import BaseAccountingTest
from corehq.apps.accounting.tests.utils import DomainSubscriptionMixin
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.domain.shortcuts import create_domain
ADVANCED_PRIVILEGE = privileges.CUSTOM_BRANDING # chosen arbitrarily, feel free to change
class TestEnterpriseMode(DomainSubscriptionMixin, BaseAccountingTest):
def setUp(self):
self.domain_obj = create_domain('test_enterprise_mode')
def tearDown(self):
self.domain_obj.delete()
domain_has_privilege.clear(self.domain_obj.name, ADVANCED_PRIVILEGE)
def test_standard_cant_access_advanced(self):
self.setup_subscription(self.domain_obj.name, SoftwarePlanEdition.STANDARD)
self.addCleanup(self.teardown_subscriptions)
self.assertFalse(self.domain_obj.has_privilege(ADVANCED_PRIVILEGE))
def test_no_plan_cant_access_anything(self):
self.assertFalse(self.domain_obj.has_privilege(ADVANCED_PRIVILEGE))
def test_enterprise_can_access_anything(self):
with override_settings(ENTERPRISE_MODE=True):
self.assertTrue(self.domain_obj.has_privilege(ADVANCED_PRIVILEGE))
|
|
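The failing test above leans on django.test.override_settings to simulate enterprise mode; a minimal illustration of that context-manager pattern (assumes a configured Django test environment):

from django.conf import settings
from django.test import TestCase, override_settings

class FlagToggleTest(TestCase):
    def test_flag_toggles(self):
        # Inside the block the setting reads as overridden; it is
        # restored automatically when the block exits.
        with override_settings(ENTERPRISE_MODE=True):
            self.assertTrue(settings.ENTERPRISE_MODE)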
08581be11f891e21014a7863ab102d4586388d47
|
packs/docker/actions/build_image.py
|
packs/docker/actions/build_image.py
|
import os
from lib.base import DockerBasePythonAction
__all__ = [
'DockerBuildImageAction'
]
class DockerBuildImageAction(DockerBasePythonAction):
def run(self, dockerfile_path, tag):
if os.path.isdir(dockerfile_path):
return self.wrapper.build(path=dockerfile_path, tag=tag)
else:
with open(dockerfile_path, 'r') as fp:
return self.wrapper.build(fileobj=fp, tag=tag)
|
import os
from lib.base import DockerBasePythonAction
__all__ = [
'DockerBuildImageAction'
]
class DockerBuildImageAction(DockerBasePythonAction):
def run(self, dockerfile_path, tag):
if os.path.isdir(dockerfile_path):
return self.wrapper.build(path=dockerfile_path, tag=tag)
else:
dockerfile_path = os.path.expanduser(dockerfile_path)
with open(dockerfile_path, 'r') as fp:
return self.wrapper.build(fileobj=fp, tag=tag)
|
Expand user in the path.
|
Expand user in the path.
|
Python
|
apache-2.0
|
StackStorm/st2contrib,jtopjian/st2contrib,armab/st2contrib,Aamir-raza-1/st2contrib,tonybaloney/st2contrib,StackStorm/st2contrib,pearsontechnology/st2contrib,digideskio/st2contrib,meirwah/st2contrib,tonybaloney/st2contrib,dennybaa/st2contrib,jtopjian/st2contrib,armab/st2contrib,tonybaloney/st2contrib,digideskio/st2contrib,StackStorm/st2contrib,lakshmi-kannan/st2contrib,lmEshoo/st2contrib,armab/st2contrib,lmEshoo/st2contrib,meirwah/st2contrib,psychopenguin/st2contrib,pearsontechnology/st2contrib,pidah/st2contrib,pinterb/st2contrib,pearsontechnology/st2contrib,pearsontechnology/st2contrib,pidah/st2contrib,Aamir-raza-1/st2contrib,lakshmi-kannan/st2contrib,pidah/st2contrib,dennybaa/st2contrib,psychopenguin/st2contrib,pinterb/st2contrib
|
import os
from lib.base import DockerBasePythonAction
__all__ = [
'DockerBuildImageAction'
]
class DockerBuildImageAction(DockerBasePythonAction):
def run(self, dockerfile_path, tag):
if os.path.isdir(dockerfile_path):
return self.wrapper.build(path=dockerfile_path, tag=tag)
else:
with open(dockerfile_path, 'r') as fp:
return self.wrapper.build(fileobj=fp, tag=tag)
Expand user in the path.
|
import os
from lib.base import DockerBasePythonAction
__all__ = [
'DockerBuildImageAction'
]
class DockerBuildImageAction(DockerBasePythonAction):
def run(self, dockerfile_path, tag):
if os.path.isdir(dockerfile_path):
return self.wrapper.build(path=dockerfile_path, tag=tag)
else:
dockerfile_path = os.path.expanduser(dockerfile_path)
with open(dockerfile_path, 'r') as fp:
return self.wrapper.build(fileobj=fp, tag=tag)
|
<commit_before>import os
from lib.base import DockerBasePythonAction
__all__ = [
'DockerBuildImageAction'
]
class DockerBuildImageAction(DockerBasePythonAction):
def run(self, dockerfile_path, tag):
if os.path.isdir(dockerfile_path):
return self.wrapper.build(path=dockerfile_path, tag=tag)
else:
with open(dockerfile_path, 'r') as fp:
return self.wrapper.build(fileobj=fp, tag=tag)
<commit_msg>Expand user in the path.<commit_after>
|
import os
from lib.base import DockerBasePythonAction
__all__ = [
'DockerBuildImageAction'
]
class DockerBuildImageAction(DockerBasePythonAction):
def run(self, dockerfile_path, tag):
if os.path.isdir(dockerfile_path):
return self.wrapper.build(path=dockerfile_path, tag=tag)
else:
dockerfile_path = os.path.expanduser(dockerfile_path)
with open(dockerfile_path, 'r') as fp:
return self.wrapper.build(fileobj=fp, tag=tag)
|
import os
from lib.base import DockerBasePythonAction
__all__ = [
'DockerBuildImageAction'
]
class DockerBuildImageAction(DockerBasePythonAction):
def run(self, dockerfile_path, tag):
if os.path.isdir(dockerfile_path):
return self.wrapper.build(path=dockerfile_path, tag=tag)
else:
with open(dockerfile_path, 'r') as fp:
return self.wrapper.build(fileobj=fp, tag=tag)
Expand user in the path.
import os
from lib.base import DockerBasePythonAction
__all__ = [
'DockerBuildImageAction'
]
class DockerBuildImageAction(DockerBasePythonAction):
def run(self, dockerfile_path, tag):
if os.path.isdir(dockerfile_path):
return self.wrapper.build(path=dockerfile_path, tag=tag)
else:
dockerfile_path = os.path.expanduser(dockerfile_path)
with open(dockerfile_path, 'r') as fp:
return self.wrapper.build(fileobj=fp, tag=tag)
|
<commit_before>import os
from lib.base import DockerBasePythonAction
__all__ = [
'DockerBuildImageAction'
]
class DockerBuildImageAction(DockerBasePythonAction):
def run(self, dockerfile_path, tag):
if os.path.isdir(dockerfile_path):
return self.wrapper.build(path=dockerfile_path, tag=tag)
else:
with open(dockerfile_path, 'r') as fp:
return self.wrapper.build(fileobj=fp, tag=tag)
<commit_msg>Expand user in the path.<commit_after>import os
from lib.base import DockerBasePythonAction
__all__ = [
'DockerBuildImageAction'
]
class DockerBuildImageAction(DockerBasePythonAction):
def run(self, dockerfile_path, tag):
if os.path.isdir(dockerfile_path):
return self.wrapper.build(path=dockerfile_path, tag=tag)
else:
dockerfile_path = os.path.expanduser(dockerfile_path)
with open(dockerfile_path, 'r') as fp:
return self.wrapper.build(fileobj=fp, tag=tag)
|
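A minimal sketch of the behaviour this fix relies on: os.path.expanduser rewrites a leading "~" to the current user's home directory and passes other paths through unchanged, so it is safe to apply before open(). The Dockerfile paths below are illustrative.

import os

# "~" is rewritten to the current user's home directory; absolute paths
# pass through unchanged, so the call can be applied unconditionally.
assert os.path.expanduser("/tmp/Dockerfile") == "/tmp/Dockerfile"
print(os.path.expanduser("~/Dockerfile"))  # e.g. /home/alice/Dockerfile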
7f2700ee4b6aafed259d78affef462197194d2fc
|
usecase/spider_limit.py
|
usecase/spider_limit.py
|
# coding: utf-8
import setup_script
from grab.spider import Spider, Task
import logging
class TestSpider(Spider):
def task_generator(self):
yield Task('initial', url='http://google.com:89/',
network_try_count=9)
def task_initial(self):
print 'done'
logging.basicConfig(level=logging.DEBUG)
bot = TestSpider(network_try_limit=10)
bot.setup_grab(timeout=1)
bot.run()
|
Add use case of spider with limits
|
Add use case of spider with limits
|
Python
|
mit
|
kevinlondon/grab,SpaceAppsXploration/grab,liorvh/grab,subeax/grab,alihalabyah/grab,pombredanne/grab-1,SpaceAppsXploration/grab,subeax/grab,codevlabs/grab,giserh/grab,istinspring/grab,istinspring/grab,DDShadoww/grab,shaunstanislaus/grab,huiyi1990/grab,huiyi1990/grab,giserh/grab,maurobaraldi/grab,lorien/grab,pombredanne/grab-1,raybuhr/grab,codevlabs/grab,lorien/grab,shaunstanislaus/grab,alihalabyah/grab,subeax/grab,raybuhr/grab,DDShadoww/grab,maurobaraldi/grab,liorvh/grab,kevinlondon/grab
|
Add use case of spider with limits
|
# coding: utf-8
import setup_script
from grab.spider import Spider, Task
import logging
class TestSpider(Spider):
def task_generator(self):
yield Task('initial', url='http://google.com:89/',
network_try_count=9)
def task_initial(self):
print 'done'
logging.basicConfig(level=logging.DEBUG)
bot = TestSpider(network_try_limit=10)
bot.setup_grab(timeout=1)
bot.run()
|
<commit_before><commit_msg>Add use case of spider with limits<commit_after>
|
# coding: utf-8
import setup_script
from grab.spider import Spider, Task
import logging
class TestSpider(Spider):
def task_generator(self):
yield Task('initial', url='http://google.com:89/',
network_try_count=9)
def task_initial(self):
print 'done'
logging.basicConfig(level=logging.DEBUG)
bot = TestSpider(network_try_limit=10)
bot.setup_grab(timeout=1)
bot.run()
|
Add use case of spider with limits# coding: utf-8
import setup_script
from grab.spider import Spider, Task
import logging
class TestSpider(Spider):
def task_generator(self):
yield Task('initial', url='http://google.com:89/',
network_try_count=9)
def task_initial(self):
print 'done'
logging.basicConfig(level=logging.DEBUG)
bot = TestSpider(network_try_limit=10)
bot.setup_grab(timeout=1)
bot.run()
|
<commit_before><commit_msg>Add use case of spider with limits<commit_after># coding: utf-8
import setup_script
from grab.spider import Spider, Task
import logging
class TestSpider(Spider):
def task_generator(self):
yield Task('initial', url='http://google.com:89/',
network_try_count=9)
def task_initial(self):
print 'done'
logging.basicConfig(level=logging.DEBUG)
bot = TestSpider(network_try_limit=10)
bot.setup_grab(timeout=1)
bot.run()
|
|
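The retry budget this use case exercises (a per-task counter starting at 9 against a spider-wide limit of 10, leaving exactly one network attempt) can be sketched without Grab; fetch_with_retries below is an illustrative stand-in, not Grab's API.

import urllib.request

def fetch_with_retries(url, try_count=9, try_limit=10, timeout=1):
    # Mirrors Task(network_try_count=9) against network_try_limit=10:
    # exactly one network attempt remains before the task is dropped.
    while try_count < try_limit:
        try_count += 1
        try:
            return urllib.request.urlopen(url, timeout=timeout).read()
        except OSError:  # URLError and socket.timeout both subclass OSError
            continue
    return None

print(fetch_with_retries("http://google.com:89/") is None)  # True once the attempt times out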
7606021f677955967fc0a21d962d3638fd4d0cbf
|
scripts/dbdata.py
|
scripts/dbdata.py
|
#!/usr/bin/env python
"""
Script to fetch DBpedia data
"""
import sys, time
from urllib.request import urlopen
from urllib.parse import unquote
import json
def main():
for line in sys.stdin.readlines():
line = line.strip()
norm = unquote(line)
url = line.replace('/resource/', '/data/') + '.json'
time.sleep(1)
# print(url)
try:
resp = urlopen(url)
if resp.code == 200:
data = json.loads(resp.read())[norm]
print(json.dumps({'coverage': line,
'lon': data['http://www.w3.org/2003/01/geo/wgs84_pos#long'][0]['value'],
'lat': data['http://www.w3.org/2003/01/geo/wgs84_pos#lat'][0]['value']},
sort_keys=True))
except:
print(json.dumps({'coverage': line}))
if __name__ == "__main__":
main()
|
Add script to fetch lon/lat from DBPedia.
|
Add script to fetch lon/lat from DBPedia.
|
Python
|
apache-2.0
|
ViralTexts/vt-passim,ViralTexts/vt-passim,ViralTexts/vt-passim
|
Add script to fetch lon/lat from DBPedia.
|
#!/usr/bin/env python
"""
Script to fetch DBpedia data
"""
import sys, time
from urllib.request import urlopen
from urllib.parse import unquote
import json
def main():
for line in sys.stdin.readlines():
line = line.strip()
norm = unquote(line)
url = line.replace('/resource/', '/data/') + '.json'
time.sleep(1)
# print(url)
try:
resp = urlopen(url)
if resp.code == 200:
data = json.loads(resp.read())[norm]
print(json.dumps({'coverage': line,
'lon': data['http://www.w3.org/2003/01/geo/wgs84_pos#long'][0]['value'],
'lat': data['http://www.w3.org/2003/01/geo/wgs84_pos#lat'][0]['value']},
sort_keys=True))
except:
print(json.dumps({'coverage': line}))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to fetch lon/lat from DBPedia.<commit_after>
|
#!/usr/bin/env python
"""
Script to fetch DBpedia data
"""
import sys, time
from urllib.request import urlopen
from urllib.parse import unquote
import json
def main():
for line in sys.stdin.readlines():
line = line.strip()
norm = unquote(line)
url = line.replace('/resource/', '/data/') + '.json'
time.sleep(1)
# print(url)
try:
resp = urlopen(url)
if resp.code == 200:
data = json.loads(resp.read())[norm]
print(json.dumps({'coverage': line,
'lon': data['http://www.w3.org/2003/01/geo/wgs84_pos#long'][0]['value'],
'lat': data['http://www.w3.org/2003/01/geo/wgs84_pos#lat'][0]['value']},
sort_keys=True))
except:
print(json.dumps({'coverage': line}))
if __name__ == "__main__":
main()
|
Add script to fetch lon/lat from DBPedia.#!/usr/bin/env python
"""
Script to fetch DBpedia data
"""
import sys, time
from urllib.request import urlopen
from urllib.parse import unquote
import json
def main():
for line in sys.stdin.readlines():
line = line.strip()
norm = unquote(line)
url = line.replace('/resource/', '/data/') + '.json'
time.sleep(1)
# print(url)
try:
resp = urlopen(url)
if resp.code == 200:
data = json.loads(resp.read())[norm]
print(json.dumps({'coverage': line,
'lon': data['http://www.w3.org/2003/01/geo/wgs84_pos#long'][0]['value'],
'lat': data['http://www.w3.org/2003/01/geo/wgs84_pos#lat'][0]['value']},
sort_keys=True))
except:
print(json.dumps({'coverage': line}))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to fetch lon/lat from DBPedia.<commit_after>#!/usr/bin/env python
"""
Script to fetch DBpedia data
"""
import sys, time
from urllib.request import urlopen
from urllib.parse import unquote
import json
def main():
for line in sys.stdin.readlines():
line = line.strip()
norm = unquote(line)
url = line.replace('/resource/', '/data/') + '.json'
time.sleep(1)
# print(url)
try:
resp = urlopen(url)
if resp.code == 200:
data = json.loads(resp.read())[norm]
print(json.dumps({'coverage': line,
'lon': data['http://www.w3.org/2003/01/geo/wgs84_pos#long'][0]['value'],
'lat': data['http://www.w3.org/2003/01/geo/wgs84_pos#lat'][0]['value']},
sort_keys=True))
except:
print(json.dumps({'coverage': line}))
if __name__ == "__main__":
main()
|
|
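The URL rewrite at the core of the script, turning a DBpedia resource URI into its JSON data endpoint and deriving the key used to index the response, is plain string surgery. The resource below is an example, assuming the standard dbpedia.org layout.

from urllib.parse import unquote

resource = "http://dbpedia.org/resource/Boston%2C_Massachusetts"
data_url = resource.replace('/resource/', '/data/') + '.json'  # endpoint fetched
norm = unquote(resource)  # key under which the returned JSON document is indexed

print(data_url)  # http://dbpedia.org/data/Boston%2C_Massachusetts.json
print(norm)      # http://dbpedia.org/resource/Boston,_Massachusetts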
9cf0703f20f47143385260a6b63189f1c780f73e
|
tempest/tests/test_imports.py
|
tempest/tests/test_imports.py
|
# Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from tempest.tests import base
class ConfCounter(object):
def __init__(self, *args, **kwargs):
self.count = 0
def __getattr__(self, key):
self.count += 1
return mock.MagicMock()
def get_counts(self):
return self.count
class TestImports(base.TestCase):
def setUp(self):
super(TestImports, self).setUp()
self.conf_mock = self.patch('tempest.config.CONF',
new_callable=ConfCounter)
def test_account_generator_command_import(self):
from tempest.cmd import account_generator # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_cleanup_command_import(self):
from tempest.cmd import cleanup # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_init_command_import(self):
from tempest.cmd import init # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_list_plugins_command_import(self):
from tempest.cmd import list_plugins # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_run_command_import(self):
from tempest.cmd import run # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_subunit_descibe_command_import(self):
from tempest.cmd import subunit_describe_calls # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_verify_tempest_config_command_import(self):
from tempest.cmd import verify_tempest_config # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_workspace_command_import(self):
from tempest.cmd import workspace # noqa
self.assertEqual(0, self.conf_mock.get_counts())
|
Add unit tests to check for CONF getattr during import
|
Add unit tests to check for CONF getattr during import
Since the early days in tempest we've been fighting getattrs on CONF
during imports. We're able to get around this during test runs by lazy
loading the conf file. However, in things like the tempest commands this
doesn't work because we rely on the config file not being parsed to set
the config file path. This commit adds unit tests to check the import
of the command files for getattrs on CONF. This should prevent future
regressions.
While not strictly necessary because of the lazy loading this also gives
a framework to potentially address the CONF getattr on discovery. The
first revision of this patch includes the discovery test, for reference.
But we have 212 cases of getattr during import (which includes lots of
skip decorators) so it's unlikely to change any time soon.
Change-Id: Ib2c15dbd06ca810cc899258758cc8a297055fdf8
Closes-Bug: #1726357
|
Python
|
apache-2.0
|
Juniper/tempest,cisco-openstack/tempest,openstack/tempest,openstack/tempest,cisco-openstack/tempest,Juniper/tempest,masayukig/tempest,masayukig/tempest
|
Add unit tests to check for CONF getattr during import
Since the early days in tempest we've been fighting getattrs on CONF
during imports. We're able to get around this during test runs by lazy
loading the conf file. However, in things like the tempest commands this
doesn't work because we rely on the config file not being parsed to set
the config file path. This commit adds unit tests to check the import
of the command files for getattrs on CONF. This should prevent future
regressions.
While not strictly necessary because of the lazy loading this also gives
a framework to potentially address the CONF getattr on discovery. The
first revision of this patch includes the discovery test, for reference.
But we have 212 cases of getattr during import (which includes lots of
skip decorators) so it's unlikely to change any time soon.
Change-Id: Ib2c15dbd06ca810cc899258758cc8a297055fdf8
Closes-Bug: #1726357
|
# Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from tempest.tests import base
class ConfCounter(object):
def __init__(self, *args, **kwargs):
self.count = 0
def __getattr__(self, key):
self.count += 1
return mock.MagicMock()
def get_counts(self):
return self.count
class TestImports(base.TestCase):
def setUp(self):
super(TestImports, self).setUp()
self.conf_mock = self.patch('tempest.config.CONF',
new_callable=ConfCounter)
def test_account_generator_command_import(self):
from tempest.cmd import account_generator # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_cleanup_command_import(self):
from tempest.cmd import cleanup # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_init_command_import(self):
from tempest.cmd import init # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_list_plugins_command_import(self):
from tempest.cmd import list_plugins # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_run_command_import(self):
from tempest.cmd import run # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_subunit_descibe_command_import(self):
from tempest.cmd import subunit_describe_calls # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_verify_tempest_config_command_import(self):
from tempest.cmd import verify_tempest_config # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_workspace_command_import(self):
from tempest.cmd import workspace # noqa
self.assertEqual(0, self.conf_mock.get_counts())
|
<commit_before><commit_msg>Add unit tests to check for CONF getattr during import
Since the early days in tempest we've been fighting getattrs on CONF
during imports. We're able to get around this during test runs by lazy
loading the conf file. However, in things like the tempest commands this
doesn't work because we rely on the config file not being parsed to set
the config file path. This commit adds unit tests to check the import
of the command files for getattrs on CONF. This should prevent future
regressions.
While not strictly necessary because of the lazy loading this also gives
a framework to potentially address the CONF getattr on discovery. The
first revision of this patch includes the discovery test, for reference.
But we have 212 cases of getattr during import (which includes lots of
skip decorators) so it's unlikely to change any time soon.
Change-Id: Ib2c15dbd06ca810cc899258758cc8a297055fdf8
Closes-Bug: #1726357<commit_after>
|
# Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from tempest.tests import base
class ConfCounter(object):
def __init__(self, *args, **kwargs):
self.count = 0
def __getattr__(self, key):
self.count += 1
return mock.MagicMock()
def get_counts(self):
return self.count
class TestImports(base.TestCase):
def setUp(self):
super(TestImports, self).setUp()
self.conf_mock = self.patch('tempest.config.CONF',
new_callable=ConfCounter)
def test_account_generator_command_import(self):
from tempest.cmd import account_generator # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_cleanup_command_import(self):
from tempest.cmd import cleanup # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_init_command_import(self):
from tempest.cmd import init # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_list_plugins_command_import(self):
from tempest.cmd import list_plugins # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_run_command_import(self):
from tempest.cmd import run # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_subunit_descibe_command_import(self):
from tempest.cmd import subunit_describe_calls # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_verify_tempest_config_command_import(self):
from tempest.cmd import verify_tempest_config # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_workspace_command_import(self):
from tempest.cmd import workspace # noqa
self.assertEqual(0, self.conf_mock.get_counts())
|
Add unit tests to check for CONF getattr during import
Since the early days in tempest we've been fighting getattrs on CONF
during imports. We're able to get around this during test runs by lazy
loading the conf file. However, in things like the tempest commands this
doesn't work because we rely on the config file not being parsed to set
the config file path. This commit adds unit tests to check the import
of the command files for getattrs on CONF. This should prevent future
regressions.
While not strictly necessary because of the lazy loading this also gives
a framework to potentially address the CONF getattr on discovery. The
first revision of this patch includes the discovery test, for reference.
But we have 212 cases of getattr during import (which includes lots of
skip decorators) so it's unlikely to change any time soon.
Change-Id: Ib2c15dbd06ca810cc899258758cc8a297055fdf8
Closes-Bug: #1726357# Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from tempest.tests import base
class ConfCounter(object):
def __init__(self, *args, **kwargs):
self.count = 0
def __getattr__(self, key):
self.count += 1
return mock.MagicMock()
def get_counts(self):
return self.count
class TestImports(base.TestCase):
def setUp(self):
super(TestImports, self).setUp()
self.conf_mock = self.patch('tempest.config.CONF',
new_callable=ConfCounter)
def test_account_generator_command_import(self):
from tempest.cmd import account_generator # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_cleanup_command_import(self):
from tempest.cmd import cleanup # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_init_command_import(self):
from tempest.cmd import init # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_list_plugins_command_import(self):
from tempest.cmd import list_plugins # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_run_command_import(self):
from tempest.cmd import run # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_subunit_descibe_command_import(self):
from tempest.cmd import subunit_describe_calls # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_verify_tempest_config_command_import(self):
from tempest.cmd import verify_tempest_config # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_workspace_command_import(self):
from tempest.cmd import workspace # noqa
self.assertEqual(0, self.conf_mock.get_counts())
|
<commit_before><commit_msg>Add unit tests to check for CONF getattr during import
Since the early days in tempest we've been fighting getattrs on CONF
during imports. We're able to get around this during test runs by lazy
loading the conf file. However, in things like the tempest commands this
doesn't work because we rely on the config file not being parsed to set
the config file path. This commit adds unit tests to check the import
of the command files for getattrs on CONF. This should prevent future
regressions.
While not strictly necessary because of the lazy loading this also gives
a framework to potentially address the CONF getattr on discovery. The
first revision of this patch includes the discovery test, for reference.
But we have 212 cases of getattr during import (which includes lots of
skip decorators) so it's unlikely to change any time soon.
Change-Id: Ib2c15dbd06ca810cc899258758cc8a297055fdf8
Closes-Bug: #1726357<commit_after># Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from tempest.tests import base
class ConfCounter(object):
def __init__(self, *args, **kwargs):
self.count = 0
def __getattr__(self, key):
self.count += 1
return mock.MagicMock()
def get_counts(self):
return self.count
class TestImports(base.TestCase):
def setUp(self):
super(TestImports, self).setUp()
self.conf_mock = self.patch('tempest.config.CONF',
new_callable=ConfCounter)
def test_account_generator_command_import(self):
from tempest.cmd import account_generator # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_cleanup_command_import(self):
from tempest.cmd import cleanup # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_init_command_import(self):
from tempest.cmd import init # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_list_plugins_command_import(self):
from tempest.cmd import list_plugins # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_run_command_import(self):
from tempest.cmd import run # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_subunit_descibe_command_import(self):
from tempest.cmd import subunit_describe_calls # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_verify_tempest_config_command_import(self):
from tempest.cmd import verify_tempest_config # noqa
self.assertEqual(0, self.conf_mock.get_counts())
def test_workspace_command_import(self):
from tempest.cmd import workspace # noqa
self.assertEqual(0, self.conf_mock.get_counts())
|
|
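The mechanism these tests lean on is that Python invokes __getattr__ only for attributes not found through normal lookup, so a counting stand-in for CONF sees exactly the top-level option accesses made at import time; a self-contained sketch:

from unittest import mock

class Counter:
    def __init__(self):
        self.count = 0  # found by normal lookup, so never counted

    def __getattr__(self, name):
        # Reached only for missing attributes, i.e. simulated CONF options.
        self.count += 1
        return mock.MagicMock()  # absorbs any chained attribute access

conf = Counter()
conf.compute.build_timeout  # counts once, for the top-level 'compute'
conf.identity.region        # counts once more
print(conf.count)           # 2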
d5db2a91729671e7a47e8c7d442ab76b697dc58f
|
tests/cupy_tests/sparse_tests/test_base.py
|
tests/cupy_tests/sparse_tests/test_base.py
|
import unittest
import scipy.sparse
import cupy.sparse
from cupy import testing
class DummySparseCPU(scipy.sparse.spmatrix):
def __init__(self, maxprint=50, shape=None, nnz=0):
super(DummySparseCPU, self).__init__(maxprint)
self._shape = shape
self._nnz = nnz
def getnnz(self):
return self._nnz
class DummySparseGPU(cupy.sparse.spmatrix):
def __init__(self, maxprint=50, shape=None, nnz=0):
super(DummySparseGPU, self).__init__(maxprint)
self._shape = shape
self._nnz = nnz
def get_shape(self):
return self._shape
def getnnz(self):
return self._nnz
dummies = {
scipy.sparse: DummySparseCPU,
cupy.sparse: DummySparseGPU,
}
class TestSpmatrix(unittest.TestCase):
@testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)
def test_instantiation(self, xp, sp):
sp.spmatrix()
@testing.numpy_cupy_raises(sp_name='sp', accept_error=TypeError)
def test_len(self, xp, sp):
s = dummies[sp]()
len(s)
@testing.numpy_cupy_equal(sp_name='sp')
def test_bool_true(self, xp, sp):
s = dummies[sp](shape=(1, 1), nnz=1)
return bool(s)
@testing.numpy_cupy_equal(sp_name='sp')
def test_bool_false(self, xp, sp):
s = dummies[sp](shape=(1, 1), nnz=0)
return bool(s)
@testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)
    def test_bool_raises(self, xp, sp):
s = dummies[sp](shape=(2, 1))
bool(s)
@testing.numpy_cupy_equal(sp_name='sp')
def test_asformat_none(self, xp, sp):
s = dummies[sp]()
self.assertIs(s.asformat(None), s)
@testing.numpy_cupy_equal(sp_name='sp')
def test_maxprint(self, xp, sp):
s = dummies[sp](maxprint=30)
return s.getmaxprint()
|
Add test for base spmatrix
|
Add test for base spmatrix
|
Python
|
mit
|
cupy/cupy,cupy/cupy,cupy/cupy,cupy/cupy
|
Add test for base spmatrix
|
import unittest
import scipy.sparse
import cupy.sparse
from cupy import testing
class DummySparseCPU(scipy.sparse.spmatrix):
def __init__(self, maxprint=50, shape=None, nnz=0):
super(DummySparseCPU, self).__init__(maxprint)
self._shape = shape
self._nnz = nnz
def getnnz(self):
return self._nnz
class DummySparseGPU(cupy.sparse.spmatrix):
def __init__(self, maxprint=50, shape=None, nnz=0):
super(DummySparseGPU, self).__init__(maxprint)
self._shape = shape
self._nnz = nnz
def get_shape(self):
return self._shape
def getnnz(self):
return self._nnz
dummies = {
scipy.sparse: DummySparseCPU,
cupy.sparse: DummySparseGPU,
}
class TestSpmatrix(unittest.TestCase):
@testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)
def test_instantiation(self, xp, sp):
sp.spmatrix()
@testing.numpy_cupy_raises(sp_name='sp', accept_error=TypeError)
def test_len(self, xp, sp):
s = dummies[sp]()
len(s)
@testing.numpy_cupy_equal(sp_name='sp')
def test_bool_true(self, xp, sp):
s = dummies[sp](shape=(1, 1), nnz=1)
return bool(s)
@testing.numpy_cupy_equal(sp_name='sp')
def test_bool_false(self, xp, sp):
s = dummies[sp](shape=(1, 1), nnz=0)
return bool(s)
@testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)
    def test_bool_raises(self, xp, sp):
s = dummies[sp](shape=(2, 1))
bool(s)
@testing.numpy_cupy_equal(sp_name='sp')
def test_asformat_none(self, xp, sp):
s = dummies[sp]()
self.assertIs(s.asformat(None), s)
@testing.numpy_cupy_equal(sp_name='sp')
def test_maxprint(self, xp, sp):
s = dummies[sp](maxprint=30)
return s.getmaxprint()
|
<commit_before><commit_msg>Add test for base spmatrix<commit_after>
|
import unittest
import scipy.sparse
import cupy.sparse
from cupy import testing
class DummySparseCPU(scipy.sparse.spmatrix):
def __init__(self, maxprint=50, shape=None, nnz=0):
super(DummySparseCPU, self).__init__(maxprint)
self._shape = shape
self._nnz = nnz
def getnnz(self):
return self._nnz
class DummySparseGPU(cupy.sparse.spmatrix):
def __init__(self, maxprint=50, shape=None, nnz=0):
super(DummySparseGPU, self).__init__(maxprint)
self._shape = shape
self._nnz = nnz
def get_shape(self):
return self._shape
def getnnz(self):
return self._nnz
dummies = {
scipy.sparse: DummySparseCPU,
cupy.sparse: DummySparseGPU,
}
class TestSpmatrix(unittest.TestCase):
@testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)
def test_instantiation(self, xp, sp):
sp.spmatrix()
@testing.numpy_cupy_raises(sp_name='sp', accept_error=TypeError)
def test_len(self, xp, sp):
s = dummies[sp]()
len(s)
@testing.numpy_cupy_equal(sp_name='sp')
def test_bool_true(self, xp, sp):
s = dummies[sp](shape=(1, 1), nnz=1)
return bool(s)
@testing.numpy_cupy_equal(sp_name='sp')
def test_bool_false(self, xp, sp):
s = dummies[sp](shape=(1, 1), nnz=0)
return bool(s)
@testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)
    def test_bool_raises(self, xp, sp):
s = dummies[sp](shape=(2, 1))
bool(s)
@testing.numpy_cupy_equal(sp_name='sp')
def test_asformat_none(self, xp, sp):
s = dummies[sp]()
self.assertIs(s.asformat(None), s)
@testing.numpy_cupy_equal(sp_name='sp')
def test_maxprint(self, xp, sp):
s = dummies[sp](maxprint=30)
return s.getmaxprint()
|
Add test for base spmatriximport unittest
import scipy.sparse
import cupy.sparse
from cupy import testing
class DummySparseCPU(scipy.sparse.spmatrix):
def __init__(self, maxprint=50, shape=None, nnz=0):
super(DummySparseCPU, self).__init__(maxprint)
self._shape = shape
self._nnz = nnz
def getnnz(self):
return self._nnz
class DummySparseGPU(cupy.sparse.spmatrix):
def __init__(self, maxprint=50, shape=None, nnz=0):
super(DummySparseGPU, self).__init__(maxprint)
self._shape = shape
self._nnz = nnz
def get_shape(self):
return self._shape
def getnnz(self):
return self._nnz
dummies = {
scipy.sparse: DummySparseCPU,
cupy.sparse: DummySparseGPU,
}
class TestSpmatrix(unittest.TestCase):
@testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)
def test_instantiation(self, xp, sp):
sp.spmatrix()
@testing.numpy_cupy_raises(sp_name='sp', accept_error=TypeError)
def test_len(self, xp, sp):
s = dummies[sp]()
len(s)
@testing.numpy_cupy_equal(sp_name='sp')
def test_bool_true(self, xp, sp):
s = dummies[sp](shape=(1, 1), nnz=1)
return bool(s)
@testing.numpy_cupy_equal(sp_name='sp')
def test_bool_false(self, xp, sp):
s = dummies[sp](shape=(1, 1), nnz=0)
return bool(s)
@testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)
    def test_bool_raises(self, xp, sp):
s = dummies[sp](shape=(2, 1))
bool(s)
@testing.numpy_cupy_equal(sp_name='sp')
def test_asformat_none(self, xp, sp):
s = dummies[sp]()
self.assertIs(s.asformat(None), s)
@testing.numpy_cupy_equal(sp_name='sp')
def test_maxprint(self, xp, sp):
s = dummies[sp](maxprint=30)
return s.getmaxprint()
|
<commit_before><commit_msg>Add test for base spmatrix<commit_after>import unittest
import scipy.sparse
import cupy.sparse
from cupy import testing
class DummySparseCPU(scipy.sparse.spmatrix):
def __init__(self, maxprint=50, shape=None, nnz=0):
super(DummySparseCPU, self).__init__(maxprint)
self._shape = shape
self._nnz = nnz
def getnnz(self):
return self._nnz
class DummySparseGPU(cupy.sparse.spmatrix):
def __init__(self, maxprint=50, shape=None, nnz=0):
super(DummySparseGPU, self).__init__(maxprint)
self._shape = shape
self._nnz = nnz
def get_shape(self):
return self._shape
def getnnz(self):
return self._nnz
dummies = {
scipy.sparse: DummySparseCPU,
cupy.sparse: DummySparseGPU,
}
class TestSpmatrix(unittest.TestCase):
@testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)
def test_instantiation(self, xp, sp):
sp.spmatrix()
@testing.numpy_cupy_raises(sp_name='sp', accept_error=TypeError)
def test_len(self, xp, sp):
s = dummies[sp]()
len(s)
@testing.numpy_cupy_equal(sp_name='sp')
def test_bool_true(self, xp, sp):
s = dummies[sp](shape=(1, 1), nnz=1)
return bool(s)
@testing.numpy_cupy_equal(sp_name='sp')
def test_bool_false(self, xp, sp):
s = dummies[sp](shape=(1, 1), nnz=0)
return bool(s)
@testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)
    def test_bool_raises(self, xp, sp):
s = dummies[sp](shape=(2, 1))
bool(s)
@testing.numpy_cupy_equal(sp_name='sp')
def test_asformat_none(self, xp, sp):
s = dummies[sp]()
self.assertIs(s.asformat(None), s)
@testing.numpy_cupy_equal(sp_name='sp')
def test_maxprint(self, xp, sp):
s = dummies[sp](maxprint=30)
return s.getmaxprint()
|
|
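The bool() cases above follow the SciPy rule the GPU class is expected to reproduce: truthiness is defined only for a 1x1 matrix, where nnz decides it, and anything larger raises ValueError. A plain-Python sketch of that rule, independent of SciPy and CuPy:

def sparse_bool(shape, nnz):
    # Unambiguous only when the matrix holds a single element.
    if shape == (1, 1):
        return nnz != 0
    raise ValueError("The truth value of an array with more than one "
                     "element is ambiguous.")

print(sparse_bool((1, 1), nnz=1))  # True
print(sparse_bool((1, 1), nnz=0))  # False
# sparse_bool((2, 1), nnz=0) raises ValueError, matching the last bool test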
7bdc8dfaabdee59d1961a390418ae6aafe0f9e62
|
platform_tools/android/tradefed/upload_dm_results.py
|
platform_tools/android/tradefed/upload_dm_results.py
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Upload DM output PNG files and JSON summary to Google Storage."""
import datetime
import os
import shutil
import sys
import tempfile
def main(dm_dir, build_number, builder_name):
"""Upload DM output PNG files and JSON summary to Google Storage.
dm_dir: path to PNG files and JSON summary (str)
build_number: nth build on this builder (str or int)
builder_name: name of this builder (str)
"""
# import gs_utils
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(current_dir, "../../../common/py/utils"))
import gs_utils
# Private, but Google-readable.
ACL = gs_utils.GSUtils.PredefinedACL.PRIVATE
FINE_ACLS = [(
gs_utils.GSUtils.IdType.GROUP_BY_DOMAIN,
'google.com',
gs_utils.GSUtils.Permission.READ
)]
# Move dm.json to its own directory to make uploading it easier.
tmp = tempfile.mkdtemp()
shutil.move(os.path.join(dm_dir, 'dm.json'),
os.path.join(tmp, 'dm.json'))
# Only images are left in dm_dir. Upload any new ones.
gs = gs_utils.GSUtils()
gs.upload_dir_contents(dm_dir,
'skia-android-dm',
'dm-images-v1',
upload_if = gs.UploadIf.IF_NEW,
predefined_acl = ACL,
fine_grained_acl_list = FINE_ACLS)
# /dm-json-v1/year/month/day/hour/build-number/builder/dm.json
now = datetime.datetime.utcnow()
summary_dest_dir = '/'.join(['dm-json-v1',
str(now.year ).zfill(4),
str(now.month).zfill(2),
str(now.day ).zfill(2),
str(now.hour ).zfill(2),
str(build_number),
builder_name])
# Upload the JSON summary.
gs.upload_dir_contents(tmp,
'skia-android-dm',
summary_dest_dir,
predefined_acl = ACL,
fine_grained_acl_list = FINE_ACLS)
# Just for hygiene, put dm.json back.
shutil.move(os.path.join(tmp, 'dm.json'),
os.path.join(dm_dir, 'dm.json'))
os.rmdir(tmp)
if '__main__' == __name__:
main(*sys.argv[1:])
|
Add custom dm upload script to be used by the android framework
|
Add custom dm upload script to be used by the android framework
Review URL: https://codereview.chromium.org/979153002
|
Python
|
bsd-3-clause
|
YUPlayGodDev/platform_external_skia,MarshedOut/android_external_skia,Infinitive-OS/platform_external_skia,noselhq/skia,PAC-ROM/android_external_skia,tmpvar/skia.cc,MinimalOS-AOSP/platform_external_skia,UBERMALLOW/external_skia,rubenvb/skia,ominux/skia,geekboxzone/mmallow_external_skia,qrealka/skia-hc,invisiblek/android_external_skia,pcwalton/skia,HalCanary/skia-hc,boulzordev/android_external_skia,google/skia,AOSP-YU/platform_external_skia,Jichao/skia,todotodoo/skia,nvoron23/skia,BrokenROM/external_skia,OneRom/external_skia,nfxosp/platform_external_skia,aosp-mirror/platform_external_skia,Hikari-no-Tenshi/android_external_skia,w3nd1go/android_external_skia,TeamExodus/external_skia,VRToxin-AOSP/android_external_skia,TeamTwisted/external_skia,AOSPB/external_skia,vanish87/skia,shahrzadmn/skia,timduru/platform-external-skia,amyvmiwei/skia,spezi77/android_external_skia,MonkeyZZZZ/platform_external_skia
|
Add custom dm upload script to be used by the android framework
Review URL: https://codereview.chromium.org/979153002
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Upload DM output PNG files and JSON summary to Google Storage."""
import datetime
import os
import shutil
import sys
import tempfile
def main(dm_dir, build_number, builder_name):
"""Upload DM output PNG files and JSON summary to Google Storage.
dm_dir: path to PNG files and JSON summary (str)
build_number: nth build on this builder (str or int)
builder_name: name of this builder (str)
"""
# import gs_utils
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(current_dir, "../../../common/py/utils"))
import gs_utils
# Private, but Google-readable.
ACL = gs_utils.GSUtils.PredefinedACL.PRIVATE
FINE_ACLS = [(
gs_utils.GSUtils.IdType.GROUP_BY_DOMAIN,
'google.com',
gs_utils.GSUtils.Permission.READ
)]
# Move dm.json to its own directory to make uploading it easier.
tmp = tempfile.mkdtemp()
shutil.move(os.path.join(dm_dir, 'dm.json'),
os.path.join(tmp, 'dm.json'))
# Only images are left in dm_dir. Upload any new ones.
gs = gs_utils.GSUtils()
gs.upload_dir_contents(dm_dir,
'skia-android-dm',
'dm-images-v1',
upload_if = gs.UploadIf.IF_NEW,
predefined_acl = ACL,
fine_grained_acl_list = FINE_ACLS)
# /dm-json-v1/year/month/day/hour/build-number/builder/dm.json
now = datetime.datetime.utcnow()
summary_dest_dir = '/'.join(['dm-json-v1',
str(now.year ).zfill(4),
str(now.month).zfill(2),
str(now.day ).zfill(2),
str(now.hour ).zfill(2),
str(build_number),
builder_name])
# Upload the JSON summary.
gs.upload_dir_contents(tmp,
'skia-android-dm',
summary_dest_dir,
predefined_acl = ACL,
fine_grained_acl_list = FINE_ACLS)
# Just for hygiene, put dm.json back.
shutil.move(os.path.join(tmp, 'dm.json'),
os.path.join(dm_dir, 'dm.json'))
os.rmdir(tmp)
if '__main__' == __name__:
main(*sys.argv[1:])
|
<commit_before><commit_msg>Add custom dm upload script to be used by the android framework
Review URL: https://codereview.chromium.org/979153002<commit_after>
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Upload DM output PNG files and JSON summary to Google Storage."""
import datetime
import os
import shutil
import sys
import tempfile
def main(dm_dir, build_number, builder_name):
"""Upload DM output PNG files and JSON summary to Google Storage.
dm_dir: path to PNG files and JSON summary (str)
build_number: nth build on this builder (str or int)
builder_name: name of this builder (str)
"""
# import gs_utils
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(current_dir, "../../../common/py/utils"))
import gs_utils
# Private, but Google-readable.
ACL = gs_utils.GSUtils.PredefinedACL.PRIVATE
FINE_ACLS = [(
gs_utils.GSUtils.IdType.GROUP_BY_DOMAIN,
'google.com',
gs_utils.GSUtils.Permission.READ
)]
# Move dm.json to its own directory to make uploading it easier.
tmp = tempfile.mkdtemp()
shutil.move(os.path.join(dm_dir, 'dm.json'),
os.path.join(tmp, 'dm.json'))
# Only images are left in dm_dir. Upload any new ones.
gs = gs_utils.GSUtils()
gs.upload_dir_contents(dm_dir,
'skia-android-dm',
'dm-images-v1',
upload_if = gs.UploadIf.IF_NEW,
predefined_acl = ACL,
fine_grained_acl_list = FINE_ACLS)
# /dm-json-v1/year/month/day/hour/build-number/builder/dm.json
now = datetime.datetime.utcnow()
summary_dest_dir = '/'.join(['dm-json-v1',
str(now.year ).zfill(4),
str(now.month).zfill(2),
str(now.day ).zfill(2),
str(now.hour ).zfill(2),
str(build_number),
builder_name])
# Upload the JSON summary.
gs.upload_dir_contents(tmp,
'skia-android-dm',
summary_dest_dir,
predefined_acl = ACL,
fine_grained_acl_list = FINE_ACLS)
# Just for hygiene, put dm.json back.
shutil.move(os.path.join(tmp, 'dm.json'),
os.path.join(dm_dir, 'dm.json'))
os.rmdir(tmp)
if '__main__' == __name__:
main(*sys.argv[1:])
|
Add custom dm upload script to be used by the android framework
Review URL: https://codereview.chromium.org/979153002#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Upload DM output PNG files and JSON summary to Google Storage."""
import datetime
import os
import shutil
import sys
import tempfile
def main(dm_dir, build_number, builder_name):
"""Upload DM output PNG files and JSON summary to Google Storage.
dm_dir: path to PNG files and JSON summary (str)
build_number: nth build on this builder (str or int)
builder_name: name of this builder (str)
"""
# import gs_utils
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(current_dir, "../../../common/py/utils"))
import gs_utils
# Private, but Google-readable.
ACL = gs_utils.GSUtils.PredefinedACL.PRIVATE
FINE_ACLS = [(
gs_utils.GSUtils.IdType.GROUP_BY_DOMAIN,
'google.com',
gs_utils.GSUtils.Permission.READ
)]
# Move dm.json to its own directory to make uploading it easier.
tmp = tempfile.mkdtemp()
shutil.move(os.path.join(dm_dir, 'dm.json'),
os.path.join(tmp, 'dm.json'))
# Only images are left in dm_dir. Upload any new ones.
gs = gs_utils.GSUtils()
gs.upload_dir_contents(dm_dir,
'skia-android-dm',
'dm-images-v1',
upload_if = gs.UploadIf.IF_NEW,
predefined_acl = ACL,
fine_grained_acl_list = FINE_ACLS)
# /dm-json-v1/year/month/day/hour/build-number/builder/dm.json
now = datetime.datetime.utcnow()
summary_dest_dir = '/'.join(['dm-json-v1',
str(now.year ).zfill(4),
str(now.month).zfill(2),
str(now.day ).zfill(2),
str(now.hour ).zfill(2),
str(build_number),
builder_name])
# Upload the JSON summary.
gs.upload_dir_contents(tmp,
'skia-android-dm',
summary_dest_dir,
predefined_acl = ACL,
fine_grained_acl_list = FINE_ACLS)
# Just for hygiene, put dm.json back.
shutil.move(os.path.join(tmp, 'dm.json'),
os.path.join(dm_dir, 'dm.json'))
os.rmdir(tmp)
if '__main__' == __name__:
main(*sys.argv[1:])
|
<commit_before><commit_msg>Add custom dm upload script to be used by the android framework
Review URL: https://codereview.chromium.org/979153002<commit_after>#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Upload DM output PNG files and JSON summary to Google Storage."""
import datetime
import os
import shutil
import sys
import tempfile
def main(dm_dir, build_number, builder_name):
"""Upload DM output PNG files and JSON summary to Google Storage.
dm_dir: path to PNG files and JSON summary (str)
build_number: nth build on this builder (str or int)
builder_name: name of this builder (str)
"""
# import gs_utils
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(current_dir, "../../../common/py/utils"))
import gs_utils
# Private, but Google-readable.
ACL = gs_utils.GSUtils.PredefinedACL.PRIVATE
FINE_ACLS = [(
gs_utils.GSUtils.IdType.GROUP_BY_DOMAIN,
'google.com',
gs_utils.GSUtils.Permission.READ
)]
# Move dm.json to its own directory to make uploading it easier.
tmp = tempfile.mkdtemp()
shutil.move(os.path.join(dm_dir, 'dm.json'),
os.path.join(tmp, 'dm.json'))
# Only images are left in dm_dir. Upload any new ones.
gs = gs_utils.GSUtils()
gs.upload_dir_contents(dm_dir,
'skia-android-dm',
'dm-images-v1',
upload_if = gs.UploadIf.IF_NEW,
predefined_acl = ACL,
fine_grained_acl_list = FINE_ACLS)
# /dm-json-v1/year/month/day/hour/build-number/builder/dm.json
now = datetime.datetime.utcnow()
summary_dest_dir = '/'.join(['dm-json-v1',
str(now.year ).zfill(4),
str(now.month).zfill(2),
str(now.day ).zfill(2),
str(now.hour ).zfill(2),
str(build_number),
builder_name])
# Upload the JSON summary.
gs.upload_dir_contents(tmp,
'skia-android-dm',
summary_dest_dir,
predefined_acl = ACL,
fine_grained_acl_list = FINE_ACLS)
# Just for hygiene, put dm.json back.
shutil.move(os.path.join(tmp, 'dm.json'),
os.path.join(dm_dir, 'dm.json'))
os.rmdir(tmp)
if '__main__' == __name__:
main(*sys.argv[1:])
|
|
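The dated destination path the script assembles (dm-json-v1/year/month/day/hour/build-number/builder) needs nothing beyond datetime and zfill; the build number and builder name below are made up for illustration.

import datetime

now = datetime.datetime.utcnow()
summary_dest_dir = '/'.join(['dm-json-v1',
                             str(now.year).zfill(4),
                             str(now.month).zfill(2),
                             str(now.day).zfill(2),
                             str(now.hour).zfill(2),
                             '42',                    # hypothetical build number
                             'Test-Android-Nexus5'])  # hypothetical builder name
print(summary_dest_dir)  # e.g. dm-json-v1/2015/03/05/17/42/Test-Android-Nexus5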
e8f323207f8b59452a040aadd0e411dc5abdb218
|
tests/test_plugin_euronews.py
|
tests/test_plugin_euronews.py
|
import unittest
from streamlink.plugins.euronews import Euronews
class TestPluginEuronews(unittest.TestCase):
def test_can_handle_url(self):
# should match
self.assertTrue(Euronews.can_handle_url("http://www.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://fr.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://de.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://it.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://es.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://pt.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://ru.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://ua.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://tr.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://gr.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://hu.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://fa.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://arabic.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://www.euronews.com/2017/05/10/peugeot-expects-more-opel-losses-this-year"))
self.assertTrue(Euronews.can_handle_url("http://fr.euronews.com/2017/05/10/l-ag-de-psa-approuve-le-rachat-d-opel"))
# shouldn't match
self.assertFalse(Euronews.can_handle_url("http://www.tvcatchup.com/"))
self.assertFalse(Euronews.can_handle_url("http://www.youtube.com/"))
|
Add unit tests for Euronews plugin
|
Add unit tests for Euronews plugin
|
Python
|
bsd-2-clause
|
streamlink/streamlink,beardypig/streamlink,back-to/streamlink,gravyboat/streamlink,beardypig/streamlink,bastimeyer/streamlink,bastimeyer/streamlink,streamlink/streamlink,wlerin/streamlink,wlerin/streamlink,javiercantero/streamlink,melmorabity/streamlink,chhe/streamlink,back-to/streamlink,chhe/streamlink,melmorabity/streamlink,gravyboat/streamlink,javiercantero/streamlink
|
Add unit tests for Euronews plugin
|
import unittest
from streamlink.plugins.euronews import Euronews
class TestPluginEuronews(unittest.TestCase):
def test_can_handle_url(self):
# should match
self.assertTrue(Euronews.can_handle_url("http://www.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://fr.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://de.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://it.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://es.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://pt.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://ru.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://ua.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://tr.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://gr.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://hu.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://fa.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://arabic.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://www.euronews.com/2017/05/10/peugeot-expects-more-opel-losses-this-year"))
self.assertTrue(Euronews.can_handle_url("http://fr.euronews.com/2017/05/10/l-ag-de-psa-approuve-le-rachat-d-opel"))
# shouldn't match
self.assertFalse(Euronews.can_handle_url("http://www.tvcatchup.com/"))
self.assertFalse(Euronews.can_handle_url("http://www.youtube.com/"))
|
<commit_before><commit_msg>Add unit tests for Euronews plugin<commit_after>
|
import unittest
from streamlink.plugins.euronews import Euronews
class TestPluginEuronews(unittest.TestCase):
def test_can_handle_url(self):
# should match
self.assertTrue(Euronews.can_handle_url("http://www.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://fr.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://de.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://it.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://es.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://pt.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://ru.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://ua.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://tr.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://gr.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://hu.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://fa.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://arabic.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://www.euronews.com/2017/05/10/peugeot-expects-more-opel-losses-this-year"))
self.assertTrue(Euronews.can_handle_url("http://fr.euronews.com/2017/05/10/l-ag-de-psa-approuve-le-rachat-d-opel"))
# shouldn't match
self.assertFalse(Euronews.can_handle_url("http://www.tvcatchup.com/"))
self.assertFalse(Euronews.can_handle_url("http://www.youtube.com/"))
|
Add unit tests for Euronews pluginimport unittest
from streamlink.plugins.euronews import Euronews
class TestPluginEuronews(unittest.TestCase):
def test_can_handle_url(self):
# should match
self.assertTrue(Euronews.can_handle_url("http://www.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://fr.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://de.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://it.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://es.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://pt.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://ru.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://ua.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://tr.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://gr.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://hu.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://fa.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://arabic.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://www.euronews.com/2017/05/10/peugeot-expects-more-opel-losses-this-year"))
self.assertTrue(Euronews.can_handle_url("http://fr.euronews.com/2017/05/10/l-ag-de-psa-approuve-le-rachat-d-opel"))
# shouldn't match
self.assertFalse(Euronews.can_handle_url("http://www.tvcatchup.com/"))
self.assertFalse(Euronews.can_handle_url("http://www.youtube.com/"))
|
<commit_before><commit_msg>Add unit tests for Euronews plugin<commit_after>import unittest
from streamlink.plugins.euronews import Euronews
class TestPluginEuronews(unittest.TestCase):
def test_can_handle_url(self):
# should match
self.assertTrue(Euronews.can_handle_url("http://www.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://fr.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://de.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://it.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://es.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://pt.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://ru.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://ua.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://tr.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://gr.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://hu.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://fa.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://arabic.euronews.com/live"))
self.assertTrue(Euronews.can_handle_url("http://www.euronews.com/2017/05/10/peugeot-expects-more-opel-losses-this-year"))
self.assertTrue(Euronews.can_handle_url("http://fr.euronews.com/2017/05/10/l-ag-de-psa-approuve-le-rachat-d-opel"))
# shouldn't match
self.assertFalse(Euronews.can_handle_url("http://www.tvcatchup.com/"))
self.assertFalse(Euronews.can_handle_url("http://www.youtube.com/"))
|
|
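Note on the record above: the plugin under test presumably delegates can_handle_url to a scheme- and subdomain-tolerant URL regex. A minimal sketch of such a pattern (an assumption for illustration; the actual regex inside streamlink's Euronews plugin may differ):

import re

# Hypothetical matcher: euronews.com with any optional language subdomain.
_url_re = re.compile(r'https?://(\w+\.)?euronews\.com/')

assert _url_re.match('http://arabic.euronews.com/live')
assert _url_re.match('http://www.euronews.com/2017/05/10/peugeot-expects-more-opel-losses-this-year')
assert not _url_re.match('http://www.youtube.com/')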
1b0560da645ecadc2bd9d01dde77274afb8970ba
|
tools/parse_rtntrace_stack.py
|
tools/parse_rtntrace_stack.py
|
#!/usr/bin/env python
import sys, os, subprocess
def ex_ret(cmd):
return subprocess.Popen(cmd, stdout = subprocess.PIPE).communicate()[0]
def cppfilt(name):
return ex_ret([ 'c++filt', name ])
if len(sys.argv) > 1:
outputdir = sys.argv[1]
else:
outputdir = '.'
filename = os.path.join(outputdir, 'sim.rtntracefull')
if not os.path.exists(filename):
print >> sys.stderr, 'Cannot find trace file', filename
sys.exit(1)
fp = open(filename)
headers = fp.readline().strip().split('\t')
data = {}
functions = {}
for line in fp:
line = dict(zip(headers, line.strip().split('\t')))
data[line['eip']] = {'calls': long(line['calls']), 'time': long(line['core_elapsed_time'])/1e15, 'icount': long(line['instruction_count'])}
eip = line['eip'].split(':')[-1]
if eip not in functions:
functions[eip] = cppfilt(line['name']).strip()
for stack in sorted(data.keys()):
eip = stack.split(':')[-1]
print stack, functions[eip], data[stack]['calls'], data[stack]['time'], data[stack]['icount']
|
Add script to parse rtntracefull output into a call tree
|
[rtntracer] Add script to parse rtntracefull output into a call tree
|
Python
|
mit
|
abanaiyan/sniper,abanaiyan/sniper,abanaiyan/sniper,abanaiyan/sniper,abanaiyan/sniper
|
[rtntracer] Add script to parse rtntracefull output into a call tree
|
#!/usr/bin/env python
import sys, os, subprocess
def ex_ret(cmd):
return subprocess.Popen(cmd, stdout = subprocess.PIPE).communicate()[0]
def cppfilt(name):
return ex_ret([ 'c++filt', name ])
if len(sys.argv) > 1:
outputdir = sys.argv[1]
else:
outputdir = '.'
filename = os.path.join(outputdir, 'sim.rtntracefull')
if not os.path.exists(filename):
print >> sys.stderr, 'Cannot find trace file', filename
sys.exit(1)
fp = open(filename)
headers = fp.readline().strip().split('\t')
data = {}
functions = {}
for line in fp:
line = dict(zip(headers, line.strip().split('\t')))
data[line['eip']] = {'calls': long(line['calls']), 'time': long(line['core_elapsed_time'])/1e15, 'icount': long(line['instruction_count'])}
eip = line['eip'].split(':')[-1]
if eip not in functions:
functions[eip] = cppfilt(line['name']).strip()
for stack in sorted(data.keys()):
eip = stack.split(':')[-1]
print stack, functions[eip], data[stack]['calls'], data[stack]['time'], data[stack]['icount']
|
<commit_before><commit_msg>[rtntracer] Add script to parse rtntracefull output into a call tree<commit_after>
|
#!/usr/bin/env python
import sys, os, subprocess
def ex_ret(cmd):
return subprocess.Popen(cmd, stdout = subprocess.PIPE).communicate()[0]
def cppfilt(name):
return ex_ret([ 'c++filt', name ])
if len(sys.argv) > 1:
outputdir = sys.argv[1]
else:
outputdir = '.'
filename = os.path.join(outputdir, 'sim.rtntracefull')
if not os.path.exists(filename):
print >> sys.stderr, 'Cannot find trace file', filename
sys.exit(1)
fp = open(filename)
headers = fp.readline().strip().split('\t')
data = {}
functions = {}
for line in fp:
line = dict(zip(headers, line.strip().split('\t')))
data[line['eip']] = {'calls': long(line['calls']), 'time': long(line['core_elapsed_time'])/1e15, 'icount': long(line['instruction_count'])}
eip = line['eip'].split(':')[-1]
if eip not in functions:
functions[eip] = cppfilt(line['name']).strip()
for stack in sorted(data.keys()):
eip = stack.split(':')[-1]
print stack, functions[eip], data[stack]['calls'], data[stack]['time'], data[stack]['icount']
|
[rtntracer] Add script to parse rtntracefull output into a call tree#!/usr/bin/env python
import sys, os, subprocess
def ex_ret(cmd):
return subprocess.Popen(cmd, stdout = subprocess.PIPE).communicate()[0]
def cppfilt(name):
return ex_ret([ 'c++filt', name ])
if len(sys.argv) > 1:
outputdir = sys.argv[1]
else:
outputdir = '.'
filename = os.path.join(outputdir, 'sim.rtntracefull')
if not os.path.exists(filename):
print >> sys.stderr, 'Cannot find trace file', filename
sys.exit(1)
fp = open(filename)
headers = fp.readline().strip().split('\t')
data = {}
functions = {}
for line in fp:
line = dict(zip(headers, line.strip().split('\t')))
data[line['eip']] = {'calls': long(line['calls']), 'time': long(line['core_elapsed_time'])/1e15, 'icount': long(line['instruction_count'])}
eip = line['eip'].split(':')[-1]
if eip not in functions:
functions[eip] = cppfilt(line['name']).strip()
for stack in sorted(data.keys()):
eip = stack.split(':')[-1]
print stack, functions[eip], data[stack]['calls'], data[stack]['time'], data[stack]['icount']
|
<commit_before><commit_msg>[rtntracer] Add script to parse rtntracefull output into a call tree<commit_after>#!/usr/bin/env python
import sys, os, subprocess
def ex_ret(cmd):
return subprocess.Popen(cmd, stdout = subprocess.PIPE).communicate()[0]
def cppfilt(name):
return ex_ret([ 'c++filt', name ])
if len(sys.argv) > 1:
outputdir = sys.argv[1]
else:
outputdir = '.'
filename = os.path.join(outputdir, 'sim.rtntracefull')
if not os.path.exists(filename):
print >> sys.stderr, 'Cannot find trace file', filename
sys.exit(1)
fp = open(filename)
headers = fp.readline().strip().split('\t')
data = {}
functions = {}
for line in fp:
line = dict(zip(headers, line.strip().split('\t')))
data[line['eip']] = {'calls': long(line['calls']), 'time': long(line['core_elapsed_time'])/1e15, 'icount': long(line['instruction_count'])}
eip = line['eip'].split(':')[-1]
if eip not in functions:
functions[eip] = cppfilt(line['name']).strip()
for stack in sorted(data.keys()):
eip = stack.split(':')[-1]
print stack, functions[eip], data[stack]['calls'], data[stack]['time'], data[stack]['icount']
|
|
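The parser above assumes sim.rtntracefull is a tab-separated file whose header row includes at least eip, name, calls, core_elapsed_time and instruction_count. A self-contained illustration of the dict(zip(...)) row handling it relies on (the sample values are hypothetical):

headers = ['eip', 'name', 'calls', 'core_elapsed_time', 'instruction_count']
row = '4005d0:400800\t_Z3foov\t42\t1500000000000000\t123456'
line = dict(zip(headers, row.split('\t')))
print(line['eip'].split(':')[-1])             # leaf eip of the call stack: '400800'
print(int(line['core_elapsed_time']) / 1e15)  # femtoseconds -> seconds: 1.5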
144608f98f5b8aff555ee4954d0a07aebb828aff
|
ThinkLikeProg/chp2ex.py
|
ThinkLikeProg/chp2ex.py
|
#!/usr/local/bin/python
# Think Like a Programmer Chapter 2: Pure Puzzles exercises
def main():
poundV()
print()
poundDiamond()
def poundV():
'''
Using only single output statements of a space, pound, or new line, create:
########
######
####
##
'''
n = 4
space = ' '
pound = '#'
for i in range(n, 0, -1):
print((space * (n - i)) + (pound * i * 2) + (space * (n - i)))
def poundDiamond():
'''
Using only single output statements of a space, pound, or new line, create:
##
####
######
########
########
######
####
##
'''
n = 4
space = ' '
pound = '#'
rangeItem = list(range(1, n + 1))
for i in rangeItem + rangeItem[::-1]:
print((space * (n - i)) + (pound * i * 2) + (space * (n - i)))
if __name__ == '__main__':
main()
|
Add Think Like a Programmer section and start chp 2 exercises
|
Add Think Like a Programmer section and start chp 2 exercises
|
Python
|
mit
|
HKuz/Test_Code
|
Add Think Like a Programmer section and start chp 2 exercises
|
#!/usr/local/bin/python
# Think Like a Programmer Chapter 2: Pure Puzzles exercises
def main():
poundV()
print()
poundDiamond()
def poundV():
'''
Using only single output statements of a space, pound, or new line, create:
########
######
####
##
'''
n = 4
space = ' '
pound = '#'
for i in range(n, 0, -1):
print((space * (n - i)) + (pound * i * 2) + (space * (n - i)))
def poundDiamond():
'''
Using only single output statements of a space, pound, or new line, create:
##
####
######
########
########
######
####
##
'''
n = 4
space = ' '
pound = '#'
rangeItem = list(range(1, n + 1))
for i in rangeItem + rangeItem[::-1]:
print((space * (n - i)) + (pound * i * 2) + (space * (n - i)))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add Think Like a Programmer section and start chp 2 exercises<commit_after>
|
#!/usr/local/bin/python
# Think Like a Programmer Chapter 2: Pure Puzzles exercises
def main():
poundV()
print()
poundDiamond()
def poundV():
'''
Using only single output statements of a space, pound, or new line, create:
########
######
####
##
'''
n = 4
space = ' '
pound = '#'
for i in range(n, 0, -1):
print((space * (n - i)) + (pound * i * 2) + (space * (n - i)))
def poundDiamond():
'''
Using only single output statements of a space, pound, or new line, create:
##
####
######
########
########
######
####
##
'''
n = 4
space = ' '
pound = '#'
rangeItem = list(range(1, n + 1))
for i in rangeItem + rangeItem[::-1]:
print((space * (n - i)) + (pound * i * 2) + (space * (n - i)))
if __name__ == '__main__':
main()
|
Add Think Like a Programmer section and start chp 2 exercises#!/usr/local/bin/python
# Think Like a Programmer Chapter 2: Pure Puzzles exercises
def main():
poundV()
print()
poundDiamond()
def poundV():
'''
Using only single output statements of a space, pound, or new line, create:
########
######
####
##
'''
n = 4
space = ' '
pound = '#'
for i in range(n, 0, -1):
print((space * (n - i)) + (pound * i * 2) + (space * (n - i)))
def poundDiamond():
'''
Using only single output statements of a space, pound, or new line, create:
##
####
######
########
########
######
####
##
'''
n = 4
space = ' '
pound = '#'
rangeItem = list(range(1, n + 1))
for i in rangeItem + rangeItem[::-1]:
print((space * (n - i)) + (pound * i * 2) + (space * (n - i)))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add Think Like a Programmer section and start chp 2 exercises<commit_after>#!/usr/local/bin/python
# Think Like a Programmer Chapter 2: Pure Puzzles exercises
def main():
poundV()
print()
poundDiamond()
def poundV():
'''
Using only single output statements of a space, pound, or new line, create:
########
######
####
##
'''
n = 4
space = ' '
pound = '#'
for i in range(n, 0, -1):
print((space * (n - i)) + (pound * i * 2) + (space * (n - i)))
def poundDiamond():
'''
Using only single output statements of a space, pound, or new line, create:
##
####
######
########
########
######
####
##
'''
n = 4
space = ' '
pound = '#'
rangeItem = list(range(1, n + 1))
for i in rangeItem + rangeItem[::-1]:
print((space * (n - i)) + (pound * i * 2) + (space * (n - i)))
if __name__ == '__main__':
main()
|
|
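Each row printed by both functions is (n - i) spaces, 2*i pound signs, then (n - i) trailing spaces, so every row is exactly 2*n characters wide and the shapes stay centered. A tiny check of that invariant (not part of the original exercise file):

n = 4
for i in range(n, 0, -1):
    row = ' ' * (n - i) + '#' * (2 * i) + ' ' * (n - i)
    assert len(row) == 2 * n  # constant width keeps the V centered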
59dc769f3ccf0e0251c527b7e1e23544dedab048
|
tests/test_to_text.py
|
tests/test_to_text.py
|
from datetime import datetime
from recurrence import Recurrence, Rule
import recurrence
def test_rule_to_text_simple():
assert Rule(
recurrence.WEEKLY
).to_text() == 'weekly'
def test_rule_to_text_interval():
assert Rule(
recurrence.WEEKLY,
interval=3
).to_text() == 'every 3 weeks'
def test_rule_to_text_oneoff():
assert Rule(
recurrence.WEEKLY,
count=1
).to_text() == 'weekly, occuring once'
def test_rule_to_text_multiple():
assert Rule(
recurrence.WEEKLY,
count=5
).to_text() == 'weekly, occuring 5 times'
def test_rule_to_text_yearly_bymonth():
assert Rule(
recurrence.YEARLY,
bymonth=[1,3],
).to_text() == 'annually, each February, April'
assert Rule(
recurrence.YEARLY,
bymonth=[1,3],
).to_text(True) == 'annually, each Feb, Apr'
|
Add initial tests for to_text
|
Add initial tests for to_text
|
Python
|
bsd-3-clause
|
django-recurrence/django-recurrence,FrankSalad/django-recurrence,linux2400/django-recurrence,Nikola-K/django-recurrence,django-recurrence/django-recurrence,FrankSalad/django-recurrence,Nikola-K/django-recurrence,linux2400/django-recurrence
|
Add initial tests for to_text
|
from datetime import datetime
from recurrence import Recurrence, Rule
import recurrence
def test_rule_to_text_simple():
assert Rule(
recurrence.WEEKLY
).to_text() == 'weekly'
def test_rule_to_text_interval():
assert Rule(
recurrence.WEEKLY,
interval=3
).to_text() == 'every 3 weeks'
def test_rule_to_text_oneoff():
assert Rule(
recurrence.WEEKLY,
count=1
).to_text() == 'weekly, occuring once'
def test_rule_to_text_multiple():
assert Rule(
recurrence.WEEKLY,
count=5
).to_text() == 'weekly, occuring 5 times'
def test_rule_to_text_yearly_bymonth():
assert Rule(
recurrence.YEARLY,
bymonth=[1,3],
).to_text() == 'annually, each February, April'
assert Rule(
recurrence.YEARLY,
bymonth=[1,3],
).to_text(True) == 'annually, each Feb, Apr'
|
<commit_before><commit_msg>Add initial tests for to_text<commit_after>
|
from datetime import datetime
from recurrence import Recurrence, Rule
import recurrence
def test_rule_to_text_simple():
assert Rule(
recurrence.WEEKLY
).to_text() == 'weekly'
def test_rule_to_text_interval():
assert Rule(
recurrence.WEEKLY,
interval=3
).to_text() == 'every 3 weeks'
def test_rule_to_text_oneoff():
assert Rule(
recurrence.WEEKLY,
count=1
).to_text() == 'weekly, occuring once'
def test_rule_to_text_multiple():
assert Rule(
recurrence.WEEKLY,
count=5
).to_text() == 'weekly, occuring 5 times'
def test_rule_to_text_yearly_bymonth():
assert Rule(
recurrence.YEARLY,
bymonth=[1,3],
).to_text() == 'annually, each February, April'
assert Rule(
recurrence.YEARLY,
bymonth=[1,3],
).to_text(True) == 'annually, each Feb, Apr'
|
Add initial tests for to_textfrom datetime import datetime
from recurrence import Recurrence, Rule
import recurrence
def test_rule_to_text_simple():
assert Rule(
recurrence.WEEKLY
).to_text() == 'weekly'
def test_rule_to_text_interval():
assert Rule(
recurrence.WEEKLY,
interval=3
).to_text() == 'every 3 weeks'
def test_rule_to_text_oneoff():
assert Rule(
recurrence.WEEKLY,
count=1
).to_text() == 'weekly, occuring once'
def test_rule_to_text_multiple():
assert Rule(
recurrence.WEEKLY,
count=5
).to_text() == 'weekly, occuring 5 times'
def test_rule_to_text_yearly_bymonth():
assert Rule(
recurrence.YEARLY,
bymonth=[1,3],
).to_text() == 'annually, each February, April'
assert Rule(
recurrence.YEARLY,
bymonth=[1,3],
).to_text(True) == 'annually, each Feb, Apr'
|
<commit_before><commit_msg>Add initial tests for to_text<commit_after>from datetime import datetime
from recurrence import Recurrence, Rule
import recurrence
def test_rule_to_text_simple():
assert Rule(
recurrence.WEEKLY
).to_text() == 'weekly'
def test_rule_to_text_interval():
assert Rule(
recurrence.WEEKLY,
interval=3
).to_text() == 'every 3 weeks'
def test_rule_to_text_oneoff():
assert Rule(
recurrence.WEEKLY,
count=1
).to_text() == 'weekly, occuring once'
def test_rule_to_text_multiple():
assert Rule(
recurrence.WEEKLY,
count=5
).to_text() == 'weekly, occuring 5 times'
def test_rule_to_text_yearly_bymonth():
assert Rule(
recurrence.YEARLY,
bymonth=[1,3],
).to_text() == 'annually, each February, April'
assert Rule(
recurrence.YEARLY,
bymonth=[1,3],
).to_text(True) == 'annually, each Feb, Apr'
|
|
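The options exercised above should compose on a single Rule; a hypothetical combination, assuming django-recurrence is installed and Django settings are configured (the expected string is inferred by analogy with the assertions above, not verified against the library):

import recurrence
from recurrence import Rule

rule = Rule(recurrence.WEEKLY, interval=2, count=3)
print(rule.to_text())  # presumably something like 'every 2 weeks, occuring 3 times'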
045f1ae3d436b4372e45fd821740d7a94d2ca049
|
src/repository/migrations/0003_auto_20170524_1503.py
|
src/repository/migrations/0003_auto_20170524_1503.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-24 15:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('repository', '0002_auto_20170522_2021'),
]
operations = [
migrations.AlterModelOptions(
name='github',
options={'verbose_name': 'github project', 'verbose_name_plural': 'github projects'},
),
]
|
Change meta option for Github
|
Change meta option for Github
|
Python
|
bsd-3-clause
|
lozadaOmr/ansible-admin,lozadaOmr/ansible-admin,lozadaOmr/ansible-admin
|
Change meta option for Github
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-24 15:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('repository', '0002_auto_20170522_2021'),
]
operations = [
migrations.AlterModelOptions(
name='github',
options={'verbose_name': 'github project', 'verbose_name_plural': 'github projects'},
),
]
|
<commit_before><commit_msg>Change meta option for Github<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-24 15:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('repository', '0002_auto_20170522_2021'),
]
operations = [
migrations.AlterModelOptions(
name='github',
options={'verbose_name': 'github project', 'verbose_name_plural': 'github projects'},
),
]
|
Change meta option for Github# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-24 15:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('repository', '0002_auto_20170522_2021'),
]
operations = [
migrations.AlterModelOptions(
name='github',
options={'verbose_name': 'github project', 'verbose_name_plural': 'github projects'},
),
]
|
<commit_before><commit_msg>Change meta option for Github<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-24 15:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('repository', '0002_auto_20170522_2021'),
]
operations = [
migrations.AlterModelOptions(
name='github',
options={'verbose_name': 'github project', 'verbose_name_plural': 'github projects'},
),
]
|
|
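For reference, the AlterModelOptions operation above corresponds to a model Meta roughly like the following (a sketch inferred from the migration; the Github model's fields are omitted because they are not visible here):

from django.db import models

class Github(models.Model):
    class Meta:
        verbose_name = 'github project'
        verbose_name_plural = 'github projects'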
40761c39bcc3703e8fd99544aa1d08f955538779
|
TrevorNet/tasks.py
|
TrevorNet/tasks.py
|
import nets
import random
import math
def XOR():
'''Exclusive or'''
net = nets.FeedForwardNet(2, 3, 1)
domain = ((1,1), (1,-1), (-1,1), (-1,-1))
rng = ((0,), (1,), (1,), (0,))
for i in range(100000):
r = random.randrange(4)
net.train(domain[r], rng[r])
for d in domain:
print('{0} => {1}'.format(d, net.predict(d)))
def sin():
'''A normalized sin: f(x) = .5*sin(x)+.5'''
net = nets.FeedForwardNet(1, 50, 1)
for i in range(300000):
verbose = False
if i%10000 == 0:
print('progress: {0}%'.format(i/3000))
verbose = True
x = random.random()*2*math.pi
y = .5*math.sin(x)+.5
net.train((x,), (y,), verbose)
for i in range(20):
x = .05*i*2*math.pi
print('{0} => {1}'.format((x,), net.predict((x,))))
if __name__ == '__main__':
XOR()
sin()
|
Add file with test problems
|
Add file with test problems
|
Python
|
mit
|
tmerr/trevornet
|
Add file with test problems
|
import nets
import random
import math
def XOR():
'''Exclusive or'''
net = nets.FeedForwardNet(2, 3, 1)
domain = ((1,1), (1,-1), (-1,1), (-1,-1))
rng = ((0,), (1,), (1,), (0,))
for i in range(100000):
r = random.randrange(4)
net.train(domain[r], rng[r])
for d in domain:
print('{0} => {1}'.format(d, net.predict(d)))
def sin():
'''A normalized sin: f(x) = .5*sin(x)+.5'''
net = nets.FeedForwardNet(1, 50, 1)
for i in range(300000):
verbose = False
if i%10000 == 0:
print('progress: {0}%'.format(i/3000))
verbose = True
x = random.random()*2*math.pi
y = .5*math.sin(x)+.5
net.train((x,), (y,), verbose)
for i in range(20):
x = .05*i*2*math.pi
print('{0} => {1}'.format((x,), net.predict((x,))))
if __name__ == '__main__':
XOR()
sin()
|
<commit_before><commit_msg>Add file with test problems<commit_after>
|
import nets
import random
import math
def XOR():
'''Exclusive or'''
net = nets.FeedForwardNet(2, 3, 1)
domain = ((1,1), (1,-1), (-1,1), (-1,-1))
rng = ((0,), (1,), (1,), (0,))
for i in range(100000):
r = random.randrange(4)
net.train(domain[r], rng[r])
for d in domain:
print('{0} => {1}'.format(d, net.predict(d)))
def sin():
'''A normalized sin: f(x) = .5*sin(x)+.5'''
net = nets.FeedForwardNet(1, 50, 1)
for i in range(300000):
verbose = False
if i%10000 == 0:
print('progress: {0}%'.format(i/3000))
verbose = True
x = random.random()*2*math.pi
y = .5*math.sin(x)+.5
net.train((x,), (y,), verbose)
for i in range(20):
x = .05*i*2*math.pi
print('{0} => {1}'.format((x,), net.predict((x,))))
if __name__ == '__main__':
XOR()
sin()
|
Add file with test problemsimport nets
import random
import math
def XOR():
'''Exclusive or'''
net = nets.FeedForwardNet(2, 3, 1)
domain = ((1,1), (1,-1), (-1,1), (-1,-1))
rng = ((0,), (1,), (1,), (0,))
for i in range(100000):
r = random.randrange(4)
net.train(domain[r], rng[r])
for d in domain:
print('{0} => {1}'.format(d, net.predict(d)))
def sin():
'''A normalized sin: f(x) = .5*sin(x)+.5'''
net = nets.FeedForwardNet(1, 50, 1)
for i in range(300000):
verbose = False
if i%10000 == 0:
print('progress: {0}%'.format(i/3000))
verbose = True
x = random.random()*2*math.pi
y = .5*math.sin(x)+.5
net.train((x,), (y,), verbose)
for i in range(20):
x = .05*i*2*math.pi
print('{0} => {1}'.format((x,), net.predict((x,))))
if __name__ == '__main__':
XOR()
sin()
|
<commit_before><commit_msg>Add file with test problems<commit_after>import nets
import random
import math
def XOR():
'''Exclusive or'''
net = nets.FeedForwardNet(2, 3, 1)
domain = ((1,1), (1,-1), (-1,1), (-1,-1))
rng = ((0,), (1,), (1,), (0,))
for i in range(100000):
r = random.randrange(4)
net.train(domain[r], rng[r])
for d in domain:
print('{0} => {1}'.format(d, net.predict(d)))
def sin():
'''A normalized sin: f(x) = .5*sin(x)+.5'''
net = nets.FeedForwardNet(1, 50, 1)
for i in range(300000):
verbose = False
if i%10000 == 0:
print('progress: {0}%'.format(i/3000))
verbose = True
x = random.random()*2*math.pi
y = .5*math.sin(x)+.5
net.train((x,), (y,), verbose)
for i in range(20):
x = .05*i*2*math.pi
print('{0} => {1}'.format((x,), net.predict((x,))))
if __name__ == '__main__':
XOR()
sin()
|
|
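One rough way to quantify how well the sin() task converged, reusing the FeedForwardNet interface implied by the file above (predict is assumed to take and return sequences; this helper is a sketch and not part of the original module):

import math

def mean_abs_error(net, samples=100):
    total = 0.0
    for i in range(samples):
        x = (i / float(samples)) * 2 * math.pi
        y = .5 * math.sin(x) + .5
        total += abs(net.predict((x,))[0] - y)
    return total / samples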
7b2f561bb7d36eb9fe978e622789932c02c11411
|
vsub/templatetags/parse_if.py
|
vsub/templatetags/parse_if.py
|
# Loosely based on noparse.py from https://code.djangoproject.com/ticket/14502
from django import template
from django.template.defaulttags import TemplateIfParser
register = template.Library()
token_formats = {
template.TOKEN_TEXT: u'%s',
template.TOKEN_VAR: u'%s%%s%s' % (template.VARIABLE_TAG_START, template.VARIABLE_TAG_END),
template.TOKEN_COMMENT: u'%s%%s%s' % (template.COMMENT_TAG_START, template.COMMENT_TAG_END),
# template.TOKEN_BLOCK is handled in place; formatting breaks on '%}'.
}
@register.tag
def parse_if(parser, token):
bits = token.split_contents()
tag_name = bits[0]
end_tag = 'end%s' % tag_name
condition = TemplateIfParser(parser, bits[1:]).parse()
text = []
while parser.tokens:
token = parser.next_token()
if (token.token_type == template.TOKEN_BLOCK) and (token.contents == end_tag):
return ParseIfNode(condition, u''.join(text))
if token.token_type == template.TOKEN_BLOCK:
text.append(u'%s%s%s' % (template.BLOCK_TAG_START, token.contents,
template.BLOCK_TAG_END))
else:
text.append(token_formats[token.token_type] % token.contents)
parser.unclosed_block_tag(end_tag)
class ParseIfNode(template.Node):
def __init__(self, condition, text):
self.condition = condition
self.text = text
def render(self, context):
try:
match = self.condition.eval(context)
except template.VariableDoesNotExist:
match = None
if match:
return self._render(context)
return ''
def _render(self, context):
# Instantiating a Template object uses a new Parser instance, so
# none of the loaded libraries carry over. Each new Parser instance
# imports builtin libraries by default. Since we can't access the
# Parser instance created to parse the template, we temporarily add
# a library to builtins that contains all of the libraries that are
# currently loaded. A better way to do this would be a mechanism
# to override the Parser instance (or compile_string() function)
# used in the new Template.
temp_lib = self._aggregate_libraries(template.libraries.values())
builtins = template.builtins
builtins.append(temp_lib)
cursor = builtins.index(temp_lib)
try:
return template.Template(self.text).render(context)
finally:
# Remove our temporary library from builtins, so that it
# doesn't pollute other parsers.
del builtins[cursor]
def _aggregate_libraries(self, libraries):
temp_lib = template.Library()
for library in libraries:
temp_lib.tags.update(library.tags)
temp_lib.filters.update(library.filters)
return temp_lib
|
Add a template tag to conditionally *parse* the tag's nested content.
|
Add a template tag to conditionally *parse* the tag's nested content.
|
Python
|
mit
|
PrecisionMojo/pm-www,PrecisionMojo/pm-www
|
Add a template tag to conditionally *parse* the tag's nested content.
|
# Loosely based on noparse.py from https://code.djangoproject.com/ticket/14502
from django import template
from django.template.defaulttags import TemplateIfParser
register = template.Library()
token_formats = {
template.TOKEN_TEXT: u'%s',
template.TOKEN_VAR: u'%s%%s%s' % (template.VARIABLE_TAG_START, template.VARIABLE_TAG_END),
template.TOKEN_COMMENT: u'%s%%s%s' % (template.COMMENT_TAG_START, template.COMMENT_TAG_END),
# template.TOKEN_BLOCK is handled in place; formatting breaks on '%}'.
}
@register.tag
def parse_if(parser, token):
bits = token.split_contents()
tag_name = bits[0]
end_tag = 'end%s' % tag_name
condition = TemplateIfParser(parser, bits[1:]).parse()
text = []
while parser.tokens:
token = parser.next_token()
if (token.token_type == template.TOKEN_BLOCK) and (token.contents == end_tag):
return ParseIfNode(condition, u''.join(text))
if token.token_type == template.TOKEN_BLOCK:
text.append(u'%s%s%s' % (template.BLOCK_TAG_START, token.contents,
template.BLOCK_TAG_END))
else:
text.append(token_formats[token.token_type] % token.contents)
parser.unclosed_block_tag(end_tag)
class ParseIfNode(template.Node):
def __init__(self, condition, text):
self.condition = condition
self.text = text
def render(self, context):
try:
match = self.condition.eval(context)
except template.VariableDoesNotExist:
match = None
if match:
return self._render(context)
return ''
def _render(self, context):
# Instantiating a Template object uses a new Parser instance, so
# none of the loaded libraries carry over. Each new Parser instance
# imports builtin libraries by default. Since we can't access the
# Parser instance created to parse the template, we temporarily add
# a library to builtins that contains all of the libraries that are
# currently loaded. A better way to do this would be a mechanism
# to override the Parser instance (or compile_string() function)
# used in the new Template.
temp_lib = self._aggregate_libraries(template.libraries.values())
builtins = template.builtins
builtins.append(temp_lib)
cursor = builtins.index(temp_lib)
try:
return template.Template(self.text).render(context)
finally:
# Remove our temporary library from builtins, so that it
# doesn't pollute other parsers.
del builtins[cursor]
def _aggregate_libraries(self, libraries):
temp_lib = template.Library()
for library in libraries:
temp_lib.tags.update(library.tags)
temp_lib.filters.update(library.filters)
return temp_lib
|
<commit_before><commit_msg>Add a template tag to conditionally *parse* the tag's nested content.<commit_after>
|
# Loosely based on noparse.py from https://code.djangoproject.com/ticket/14502
from django import template
from django.template.defaulttags import TemplateIfParser
register = template.Library()
token_formats = {
template.TOKEN_TEXT: u'%s',
template.TOKEN_VAR: u'%s%%s%s' % (template.VARIABLE_TAG_START, template.VARIABLE_TAG_END),
template.TOKEN_COMMENT: u'%s%%s%s' % (template.COMMENT_TAG_START, template.COMMENT_TAG_END),
# template.TOKEN_BLOCK is handled in place; formatting breaks on '%}'.
}
@register.tag
def parse_if(parser, token):
bits = token.split_contents()
tag_name = bits[0]
end_tag = 'end%s' % tag_name
condition = TemplateIfParser(parser, bits[1:]).parse()
text = []
while parser.tokens:
token = parser.next_token()
if (token.token_type == template.TOKEN_BLOCK) and (token.contents == end_tag):
return ParseIfNode(condition, u''.join(text))
if token.token_type == template.TOKEN_BLOCK:
text.append(u'%s%s%s' % (template.BLOCK_TAG_START, token.contents,
template.BLOCK_TAG_END))
else:
text.append(token_formats[token.token_type] % token.contents)
parser.unclosed_block_tag(end_tag)
class ParseIfNode(template.Node):
def __init__(self, condition, text):
self.condition = condition
self.text = text
def render(self, context):
try:
match = self.condition.eval(context)
except template.VariableDoesNotExist:
match = None
if match:
return self._render(context)
return ''
def _render(self, context):
# Instantiating a Template object uses a new Parser instance, so
# none of the loaded libraries carry over. Each new Parser instance
# imports builtin libraries by default. Since we can't access the
# Parser instance created to parse the template, we temporarily add
# a library to builtins that contains all of the libraries that are
# currently loaded. A better way to do this would be a mechanism
# to override the Parser instance (or compile_string() function)
# used in the new Template.
temp_lib = self._aggregate_libraries(template.libraries.values())
builtins = template.builtins
builtins.append(temp_lib)
cursor = builtins.index(temp_lib)
try:
return template.Template(self.text).render(context)
finally:
# Remove our temporary library from builtins, so that it
# doesn't pollute other parsers.
del builtins[cursor]
def _aggregate_libraries(self, libraries):
temp_lib = template.Library()
for library in libraries:
temp_lib.tags.update(library.tags)
temp_lib.filters.update(library.filters)
return temp_lib
|
Add a template tag to conditionally *parse* the tag's nested content.# Loosely based on noparse.py from https://code.djangoproject.com/ticket/14502
from django import template
from django.template.defaulttags import TemplateIfParser
register = template.Library()
token_formats = {
template.TOKEN_TEXT: u'%s',
template.TOKEN_VAR: u'%s%%s%s' % (template.VARIABLE_TAG_START, template.VARIABLE_TAG_END),
template.TOKEN_COMMENT: u'%s%%s%s' % (template.COMMENT_TAG_START, template.COMMENT_TAG_END),
# template.TOKEN_BLOCK is handled in place; formatting breaks on '%}'.
}
@register.tag
def parse_if(parser, token):
bits = token.split_contents()
tag_name = bits[0]
end_tag = 'end%s' % tag_name
condition = TemplateIfParser(parser, bits[1:]).parse()
text = []
while parser.tokens:
token = parser.next_token()
if (token.token_type == template.TOKEN_BLOCK) and (token.contents == end_tag):
return ParseIfNode(condition, u''.join(text))
if token.token_type == template.TOKEN_BLOCK:
text.append(u'%s%s%s' % (template.BLOCK_TAG_START, token.contents,
template.BLOCK_TAG_END))
else:
text.append(token_formats[token.token_type] % token.contents)
parser.unclosed_block_tag(end_tag)
class ParseIfNode(template.Node):
def __init__(self, condition, text):
self.condition = condition
self.text = text
def render(self, context):
try:
match = self.condition.eval(context)
except template.VariableDoesNotExist:
match = None
if match:
return self._render(context)
return ''
def _render(self, context):
# Instantiating a Template object uses a new Parser instance, so
# none of the loaded libraries carry over. Each new Parser instance
# imports builtin libraries by default. Since we can't access the
# Parser instance created to parse the template, we temporarily add
# a library to builtins that contains all of the libraries that are
# currently loaded. A better way to do this would be a mechanism
# to override the Parser instance (or compile_string() function)
# used in the new Template.
temp_lib = self._aggregate_libraries(template.libraries.values())
builtins = template.builtins
builtins.append(temp_lib)
cursor = builtins.index(temp_lib)
try:
return template.Template(self.text).render(context)
finally:
# Remove our temporary library from builtins, so that it
# doesn't pollute other parsers.
del builtins[cursor]
def _aggregate_libraries(self, libraries):
temp_lib = template.Library()
for library in libraries:
temp_lib.tags.update(library.tags)
temp_lib.filters.update(library.filters)
return temp_lib
|
<commit_before><commit_msg>Add a template tag to conditionally *parse* the tag's nested content.<commit_after># Loosely based on noparse.py from https://code.djangoproject.com/ticket/14502
from django import template
from django.template.defaulttags import TemplateIfParser
register = template.Library()
token_formats = {
template.TOKEN_TEXT: u'%s',
template.TOKEN_VAR: u'%s%%s%s' % (template.VARIABLE_TAG_START, template.VARIABLE_TAG_END),
template.TOKEN_COMMENT: u'%s%%s%s' % (template.COMMENT_TAG_START, template.COMMENT_TAG_END),
# template.TOKEN_BLOCK is handled in place; formatting breaks on '%}'.
}
@register.tag
def parse_if(parser, token):
bits = token.split_contents()
tag_name = bits[0]
end_tag = 'end%s' % tag_name
condition = TemplateIfParser(parser, bits[1:]).parse()
text = []
while parser.tokens:
token = parser.next_token()
if (token.token_type == template.TOKEN_BLOCK) and (token.contents == end_tag):
return ParseIfNode(condition, u''.join(text))
if token.token_type == template.TOKEN_BLOCK:
text.append(u'%s%s%s' % (template.BLOCK_TAG_START, token.contents,
template.BLOCK_TAG_END))
else:
text.append(token_formats[token.token_type] % token.contents)
parser.unclosed_block_tag(end_tag)
class ParseIfNode(template.Node):
def __init__(self, condition, text):
self.condition = condition
self.text = text
def render(self, context):
try:
match = self.condition.eval(context)
except template.VariableDoesNotExist:
match = None
if match:
return self._render(context)
return ''
def _render(self, context):
# Instantiating a Template object uses a new Parser instance, so
# none of the loaded libraries carry over. Each new Parser instance
# imports builtin libraries by default. Since we can't access the
# Parser instance created to parse the template, we temporarily add
# a library to builtins that contains all of the libraries that are
# currently loaded. A better way to do this would be a mechanism
# to override the Parser instance (or compile_string() function)
# used in the new Template.
temp_lib = self._aggregate_libraries(template.libraries.values())
builtins = template.builtins
builtins.append(temp_lib)
cursor = builtins.index(temp_lib)
try:
return template.Template(self.text).render(context)
finally:
# Remove our temporary library from builtins, so that it
# doesn't pollute other parsers.
del builtins[cursor]
def _aggregate_libraries(self, libraries):
temp_lib = template.Library()
for library in libraries:
temp_lib.tags.update(library.tags)
temp_lib.filters.update(library.filters)
return temp_lib
|
|
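Hypothetical usage of the tag, assuming this module is installed as a template tag library named parse_if and running on the pre-1.9 Django internals it targets (template.builtins, template.libraries):

from django.template import Context, Template

t = Template(
    "{% load parse_if %}"
    "{% parse_if show %}Hello {{ name|upper }}!{% endparse_if %}"
)
print(t.render(Context({'show': True, 'name': 'world'})))   # Hello WORLD!
print(t.render(Context({'show': False, 'name': 'world'})))  # empty string

The point of the tag is that the enclosed block is only tokenized and parsed when the condition is truthy, unlike a plain {% if %}, whose body is parsed unconditionally.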
882944fb5c3afcd9eed086d996998f9921df6198
|
dropbox_login.py
|
dropbox_login.py
|
#!/usr/bin/env python
# YOU NEED TO PUT YOUR APP KEY AND SECRET IN dropbox_config.yaml!
# Go to dropbox.com/developers/apps to create an app.
import webbrowser
from dropbox import client, rest, session
import pickle
import yaml
def load_config(config_name):
with open(config_name) as f:
return yaml.load(f)
def save_config(config_name, config):
with open(config_name, 'w') as f:
f.write(yaml.dump(config, default_flow_style=True))
def _get_request_token(app_key, app_secret, access_type):
sess = session.DropboxSession(app_key, app_secret, access_type)
request_token = sess.obtain_request_token()
url = sess.build_authorize_url(request_token)
webbrowser.open(url)
print "Press enter after allowing application..."
raw_input()
return request_token
def _get_access_token(app_key, app_secret, access_type):
request_token = _get_request_token(app_key, app_secret, access_type)
sess = session.DropboxSession(app_key, app_secret, access_type)
access_token = sess.obtain_access_token(request_token)
return (access_token.key, access_token.secret)
def get_client():
config_name = "dropbox_config.yaml"
config = load_config(config_name)
app_key = config['app_key']
app_secret = config['app_secret']
access_type = config['access_type']
key = config.get('access_key', None)
secret = config.get('access_secret', None)
if key is None or secret is None:
key, secret = _get_access_token(app_key, app_secret, access_type)
# Store access tokens for later
config['access_key'] = key
config['access_secret'] = secret
save_config(config_name, config)
sess = session.DropboxSession(app_key, app_secret, access_type)
sess.set_token(key, secret)
dropbox_client = client.DropboxClient(sess)
return dropbox_client
def main():
    # Demo when run as a script...
# Just print the account info to verify that the authentication worked:
print 'Getting account info...'
dropbox_client = get_client()
account_info = dropbox_client.account_info()
print 'linked account:', account_info
if __name__ == '__main__':
main()
|
Add script to use Dropbox API
|
Add script to use Dropbox API
|
Python
|
mit
|
philipbl/instagram2dayone
|
Add script to use Dropbox API
|
#!/usr/bin/env python
# YOU NEED TO PUT YOUR APP KEY AND SECRET IN dropbox_config.yaml!
# Go to dropbox.com/developers/apps to create an app.
import webbrowser
from dropbox import client, rest, session
import pickle
import yaml
def load_config(config_name):
with open(config_name) as f:
return yaml.load(f)
def save_config(config_name, config):
with open(config_name, 'w') as f:
f.write(yaml.dump(config, default_flow_style=True))
def _get_request_token(app_key, app_secret, access_type):
sess = session.DropboxSession(app_key, app_secret, access_type)
request_token = sess.obtain_request_token()
url = sess.build_authorize_url(request_token)
webbrowser.open(url)
print "Press enter after allowing application..."
raw_input()
return request_token
def _get_access_token(app_key, app_secret, access_type):
request_token = _get_request_token(app_key, app_secret, access_type)
sess = session.DropboxSession(app_key, app_secret, access_type)
access_token = sess.obtain_access_token(request_token)
return (access_token.key, access_token.secret)
def get_client():
config_name = "dropbox_config.yaml"
config = load_config(config_name)
app_key = config['app_key']
app_secret = config['app_secret']
access_type = config['access_type']
key = config.get('access_key', None)
secret = config.get('access_secret', None)
if key is None or secret is None:
key, secret = _get_access_token(app_key, app_secret, access_type)
# Store access tokens for later
config['access_key'] = key
config['access_secret'] = secret
save_config(config_name, config)
sess = session.DropboxSession(app_key, app_secret, access_type)
sess.set_token(key, secret)
dropbox_client = client.DropboxClient(sess)
return dropbox_client
def main():
    # Demo when run as a script...
# Just print the account info to verify that the authentication worked:
print 'Getting account info...'
dropbox_client = get_client()
account_info = dropbox_client.account_info()
print 'linked account:', account_info
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to use Dropbox API<commit_after>
|
#!/usr/bin/env python
# YOU NEED TO PUT YOUR APP KEY AND SECRET IN dropbox_config.yaml!
# Go to dropbox.com/developers/apps to create an app.
import webbrowser
from dropbox import client, rest, session
import pickle
import yaml
def load_config(config_name):
with open(config_name) as f:
return yaml.load(f)
def save_config(config_name, config):
with open(config_name, 'w') as f:
f.write(yaml.dump(config, default_flow_style=True))
def _get_request_token(app_key, app_secret, access_type):
sess = session.DropboxSession(app_key, app_secret, access_type)
request_token = sess.obtain_request_token()
url = sess.build_authorize_url(request_token)
webbrowser.open(url)
print "Press enter after allowing application..."
raw_input()
return request_token
def _get_access_token(app_key, app_secret, access_type):
request_token = _get_request_token(app_key, app_secret, access_type)
sess = session.DropboxSession(app_key, app_secret, access_type)
access_token = sess.obtain_access_token(request_token)
return (access_token.key, access_token.secret)
def get_client():
config_name = "dropbox_config.yaml"
config = load_config(config_name)
app_key = config['app_key']
app_secret = config['app_secret']
access_type = config['access_type']
key = config.get('access_key', None)
secret = config.get('access_secret', None)
if key is None or secret is None:
key, secret = _get_access_token(app_key, app_secret, access_type)
# Store access tokens for later
config['access_key'] = key
config['access_secret'] = secret
save_config(config_name, config)
sess = session.DropboxSession(app_key, app_secret, access_type)
sess.set_token(key, secret)
dropbox_client = client.DropboxClient(sess)
return dropbox_client
def main():
    # Demo when run as a script...
# Just print the account info to verify that the authentication worked:
print 'Getting account info...'
dropbox_client = get_client()
account_info = dropbox_client.account_info()
print 'linked account:', account_info
if __name__ == '__main__':
main()
|
Add script to use Dropbox API#!/usr/bin/env python
# YOU NEED TO PUT YOUR APP KEY AND SECRET IN dropbox_config.yaml!
# Go to dropbox.com/developers/apps to create an app.
import webbrowser
from dropbox import client, rest, session
import pickle
import yaml
def load_config(config_name):
with open(config_name) as f:
return yaml.load(f)
def save_config(config_name, config):
with open(config_name, 'w') as f:
f.write(yaml.dump(config, default_flow_style=True))
def _get_request_token(app_key, app_secret, access_type):
sess = session.DropboxSession(app_key, app_secret, access_type)
request_token = sess.obtain_request_token()
url = sess.build_authorize_url(request_token)
webbrowser.open(url)
print "Press enter after allowing application..."
raw_input()
return request_token
def _get_access_token(app_key, app_secret, access_type):
request_token = _get_request_token(app_key, app_secret, access_type)
sess = session.DropboxSession(app_key, app_secret, access_type)
access_token = sess.obtain_access_token(request_token)
return (access_token.key, access_token.secret)
def get_client():
config_name = "dropbox_config.yaml"
config = load_config(config_name)
app_key = config['app_key']
app_secret = config['app_secret']
access_type = config['access_type']
key = config.get('access_key', None)
secret = config.get('access_secret', None)
if key is None or secret is None:
key, secret = _get_access_token(app_key, app_secret, access_type)
# Store access tokens for later
config['access_key'] = key
config['access_secret'] = secret
save_config(config_name, config)
sess = session.DropboxSession(app_key, app_secret, access_type)
sess.set_token(key, secret)
dropbox_client = client.DropboxClient(sess)
return dropbox_client
def main():
    # Demo when run as a script...
# Just print the account info to verify that the authentication worked:
print 'Getting account info...'
dropbox_client = get_client()
account_info = dropbox_client.account_info()
print 'linked account:', account_info
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to use Dropbox API<commit_after>#!/usr/bin/env python
# YOU NEED TO PUT YOUR APP KEY AND SECRET IN dropbox_config.yaml!
# Go to dropbox.com/developers/apps to create an app.
import webbrowser
from dropbox import client, rest, session
import pickle
import yaml
def load_config(config_name):
with open(config_name) as f:
return yaml.load(f)
def save_config(config_name, config):
with open(config_name, 'w') as f:
f.write(yaml.dump(config, default_flow_style=True))
def _get_request_token(app_key, app_secret, access_type):
sess = session.DropboxSession(app_key, app_secret, access_type)
request_token = sess.obtain_request_token()
url = sess.build_authorize_url(request_token)
webbrowser.open(url)
print "Press enter after allowing application..."
raw_input()
return request_token
def _get_access_token(app_key, app_secret, access_type):
request_token = _get_request_token(app_key, app_secret, access_type)
sess = session.DropboxSession(app_key, app_secret, access_type)
access_token = sess.obtain_access_token(request_token)
return (access_token.key, access_token.secret)
def get_client():
config_name = "dropbox_config.yaml"
config = load_config(config_name)
app_key = config['app_key']
app_secret = config['app_secret']
access_type = config['access_type']
key = config.get('access_key', None)
secret = config.get('access_secret', None)
if key is None or secret is None:
key, secret = _get_access_token(app_key, app_secret, access_type)
# Store access tokens for later
config['access_key'] = key
config['access_secret'] = secret
save_config(config_name, config)
sess = session.DropboxSession(app_key, app_secret, access_type)
sess.set_token(key, secret)
dropbox_client = client.DropboxClient(sess)
return dropbox_client
def main():
    # Demo when run as a script...
# Just print the account info to verify that the authentication worked:
print 'Getting account info...'
dropbox_client = get_client()
account_info = dropbox_client.account_info()
print 'linked account:', account_info
if __name__ == '__main__':
main()
|
|
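Once authorized, the returned client can drive other calls from the legacy v1 Dropbox SDK this script targets; for example, listing the linked folder (metadata() is a v1 API; a sketch only):

from dropbox_login import get_client

dropbox_client = get_client()
for entry in dropbox_client.metadata('/').get('contents', []):
    print(entry['path'])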
7036bfe5740ff1a3027485dab60615c2596bce11
|
hackerrank/pdfviewer/solution.py
|
hackerrank/pdfviewer/solution.py
|
"""
When you select a contiguous block of text in a PDF viewer, the selection is highlighted with a blue rectangle. In this PDF viewer, each word is highlighted independently. For example:
PDF-highighting.png
In this challenge, you will be given a list of letter heights in the alphabet and a string. Using the letter heights given, determine the area of the rectangle highlight in mm^2, assuming all letters are 1mm wide.
For example, the highlighted word is abc. Assume the heights of the letters are a = 1mm, b = 3mm and c = 1mm. The tallest letter is 3mm high and there are 3 letters. The highlighted area will be 3 * 1 * 3 = 9mm^2, so the answer is 9.
Function Description
Complete the designerPdfViewer function in the editor below. It should return an integer representing the size of the highlighted area.
designerPdfViewer has the following parameter(s):
h: an array of integers representing the heights of each letter
word: a string
Input Format
The first line contains 26 space-separated integers describing the respective heights of each consecutive lowercase English letter, ascii[a-z].
The second line contains a single word, consisting of lowercase English alphabetic letters.
Constraints
1 <= h[?] <= 7, where ? is an English lowercase letter.
word contains no more than 10 letters.
Output Format
Print a single integer denoting the area in mm^2 of the highlighted rectangle when the given word is selected. Do not print units of measure.
Sample Input 0
1 3 1 3 1 4 1 3 2 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5
abc
Sample Output 0
9
"""
#!/bin/python3
import math
import os
import random
import re
import sys
from typing import List
# Complete the designerPdfViewer function below.
def designerPdfViewer(h: List[int], word: str) -> int:
maxHeight = 0
for char in word:
height = h[ord(char) - 97]
if height > maxHeight:
maxHeight = height
return maxHeight * len(word)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
h = list(map(int, input().rstrip().split()))
word = input()
result = designerPdfViewer(h, word)
fptr.write(str(result) + '\n')
fptr.close()
|
Solve by exploiting ascii values
|
Solve by exploiting ascii values
|
Python
|
mit
|
lemming52/white_pawn,lemming52/white_pawn
|
Solve by exploiting ascii values
|
"""
When you select a contiguous block of text in a PDF viewer, the selection is highlighted with a blue rectangle. In this PDF viewer, each word is highlighted independently. For example:
PDF-highighting.png
In this challenge, you will be given a list of letter heights in the alphabet and a string. Using the letter heights given, determine the area of the rectangle highlight in mm^2, assuming all letters are 1mm wide.
For example, the highlighted word is abc. Assume the heights of the letters are a = 1mm, b = 3mm and c = 1mm. The tallest letter is 3mm high and there are 3 letters. The highlighted area will be 3 * 1 * 3 = 9mm^2, so the answer is 9.
Function Description
Complete the designerPdfViewer function in the editor below. It should return an integer representing the size of the highlighted area.
designerPdfViewer has the following parameter(s):
h: an array of integers representing the heights of each letter
word: a string
Input Format
The first line contains 26 space-separated integers describing the respective heights of each consecutive lowercase English letter, ascii[a-z].
The second line contains a single word, consisting of lowercase English alphabetic letters.
Constraints
1 <= h[?] <= 7, where ? is an English lowercase letter.
word contains no more than 10 letters.
Output Format
Print a single integer denoting the area in mm^2 of the highlighted rectangle when the given word is selected. Do not print units of measure.
Sample Input 0
1 3 1 3 1 4 1 3 2 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5
abc
Sample Output 0
9
"""
#!/bin/python3
import math
import os
import random
import re
import sys
from typing import List
# Complete the designerPdfViewer function below.
def designerPdfViewer(h: List[int], word: str) -> int:
maxHeight = 0
for char in word:
height = h[ord(char) - 97]
if height > maxHeight:
maxHeight = height
return maxHeight * len(word)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
h = list(map(int, input().rstrip().split()))
word = input()
result = designerPdfViewer(h, word)
fptr.write(str(result) + '\n')
fptr.close()
|
<commit_before><commit_msg>Solve by exploiting ascii values<commit_after>
|
"""
When you select a contiguous block of text in a PDF viewer, the selection is highlighted with a blue rectangle. In this PDF viewer, each word is highlighted independently. For example:
PDF-highighting.png
In this challenge, you will be given a list of letter heights in the alphabet and a string. Using the letter heights given, determine the area of the rectangle highlight in mm^2, assuming all letters are 1mm wide.
For example, the highlighted word is abc. Assume the heights of the letters are a = 1mm, b = 3mm and c = 1mm. The tallest letter is 3mm high and there are 3 letters. The highlighted area will be 3 * 1 * 3 = 9mm^2, so the answer is 9.
Function Description
Complete the designerPdfViewer function in the editor below. It should return an integer representing the size of the highlighted area.
designerPdfViewer has the following parameter(s):
h: an array of integers representing the heights of each letter
word: a string
Input Format
The first line contains 26 space-separated integers describing the respective heights of each consecutive lowercase English letter, ascii[a-z].
The second line contains a single word, consisting of lowercase English alphabetic letters.
Constraints
1 <= h[?] <= 7, where ? is an English lowercase letter.
word contains no more than 10 letters.
Output Format
Print a single integer denoting the area in mm^2 of the highlighted rectangle when the given word is selected. Do not print units of measure.
Sample Input 0
1 3 1 3 1 4 1 3 2 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5
abc
Sample Output 0
9
"""
#!/bin/python3
import math
import os
import random
import re
import sys
from typing import List
# Complete the designerPdfViewer function below.
def designerPdfViewer(h: List[int], word: str) -> int:
maxHeight = 0
for char in word:
height = h[ord(char) - 97]
if height > maxHeight:
maxHeight = height
return maxHeight * len(word)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
h = list(map(int, input().rstrip().split()))
word = input()
result = designerPdfViewer(h, word)
fptr.write(str(result) + '\n')
fptr.close()
|
Solve by exploiting ascii values"""
When you select a contiguous block of text in a PDF viewer, the selection is highlighted with a blue rectangle. In this PDF viewer, each word is highlighted independently. For example:
PDF-highighting.png
In this challenge, you will be given a list of letter heights in the alphabet and a string. Using the letter heights given, determine the area of the rectangle highlight in mm^2, assuming all letters are 1mm wide.
For example, the highlighted word is abc. Assume the heights of the letters are a = 1mm, b = 3mm and c = 1mm. The tallest letter is 3mm high and there are 3 letters. The highlighted area will be 3 * 1 * 3 = 9mm^2, so the answer is 9.
Function Description
Complete the designerPdfViewer function in the editor below. It should return an integer representing the size of the highlighted area.
designerPdfViewer has the following parameter(s):
h: an array of integers representing the heights of each letter
word: a string
Input Format
The first line contains 26 space-separated integers describing the respective heights of each consecutive lowercase English letter, ascii[a-z].
The second line contains a single word, consisting of lowercase English alphabetic letters.
Constraints
1 <= h[?] <= 7, where ? is an English lowercase letter.
word contains no more than 10 letters.
Output Format
Print a single integer denoting the area in mm^2 of the highlighted rectangle when the given word is selected. Do not print units of measure.
Sample Input 0
1 3 1 3 1 4 1 3 2 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5
abc
Sample Output 0
9
"""
#!/bin/python3
import math
import os
import random
import re
import sys
from typing import List
# Complete the designerPdfViewer function below.
def designerPdfViewer(h: List[int], word: str) -> int:
maxHeight = 0
for char in word:
height = h[ord(char) - 97]
if height > maxHeight:
maxHeight = height
return maxHeight * len(word)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
h = list(map(int, input().rstrip().split()))
word = input()
result = designerPdfViewer(h, word)
fptr.write(str(result) + '\n')
fptr.close()
|
<commit_before><commit_msg>Solve by exploiting ascii values<commit_after>"""
When you select a contiguous block of text in a PDF viewer, the selection is highlighted with a blue rectangle. In this PDF viewer, each word is highlighted independently. For example:
PDF-highighting.png
In this challenge, you will be given a list of letter heights in the alphabet and a string. Using the letter heights given, determine the area of the rectangle highlight in mm^2, assuming all letters are 1mm wide.
For example, the highlighted word is abc. Assume the heights of the letters are a = 1mm, b = 3mm and c = 1mm. The tallest letter is 3mm high and there are 3 letters. The highlighted area will be 3 * 1 * 3 = 9mm^2, so the answer is 9.
Function Description
Complete the designerPdfViewer function in the editor below. It should return an integer representing the size of the highlighted area.
designerPdfViewer has the following parameter(s):
h: an array of integers representing the heights of each letter
word: a string
Input Format
The first line contains 26 space-separated integers describing the respective heights of each consecutive lowercase English letter, ascii[a-z].
The second line contains a single word, consisting of lowercase English alphabetic letters.
Constraints
1 <= h[?] <= 7, where ? is an English lowercase letter.
word contains no more than 10 letters.
Output Format
Print a single integer denoting the area in mm^2 of the highlighted rectangle when the given word is selected. Do not print units of measure.
Sample Input 0
1 3 1 3 1 4 1 3 2 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5
abc
Sample Output 0
9
"""
#!/bin/python3
import math
import os
import random
import re
import sys
from typing import List
# Complete the designerPdfViewer function below.
def designerPdfViewer(h: List[int], word: str) -> int:
maxHeight = 0
for char in word:
height = h[ord(char) - 97]
if height > maxHeight:
maxHeight = height
return maxHeight * len(word)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
h = list(map(int, input().rstrip().split()))
word = input()
result = designerPdfViewer(h, word)
fptr.write(str(result) + '\n')
fptr.close()
|
|
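A quick worked check of the sample, using the same ord(char) - 97 indexing ('a'..'z' map to 0..25):

h = [1, 3, 1, 3, 1, 4, 1, 3, 2] + [5] * 17   # the 26 sample heights
word = 'abc'
tallest = max(h[ord(c) - 97] for c in word)  # heights 1, 3, 1 -> tallest is 3
print(tallest * len(word))                   # 3mm tall * 3 letters * 1mm wide = 9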
474d3379d6cc04c1f4bac441c83399860333de3d
|
excel_convert.py
|
excel_convert.py
|
import csv
import xlwt
import os
import sys
import subprocess
def convert(filename):
# Look for input file in same location as script file:
inputfilename = os.path.join(os.path.dirname(sys.argv[0]), filename)
# Strip off the path
basefilename = os.path.basename(inputfilename)
# Strip off the extension
basefilename_noext = os.path.splitext(basefilename)[0]
# Get the path of the input file as the target output path
targetoutputpath = os.path.dirname(inputfilename)
# Generate the output filename
outputfilename = os.path.join(targetoutputpath, basefilename_noext+'.xls')
# Create a workbook object
workbook = xlwt.Workbook()
# Add a sheet object
worksheet = workbook.add_sheet(basefilename_noext, cell_overwrite_ok=True)
# Get a CSV reader object set up for reading the input file with tab delimiters
datareader = csv.reader(open(inputfilename, 'r'),
delimiter='\t', quotechar='-')
# Process the file and output to Excel sheet
for rowno, row in enumerate(datareader):
for colno, colitem in enumerate(row):
worksheet.write(rowno, colno, colitem)
# Write the output file.
workbook.save(outputfilename)
    # Open the file via the operating system. os.startfile only works on
    # Windows; on Linux/Unix use subprocess.Popen(['xdg-open', filename]).
    #os.startfile(outputfilename)
subprocess.Popen(['xdg-open', outputfilename])
my_files = os.listdir()
for file in my_files:
if file.endswith("txt"):
convert(file)
|
Add excel converter which converts tab spaced text file data to excel file
|
Add excel converter which converts tab spaced text file data to excel file
|
Python
|
mit
|
prashanth-nani/sentiment-analysis,prashanth-nani/snapdeal-review-grabber
|
Add excel converter which converts tab spaced text file data to excel file
|
import csv
import xlwt
import os
import sys
import subprocess
def convert(filename):
# Look for input file in same location as script file:
inputfilename = os.path.join(os.path.dirname(sys.argv[0]), filename)
# Strip off the path
basefilename = os.path.basename(inputfilename)
# Strip off the extension
basefilename_noext = os.path.splitext(basefilename)[0]
# Get the path of the input file as the target output path
targetoutputpath = os.path.dirname(inputfilename)
# Generate the output filename
outputfilename = os.path.join(targetoutputpath, basefilename_noext+'.xls')
# Create a workbook object
workbook = xlwt.Workbook()
# Add a sheet object
worksheet = workbook.add_sheet(basefilename_noext, cell_overwrite_ok=True)
# Get a CSV reader object set up for reading the input file with tab delimiters
datareader = csv.reader(open(inputfilename, 'r'),
delimiter='\t', quotechar='-')
# Process the file and output to Excel sheet
for rowno, row in enumerate(datareader):
for colno, colitem in enumerate(row):
worksheet.write(rowno, colno, colitem)
# Write the output file.
workbook.save(outputfilename)
# Open it via the operating system (will only work on Windows)
# On Linux/Unix you would use subprocess.Popen(['xdg-open', filename])
#os.startfile(outputfilename)
subprocess.Popen(['xdg-open', outputfilename])
my_files = os.listdir()
for file in my_files:
if file.endswith("txt"):
convert(file)
|
<commit_before><commit_msg>Add excel converter which converts tab spaced text file data to excel file<commit_after>
|
import csv
import xlwt
import os
import sys
import subprocess
def convert(filename):
# Look for input file in same location as script file:
inputfilename = os.path.join(os.path.dirname(sys.argv[0]), filename)
# Strip off the path
basefilename = os.path.basename(inputfilename)
# Strip off the extension
basefilename_noext = os.path.splitext(basefilename)[0]
# Get the path of the input file as the target output path
targetoutputpath = os.path.dirname(inputfilename)
# Generate the output filename
outputfilename = os.path.join(targetoutputpath, basefilename_noext+'.xls')
# Create a workbook object
workbook = xlwt.Workbook()
# Add a sheet object
worksheet = workbook.add_sheet(basefilename_noext, cell_overwrite_ok=True)
# Get a CSV reader object set up for reading the input file with tab delimiters
datareader = csv.reader(open(inputfilename, 'r'),
delimiter='\t', quotechar='-')
# Process the file and output to Excel sheet
for rowno, row in enumerate(datareader):
for colno, colitem in enumerate(row):
worksheet.write(rowno, colno, colitem)
# Write the output file.
workbook.save(outputfilename)
# Open it via the operating system (will only work on Windows)
# On Linux/Unix you would use subprocess.Popen(['xdg-open', filename])
#os.startfile(outputfilename)
subprocess.Popen(['xdg-open', outputfilename])
my_files = os.listdir()
for file in my_files:
if file.endswith("txt"):
convert(file)
|
Add excel converter which converts tab spaced text file data to excel fileimport csv
import xlwt
import os
import sys
import subprocess
def convert(filename):
# Look for input file in same location as script file:
inputfilename = os.path.join(os.path.dirname(sys.argv[0]), filename)
# Strip off the path
basefilename = os.path.basename(inputfilename)
# Strip off the extension
basefilename_noext = os.path.splitext(basefilename)[0]
# Get the path of the input file as the target output path
targetoutputpath = os.path.dirname(inputfilename)
# Generate the output filename
outputfilename = os.path.join(targetoutputpath, basefilename_noext+'.xls')
# Create a workbook object
workbook = xlwt.Workbook()
# Add a sheet object
worksheet = workbook.add_sheet(basefilename_noext, cell_overwrite_ok=True)
# Get a CSV reader object set up for reading the input file with tab delimiters
datareader = csv.reader(open(inputfilename, 'r'),
delimiter='\t', quotechar='-')
# Process the file and output to Excel sheet
for rowno, row in enumerate(datareader):
for colno, colitem in enumerate(row):
worksheet.write(rowno, colno, colitem)
# Write the output file.
workbook.save(outputfilename)
# Open it via the operating system (will only work on Windows)
# On Linux/Unix you would use subprocess.Popen(['xdg-open', filename])
#os.startfile(outputfilename)
subprocess.Popen(['xdg-open', outputfilename])
my_files = os.listdir()
for file in my_files:
if file.endswith("txt"):
convert(file)
|
<commit_before><commit_msg>Add excel converter which converts tab spaced text file data to excel file<commit_after>import csv
import xlwt
import os
import sys
import subprocess
def convert(filename):
# Look for input file in same location as script file:
inputfilename = os.path.join(os.path.dirname(sys.argv[0]), filename)
# Strip off the path
basefilename = os.path.basename(inputfilename)
# Strip off the extension
basefilename_noext = os.path.splitext(basefilename)[0]
# Get the path of the input file as the target output path
targetoutputpath = os.path.dirname(inputfilename)
# Generate the output filename
outputfilename = os.path.join(targetoutputpath, basefilename_noext+'.xls')
# Create a workbook object
workbook = xlwt.Workbook()
# Add a sheet object
worksheet = workbook.add_sheet(basefilename_noext, cell_overwrite_ok=True)
# Get a CSV reader object set up for reading the input file with tab delimiters
datareader = csv.reader(open(inputfilename, 'r'),
delimiter='\t', quotechar='-')
# Process the file and output to Excel sheet
for rowno, row in enumerate(datareader):
for colno, colitem in enumerate(row):
worksheet.write(rowno, colno, colitem)
# Write the output file.
workbook.save(outputfilename)
# Open it via the operating system (will only work on Windows)
# On Linux/Unix you would use subprocess.Popen(['xdg-open', filename])
#os.startfile(outputfilename)
subprocess.Popen(['xdg-open', outputfilename])
my_files = os.listdir()
for file in my_files:
if file.endswith("txt"):
convert(file)
|
|
02fad660afbb6b5ca1fc4f1c3a1fcf3c95f9fd0d
|
pypeerassets/providers/node.py
|
pypeerassets/providers/node.py
|
'''Communicate with local or remote peercoin-daemon via JSON-RPC'''
from operator import itemgetter
try:
from peercoin_rpc import Client
except:
raise EnvironmentError("peercoin_rpc library is required for this to work,\
use pip to install it.")
def select_inputs(cls, total_amount):
    '''finds appropriate utxo's to include in rawtx, while being careful
    to never spend old transactions with a lot of coin age.
    Argument is integer, returns list of appropriate UTXO's'''
my_addresses = [i["address"] for i in cls.listreceivedbyaddress()]
utxo = []
utxo_sum = float(-0.01) ## starts from negative due to minimal fee
for tx in sorted(cls.listunspent(), key=itemgetter('confirmations')):
for v in cls.getrawtransaction(tx["txid"])["vout"]:
if v["scriptPubKey"]["addresses"][0] in my_addresses:
utxo.append({
"txid": tx["txid"],
"vout": v["n"],
"ScriptSig": v["scriptPubKey"]["hex"]
})
utxo_sum += float(v["value"])
if utxo_sum >= total_amount:
return utxo
if utxo_sum < total_amount:
raise ValueError("Not enough funds.")
Client.select_inputs = select_inputs
|
'''Communicate with local or remote peercoin-daemon via JSON-RPC'''
from operator import itemgetter
try:
from peercoin_rpc import Client
except:
raise EnvironmentError("peercoin_rpc library is required for this to work,\
use pip to install it.")
def select_inputs(cls, total_amount):
    '''finds appropriate utxo's to include in rawtx, while being careful
    to never spend old transactions with a lot of coin age.
    Argument is integer, returns list of appropriate UTXO's'''
utxo = []
utxo_sum = float(-0.01) ## starts from negative due to minimal fee
for tx in sorted(cls.listunspent(), key=itemgetter('confirmations')):
utxo.append({
"txid": tx["txid"],
"vout": tx["vout"],
"ScriptSig": tx["scriptPubKey"]
})
utxo_sum += float(tx["amount"])
if utxo_sum >= total_amount:
return utxo
if utxo_sum < total_amount:
raise ValueError("Not enough funds.")
Client.select_inputs = select_inputs
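# Hypothetical usage sketch (connection arguments are illustrative; check the
# peercoin_rpc documentation for the exact Client constructor signature):
#   node = Client(testnet=True, username="rpcuser", password="rpcpass")
#   inputs = node.select_inputs(10.0)  # UTXOs covering 10 coins plus the fee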
|
Refactor unspent utxo data gathering
|
Refactor unspent utxo data gathering
|
Python
|
bsd-3-clause
|
PeerAssets/pypeerassets,backpacker69/pypeerassets
|
'''Communicate with local or remote peercoin-daemon via JSON-RPC'''
from operator import itemgetter
try:
from peercoin_rpc import Client
except:
raise EnvironmentError("peercoin_rpc library is required for this to work,\
use pip to install it.")
def select_inputs(cls, total_amount):
    '''finds appropriate utxo's to include in rawtx, while being careful
    to never spend old transactions with a lot of coin age.
    Argument is integer, returns list of appropriate UTXO's'''
my_addresses = [i["address"] for i in cls.listreceivedbyaddress()]
utxo = []
utxo_sum = float(-0.01) ## starts from negative due to minimal fee
for tx in sorted(cls.listunspent(), key=itemgetter('confirmations')):
for v in cls.getrawtransaction(tx["txid"])["vout"]:
if v["scriptPubKey"]["addresses"][0] in my_addresses:
utxo.append({
"txid": tx["txid"],
"vout": v["n"],
"ScriptSig": v["scriptPubKey"]["hex"]
})
utxo_sum += float(v["value"])
if utxo_sum >= total_amount:
return utxo
if utxo_sum < total_amount:
raise ValueError("Not enough funds.")
Client.select_inputs = select_inputs
Refactor unspent utxo data gathering
|
'''Communicate with local or remote peercoin-daemon via JSON-RPC'''
from operator import itemgetter
try:
from peercoin_rpc import Client
except:
raise EnvironmentError("peercoin_rpc library is required for this to work,\
use pip to install it.")
def select_inputs(cls, total_amount):
    '''finds appropriate utxo's to include in rawtx, while being careful
    to never spend old transactions with a lot of coin age.
    Argument is integer, returns list of appropriate UTXO's'''
utxo = []
utxo_sum = float(-0.01) ## starts from negative due to minimal fee
for tx in sorted(cls.listunspent(), key=itemgetter('confirmations')):
utxo.append({
"txid": tx["txid"],
"vout": tx["vout"],
"ScriptSig": tx["scriptPubKey"]
})
utxo_sum += float(tx["amount"])
if utxo_sum >= total_amount:
return utxo
if utxo_sum < total_amount:
raise ValueError("Not enough funds.")
Client.select_inputs = select_inputs
|
<commit_before>
'''Communicate with local or remote peercoin-daemon via JSON-RPC'''
from operator import itemgetter
try:
from peercoin_rpc import Client
except:
raise EnvironmentError("peercoin_rpc library is required for this to work,\
use pip to install it.")
def select_inputs(cls, total_amount):
    '''finds appropriate utxo's to include in rawtx, while being careful
    to never spend old transactions with a lot of coin age.
    Argument is integer, returns list of appropriate UTXO's'''
my_addresses = [i["address"] for i in cls.listreceivedbyaddress()]
utxo = []
utxo_sum = float(-0.01) ## starts from negative due to minimal fee
for tx in sorted(cls.listunspent(), key=itemgetter('confirmations')):
for v in cls.getrawtransaction(tx["txid"])["vout"]:
if v["scriptPubKey"]["addresses"][0] in my_addresses:
utxo.append({
"txid": tx["txid"],
"vout": v["n"],
"ScriptSig": v["scriptPubKey"]["hex"]
})
utxo_sum += float(v["value"])
if utxo_sum >= total_amount:
return utxo
if utxo_sum < total_amount:
raise ValueError("Not enough funds.")
Client.select_inputs = select_inputs
<commit_msg>Refactor unspent utxo data gathering<commit_after>
|
'''Communicate with local or remote peercoin-daemon via JSON-RPC'''
from operator import itemgetter
try:
from peercoin_rpc import Client
except:
raise EnvironmentError("peercoin_rpc library is required for this to work,\
use pip to install it.")
def select_inputs(cls, total_amount):
    '''finds appropriate utxo's to include in rawtx, while being careful
    to never spend old transactions with a lot of coin age.
    Argument is integer, returns list of appropriate UTXO's'''
utxo = []
utxo_sum = float(-0.01) ## starts from negative due to minimal fee
for tx in sorted(cls.listunspent(), key=itemgetter('confirmations')):
utxo.append({
"txid": tx["txid"],
"vout": tx["vout"],
"ScriptSig": tx["scriptPubKey"]
})
utxo_sum += float(tx["amount"])
if utxo_sum >= total_amount:
return utxo
if utxo_sum < total_amount:
raise ValueError("Not enough funds.")
Client.select_inputs = select_inputs
|
'''Communicate with local or remote peercoin-daemon via JSON-RPC'''
from operator import itemgetter
try:
from peercoin_rpc import Client
except:
raise EnvironmentError("peercoin_rpc library is required for this to work,\
use pip to install it.")
def select_inputs(cls, total_amount):
    '''finds appropriate utxo's to include in rawtx, while being careful
    to never spend old transactions with a lot of coin age.
    Argument is integer, returns list of appropriate UTXO's'''
my_addresses = [i["address"] for i in cls.listreceivedbyaddress()]
utxo = []
utxo_sum = float(-0.01) ## starts from negative due to minimal fee
for tx in sorted(cls.listunspent(), key=itemgetter('confirmations')):
for v in cls.getrawtransaction(tx["txid"])["vout"]:
if v["scriptPubKey"]["addresses"][0] in my_addresses:
utxo.append({
"txid": tx["txid"],
"vout": v["n"],
"ScriptSig": v["scriptPubKey"]["hex"]
})
utxo_sum += float(v["value"])
if utxo_sum >= total_amount:
return utxo
if utxo_sum < total_amount:
raise ValueError("Not enough funds.")
Client.select_inputs = select_inputs
Refactor unspent utxo data gathering
'''Communicate with local or remote peercoin-daemon via JSON-RPC'''
from operator import itemgetter
try:
from peercoin_rpc import Client
except:
raise EnvironmentError("peercoin_rpc library is required for this to work,\
use pip to install it.")
def select_inputs(cls, total_amount):
    '''finds appropriate utxo's to include in rawtx, while being careful
    to never spend old transactions with a lot of coin age.
    Argument is integer, returns list of appropriate UTXO's'''
utxo = []
utxo_sum = float(-0.01) ## starts from negative due to minimal fee
for tx in sorted(cls.listunspent(), key=itemgetter('confirmations')):
utxo.append({
"txid": tx["txid"],
"vout": tx["vout"],
"ScriptSig": tx["scriptPubKey"]
})
utxo_sum += float(tx["amount"])
if utxo_sum >= total_amount:
return utxo
if utxo_sum < total_amount:
raise ValueError("Not enough funds.")
Client.select_inputs = select_inputs
|
<commit_before>
'''Communicate with local or remote peercoin-daemon via JSON-RPC'''
from operator import itemgetter
try:
from peercoin_rpc import Client
except:
raise EnvironmentError("peercoin_rpc library is required for this to work,\
use pip to install it.")
def select_inputs(cls, total_amount):
    '''finds appropriate utxo's to include in rawtx, while being careful
    to never spend old transactions with a lot of coin age.
    Argument is integer, returns list of appropriate UTXO's'''
my_addresses = [i["address"] for i in cls.listreceivedbyaddress()]
utxo = []
utxo_sum = float(-0.01) ## starts from negative due to minimal fee
for tx in sorted(cls.listunspent(), key=itemgetter('confirmations')):
for v in cls.getrawtransaction(tx["txid"])["vout"]:
if v["scriptPubKey"]["addresses"][0] in my_addresses:
utxo.append({
"txid": tx["txid"],
"vout": v["n"],
"ScriptSig": v["scriptPubKey"]["hex"]
})
utxo_sum += float(v["value"])
if utxo_sum >= total_amount:
return utxo
if utxo_sum < total_amount:
raise ValueError("Not enough funds.")
Client.select_inputs = select_inputs
<commit_msg>Refactor unspent utxo data gathering<commit_after>
'''Communicate with local or remote peercoin-daemon via JSON-RPC'''
from operator import itemgetter
try:
from peercoin_rpc import Client
except:
raise EnvironmentError("peercoin_rpc library is required for this to work,\
use pip to install it.")
def select_inputs(cls, total_amount):
    '''finds appropriate utxo's to include in rawtx, while being careful
    to never spend old transactions with a lot of coin age.
    Argument is integer, returns list of appropriate UTXO's'''
utxo = []
utxo_sum = float(-0.01) ## starts from negative due to minimal fee
for tx in sorted(cls.listunspent(), key=itemgetter('confirmations')):
utxo.append({
"txid": tx["txid"],
"vout": tx["vout"],
"ScriptSig": tx["scriptPubKey"]
})
utxo_sum += float(tx["amount"])
if utxo_sum >= total_amount:
return utxo
if utxo_sum < total_amount:
raise ValueError("Not enough funds.")
Client.select_inputs = select_inputs
|
5ad7e4f7b1be1203b63ff4f530d57d0bd3e092c4
|
add_filename_suffix.py
|
add_filename_suffix.py
|
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2014 Keita Kita
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import shutil
import sys
if len(sys.argv) < 3:
    print u'add_filename_suffix.py suffix file...'
sys.exit(1)
added_suffix = sys.argv[1]
for file_path in sys.argv[2:]:
new_file_path = file_path + added_suffix
shutil.move(file_path, new_file_path)
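# Hypothetical usage sketch (Python 2, file names are illustrative):
#   $ python add_filename_suffix.py .bak notes.txt report.txt
#   -> renames notes.txt to notes.txt.bak and report.txt to report.txt.bak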
|
Add the script on Python 2.x.
|
Add the script on Python 2.x.
|
Python
|
mit
|
mikanbako/add-filename-suffix
|
Add the script on Python 2.x.
|
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2014 Keita Kita
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import shutil
import sys
if len(sys.argv) < 3:
    print u'add_filename_suffix.py suffix file...'
sys.exit(1)
added_suffix = sys.argv[1]
for file_path in sys.argv[2:]:
new_file_path = file_path + added_suffix
shutil.move(file_path, new_file_path)
|
<commit_before><commit_msg>Add the script on Python 2.x.<commit_after>
|
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2014 Keita Kita
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import shutil
import sys
if len(sys.argv) < 3:
    print u'add_filename_suffix.py suffix file...'
sys.exit(1)
added_suffix = sys.argv[1]
for file_path in sys.argv[2:]:
new_file_path = file_path + added_suffix
shutil.move(file_path, new_file_path)
|
Add the script on Python 2.x.#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2014 Keita Kita
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import shutil
import sys
if len(sys.argv) < 3:
    print u'add_filename_suffix.py suffix file...'
sys.exit(1)
added_suffix = sys.argv[1]
for file_path in sys.argv[2:]:
new_file_path = file_path + added_suffix
shutil.move(file_path, new_file_path)
|
<commit_before><commit_msg>Add the script on Python 2.x.<commit_after>#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2014 Keita Kita
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import shutil
import sys
if len(sys.argv) < 3:
    print u'add_filename_suffix.py suffix file...'
sys.exit(1)
added_suffix = sys.argv[1]
for file_path in sys.argv[2:]:
new_file_path = file_path + added_suffix
shutil.move(file_path, new_file_path)
|
|
a78f918849e35dca110eec38741001ab11279c65
|
sara_flexbe_states/src/sara_flexbe_states/WonderlandAddUpdatePeople.py
|
sara_flexbe_states/src/sara_flexbe_states/WonderlandAddUpdatePeople.py
|
#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
"""
Created on 17/05/2018
@author: Lucas Maurice
"""
class WonderlandAddUpdatePeople(EventState):
'''
Add or update all known persons in wonderland.
    <= done    returned when the addition completes correctly
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandAddUpdatePeople, self).__init__(outcomes=['done'])
self.url = "http://wonderland:8000/api/people/"
def execute(self, userdata):
# Generate URL to contact
s = 0
def addPerson(self, entity):
if entity.face.id is None:
Logger.logwarn('Need face ID !')
return 'bad_request'
if entity.wonderlandId is None and entity.face.id is None:
Logger.logwarn('Need wonderland ID or face ID !')
return 'bad_request'
data = {'peopleRecognitionId': entity.face.id}
if entity.color is not None:
data.update({'peopleColor': entity.color})
if entity.pose is not None:
data.update({'peoplePose': entity.pose})
if entity.poseProbability is not None:
data.update({'peoplePoseAccuracy': entity.poseProbability})
if entity.face.gender is not None:
data.update({'peopleGender': entity.face.gender})
if entity.face.genderProbability is not None:
data.update({'peopleGenderAccuracy': entity.face.genderProbability})
if entity.face.emotion is not None:
data.update({'peopleEmotion': entity.face.emotion})
if entity.face.emotionProbability is not None:
data.update({'peopleEmotionAccuracy': entity.face.emotionProbability})
if entity.face.emotion is not None:
data.update({'peopleEmotion': entity.face.emotion})
if entity.face.emotionProbability is not None:
data.update({'peopleEmotionAccuracy': entity.face.emotionProbability})
if entity.isOperator is None:
data.update({'peopleIsOperator': False})
else:
data.update({'peopleIsOperator': entity.isOperator})
if len(entity.aliases) > 0:
data.update({'peopleName': entity.aliases[0]})
# try the request
try:
response = requests.post(self.url, data=data)
if response.status_code == 201:
return 'done'
elif 400 <= response.status_code < 500:
Logger.logwarn(response.status_code)
data = json.loads(response.content)
if 'peopleRecognitionId' in data and data['peopleRecognitionId'][0] \
== u'people with this peopleRecognitionId already exists.':
return 'already_exit'
else:
return 'bad_request'
else:
Logger.logerr(response.status_code)
return 'error'
except requests.exceptions.RequestException as e:
Logger.logerr(e)
return 'error'
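# Illustrative shape of the POST payload assembled above (values are
# hypothetical; only the attributes actually set on the entity are included):
#   data = {'peopleRecognitionId': 42,
#           'peopleGender': 'male', 'peopleGenderAccuracy': 0.93,
#           'peopleIsOperator': False, 'peopleName': 'alice'}
#   requests.post('http://wonderland:8000/api/people/', data=data)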
|
Add a state for update entire list of persons.
|
Add a state for update entire list of persons.
|
Python
|
bsd-3-clause
|
WalkingMachine/sara_behaviors,WalkingMachine/sara_behaviors
|
Add a state for update entire list of persons.
|
#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
"""
Created on 17/05/2018
@author: Lucas Maurice
"""
class WonderlandAddUpdatePeople(EventState):
'''
Add or update all known persons in wonderland.
    <= done    returned when the addition completes correctly
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandAddUpdatePeople, self).__init__(outcomes=['done'])
self.url = "http://wonderland:8000/api/people/"
def execute(self, userdata):
# Generate URL to contact
s = 0
def addPerson(self, entity):
if entity.face.id is None:
Logger.logwarn('Need face ID !')
return 'bad_request'
if entity.wonderlandId is None and entity.face.id is None:
Logger.logwarn('Need wonderland ID or face ID !')
return 'bad_request'
data = {'peopleRecognitionId': entity.face.id}
if entity.color is not None:
data.update({'peopleColor': entity.color})
if entity.pose is not None:
data.update({'peoplePose': entity.pose})
if entity.poseProbability is not None:
data.update({'peoplePoseAccuracy': entity.poseProbability})
if entity.face.gender is not None:
data.update({'peopleGender': entity.face.gender})
if entity.face.genderProbability is not None:
data.update({'peopleGenderAccuracy': entity.face.genderProbability})
if entity.face.emotion is not None:
data.update({'peopleEmotion': entity.face.emotion})
if entity.face.emotionProbability is not None:
data.update({'peopleEmotionAccuracy': entity.face.emotionProbability})
if entity.face.emotion is not None:
data.update({'peopleEmotion': entity.face.emotion})
if entity.face.emotionProbability is not None:
data.update({'peopleEmotionAccuracy': entity.face.emotionProbability})
if entity.isOperator is None:
data.update({'peopleIsOperator': False})
else:
data.update({'peopleIsOperator': entity.isOperator})
if len(entity.aliases) > 0:
data.update({'peopleName': entity.aliases[0]})
# try the request
try:
response = requests.post(self.url, data=data)
if response.status_code == 201:
return 'done'
elif 400 <= response.status_code < 500:
Logger.logwarn(response.status_code)
data = json.loads(response.content)
if 'peopleRecognitionId' in data and data['peopleRecognitionId'][0] \
== u'people with this peopleRecognitionId already exists.':
return 'already_exit'
else:
return 'bad_request'
else:
Logger.logerr(response.status_code)
return 'error'
except requests.exceptions.RequestException as e:
Logger.logerr(e)
return 'error'
|
<commit_before><commit_msg>Add a state for update entire list of persons.<commit_after>
|
#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
"""
Created on 17/05/2018
@author: Lucas Maurice
"""
class WonderlandAddUpdatePeople(EventState):
'''
Add or update all known persons in wonderland.
    <= done    returned when the addition completes correctly
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandAddUpdatePeople, self).__init__(outcomes=['done'])
self.url = "http://wonderland:8000/api/people/"
def execute(self, userdata):
# Generate URL to contact
s = 0
def addPerson(self, entity):
if entity.face.id is None:
Logger.logwarn('Need face ID !')
return 'bad_request'
if entity.wonderlandId is None and entity.face.id is None:
Logger.logwarn('Need wonderland ID or face ID !')
return 'bad_request'
data = {'peopleRecognitionId': entity.face.id}
if entity.color is not None:
data.update({'peopleColor': entity.color})
if entity.pose is not None:
data.update({'peoplePose': entity.pose})
if entity.poseProbability is not None:
data.update({'peoplePoseAccuracy': entity.poseProbability})
if entity.face.gender is not None:
data.update({'peopleGender': entity.face.gender})
if entity.face.genderProbability is not None:
data.update({'peopleGenderAccuracy': entity.face.genderProbability})
if entity.face.emotion is not None:
data.update({'peopleEmotion': entity.face.emotion})
if entity.face.emotionProbability is not None:
data.update({'peopleEmotionAccuracy': entity.face.emotionProbability})
if entity.face.emotion is not None:
data.update({'peopleEmotion': entity.face.emotion})
if entity.face.emotionProbability is not None:
data.update({'peopleEmotionAccuracy': entity.face.emotionProbability})
if entity.isOperator is None:
data.update({'peopleIsOperator': False})
else:
data.update({'peopleIsOperator': entity.isOperator})
if len(entity.aliases) > 0:
data.update({'peopleName': entity.aliases[0]})
# try the request
try:
response = requests.post(self.url, data=data)
if response.status_code == 201:
return 'done'
elif 400 <= response.status_code < 500:
Logger.logwarn(response.status_code)
data = json.loads(response.content)
if 'peopleRecognitionId' in data and data['peopleRecognitionId'][0] \
== u'people with this peopleRecognitionId already exists.':
return 'already_exit'
else:
return 'bad_request'
else:
Logger.logerr(response.status_code)
return 'error'
except requests.exceptions.RequestException as e:
Logger.logerr(e)
return 'error'
|
Add a state for update entire list of persons.#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
"""
Created on 17/05/2018
@author: Lucas Maurice
"""
class WonderlandAddUpdatePeople(EventState):
'''
Add or update all known persons in wonderland.
    <= done    returned when the addition completes correctly
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandAddUpdatePeople, self).__init__(outcomes=['done'])
self.url = "http://wonderland:8000/api/people/"
def execute(self, userdata):
# Generate URL to contact
s = 0
def addPerson(self, entity):
if entity.face.id is None:
Logger.logwarn('Need face ID !')
return 'bad_request'
if entity.wonderlandId is None and entity.face.id is None:
Logger.logwarn('Need wonderland ID or face ID !')
return 'bad_request'
data = {'peopleRecognitionId': entity.face.id}
if entity.color is not None:
data.update({'peopleColor': entity.color})
if entity.pose is not None:
data.update({'peoplePose': entity.pose})
if entity.poseProbability is not None:
data.update({'peoplePoseAccuracy': entity.poseProbability})
if entity.face.gender is not None:
data.update({'peopleGender': entity.face.gender})
if entity.face.genderProbability is not None:
data.update({'peopleGenderAccuracy': entity.face.genderProbability})
if entity.face.emotion is not None:
data.update({'peopleEmotion': entity.face.emotion})
if entity.face.emotionProbability is not None:
data.update({'peopleEmotionAccuracy': entity.face.emotionProbability})
if entity.face.emotion is not None:
data.update({'peopleEmotion': entity.face.emotion})
if entity.face.emotionProbability is not None:
data.update({'peopleEmotionAccuracy': entity.face.emotionProbability})
if entity.isOperator is None:
data.update({'peopleIsOperator': False})
else:
data.update({'peopleIsOperator': entity.isOperator})
if len(entity.aliases) > 0:
data.update({'peopleName': entity.aliases[0]})
# try the request
try:
response = requests.post(self.url, data=data)
if response.status_code == 201:
return 'done'
elif 400 <= response.status_code < 500:
Logger.logwarn(response.status_code)
data = json.loads(response.content)
if 'peopleRecognitionId' in data and data['peopleRecognitionId'][0] \
== u'people with this peopleRecognitionId already exists.':
return 'already_exit'
else:
return 'bad_request'
else:
Logger.logerr(response.status_code)
return 'error'
except requests.exceptions.RequestException as e:
Logger.logerr(e)
return 'error'
|
<commit_before><commit_msg>Add a state for update entire list of persons.<commit_after>#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
"""
Created on 17/05/2018
@author: Lucas Maurice
"""
class WonderlandAddUpdatePeople(EventState):
'''
Add or update all known persons in wonderland.
    <= done    returned when the addition completes correctly
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandAddUpdatePeople, self).__init__(outcomes=['done'])
self.url = "http://wonderland:8000/api/people/"
def execute(self, userdata):
# Generate URL to contact
s = 0
def addPerson(self, entity):
if entity.face.id is None:
Logger.logwarn('Need face ID !')
return 'bad_request'
if entity.wonderlandId is None and entity.face.id is None:
Logger.logwarn('Need wonderland ID or face ID !')
return 'bad_request'
data = {'peopleRecognitionId': entity.face.id}
if entity.color is not None:
data.update({'peopleColor': entity.color})
if entity.pose is not None:
data.update({'peoplePose': entity.pose})
if entity.poseProbability is not None:
data.update({'peoplePoseAccuracy': entity.poseProbability})
if entity.face.gender is not None:
data.update({'peopleGender': entity.face.gender})
if entity.face.genderProbability is not None:
data.update({'peopleGenderAccuracy': entity.face.genderProbability})
if entity.face.emotion is not None:
data.update({'peopleEmotion': entity.face.emotion})
if entity.face.emotionProbability is not None:
data.update({'peopleEmotionAccuracy': entity.face.emotionProbability})
if entity.face.emotion is not None:
data.update({'peopleEmotion': entity.face.emotion})
if entity.face.emotionProbability is not None:
data.update({'peopleEmotionAccuracy': entity.face.emotionProbability})
if entity.isOperator is None:
data.update({'peopleIsOperator': False})
else:
data.update({'peopleIsOperator': entity.isOperator})
if len(entity.aliases) > 0:
data.update({'peopleName': entity.aliases[0]})
# try the request
try:
response = requests.post(self.url, data=data)
if response.status_code == 201:
return 'done'
elif 400 <= response.status_code < 500:
Logger.logwarn(response.status_code)
data = json.loads(response.content)
if 'peopleRecognitionId' in data and data['peopleRecognitionId'][0] \
== u'people with this peopleRecognitionId already exists.':
return 'already_exit'
else:
return 'bad_request'
else:
Logger.logerr(response.status_code)
return 'error'
except requests.exceptions.RequestException as e:
Logger.logerr(e)
return 'error'
|
|
c7c7c3aaf466af37fb8e632b959b08ce34d143dc
|
calico/felix/test/test_endpoint.py
|
calico/felix/test/test_endpoint.py
|
# -*- coding: utf-8 -*-
# Copyright 2014 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_endpoint
~~~~~~~~~~~~~~~~~~~~~~~~
Test the endpoint handling code.
"""
import mock
import sys
import unittest
import uuid
import calico.felix.devices as devices
# Stub out the iptables code.
import calico.felix.test.stub_fiptables
sys.modules['calico.felix.fiptables'] = __import__('calico.felix.test.stub_fiptables')
calico.felix.fiptables = calico.felix.test.stub_fiptables
import calico.felix.endpoint as endpoint
class TestEndpoint(unittest.TestCase):
def test_program_bails_early(self):
"""
Test that programming an endpoint fails early if the endpoint is down.
"""
devices.interface_up = mock.MagicMock()
devices.interface_up.return_value = False
ep = endpoint.Endpoint(str(uuid.uuid4()), 'aa:bb:cc:dd:ee:ff')
retval = ep.program_endpoint()
self.assertFalse(retval)
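# The sys.modules substitution above is a general stubbing pattern; a minimal
# standalone sketch (module and attribute names are hypothetical):
#   import sys, types
#   stub = types.ModuleType('pkg.heavy_module')
#   stub.do_work = lambda: None
#   sys.modules['pkg.heavy_module'] = stub  # later imports receive the stub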
|
Add initial tests for endpoint code.
|
Add initial tests for endpoint code.
|
Python
|
apache-2.0
|
ocadotechnology/calico,alexhersh/calico,nbartos/calico,neiljerram/felix,beddari/calico,alexaltair/calico,alexhersh/calico,matthewdupre/felix,TrimBiggs/calico,kasisnu/calico,alexaltair/calico,neiljerram/felix,neiljerram/felix,beddari/calico,anortef/calico,ocadotechnology/calico,fasaxc/felix,matthewdupre/felix,Metaswitch/calico,neiljerram/felix,anortef/calico,Metaswitch/calico,fasaxc/felix,nbartos/calico,kasisnu/calico,TrimBiggs/calico
|
Add initial tests for endpoint code.
|
# -*- coding: utf-8 -*-
# Copyright 2014 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_endpoint
~~~~~~~~~~~~~~~~~~~~~~~~
Test the endpoint handling code.
"""
import mock
import sys
import unittest
import uuid
import calico.felix.devices as devices
# Stub out the iptables code.
import calico.felix.test.stub_fiptables
sys.modules['calico.felix.fiptables'] = __import__('calico.felix.test.stub_fiptables')
calico.felix.fiptables = calico.felix.test.stub_fiptables
import calico.felix.endpoint as endpoint
class TestEndpoint(unittest.TestCase):
def test_program_bails_early(self):
"""
Test that programming an endpoint fails early if the endpoint is down.
"""
devices.interface_up = mock.MagicMock()
devices.interface_up.return_value = False
ep = endpoint.Endpoint(str(uuid.uuid4()), 'aa:bb:cc:dd:ee:ff')
retval = ep.program_endpoint()
self.assertFalse(retval)
|
<commit_before><commit_msg>Add initial tests for endpoint code.<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright 2014 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_endpoint
~~~~~~~~~~~~~~~~~~~~~~~~
Test the endpoint handling code.
"""
import mock
import sys
import unittest
import uuid
import calico.felix.devices as devices
# Stub out the iptables code.
import calico.felix.test.stub_fiptables
sys.modules['calico.felix.fiptables'] = __import__('calico.felix.test.stub_fiptables')
calico.felix.fiptables = calico.felix.test.stub_fiptables
import calico.felix.endpoint as endpoint
class TestEndpoint(unittest.TestCase):
def test_program_bails_early(self):
"""
Test that programming an endpoint fails early if the endpoint is down.
"""
devices.interface_up = mock.MagicMock()
devices.interface_up.return_value = False
ep = endpoint.Endpoint(str(uuid.uuid4()), 'aa:bb:cc:dd:ee:ff')
retval = ep.program_endpoint()
self.assertFalse(retval)
|
Add initial tests for endpoint code.# -*- coding: utf-8 -*-
# Copyright 2014 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_endpoint
~~~~~~~~~~~~~~~~~~~~~~~~
Test the endpoint handling code.
"""
import mock
import sys
import unittest
import uuid
import calico.felix.devices as devices
# Stub out the iptables code.
import calico.felix.test.stub_fiptables
sys.modules['calico.felix.fiptables'] = __import__('calico.felix.test.stub_fiptables')
calico.felix.fiptables = calico.felix.test.stub_fiptables
import calico.felix.endpoint as endpoint
class TestEndpoint(unittest.TestCase):
def test_program_bails_early(self):
"""
Test that programming an endpoint fails early if the endpoint is down.
"""
devices.interface_up = mock.MagicMock()
devices.interface_up.return_value = False
ep = endpoint.Endpoint(str(uuid.uuid4()), 'aa:bb:cc:dd:ee:ff')
retval = ep.program_endpoint()
self.assertFalse(retval)
|
<commit_before><commit_msg>Add initial tests for endpoint code.<commit_after># -*- coding: utf-8 -*-
# Copyright 2014 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_endpoint
~~~~~~~~~~~~~~~~~~~~~~~~
Test the endpoint handling code.
"""
import mock
import sys
import unittest
import uuid
import calico.felix.devices as devices
# Stub out the iptables code.
import calico.felix.test.stub_fiptables
sys.modules['calico.felix.fiptables'] = __import__('calico.felix.test.stub_fiptables')
calico.felix.fiptables = calico.felix.test.stub_fiptables
import calico.felix.endpoint as endpoint
class TestEndpoint(unittest.TestCase):
def test_program_bails_early(self):
"""
Test that programming an endpoint fails early if the endpoint is down.
"""
devices.interface_up = mock.MagicMock()
devices.interface_up.return_value = False
ep = endpoint.Endpoint(str(uuid.uuid4()), 'aa:bb:cc:dd:ee:ff')
retval = ep.program_endpoint()
self.assertFalse(retval)
|
|
942315bb5baa45df4dfde9b04b99685a6be6f574
|
diagnose_error.py
|
diagnose_error.py
|
from __future__ import with_statement
import os, sys, tempfile, subprocess, re
__all__ = ["has_error", "get_error_line_number", "make_reg_string", "get_coq_output"]
DEFAULT_ERROR_REG_STRING = 'File "[^"]+", line ([0-9]+), characters [0-9-]+:\n([^\n]+)'
DEFAULT_ERROR_REG_STRING_GENERIC = 'File "[^"]+", line ([0-9]+), characters [0-9-]+:\n(%s)'
def has_error(output, reg_string=DEFAULT_ERROR_REG_STRING):
"""Returns True if the coq output encoded in output has an error
matching the given regular expression, False otherwise.
"""
errors = re.search(reg_string, output)
if errors:
return True
else:
return False
def get_error_line_number(output, reg_string=DEFAULT_ERROR_REG_STRING):
"""Returns the line number that the error matching reg_string
    occurred on.
    Precondition: has_error(output, reg_string)
    """
    return int(re.search(reg_string, output).groups()[0])
def get_error_string(output, reg_string=DEFAULT_ERROR_REG_STRING):
"""Returns the error string of the error matching reg_string.
Precondition: has_error(output, reg_string)
"""
    return re.search(reg_string, output).groups()[1]
def make_reg_string(output):
"""Returns a regular expression for matching the particular error
in output.
Precondition: has_error(output)
"""
return DEFAULT_ERROR_REG_STRING_GENERIC % get_error_string(output)
def get_coq_output(contents):
"""Returns the coqc output of running through the given
contents."""
with tempfile.NamedTemporaryFile(suffix='.v', delete=False) as f:
f.write(contents)
file_name = f.name
p = subprocess.Popen(['coqc', '-q', file_name], stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
return stderr
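# Hypothetical usage sketch (assumes coqc on PATH and a local Broken.v file):
#   output = get_coq_output(open('Broken.v').read())
#   if has_error(output):
#       print(get_error_line_number(output))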
|
Add a file to extract error strings
|
Add a file to extract error strings
|
Python
|
mit
|
JasonGross/coq-tools,JasonGross/coq-tools
|
Add a file to extract error strings
|
from __future__ import with_statement
import os, sys, tempfile, subprocess, re
__all__ = ["has_error", "get_error_line_number", "make_reg_string", "get_coq_output"]
DEFAULT_ERROR_REG_STRING = 'File "[^"]+", line ([0-9]+), characters [0-9-]+:\n([^\n]+)'
DEFAULT_ERROR_REG_STRING_GENERIC = 'File "[^"]+", line ([0-9]+), characters [0-9-]+:\n(%s)'
def has_error(output, reg_string=DEFAULT_ERROR_REG_STRING):
"""Returns True if the coq output encoded in output has an error
matching the given regular expression, False otherwise.
"""
errors = re.search(reg_string, output)
if errors:
return True
else:
return False
def get_error_line_number(output, reg_string=DEFAULT_ERROR_REG_STRING):
"""Returns the line number that the error matching reg_string
    occurred on.
    Precondition: has_error(output, reg_string)
    """
    return int(re.search(reg_string, output).groups()[0])
def get_error_string(output, reg_string=DEFAULT_ERROR_REG_STRING):
"""Returns the error string of the error matching reg_string.
Precondition: has_error(output, reg_string)
"""
    return re.search(reg_string, output).groups()[1]
def make_reg_string(output):
"""Returns a regular expression for matching the particular error
in output.
Precondition: has_error(output)
"""
return DEFAULT_ERROR_REG_STRING_GENERIC % get_error_string(output)
def get_coq_output(contents):
"""Returns the coqc output of running through the given
contents."""
with tempfile.NamedTemporaryFile(suffix='.v', delete=False) as f:
f.write(contents)
file_name = f.name
p = subprocess.Popen(['coqc', '-q', file_name], stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
return stderr
|
<commit_before><commit_msg>Add a file to extract error strings<commit_after>
|
from __future__ import with_statement
import os, sys, tempfile, subprocess, re
__all__ = ["has_error", "get_error_line_number", "make_reg_string", "get_coq_output"]
DEFAULT_ERROR_REG_STRING = 'File "[^"]+", line ([0-9]+), characters [0-9-]+:\n([^\n]+)'
DEFAULT_ERROR_REG_STRING_GENERIC = 'File "[^"]+", line ([0-9]+), characters [0-9-]+:\n(%s)'
def has_error(output, reg_string=DEFAULT_ERROR_REG_STRING):
"""Returns True if the coq output encoded in output has an error
matching the given regular expression, False otherwise.
"""
errors = re.search(reg_string, output)
if errors:
return True
else:
return False
def get_error_line_number(output, reg_string=DEFAULT_ERROR_REG_STRING):
"""Returns the line number that the error matching reg_string
    occurred on.
    Precondition: has_error(output, reg_string)
    """
    return int(re.search(reg_string, output).groups()[0])
def get_error_string(output, reg_string=DEFAULT_ERROR_REG_STRING):
"""Returns the error string of the error matching reg_string.
Precondition: has_error(output, reg_string)
"""
    return re.search(reg_string, output).groups()[1]
def make_reg_string(output):
"""Returns a regular expression for matching the particular error
in output.
Precondition: has_error(output)
"""
return DEFAULT_ERROR_REG_STRING_GENERIC % get_error_string(output)
def get_coq_output(contents):
"""Returns the coqc output of running through the given
contents."""
with tempfile.NamedTemporaryFile(suffix='.v', delete=False) as f:
f.write(contents)
file_name = f.name
p = subprocess.Popen(['coqc', '-q', file_name], stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
return stderr
|
Add a file to extract error stringsfrom __future__ import with_statement
import os, sys, tempfile, subprocess, re
__all__ = ["has_error", "get_error_line_number", "make_reg_string", "get_coq_output"]
DEFAULT_ERROR_REG_STRING = 'File "[^"]+", line ([0-9]+), characters [0-9-]+:\n([^\n]+)'
DEFAULT_ERROR_REG_STRING_GENERIC = 'File "[^"]+", line ([0-9]+), characters [0-9-]+:\n(%s)'
def has_error(output, reg_string=DEFAULT_ERROR_REG_STRING):
"""Returns True if the coq output encoded in output has an error
matching the given regular expression, False otherwise.
"""
errors = re.search(reg_string, output)
if errors:
return True
else:
return False
def get_error_line_number(output, reg_string=DEFAULT_ERROR_REG_STRING):
"""Returns the line number that the error matching reg_string
    occurred on.
    Precondition: has_error(output, reg_string)
    """
    return int(re.search(reg_string, output).groups()[0])
def get_error_string(output, reg_string=DEFAULT_ERROR_REG_STRING):
"""Returns the error string of the error matching reg_string.
Precondition: has_error(output, reg_string)
"""
    return re.search(reg_string, output).groups()[1]
def make_reg_string(output):
"""Returns a regular expression for matching the particular error
in output.
Precondition: has_error(output)
"""
return DEFAULT_ERROR_REG_STRING_GENERIC % get_error_string(output)
def get_coq_output(contents):
"""Returns the coqc output of running through the given
contents."""
with tempfile.NamedTemporaryFile(suffix='.v', delete=False) as f:
f.write(contents)
file_name = f.name
p = subprocess.Popen(['coqc', '-q', file_name], stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
return stderr
|
<commit_before><commit_msg>Add a file to extract error strings<commit_after>from __future__ import with_statement
import os, sys, tempfile, subprocess, re
__all__ = ["has_error", "get_error_line_number", "make_reg_string", "get_coq_output"]
DEFAULT_ERROR_REG_STRING = 'File "[^"]+", line ([0-9]+), characters [0-9-]+:\n([^\n]+)'
DEFAULT_ERROR_REG_STRING_GENERIC = 'File "[^"]+", line ([0-9]+), characters [0-9-]+:\n(%s)'
def has_error(output, reg_string=DEFAULT_ERROR_REG_STRING):
"""Returns True if the coq output encoded in output has an error
matching the given regular expression, False otherwise.
"""
errors = re.search(reg_string, output)
if errors:
return True
else:
return False
def get_error_line_number(output, reg_string=DEFAULT_ERROR_REG_STRING):
"""Returns the line number that the error matching reg_string
    occurred on.
    Precondition: has_error(output, reg_string)
    """
    return int(re.search(reg_string, output).groups()[0])
def get_error_string(output, reg_string=DEFAULT_ERROR_REG_STRING):
"""Returns the error string of the error matching reg_string.
Precondition: has_error(output, reg_string)
"""
    return re.search(reg_string, output).groups()[1]
def make_reg_string(output):
"""Returns a regular expression for matching the particular error
in output.
Precondition: has_error(output)
"""
return DEFAULT_ERROR_REG_STRING_GENERIC % get_error_string(output)
def get_coq_output(contents):
"""Returns the coqc output of running through the given
contents."""
with tempfile.NamedTemporaryFile(suffix='.v', delete=False) as f:
f.write(contents)
file_name = f.name
p = subprocess.Popen(['coqc', '-q', file_name], stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
return stderr
|
|
df07313b0fdb7ca5caba3aefcbf8eb3a3f7f3191
|
pythran/tests/cases/nd_local_maxima.py
|
pythran/tests/cases/nd_local_maxima.py
|
#from https://github.com/iskandr/parakeet/blob/master/benchmarks/nd_local_maxima.py
#pythran export local_maxima(float [][][][])
#runas import numpy as np ; shape = (8,6,4,2) ; x = np.arange(8*6*4*2, dtype=np.float64).reshape(*shape) ; local_maxima(x)
import numpy as np
def wrap(pos, offset, bound):
return ( pos + offset ) % bound
def clamp(pos, offset, bound):
return min(bound-1,max(0,pos+offset))
def reflect(pos, offset, bound):
idx = pos+offset
return min(2*(bound-1)-idx,max(idx,-idx))
def local_maxima(data, mode=wrap):
wsize = data.shape
result = np.ones(data.shape, bool)
for pos in np.ndindex(data.shape):
myval = data[pos]
for offset in np.ndindex(wsize):
neighbor_idx = tuple(mode(p, o-w/2, w) for (p, o, w) in zip(pos, offset, wsize))
result[pos] &= (data[neighbor_idx] <= myval)
return result
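# Usage sketch mirroring the #runas line above:
#   import numpy as np
#   x = np.arange(8 * 6 * 4 * 2, dtype=np.float64).reshape(8, 6, 4, 2)
#   mask = local_maxima(x)  # boolean array marking wrapped-window maxima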
|
Add a difficult test case from parakeet
|
Add a difficult test case from parakeet
|
Python
|
bsd-3-clause
|
artas360/pythran,artas360/pythran,pbrunet/pythran,serge-sans-paille/pythran,serge-sans-paille/pythran,artas360/pythran,pbrunet/pythran,hainm/pythran,pombredanne/pythran,pombredanne/pythran,pombredanne/pythran,pbrunet/pythran,hainm/pythran,hainm/pythran
|
Add a difficult test case from parakeet
|
#from https://github.com/iskandr/parakeet/blob/master/benchmarks/nd_local_maxima.py
#pythran export local_maxima(float [][][][])
#runas import numpy as np ; shape = (8,6,4,2) ; x = np.arange(8*6*4*2, dtype=np.float64).reshape(*shape) ; local_maxima(x)
import numpy as np
def wrap(pos, offset, bound):
return ( pos + offset ) % bound
def clamp(pos, offset, bound):
return min(bound-1,max(0,pos+offset))
def reflect(pos, offset, bound):
idx = pos+offset
return min(2*(bound-1)-idx,max(idx,-idx))
def local_maxima(data, mode=wrap):
wsize = data.shape
result = np.ones(data.shape, bool)
for pos in np.ndindex(data.shape):
myval = data[pos]
for offset in np.ndindex(wsize):
neighbor_idx = tuple(mode(p, o-w/2, w) for (p, o, w) in zip(pos, offset, wsize))
result[pos] &= (data[neighbor_idx] <= myval)
return result
|
<commit_before><commit_msg>Add a difficult test case from parakeet<commit_after>
|
#from https://github.com/iskandr/parakeet/blob/master/benchmarks/nd_local_maxima.py
#pythran export local_maxima(float [][][][])
#runas import numpy as np ; shape = (8,6,4,2) ; x = np.arange(8*6*4*2, dtype=np.float64).reshape(*shape) ; local_maxima(x)
import numpy as np
def wrap(pos, offset, bound):
return ( pos + offset ) % bound
def clamp(pos, offset, bound):
return min(bound-1,max(0,pos+offset))
def reflect(pos, offset, bound):
idx = pos+offset
return min(2*(bound-1)-idx,max(idx,-idx))
def local_maxima(data, mode=wrap):
wsize = data.shape
result = np.ones(data.shape, bool)
for pos in np.ndindex(data.shape):
myval = data[pos]
for offset in np.ndindex(wsize):
neighbor_idx = tuple(mode(p, o-w/2, w) for (p, o, w) in zip(pos, offset, wsize))
result[pos] &= (data[neighbor_idx] <= myval)
return result
|
Add a difficult test case from parakeet#from https://github.com/iskandr/parakeet/blob/master/benchmarks/nd_local_maxima.py
#pythran export local_maxima(float [][][][])
#runas import numpy as np ; shape = (8,6,4,2) ; x = np.arange(8*6*4*2, dtype=np.float64).reshape(*shape) ; local_maxima(x)
import numpy as np
def wrap(pos, offset, bound):
return ( pos + offset ) % bound
def clamp(pos, offset, bound):
return min(bound-1,max(0,pos+offset))
def reflect(pos, offset, bound):
idx = pos+offset
return min(2*(bound-1)-idx,max(idx,-idx))
def local_maxima(data, mode=wrap):
wsize = data.shape
result = np.ones(data.shape, bool)
for pos in np.ndindex(data.shape):
myval = data[pos]
for offset in np.ndindex(wsize):
neighbor_idx = tuple(mode(p, o-w/2, w) for (p, o, w) in zip(pos, offset, wsize))
result[pos] &= (data[neighbor_idx] <= myval)
return result
|
<commit_before><commit_msg>Add a difficult test case from parakeet<commit_after>#from https://github.com/iskandr/parakeet/blob/master/benchmarks/nd_local_maxima.py
#pythran export local_maxima(float [][][][])
#runas import numpy as np ; shape = (8,6,4,2) ; x = np.arange(8*6*4*2, dtype=np.float64).reshape(*shape) ; local_maxima(x)
import numpy as np
def wrap(pos, offset, bound):
return ( pos + offset ) % bound
def clamp(pos, offset, bound):
return min(bound-1,max(0,pos+offset))
def reflect(pos, offset, bound):
idx = pos+offset
return min(2*(bound-1)-idx,max(idx,-idx))
def local_maxima(data, mode=wrap):
wsize = data.shape
result = np.ones(data.shape, bool)
for pos in np.ndindex(data.shape):
myval = data[pos]
for offset in np.ndindex(wsize):
neighbor_idx = tuple(mode(p, o-w/2, w) for (p, o, w) in zip(pos, offset, wsize))
result[pos] &= (data[neighbor_idx] <= myval)
return result
|
|
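The record above implements n-dimensional local-maximum detection with pluggable boundary handling (wrap, clamp, reflect). A minimal usage sketch follows, assuming Python 3: the record's code dates from the Python 2 era, so the window-centring term is written here with floor division (o - w // 2) to keep indices integral; everything else mirrors the record.

import numpy as np

def wrap(pos, offset, bound):
    # Periodic boundary: indices that fall off one edge re-enter on the other.
    return (pos + offset) % bound

def local_maxima(data, mode=wrap):
    wsize = data.shape
    result = np.ones(data.shape, bool)
    for pos in np.ndindex(data.shape):
        myval = data[pos]
        for offset in np.ndindex(wsize):
            # o - w // 2 centres the comparison window on pos; mode() maps
            # the raw index back into array bounds.
            neighbor_idx = tuple(mode(p, o - w // 2, w)
                                 for (p, o, w) in zip(pos, offset, wsize))
            result[pos] &= (data[neighbor_idx] <= myval)
    return result

shape = (8, 6, 4, 2)
x = np.arange(8 * 6 * 4 * 2, dtype=np.float64).reshape(*shape)
mask = local_maxima(x)
print(mask.sum())  # 1: with wrap mode and a full-array window, only the global maximum survives

Because the window is as large as the array and the wrap mode covers every residue, each point is compared against the whole array, which makes this a stress test for pythran's np.ndindex and tuple-indexing support rather than a practical filter.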
4ca101b1e7527deba3bf660745b0048def309170
|
opps/article/views.py
|
opps/article/views.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.views.generic.detail import DetailView
from opps.article.models import Post
class OppsDetail(DetailView):
context_object_name = "context"
queryset = Post.objects.all()
|
Create basic view on opps article
|
Create basic view on opps article
|
Python
|
mit
|
williamroot/opps,YACOWS/opps,williamroot/opps,opps/opps,opps/opps,williamroot/opps,opps/opps,YACOWS/opps,YACOWS/opps,jeanmask/opps,williamroot/opps,jeanmask/opps,YACOWS/opps,opps/opps,jeanmask/opps,jeanmask/opps
|
Create basic view on opps article
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.views.generic.detail import DetailView
from opps.article.models import Post
class OppsDetail(DetailView):
context_object_name = "context"
queryset = Post.objects.all()
|
<commit_before><commit_msg>Create basic view on opps article<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.views.generic.detail import DetailView
from opps.article.models import Post
class OppsDetail(DetailView):
context_object_name = "context"
queryset = Post.objects.all()
|
Create basic view on opps article#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.views.generic.detail import DetailView
from opps.article.models import Post
class OppsDetail(DetailView):
context_object_name = "context"
queryset = Post.objects.all()
|
<commit_before><commit_msg>Create basic view on opps article<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.views.generic.detail import DetailView
from opps.article.models import Post
class OppsDetail(DetailView):
context_object_name = "context"
queryset = Post.objects.all()
|
|
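OppsDetail is a stock class-based DetailView over Post, so it still needs a pk or slug captured from the URL to resolve its object. A hypothetical URLconf sketch follows; the URL pattern, view name, and slug field are assumptions for illustration, not part of the record:

from django.conf.urls import url  # Django 1.x style, matching the record's era

from opps.article.views import OppsDetail

urlpatterns = [
    # /article/<slug>/ resolves Post.objects.get(slug=<slug>)
    # (assumes Post defines a slug field, which the record does not show).
    url(r'^article/(?P<slug>[\w-]+)/$',
        OppsDetail.as_view(),
        name='post-detail'),
]

Because context_object_name is set to "context", templates refer to the resolved post as {{ context }} rather than the default {{ post }} or {{ object }}.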
5601271051fee2e24024843b0a74ca6a3047f25c
|
Algol/consts.py
|
Algol/consts.py
|
#!/usr/bin/env python
# Copyright (c) 2015 Angel Terrones (<angelterrones@gmail.com>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from myhdl import modbv
class Consts:
# control signals
Y = True
N = False
# PC Select Signal
PC_4 = modbv(0)[2:]
PC_BRJMP = modbv(1)[2:]
PC_JALR = modbv(2)[2:]
PC_EXC = modbv(3)[2:]
# Branch type
BR_N = modbv(0)[4:]
BR_NE = modbv(1)[4:]
BR_EQ = modbv(2)[4:]
BR_GE = modbv(3)[4:]
BR_GEU = modbv(4)[4:]
BR_LT = modbv(5)[4:]
BR_LTU = modbv(6)[4:]
BR_J = modbv(7)[4:]
BR_JR = modbv(8)[4:]
# RS1 Operand Select Signal
OP1_X = modbv(0)[2:]
OP1_RS1 = modbv(0)[2:]
OP1_PC = modbv(1)[2:]
OP1_IMZ = modbv(2)[2:]
# RS2 Operand Select Signal
OP2_X = modbv(0)[3:]
OP2_RS2 = modbv(1)[3:]
OP2_ITYPE = modbv(2)[3:]
OP2_STYPE = modbv(3)[3:]
OP2_SBTYPE = modbv(4)[3:]
OP2_UTYPE = modbv(5)[3:]
OP2_JUTYPE = modbv(6)[3:]
# PRIV
MTVEC = 0x100
START_ADDR = MTVEC + 0x100
# Local Variables:
# flycheck-flake8-maximum-line-length: 120
# flycheck-flake8rc: ".flake8rc"
# End:
|
Add the const module: List of control constants.
|
Add the const module: List of control constants.
|
Python
|
mit
|
AngelTerrones/Algol,AngelTerrones/Algol
|
Add the const module: List of control constants.
|
#!/usr/bin/env python
# Copyright (c) 2015 Angel Terrones (<angelterrones@gmail.com>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from myhdl import modbv
class Consts:
# control signals
Y = True
N = False
# PC Select Signal
PC_4 = modbv(0)[2:]
PC_BRJMP = modbv(1)[2:]
PC_JALR = modbv(2)[2:]
PC_EXC = modbv(3)[2:]
# Branch type
BR_N = modbv(0)[4:]
BR_NE = modbv(1)[4:]
BR_EQ = modbv(2)[4:]
BR_GE = modbv(3)[4:]
BR_GEU = modbv(4)[4:]
BR_LT = modbv(5)[4:]
BR_LTU = modbv(6)[4:]
BR_J = modbv(7)[4:]
BR_JR = modbv(8)[4:]
# RS1 Operand Select Signal
OP1_X = modbv(0)[2:]
OP1_RS1 = modbv(0)[2:]
OP1_PC = modbv(1)[2:]
OP1_IMZ = modbv(2)[2:]
# RS2 Operand Select Signal
OP2_X = modbv(0)[3:]
OP2_RS2 = modbv(1)[3:]
OP2_ITYPE = modbv(2)[3:]
OP2_STYPE = modbv(3)[3:]
OP2_SBTYPE = modbv(4)[3:]
OP2_UTYPE = modbv(5)[3:]
OP2_JUTYPE = modbv(6)[3:]
# PRIV
MTVEC = 0x100
START_ADDR = MTVEC + 0x100
# Local Variables:
# flycheck-flake8-maximum-line-length: 120
# flycheck-flake8rc: ".flake8rc"
# End:
|
<commit_before><commit_msg>Add the const module: List of control constants.<commit_after>
|
#!/usr/bin/env python
# Copyright (c) 2015 Angel Terrones (<angelterrones@gmail.com>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from myhdl import modbv
class Consts:
# control signals
Y = True
N = False
# PC Select Signal
PC_4 = modbv(0)[2:]
PC_BRJMP = modbv(1)[2:]
PC_JALR = modbv(2)[2:]
PC_EXC = modbv(3)[2:]
# Branch type
BR_N = modbv(0)[4:]
BR_NE = modbv(1)[4:]
BR_EQ = modbv(2)[4:]
BR_GE = modbv(3)[4:]
BR_GEU = modbv(4)[4:]
BR_LT = modbv(5)[4:]
BR_LTU = modbv(6)[4:]
BR_J = modbv(7)[4:]
BR_JR = modbv(8)[4:]
# RS1 Operand Select Signal
OP1_X = modbv(0)[2:]
OP1_RS1 = modbv(0)[2:]
OP1_PC = modbv(1)[2:]
OP1_IMZ = modbv(2)[2:]
# RS2 Operand Select Signal
OP2_X = modbv(0)[3:]
OP2_RS2 = modbv(1)[3:]
OP2_ITYPE = modbv(2)[3:]
OP2_STYPE = modbv(3)[3:]
OP2_SBTYPE = modbv(4)[3:]
OP2_UTYPE = modbv(5)[3:]
OP2_JUTYPE = modbv(6)[3:]
# PRIV
MTVEC = 0x100
START_ADDR = MTVEC + 0x100
# Local Variables:
# flycheck-flake8-maximum-line-length: 120
# flycheck-flake8rc: ".flake8rc"
# End:
|
Add the const module: List of control constants.#!/usr/bin/env python
# Copyright (c) 2015 Angel Terrones (<angelterrones@gmail.com>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from myhdl import modbv
class Consts:
# control signals
Y = True
N = False
# PC Select Signal
PC_4 = modbv(0)[2:]
PC_BRJMP = modbv(1)[2:]
PC_JALR = modbv(2)[2:]
PC_EXC = modbv(3)[2:]
# Branch type
BR_N = modbv(0)[4:]
BR_NE = modbv(1)[4:]
BR_EQ = modbv(2)[4:]
BR_GE = modbv(3)[4:]
BR_GEU = modbv(4)[4:]
BR_LT = modbv(5)[4:]
BR_LTU = modbv(6)[4:]
BR_J = modbv(7)[4:]
BR_JR = modbv(8)[4:]
# RS1 Operand Select Signal
OP1_X = modbv(0)[2:]
OP1_RS1 = modbv(0)[2:]
OP1_PC = modbv(1)[2:]
OP1_IMZ = modbv(2)[2:]
# RS2 Operand Select Signal
OP2_X = modbv(0)[3:]
OP2_RS2 = modbv(1)[3:]
OP2_ITYPE = modbv(2)[3:]
OP2_STYPE = modbv(3)[3:]
OP2_SBTYPE = modbv(4)[3:]
OP2_UTYPE = modbv(5)[3:]
OP2_JUTYPE = modbv(6)[3:]
# PRIV
MTVEC = 0x100
START_ADDR = MTVEC + 0x100
# Local Variables:
# flycheck-flake8-maximum-line-length: 120
# flycheck-flake8rc: ".flake8rc"
# End:
|
<commit_before><commit_msg>Add the const module: List of control constants.<commit_after>#!/usr/bin/env python
# Copyright (c) 2015 Angel Terrones (<angelterrones@gmail.com>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from myhdl import modbv
class Consts:
# control signals
Y = True
N = False
# PC Select Signal
PC_4 = modbv(0)[2:]
PC_BRJMP = modbv(1)[2:]
PC_JALR = modbv(2)[2:]
PC_EXC = modbv(3)[2:]
# Branch type
BR_N = modbv(0)[4:]
BR_NE = modbv(1)[4:]
BR_EQ = modbv(2)[4:]
BR_GE = modbv(3)[4:]
BR_GEU = modbv(4)[4:]
BR_LT = modbv(5)[4:]
BR_LTU = modbv(6)[4:]
BR_J = modbv(7)[4:]
BR_JR = modbv(8)[4:]
# RS1 Operand Select Signal
OP1_X = modbv(0)[2:]
OP1_RS1 = modbv(0)[2:]
OP1_PC = modbv(1)[2:]
OP1_IMZ = modbv(2)[2:]
# RS2 Operand Select Signal
OP2_X = modbv(0)[3:]
OP2_RS2 = modbv(1)[3:]
OP2_ITYPE = modbv(2)[3:]
OP2_STYPE = modbv(3)[3:]
OP2_SBTYPE = modbv(4)[3:]
OP2_UTYPE = modbv(5)[3:]
OP2_JUTYPE = modbv(6)[3:]
# PRIV
MTVEC = 0x100
START_ADDR = MTVEC + 0x100
# Local Variables:
# flycheck-flake8-maximum-line-length: 120
# flycheck-flake8rc: ".flake8rc"
# End:
|
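These modbv codes are sized control words meant to drive multiplexers and comparators in the pipeline. The branch_unit below is a hypothetical MyHDL consumer, sketched in the plain-function style of 2015-era MyHDL; only the Consts import path comes from the record, everything else is illustrative:

from myhdl import Signal, modbv, always_comb

from Algol.consts import Consts  # module path taken from the record's file name

def branch_unit(br_type, eq, lt, ltu, take_branch):
    # Combinational decode: map the 4-bit branch code to a taken/not-taken flag.
    @always_comb
    def logic():
        if br_type == Consts.BR_EQ:
            take_branch.next = eq
        elif br_type == Consts.BR_NE:
            take_branch.next = not eq
        elif br_type == Consts.BR_LT:
            take_branch.next = lt
        elif br_type == Consts.BR_LTU:
            take_branch.next = ltu
        elif br_type == Consts.BR_GE:
            take_branch.next = not lt
        elif br_type == Consts.BR_GEU:
            take_branch.next = not ltu
        elif br_type == Consts.BR_J or br_type == Consts.BR_JR:
            take_branch.next = True
        else:  # Consts.BR_N: not a branch
            take_branch.next = False
    return logic

# Signals would be sized to match the constants, e.g.:
# br_type     = Signal(modbv(0)[4:])
# take_branch = Signal(False)

Declaring each constant as modbv(...)[n:] bakes the bit width (2, 3, or 4 bits) into the value itself, which keeps comparisons and later HDL conversion width-consistent instead of relying on bare Python ints.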