Dataset columns (string length ranges and class counts):

- commit: string, 40 characters
- old_file: string, 4 to 118 characters
- new_file: string, 4 to 118 characters
- old_contents: string, 0 to 2.94k characters
- new_contents: string, 1 to 4.43k characters
- subject: string, 15 to 444 characters
- message: string, 16 to 3.45k characters
- lang: string, 1 class
- license: string, 13 classes
- repos: string, 5 to 43.2k characters
- prompt: string, 17 to 4.58k characters
- response: string, 1 to 4.43k characters
- prompt_tagged: string, 58 to 4.62k characters
- response_tagged: string, 1 to 4.43k characters
- text: string, 132 to 7.29k characters
- text_tagged: string, 173 to 7.33k characters
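A minimal sketch of loading a dataset with this schema via the Hugging Face `datasets` library follows; the repository id used here is a placeholder, not the actual dataset path.

```python
# Minimal sketch: load a commit dataset with the schema above and inspect one
# row. The repository id is a placeholder -- substitute the real dataset path.
from datasets import load_dataset

ds = load_dataset("your-org/python-commit-dataset", split="train")  # hypothetical id

row = ds[0]
print(row["commit"], row["new_file"])   # e.g. 4eb81354... examples/hello.py
print(row["subject"])                   # one-line commit subject
print(row["new_contents"][:200])        # start of the file added by the commit
```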
4eb81354583902e95a41affd0f0bb6572cf8bde8
|
examples/hello.py
|
examples/hello.py
|
from toil.common import Toil
from toil.job import Job
class HelloWorld(Job):
def __init__(self, message):
Job.__init__(self, memory="2G", cores=2, disk="3G")
self.message = message
def run(self, fileStore):
return "Hello, world!, here's a message: %s" % self.message
if __name__=="__main__":
parser = Job.Runner.getDefaultArgumentParser()
options = parser.parse_args()
hello_job = HelloWorld("Woot")
with Toil(options) as toil:
print(toil.start(hello_job))
|
Add a tiny example workflow to support manual testing
|
Add a tiny example workflow to support manual testing
|
Python
|
apache-2.0
|
BD2KGenomics/slugflow,BD2KGenomics/slugflow
|
Add a tiny example workflow to support manual testing
|
from toil.common import Toil
from toil.job import Job
class HelloWorld(Job):
def __init__(self, message):
Job.__init__(self, memory="2G", cores=2, disk="3G")
self.message = message
def run(self, fileStore):
return "Hello, world!, here's a message: %s" % self.message
if __name__=="__main__":
parser = Job.Runner.getDefaultArgumentParser()
options = parser.parse_args()
hello_job = HelloWorld("Woot")
with Toil(options) as toil:
print(toil.start(hello_job))
|
<commit_before><commit_msg>Add a tiny example workflow to support manual testing<commit_after>
|
from toil.common import Toil
from toil.job import Job
class HelloWorld(Job):
def __init__(self, message):
Job.__init__(self, memory="2G", cores=2, disk="3G")
self.message = message
def run(self, fileStore):
return "Hello, world!, here's a message: %s" % self.message
if __name__=="__main__":
parser = Job.Runner.getDefaultArgumentParser()
options = parser.parse_args()
hello_job = HelloWorld("Woot")
with Toil(options) as toil:
print(toil.start(hello_job))
|
Add a tiny example workflow to support manual testingfrom toil.common import Toil
from toil.job import Job
class HelloWorld(Job):
def __init__(self, message):
Job.__init__(self, memory="2G", cores=2, disk="3G")
self.message = message
def run(self, fileStore):
return "Hello, world!, here's a message: %s" % self.message
if __name__=="__main__":
parser = Job.Runner.getDefaultArgumentParser()
options = parser.parse_args()
hello_job = HelloWorld("Woot")
with Toil(options) as toil:
print(toil.start(hello_job))
|
<commit_before><commit_msg>Add a tiny example workflow to support manual testing<commit_after>from toil.common import Toil
from toil.job import Job
class HelloWorld(Job):
def __init__(self, message):
Job.__init__(self, memory="2G", cores=2, disk="3G")
self.message = message
def run(self, fileStore):
return "Hello, world!, here's a message: %s" % self.message
if __name__=="__main__":
parser = Job.Runner.getDefaultArgumentParser()
options = parser.parse_args()
hello_job = HelloWorld("Woot")
with Toil(options) as toil:
print(toil.start(hello_job))
|
|
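Comparing the columns of the row above, the derived fields appear to be simple concatenations of the raw ones. The sketch below reproduces that assembly for rows like these, where `old_contents` is empty because the commit adds a new file; it is an observation from the visible rows, not an official preprocessing recipe.

```python
# Sketch of how the derived columns appear to be assembled from the raw ones,
# based only on the rows visible here (each adds a new file, so old_contents
# is empty); this is an observation, not an official preprocessing recipe.
def derive_fields(old_contents: str, new_contents: str, message: str) -> dict:
    prompt = message                                  # for these new-file rows, prompt is just the commit message
    prompt_tagged = (
        "<commit_before>" + old_contents              # empty string in every row shown on this page
        + "<commit_msg>" + message
        + "<commit_after>"
    )
    return {
        "prompt": prompt,
        "response": new_contents,                     # the new file, verbatim
        "prompt_tagged": prompt_tagged,
        "response_tagged": new_contents,
        "text": prompt + new_contents,                # commit message run straight into the code
        "text_tagged": prompt_tagged + new_contents,  # tagged prompt run straight into the code
    }

row = derive_fields("", "from toil.common import Toil\n...", "Add a tiny example workflow to support manual testing")
```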
7d18daa13b16a64d56b06e19d9f2966a79e75755
|
tests/test_lesson_3_calculator.py
|
tests/test_lesson_3_calculator.py
|
import unittest
from lessons.lesson_3_calculator import calculator
class AddTestCase(unittest.TestCase):
def test_add_returns_sum_of_two_numbers(self):
five = calculator.add(2, 3)
self.assertEqual(five, 5)
ten = calculator.add(7, 3)
self.assertEqual(ten, 10)
def test_position_of_arguments_does_not_matter(self):
a = calculator.add(2, 3)
b = calculator.add(3, 2)
self.assertEqual(a, b)
def test_add_handles_negative_numbers(self):
a = calculator.add(-3, 5)
self.assertEqual(a, 2)
class SubtractTestCase(unittest.TestCase):
def test_subtract_returns_second_argument_minus_first(self):
three = calculator.subtract(10, 7)
self.assertEqual(three, 3)
eleven = calculator.subtract(15, 4)
self.assertEqual(eleven, 11)
def test_position_matters_for_subtraction(self):
a = calculator.subtract(10, 7)
b = calculator.subtract(7, 10)
self.assertNotEqual(a, b)
def test_subtract_handles_negative_numbers(self):
a = calculator.subtract(-7, -8)
self.assertEqual(a, 1)
class TotalSumTestCase(unittest.TestCase):
def test_total_sum_accepts_list_and_returns_sum_of_list(self):
result = calculator.total_sum([1, 2, 3])
self.assertEqual(result, 6)
def test_total_sum_handles_negative_integers(self):
result = calculator.total_sum([-1, -2, -3])
self.assertEqual(result, -6)
def test_total_sum_handles_mix_of_positive_and_negative_integers(self):
result = calculator.total_sum([1, 2, -3, -5])
self.assertEqual(result, -5)
def test_empty_array_input_returns_0(self):
result = calculator.total_sum([])
self.assertEqual(result, 0)
|
Add unit test file for lesson 3: calculator.
|
Add unit test file for lesson 3: calculator.
|
Python
|
mit
|
thejessleigh/test_driven_python,thejessleigh/test_driven_python,thejessleigh/test_driven_python
|
Add unit test file for lesson 3: calculator.
|
import unittest
from lessons.lesson_3_calculator import calculator
class AddTestCase(unittest.TestCase):
def test_add_returns_sum_of_two_numbers(self):
five = calculator.add(2, 3)
self.assertEqual(five, 5)
ten = calculator.add(7, 3)
self.assertEqual(ten, 10)
def test_position_of_arguments_does_not_matter(self):
a = calculator.add(2, 3)
b = calculator.add(3, 2)
self.assertEqual(a, b)
def test_add_handles_negative_numbers(self):
a = calculator.add(-3, 5)
self.assertEqual(a, 2)
class SubtractTestCase(unittest.TestCase):
def test_subtract_returns_second_argument_minus_first(self):
three = calculator.subtract(10, 7)
self.assertEqual(three, 3)
eleven = calculator.subtract(15, 4)
self.assertEqual(eleven, 11)
def test_position_matters_for_subtraction(self):
a = calculator.subtract(10, 7)
b = calculator.subtract(7, 10)
self.assertNotEqual(a, b)
def test_subtract_handles_negative_numbers(self):
a = calculator.subtract(-7, -8)
self.assertEqual(a, 1)
class TotalSumTestCase(unittest.TestCase):
def test_total_sum_accepts_list_and_returns_sum_of_list(self):
result = calculator.total_sum([1, 2, 3])
self.assertEqual(result, 6)
def test_total_sum_handles_negative_integers(self):
result = calculator.total_sum([-1, -2, -3])
self.assertEqual(result, -6)
def test_total_sum_handles_mix_of_positive_and_negative_integers(self):
result = calculator.total_sum([1, 2, -3, -5])
self.assertEqual(result, -5)
def test_empty_array_input_returns_0(self):
result = calculator.total_sum([])
self.assertEqual(result, 0)
|
<commit_before><commit_msg>Add unit test file for lesson 3: calculator.<commit_after>
|
import unittest
from lessons.lesson_3_calculator import calculator
class AddTestCase(unittest.TestCase):
def test_add_returns_sum_of_two_numbers(self):
five = calculator.add(2, 3)
self.assertEqual(five, 5)
ten = calculator.add(7, 3)
self.assertEqual(ten, 10)
def test_position_of_arguments_does_not_matter(self):
a = calculator.add(2, 3)
b = calculator.add(3, 2)
self.assertEqual(a, b)
def test_add_handles_negative_numbers(self):
a = calculator.add(-3, 5)
self.assertEqual(a, 2)
class SubtractTestCase(unittest.TestCase):
def test_subtract_returns_second_argument_minus_first(self):
three = calculator.subtract(10, 7)
self.assertEqual(three, 3)
eleven = calculator.subtract(15, 4)
self.assertEqual(eleven, 11)
def test_position_matters_for_subtraction(self):
a = calculator.subtract(10, 7)
b = calculator.subtract(7, 10)
self.assertNotEqual(a, b)
def test_subtract_handles_negative_numbers(self):
a = calculator.subtract(-7, -8)
self.assertEqual(a, 1)
class TotalSumTestCase(unittest.TestCase):
def test_total_sum_accepts_list_and_returns_sum_of_list(self):
result = calculator.total_sum([1, 2, 3])
self.assertEqual(result, 6)
def test_total_sum_handles_negative_integers(self):
result = calculator.total_sum([-1, -2, -3])
self.assertEqual(result, -6)
def test_total_sum_handles_mix_of_positive_and_negative_integers(self):
result = calculator.total_sum([1, 2, -3, -5])
self.assertEqual(result, -5)
def test_empty_array_input_returns_0(self):
result = calculator.total_sum([])
self.assertEqual(result, 0)
|
Add unit test file for lesson 3: calculator.import unittest
from lessons.lesson_3_calculator import calculator
class AddTestCase(unittest.TestCase):
def test_add_returns_sum_of_two_numbers(self):
five = calculator.add(2, 3)
self.assertEqual(five, 5)
ten = calculator.add(7, 3)
self.assertEqual(ten, 10)
def test_position_of_arguments_does_not_matter(self):
a = calculator.add(2, 3)
b = calculator.add(3, 2)
self.assertEqual(a, b)
def test_add_handles_negative_numbers(self):
a = calculator.add(-3, 5)
self.assertEqual(a, 2)
class SubtractTestCase(unittest.TestCase):
def test_subtract_returns_second_argument_minus_first(self):
three = calculator.subtract(10, 7)
self.assertEqual(three, 3)
eleven = calculator.subtract(15, 4)
self.assertEqual(eleven, 11)
def test_position_matters_for_subtraction(self):
a = calculator.subtract(10, 7)
b = calculator.subtract(7, 10)
self.assertNotEqual(a, b)
def test_subtract_handles_negative_numbers(self):
a = calculator.subtract(-7, -8)
self.assertEqual(a, 1)
class TotalSumTestCase(unittest.TestCase):
def test_total_sum_accepts_list_and_returns_sum_of_list(self):
result = calculator.total_sum([1, 2, 3])
self.assertEqual(result, 6)
def test_total_sum_handles_negative_integers(self):
result = calculator.total_sum([-1, -2, -3])
self.assertEqual(result, -6)
def test_total_sum_handles_mix_of_positive_and_negative_integers(self):
result = calculator.total_sum([1, 2, -3, -5])
self.assertEqual(result, -5)
def test_empty_array_input_returns_0(self):
result = calculator.total_sum([])
self.assertEqual(result, 0)
|
<commit_before><commit_msg>Add unit test file for lesson 3: calculator.<commit_after>import unittest
from lessons.lesson_3_calculator import calculator
class AddTestCase(unittest.TestCase):
def test_add_returns_sum_of_two_numbers(self):
five = calculator.add(2, 3)
self.assertEqual(five, 5)
ten = calculator.add(7, 3)
self.assertEqual(ten, 10)
def test_position_of_arguments_does_not_matter(self):
a = calculator.add(2, 3)
b = calculator.add(3, 2)
self.assertEqual(a, b)
def test_add_handles_negative_numbers(self):
a = calculator.add(-3, 5)
self.assertEqual(a, 2)
class SubtractTestCase(unittest.TestCase):
def test_subtract_returns_second_argument_minus_first(self):
three = calculator.subtract(10, 7)
self.assertEqual(three, 3)
eleven = calculator.subtract(15, 4)
self.assertEqual(eleven, 11)
def test_position_matters_for_subtraction(self):
a = calculator.subtract(10, 7)
b = calculator.subtract(7, 10)
self.assertNotEqual(a, b)
def test_subtract_handles_negative_numbers(self):
a = calculator.subtract(-7, -8)
self.assertEqual(a, 1)
class TotalSumTestCase(unittest.TestCase):
def test_total_sum_accepts_list_and_returns_sum_of_list(self):
result = calculator.total_sum([1, 2, 3])
self.assertEqual(result, 6)
def test_total_sum_handles_negative_integers(self):
result = calculator.total_sum([-1, -2, -3])
self.assertEqual(result, -6)
def test_total_sum_handles_mix_of_positive_and_negative_integers(self):
result = calculator.total_sum([1, 2, -3, -5])
self.assertEqual(result, -5)
def test_empty_array_input_returns_0(self):
result = calculator.total_sum([])
self.assertEqual(result, 0)
|
|
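The `calculator` module imported by these tests is not part of the row; a minimal implementation consistent with the assertions above might look like the following sketch (the repository's actual file may differ).

```python
# Minimal sketch of lessons/lesson_3_calculator/calculator.py consistent with
# the assertions above; the repository's actual module may differ.
def add(a, b):
    return a + b

def subtract(a, b):
    # The assertions expect subtract(10, 7) == 3, i.e. first argument minus second.
    return a - b

def total_sum(numbers):
    # Sum of a list of integers; an empty list returns 0.
    return sum(numbers)
```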
887413520fa06433e19bed093b466ec1282fbbc1
|
CycleOpsFluid2PowerCalculator.py
|
CycleOpsFluid2PowerCalculator.py
|
from AbstractPowerCalculator import AbstractPowerCalculator
'''
Linear interpolation. Numpy could be used here,
but the app should be kept thin
'''
def interp(x_arr, y_arr, x):
for i, xi in enumerate(x_arr):
if xi >= x:
break
else:
return 611
x_min = x_arr[i - 1]
y_min = y_arr[i - 1]
y_max = y_arr[i]
factor = (x - x_min) / (xi - x_min)
return y_min + (y_max - y_min) * factor
'''
CycleOps Fluid2 power calculator.
'''
class CycleOpsFluid2PowerCalculator(AbstractPowerCalculator):
def __init__(self):
super(CycleOpsFluid2PowerCalculator, self).__init__()
self.wheel_circumference = 2.105 # default value - can be overridden in config.py
# Data from the diagram of CycleOps Fluid2:
# http://thebikegeek.blogspot.com/2009/12/while-we-wait-for-better-and-better.html
# speed values
xp = [0.0, 5.0, 10.0, 15.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0]
# power values
yp = [0.0, 28.0, 58.0, 92.0, 132.0, 179.0, 237.0, 307.0, 391.0, 492.0, 611.0]
def power_from_speed(self, revs_per_sec):
kms_per_rev = self.wheel_circumference / 1000.0
speed = revs_per_sec * 3600 * kms_per_rev
power = int(interp(self.xp, self.yp, speed))
return power
def set_wheel_circumference(self, circumference):
self.wheel_circumference = circumference
|
Add CycleOps Fluid2 power calculator
|
Add CycleOps Fluid2 power calculator
|
Python
|
mit
|
dhague/vpower,dhague/vpower
|
Add CycleOps Fluid2 power calculator
|
from AbstractPowerCalculator import AbstractPowerCalculator
'''
Linear interpolation. Numpy could be used here,
but the app should be kept thin
'''
def interp(x_arr, y_arr, x):
for i, xi in enumerate(x_arr):
if xi >= x:
break
else:
return 611
x_min = x_arr[i - 1]
y_min = y_arr[i - 1]
y_max = y_arr[i]
factor = (x - x_min) / (xi - x_min)
return y_min + (y_max - y_min) * factor
'''
CycleOps Fluid2 power calculator.
'''
class CycleOpsFluid2PowerCalculator(AbstractPowerCalculator):
def __init__(self):
super(CycleOpsFluid2PowerCalculator, self).__init__()
self.wheel_circumference = 2.105 # default value - can be overridden in config.py
# Data from the diagram of CycleOps Fluid2:
# http://thebikegeek.blogspot.com/2009/12/while-we-wait-for-better-and-better.html
# speed values
xp = [0.0, 5.0, 10.0, 15.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0]
# power values
yp = [0.0, 28.0, 58.0, 92.0, 132.0, 179.0, 237.0, 307.0, 391.0, 492.0, 611.0]
def power_from_speed(self, revs_per_sec):
kms_per_rev = self.wheel_circumference / 1000.0
speed = revs_per_sec * 3600 * kms_per_rev
power = int(interp(self.xp, self.yp, speed))
return power
def set_wheel_circumference(self, circumference):
self.wheel_circumference = circumference
|
<commit_before><commit_msg>Add CycleOps Fluid2 power calculator<commit_after>
|
from AbstractPowerCalculator import AbstractPowerCalculator
'''
Linear interpolation. Numpy could be used here,
but the app should be kept thin
'''
def interp(x_arr, y_arr, x):
for i, xi in enumerate(x_arr):
if xi >= x:
break
else:
return 611
x_min = x_arr[i - 1]
y_min = y_arr[i - 1]
y_max = y_arr[i]
factor = (x - x_min) / (xi - x_min)
return y_min + (y_max - y_min) * factor
'''
CycleOps Fluid2 power calculator.
'''
class CycleOpsFluid2PowerCalculator(AbstractPowerCalculator):
def __init__(self):
super(CycleOpsFluid2PowerCalculator, self).__init__()
self.wheel_circumference = 2.105 # default value - can be overridden in config.py
# Data from the diagram of CycleOps Fluid2:
# http://thebikegeek.blogspot.com/2009/12/while-we-wait-for-better-and-better.html
# speed values
xp = [0.0, 5.0, 10.0, 15.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0]
# power values
yp = [0.0, 28.0, 58.0, 92.0, 132.0, 179.0, 237.0, 307.0, 391.0, 492.0, 611.0]
def power_from_speed(self, revs_per_sec):
kms_per_rev = self.wheel_circumference / 1000.0
speed = revs_per_sec * 3600 * kms_per_rev
power = int(interp(self.xp, self.yp, speed))
return power
def set_wheel_circumference(self, circumference):
self.wheel_circumference = circumference
|
Add CycleOps Fluid2 power calculatorfrom AbstractPowerCalculator import AbstractPowerCalculator
'''
Linear interpolation. Numpy could be used here,
but the app should be kept thin
'''
def interp(x_arr, y_arr, x):
for i, xi in enumerate(x_arr):
if xi >= x:
break
else:
return 611
x_min = x_arr[i - 1]
y_min = y_arr[i - 1]
y_max = y_arr[i]
factor = (x - x_min) / (xi - x_min)
return y_min + (y_max - y_min) * factor
'''
CycleOps Fluid2 power calculator.
'''
class CycleOpsFluid2PowerCalculator(AbstractPowerCalculator):
def __init__(self):
super(CycleOpsFluid2PowerCalculator, self).__init__()
self.wheel_circumference = 2.105 # default value - can be overridden in config.py
# Data from the diagram of CycleOps Fluid2:
# http://thebikegeek.blogspot.com/2009/12/while-we-wait-for-better-and-better.html
# speed values
xp = [0.0, 5.0, 10.0, 15.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0]
# power values
yp = [0.0, 28.0, 58.0, 92.0, 132.0, 179.0, 237.0, 307.0, 391.0, 492.0, 611.0]
def power_from_speed(self, revs_per_sec):
kms_per_rev = self.wheel_circumference / 1000.0
speed = revs_per_sec * 3600 * kms_per_rev
power = int(interp(self.xp, self.yp, speed))
return power
def set_wheel_circumference(self, circumference):
self.wheel_circumference = circumference
|
<commit_before><commit_msg>Add CycleOps Fluid2 power calculator<commit_after>from AbstractPowerCalculator import AbstractPowerCalculator
'''
Linear interpolation. Numpy could be used here,
but the app should be kept thin
'''
def interp(x_arr, y_arr, x):
for i, xi in enumerate(x_arr):
if xi >= x:
break
else:
return 611
x_min = x_arr[i - 1]
y_min = y_arr[i - 1]
y_max = y_arr[i]
factor = (x - x_min) / (xi - x_min)
return y_min + (y_max - y_min) * factor
'''
CycleOps Fluid2 power calculator.
'''
class CycleOpsFluid2PowerCalculator(AbstractPowerCalculator):
def __init__(self):
super(CycleOpsFluid2PowerCalculator, self).__init__()
self.wheel_circumference = 2.105 # default value - can be overridden in config.py
# Data from the diagram of CycleOps Fluid2:
# http://thebikegeek.blogspot.com/2009/12/while-we-wait-for-better-and-better.html
# speed values
xp = [0.0, 5.0, 10.0, 15.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0]
# power values
yp = [0.0, 28.0, 58.0, 92.0, 132.0, 179.0, 237.0, 307.0, 391.0, 492.0, 611.0]
def power_from_speed(self, revs_per_sec):
kms_per_rev = self.wheel_circumference / 1000.0
speed = revs_per_sec * 3600 * kms_per_rev
power = int(interp(self.xp, self.yp, speed))
return power
def set_wheel_circumference(self, circumference):
self.wheel_circumference = circumference
|
|
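As a quick sanity check of the lookup tables above, the snippet below repeats the same linear interpolation for a speed of 22 km/h, which falls between the 20 and 25 km/h entries; it is a standalone copy of the calculation, not an import from the vpower repository.

```python
# Standalone check of the CycleOps Fluid2 lookup above (same tables, same
# arithmetic as interp/power_from_speed), not an import from the repository.
xp = [0.0, 5.0, 10.0, 15.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0]           # speed, km/h
yp = [0.0, 28.0, 58.0, 92.0, 132.0, 179.0, 237.0, 307.0, 391.0, 492.0, 611.0]   # power, W

speed = 22.0                                           # between the 20 and 25 km/h table points
i = next(k for k, x in enumerate(xp) if x >= speed)    # index of the first point >= speed
factor = (speed - xp[i - 1]) / (xp[i] - xp[i - 1])     # 0.4 of the way from 20 to 25 km/h
power = yp[i - 1] + (yp[i] - yp[i - 1]) * factor       # 132 W + 0.4 * (179 W - 132 W)
print(int(power))                                      # 150
```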
46731933b2146bcac81c85cbae711d163a8758dc
|
Genotype_Matrix_To_Fasta.py
|
Genotype_Matrix_To_Fasta.py
|
#!/usr/bin/env python
"""A script to take a genotyping matrix with population assignment and produce
FASTA files for each population. This was written with K. Thornton's
libsequence tools in mind. This script will also remove monomorphic sites.
Assumes that samples are rows and markers are columns. The first column has a
population of origin and the second column is the individual ID."""
import sys
genotype_matrix = sys.argv[1]
missing = 'NA'
# This dictionary will contain the population genetic data that we are
# reading from the input file. It will be of the form
# {
# 'Pop1_ID': {
# 'PIs': [PIs],
# 'Genotypes': [genotypes],
# ...
# }
# 'Pop2_ID': {
# ...
# }
# ...
# }
popdata = {}
with open(genotype_matrix, 'r') as f:
for index, line in enumerate(f):
if index == 0:
continue
else:
tmp = line.strip().split()
# Get the population identifier and PI number
popid = tmp[0]
pi_no = tmp[1]
# Assign some population IDs. This is to do the combined
# population summary stats.
if popid == '2' or popid == '3':
popid = '23'
# Stick it into the dictionary
if popid not in popdata:
popdata[popid] = {
'PIs': [],
'Genotypes': []
}
popdata[popid]['PIs'].append(pi_no)
popdata[popid]['Genotypes'].append(tmp[2:])
# Next, we iterate through the dictionary, removing monomorphic markers
for pop in popdata:
gen_mat = popdata[pop]['Genotypes']
# Transpose the genotype matrix so we can iterate through the markers
gen_mat = zip(*gen_mat)
# And start a new matrix for filtered data
filtered = []
for marker in gen_mat:
# what are the states?
alleles = set(marker)
# Discard any missing data
alleles.discard(missing)
# If they are monomorphic after discarding missing data, then toss it
if len(alleles) == 1:
continue
else:
filtered.append(marker)
# Then, we transpose it back
filtered = zip(*filtered)
# And write it out!
handle = open('Pop_' + pop + '.fasta', 'w')
for pi, genotypes in zip(popdata[pop]['PIs'], filtered):
# We need to convert the NA into N
new_geno = ['N' if x == 'NA' else x for x in genotypes]
towrite = '>' + pi + '\n' + ''.join(new_geno) + '\n'
handle.write(towrite)
handle.flush()
handle.close()
|
Add script for converting genotype matrix to libsequence-friendly FASTA
|
Add script for converting genotype matrix to libsequence-friendly FASTA
|
Python
|
unlicense
|
MeeshCompBio/Misc_Utils,MeeshCompBio/Misc_Utils,TomJKono/Misc_Utils,MeeshCompBio/Misc_Utils,TomJKono/Misc_Utils,TomJKono/Misc_Utils
|
Add script for converting genotype matrix to libsequence-friendly FASTA
|
#!/usr/bin/env python
"""A script to take a genotyping matrix with population assignment and produce
FASTA files for each population. This was written with K. Thornton's
libsequence tools in mind. This script will also remove monomorphic sites.
Assumes that samples are rows and markers are columns. The first column has a
population of origin and the second column is the individual ID."""
import sys
genotype_matrix = sys.argv[1]
missing = 'NA'
# This dictionary will contain the population genetic data that we are
# reading from the input file. It will be of the form
# {
# 'Pop1_ID': {
# 'PIs': [PIs],
# 'Genotypes': [genotypes],
# ...
# }
# 'Pop2_ID': {
# ...
# }
# ...
# }
popdata = {}
with open(genotype_matrix, 'r') as f:
for index, line in enumerate(f):
if index == 0:
continue
else:
tmp = line.strip().split()
# Get the population identifier and PI number
popid = tmp[0]
pi_no = tmp[1]
# Assign some population IDs. This is to do the combined
# population summary stats.
if popid == '2' or popid == '3':
popid = '23'
# Stick it into the dictionary
if popid not in popdata:
popdata[popid] = {
'PIs': [],
'Genotypes': []
}
popdata[popid]['PIs'].append(pi_no)
popdata[popid]['Genotypes'].append(tmp[2:])
# Next, we iterate through the dictionary, removing monomorphic markers
for pop in popdata:
gen_mat = popdata[pop]['Genotypes']
# Transpose the genotype matrix so we can iterate through the markers
gen_mat = zip(*gen_mat)
# And start a new matrix for filtered data
filtered = []
for marker in gen_mat:
# what are the states?
alleles = set(marker)
# Discard any missing data
alleles.discard(missing)
# If they are monomorphic after discarding missing data, then toss it
if len(alleles) == 1:
continue
else:
filtered.append(marker)
# Then, we transpose it back
filtered = zip(*filtered)
# And write it out!
handle = open('Pop_' + pop + '.fasta', 'w')
for pi, genotypes in zip(popdata[pop]['PIs'], filtered):
# We need to convert the NA into N
new_geno = ['N' if x == 'NA' else x for x in genotypes]
towrite = '>' + pi + '\n' + ''.join(new_geno) + '\n'
handle.write(towrite)
handle.flush()
handle.close()
|
<commit_before><commit_msg>Add script for converting genotype matrix to libsequence-friendly FASTA<commit_after>
|
#!/usr/bin/env python
"""A script to take a genotyping matrix with population assignment and produce
FASTA files for each population. This was written with K. Thornton's
libsequence tools in mind. This script will also remove monomorphic sites.
Assumes that samples are rows and markers are columns. The first column has a
population of origin and the second column is the individual ID."""
import sys
genotype_matrix = sys.argv[1]
missing = 'NA'
# This dictionary will contain the population genetic data that we are
# reading from the input file. It will be of the form
# {
# 'Pop1_ID': {
# 'PIs': [PIs],
# 'Genotypes': [genotypes],
# ...
# }
# 'Pop2_ID': {
# ...
# }
# ...
# }
popdata = {}
with open(genotype_matrix, 'r') as f:
for index, line in enumerate(f):
if index == 0:
continue
else:
tmp = line.strip().split()
# Get the population identifier and PI number
popid = tmp[0]
pi_no = tmp[1]
# Assign some population IDs. This is to do the combined
# population summary stats.
if popid == '2' or popid == '3':
popid = '23'
# Stick it into the dictionary
if popid not in popdata:
popdata[popid] = {
'PIs': [],
'Genotypes': []
}
popdata[popid]['PIs'].append(pi_no)
popdata[popid]['Genotypes'].append(tmp[2:])
# Next, we iterate through the dictionary, removing monomorphic markers
for pop in popdata:
gen_mat = popdata[pop]['Genotypes']
# Transpose the genotype matrix so we can iterate through the markers
gen_mat = zip(*gen_mat)
# And start a new matrix for filtered data
filtered = []
for marker in gen_mat:
# what are the states?
alleles = set(marker)
# Discard any missing data
alleles.discard(missing)
# If they are monomorphic after discarding missing data, then toss it
if len(alleles) == 1:
continue
else:
filtered.append(marker)
# Then, we transpose it back
filtered = zip(*filtered)
# And write it out!
handle = open('Pop_' + pop + '.fasta', 'w')
for pi, genotypes in zip(popdata[pop]['PIs'], filtered):
# We need to convert the NA into N
new_geno = ['N' if x == 'NA' else x for x in genotypes]
towrite = '>' + pi + '\n' + ''.join(new_geno) + '\n'
handle.write(towrite)
handle.flush()
handle.close()
|
Add script for converting genotype matrix to libsequence-friendly FASTA#!/usr/bin/env python
"""A script to take a genotyping matrix with population assignment and produce
FASTA files for each population. This was written with K. Thornton's
libsequence tools in mind. This script will also remove monomorphic sites.
Assumes that samples are rows and markers are columns. The first column has a
population of origin and the second column is the individual ID."""
import sys
genotype_matrix = sys.argv[1]
missing = 'NA'
# This dictionary will contain the population genetic data that we are
# reading from the input file. It will be of the form
# {
# 'Pop1_ID': {
# 'PIs': [PIs],
# 'Genotypes': [genotypes],
# ...
# }
# 'Pop2_ID': {
# ...
# }
# ...
# }
popdata = {}
with open(genotype_matrix, 'r') as f:
for index, line in enumerate(f):
if index == 0:
continue
else:
tmp = line.strip().split()
# Get the population identifier and PI number
popid = tmp[0]
pi_no = tmp[1]
# Assign some population IDs. This is to do the combined
# population summary stats.
if popid == '2' or popid == '3':
popid = '23'
# Stick it into the dictionary
if popid not in popdata:
popdata[popid] = {
'PIs': [],
'Genotypes': []
}
popdata[popid]['PIs'].append(pi_no)
popdata[popid]['Genotypes'].append(tmp[2:])
# Next, we iterate through the dictionary, removing monomorphic markers
for pop in popdata:
gen_mat = popdata[pop]['Genotypes']
# Transpose the genotype matrix so we can iterate through the markers
gen_mat = zip(*gen_mat)
# And start a new matrix for filtered data
filtered = []
for marker in gen_mat:
# what are the states?
alleles = set(marker)
# Discard any missing data
alleles.discard(missing)
# If they are monomorphic after discarding missing data, then toss it
if len(alleles) == 1:
continue
else:
filtered.append(marker)
# Then, we transpose it back
filtered = zip(*filtered)
# And write it out!
handle = open('Pop_' + pop + '.fasta', 'w')
for pi, genotypes in zip(popdata[pop]['PIs'], filtered):
# We need to convert the NA into N
new_geno = ['N' if x == 'NA' else x for x in genotypes]
towrite = '>' + pi + '\n' + ''.join(new_geno) + '\n'
handle.write(towrite)
handle.flush()
handle.close()
|
<commit_before><commit_msg>Add script for converting genotype matrix to libsequence-friendly FASTA<commit_after>#!/usr/bin/env python
"""A script to take a genotyping matrix with population assignment and produce
FASTA files for each population. This was written with K. Thornton's
libsequence tools in mind. This script will also remove monomorphic sites.
Assumes that samples are rows and markers are columns. The first column has a
population of origin and the second column is the individual ID."""
import sys
genotype_matrix = sys.argv[1]
missing = 'NA'
# This dictionary will contain the population genetic data that we are
# reading from the input file. It will be of the form
# {
# 'Pop1_ID': {
# 'PIs': [PIs],
# 'Genotypes': [genotypes],
# ...
# }
# 'Pop2_ID': {
# ...
# }
# ...
# }
popdata = {}
with open(genotype_matrix, 'r') as f:
for index, line in enumerate(f):
if index == 0:
continue
else:
tmp = line.strip().split()
# Get the population identifier and PI number
popid = tmp[0]
pi_no = tmp[1]
# Assign some population IDs. This is to do the combined
# population summary stats.
if popid == '2' or popid == '3':
popid = '23'
# Stick it into the dictionary
if popid not in popdata:
popdata[popid] = {
'PIs': [],
'Genotypes': []
}
popdata[popid]['PIs'].append(pi_no)
popdata[popid]['Genotypes'].append(tmp[2:])
# Next, we iterate through the dictionary, removing monomorphic markers
for pop in popdata:
gen_mat = popdata[pop]['Genotypes']
# Transpose the genotype matrix so we can iterate through the markers
gen_mat = zip(*gen_mat)
# And start a new matrix for filtered data
filtered = []
for marker in gen_mat:
# what are the states?
alleles = set(marker)
# Discard any missing data
alleles.discard(missing)
# If they are monomorphic after discarding missing data, then toss it
if len(alleles) == 1:
continue
else:
filtered.append(marker)
# Then, we transpose it back
filtered = zip(*filtered)
# And write it out!
handle = open('Pop_' + pop + '.fasta', 'w')
for pi, genotypes in zip(popdata[pop]['PIs'], filtered):
# We need to convert the NA into N
new_geno = ['N' if x == 'NA' else x for x in genotypes]
towrite = '>' + pi + '\n' + ''.join(new_geno) + '\n'
handle.write(towrite)
handle.flush()
handle.close()
|
|
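For reference, an input file matching the layout the script's docstring describes (a header row, then one sample per line with population, individual ID, and one column per marker, `NA` for missing calls) could be generated like this; the marker names and genotype calls are invented for illustration.

```python
# Hypothetical input matching the layout described in the script's docstring:
# a header row, then one sample per line with population of origin, individual
# ID, and one whitespace-separated column per marker ('NA' = missing call).
# Marker names and genotype calls are invented for illustration.
example = """\
pop pi m1 m2 m3 m4
2 PI001 A G NA T
2 PI002 A G C T
3 PI003 A A C T
5 PI004 G A C NA
5 PI005 G T C C
"""
with open("example_matrix.txt", "w") as handle:
    handle.write(example)
# Running the script on this file would merge populations 2 and 3 into '23',
# drop markers that are monomorphic within a population after ignoring 'NA',
# and write Pop_23.fasta and Pop_5.fasta with remaining 'NA' calls written as 'N'.
```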
350f88747c15e08fc7c58f431ea5a93eb650e789
|
ibmcnx/config/addNode.py
|
ibmcnx/config/addNode.py
|
######
# Create Cluster Servers for an additional Node
#
# Author: Christoph Stoettner
# Mail: christoph.stoettner@stoeps.de
# Documentation: http://scripting101.stoeps.de
#
# Version: 2.0
# Date: 2014-06-13
#
# License: Apache 2.0
#
def selectNode( nodelist ):
result = nodelist
counter = len( result )
index = 0
count = 0
node_id = []
node_name = []
numberlist = []
node_number = -1
i = 0
print '\nAvailable Nodes:'
print '\n'
for i in range( len( result ) ):
print str( i ) + '\t' + result[i]
i += 1
count += 1
print '\n'
go_on = ''
while go_on != 'TRUE':
node_number = raw_input( 'Please select the number of the node? ' )
try:
node_number = int( node_number )
except ( TypeError, ValueError ):
continue
if count - 1 >= node_number >= 0:
break
else:
continue
nodename = result[node_number]
return ( nodename, 1 )
cell = AdminControl.getCell()
cellname = "/Cell:" + cell + "/"
clusterlist = AdminConfig.list( 'ServerCluster', AdminConfig.getid( cellname ) ).splitlines()
nodelist = AdminTask.listNodes().splitlines()
nodename, nodevalid = selectNode( nodelist )
print nodename
# for cluster in clusterlist:
# AdminTask.createClusterMember( '[-clusterName Cluster1 -memberConfig [-memberNode cnxwas2Node01 -memberName Cluster1_server2 -memberWeight 2 -genUniquePorts true -replicatorEntry false]]' )
|
Test all scripts on Windows
|
10: Test all scripts on Windows
Task-Url: http://github.com/stoeps13/ibmcnx2/issues/issue/10
|
Python
|
apache-2.0
|
stoeps13/ibmcnx2,stoeps13/ibmcnx2
|
10: Test all scripts on Windows
Task-Url: http://github.com/stoeps13/ibmcnx2/issues/issue/10
|
######
# Create Cluster Servers for an additional Node
#
# Author: Christoph Stoettner
# Mail: christoph.stoettner@stoeps.de
# Documentation: http://scripting101.stoeps.de
#
# Version: 2.0
# Date: 2014-06-13
#
# License: Apache 2.0
#
def selectNode( nodelist ):
result = nodelist
counter = len( result )
index = 0
count = 0
node_id = []
node_name = []
numberlist = []
node_number = -1
i = 0
print '\nAvailable Nodes:'
print '\n'
for i in range( len( result ) ):
print str( i ) + '\t' + result[i]
i += 1
count += 1
print '\n'
go_on = ''
while go_on != 'TRUE':
node_number = raw_input( 'Please select the number of the node? ' )
try:
node_number = int( node_number )
except ( TypeError, ValueError ):
continue
if count - 1 >= node_number >= 0:
break
else:
continue
nodename = result[node_number]
return ( nodename, 1 )
cell = AdminControl.getCell()
cellname = "/Cell:" + cell + "/"
clusterlist = AdminConfig.list( 'ServerCluster', AdminConfig.getid( cellname ) ).splitlines()
nodelist = AdminTask.listNodes().splitlines()
nodename, nodevalid = selectNode( nodelist )
print nodename
# for cluster in clusterlist:
# AdminTask.createClusterMember( '[-clusterName Cluster1 -memberConfig [-memberNode cnxwas2Node01 -memberName Cluster1_server2 -memberWeight 2 -genUniquePorts true -replicatorEntry false]]' )
|
<commit_before><commit_msg>10: Test all scripts on Windows
Task-Url: http://github.com/stoeps13/ibmcnx2/issues/issue/10<commit_after>
|
######
# Create Cluster Servers for an additional Node
#
# Author: Christoph Stoettner
# Mail: christoph.stoettner@stoeps.de
# Documentation: http://scripting101.stoeps.de
#
# Version: 2.0
# Date: 2014-06-13
#
# License: Apache 2.0
#
def selectNode( nodelist ):
result = nodelist
counter = len( result )
index = 0
count = 0
node_id = []
node_name = []
numberlist = []
node_number = -1
i = 0
print '\nAvailable Nodes:'
print '\n'
for i in range( len( result ) ):
print str( i ) + '\t' + result[i]
i += 1
count += 1
print '\n'
go_on = ''
while go_on != 'TRUE':
node_number = raw_input( 'Please select the number of the node? ' )
try:
node_number = int( node_number )
except ( TypeError, ValueError ):
continue
if count - 1 >= node_number >= 0:
break
else:
continue
nodename = result[node_number]
return ( nodename, 1 )
cell = AdminControl.getCell()
cellname = "/Cell:" + cell + "/"
clusterlist = AdminConfig.list( 'ServerCluster', AdminConfig.getid( cellname ) ).splitlines()
nodelist = AdminTask.listNodes().splitlines()
nodename, nodevalid = selectNode( nodelist )
print nodename
# for cluster in clusterlist:
# AdminTask.createClusterMember( '[-clusterName Cluster1 -memberConfig [-memberNode cnxwas2Node01 -memberName Cluster1_server2 -memberWeight 2 -genUniquePorts true -replicatorEntry false]]' )
|
10: Test all scripts on Windows
Task-Url: http://github.com/stoeps13/ibmcnx2/issues/issue/10######
# Create Cluster Servers for an additional Node
#
# Author: Christoph Stoettner
# Mail: christoph.stoettner@stoeps.de
# Documentation: http://scripting101.stoeps.de
#
# Version: 2.0
# Date: 2014-06-13
#
# License: Apache 2.0
#
def selectNode( nodelist ):
result = nodelist
counter = len( result )
index = 0
count = 0
node_id = []
node_name = []
numberlist = []
node_number = -1
i = 0
print '\nAvailable Nodes:'
print '\n'
for i in range( len( result ) ):
print str( i ) + '\t' + result[i]
i += 1
count += 1
print '\n'
go_on = ''
while go_on != 'TRUE':
node_number = raw_input( 'Please select the number of the node? ' )
try:
node_number = int( node_number )
except ( TypeError, ValueError ):
continue
if count - 1 >= node_number >= 0:
break
else:
continue
nodename = result[node_number]
return ( nodename, 1 )
cell = AdminControl.getCell()
cellname = "/Cell:" + cell + "/"
clusterlist = AdminConfig.list( 'ServerCluster', AdminConfig.getid( cellname ) ).splitlines()
nodelist = AdminTask.listNodes().splitlines()
nodename, nodevalid = selectNode( nodelist )
print nodename
# for cluster in clusterlist:
# AdminTask.createClusterMember( '[-clusterName Cluster1 -memberConfig [-memberNode cnxwas2Node01 -memberName Cluster1_server2 -memberWeight 2 -genUniquePorts true -replicatorEntry false]]' )
|
<commit_before><commit_msg>10: Test all scripts on Windows
Task-Url: http://github.com/stoeps13/ibmcnx2/issues/issue/10<commit_after>######
# Create Cluster Servers for an additional Node
#
# Author: Christoph Stoettner
# Mail: christoph.stoettner@stoeps.de
# Documentation: http://scripting101.stoeps.de
#
# Version: 2.0
# Date: 2014-06-13
#
# License: Apache 2.0
#
def selectNode( nodelist ):
result = nodelist
counter = len( result )
index = 0
count = 0
node_id = []
node_name = []
numberlist = []
node_number = -1
i = 0
print '\nAvailable Nodes:'
print '\n'
for i in range( len( result ) ):
print str( i ) + '\t' + result[i]
i += 1
count += 1
print '\n'
go_on = ''
while go_on != 'TRUE':
node_number = raw_input( 'Please select the number of the node? ' )
try:
node_number = int( node_number )
except ( TypeError, ValueError ):
continue
if count - 1 >= node_number >= 0:
break
else:
continue
nodename = result[node_number]
return ( nodename, 1 )
cell = AdminControl.getCell()
cellname = "/Cell:" + cell + "/"
clusterlist = AdminConfig.list( 'ServerCluster', AdminConfig.getid( cellname ) ).splitlines()
nodelist = AdminTask.listNodes().splitlines()
nodename, nodevalid = selectNode( nodelist )
print nodename
# for cluster in clusterlist:
# AdminTask.createClusterMember( '[-clusterName Cluster1 -memberConfig [-memberNode cnxwas2Node01 -memberName Cluster1_server2 -memberWeight 2 -genUniquePorts true -replicatorEntry false]]' )
|
|
e6369a2b4954356ed6b43cb83fb0aba41c6abc16
|
py/house-robber-iii.py
|
py/house-robber-iii.py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def do_rob(self, cur):
if not cur:
return 0, 0
else:
robl, no_robl = self.do_rob(cur.left)
robr, no_robr = self.do_rob(cur.right)
rob_cur, no_rob_cur = cur.val + no_robl + no_robr, max(robl, no_robl) + max(robr, no_robr)
return rob_cur, no_rob_cur
def rob(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return max(self.do_rob(root))
|
Add py solution for 337. House Robber III
|
Add py solution for 337. House Robber III
337. House Robber III: https://leetcode.com/problems/house-robber-iii/
Approach:
Observe the first item remaining in each step. The value will be added
1 << step either the remaining count is odd or it's a left-to-right
step. Hence the n | 0x55555.. is the key.
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 337. House Robber III
337. House Robber III: https://leetcode.com/problems/house-robber-iii/
Approach:
Observe the first item remaining in each step. The value will be added
1 << step either the remaining count is odd or it's a left-to-right
step. Hence the n | 0x55555.. is the key.
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def do_rob(self, cur):
if not cur:
return 0, 0
else:
robl, no_robl = self.do_rob(cur.left)
robr, no_robr = self.do_rob(cur.right)
rob_cur, no_rob_cur = cur.val + no_robl + no_robr, max(robl, no_robl) + max(robr, no_robr)
return rob_cur, no_rob_cur
def rob(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return max(self.do_rob(root))
|
<commit_before><commit_msg>Add py solution for 337. House Robber III
337. House Robber III: https://leetcode.com/problems/house-robber-iii/
Approach:
Observe the first item remaining in each step. The value will be added
1 << step either the remaining count is odd or it's a left-to-right
step. Hence the n | 0x55555.. is the key.<commit_after>
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def do_rob(self, cur):
if not cur:
return 0, 0
else:
robl, no_robl = self.do_rob(cur.left)
robr, no_robr = self.do_rob(cur.right)
rob_cur, no_rob_cur = cur.val + no_robl + no_robr, max(robl, no_robl) + max(robr, no_robr)
return rob_cur, no_rob_cur
def rob(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return max(self.do_rob(root))
|
Add py solution for 337. House Robber III
337. House Robber III: https://leetcode.com/problems/house-robber-iii/
Approach:
Observe the first item remaining in each step. The value will be added
1 << step either the remaining count is odd or it's a left-to-right
step. Hence the n | 0x55555.. is the key.# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def do_rob(self, cur):
if not cur:
return 0, 0
else:
robl, no_robl = self.do_rob(cur.left)
robr, no_robr = self.do_rob(cur.right)
rob_cur, no_rob_cur = cur.val + no_robl + no_robr, max(robl, no_robl) + max(robr, no_robr)
return rob_cur, no_rob_cur
def rob(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return max(self.do_rob(root))
|
<commit_before><commit_msg>Add py solution for 337. House Robber III
337. House Robber III: https://leetcode.com/problems/house-robber-iii/
Approach:
Observe the first item remaining in each step. The value will be added
1 << step either the remaining count is odd or it's a left-to-right
step. Hence the n | 0x55555.. is the key.<commit_after># Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def do_rob(self, cur):
if not cur:
return 0, 0
else:
robl, no_robl = self.do_rob(cur.left)
robr, no_robr = self.do_rob(cur.right)
rob_cur, no_rob_cur = cur.val + no_robl + no_robr, max(robl, no_robl) + max(robr, no_robr)
return rob_cur, no_rob_cur
def rob(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return max(self.do_rob(root))
|
|
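Assuming the `Solution` class from the row above is in scope, the snippet below runs it on the first example tree from the problem statement ([3,2,3,null,3,null,1]), for which the expected answer is 7.

```python
# Quick check of the solution above, assuming the Solution class from that file
# is in scope. The tree is [3,2,3,null,3,null,1]; robbing the root plus the two
# grandchildren yields 7.
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(3)
root.left = TreeNode(2)
root.right = TreeNode(3)
root.left.right = TreeNode(3)
root.right.right = TreeNode(1)

print(Solution().rob(root))  # 7
```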
a5dda3d167d460fd60defe2debc0460d577c008d
|
src/ifd.blog/ifd/blog/subjects.py
|
src/ifd.blog/ifd/blog/subjects.py
|
# -*- coding: UTF-8 -*-
from collective.z3cform.widgets.token_input_widget import TokenInputFieldWidget
from plone.app.dexterity.behaviors.metadata import ICategorization
from plone.app.z3cform.interfaces import IPloneFormLayer
from z3c.form.interfaces import IFieldWidget
from z3c.form.util import getSpecification
from z3c.form.widget import FieldWidget
from zope.component import adapter
from zope.interface import implementer
@adapter(getSpecification(ICategorization['subjects']), IPloneFormLayer)
@implementer(IFieldWidget)
def SubjectsFieldWidget(field, request):
widget = FieldWidget(field, TokenInputFieldWidget(field, request))
return widget
|
Add categorization behavior adaptor for better widget
|
Add categorization behavior adaptor for better widget
|
Python
|
mit
|
potzenheimer/buildout.ifd,potzenheimer/buildout.ifd
|
Add categorization behavior adaptor for better widget
|
# -*- coding: UTF-8 -*-
from collective.z3cform.widgets.token_input_widget import TokenInputFieldWidget
from plone.app.dexterity.behaviors.metadata import ICategorization
from plone.app.z3cform.interfaces import IPloneFormLayer
from z3c.form.interfaces import IFieldWidget
from z3c.form.util import getSpecification
from z3c.form.widget import FieldWidget
from zope.component import adapter
from zope.interface import implementer
@adapter(getSpecification(ICategorization['subjects']), IPloneFormLayer)
@implementer(IFieldWidget)
def SubjectsFieldWidget(field, request):
widget = FieldWidget(field, TokenInputFieldWidget(field, request))
return widget
|
<commit_before><commit_msg>Add categorization behavior adaptor for better widget<commit_after>
|
# -*- coding: UTF-8 -*-
from collective.z3cform.widgets.token_input_widget import TokenInputFieldWidget
from plone.app.dexterity.behaviors.metadata import ICategorization
from plone.app.z3cform.interfaces import IPloneFormLayer
from z3c.form.interfaces import IFieldWidget
from z3c.form.util import getSpecification
from z3c.form.widget import FieldWidget
from zope.component import adapter
from zope.interface import implementer
@adapter(getSpecification(ICategorization['subjects']), IPloneFormLayer)
@implementer(IFieldWidget)
def SubjectsFieldWidget(field, request):
widget = FieldWidget(field, TokenInputFieldWidget(field, request))
return widget
|
Add categorization behavior adaptor for better widget# -*- coding: UTF-8 -*-
from collective.z3cform.widgets.token_input_widget import TokenInputFieldWidget
from plone.app.dexterity.behaviors.metadata import ICategorization
from plone.app.z3cform.interfaces import IPloneFormLayer
from z3c.form.interfaces import IFieldWidget
from z3c.form.util import getSpecification
from z3c.form.widget import FieldWidget
from zope.component import adapter
from zope.interface import implementer
@adapter(getSpecification(ICategorization['subjects']), IPloneFormLayer)
@implementer(IFieldWidget)
def SubjectsFieldWidget(field, request):
widget = FieldWidget(field, TokenInputFieldWidget(field, request))
return widget
|
<commit_before><commit_msg>Add categorization behavior adaptor for better widget<commit_after># -*- coding: UTF-8 -*-
from collective.z3cform.widgets.token_input_widget import TokenInputFieldWidget
from plone.app.dexterity.behaviors.metadata import ICategorization
from plone.app.z3cform.interfaces import IPloneFormLayer
from z3c.form.interfaces import IFieldWidget
from z3c.form.util import getSpecification
from z3c.form.widget import FieldWidget
from zope.component import adapter
from zope.interface import implementer
@adapter(getSpecification(ICategorization['subjects']), IPloneFormLayer)
@implementer(IFieldWidget)
def SubjectsFieldWidget(field, request):
widget = FieldWidget(field, TokenInputFieldWidget(field, request))
return widget
|
|
e947e9b0a5c3804d50bc6a602433861cca10debb
|
rwt/tests/test_deps.py
|
rwt/tests/test_deps.py
|
import pytest
import pkg_resources
from rwt import deps
@pytest.mark.xfail(reason="Technique fails to load entry points")
def test_entry_points():
"""
Ensure entry points are visible after making packages visible
"""
with deps.on_sys_path('jaraco.mongodb'):
eps = pkg_resources.iter_entry_points('pytest11')
assert list(eps), "Entry points not found"
|
Add test capturing expectation that entry points will be visible after installing packages.
|
Add test capturing expectation that entry points will be visible after installing packages.
|
Python
|
mit
|
jaraco/rwt
|
Add test capturing expectation that entry points will be visible after installing packages.
|
import pytest
import pkg_resources
from rwt import deps
@pytest.mark.xfail(reason="Technique fails to load entry points")
def test_entry_points():
"""
Ensure entry points are visible after making packages visible
"""
with deps.on_sys_path('jaraco.mongodb'):
eps = pkg_resources.iter_entry_points('pytest11')
assert list(eps), "Entry points not found"
|
<commit_before><commit_msg>Add test capturing expectation that entry points will be visible after installing packages.<commit_after>
|
import pytest
import pkg_resources
from rwt import deps
@pytest.mark.xfail(reason="Technique fails to load entry points")
def test_entry_points():
"""
Ensure entry points are visible after making packages visible
"""
with deps.on_sys_path('jaraco.mongodb'):
eps = pkg_resources.iter_entry_points('pytest11')
assert list(eps), "Entry points not found"
|
Add test capturing expectation that entry points will be visible after installing packages.import pytest
import pkg_resources
from rwt import deps
@pytest.mark.xfail(reason="Technique fails to load entry points")
def test_entry_points():
"""
Ensure entry points are visible after making packages visible
"""
with deps.on_sys_path('jaraco.mongodb'):
eps = pkg_resources.iter_entry_points('pytest11')
assert list(eps), "Entry points not found"
|
<commit_before><commit_msg>Add test capturing expectation that entry points will be visible after installing packages.<commit_after>import pytest
import pkg_resources
from rwt import deps
@pytest.mark.xfail(reason="Technique fails to load entry points")
def test_entry_points():
"""
Ensure entry points are visible after making packages visible
"""
with deps.on_sys_path('jaraco.mongodb'):
eps = pkg_resources.iter_entry_points('pytest11')
assert list(eps), "Entry points not found"
|
|
769f982d58c75e4c3f07d68d93a3dd549d548efa
|
scripts/get-instances.py
|
scripts/get-instances.py
|
import sys
from boto import ec2
def get_instances(conn, environment):
for reservation in conn.get_all_reservations():
env_suffix = '-{}'.format(environment)
if reservation.instances[0].tags['Name'].endswith(env_suffix):
yield reservation.instances[0]
if __name__ == '__main__':
environment = sys.argv[1]
conn = ec2.connect_to_region('eu-west-1')
for instance in get_instances(conn, environment):
print(','.join([
instance.id, instance.ip_address, instance.tags['Name']
]))
|
Add a script to list instances
|
Add a script to list instances
This is a quick and dirty script to list out instances from an
environment.
|
Python
|
mit
|
alphagov/digitalmarketplace-aws,alphagov/digitalmarketplace-aws,alphagov/digitalmarketplace-aws
|
Add a script to list instances
This is a quick and dirty script to list out instances from an
environment.
|
import sys
from boto import ec2
def get_instances(conn, environment):
for reservation in conn.get_all_reservations():
env_suffix = '-{}'.format(environment)
if reservation.instances[0].tags['Name'].endswith(env_suffix):
yield reservation.instances[0]
if __name__ == '__main__':
environment = sys.argv[1]
conn = ec2.connect_to_region('eu-west-1')
for instance in get_instances(conn, environment):
print(','.join([
instance.id, instance.ip_address, instance.tags['Name']
]))
|
<commit_before><commit_msg>Add a script to list instances
This is a quick and dirty script to list out instances from an
environment.<commit_after>
|
import sys
from boto import ec2
def get_instances(conn, environment):
for reservation in conn.get_all_reservations():
env_suffix = '-{}'.format(environment)
if reservation.instances[0].tags['Name'].endswith(env_suffix):
yield reservation.instances[0]
if __name__ == '__main__':
environment = sys.argv[1]
conn = ec2.connect_to_region('eu-west-1')
for instance in get_instances(conn, environment):
print(','.join([
instance.id, instance.ip_address, instance.tags['Name']
]))
|
Add a script to list instances
This is a quick and dirty script to list out instances from an
environment.import sys
from boto import ec2
def get_instances(conn, environment):
for reservation in conn.get_all_reservations():
env_suffix = '-{}'.format(environment)
if reservation.instances[0].tags['Name'].endswith(env_suffix):
yield reservation.instances[0]
if __name__ == '__main__':
environment = sys.argv[1]
conn = ec2.connect_to_region('eu-west-1')
for instance in get_instances(conn, environment):
print(','.join([
instance.id, instance.ip_address, instance.tags['Name']
]))
|
<commit_before><commit_msg>Add a script to list instances
This is a quick and dirty script to list out instances from an
environment.<commit_after>import sys
from boto import ec2
def get_instances(conn, environment):
for reservation in conn.get_all_reservations():
env_suffix = '-{}'.format(environment)
if reservation.instances[0].tags['Name'].endswith(env_suffix):
yield reservation.instances[0]
if __name__ == '__main__':
environment = sys.argv[1]
conn = ec2.connect_to_region('eu-west-1')
for instance in get_instances(conn, environment):
print(','.join([
instance.id, instance.ip_address, instance.tags['Name']
]))
|
|
046bacdc8e7a92785f12bf8e3b3a6c698df3f86f
|
bin/benchmark_embed.py
|
bin/benchmark_embed.py
|
import nltk
import plac
import os
from os import path
import io
import gzip
from collections import defaultdict
import cProfile
import pstats
from thinc.neural.eeap import Embed
from thinc.neural.eeap import NumpyOps
def iter_files(giga_dir):
i = 0
for subdir in os.listdir(giga_dir):
if not path.isdir(path.join(giga_dir, subdir)):
continue
for filename in os.listdir(path.join(giga_dir, subdir)):
if filename.endswith('gz'):
print(filename)
yield path.join(giga_dir, subdir, filename)
i += 1
if i >= 1:
break
break
def main(giga_dir):
ops = NumpyOps()
vectors = defaultdict(lambda: ops.allocate((300,)))
W = ops.allocate((200, 300))
embed = Embed(vectors=vectors, W=W, ops=ops)
nr_word = 0
for loc in iter_files(giga_dir):
with gzip.open(loc, 'r') as file_:
text = file_.read()
words = text.split()
vectors = embed.predict_batch(words)
for word in words:
if word not in embed.vectors:
embed.vectors[word] = embed.ops.allocate((300,))
nr_word += len(words)
print(nr_word)
if __name__ == '__main__':
if 0:
plac.call(main)
else:
cProfile.runctx("plac.call(main)", globals(), locals(), "Profile.prof")
s = pstats.Stats("Profile.prof")
s.strip_dirs().sort_stats("time").print_stats()
|
Add existing script to benchmark embed
|
Add existing script to benchmark embed
|
Python
|
mit
|
explosion/thinc,spacy-io/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc
|
Add existing script to benchmark embed
|
import nltk
import plac
import os
from os import path
import io
import gzip
from collections import defaultdict
import cProfile
import pstats
from thinc.neural.eeap import Embed
from thinc.neural.eeap import NumpyOps
def iter_files(giga_dir):
i = 0
for subdir in os.listdir(giga_dir):
if not path.isdir(path.join(giga_dir, subdir)):
continue
for filename in os.listdir(path.join(giga_dir, subdir)):
if filename.endswith('gz'):
print(filename)
yield path.join(giga_dir, subdir, filename)
i += 1
if i >= 1:
break
break
def main(giga_dir):
ops = NumpyOps()
vectors = defaultdict(lambda: ops.allocate((300,)))
W = ops.allocate((200, 300))
embed = Embed(vectors=vectors, W=W, ops=ops)
nr_word = 0
for loc in iter_files(giga_dir):
with gzip.open(loc, 'r') as file_:
text = file_.read()
words = text.split()
vectors = embed.predict_batch(words)
for word in words:
if word not in embed.vectors:
embed.vectors[word] = embed.ops.allocate((300,))
nr_word += len(words)
print(nr_word)
if __name__ == '__main__':
if 0:
plac.call(main)
else:
cProfile.runctx("plac.call(main)", globals(), locals(), "Profile.prof")
s = pstats.Stats("Profile.prof")
s.strip_dirs().sort_stats("time").print_stats()
|
<commit_before><commit_msg>Add existing script to benchmark embed<commit_after>
|
import nltk
import plac
import os
from os import path
import io
import gzip
from collections import defaultdict
import cProfile
import pstats
from thinc.neural.eeap import Embed
from thinc.neural.eeap import NumpyOps
def iter_files(giga_dir):
i = 0
for subdir in os.listdir(giga_dir):
if not path.isdir(path.join(giga_dir, subdir)):
continue
for filename in os.listdir(path.join(giga_dir, subdir)):
if filename.endswith('gz'):
print(filename)
yield path.join(giga_dir, subdir, filename)
i += 1
if i >= 1:
break
break
def main(giga_dir):
ops = NumpyOps()
vectors = defaultdict(lambda: ops.allocate((300,)))
W = ops.allocate((200, 300))
embed = Embed(vectors=vectors, W=W, ops=ops)
nr_word = 0
for loc in iter_files(giga_dir):
with gzip.open(loc, 'r') as file_:
text = file_.read()
words = text.split()
vectors = embed.predict_batch(words)
for word in words:
if word not in embed.vectors:
embed.vectors[word] = embed.ops.allocate((300,))
nr_word += len(words)
print(nr_word)
if __name__ == '__main__':
if 0:
plac.call(main)
else:
cProfile.runctx("plac.call(main)", globals(), locals(), "Profile.prof")
s = pstats.Stats("Profile.prof")
s.strip_dirs().sort_stats("time").print_stats()
|
Add existing script to benchmark embedimport nltk
import plac
import os
from os import path
import io
import gzip
from collections import defaultdict
import cProfile
import pstats
from thinc.neural.eeap import Embed
from thinc.neural.eeap import NumpyOps
def iter_files(giga_dir):
i = 0
for subdir in os.listdir(giga_dir):
if not path.isdir(path.join(giga_dir, subdir)):
continue
for filename in os.listdir(path.join(giga_dir, subdir)):
if filename.endswith('gz'):
print(filename)
yield path.join(giga_dir, subdir, filename)
i += 1
if i >= 1:
break
break
def main(giga_dir):
ops = NumpyOps()
vectors = defaultdict(lambda: ops.allocate((300,)))
W = ops.allocate((200, 300))
embed = Embed(vectors=vectors, W=W, ops=ops)
nr_word = 0
for loc in iter_files(giga_dir):
with gzip.open(loc, 'r') as file_:
text = file_.read()
words = text.split()
vectors = embed.predict_batch(words)
for word in words:
if word not in embed.vectors:
embed.vectors[word] = embed.ops.allocate((300,))
nr_word += len(words)
print(nr_word)
if __name__ == '__main__':
if 0:
plac.call(main)
else:
cProfile.runctx("plac.call(main)", globals(), locals(), "Profile.prof")
s = pstats.Stats("Profile.prof")
s.strip_dirs().sort_stats("time").print_stats()
|
<commit_before><commit_msg>Add existing script to benchmark embed<commit_after>import nltk
import plac
import os
from os import path
import io
import gzip
from collections import defaultdict
import cProfile
import pstats
from thinc.neural.eeap import Embed
from thinc.neural.eeap import NumpyOps
def iter_files(giga_dir):
i = 0
for subdir in os.listdir(giga_dir):
if not path.isdir(path.join(giga_dir, subdir)):
continue
for filename in os.listdir(path.join(giga_dir, subdir)):
if filename.endswith('gz'):
print(filename)
yield path.join(giga_dir, subdir, filename)
i += 1
if i >= 1:
break
break
def main(giga_dir):
ops = NumpyOps()
vectors = defaultdict(lambda: ops.allocate((300,)))
W = ops.allocate((200, 300))
embed = Embed(vectors=vectors, W=W, ops=ops)
nr_word = 0
for loc in iter_files(giga_dir):
with gzip.open(loc, 'r') as file_:
text = file_.read()
words = text.split()
vectors = embed.predict_batch(words)
for word in words:
if word not in embed.vectors:
embed.vectors[word] = embed.ops.allocate((300,))
nr_word += len(words)
print(nr_word)
if __name__ == '__main__':
if 0:
plac.call(main)
else:
cProfile.runctx("plac.call(main)", globals(), locals(), "Profile.prof")
s = pstats.Stats("Profile.prof")
s.strip_dirs().sort_stats("time").print_stats()
|
|
a68760976cfbfa276b16ed465d6312783407dc8c
|
examples/flask_context.py
|
examples/flask_context.py
|
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from flask import Flask
from flask_apscheduler import APScheduler
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True)
email = db.Column(db.String(120), unique=True)
def show_users():
with db.app.app_context():
print(User.query.all())
class Config(object):
JOBS = [
{
'id': 'job1',
'func': show_users,
'trigger': 'interval',
'seconds': 2
}
]
SCHEDULER_JOBSTORES = {
'default': SQLAlchemyJobStore(url='sqlite:///flask_context.db')
}
SCHEDULER_API_ENABLED = True
if __name__ == '__main__':
app = Flask(__name__)
app.config.from_object(Config())
db.app = app
db.init_app(app)
scheduler = APScheduler()
scheduler.init_app(app)
scheduler.start()
app.run()
|
Add example to show how to get a flask context within a task.
|
Add example to show how to get a flask context within a task.
|
Python
|
apache-2.0
|
viniciuschiele/flask-apscheduler
|
Add example to show how to get a flask context within a task.
|
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from flask import Flask
from flask_apscheduler import APScheduler
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True)
email = db.Column(db.String(120), unique=True)
def show_users():
with db.app.app_context():
print(User.query.all())
class Config(object):
JOBS = [
{
'id': 'job1',
'func': show_users,
'trigger': 'interval',
'seconds': 2
}
]
SCHEDULER_JOBSTORES = {
'default': SQLAlchemyJobStore(url='sqlite:///flask_context.db')
}
SCHEDULER_API_ENABLED = True
if __name__ == '__main__':
app = Flask(__name__)
app.config.from_object(Config())
db.app = app
db.init_app(app)
scheduler = APScheduler()
scheduler.init_app(app)
scheduler.start()
app.run()
|
<commit_before><commit_msg>Add example to show how to get a flask context within a task.<commit_after>
|
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from flask import Flask
from flask_apscheduler import APScheduler
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True)
email = db.Column(db.String(120), unique=True)
def show_users():
with db.app.app_context():
print(User.query.all())
class Config(object):
JOBS = [
{
'id': 'job1',
'func': show_users,
'trigger': 'interval',
'seconds': 2
}
]
SCHEDULER_JOBSTORES = {
'default': SQLAlchemyJobStore(url='sqlite:///flask_context.db')
}
SCHEDULER_API_ENABLED = True
if __name__ == '__main__':
app = Flask(__name__)
app.config.from_object(Config())
db.app = app
db.init_app(app)
scheduler = APScheduler()
scheduler.init_app(app)
scheduler.start()
app.run()
|
Add example to show how to get a flask context within a task.
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from flask import Flask
from flask_apscheduler import APScheduler
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True)
email = db.Column(db.String(120), unique=True)
def show_users():
with db.app.app_context():
print(User.query.all())
class Config(object):
JOBS = [
{
'id': 'job1',
'func': show_users,
'trigger': 'interval',
'seconds': 2
}
]
SCHEDULER_JOBSTORES = {
'default': SQLAlchemyJobStore(url='sqlite:///flask_context.db')
}
SCHEDULER_API_ENABLED = True
if __name__ == '__main__':
app = Flask(__name__)
app.config.from_object(Config())
db.app = app
db.init_app(app)
scheduler = APScheduler()
scheduler.init_app(app)
scheduler.start()
app.run()
|
<commit_before><commit_msg>Add example to show how to get a flask context within a task.<commit_after>from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from flask import Flask
from flask_apscheduler import APScheduler
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True)
email = db.Column(db.String(120), unique=True)
def show_users():
with db.app.app_context():
print(User.query.all())
class Config(object):
JOBS = [
{
'id': 'job1',
'func': show_users,
'trigger': 'interval',
'seconds': 2
}
]
SCHEDULER_JOBSTORES = {
'default': SQLAlchemyJobStore(url='sqlite:///flask_context.db')
}
SCHEDULER_API_ENABLED = True
if __name__ == '__main__':
app = Flask(__name__)
app.config.from_object(Config())
db.app = app
db.init_app(app)
scheduler = APScheduler()
scheduler.init_app(app)
scheduler.start()
app.run()
|
|
f9b4544b359c48be3bcae9fd2d6e3a99c8a18e44
|
t3f/shapes_test.py
|
t3f/shapes_test.py
|
import tensorflow as tf
from t3f import initializers
from t3f import shapes
class ShapesTest(tf.test.TestCase):
def testLazyShapeOverflow(self):
large_shape = [10] * 20
tensor = initializers.random_matrix_batch([large_shape, large_shape], batch_size=5)
self.assertAllEqual([5, 10 ** 20, 10 ** 20], shapes.lazy_shape(tensor))
if __name__ == "__main__":
tf.test.main()
|
Test lazy shape as well
|
Test lazy shape as well
|
Python
|
mit
|
Bihaqo/t3f
|
Test lazy shape as well
|
import tensorflow as tf
from t3f import initializers
from t3f import shapes
class ShapesTest(tf.test.TestCase):
def testLazyShapeOverflow(self):
large_shape = [10] * 20
tensor = initializers.random_matrix_batch([large_shape, large_shape], batch_size=5)
self.assertAllEqual([5, 10 ** 20, 10 ** 20], shapes.lazy_shape(tensor))
if __name__ == "__main__":
tf.test.main()
|
<commit_before><commit_msg>Test lazy shape as well<commit_after>
|
import tensorflow as tf
from t3f import initializers
from t3f import shapes
class ShapesTest(tf.test.TestCase):
def testLazyShapeOverflow(self):
large_shape = [10] * 20
tensor = initializers.random_matrix_batch([large_shape, large_shape], batch_size=5)
self.assertAllEqual([5, 10 ** 20, 10 ** 20], shapes.lazy_shape(tensor))
if __name__ == "__main__":
tf.test.main()
|
Test lazy shape as well
import tensorflow as tf
from t3f import initializers
from t3f import shapes
class ShapesTest(tf.test.TestCase):
def testLazyShapeOverflow(self):
large_shape = [10] * 20
tensor = initializers.random_matrix_batch([large_shape, large_shape], batch_size=5)
self.assertAllEqual([5, 10 ** 20, 10 ** 20], shapes.lazy_shape(tensor))
if __name__ == "__main__":
tf.test.main()
|
<commit_before><commit_msg>Test lazy shape as well<commit_after>import tensorflow as tf
from t3f import initializers
from t3f import shapes
class ShapesTest(tf.test.TestCase):
def testLazyShapeOverflow(self):
large_shape = [10] * 20
tensor = initializers.random_matrix_batch([large_shape, large_shape], batch_size=5)
self.assertAllEqual([5, 10 ** 20, 10 ** 20], shapes.lazy_shape(tensor))
if __name__ == "__main__":
tf.test.main()
|
|
561d9db2693c4bd63dd8fce32192f43d92a67b36
|
job-logs/python/check_log.py
|
job-logs/python/check_log.py
|
import sys
import argparse
import csv
def examine_log(filename, save_raw=False):
"""
    Check a CSV file of job records for malformed rows.
    parameters:
    filename - path of the CSV file to examine
    save_raw - currently unused
"""
    input_file = open(filename, 'r')
csv_input = csv.reader(input_file)
error = 0
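    # Count rows that do not have the expected 87 fields.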
for row in csv_input:
if len(row) != 87:
error += 1
print error
    sys.stderr.write("{0} lines skipped due to errors".format(error))
return None
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process ATLAS job records')
parser.add_argument('--filename', dest='filename', default=None,
help='filename of input file')
parser.add_argument('--save-raw', dest='save_raw',
action='store_true',
help='Save raw log files instead of replacing in place')
args = parser.parse_args(sys.argv[1:])
examine_log(args.filename, args.save_raw)
|
Add script for checking csv files
|
Add script for checking csv files
|
Python
|
apache-2.0
|
DHTC-Tools/logstash-confs,DHTC-Tools/logstash-confs,DHTC-Tools/logstash-confs
|
Add script for checking csv files
|
import sys
import argparse
import csv
def examine_log(filename, save_raw=False):
"""
    Check a CSV file of job records for malformed rows.
    parameters:
    filename - path of the CSV file to examine
    save_raw - currently unused
"""
    input_file = open(filename, 'r')
csv_input = csv.reader(input_file)
error = 0
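    # Count rows that do not have the expected 87 fields.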
for row in csv_input:
if len(row) != 87:
error += 1
print error
    sys.stderr.write("{0} lines skipped due to errors".format(error))
return None
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process ATLAS job records')
parser.add_argument('--filename', dest='filename', default=None,
help='filename of input file')
parser.add_argument('--save-raw', dest='save_raw',
action='store_true',
help='Save raw log files instead of replacing in place')
args = parser.parse_args(sys.argv[1:])
examine_log(args.filename, args.save_raw)
|
<commit_before><commit_msg>Add script for checking csv files<commit_after>
|
import sys
import argparse
import csv
def examine_log(filename, save_raw=False):
"""
    Check a CSV file of job records for malformed rows.
    parameters:
    filename - path of the CSV file to examine
    save_raw - currently unused
"""
    input_file = open(filename, 'r')
csv_input = csv.reader(input_file)
error = 0
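    # Count rows that do not have the expected 87 fields.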
for row in csv_input:
if len(row) != 87:
error += 1
print error
    sys.stderr.write("{0} lines skipped due to errors".format(error))
return None
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process ATLAS job records')
parser.add_argument('--filename', dest='filename', default=None,
help='filename of input file')
parser.add_argument('--save-raw', dest='save_raw',
action='store_true',
help='Save raw log files instead of replacing in place')
args = parser.parse_args(sys.argv[1:])
examine_log(args.filename, args.save_raw)
|
Add script for checking csv files
import sys
import argparse
import csv
def examine_log(filename, save_raw=False):
"""
    Check a CSV file of job records for malformed rows.
    parameters:
    filename - path of the CSV file to examine
    save_raw - currently unused
"""
    input_file = open(filename, 'r')
csv_input = csv.reader(input_file)
error = 0
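    # Count rows that do not have the expected 87 fields.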
for row in csv_input:
if len(row) != 87:
error += 1
print error
    sys.stderr.write("{0} lines skipped due to errors".format(error))
return None
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process ATLAS job records')
parser.add_argument('--filename', dest='filename', default=None,
help='filename of input file')
parser.add_argument('--save-raw', dest='save_raw',
action='store_true',
help='Save raw log files instead of replacing in place')
args = parser.parse_args(sys.argv[1:])
examine_log(args.filename, args.save_raw)
|
<commit_before><commit_msg>Add script for checking csv files<commit_after>import sys
import argparse
import csv
def examine_log(filename, save_raw=False):
"""
    Check a CSV file of job records for malformed rows.
    parameters:
    filename - path of the CSV file to examine
    save_raw - currently unused
"""
    input_file = open(filename, 'r')
csv_input = csv.reader(input_file)
error = 0
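    # Count rows that do not have the expected 87 fields.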
for row in csv_input:
if len(row) != 87:
error += 1
print error
    sys.stderr.write("{0} lines skipped due to errors".format(error))
return None
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process ATLAS job records')
parser.add_argument('--filename', dest='filename', default=None,
help='filename of input file')
parser.add_argument('--save-raw', dest='save_raw',
action='store_true',
help='Save raw log files instead of replacing in place')
args = parser.parse_args(sys.argv[1:])
examine_log(args.filename, args.save_raw)
|
|
567e3c57762e13e0d43138940bc0b8d4cc15b08b
|
tests/unit/dataactcore/test_models_userModel.py
|
tests/unit/dataactcore/test_models_userModel.py
|
from dataactcore.models.domainModels import CGAC
from dataactcore.models.lookups import PERMISSION_TYPE_DICT
from dataactcore.models.userModel import User, UserAffiliation
from tests.unit.dataactcore.factories.domain import CGACFactory
from tests.unit.dataactcore.factories.user import UserFactory
def test_user_affiliation_fks(database, user_constants):
sess = database.session
users = [UserFactory() for _ in range(3)]
cgacs = [CGACFactory() for _ in range(6)]
permission = PERMISSION_TYPE_DICT['reader']
for idx, user in enumerate(users):
user.affiliations = [
UserAffiliation(cgac=cgacs[idx*2], permission_type_id=permission),
UserAffiliation(cgac=cgacs[idx*2+1], permission_type_id=permission),
]
sess.add_all(users)
sess.commit()
assert sess.query(UserAffiliation).count() == 6
# Deleting a user also deletes the affiliations
sess.delete(users[0])
sess.commit()
assert sess.query(UserAffiliation).count() == 4
# Deleting a CGAC also deletes the affiliations
sess.delete(cgacs[2])
sess.commit()
assert sess.query(UserAffiliation).count() == 3
assert len(users[1].affiliations) == 1
assert users[1].affiliations[0].cgac == cgacs[3]
# Deleting an affiliation doesn't delete the user or CGAC
assert sess.query(User).count() == 2
assert sess.query(CGAC).count() == 5
assert sess.query(UserAffiliation).count() == 3
sess.delete(users[2].affiliations[0])
sess.commit()
assert sess.query(User).count() == 2
assert sess.query(CGAC).count() == 5
assert sess.query(UserAffiliation).count() == 2
|
Add FK tests for user affiliations
|
Add FK tests for user affiliations
cc @nmonga91
|
Python
|
cc0-1.0
|
fedspendingtransparency/data-act-broker-backend,fedspendingtransparency/data-act-broker-backend
|
Add FK tests for user affiliations
cc @nmonga91
|
from dataactcore.models.domainModels import CGAC
from dataactcore.models.lookups import PERMISSION_TYPE_DICT
from dataactcore.models.userModel import User, UserAffiliation
from tests.unit.dataactcore.factories.domain import CGACFactory
from tests.unit.dataactcore.factories.user import UserFactory
def test_user_affiliation_fks(database, user_constants):
sess = database.session
users = [UserFactory() for _ in range(3)]
cgacs = [CGACFactory() for _ in range(6)]
permission = PERMISSION_TYPE_DICT['reader']
for idx, user in enumerate(users):
user.affiliations = [
UserAffiliation(cgac=cgacs[idx*2], permission_type_id=permission),
UserAffiliation(cgac=cgacs[idx*2+1], permission_type_id=permission),
]
sess.add_all(users)
sess.commit()
assert sess.query(UserAffiliation).count() == 6
# Deleting a user also deletes the affiliations
sess.delete(users[0])
sess.commit()
assert sess.query(UserAffiliation).count() == 4
# Deleting a CGAC also deletes the affiliations
sess.delete(cgacs[2])
sess.commit()
assert sess.query(UserAffiliation).count() == 3
assert len(users[1].affiliations) == 1
assert users[1].affiliations[0].cgac == cgacs[3]
# Deleting an affiliation doesn't delete the user or CGAC
assert sess.query(User).count() == 2
assert sess.query(CGAC).count() == 5
assert sess.query(UserAffiliation).count() == 3
sess.delete(users[2].affiliations[0])
sess.commit()
assert sess.query(User).count() == 2
assert sess.query(CGAC).count() == 5
assert sess.query(UserAffiliation).count() == 2
|
<commit_before><commit_msg>Add FK tests for user affiliations
cc @nmonga91<commit_after>
|
from dataactcore.models.domainModels import CGAC
from dataactcore.models.lookups import PERMISSION_TYPE_DICT
from dataactcore.models.userModel import User, UserAffiliation
from tests.unit.dataactcore.factories.domain import CGACFactory
from tests.unit.dataactcore.factories.user import UserFactory
def test_user_affiliation_fks(database, user_constants):
sess = database.session
users = [UserFactory() for _ in range(3)]
cgacs = [CGACFactory() for _ in range(6)]
permission = PERMISSION_TYPE_DICT['reader']
for idx, user in enumerate(users):
user.affiliations = [
UserAffiliation(cgac=cgacs[idx*2], permission_type_id=permission),
UserAffiliation(cgac=cgacs[idx*2+1], permission_type_id=permission),
]
sess.add_all(users)
sess.commit()
assert sess.query(UserAffiliation).count() == 6
# Deleting a user also deletes the affiliations
sess.delete(users[0])
sess.commit()
assert sess.query(UserAffiliation).count() == 4
# Deleting a CGAC also deletes the affiliations
sess.delete(cgacs[2])
sess.commit()
assert sess.query(UserAffiliation).count() == 3
assert len(users[1].affiliations) == 1
assert users[1].affiliations[0].cgac == cgacs[3]
# Deleting an affiliation doesn't delete the user or CGAC
assert sess.query(User).count() == 2
assert sess.query(CGAC).count() == 5
assert sess.query(UserAffiliation).count() == 3
sess.delete(users[2].affiliations[0])
sess.commit()
assert sess.query(User).count() == 2
assert sess.query(CGAC).count() == 5
assert sess.query(UserAffiliation).count() == 2
|
Add FK tests for user affiliations
cc @nmonga91
from dataactcore.models.domainModels import CGAC
from dataactcore.models.lookups import PERMISSION_TYPE_DICT
from dataactcore.models.userModel import User, UserAffiliation
from tests.unit.dataactcore.factories.domain import CGACFactory
from tests.unit.dataactcore.factories.user import UserFactory
def test_user_affiliation_fks(database, user_constants):
sess = database.session
users = [UserFactory() for _ in range(3)]
cgacs = [CGACFactory() for _ in range(6)]
permission = PERMISSION_TYPE_DICT['reader']
for idx, user in enumerate(users):
user.affiliations = [
UserAffiliation(cgac=cgacs[idx*2], permission_type_id=permission),
UserAffiliation(cgac=cgacs[idx*2+1], permission_type_id=permission),
]
sess.add_all(users)
sess.commit()
assert sess.query(UserAffiliation).count() == 6
# Deleting a user also deletes the affiliations
sess.delete(users[0])
sess.commit()
assert sess.query(UserAffiliation).count() == 4
# Deleting a CGAC also deletes the affiliations
sess.delete(cgacs[2])
sess.commit()
assert sess.query(UserAffiliation).count() == 3
assert len(users[1].affiliations) == 1
assert users[1].affiliations[0].cgac == cgacs[3]
# Deleting an affiliation doesn't delete the user or CGAC
assert sess.query(User).count() == 2
assert sess.query(CGAC).count() == 5
assert sess.query(UserAffiliation).count() == 3
sess.delete(users[2].affiliations[0])
sess.commit()
assert sess.query(User).count() == 2
assert sess.query(CGAC).count() == 5
assert sess.query(UserAffiliation).count() == 2
|
<commit_before><commit_msg>Add FK tests for user affiliations
cc @nmonga91<commit_after>from dataactcore.models.domainModels import CGAC
from dataactcore.models.lookups import PERMISSION_TYPE_DICT
from dataactcore.models.userModel import User, UserAffiliation
from tests.unit.dataactcore.factories.domain import CGACFactory
from tests.unit.dataactcore.factories.user import UserFactory
def test_user_affiliation_fks(database, user_constants):
sess = database.session
users = [UserFactory() for _ in range(3)]
cgacs = [CGACFactory() for _ in range(6)]
permission = PERMISSION_TYPE_DICT['reader']
for idx, user in enumerate(users):
user.affiliations = [
UserAffiliation(cgac=cgacs[idx*2], permission_type_id=permission),
UserAffiliation(cgac=cgacs[idx*2+1], permission_type_id=permission),
]
sess.add_all(users)
sess.commit()
assert sess.query(UserAffiliation).count() == 6
# Deleting a user also deletes the affiliations
sess.delete(users[0])
sess.commit()
assert sess.query(UserAffiliation).count() == 4
# Deleting a CGAC also deletes the affiliations
sess.delete(cgacs[2])
sess.commit()
assert sess.query(UserAffiliation).count() == 3
assert len(users[1].affiliations) == 1
assert users[1].affiliations[0].cgac == cgacs[3]
# Deleting an affiliation doesn't delete the user or CGAC
assert sess.query(User).count() == 2
assert sess.query(CGAC).count() == 5
assert sess.query(UserAffiliation).count() == 3
sess.delete(users[2].affiliations[0])
sess.commit()
assert sess.query(User).count() == 2
assert sess.query(CGAC).count() == 5
assert sess.query(UserAffiliation).count() == 2
|
|
ab08b50774170f4d3df6cfb58c447878ff646465
|
create-vm-opensteak.py
|
create-vm-opensteak.py
|
#!/usr/bin/python
import os
import pprint
import novaclient.v1_1.client as novaclient
pp = pprint.PrettyPrinter(indent=4)
def p(value):
"""Shortcut for pretty printing"""
pp.pprint(value)
def print_title(title):
"""Print title of things"""
print "\n"+"#"*32+"\n# "+title+"\n"+"#"*32+"\n"
def get_creds():
"""Retrieve creds from environment"""
d = {}
d['username'] = os.environ['OS_USERNAME']
d['api_key'] = os.environ['OS_PASSWORD']
d['auth_url'] = os.environ['OS_AUTH_URL']
d['project_id'] = os.environ['OS_TENANT_NAME']
return d
def main():
creds = get_creds()
nova = novaclient.Client(**creds)
#print_title("servers list")
#servers = nova.servers.list()
#p(servers)
#sprout = nova.servers.find(name='Sprout')
#p(sprout.__dict__)
f = nova.flavors.find(name = 'instantserver-small')
i = nova.images.find(name = 'Ubuntu 14.10 64b')
n = nova.networks.find(label = 'instantserver')
k = "idrsa-sansmotdepasse"
u = "#cloud-config\npassword: moutarde\nchpasswd: { expire: False }\nssh_pwauth: True"
new_server = nova.servers.create(
name = "instantserver-1",
flavor = f,
image = i,
nics = [{"net-id": n.id}],
key_name = k,
userdata = u,
security_groups = ["Tout-Autorise-Entrant-Sortant"]
)
if __name__ == '__main__':
main()
|
Add back create vm on opensteak
|
Add back create vm on opensteak
|
Python
|
apache-2.0
|
arnaudmorin/instantserver,arnaudmorin/instantserver,arnaudmorin/instantserver
|
Add back create vm on opensteak
|
#!/usr/bin/python
import os
import pprint
import novaclient.v1_1.client as novaclient
pp = pprint.PrettyPrinter(indent=4)
def p(value):
"""Shortcut for pretty printing"""
pp.pprint(value)
def print_title(title):
"""Print title of things"""
print "\n"+"#"*32+"\n# "+title+"\n"+"#"*32+"\n"
def get_creds():
"""Retrieve creds from environment"""
d = {}
d['username'] = os.environ['OS_USERNAME']
d['api_key'] = os.environ['OS_PASSWORD']
d['auth_url'] = os.environ['OS_AUTH_URL']
d['project_id'] = os.environ['OS_TENANT_NAME']
return d
def main():
creds = get_creds()
nova = novaclient.Client(**creds)
#print_title("servers list")
#servers = nova.servers.list()
#p(servers)
#sprout = nova.servers.find(name='Sprout')
#p(sprout.__dict__)
f = nova.flavors.find(name = 'instantserver-small')
i = nova.images.find(name = 'Ubuntu 14.10 64b')
n = nova.networks.find(label = 'instantserver')
k = "idrsa-sansmotdepasse"
u = "#cloud-config\npassword: moutarde\nchpasswd: { expire: False }\nssh_pwauth: True"
new_server = nova.servers.create(
name = "instantserver-1",
flavor = f,
image = i,
nics = [{"net-id": n.id}],
key_name = k,
userdata = u,
security_groups = ["Tout-Autorise-Entrant-Sortant"]
)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add back create vm on opensteak<commit_after>
|
#!/usr/bin/python
import os
import pprint
import novaclient.v1_1.client as novaclient
pp = pprint.PrettyPrinter(indent=4)
def p(value):
"""Shortcut for pretty printing"""
pp.pprint(value)
def print_title(title):
"""Print title of things"""
print "\n"+"#"*32+"\n# "+title+"\n"+"#"*32+"\n"
def get_creds():
"""Retrieve creds from environment"""
d = {}
d['username'] = os.environ['OS_USERNAME']
d['api_key'] = os.environ['OS_PASSWORD']
d['auth_url'] = os.environ['OS_AUTH_URL']
d['project_id'] = os.environ['OS_TENANT_NAME']
return d
def main():
creds = get_creds()
nova = novaclient.Client(**creds)
#print_title("servers list")
#servers = nova.servers.list()
#p(servers)
#sprout = nova.servers.find(name='Sprout')
#p(sprout.__dict__)
f = nova.flavors.find(name = 'instantserver-small')
i = nova.images.find(name = 'Ubuntu 14.10 64b')
n = nova.networks.find(label = 'instantserver')
k = "idrsa-sansmotdepasse"
u = "#cloud-config\npassword: moutarde\nchpasswd: { expire: False }\nssh_pwauth: True"
new_server = nova.servers.create(
name = "instantserver-1",
flavor = f,
image = i,
nics = [{"net-id": n.id}],
key_name = k,
userdata = u,
security_groups = ["Tout-Autorise-Entrant-Sortant"]
)
if __name__ == '__main__':
main()
|
Add back create vm on opensteak
#!/usr/bin/python
import os
import pprint
import novaclient.v1_1.client as novaclient
pp = pprint.PrettyPrinter(indent=4)
def p(value):
"""Shortcut for pretty printing"""
pp.pprint(value)
def print_title(title):
"""Print title of things"""
print "\n"+"#"*32+"\n# "+title+"\n"+"#"*32+"\n"
def get_creds():
"""Retrieve creds from environment"""
d = {}
d['username'] = os.environ['OS_USERNAME']
d['api_key'] = os.environ['OS_PASSWORD']
d['auth_url'] = os.environ['OS_AUTH_URL']
d['project_id'] = os.environ['OS_TENANT_NAME']
return d
def main():
creds = get_creds()
nova = novaclient.Client(**creds)
#print_title("servers list")
#servers = nova.servers.list()
#p(servers)
#sprout = nova.servers.find(name='Sprout')
#p(sprout.__dict__)
f = nova.flavors.find(name = 'instantserver-small')
i = nova.images.find(name = 'Ubuntu 14.10 64b')
n = nova.networks.find(label = 'instantserver')
k = "idrsa-sansmotdepasse"
u = "#cloud-config\npassword: moutarde\nchpasswd: { expire: False }\nssh_pwauth: True"
new_server = nova.servers.create(
name = "instantserver-1",
flavor = f,
image = i,
nics = [{"net-id": n.id}],
key_name = k,
userdata = u,
security_groups = ["Tout-Autorise-Entrant-Sortant"]
)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add back create vm on opensteak<commit_after>#!/usr/bin/python
import os
import pprint
import novaclient.v1_1.client as novaclient
pp = pprint.PrettyPrinter(indent=4)
def p(value):
"""Shortcut for pretty printing"""
pp.pprint(value)
def print_title(title):
"""Print title of things"""
print "\n"+"#"*32+"\n# "+title+"\n"+"#"*32+"\n"
def get_creds():
"""Retrieve creds from environment"""
d = {}
d['username'] = os.environ['OS_USERNAME']
d['api_key'] = os.environ['OS_PASSWORD']
d['auth_url'] = os.environ['OS_AUTH_URL']
d['project_id'] = os.environ['OS_TENANT_NAME']
return d
def main():
creds = get_creds()
nova = novaclient.Client(**creds)
#print_title("servers list")
#servers = nova.servers.list()
#p(servers)
#sprout = nova.servers.find(name='Sprout')
#p(sprout.__dict__)
f = nova.flavors.find(name = 'instantserver-small')
i = nova.images.find(name = 'Ubuntu 14.10 64b')
n = nova.networks.find(label = 'instantserver')
k = "idrsa-sansmotdepasse"
u = "#cloud-config\npassword: moutarde\nchpasswd: { expire: False }\nssh_pwauth: True"
new_server = nova.servers.create(
name = "instantserver-1",
flavor = f,
image = i,
nics = [{"net-id": n.id}],
key_name = k,
userdata = u,
security_groups = ["Tout-Autorise-Entrant-Sortant"]
)
if __name__ == '__main__':
main()
|
|
f6fce37d3121a27e9ddb0a78cc17926ac7062f94
|
osf/migrations/0142_auto_20181029_1701.py
|
osf/migrations/0142_auto_20181029_1701.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-10-29 17:01
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import OuterRef, Subquery
from osf.models import NodeLog, Node
from django_bulk_update.helper import bulk_update
def untransfer_forked_date(state, schema):
"""
Reverse mig.
Revert the last logged date of nodes whose last log is forking to the previous log's date
"""
newest = NodeLog.objects.filter(node=OuterRef('pk')).order_by('-date')
nodes = Node.objects.annotate(latest_log=Subquery(newest.values('action')[:1])).filter(latest_log='node_forked')
for node in nodes:
node.last_logged = node.logs.order_by('-date')[1].date
bulk_update(nodes, update_fields=['last_logged'])
def transfer_forked_date(state, schema):
"""
If the most recent node log is forking, transfer that log's date to the node's last_logged field
"""
newest = NodeLog.objects.filter(node=OuterRef('pk')).order_by('-date')
nodes = Node.objects.annotate(latest_log=Subquery(newest.values('action')[:1])).filter(latest_log='node_forked')
for node in nodes:
node.last_logged = node.logs.first().date
bulk_update(nodes, update_fields=['last_logged'])
class Migration(migrations.Migration):
dependencies = [
('osf', '0141_merge_20181023_1526'),
]
operations = [
migrations.RunPython(
transfer_forked_date, untransfer_forked_date
),
]
|
Add a data migration to iterate over forks and find where the last log is a fork log and set the last_logged date on the node to the date of that log.
|
Add a data migration to iterate over forks and find where the last log is a fork log and set the last_logged date on the node to the date of that log.
|
Python
|
apache-2.0
|
mfraezz/osf.io,CenterForOpenScience/osf.io,mfraezz/osf.io,Johnetordoff/osf.io,mfraezz/osf.io,Johnetordoff/osf.io,brianjgeiger/osf.io,aaxelb/osf.io,mattclark/osf.io,brianjgeiger/osf.io,aaxelb/osf.io,felliott/osf.io,Johnetordoff/osf.io,cslzchen/osf.io,pattisdr/osf.io,brianjgeiger/osf.io,baylee-d/osf.io,mfraezz/osf.io,mattclark/osf.io,CenterForOpenScience/osf.io,CenterForOpenScience/osf.io,saradbowman/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,baylee-d/osf.io,baylee-d/osf.io,adlius/osf.io,felliott/osf.io,aaxelb/osf.io,pattisdr/osf.io,CenterForOpenScience/osf.io,felliott/osf.io,adlius/osf.io,aaxelb/osf.io,Johnetordoff/osf.io,adlius/osf.io,mattclark/osf.io,cslzchen/osf.io,cslzchen/osf.io,felliott/osf.io,adlius/osf.io,pattisdr/osf.io,saradbowman/osf.io
|
Add a data migration to iterate over forks and find where the last log is a fork log and set the last_logged date on the node to the date of that log.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-10-29 17:01
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import OuterRef, Subquery
from osf.models import NodeLog, Node
from django_bulk_update.helper import bulk_update
def untransfer_forked_date(state, schema):
"""
Reverse mig.
Revert the last logged date of nodes whose last log is forking to the previous log's date
"""
newest = NodeLog.objects.filter(node=OuterRef('pk')).order_by('-date')
nodes = Node.objects.annotate(latest_log=Subquery(newest.values('action')[:1])).filter(latest_log='node_forked')
for node in nodes:
node.last_logged = node.logs.order_by('-date')[1].date
bulk_update(nodes, update_fields=['last_logged'])
def transfer_forked_date(state, schema):
"""
If the most recent node log is forking, transfer that log's date to the node's last_logged field
"""
newest = NodeLog.objects.filter(node=OuterRef('pk')).order_by('-date')
nodes = Node.objects.annotate(latest_log=Subquery(newest.values('action')[:1])).filter(latest_log='node_forked')
for node in nodes:
node.last_logged = node.logs.first().date
bulk_update(nodes, update_fields=['last_logged'])
class Migration(migrations.Migration):
dependencies = [
('osf', '0141_merge_20181023_1526'),
]
operations = [
migrations.RunPython(
transfer_forked_date, untransfer_forked_date
),
]
|
<commit_before><commit_msg>Add a data migration to iterate over forks and find where the last log is a fork log and set the last_logged date on the node to the date of that log.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-10-29 17:01
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import OuterRef, Subquery
from osf.models import NodeLog, Node
from django_bulk_update.helper import bulk_update
def untransfer_forked_date(state, schema):
"""
Reverse mig.
Revert the last logged date of nodes whose last log is forking to the previous log's date
"""
newest = NodeLog.objects.filter(node=OuterRef('pk')).order_by('-date')
nodes = Node.objects.annotate(latest_log=Subquery(newest.values('action')[:1])).filter(latest_log='node_forked')
for node in nodes:
node.last_logged = node.logs.order_by('-date')[1].date
bulk_update(nodes, update_fields=['last_logged'])
def transfer_forked_date(state, schema):
"""
If the most recent node log is forking, transfer that log's date to the node's last_logged field
"""
newest = NodeLog.objects.filter(node=OuterRef('pk')).order_by('-date')
nodes = Node.objects.annotate(latest_log=Subquery(newest.values('action')[:1])).filter(latest_log='node_forked')
for node in nodes:
node.last_logged = node.logs.first().date
bulk_update(nodes, update_fields=['last_logged'])
class Migration(migrations.Migration):
dependencies = [
('osf', '0141_merge_20181023_1526'),
]
operations = [
migrations.RunPython(
transfer_forked_date, untransfer_forked_date
),
]
|
Add a data migration to iterate over forks and find where the last log is a fork log and set the last_logged date on the node to the date of that log.
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-10-29 17:01
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import OuterRef, Subquery
from osf.models import NodeLog, Node
from django_bulk_update.helper import bulk_update
def untransfer_forked_date(state, schema):
"""
Reverse mig.
Revert the last logged date of nodes whose last log is forking to the previous log's date
"""
newest = NodeLog.objects.filter(node=OuterRef('pk')).order_by('-date')
nodes = Node.objects.annotate(latest_log=Subquery(newest.values('action')[:1])).filter(latest_log='node_forked')
for node in nodes:
node.last_logged = node.logs.order_by('-date')[1].date
bulk_update(nodes, update_fields=['last_logged'])
def transfer_forked_date(state, schema):
"""
If the most recent node log is forking, transfer that log's date to the node's last_logged field
"""
newest = NodeLog.objects.filter(node=OuterRef('pk')).order_by('-date')
nodes = Node.objects.annotate(latest_log=Subquery(newest.values('action')[:1])).filter(latest_log='node_forked')
for node in nodes:
node.last_logged = node.logs.first().date
bulk_update(nodes, update_fields=['last_logged'])
class Migration(migrations.Migration):
dependencies = [
('osf', '0141_merge_20181023_1526'),
]
operations = [
migrations.RunPython(
transfer_forked_date, untransfer_forked_date
),
]
|
<commit_before><commit_msg>Add a data migration to iterate over forks and find where the last log is a fork log and set the last_logged date on the node to the date of that log.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-10-29 17:01
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import OuterRef, Subquery
from osf.models import NodeLog, Node
from django_bulk_update.helper import bulk_update
def untransfer_forked_date(state, schema):
"""
Reverse mig.
Revert the last logged date of nodes whose last log is forking to the previous log's date
"""
newest = NodeLog.objects.filter(node=OuterRef('pk')).order_by('-date')
nodes = Node.objects.annotate(latest_log=Subquery(newest.values('action')[:1])).filter(latest_log='node_forked')
for node in nodes:
node.last_logged = node.logs.order_by('-date')[1].date
bulk_update(nodes, update_fields=['last_logged'])
def transfer_forked_date(state, schema):
"""
If the most recent node log is forking, transfer that log's date to the node's last_logged field
"""
newest = NodeLog.objects.filter(node=OuterRef('pk')).order_by('-date')
nodes = Node.objects.annotate(latest_log=Subquery(newest.values('action')[:1])).filter(latest_log='node_forked')
for node in nodes:
node.last_logged = node.logs.first().date
bulk_update(nodes, update_fields=['last_logged'])
class Migration(migrations.Migration):
dependencies = [
('osf', '0141_merge_20181023_1526'),
]
operations = [
migrations.RunPython(
transfer_forked_date, untransfer_forked_date
),
]
|
|
9e2f770c560f35cb67aac96596d92cd2d1cc1d30
|
examples/geoms.py
|
examples/geoms.py
|
from shapely.geometry import Point, LineString, Polygon
polygon = Polygon(((-1.0, -1.0), (-1.0, 1.0), (1.0, 1.0), (1.0, -1.0)))
point_r = Point(-1.5, 1.2)
point_g = Point(-1.0, 1.0)
point_b = Point(-0.5, 0.5)
line_r = LineString(((-0.5, 0.5), (0.5, 0.5)))
line_g = LineString(((1.0, -1.0), (1.8, 0.5)))
line_b = LineString(((-1.8, -1.2), (1.8, 0.5)))
def plot_point(g, o, l):
pylab.plot([g.x], [g.y], o, label=l)
def plot_line(g, o):
a = asarray(g)
pylab.plot(a[:,0], a[:,1], o)
if __name__ == "__main__":
from numpy import asarray
import pylab
fig = pylab.figure(1, figsize=(4, 3), dpi=150)
pylab.axis([-2.0, 2.0, -1.5, 1.5])
pylab.axis('off')
a = asarray(polygon.exterior)
pylab.fill(a[:,0], a[:,1], 'c')
plot_point(point_r, 'ro', 'b')
plot_point(point_g, 'go', 'c')
plot_point(point_b, 'bo', 'd')
plot_line(line_r, 'r')
plot_line(line_g, 'g')
plot_line(line_b, 'b')
pylab.show()
|
Add tutorial geometries, with plot
|
Add tutorial geometries, with plot
git-svn-id: 30e8e193f18ae0331cc1220771e45549f871ece9@871 b426a367-1105-0410-b9ff-cdf4ab011145
|
Python
|
bsd-3-clause
|
abali96/Shapely,jdmcbr/Shapely,mouadino/Shapely,mindw/shapely,abali96/Shapely,jdmcbr/Shapely,mindw/shapely,mouadino/Shapely
|
Add tutorial geometries, with plot
git-svn-id: 30e8e193f18ae0331cc1220771e45549f871ece9@871 b426a367-1105-0410-b9ff-cdf4ab011145
|
from shapely.geometry import Point, LineString, Polygon
polygon = Polygon(((-1.0, -1.0), (-1.0, 1.0), (1.0, 1.0), (1.0, -1.0)))
point_r = Point(-1.5, 1.2)
point_g = Point(-1.0, 1.0)
point_b = Point(-0.5, 0.5)
line_r = LineString(((-0.5, 0.5), (0.5, 0.5)))
line_g = LineString(((1.0, -1.0), (1.8, 0.5)))
line_b = LineString(((-1.8, -1.2), (1.8, 0.5)))
def plot_point(g, o, l):
pylab.plot([g.x], [g.y], o, label=l)
def plot_line(g, o):
a = asarray(g)
pylab.plot(a[:,0], a[:,1], o)
if __name__ == "__main__":
from numpy import asarray
import pylab
fig = pylab.figure(1, figsize=(4, 3), dpi=150)
pylab.axis([-2.0, 2.0, -1.5, 1.5])
pylab.axis('off')
a = asarray(polygon.exterior)
pylab.fill(a[:,0], a[:,1], 'c')
plot_point(point_r, 'ro', 'b')
plot_point(point_g, 'go', 'c')
plot_point(point_b, 'bo', 'd')
plot_line(line_r, 'r')
plot_line(line_g, 'g')
plot_line(line_b, 'b')
pylab.show()
|
<commit_before><commit_msg>Add tutorial geometries, with plot
git-svn-id: 30e8e193f18ae0331cc1220771e45549f871ece9@871 b426a367-1105-0410-b9ff-cdf4ab011145<commit_after>
|
from shapely.geometry import Point, LineString, Polygon
polygon = Polygon(((-1.0, -1.0), (-1.0, 1.0), (1.0, 1.0), (1.0, -1.0)))
point_r = Point(-1.5, 1.2)
point_g = Point(-1.0, 1.0)
point_b = Point(-0.5, 0.5)
line_r = LineString(((-0.5, 0.5), (0.5, 0.5)))
line_g = LineString(((1.0, -1.0), (1.8, 0.5)))
line_b = LineString(((-1.8, -1.2), (1.8, 0.5)))
def plot_point(g, o, l):
pylab.plot([g.x], [g.y], o, label=l)
def plot_line(g, o):
a = asarray(g)
pylab.plot(a[:,0], a[:,1], o)
if __name__ == "__main__":
from numpy import asarray
import pylab
fig = pylab.figure(1, figsize=(4, 3), dpi=150)
pylab.axis([-2.0, 2.0, -1.5, 1.5])
pylab.axis('off')
a = asarray(polygon.exterior)
pylab.fill(a[:,0], a[:,1], 'c')
plot_point(point_r, 'ro', 'b')
plot_point(point_g, 'go', 'c')
plot_point(point_b, 'bo', 'd')
plot_line(line_r, 'r')
plot_line(line_g, 'g')
plot_line(line_b, 'b')
pylab.show()
|
Add tutorial geometries, with plot
git-svn-id: 30e8e193f18ae0331cc1220771e45549f871ece9@871 b426a367-1105-0410-b9ff-cdf4ab011145
from shapely.geometry import Point, LineString, Polygon
polygon = Polygon(((-1.0, -1.0), (-1.0, 1.0), (1.0, 1.0), (1.0, -1.0)))
point_r = Point(-1.5, 1.2)
point_g = Point(-1.0, 1.0)
point_b = Point(-0.5, 0.5)
line_r = LineString(((-0.5, 0.5), (0.5, 0.5)))
line_g = LineString(((1.0, -1.0), (1.8, 0.5)))
line_b = LineString(((-1.8, -1.2), (1.8, 0.5)))
def plot_point(g, o, l):
pylab.plot([g.x], [g.y], o, label=l)
def plot_line(g, o):
a = asarray(g)
pylab.plot(a[:,0], a[:,1], o)
if __name__ == "__main__":
from numpy import asarray
import pylab
fig = pylab.figure(1, figsize=(4, 3), dpi=150)
pylab.axis([-2.0, 2.0, -1.5, 1.5])
pylab.axis('off')
a = asarray(polygon.exterior)
pylab.fill(a[:,0], a[:,1], 'c')
plot_point(point_r, 'ro', 'b')
plot_point(point_g, 'go', 'c')
plot_point(point_b, 'bo', 'd')
plot_line(line_r, 'r')
plot_line(line_g, 'g')
plot_line(line_b, 'b')
pylab.show()
|
<commit_before><commit_msg>Add tutorial geometries, with plot
git-svn-id: 30e8e193f18ae0331cc1220771e45549f871ece9@871 b426a367-1105-0410-b9ff-cdf4ab011145<commit_after>
from shapely.geometry import Point, LineString, Polygon
polygon = Polygon(((-1.0, -1.0), (-1.0, 1.0), (1.0, 1.0), (1.0, -1.0)))
point_r = Point(-1.5, 1.2)
point_g = Point(-1.0, 1.0)
point_b = Point(-0.5, 0.5)
line_r = LineString(((-0.5, 0.5), (0.5, 0.5)))
line_g = LineString(((1.0, -1.0), (1.8, 0.5)))
line_b = LineString(((-1.8, -1.2), (1.8, 0.5)))
def plot_point(g, o, l):
pylab.plot([g.x], [g.y], o, label=l)
def plot_line(g, o):
a = asarray(g)
pylab.plot(a[:,0], a[:,1], o)
if __name__ == "__main__":
from numpy import asarray
import pylab
fig = pylab.figure(1, figsize=(4, 3), dpi=150)
pylab.axis([-2.0, 2.0, -1.5, 1.5])
pylab.axis('off')
a = asarray(polygon.exterior)
pylab.fill(a[:,0], a[:,1], 'c')
plot_point(point_r, 'ro', 'b')
plot_point(point_g, 'go', 'c')
plot_point(point_b, 'bo', 'd')
plot_line(line_r, 'r')
plot_line(line_g, 'g')
plot_line(line_b, 'b')
pylab.show()
|
|
84c4097caf0db678859252c58c1822d12d11c924
|
polly/plugins/publish/upload_avalon_asset.py
|
polly/plugins/publish/upload_avalon_asset.py
|
from pyblish import api
from avalon.api import Session
class UploadAvalonAsset(api.InstancePlugin):
"""Write to files and metadata
This plug-in exposes your data to others by encapsulating it
into a new version.
"""
label = "Upload"
order = api.IntegratorOrder + 0.1
depends = ["IntegrateAvalonAsset"]
optional = True
active = bool(Session.get("AVALON_UPLOAD"))
families = [
"mindbender.model",
"mindbender.rig",
"mindbender.animation",
"mindbender.lookdev",
"mindbender.historyLookdev",
"mindbender.group",
"mindbender.imagesequence",
]
def process(self, instance):
from avalon import api
from avalon.vendor import requests
# Dependencies
AVALON_LOCATION = api.Session["AVALON_LOCATION"]
AVALON_USERNAME = api.Session["AVALON_USERNAME"]
AVALON_PASSWORD = api.Session["AVALON_PASSWORD"]
for src in instance.data["output"]:
assert src.startswith(api.registered_root()), (
"Output didn't reside on root, this is a bug"
)
dst = src.replace(
api.registered_root(),
AVALON_LOCATION + "/upload"
).replace("\\", "/")
self.log.info("Uploading %s -> %s" % (src, dst))
auth = requests.auth.HTTPBasicAuth(
AVALON_USERNAME, AVALON_PASSWORD
)
with open(src) as f:
response = requests.put(
dst,
data=f,
auth=auth,
headers={"Content-Type": "application/octet-stream"}
)
if not response.ok:
raise Exception(response.text)
|
Implement automatic upload, enabled via AVALON_UPLOAD
|
Implement automatic upload, enabled via AVALON_UPLOAD
|
Python
|
mit
|
mindbender-studio/config
|
Implement automatic upload, enabled via AVALON_UPLOAD
|
from pyblish import api
from avalon.api import Session
class UploadAvalonAsset(api.InstancePlugin):
"""Write to files and metadata
This plug-in exposes your data to others by encapsulating it
into a new version.
"""
label = "Upload"
order = api.IntegratorOrder + 0.1
depends = ["IntegrateAvalonAsset"]
optional = True
active = bool(Session.get("AVALON_UPLOAD"))
families = [
"mindbender.model",
"mindbender.rig",
"mindbender.animation",
"mindbender.lookdev",
"mindbender.historyLookdev",
"mindbender.group",
"mindbender.imagesequence",
]
def process(self, instance):
from avalon import api
from avalon.vendor import requests
# Dependencies
AVALON_LOCATION = api.Session["AVALON_LOCATION"]
AVALON_USERNAME = api.Session["AVALON_USERNAME"]
AVALON_PASSWORD = api.Session["AVALON_PASSWORD"]
for src in instance.data["output"]:
assert src.startswith(api.registered_root()), (
"Output didn't reside on root, this is a bug"
)
dst = src.replace(
api.registered_root(),
AVALON_LOCATION + "/upload"
).replace("\\", "/")
self.log.info("Uploading %s -> %s" % (src, dst))
auth = requests.auth.HTTPBasicAuth(
AVALON_USERNAME, AVALON_PASSWORD
)
with open(src) as f:
response = requests.put(
dst,
data=f,
auth=auth,
headers={"Content-Type": "application/octet-stream"}
)
if not response.ok:
raise Exception(response.text)
|
<commit_before><commit_msg>Implement automatic upload, enabled via AVALON_UPLOAD<commit_after>
|
from pyblish import api
from avalon.api import Session
class UploadAvalonAsset(api.InstancePlugin):
"""Write to files and metadata
This plug-in exposes your data to others by encapsulating it
into a new version.
"""
label = "Upload"
order = api.IntegratorOrder + 0.1
depends = ["IntegrateAvalonAsset"]
optional = True
active = bool(Session.get("AVALON_UPLOAD"))
families = [
"mindbender.model",
"mindbender.rig",
"mindbender.animation",
"mindbender.lookdev",
"mindbender.historyLookdev",
"mindbender.group",
"mindbender.imagesequence",
]
def process(self, instance):
from avalon import api
from avalon.vendor import requests
# Dependencies
AVALON_LOCATION = api.Session["AVALON_LOCATION"]
AVALON_USERNAME = api.Session["AVALON_USERNAME"]
AVALON_PASSWORD = api.Session["AVALON_PASSWORD"]
for src in instance.data["output"]:
assert src.startswith(api.registered_root()), (
"Output didn't reside on root, this is a bug"
)
dst = src.replace(
api.registered_root(),
AVALON_LOCATION + "/upload"
).replace("\\", "/")
self.log.info("Uploading %s -> %s" % (src, dst))
auth = requests.auth.HTTPBasicAuth(
AVALON_USERNAME, AVALON_PASSWORD
)
with open(src) as f:
response = requests.put(
dst,
data=f,
auth=auth,
headers={"Content-Type": "application/octet-stream"}
)
if not response.ok:
raise Exception(response.text)
|
Implement automatic upload, enabled via AVALON_UPLOAD
from pyblish import api
from avalon.api import Session
class UploadAvalonAsset(api.InstancePlugin):
"""Write to files and metadata
This plug-in exposes your data to others by encapsulating it
into a new version.
"""
label = "Upload"
order = api.IntegratorOrder + 0.1
depends = ["IntegrateAvalonAsset"]
optional = True
active = bool(Session.get("AVALON_UPLOAD"))
families = [
"mindbender.model",
"mindbender.rig",
"mindbender.animation",
"mindbender.lookdev",
"mindbender.historyLookdev",
"mindbender.group",
"mindbender.imagesequence",
]
def process(self, instance):
from avalon import api
from avalon.vendor import requests
# Dependencies
AVALON_LOCATION = api.Session["AVALON_LOCATION"]
AVALON_USERNAME = api.Session["AVALON_USERNAME"]
AVALON_PASSWORD = api.Session["AVALON_PASSWORD"]
for src in instance.data["output"]:
assert src.startswith(api.registered_root()), (
"Output didn't reside on root, this is a bug"
)
dst = src.replace(
api.registered_root(),
AVALON_LOCATION + "/upload"
).replace("\\", "/")
self.log.info("Uploading %s -> %s" % (src, dst))
auth = requests.auth.HTTPBasicAuth(
AVALON_USERNAME, AVALON_PASSWORD
)
with open(src) as f:
response = requests.put(
dst,
data=f,
auth=auth,
headers={"Content-Type": "application/octet-stream"}
)
if not response.ok:
raise Exception(response.text)
|
<commit_before><commit_msg>Implement automatic upload, enabled via AVALON_UPLOAD<commit_after>from pyblish import api
from avalon.api import Session
class UploadAvalonAsset(api.InstancePlugin):
"""Write to files and metadata
This plug-in exposes your data to others by encapsulating it
into a new version.
"""
label = "Upload"
order = api.IntegratorOrder + 0.1
depends = ["IntegrateAvalonAsset"]
optional = True
active = bool(Session.get("AVALON_UPLOAD"))
families = [
"mindbender.model",
"mindbender.rig",
"mindbender.animation",
"mindbender.lookdev",
"mindbender.historyLookdev",
"mindbender.group",
"mindbender.imagesequence",
]
def process(self, instance):
from avalon import api
from avalon.vendor import requests
# Dependencies
AVALON_LOCATION = api.Session["AVALON_LOCATION"]
AVALON_USERNAME = api.Session["AVALON_USERNAME"]
AVALON_PASSWORD = api.Session["AVALON_PASSWORD"]
for src in instance.data["output"]:
assert src.startswith(api.registered_root()), (
"Output didn't reside on root, this is a bug"
)
dst = src.replace(
api.registered_root(),
AVALON_LOCATION + "/upload"
).replace("\\", "/")
self.log.info("Uploading %s -> %s" % (src, dst))
auth = requests.auth.HTTPBasicAuth(
AVALON_USERNAME, AVALON_PASSWORD
)
with open(src) as f:
response = requests.put(
dst,
data=f,
auth=auth,
headers={"Content-Type": "application/octet-stream"}
)
if not response.ok:
raise Exception(response.text)
|
|
ec31a66014c00a916eb49d78557e6ddb0c4dbb50
|
dakota_utils/tests/test_write.py
|
dakota_utils/tests/test_write.py
|
#!/usr/bin/env python
#
# Tests for dakota_utils.write.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import tempfile
import shutil
from dakota_utils.file import touch, remove
from dakota_utils.write import *
nonfile = 'fbwiBVBVFVBvVB.txt'
def setup_module():
print('Write tests:')
os.environ['_test_tmp_dir'] = tempfile.mkdtemp()
def teardown_module():
shutil.rmtree(os.environ['_test_tmp_dir'])
@raises(TypeError)
def test_strip_interface_column_no_input():
'''
Tests for no input parameter to strip_interface_column().
'''
strip_interface_column()
@raises(IOError)
def test_strip_interface_column_file_does_not_exist():
'''
Tests for nonexistent input to strip_interface_column().
'''
strip_interface_column(nonfile)
|
Add unit tests for write module
|
Add unit tests for write module
|
Python
|
mit
|
mdpiper/dakota-experiments,mdpiper/dakota-experiments,mcflugen/dakota-experiments,mcflugen/dakota-experiments,mdpiper/dakota-experiments
|
Add unit tests for write module
|
#!/usr/bin/env python
#
# Tests for dakota_utils.write.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import tempfile
import shutil
from dakota_utils.file import touch, remove
from dakota_utils.write import *
nonfile = 'fbwiBVBVFVBvVB.txt'
def setup_module():
print('Write tests:')
os.environ['_test_tmp_dir'] = tempfile.mkdtemp()
def teardown_module():
shutil.rmtree(os.environ['_test_tmp_dir'])
@raises(TypeError)
def test_strip_interface_column_no_input():
'''
Tests for no input parameter to strip_interface_column().
'''
strip_interface_column()
@raises(IOError)
def test_strip_interface_column_file_does_not_exist():
'''
Tests for nonexistent input to strip_interface_column().
'''
strip_interface_column(nonfile)
|
<commit_before><commit_msg>Add unit tests for write module<commit_after>
|
#!/usr/bin/env python
#
# Tests for dakota_utils.write.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import tempfile
import shutil
from dakota_utils.file import touch, remove
from dakota_utils.write import *
nonfile = 'fbwiBVBVFVBvVB.txt'
def setup_module():
print('Write tests:')
os.environ['_test_tmp_dir'] = tempfile.mkdtemp()
def teardown_module():
shutil.rmtree(os.environ['_test_tmp_dir'])
@raises(TypeError)
def test_strip_interface_column_no_input():
'''
Tests for no input parameter to strip_interface_column().
'''
strip_interface_column()
@raises(IOError)
def test_strip_interface_column_file_does_not_exist():
'''
Tests for nonexistent input to strip_interface_column().
'''
strip_interface_column(nonfile)
|
Add unit tests for write module
#!/usr/bin/env python
#
# Tests for dakota_utils.write.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import tempfile
import shutil
from dakota_utils.file import touch, remove
from dakota_utils.write import *
nonfile = 'fbwiBVBVFVBvVB.txt'
def setup_module():
print('Write tests:')
os.environ['_test_tmp_dir'] = tempfile.mkdtemp()
def teardown_module():
shutil.rmtree(os.environ['_test_tmp_dir'])
@raises(TypeError)
def test_strip_interface_column_no_input():
'''
Tests for no input parameter to strip_interface_column().
'''
strip_interface_column()
@raises(IOError)
def test_strip_interface_column_file_does_not_exist():
'''
Tests for nonexistent input to strip_interface_column().
'''
strip_interface_column(nonfile)
|
<commit_before><commit_msg>Add unit tests for write module<commit_after>#!/usr/bin/env python
#
# Tests for dakota_utils.write.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import tempfile
import shutil
from dakota_utils.file import touch, remove
from dakota_utils.write import *
nonfile = 'fbwiBVBVFVBvVB.txt'
def setup_module():
print('Write tests:')
os.environ['_test_tmp_dir'] = tempfile.mkdtemp()
def teardown_module():
shutil.rmtree(os.environ['_test_tmp_dir'])
@raises(TypeError)
def test_strip_interface_column_no_input():
'''
Tests for no input parameter to strip_interface_column().
'''
strip_interface_column()
@raises(IOError)
def test_strip_interface_column_file_does_not_exist():
'''
Tests for nonexistent input to strip_interface_column().
'''
strip_interface_column(nonfile)
|
|
5afb810c19923ff7edeb41ba084b1d7b85925840
|
markov/markov2.py
|
markov/markov2.py
|
#!python3
import string
import random
import time
import re
import sys
'''
This is an implementation of a markov chain used for text generation.
Just pass a file name as an argument and it should load it up, build a markov
chain with a state for each word(s), and start walking through the chain, writing
incoherent text to the terminal.
'''
asciiset = set(string.ascii_letters)
asciiset.add(' ')
asciiset.add('.')
def strip2ascii(txt):
return ''.join([ch for ch in txt if ch in asciiset])
def tokenize(fname):
'''
Generate tokens defined by
- Sequences of characters that aren't spaces
- Periods
    For example, 'This is a test. Ok.' => ('This', 'is', 'a', 'test', '.', 'Ok', '.')
'''
with open(fname, 'r') as f:
for line in f:
stripped = strip2ascii(line)
for word in stripped.split():
if word[-1] == '.':
yield word[:-1]
yield '.'
else:
yield word
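# Map each window of `order` consecutive tokens to the list of tokens observed to follow it.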
def buildtransitionmap(tokens, order):
dct = {}
prev = ('',)*order
for token in tokens:
if prev in dct:
dct[prev].append(token)
else:
dct[prev] = [token]
prev = prev[1:]+(token,)
return dct
def transition(word, transmap):
return random.choice(transmap[word])
def eternalramble(fname, order):
'''
Walk through the markov chain printing out words to the terminal one at a time
'''
transmap = buildtransitionmap(tokenize(fname), order)
prev = random.choice(list(transmap.keys()))
while True:
word = transition(prev, transmap)
print(word, end=' ')
prev = prev[1:]+(word,)
sys.stdout.flush()
time.sleep(0.25)
def printusage():
print('Usage: markov filename order')
print(' filename: the filename of the text to base the markov chain on.')
print(' order: how many consecutive words make up each state (2 works well)')
def launch():
if len(sys.argv) != 3:
printusage()
return
try:
order = int(sys.argv[2])
except:
printusage()
return
eternalramble(sys.argv[1], order)
if __name__ == '__main__':
launch()
|
Add second markov text generator v2 with order parameter
|
Add second markov text generator v2 with order parameter
|
Python
|
mit
|
tmerr/trevornet
|
Add second markov text generator v2 with order parameter
|
#!python3
import string
import random
import time
import re
import sys
'''
This is an implementation of a markov chain used for text generation.
Just pass a file name as an argument and it should load it up, build a markov
chain with a state for each word(s), and start walking through the chain, writing
incoherent text to the terminal.
'''
asciiset = set(string.ascii_letters)
asciiset.add(' ')
asciiset.add('.')
def strip2ascii(txt):
return ''.join([ch for ch in txt if ch in asciiset])
def tokenize(fname):
'''
Generate tokens defined by
- Sequences of characters that aren't spaces
- Periods
    For example, 'This is a test. Ok.' => ('This', 'is', 'a', 'test', '.', 'Ok', '.')
'''
with open(fname, 'r') as f:
for line in f:
stripped = strip2ascii(line)
for word in stripped.split():
if word[-1] == '.':
yield word[:-1]
yield '.'
else:
yield word
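# Map each window of `order` consecutive tokens to the list of tokens observed to follow it.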
def buildtransitionmap(tokens, order):
dct = {}
prev = ('',)*order
for token in tokens:
if prev in dct:
dct[prev].append(token)
else:
dct[prev] = [token]
prev = prev[1:]+(token,)
return dct
def transition(word, transmap):
return random.choice(transmap[word])
def eternalramble(fname, order):
'''
Walk through the markov chain printing out words to the terminal one at a time
'''
transmap = buildtransitionmap(tokenize(fname), order)
prev = random.choice(list(transmap.keys()))
while True:
word = transition(prev, transmap)
print(word, end=' ')
prev = prev[1:]+(word,)
sys.stdout.flush()
time.sleep(0.25)
def printusage():
print('Usage: markov filename order')
print(' filename: the filename of the text to base the markov chain on.')
print(' order: how many consecutive words make up each state (2 works well)')
def launch():
if len(sys.argv) != 3:
printusage()
return
try:
order = int(sys.argv[2])
except:
printusage()
return
eternalramble(sys.argv[1], order)
if __name__ == '__main__':
launch()
|
<commit_before><commit_msg>Add second markov text generator v2 with order parameter<commit_after>
|
#!python3
import string
import random
import time
import re
import sys
'''
This is an implementation of a markov chain used for text generation.
Just pass a file name as an argument and it should load it up, build a markov
chain with a state for each word(s), and start walking through the chain, writing
incoherent text to the terminal.
'''
asciiset = set(string.ascii_letters)
asciiset.add(' ')
asciiset.add('.')
def strip2ascii(txt):
return ''.join([ch for ch in txt if ch in asciiset])
def tokenize(fname):
'''
Generate tokens defined by
- Sequences of characters that aren't spaces
- Periods
    For example, 'This is a test. Ok.' => ('This', 'is', 'a', 'test', '.', 'Ok', '.')
'''
with open(fname, 'r') as f:
for line in f:
stripped = strip2ascii(line)
for word in stripped.split():
if word[-1] == '.':
yield word[:-1]
yield '.'
else:
yield word
def buildtransitionmap(tokens, order):
dct = {}
prev = ('',)*order
for token in tokens:
if prev in dct:
dct[prev].append(token)
else:
dct[prev] = [token]
prev = prev[1:]+(token,)
return dct
def transition(word, transmap):
return random.choice(transmap[word])
def eternalramble(fname, order):
'''
Walk through the markov chain printing out words to the terminal one at a time
'''
transmap = buildtransitionmap(tokenize(fname), order)
prev = random.choice(list(transmap.keys()))
while True:
word = transition(prev, transmap)
print(word, end=' ')
prev = prev[1:]+(word,)
sys.stdout.flush()
time.sleep(0.25)
def printusage():
print('Usage: markov filename order')
print(' filename: the filename of the text to base the markov chain on.')
print(' order: how many consecutive words make up each state (2 works well)')
def launch():
if len(sys.argv) != 3:
printusage()
return
try:
order = int(sys.argv[2])
except:
printusage()
return
eternalramble(sys.argv[1], order)
if __name__ == '__main__':
launch()
|
Add second markov text generator v2 with order parameter#!python3
import string
import random
import time
import re
import sys
'''
This is an implementation of a markov chain used for text generation.
Just pass a file name as an argument and it should load it up, build a markov
chain with a state for each word(s), and start walking through the chain, writing
incoherent text to the terminal.
'''
asciiset = set(string.ascii_letters)
asciiset.add(' ')
asciiset.add('.')
def strip2ascii(txt):
return ''.join([ch for ch in txt if ch in asciiset])
def tokenize(fname):
'''
Generate tokens defined by
- Sequences of characters that aren't spaces
- Periods
    For example, 'This is a test. Ok.' => ('This', 'is', 'a', 'test', '.', 'Ok', '.')
'''
with open(fname, 'r') as f:
for line in f:
stripped = strip2ascii(line)
for word in stripped.split():
if word[-1] == '.':
yield word[:-1]
yield '.'
else:
yield word
def buildtransitionmap(tokens, order):
dct = {}
prev = ('',)*order
for token in tokens:
if prev in dct:
dct[prev].append(token)
else:
dct[prev] = [token]
prev = prev[1:]+(token,)
return dct
def transition(word, transmap):
return random.choice(transmap[word])
def eternalramble(fname, order):
'''
Walk through the markov chain printing out words to the terminal one at a time
'''
transmap = buildtransitionmap(tokenize(fname), order)
prev = random.choice(list(transmap.keys()))
while True:
word = transition(prev, transmap)
print(word, end=' ')
prev = prev[1:]+(word,)
sys.stdout.flush()
time.sleep(0.25)
def printusage():
print('Usage: markov filename order')
print(' filename: the filename of the text to base the markov chain on.')
print(' order: how many consecutive words make up each state (2 works well)')
def launch():
if len(sys.argv) != 3:
printusage()
return
try:
order = int(sys.argv[2])
except:
printusage()
return
eternalramble(sys.argv[1], order)
if __name__ == '__main__':
launch()
|
<commit_before><commit_msg>Add second markov text generator v2 with order parameter<commit_after>#!python3
import string
import random
import time
import re
import sys
'''
This is an implementation of a markov chain used for text generation.
Just pass a file name as an argument and it should load it up, build a markov
chain with a state for each word(s), and start walking through the chain, writing
incoherent text to the terminal.
'''
asciiset = set(string.ascii_letters)
asciiset.add(' ')
asciiset.add('.')
def strip2ascii(txt):
return ''.join([ch for ch in txt if ch in asciiset])
def tokenize(fname):
'''
Generate tokens defined by
- Sequences of characters that aren't spaces
- Periods
    For example, 'This is a test. Ok.' => ('This', 'is', 'a', 'test', '.', 'Ok', '.')
'''
with open(fname, 'r') as f:
for line in f:
stripped = strip2ascii(line)
for word in stripped.split():
if word[-1] == '.':
yield word[:-1]
yield '.'
else:
yield word
def buildtransitionmap(tokens, order):
dct = {}
prev = ('',)*order
for token in tokens:
if prev in dct:
dct[prev].append(token)
else:
dct[prev] = [token]
prev = prev[1:]+(token,)
return dct
def transition(word, transmap):
return random.choice(transmap[word])
def eternalramble(fname, order):
'''
Walk through the markov chain printing out words to the terminal one at a time
'''
transmap = buildtransitionmap(tokenize(fname), order)
prev = random.choice(list(transmap.keys()))
while True:
word = transition(prev, transmap)
print(word, end=' ')
prev = prev[1:]+(word,)
sys.stdout.flush()
time.sleep(0.25)
def printusage():
print('Usage: markov filename order')
print(' filename: the filename of the text to base the markov chain on.')
print(' order: how many consecutive words make up each state (2 works well)')
def launch():
if len(sys.argv) != 3:
printusage()
return
try:
order = int(sys.argv[2])
except:
printusage()
return
eternalramble(sys.argv[1], order)
if __name__ == '__main__':
launch()
|
|
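The generator above keys its transition map on tuples of the previous 'order' words and then walks the chain forever. A minimal self-contained sketch of the same idea, offered as an illustration only: it feeds an in-memory sample string instead of a file and bounds the walk, so it can be run and inspected directly (the sample text and the 12-word limit are arbitrary placeholders).

import random

def build_map(tokens, order=2):
    chain = {}
    prev = ('',) * order
    for tok in tokens:
        chain.setdefault(prev, []).append(tok)   # tok is a successor of the prev-tuple state
        prev = prev[1:] + (tok,)
    return chain

sample = "the cat sat on the mat . the cat ran away ."
chain = build_map(sample.split(), order=2)
state = ('', '')              # same all-empty start state the script uses
out = []
for _ in range(12):           # bounded walk instead of an infinite loop
    choices = chain.get(state)
    if not choices:
        break                 # dead end: this state only appeared as the final one
    word = random.choice(choices)
    out.append(word)
    state = state[1:] + (word,)
print(' '.join(out))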
05306ceea2d33c8732f339e26655927474b8f9c7
|
deploy.py
|
deploy.py
|
#!/usr/bin/python
from subprocess import check_output, call
import argparse
def main():
args = process_args()
output = check_output(['git', 'status', '--porcelain', '-uno'])
filelist = output.split('\n')
for staged_file in filelist:
if staged_file:
deploy_code(staged_file, args)
def process_args():
arg_parser = argparse.ArgumentParser(description='deploy.py')
arg_parser.add_argument('dest_git_root',
action='store',
help='The remote git repository. Including hostname and file path')
arg_parser.add_argument('--method',
action='store',
default='scp',
metavar='Program for transmission',
help='The program which will do the transmission. Default is scp.')
arg_parser.add_argument('--port',
action='store',
default='22',
metavar='Port Number',
help='The Port Number. Default is 22.')
return arg_parser.parse_args()
def deploy_code(staged_file, args):
tag, filename = staged_file.split(' ')
if tag is 'R':
filename = filename.split('->')[1].strip()
if args.method is 'scp':
print 'scp ' + filename + ' to ' + args.dest_git_root
call(['scp', '-P', args.port , filename, args.dest_git_root + filename])
if __name__ == '__main__':
main()
|
Deploy staged files by using scp
|
Deploy staged files by using scp
|
Python
|
mit
|
csterryliu/deploy-changed-code
|
Deploy staged files by using scp
|
#!/usr/bin/python
from subprocess import check_output, call
import argparse
def main():
args = process_args()
output = check_output(['git', 'status', '--porcelain', '-uno'])
filelist = output.split('\n')
for staged_file in filelist:
if staged_file:
deploy_code(staged_file, args)
def process_args():
arg_parser = argparse.ArgumentParser(description='deploy.py')
arg_parser.add_argument('dest_git_root',
action='store',
help='The remote git repository. Including hostname and file path')
arg_parser.add_argument('--method',
action='store',
default='scp',
metavar='Program for transmission',
help='The program which will do the transmission. Default is scp.')
arg_parser.add_argument('--port',
action='store',
default='22',
metavar='Port Number',
help='The Port Number. Default is 22.')
return arg_parser.parse_args()
def deploy_code(staged_file, args):
tag, filename = staged_file.split(' ')
if tag is 'R':
filename = filename.split('->')[1].strip()
if args.method is 'scp':
print 'scp ' + filename + ' to ' + args.dest_git_root
call(['scp', '-P', args.port , filename, args.dest_git_root + filename])
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Deploy staged files by using scp<commit_after>
|
#!/usr/bin/python
from subprocess import check_output, call
import argparse
def main():
args = process_args()
output = check_output(['git', 'status', '--porcelain', '-uno'])
filelist = output.split('\n')
for staged_file in filelist:
if staged_file:
deploy_code(staged_file, args)
def process_args():
arg_parser = argparse.ArgumentParser(description='deploy.py')
arg_parser.add_argument('dest_git_root',
action='store',
help='The remote git repository. Including hostname and file path')
arg_parser.add_argument('--method',
action='store',
default='scp',
metavar='Program for transmission',
help='The program which will do the transmission. Default is scp.')
arg_parser.add_argument('--port',
action='store',
default='22',
metavar='Port Number',
help='The Port Number. Default is 22.')
return arg_parser.parse_args()
def deploy_code(staged_file, args):
tag, filename = staged_file.split(' ')
if tag is 'R':
filename = filename.split('->')[1].strip()
if args.method is 'scp':
print 'scp ' + filename + ' to ' + args.dest_git_root
call(['scp', '-P', args.port , filename, args.dest_git_root + filename])
if __name__ == '__main__':
main()
|
Deploy staged files by using scp#!/usr/bin/python
from subprocess import check_output, call
import argparse
def main():
args = process_args()
output = check_output(['git', 'status', '--porcelain', '-uno'])
filelist = output.split('\n')
for staged_file in filelist:
if staged_file:
deploy_code(staged_file, args)
def process_args():
arg_parser = argparse.ArgumentParser(description='deploy.py')
arg_parser.add_argument('dest_git_root',
action='store',
help='The remote git repository. Including hostname and file path')
arg_parser.add_argument('--method',
action='store',
default='scp',
metavar='Program for transmission',
help='The program which will do the transmission. Default is scp.')
arg_parser.add_argument('--port',
action='store',
default='22',
metavar='Port Number',
help='The Port Number. Default is 22.')
return arg_parser.parse_args()
def deploy_code(staged_file, args):
tag, filename = staged_file.split(' ')
if tag is 'R':
filename = filename.split('->')[1].strip()
if args.method is 'scp':
print 'scp ' + filename + ' to ' + args.dest_git_root
call(['scp', '-P', args.port , filename, args.dest_git_root + filename])
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Deploy staged files by using scp<commit_after>#!/usr/bin/python
from subprocess import check_output, call
import argparse
def main():
args = process_args()
output = check_output(['git', 'status', '--porcelain', '-uno'])
filelist = output.split('\n')
for staged_file in filelist:
if staged_file:
deploy_code(staged_file, args)
def process_args():
arg_parser = argparse.ArgumentParser(description='deploy.py')
arg_parser.add_argument('dest_git_root',
action='store',
help='The remote git repository. Including hostname and file path')
arg_parser.add_argument('--method',
action='store',
default='scp',
metavar='Program for transmission',
help='The program which will do the transmission. Default is scp.')
arg_parser.add_argument('--port',
action='store',
default='22',
metavar='Port Number',
help='The Port Number. Default is 22.')
return arg_parser.parse_args()
def deploy_code(staged_file, args):
tag, filename = staged_file.split(' ')
if tag is 'R':
filename = filename.split('->')[1].strip()
if args.method is 'scp':
print 'scp ' + filename + ' to ' + args.dest_git_root
call(['scp', '-P', args.port , filename, args.dest_git_root + filename])
if __name__ == '__main__':
main()
|
|
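Note that deploy_code() above compares status tags with 'is' ("tag is 'R'", "args.method is 'scp'"), which only works when CPython happens to intern those strings; '==' is the reliable comparison. A small illustrative sketch of parsing git status --porcelain lines that way, using made-up sample lines rather than a live repository:

sample = [
    "M  backend/app.py",
    "R  docs/old.md -> docs/new.md",
]
for line in sample:
    tag = line[:2].strip()            # two status columns precede the path
    path = line[3:]
    if tag == 'R':                    # equality, not identity, for string tags
        path = path.split(' -> ')[1]  # renamed entries list 'old -> new'
    print(tag, path)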
fbc2642b0361d48579c6556817fab02e9a7cfda8
|
gen/azure/calc.py
|
gen/azure/calc.py
|
import pkg_resources
import yaml
entry = {
'must': {
'resolvers': '["168.63.129.16"]',
'ip_detect_contents': yaml.dump(pkg_resources.resource_string('gen', 'ip-detect/aws.sh').decode()),
'master_discovery': 'static',
'exhibitor_storage_backend': 'azure',
'master_cloud_config': '{{ master_cloud_config }}',
'slave_cloud_config': '{{ slave_cloud_config }}',
'slave_public_cloud_config': '{{ slave_public_cloud_config }}',
'oauth_enabled': "[[[variables('oauthEnabled')]]]",
'oauth_available': 'true'
}
}
|
import pkg_resources
import yaml
entry = {
'must': {
'resolvers': '["168.63.129.16"]',
'ip_detect_contents': yaml.dump(pkg_resources.resource_string('gen', 'ip-detect/azure.sh').decode()),
'master_discovery': 'static',
'exhibitor_storage_backend': 'azure',
'master_cloud_config': '{{ master_cloud_config }}',
'slave_cloud_config': '{{ slave_cloud_config }}',
'slave_public_cloud_config': '{{ slave_public_cloud_config }}',
'oauth_enabled': "[[[variables('oauthEnabled')]]]",
'oauth_available': 'true'
}
}
|
Fix Azure to use it's ip-detect script rather than AWS'
|
Fix Azure to use it's ip-detect script rather than AWS'
|
Python
|
apache-2.0
|
surdy/dcos,kensipe/dcos,xinxian0458/dcos,mnaboka/dcos,darkonie/dcos,dcos/dcos,lingmann/dcos,mesosphere-mergebot/dcos,mellenburg/dcos,jeid64/dcos,kensipe/dcos,mnaboka/dcos,vishnu2kmohan/dcos,amitaekbote/dcos,kensipe/dcos,kensipe/dcos,xinxian0458/dcos,vishnu2kmohan/dcos,darkonie/dcos,dcos/dcos,dcos/dcos,xinxian0458/dcos,mesosphere-mergebot/dcos,vishnu2kmohan/dcos,lingmann/dcos,darkonie/dcos,xinxian0458/dcos,mesosphere-mergebot/mergebot-test-dcos,mnaboka/dcos,BenWhitehead/dcos,branden/dcos,mesosphere-mergebot/mergebot-test-dcos,surdy/dcos,mesosphere-mergebot/mergebot-test-dcos,vishnu2kmohan/dcos,mellenburg/dcos,amitaekbote/dcos,asridharan/dcos,dcos/dcos,jeid64/dcos,darkonie/dcos,mesosphere-mergebot/dcos,branden/dcos,mesosphere-mergebot/mergebot-test-dcos,mellenburg/dcos,GoelDeepak/dcos,mnaboka/dcos,asridharan/dcos,mesosphere-mergebot/dcos,darkonie/dcos,jeid64/dcos,surdy/dcos,GoelDeepak/dcos,asridharan/dcos,amitaekbote/dcos,asridharan/dcos,branden/dcos,BenWhitehead/dcos,BenWhitehead/dcos,lingmann/dcos,jeid64/dcos,amitaekbote/dcos,branden/dcos,dcos/dcos,surdy/dcos,mellenburg/dcos,GoelDeepak/dcos,BenWhitehead/dcos,mnaboka/dcos,lingmann/dcos,GoelDeepak/dcos
|
import pkg_resources
import yaml
entry = {
'must': {
'resolvers': '["168.63.129.16"]',
'ip_detect_contents': yaml.dump(pkg_resources.resource_string('gen', 'ip-detect/aws.sh').decode()),
'master_discovery': 'static',
'exhibitor_storage_backend': 'azure',
'master_cloud_config': '{{ master_cloud_config }}',
'slave_cloud_config': '{{ slave_cloud_config }}',
'slave_public_cloud_config': '{{ slave_public_cloud_config }}',
'oauth_enabled': "[[[variables('oauthEnabled')]]]",
'oauth_available': 'true'
}
}
Fix Azure to use it's ip-detect script rather than AWS'
|
import pkg_resources
import yaml
entry = {
'must': {
'resolvers': '["168.63.129.16"]',
'ip_detect_contents': yaml.dump(pkg_resources.resource_string('gen', 'ip-detect/azure.sh').decode()),
'master_discovery': 'static',
'exhibitor_storage_backend': 'azure',
'master_cloud_config': '{{ master_cloud_config }}',
'slave_cloud_config': '{{ slave_cloud_config }}',
'slave_public_cloud_config': '{{ slave_public_cloud_config }}',
'oauth_enabled': "[[[variables('oauthEnabled')]]]",
'oauth_available': 'true'
}
}
|
<commit_before>import pkg_resources
import yaml
entry = {
'must': {
'resolvers': '["168.63.129.16"]',
'ip_detect_contents': yaml.dump(pkg_resources.resource_string('gen', 'ip-detect/aws.sh').decode()),
'master_discovery': 'static',
'exhibitor_storage_backend': 'azure',
'master_cloud_config': '{{ master_cloud_config }}',
'slave_cloud_config': '{{ slave_cloud_config }}',
'slave_public_cloud_config': '{{ slave_public_cloud_config }}',
'oauth_enabled': "[[[variables('oauthEnabled')]]]",
'oauth_available': 'true'
}
}
<commit_msg>Fix Azure to use it's ip-detect script rather than AWS'<commit_after>
|
import pkg_resources
import yaml
entry = {
'must': {
'resolvers': '["168.63.129.16"]',
'ip_detect_contents': yaml.dump(pkg_resources.resource_string('gen', 'ip-detect/azure.sh').decode()),
'master_discovery': 'static',
'exhibitor_storage_backend': 'azure',
'master_cloud_config': '{{ master_cloud_config }}',
'slave_cloud_config': '{{ slave_cloud_config }}',
'slave_public_cloud_config': '{{ slave_public_cloud_config }}',
'oauth_enabled': "[[[variables('oauthEnabled')]]]",
'oauth_available': 'true'
}
}
|
import pkg_resources
import yaml
entry = {
'must': {
'resolvers': '["168.63.129.16"]',
'ip_detect_contents': yaml.dump(pkg_resources.resource_string('gen', 'ip-detect/aws.sh').decode()),
'master_discovery': 'static',
'exhibitor_storage_backend': 'azure',
'master_cloud_config': '{{ master_cloud_config }}',
'slave_cloud_config': '{{ slave_cloud_config }}',
'slave_public_cloud_config': '{{ slave_public_cloud_config }}',
'oauth_enabled': "[[[variables('oauthEnabled')]]]",
'oauth_available': 'true'
}
}
Fix Azure to use it's ip-detect script rather than AWS'import pkg_resources
import yaml
entry = {
'must': {
'resolvers': '["168.63.129.16"]',
'ip_detect_contents': yaml.dump(pkg_resources.resource_string('gen', 'ip-detect/azure.sh').decode()),
'master_discovery': 'static',
'exhibitor_storage_backend': 'azure',
'master_cloud_config': '{{ master_cloud_config }}',
'slave_cloud_config': '{{ slave_cloud_config }}',
'slave_public_cloud_config': '{{ slave_public_cloud_config }}',
'oauth_enabled': "[[[variables('oauthEnabled')]]]",
'oauth_available': 'true'
}
}
|
<commit_before>import pkg_resources
import yaml
entry = {
'must': {
'resolvers': '["168.63.129.16"]',
'ip_detect_contents': yaml.dump(pkg_resources.resource_string('gen', 'ip-detect/aws.sh').decode()),
'master_discovery': 'static',
'exhibitor_storage_backend': 'azure',
'master_cloud_config': '{{ master_cloud_config }}',
'slave_cloud_config': '{{ slave_cloud_config }}',
'slave_public_cloud_config': '{{ slave_public_cloud_config }}',
'oauth_enabled': "[[[variables('oauthEnabled')]]]",
'oauth_available': 'true'
}
}
<commit_msg>Fix Azure to use it's ip-detect script rather than AWS'<commit_after>import pkg_resources
import yaml
entry = {
'must': {
'resolvers': '["168.63.129.16"]',
'ip_detect_contents': yaml.dump(pkg_resources.resource_string('gen', 'ip-detect/azure.sh').decode()),
'master_discovery': 'static',
'exhibitor_storage_backend': 'azure',
'master_cloud_config': '{{ master_cloud_config }}',
'slave_cloud_config': '{{ slave_cloud_config }}',
'slave_public_cloud_config': '{{ slave_public_cloud_config }}',
'oauth_enabled': "[[[variables('oauthEnabled')]]]",
'oauth_available': 'true'
}
}
|
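The fix above swaps the hard-coded aws.sh resource for azure.sh. A tiny sketch of what parameterising the provider name could look like; the helper below is purely illustrative, not part of the gen package, and only shows how the ip-detect/<provider>.sh path loaded via pkg_resources is derived:

def ip_detect_resource(provider):
    # illustrative helper: build the resource path that the entry above loads
    return 'ip-detect/{}.sh'.format(provider)

print(ip_detect_resource('azure'))   # ip-detect/azure.sh
print(ip_detect_resource('aws'))     # ip-detect/aws.sh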
91b63107a77bc9153151c2aede3e834374aa775b
|
backend/scripts/copyproj.py
|
backend/scripts/copyproj.py
|
#!/usr/bin/env python
from optparse import OptionParser
import rethinkdb as r
import sys
import shutil
import os
import errno
def mkdirp(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def file_dir(path, fid):
pieces = fid.split("-")
path = os.path.join(path, pieces[1][0:2], pieces[1][2:4])
return path
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
parser.add_option("-p", "--project", dest="project", type="string",
help="project id to copy over")
parser.add_option("-s", "--src", dest="src", type="string",
help="location of directory to copy from",
default="/mcfs/data/test")
parser.add_option("-d", "--dest", dest="dest", type="string",
help="directory to copy to")
(options, args) = parser.parse_args()
if options.dest is None:
print "You must specify a destination (--to) directory"
sys.exit(1)
if options.project is None:
print "You must specify a project id to copy (--project)"
sys.exit(1)
conn = r.connect("localhost", options.port, db="materialscommons")
rql = r.table("project2datadir")\
.get_all(options.project, index="project_id")\
.eq_join("datadir_id", r.table("datadir2datafile"),
index="datadir_id")\
.zip()\
.eq_join("datafile_id", r.table("datafiles"))\
.zip()
for fentry in rql.run(conn):
src_dir = file_dir(options.src, fentry['id'])
dest = file_dir(options.dest, fentry['id'])
print "Copy %s from %s to %s" % (fentry['name'], src_dir, dest)
mkdirp(dest)
src_file_path = os.path.join(src_dir, fentry['id'])
dest_file_path = os.path.join(dest, fentry['id'])
try:
shutil.copy(src_file_path, dest_file_path)
except:
print "Problem copying file %s" % (fentry['name'])
|
Add script to copy over project files to a directory tree.
|
Add script to copy over project files to a directory tree.
|
Python
|
mit
|
materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org
|
Add script to copy over project files to a directory tree.
|
#!/usr/bin/env python
from optparse import OptionParser
import rethinkdb as r
import sys
import shutil
import os
import errno
def mkdirp(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def file_dir(path, fid):
pieces = fid.split("-")
path = os.path.join(path, pieces[1][0:2], pieces[1][2:4])
return path
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
parser.add_option("-p", "--project", dest="project", type="string",
help="project id to copy over")
parser.add_option("-s", "--src", dest="src", type="string",
help="location of directory to copy from",
default="/mcfs/data/test")
parser.add_option("-d", "--dest", dest="dest", type="string",
help="directory to copy to")
(options, args) = parser.parse_args()
if options.dest is None:
print "You must specify a destination (--to) directory"
sys.exit(1)
if options.project is None:
print "You must specify a project id to copy (--project)"
sys.exit(1)
conn = r.connect("localhost", options.port, db="materialscommons")
rql = r.table("project2datadir")\
.get_all(options.project, index="project_id")\
.eq_join("datadir_id", r.table("datadir2datafile"),
index="datadir_id")\
.zip()\
.eq_join("datafile_id", r.table("datafiles"))\
.zip()
for fentry in rql.run(conn):
src_dir = file_dir(options.src, fentry['id'])
dest = file_dir(options.dest, fentry['id'])
print "Copy %s from %s to %s" % (fentry['name'], src_dir, dest)
mkdirp(dest)
src_file_path = os.path.join(src_dir, fentry['id'])
dest_file_path = os.path.join(dest, fentry['id'])
try:
shutil.copy(src_file_path, dest_file_path)
except:
print "Problem copying file %s" % (fentry['name'])
|
<commit_before><commit_msg>Add script to copy over project files to a directory tree.<commit_after>
|
#!/usr/bin/env python
from optparse import OptionParser
import rethinkdb as r
import sys
import shutil
import os
import errno
def mkdirp(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def file_dir(path, fid):
pieces = fid.split("-")
path = os.path.join(path, pieces[1][0:2], pieces[1][2:4])
return path
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
parser.add_option("-p", "--project", dest="project", type="string",
help="project id to copy over")
parser.add_option("-s", "--src", dest="src", type="string",
help="location of directory to copy from",
default="/mcfs/data/test")
parser.add_option("-d", "--dest", dest="dest", type="string",
help="directory to copy to")
(options, args) = parser.parse_args()
if options.dest is None:
print "You must specify a destination (--to) directory"
sys.exit(1)
if options.project is None:
print "You must specify a project id to copy (--project)"
sys.exit(1)
conn = r.connect("localhost", options.port, db="materialscommons")
rql = r.table("project2datadir")\
.get_all(options.project, index="project_id")\
.eq_join("datadir_id", r.table("datadir2datafile"),
index="datadir_id")\
.zip()\
.eq_join("datafile_id", r.table("datafiles"))\
.zip()
for fentry in rql.run(conn):
src_dir = file_dir(options.src, fentry['id'])
dest = file_dir(options.dest, fentry['id'])
print "Copy %s from %s to %s" % (fentry['name'], src_dir, dest)
mkdirp(dest)
src_file_path = os.path.join(src_dir, fentry['id'])
dest_file_path = os.path.join(dest, fentry['id'])
try:
shutil.copy(src_file_path, dest_file_path)
except:
print "Problem copying file %s" % (fentry['name'])
|
Add script to copy over project files to a directory tree.#!/usr/bin/env python
from optparse import OptionParser
import rethinkdb as r
import sys
import shutil
import os
import errno
def mkdirp(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def file_dir(path, fid):
pieces = fid.split("-")
path = os.path.join(path, pieces[1][0:2], pieces[1][2:4])
return path
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
parser.add_option("-p", "--project", dest="project", type="string",
help="project id to copy over")
parser.add_option("-s", "--src", dest="src", type="string",
help="location of directory to copy from",
default="/mcfs/data/test")
parser.add_option("-d", "--dest", dest="dest", type="string",
help="directory to copy to")
(options, args) = parser.parse_args()
if options.dest is None:
print "You must specify a destination (--to) directory"
sys.exit(1)
if options.project is None:
print "You must specify a project id to copy (--project)"
sys.exit(1)
conn = r.connect("localhost", options.port, db="materialscommons")
rql = r.table("project2datadir")\
.get_all(options.project, index="project_id")\
.eq_join("datadir_id", r.table("datadir2datafile"),
index="datadir_id")\
.zip()\
.eq_join("datafile_id", r.table("datafiles"))\
.zip()
for fentry in rql.run(conn):
src_dir = file_dir(options.src, fentry['id'])
dest = file_dir(options.dest, fentry['id'])
print "Copy %s from %s to %s" % (fentry['name'], src_dir, dest)
mkdirp(dest)
src_file_path = os.path.join(src_dir, fentry['id'])
dest_file_path = os.path.join(dest, fentry['id'])
try:
shutil.copy(src_file_path, dest_file_path)
except:
print "Problem copying file %s" % (fentry['name'])
|
<commit_before><commit_msg>Add script to copy over project files to a directory tree.<commit_after>#!/usr/bin/env python
from optparse import OptionParser
import rethinkdb as r
import sys
import shutil
import os
import errno
def mkdirp(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def file_dir(path, fid):
pieces = fid.split("-")
path = os.path.join(path, pieces[1][0:2], pieces[1][2:4])
return path
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
parser.add_option("-p", "--project", dest="project", type="string",
help="project id to copy over")
parser.add_option("-s", "--src", dest="src", type="string",
help="location of directory to copy from",
default="/mcfs/data/test")
parser.add_option("-d", "--dest", dest="dest", type="string",
help="directory to copy to")
(options, args) = parser.parse_args()
if options.dest is None:
print "You must specify a destination (--to) directory"
sys.exit(1)
if options.project is None:
print "You must specify a project id to copy (--project)"
sys.exit(1)
conn = r.connect("localhost", options.port, db="materialscommons")
rql = r.table("project2datadir")\
.get_all(options.project, index="project_id")\
.eq_join("datadir_id", r.table("datadir2datafile"),
index="datadir_id")\
.zip()\
.eq_join("datafile_id", r.table("datafiles"))\
.zip()
for fentry in rql.run(conn):
src_dir = file_dir(options.src, fentry['id'])
dest = file_dir(options.dest, fentry['id'])
print "Copy %s from %s to %s" % (fentry['name'], src_dir, dest)
mkdirp(dest)
src_file_path = os.path.join(src_dir, fentry['id'])
dest_file_path = os.path.join(dest, fentry['id'])
try:
shutil.copy(src_file_path, dest_file_path)
except:
print "Problem copying file %s" % (fentry['name'])
|
|
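copyproj.py shards each file into two directory levels taken from the second dash-separated piece of its id. The helper in isolation, with a made-up id, shows the layout it produces:

import os

def file_dir(root, fid):
    piece = fid.split('-')[1]                         # e.g. 'a1b2c3d4'
    return os.path.join(root, piece[0:2], piece[2:4])

print(file_dir('/mcfs/data/test', 'df-a1b2c3d4'))     # /mcfs/data/test/a1/b2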
5684db6e73de6a3358c4f669facbdd47fb0d0b9e
|
setup.py
|
setup.py
|
#!/usr/bin/env python
import os
import sys
import skosprovider
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
packages = [
'skosprovider',
]
requires = [
'language-tags',
'rfc3987',
'pyld',
'html5lib'
]
setup(
name='skosprovider',
version='0.8.0',
description='Abstraction layer for SKOS vocabularies.',
long_description=open('README.rst').read(),
author='Koen Van Daele',
author_email='koen_van_daele@telenet.be',
url='http://github.com/koenedaele/skosprovider',
packages=packages,
package_data={'': ['LICENSE']},
package_dir={'skosprovider': 'skosprovider'},
include_package_data=True,
install_requires=requires,
license='MIT',
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
test_suite='nose.collector'
)
|
#!/usr/bin/env python
import os
import sys
import skosprovider
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
packages = [
'skosprovider',
]
requires = [
'language-tags',
'rfc3987',
'pyld',
'html5lib'
]
setup(
name='skosprovider',
version='0.8.0',
description='Abstraction layer for SKOS vocabularies.',
long_description=open('README.rst').read(),
long_description_content_type='text/x-rst',
author='Koen Van Daele',
author_email='koen_van_daele@telenet.be',
url='http://github.com/koenedaele/skosprovider',
packages=packages,
package_data={'': ['LICENSE']},
package_dir={'skosprovider': 'skosprovider'},
include_package_data=True,
install_requires=requires,
license='MIT',
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
test_suite='nose.collector'
)
|
Add long description content type.
|
Add long description content type.
|
Python
|
mit
|
koenedaele/skosprovider
|
#!/usr/bin/env python
import os
import sys
import skosprovider
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
packages = [
'skosprovider',
]
requires = [
'language-tags',
'rfc3987',
'pyld',
'html5lib'
]
setup(
name='skosprovider',
version='0.8.0',
description='Abstraction layer for SKOS vocabularies.',
long_description=open('README.rst').read(),
author='Koen Van Daele',
author_email='koen_van_daele@telenet.be',
url='http://github.com/koenedaele/skosprovider',
packages=packages,
package_data={'': ['LICENSE']},
package_dir={'skosprovider': 'skosprovider'},
include_package_data=True,
install_requires=requires,
license='MIT',
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
test_suite='nose.collector'
)
Add long description content type.
|
#!/usr/bin/env python
import os
import sys
import skosprovider
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
packages = [
'skosprovider',
]
requires = [
'language-tags',
'rfc3987',
'pyld',
'html5lib'
]
setup(
name='skosprovider',
version='0.8.0',
description='Abstraction layer for SKOS vocabularies.',
long_description=open('README.rst').read(),
long_description_content_type='text/x-rst',
author='Koen Van Daele',
author_email='koen_van_daele@telenet.be',
url='http://github.com/koenedaele/skosprovider',
packages=packages,
package_data={'': ['LICENSE']},
package_dir={'skosprovider': 'skosprovider'},
include_package_data=True,
install_requires=requires,
license='MIT',
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
test_suite='nose.collector'
)
|
<commit_before>#!/usr/bin/env python
import os
import sys
import skosprovider
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
packages = [
'skosprovider',
]
requires = [
'language-tags',
'rfc3987',
'pyld',
'html5lib'
]
setup(
name='skosprovider',
version='0.8.0',
description='Abstraction layer for SKOS vocabularies.',
long_description=open('README.rst').read(),
author='Koen Van Daele',
author_email='koen_van_daele@telenet.be',
url='http://github.com/koenedaele/skosprovider',
packages=packages,
package_data={'': ['LICENSE']},
package_dir={'skosprovider': 'skosprovider'},
include_package_data=True,
install_requires=requires,
license='MIT',
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
test_suite='nose.collector'
)
<commit_msg>Add long description content type.<commit_after>
|
#!/usr/bin/env python
import os
import sys
import skosprovider
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
packages = [
'skosprovider',
]
requires = [
'language-tags',
'rfc3987',
'pyld',
'html5lib'
]
setup(
name='skosprovider',
version='0.8.0',
description='Abstraction layer for SKOS vocabularies.',
long_description=open('README.rst').read(),
long_description_content_type='text/x-rst',
author='Koen Van Daele',
author_email='koen_van_daele@telenet.be',
url='http://github.com/koenedaele/skosprovider',
packages=packages,
package_data={'': ['LICENSE']},
package_dir={'skosprovider': 'skosprovider'},
include_package_data=True,
install_requires=requires,
license='MIT',
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
test_suite='nose.collector'
)
|
#!/usr/bin/env python
import os
import sys
import skosprovider
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
packages = [
'skosprovider',
]
requires = [
'language-tags',
'rfc3987',
'pyld',
'html5lib'
]
setup(
name='skosprovider',
version='0.8.0',
description='Abstraction layer for SKOS vocabularies.',
long_description=open('README.rst').read(),
author='Koen Van Daele',
author_email='koen_van_daele@telenet.be',
url='http://github.com/koenedaele/skosprovider',
packages=packages,
package_data={'': ['LICENSE']},
package_dir={'skosprovider': 'skosprovider'},
include_package_data=True,
install_requires=requires,
license='MIT',
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
test_suite='nose.collector'
)
Add long description content type.#!/usr/bin/env python
import os
import sys
import skosprovider
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
packages = [
'skosprovider',
]
requires = [
'language-tags',
'rfc3987',
'pyld',
'html5lib'
]
setup(
name='skosprovider',
version='0.8.0',
description='Abstraction layer for SKOS vocabularies.',
long_description=open('README.rst').read(),
long_description_content_type='text/x-rst',
author='Koen Van Daele',
author_email='koen_van_daele@telenet.be',
url='http://github.com/koenedaele/skosprovider',
packages=packages,
package_data={'': ['LICENSE']},
package_dir={'skosprovider': 'skosprovider'},
include_package_data=True,
install_requires=requires,
license='MIT',
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
test_suite='nose.collector'
)
|
<commit_before>#!/usr/bin/env python
import os
import sys
import skosprovider
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
packages = [
'skosprovider',
]
requires = [
'language-tags',
'rfc3987',
'pyld',
'html5lib'
]
setup(
name='skosprovider',
version='0.8.0',
description='Abstraction layer for SKOS vocabularies.',
long_description=open('README.rst').read(),
author='Koen Van Daele',
author_email='koen_van_daele@telenet.be',
url='http://github.com/koenedaele/skosprovider',
packages=packages,
package_data={'': ['LICENSE']},
package_dir={'skosprovider': 'skosprovider'},
include_package_data=True,
install_requires=requires,
license='MIT',
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
test_suite='nose.collector'
)
<commit_msg>Add long description content type.<commit_after>#!/usr/bin/env python
import os
import sys
import skosprovider
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
packages = [
'skosprovider',
]
requires = [
'language-tags',
'rfc3987',
'pyld',
'html5lib'
]
setup(
name='skosprovider',
version='0.8.0',
description='Abstraction layer for SKOS vocabularies.',
long_description=open('README.rst').read(),
long_description_content_type='text/x-rst',
author='Koen Van Daele',
author_email='koen_van_daele@telenet.be',
url='http://github.com/koenedaele/skosprovider',
packages=packages,
package_data={'': ['LICENSE']},
package_dir={'skosprovider': 'skosprovider'},
include_package_data=True,
install_requires=requires,
license='MIT',
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
test_suite='nose.collector'
)
|
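Declaring long_description_content_type tells PyPI how to render the description; without it, PyPI has historically attempted reStructuredText and fallen back to plain text when rendering failed. A minimal sketch of the two fields side by side, using placeholder values rather than the real skosprovider metadata:

metadata = {
    'long_description': 'Example body written in reStructuredText.',
    'long_description_content_type': 'text/x-rst',   # or 'text/markdown' / 'text/plain'
}
print(metadata['long_description_content_type'])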
9f36fa84c2357e829d7820c1d4024decf702f04f
|
test/test_ViewLogin.py
|
test/test_ViewLogin.py
|
import pytest
from controller import db
from model.user import User
from test import C3BottlesTestCase, NAME, PASSWORD
class LoginViewTestCase(C3BottlesTestCase):
def test_login(self):
self.create_test_user()
resp = self.c3bottles.post('/login', data=dict(
username=NAME,
password=NAME
), follow_redirects=True)
self.assertEqual(resp.status_code, 200)
self.assertTrue('Wrong user name or password' in str(resp.data))
resp = self.c3bottles.post('/login', data=dict(
username=NAME,
password=PASSWORD
), follow_redirects=True)
self.assertEqual(resp.status_code, 200)
self.assertFalse('Wrong user name or password' in str(resp.data))
|
Add a simple login test case
|
test: Add a simple login test case
|
Python
|
mit
|
der-michik/c3bottles,der-michik/c3bottles,der-michik/c3bottles,der-michik/c3bottles
|
test: Add a simple login test case
|
import pytest
from controller import db
from model.user import User
from test import C3BottlesTestCase, NAME, PASSWORD
class LoginViewTestCase(C3BottlesTestCase):
def test_login(self):
self.create_test_user()
resp = self.c3bottles.post('/login', data=dict(
username=NAME,
password=NAME
), follow_redirects=True)
self.assertEqual(resp.status_code, 200)
self.assertTrue('Wrong user name or password' in str(resp.data))
resp = self.c3bottles.post('/login', data=dict(
username=NAME,
password=PASSWORD
), follow_redirects=True)
self.assertEqual(resp.status_code, 200)
self.assertFalse('Wrong user name or password' in str(resp.data))
|
<commit_before><commit_msg>test: Add a simple login test case<commit_after>
|
import pytest
from controller import db
from model.user import User
from test import C3BottlesTestCase, NAME, PASSWORD
class LoginViewTestCase(C3BottlesTestCase):
def test_login(self):
self.create_test_user()
resp = self.c3bottles.post('/login', data=dict(
username=NAME,
password=NAME
), follow_redirects=True)
self.assertEqual(resp.status_code, 200)
self.assertTrue('Wrong user name or password' in str(resp.data))
resp = self.c3bottles.post('/login', data=dict(
username=NAME,
password=PASSWORD
), follow_redirects=True)
self.assertEqual(resp.status_code, 200)
self.assertFalse('Wrong user name or password' in str(resp.data))
|
test: Add a simple login test caseimport pytest
from controller import db
from model.user import User
from test import C3BottlesTestCase, NAME, PASSWORD
class LoginViewTestCase(C3BottlesTestCase):
def test_login(self):
self.create_test_user()
resp = self.c3bottles.post('/login', data=dict(
username=NAME,
password=NAME
), follow_redirects=True)
self.assertEqual(resp.status_code, 200)
self.assertTrue('Wrong user name or password' in str(resp.data))
resp = self.c3bottles.post('/login', data=dict(
username=NAME,
password=PASSWORD
), follow_redirects=True)
self.assertEqual(resp.status_code, 200)
self.assertFalse('Wrong user name or password' in str(resp.data))
|
<commit_before><commit_msg>test: Add a simple login test case<commit_after>import pytest
from controller import db
from model.user import User
from test import C3BottlesTestCase, NAME, PASSWORD
class LoginViewTestCase(C3BottlesTestCase):
def test_login(self):
self.create_test_user()
resp = self.c3bottles.post('/login', data=dict(
username=NAME,
password=NAME
), follow_redirects=True)
self.assertEqual(resp.status_code, 200)
self.assertTrue('Wrong user name or password' in str(resp.data))
resp = self.c3bottles.post('/login', data=dict(
username=NAME,
password=PASSWORD
), follow_redirects=True)
self.assertEqual(resp.status_code, 200)
self.assertFalse('Wrong user name or password' in str(resp.data))
|
|
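The test above posts credentials and checks the response body for the error message. The same pattern is shown below against a throwaway Flask app, so it runs without the C3BottlesTestCase fixtures; this assumes Flask is installed, and the route and messages are invented for the illustration:

from flask import Flask, request

app = Flask(__name__)

@app.route('/login', methods=['POST'])
def login():
    ok = request.form.get('password') == 'secret'
    return 'Welcome' if ok else 'Wrong user name or password'

client = app.test_client()
bad = client.post('/login', data={'username': 'u', 'password': 'nope'})
good = client.post('/login', data={'username': 'u', 'password': 'secret'})
assert b'Wrong user name or password' in bad.data
assert b'Wrong user name or password' not in good.data
print('login assertions passed')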
7efea7644cdb567742532f4edd24f528badef21b
|
pandas_rs/rs.py
|
pandas_rs/rs.py
|
import psycopg2
import pandas as pd
def create_engine(dbname, user, password, host, port):
return Redshift.create_engine(dbname, user, password, host, port)
class Redshift(object):
"""
    Redshift client which connects to a Redshift database.
    Furthermore, you can read SQL from Redshift and
    get the result back as a pandas DataFrame.
"""
@classmethod
def create_engine(kls, dbname, user, password, host, port):
rs = Redshift(
dbname,
user,
password,
host,
port
)
return rs
def __init__(self, dbname, user, password, host, port):
self.config = dict(
dbname=dbname,
user=user,
password=password,
host=host,
port=port
)
self.con_pg = self.connect(config=self.config)
def connect(self, *args,**kwargs):
config = kwargs['config']
try:
con_pg=psycopg2.connect(
dbname=config['dbname'],
host=config['host'],
port=config['port'],
user=config['user'],
password=config['password']
)
return con_pg
except Exception as err:
print(err)
def read_sql(self, sql, index_col=None, columns=None, count=0):
try:
return pd.read_sql(sql, self.con_pg, index_col, columns=columns)
except psycopg2.InterfaceError as error:
self.con_pg = self.connect(config=self.config)
if count < 5:
return self.read_sql(sql, index_col, columns, count=count+1)
else:
raise RedshiftConnectionError(error)
class RedshiftConnectionError(Exception):
"""Exception raised for errors in the Redshift connection.
Attributes:
expr -- input expression in which the error occurred
msg -- explanation of the error
"""
def __init__(self, expr, msg="Failed to connect"):
self.expr = expr
self.msg = msg
def __str__(self):
return "{} {}".format(self.expr, self.msg)
|
Add Reshift class which connect to redshift
|
Add Reshift class which connect to redshift
|
Python
|
mit
|
SamuraiT/pandas-rs
|
Add Reshift class which connect to redshift
|
import psycopg2
import pandas as pd
def create_engine(dbname, user, password, host, port):
return Redshift.create_engine(dbname, user, password, host, port)
class Redshift(object):
"""
    Redshift client which connects to a Redshift database.
    Furthermore, you can read SQL from Redshift and
    get the result back as a pandas DataFrame.
"""
@classmethod
def create_engine(kls, dbname, user, password, host, port):
rs = Redshift(
dbname,
user,
password,
host,
port
)
return rs
def __init__(self, dbname, user, password, host, port):
self.config = dict(
dbname=dbname,
user=user,
password=password,
host=host,
port=port
)
self.con_pg = self.connect(config=self.config)
def connect(self, *args,**kwargs):
config = kwargs['config']
try:
con_pg=psycopg2.connect(
dbname=config['dbname'],
host=config['host'],
port=config['port'],
user=config['user'],
password=config['password']
)
return con_pg
except Exception as err:
print(err)
def read_sql(self, sql, index_col=None, columns=None, count=0):
try:
return pd.read_sql(sql, self.con_pg, index_col, columns=columns)
except psycopg2.InterfaceError as error:
self.con_pg = self.connect(config=self.config)
if count < 5:
return self.read_sql(sql, index_col, columns, count=count+1)
else:
raise RedshiftConnectionError(error)
class RedshiftConnectionError(Exception):
"""Exception raised for errors in the Redshift connection.
Attributes:
expr -- input expression in which the error occurred
msg -- explanation of the error
"""
def __init__(self, expr, msg="Failed to connect"):
self.expr = expr
self.msg = msg
def __str__(self):
return "{} {}".format(self.expr, self.msg)
|
<commit_before><commit_msg>Add Reshift class which connect to redshift<commit_after>
|
import psycopg2
import pandas as pd
def create_engine(dbname, user, password, host, port):
return Redshift.create_engine(dbname, user, password, host, port)
class Redshift(object):
"""
    Redshift client which connects to a Redshift database.
    Furthermore, you can read SQL from Redshift and
    get the result back as a pandas DataFrame.
"""
@classmethod
def create_engine(kls, dbname, user, password, host, port):
rs = Redshift(
dbname,
user,
password,
host,
port
)
return rs
def __init__(self, dbname, user, password, host, port):
self.config = dict(
dbname=dbname,
user=user,
password=password,
host=host,
port=port
)
self.con_pg = self.connect(config=self.config)
def connect(self, *args,**kwargs):
config = kwargs['config']
try:
con_pg=psycopg2.connect(
dbname=config['dbname'],
host=config['host'],
port=config['port'],
user=config['user'],
password=config['password']
)
return con_pg
except Exception as err:
print(err)
def read_sql(self, sql, index_col=None, columns=None, count=0):
try:
return pd.read_sql(sql, self.con_pg, index_col, columns=columns)
except psycopg2.InterfaceError as error:
self.con_pg = self.connect(config=self.config)
if count < 5:
return self.read_sql(sql, index_col, columns, count=count+1)
else:
raise RedshiftConnectionError(error)
class RedshiftConnectionError(Exception):
"""Exception raised for errors in the Redshift connection.
Attributes:
expr -- input expression in which the error occurred
msg -- explanation of the error
"""
def __init__(self, expr, msg="Failed to connect"):
self.expr = expr
self.msg = msg
def __str__(self):
return "{} {}".format(self.expr, self.msg)
|
Add Reshift class which connect to redshiftimport psycopg2
import pandas as pd
def create_engine(dbname, user, password, host, port):
return Redshift.create_engine(dbname, user, password, host, port)
class Redshift(object):
"""
    Redshift client which connects to a Redshift database.
    Furthermore, you can read SQL from Redshift and
    get the result back as a pandas DataFrame.
"""
@classmethod
def create_engine(kls, dbname, user, password, host, port):
rs = Redshift(
dbname,
user,
password,
host,
port
)
return rs
def __init__(self, dbname, user, password, host, port):
self.config = dict(
dbname=dbname,
user=user,
password=password,
host=host,
port=port
)
self.con_pg = self.connect(config=self.config)
def connect(self, *args,**kwargs):
config = kwargs['config']
try:
con_pg=psycopg2.connect(
dbname=config['dbname'],
host=config['host'],
port=config['port'],
user=config['user'],
password=config['password']
)
return con_pg
except Exception as err:
print(err)
def read_sql(self, sql, index_col=None, columns=None, count=0):
try:
return pd.read_sql(sql, self.con_pg, index_col, columns=columns)
except psycopg2.InterfaceError as error:
self.con_pg = self.connect(config=self.config)
if count < 5:
return self.read_sql(sql, index_col, columns, count=count+1)
else:
raise RedshiftConnectionError(error)
class RedshiftConnectionError(Exception):
"""Exception raised for errors in the Redshift connection.
Attributes:
expr -- input expression in which the error occurred
msg -- explanation of the error
"""
def __init__(self, expr, msg="Failed to connect"):
self.expr = expr
self.msg = msg
def __str__(self):
return "{} {}".format(self.expr, self.msg)
|
<commit_before><commit_msg>Add Reshift class which connect to redshift<commit_after>import psycopg2
import pandas as pd
def create_engine(dbname, user, password, host, port):
return Redshift.create_engine(dbname, user, password, host, port)
class Redshift(object):
"""
    Redshift client which connects to a Redshift database.
    Furthermore, you can read SQL from Redshift and
    get the result back as a pandas DataFrame.
"""
@classmethod
def create_engine(kls, dbname, user, password, host, port):
rs = Redshift(
dbname,
user,
password,
host,
port
)
return rs
def __init__(self, dbname, user, password, host, port):
self.config = dict(
dbname=dbname,
user=user,
password=password,
host=host,
port=port
)
self.con_pg = self.connect(config=self.config)
def connect(self, *args,**kwargs):
config = kwargs['config']
try:
con_pg=psycopg2.connect(
dbname=config['dbname'],
host=config['host'],
port=config['port'],
user=config['user'],
password=config['password']
)
return con_pg
except Exception as err:
print(err)
def read_sql(self, sql, index_col=None, columns=None, count=0):
try:
return pd.read_sql(sql, self.con_pg, index_col, columns=columns)
except psycopg2.InterfaceError as error:
self.con_pg = self.connect(config=self.config)
if count < 5:
return self.read_sql(sql, index_col, columns, count=count+1)
else:
raise RedshiftConnectionError(error)
class RedshiftConnectionError(Exception):
"""Exception raised for errors in the Redshift connection.
Attributes:
expr -- input expression in which the error occurred
msg -- explanation of the error
"""
def __init__(self, expr, msg="Failed to connect"):
self.expr = expr
self.msg = msg
def __str__(self):
return "{} {}".format(self.expr, self.msg)
|
|
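read_sql() reconnects and retries up to five times when psycopg2 raises InterfaceError, then gives up with RedshiftConnectionError. The bounded-retry shape in isolation, using a stand-in connection object instead of a live Redshift cluster (the real method also re-opens the psycopg2 connection before each retry):

class FlakyConnection:
    """Stand-in that fails a fixed number of times before succeeding."""
    def __init__(self, failures):
        self.failures = failures

    def query(self):
        if self.failures:
            self.failures -= 1
            raise ConnectionError('lost connection')
        return 'rows'

def read_with_retry(conn, retries=5):
    for attempt in range(retries + 1):
        try:
            return conn.query()
        except ConnectionError:
            if attempt == retries:
                raise                 # retry budget exhausted, surface the error

print(read_with_retry(FlakyConnection(failures=2)))   # prints 'rows'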
3ede8da88dc0a368fed45b696f66168898f6363e
|
tests/test_tip_json.py
|
tests/test_tip_json.py
|
import json
from nose.tools import assert_equal
from gittip.testing import TestClient
from gittip import db
CREATE_ACCOUNT = "INSERT INTO participants (id) VALUES (%s);"
def test_get_amount_and_total_back_from_api():
"Test that we get correct amounts and totals back on POSTs to tip.json"
client = TestClient()
# First, create some test data
# We need accounts
db.execute(CREATE_ACCOUNT, ("test_tippee1",))
db.execute(CREATE_ACCOUNT, ("test_tippee2",))
db.execute(CREATE_ACCOUNT, ("test_tipper",))
# We need to get ourselves a token!
response = client.get('/')
csrf_token = response.request.context['csrf_token']
    # Then, add a $1.00 and a $3.00 tip
response1 = client.post("/test_tippee1/tip.json",
{'amount': "1.00", 'csrf_token': csrf_token},
user='test_tipper')
response2 = client.post("/test_tippee2/tip.json",
{'amount': "3.00", 'csrf_token': csrf_token},
user='test_tipper')
# Confirm we get back the right amounts.
first_data = json.loads(response1.body)
second_data = json.loads(response2.body)
assert_equal(first_data['amount'], "1.00")
assert_equal(first_data['total_giving'], "1.00")
assert_equal(second_data['amount'], "3.00")
assert_equal(second_data['total_giving'], "4.00")
assert_equal(False, True)
|
Add test for tip.json view.
|
Add test for tip.json view.
|
Python
|
cc0-1.0
|
mccolgst/www.gittip.com,bountysource/www.gittip.com,bountysource/www.gittip.com,eXcomm/gratipay.com,mccolgst/www.gittip.com,eXcomm/gratipay.com,bountysource/www.gittip.com,mccolgst/www.gittip.com,mccolgst/www.gittip.com,studio666/gratipay.com,studio666/gratipay.com,gratipay/gratipay.com,eXcomm/gratipay.com,bountysource/www.gittip.com,eXcomm/gratipay.com,gratipay/gratipay.com,studio666/gratipay.com,gratipay/gratipay.com,studio666/gratipay.com,gratipay/gratipay.com
|
Add test for tip.json view.
|
import json
from nose.tools import assert_equal
from gittip.testing import TestClient
from gittip import db
CREATE_ACCOUNT = "INSERT INTO participants (id) VALUES (%s);"
def test_get_amount_and_total_back_from_api():
"Test that we get correct amounts and totals back on POSTs to tip.json"
client = TestClient()
# First, create some test data
# We need accounts
db.execute(CREATE_ACCOUNT, ("test_tippee1",))
db.execute(CREATE_ACCOUNT, ("test_tippee2",))
db.execute(CREATE_ACCOUNT, ("test_tipper",))
# We need to get ourselves a token!
response = client.get('/')
csrf_token = response.request.context['csrf_token']
    # Then, add a $1.00 and a $3.00 tip
response1 = client.post("/test_tippee1/tip.json",
{'amount': "1.00", 'csrf_token': csrf_token},
user='test_tipper')
response2 = client.post("/test_tippee2/tip.json",
{'amount': "3.00", 'csrf_token': csrf_token},
user='test_tipper')
# Confirm we get back the right amounts.
first_data = json.loads(response1.body)
second_data = json.loads(response2.body)
assert_equal(first_data['amount'], "1.00")
assert_equal(first_data['total_giving'], "1.00")
assert_equal(second_data['amount'], "3.00")
assert_equal(second_data['total_giving'], "4.00")
assert_equal(False, True)
|
<commit_before><commit_msg>Add test for tip.json view.<commit_after>
|
import json
from nose.tools import assert_equal
from gittip.testing import TestClient
from gittip import db
CREATE_ACCOUNT = "INSERT INTO participants (id) VALUES (%s);"
def test_get_amount_and_total_back_from_api():
"Test that we get correct amounts and totals back on POSTs to tip.json"
client = TestClient()
# First, create some test data
# We need accounts
db.execute(CREATE_ACCOUNT, ("test_tippee1",))
db.execute(CREATE_ACCOUNT, ("test_tippee2",))
db.execute(CREATE_ACCOUNT, ("test_tipper",))
# We need to get ourselves a token!
response = client.get('/')
csrf_token = response.request.context['csrf_token']
    # Then, add a $1.00 and a $3.00 tip
response1 = client.post("/test_tippee1/tip.json",
{'amount': "1.00", 'csrf_token': csrf_token},
user='test_tipper')
response2 = client.post("/test_tippee2/tip.json",
{'amount': "3.00", 'csrf_token': csrf_token},
user='test_tipper')
# Confirm we get back the right amounts.
first_data = json.loads(response1.body)
second_data = json.loads(response2.body)
assert_equal(first_data['amount'], "1.00")
assert_equal(first_data['total_giving'], "1.00")
assert_equal(second_data['amount'], "3.00")
assert_equal(second_data['total_giving'], "4.00")
assert_equal(False, True)
|
Add test for tip.json view.import json
from nose.tools import assert_equal
from gittip.testing import TestClient
from gittip import db
CREATE_ACCOUNT = "INSERT INTO participants (id) VALUES (%s);"
def test_get_amount_and_total_back_from_api():
"Test that we get correct amounts and totals back on POSTs to tip.json"
client = TestClient()
# First, create some test data
# We need accounts
db.execute(CREATE_ACCOUNT, ("test_tippee1",))
db.execute(CREATE_ACCOUNT, ("test_tippee2",))
db.execute(CREATE_ACCOUNT, ("test_tipper",))
# We need to get ourselves a token!
response = client.get('/')
csrf_token = response.request.context['csrf_token']
    # Then, add a $1.00 and a $3.00 tip
response1 = client.post("/test_tippee1/tip.json",
{'amount': "1.00", 'csrf_token': csrf_token},
user='test_tipper')
response2 = client.post("/test_tippee2/tip.json",
{'amount': "3.00", 'csrf_token': csrf_token},
user='test_tipper')
# Confirm we get back the right amounts.
first_data = json.loads(response1.body)
second_data = json.loads(response2.body)
assert_equal(first_data['amount'], "1.00")
assert_equal(first_data['total_giving'], "1.00")
assert_equal(second_data['amount'], "3.00")
assert_equal(second_data['total_giving'], "4.00")
assert_equal(False, True)
|
<commit_before><commit_msg>Add test for tip.json view.<commit_after>import json
from nose.tools import assert_equal
from gittip.testing import TestClient
from gittip import db
CREATE_ACCOUNT = "INSERT INTO participants (id) VALUES (%s);"
def test_get_amount_and_total_back_from_api():
"Test that we get correct amounts and totals back on POSTs to tip.json"
client = TestClient()
# First, create some test data
# We need accounts
db.execute(CREATE_ACCOUNT, ("test_tippee1",))
db.execute(CREATE_ACCOUNT, ("test_tippee2",))
db.execute(CREATE_ACCOUNT, ("test_tipper",))
# We need to get ourselves a token!
response = client.get('/')
csrf_token = response.request.context['csrf_token']
    # Then, add a $1.00 and a $3.00 tip
response1 = client.post("/test_tippee1/tip.json",
{'amount': "1.00", 'csrf_token': csrf_token},
user='test_tipper')
response2 = client.post("/test_tippee2/tip.json",
{'amount': "3.00", 'csrf_token': csrf_token},
user='test_tipper')
# Confirm we get back the right amounts.
first_data = json.loads(response1.body)
second_data = json.loads(response2.body)
assert_equal(first_data['amount'], "1.00")
assert_equal(first_data['total_giving'], "1.00")
assert_equal(second_data['amount'], "3.00")
assert_equal(second_data['total_giving'], "4.00")
assert_equal(False, True)
|
|
296fdb0bd202ea39d73d421ee4aa51efb079d297
|
src/waldur_core/core/migrations/0019_drop_zabbix_tables.py
|
src/waldur_core/core/migrations/0019_drop_zabbix_tables.py
|
from django.db import migrations
TABLES = (
'monitoring_resourceitem',
'monitoring_resourcesla',
'monitoring_resourceslastatetransition',
'waldur_zabbix_usergroup',
'waldur_zabbix_item',
'waldur_zabbix_trigger',
'waldur_zabbix_host_templates',
'waldur_zabbix_template',
'waldur_zabbix_template_parents',
'waldur_zabbix_host',
'waldur_zabbix_zabbixserviceprojectlink',
'waldur_zabbix_slahistory',
'waldur_zabbix_user',
'waldur_zabbix_user_groups',
'waldur_zabbix_zabbixservice',
'waldur_zabbix_itservice',
'waldur_zabbix_slahistoryevent',
)
class Migration(migrations.Migration):
dependencies = [
('core', '0018_drop_leftover_tables'),
]
operations = [
migrations.RunSQL(f'DROP TABLE IF EXISTS {table} CASCADE') for table in TABLES
]
|
Add migration for zabbix tables
|
Add migration for zabbix tables
|
Python
|
mit
|
opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/nodeconductor-assembly-waldur,opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur
|
Add migration for zabbix tables
|
from django.db import migrations
TABLES = (
'monitoring_resourceitem',
'monitoring_resourcesla',
'monitoring_resourceslastatetransition',
'waldur_zabbix_usergroup',
'waldur_zabbix_item',
'waldur_zabbix_trigger',
'waldur_zabbix_host_templates',
'waldur_zabbix_template',
'waldur_zabbix_template_parents',
'waldur_zabbix_host',
'waldur_zabbix_zabbixserviceprojectlink',
'waldur_zabbix_slahistory',
'waldur_zabbix_user',
'waldur_zabbix_user_groups',
'waldur_zabbix_zabbixservice',
'waldur_zabbix_itservice',
'waldur_zabbix_slahistoryevent',
)
class Migration(migrations.Migration):
dependencies = [
('core', '0018_drop_leftover_tables'),
]
operations = [
migrations.RunSQL(f'DROP TABLE IF EXISTS {table} CASCADE') for table in TABLES
]
|
<commit_before><commit_msg>Add migration for zabbix tables<commit_after>
|
from django.db import migrations
TABLES = (
'monitoring_resourceitem',
'monitoring_resourcesla',
'monitoring_resourceslastatetransition',
'waldur_zabbix_usergroup',
'waldur_zabbix_item',
'waldur_zabbix_trigger',
'waldur_zabbix_host_templates',
'waldur_zabbix_template',
'waldur_zabbix_template_parents',
'waldur_zabbix_host',
'waldur_zabbix_zabbixserviceprojectlink',
'waldur_zabbix_slahistory',
'waldur_zabbix_user',
'waldur_zabbix_user_groups',
'waldur_zabbix_zabbixservice',
'waldur_zabbix_itservice',
'waldur_zabbix_slahistoryevent',
)
class Migration(migrations.Migration):
dependencies = [
('core', '0018_drop_leftover_tables'),
]
operations = [
migrations.RunSQL(f'DROP TABLE IF EXISTS {table} CASCADE') for table in TABLES
]
|
Add migration for zabbix tablesfrom django.db import migrations
TABLES = (
'monitoring_resourceitem',
'monitoring_resourcesla',
'monitoring_resourceslastatetransition',
'waldur_zabbix_usergroup',
'waldur_zabbix_item',
'waldur_zabbix_trigger',
'waldur_zabbix_host_templates',
'waldur_zabbix_template',
'waldur_zabbix_template_parents',
'waldur_zabbix_host',
'waldur_zabbix_zabbixserviceprojectlink',
'waldur_zabbix_slahistory',
'waldur_zabbix_user',
'waldur_zabbix_user_groups',
'waldur_zabbix_zabbixservice',
'waldur_zabbix_itservice',
'waldur_zabbix_slahistoryevent',
)
class Migration(migrations.Migration):
dependencies = [
('core', '0018_drop_leftover_tables'),
]
operations = [
migrations.RunSQL(f'DROP TABLE IF EXISTS {table} CASCADE') for table in TABLES
]
|
<commit_before><commit_msg>Add migration for zabbix tables<commit_after>from django.db import migrations
TABLES = (
'monitoring_resourceitem',
'monitoring_resourcesla',
'monitoring_resourceslastatetransition',
'waldur_zabbix_usergroup',
'waldur_zabbix_item',
'waldur_zabbix_trigger',
'waldur_zabbix_host_templates',
'waldur_zabbix_template',
'waldur_zabbix_template_parents',
'waldur_zabbix_host',
'waldur_zabbix_zabbixserviceprojectlink',
'waldur_zabbix_slahistory',
'waldur_zabbix_user',
'waldur_zabbix_user_groups',
'waldur_zabbix_zabbixservice',
'waldur_zabbix_itservice',
'waldur_zabbix_slahistoryevent',
)
class Migration(migrations.Migration):
dependencies = [
('core', '0018_drop_leftover_tables'),
]
operations = [
migrations.RunSQL(f'DROP TABLE IF EXISTS {table} CASCADE') for table in TABLES
]
|
|
a284f097b388c0cbdd92af0a28cf5fe78fd03986
|
hackerrank_hello_world.py
|
hackerrank_hello_world.py
|
# Read a full line of input from stdin and save it to our dynamically typed variable, inputString.
inputString = raw_input()
# Print a string literal saying "Hello, World." to stdout.
print 'Hello, World.'
print inputString
|
Print Hello, World on the first line, and the contents of input on the second line.
|
Print Hello, World on the first line, and the contents of input on the second line.
|
Python
|
mit
|
kumarisneha/practice_repo
|
Print Hello, World on the first line, and the contents of input on the second line.
|
# Read a full line of input from stdin and save it to our dynamically typed variable, inputString.
inputString = raw_input()
# Print a string literal saying "Hello, World." to stdout.
print 'Hello, World.'
print inputString
|
<commit_before><commit_msg>Print Hello, World on the first line, and the contents of input on the second line.<commit_after>
|
# Read a full line of input from stdin and save it to our dynamically typed variable, inputString.
inputString = raw_input()
# Print a string literal saying "Hello, World." to stdout.
print 'Hello, World.'
print inputString
|
Print Hello, World on the first line, and the contents of input on the second line.# Read a full line of input from stdin and save it to our dynamically typed variable, inputString.
inputString = raw_input()
# Print a string literal saying "Hello, World." to stdout.
print 'Hello, World.'
print inputString
|
<commit_before><commit_msg>Print Hello, World on the first line, and the contents of input on the second line.<commit_after># Read a full line of input from stdin and save it to our dynamically typed variable, inputString.
inputString = raw_input()
# Print a string literal saying "Hello, World." to stdout.
print 'Hello, World.'
print inputString
|
|
df2dc6cad36851e2b7aaf1d3ace98483a00b51c7
|
altair/vegalite/v2/examples/simple_line_chart_with_markers.py
|
altair/vegalite/v2/examples/simple_line_chart_with_markers.py
|
"""
Simple Line Chart with Markers
------------------------------
This chart shows the most basic line chart with markers, made from a dataframe with two
columns.
"""
# category: simple charts
import altair as alt
import numpy as np
import pandas as pd
x = np.arange(100)
data = pd.DataFrame({'x': x,
'sin(x)': np.sin(x / 5)})
alt.Chart(data).mark_line(point=True).encode(
x='x',
y='sin(x)'
)
|
Add simple line chart with markers example
|
DOC: Add simple line chart with markers example
|
Python
|
bsd-3-clause
|
altair-viz/altair,jakevdp/altair
|
DOC: Add simple line chart with markers example
|
"""
Simple Line Chart with Markers
------------------------------
This chart shows the most basic line chart with markers, made from a dataframe with two
columns.
"""
# category: simple charts
import altair as alt
import numpy as np
import pandas as pd
x = np.arange(100)
data = pd.DataFrame({'x': x,
'sin(x)': np.sin(x / 5)})
alt.Chart(data).mark_line(point=True).encode(
x='x',
y='sin(x)'
)
|
<commit_before><commit_msg>DOC: Add simple line chart with markers example<commit_after>
|
"""
Simple Line Chart with Markers
------------------------------
This chart shows the most basic line chart with markers, made from a dataframe with two
columns.
"""
# category: simple charts
import altair as alt
import numpy as np
import pandas as pd
x = np.arange(100)
data = pd.DataFrame({'x': x,
'sin(x)': np.sin(x / 5)})
alt.Chart(data).mark_line(point=True).encode(
x='x',
y='sin(x)'
)
|
DOC: Add simple line chart with markers example"""
Simple Line Chart with Markers
------------------------------
This chart shows the most basic line chart with markers, made from a dataframe with two
columns.
"""
# category: simple charts
import altair as alt
import numpy as np
import pandas as pd
x = np.arange(100)
data = pd.DataFrame({'x': x,
'sin(x)': np.sin(x / 5)})
alt.Chart(data).mark_line(point=True).encode(
x='x',
y='sin(x)'
)
|
<commit_before><commit_msg>DOC: Add simple line chart with markers example<commit_after>"""
Simple Line Chart with Markers
------------------------------
This chart shows the most basic line chart with markers, made from a dataframe with two
columns.
"""
# category: simple charts
import altair as alt
import numpy as np
import pandas as pd
x = np.arange(100)
data = pd.DataFrame({'x': x,
'sin(x)': np.sin(x / 5)})
alt.Chart(data).mark_line(point=True).encode(
x='x',
y='sin(x)'
)
|
|
3ad97790d078d50839d8a0c50775d8a75e04ff9e
|
py/longest-palindromic-subsequence.py
|
py/longest-palindromic-subsequence.py
|
class Solution(object):
def longestPalindromeSubseq(self, s):
"""
:type s: str
:rtype: int
"""
prev2 = [0] * len(s)
prev = [1] * len(s)
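        # Rolling-array DP over substring length l: prev[i] holds the LPS length
        # of the (l-1)-length substring starting at i, prev2[i] that of length l-2.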
for l in xrange(2, len(s) + 1):
nxt = [0] * (len(s) - l + 1)
for i in xrange(len(s) - l + 1):
if s[i] == s[i + l - 1]:
nxt[i] = prev2[i + 1] + 2
else:
nxt[i] = max(prev[i + 1], prev[i])
prev2, prev = prev, nxt
return prev[0]
|
Add py solution for 516. Longest Palindromic Subsequence
|
Add py solution for 516. Longest Palindromic Subsequence
516. Longest Palindromic Subsequence: https://leetcode.com/problems/longest-palindromic-subsequence/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 516. Longest Palindromic Subsequence
516. Longest Palindromic Subsequence: https://leetcode.com/problems/longest-palindromic-subsequence/
|
class Solution(object):
def longestPalindromeSubseq(self, s):
"""
:type s: str
:rtype: int
"""
prev2 = [0] * len(s)
prev = [1] * len(s)
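        # Rolling-array DP over substring length l: prev[i] holds the LPS length
        # of the (l-1)-length substring starting at i, prev2[i] that of length l-2.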
for l in xrange(2, len(s) + 1):
nxt = [0] * (len(s) - l + 1)
for i in xrange(len(s) - l + 1):
if s[i] == s[i + l - 1]:
nxt[i] = prev2[i + 1] + 2
else:
nxt[i] = max(prev[i + 1], prev[i])
prev2, prev = prev, nxt
return prev[0]
|
<commit_before><commit_msg>Add py solution for 516. Longest Palindromic Subsequence
516. Longest Palindromic Subsequence: https://leetcode.com/problems/longest-palindromic-subsequence/<commit_after>
|
class Solution(object):
def longestPalindromeSubseq(self, s):
"""
:type s: str
:rtype: int
"""
prev2 = [0] * len(s)
prev = [1] * len(s)
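        # Rolling-array DP over substring length l: prev[i] holds the LPS length
        # of the (l-1)-length substring starting at i, prev2[i] that of length l-2.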
for l in xrange(2, len(s) + 1):
nxt = [0] * (len(s) - l + 1)
for i in xrange(len(s) - l + 1):
if s[i] == s[i + l - 1]:
nxt[i] = prev2[i + 1] + 2
else:
nxt[i] = max(prev[i + 1], prev[i])
prev2, prev = prev, nxt
return prev[0]
|
Add py solution for 516. Longest Palindromic Subsequence
516. Longest Palindromic Subsequence: https://leetcode.com/problems/longest-palindromic-subsequence/class Solution(object):
def longestPalindromeSubseq(self, s):
"""
:type s: str
:rtype: int
"""
prev2 = [0] * len(s)
prev = [1] * len(s)
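        # Rolling-array DP over substring length l: prev[i] holds the LPS length
        # of the (l-1)-length substring starting at i, prev2[i] that of length l-2.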
for l in xrange(2, len(s) + 1):
nxt = [0] * (len(s) - l + 1)
for i in xrange(len(s) - l + 1):
if s[i] == s[i + l - 1]:
nxt[i] = prev2[i + 1] + 2
else:
nxt[i] = max(prev[i + 1], prev[i])
prev2, prev = prev, nxt
return prev[0]
|
<commit_before><commit_msg>Add py solution for 516. Longest Palindromic Subsequence
516. Longest Palindromic Subsequence: https://leetcode.com/problems/longest-palindromic-subsequence/<commit_after>class Solution(object):
def longestPalindromeSubseq(self, s):
"""
:type s: str
:rtype: int
"""
prev2 = [0] * len(s)
prev = [1] * len(s)
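        # Rolling-array DP over substring length l: prev[i] holds the LPS length
        # of the (l-1)-length substring starting at i, prev2[i] that of length l-2.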
for l in xrange(2, len(s) + 1):
nxt = [0] * (len(s) - l + 1)
for i in xrange(len(s) - l + 1):
if s[i] == s[i + l - 1]:
nxt[i] = prev2[i + 1] + 2
else:
nxt[i] = max(prev[i + 1], prev[i])
prev2, prev = prev, nxt
return prev[0]
|
|
ca716b34fa0e320f8f3684fdba2e124ebf79a534
|
compileJSX.py
|
compileJSX.py
|
from react import jsx
# For a single file, you can use a shortcut method.
jsx.transform('website/public/js/riverComponents.jsx', js_path='website/public/js/riverComponents.js')
|
Add script to translate riverComponents jsx => js
|
Add script to translate riverComponents jsx => js
|
Python
|
agpl-3.0
|
1self/api,1self/api,1self/api,1self/api
|
Add script to translate riverComponents jsx => js
|
from react import jsx
# For a single file, you can use a shortcut method.
jsx.transform('website/public/js/riverComponents.jsx', js_path='website/public/js/riverComponents.js')
|
<commit_before><commit_msg>Add script to translate riverComponents jsx => js<commit_after>
|
from react import jsx
# For a single file, you can use a shortcut method.
jsx.transform('website/public/js/riverComponents.jsx', js_path='website/public/js/riverComponents.js')
|
Add script to translate riverComponents jsx => jsfrom react import jsx
# For a single file, you can use a shortcut method.
jsx.transform('website/public/js/riverComponents.jsx', js_path='website/public/js/riverComponents.js')
|
<commit_before><commit_msg>Add script to translate riverComponents jsx => js<commit_after>from react import jsx
# For a single file, you can use a shortcut method.
jsx.transform('website/public/js/riverComponents.jsx', js_path='website/public/js/riverComponents.js')
|
|
c15ca56c170fe13ef6a7b016de812f57d613c0bb
|
python--learnings/class_functionality.py
|
python--learnings/class_functionality.py
|
#!/usr/bin/env python
#
# Topics: Classes, Inheritance, and Related
#
# Background: Use of classes, including inheritance, instance variables, etc.
#
# Sources:
# - https://www.python-course.eu/object_oriented_programming.php
# - https://realpython.com/python3-object-oriented-programming
import unittest
class Account:
routing = "123"
def __init__(self, first_name="NOTSET"):
self.first_name = first_name
pass
def details(self):
return "{}|{}".format(self.__class__.routing, self.first_name)
class CheckingAccount(Account):
def details(self):
return "Checking|{}|{}".format(self.__class__.routing, self.first_name)
# test functionality
class TestClass(unittest.TestCase):
account = None
@classmethod
def setUpClass(cls):
cls.account = Account(first_name="Joe")
cls.checking_account = CheckingAccount(first_name="Joe")
def test_class_constructor(self):
self.assertIsInstance(self.account, Account)
def test_class_attribute(self):
self.assertEqual(self.account.routing, "123")
def test_instance_method(self):
self.assertEqual(self.account.details(), "123|Joe")
def test_class_inheritance(self):
self.assertIsInstance(self.checking_account, CheckingAccount)
self.assertIsInstance(self.checking_account, Account)
def test_class_override(self):
self.assertEqual(self.checking_account.details(), "Checking|123|Joe")
def test_class_public_var(self):
self.assertEqual(self.account.first_name, "Joe")
# main execution
if __name__ == '__main__':
unittest.main()
|
Add Python Class Test Functionality
|
Add Python Class Test Functionality
Add some refreshers around Python classes.
|
Python
|
mit
|
jekhokie/scriptbox,jekhokie/scriptbox,jekhokie/scriptbox,jekhokie/scriptbox,jekhokie/scriptbox,jekhokie/scriptbox,jekhokie/scriptbox,jekhokie/scriptbox
|
Add Python Class Test Functionality
Add some refreshers around Python classes.
|
#!/usr/bin/env python
#
# Topics: Classes, Inheritance, and Related
#
# Background: Use of classes, including inheritance, instance variables, etc.
#
# Sources:
# - https://www.python-course.eu/object_oriented_programming.php
# - https://realpython.com/python3-object-oriented-programming
import unittest
class Account:
routing = "123"
def __init__(self, first_name="NOTSET"):
self.first_name = first_name
pass
def details(self):
return "{}|{}".format(self.__class__.routing, self.first_name)
class CheckingAccount(Account):
def details(self):
return "Checking|{}|{}".format(self.__class__.routing, self.first_name)
# test functionality
class TestClass(unittest.TestCase):
account = None
@classmethod
def setUpClass(cls):
cls.account = Account(first_name="Joe")
cls.checking_account = CheckingAccount(first_name="Joe")
def test_class_constructor(self):
self.assertIsInstance(self.account, Account)
def test_class_attribute(self):
self.assertEqual(self.account.routing, "123")
def test_instance_method(self):
self.assertEqual(self.account.details(), "123|Joe")
def test_class_inheritance(self):
self.assertIsInstance(self.checking_account, CheckingAccount)
self.assertIsInstance(self.checking_account, Account)
def test_class_override(self):
self.assertEqual(self.checking_account.details(), "Checking|123|Joe")
def test_class_public_var(self):
self.assertEqual(self.account.first_name, "Joe")
# main execution
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add Python Class Test Functionality
Add some refreshers around Python classes.<commit_after>
|
#!/usr/bin/env python
#
# Topics: Classes, Inheritance, and Related
#
# Background: Use of classes, including inheritance, instance variables, etc.
#
# Sources:
# - https://www.python-course.eu/object_oriented_programming.php
# - https://realpython.com/python3-object-oriented-programming
import unittest
class Account:
routing = "123"
def __init__(self, first_name="NOTSET"):
self.first_name = first_name
pass
def details(self):
return "{}|{}".format(self.__class__.routing, self.first_name)
class CheckingAccount(Account):
def details(self):
return "Checking|{}|{}".format(self.__class__.routing, self.first_name)
# test functionality
class TestClass(unittest.TestCase):
account = None
@classmethod
def setUpClass(cls):
cls.account = Account(first_name="Joe")
cls.checking_account = CheckingAccount(first_name="Joe")
def test_class_constructor(self):
self.assertIsInstance(self.account, Account)
def test_class_attribute(self):
self.assertEqual(self.account.routing, "123")
def test_instance_method(self):
self.assertEqual(self.account.details(), "123|Joe")
def test_class_inheritance(self):
self.assertIsInstance(self.checking_account, CheckingAccount)
self.assertIsInstance(self.checking_account, Account)
def test_class_override(self):
self.assertEqual(self.checking_account.details(), "Checking|123|Joe")
def test_class_public_var(self):
self.assertEqual(self.account.first_name, "Joe")
# main execution
if __name__ == '__main__':
unittest.main()
|
Add Python Class Test Functionality
Add some refreshers around Python classes.#!/usr/bin/env python
#
# Topics: Classes, Inheritance, and Related
#
# Background: Use of classes, including inheritance, instance variables, etc.
#
# Sources:
# - https://www.python-course.eu/object_oriented_programming.php
# - https://realpython.com/python3-object-oriented-programming
import unittest
class Account:
routing = "123"
def __init__(self, first_name="NOTSET"):
self.first_name = first_name
pass
def details(self):
return "{}|{}".format(self.__class__.routing, self.first_name)
class CheckingAccount(Account):
def details(self):
return "Checking|{}|{}".format(self.__class__.routing, self.first_name)
# test functionality
class TestClass(unittest.TestCase):
account = None
@classmethod
def setUpClass(cls):
cls.account = Account(first_name="Joe")
cls.checking_account = CheckingAccount(first_name="Joe")
def test_class_constructor(self):
self.assertIsInstance(self.account, Account)
def test_class_attribute(self):
self.assertEqual(self.account.routing, "123")
def test_instance_method(self):
self.assertEqual(self.account.details(), "123|Joe")
def test_class_inheritance(self):
self.assertIsInstance(self.checking_account, CheckingAccount)
self.assertIsInstance(self.checking_account, Account)
def test_class_override(self):
self.assertEqual(self.checking_account.details(), "Checking|123|Joe")
def test_class_public_var(self):
self.assertEqual(self.account.first_name, "Joe")
# main execution
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add Python Class Test Functionality
Add some refreshers around Python classes.<commit_after>#!/usr/bin/env python
#
# Topics: Classes, Inheritance, and Related
#
# Background: Use of classes, including inheritance, instance variables, etc.
#
# Sources:
# - https://www.python-course.eu/object_oriented_programming.php
# - https://realpython.com/python3-object-oriented-programming
import unittest
class Account:
routing = "123"
def __init__(self, first_name="NOTSET"):
self.first_name = first_name
pass
def details(self):
return "{}|{}".format(self.__class__.routing, self.first_name)
class CheckingAccount(Account):
def details(self):
return "Checking|{}|{}".format(self.__class__.routing, self.first_name)
# test functionality
class TestClass(unittest.TestCase):
account = None
@classmethod
def setUpClass(cls):
cls.account = Account(first_name="Joe")
cls.checking_account = CheckingAccount(first_name="Joe")
def test_class_constructor(self):
self.assertIsInstance(self.account, Account)
def test_class_attribute(self):
self.assertEqual(self.account.routing, "123")
def test_instance_method(self):
self.assertEqual(self.account.details(), "123|Joe")
def test_class_inheritance(self):
self.assertIsInstance(self.checking_account, CheckingAccount)
self.assertIsInstance(self.checking_account, Account)
def test_class_override(self):
self.assertEqual(self.checking_account.details(), "Checking|123|Joe")
def test_class_public_var(self):
self.assertEqual(self.account.first_name, "Joe")
# main execution
if __name__ == '__main__':
unittest.main()
|
|
fb7e95444136b1e8461b4cd246df1b81f4767f1e
|
tests/test_runnable.py
|
tests/test_runnable.py
|
import glob
import os
import unittest
from chainer import testing
class TestRunnable(unittest.TestCase):
def test_runnable(self):
cwd = os.path.dirname(__file__)
for path in glob.iglob(os.path.join(cwd, '**', '*.py')):
with open(path) as f:
source = f.read()
self.assertIn('testing.run_module(__name__, __file__)',
source,
'''{0} is not runnable.
Call testing.run_module at the end of the test.'''.format(path))
testing.run_module(__name__, __file__)
|
Add test to check if all tests are runnable
|
Add test to check if all tests are runnable
|
Python
|
mit
|
keisuke-umezawa/chainer,jnishi/chainer,wkentaro/chainer,t-abe/chainer,t-abe/chainer,cupy/cupy,okuta/chainer,okuta/chainer,niboshi/chainer,chainer/chainer,keisuke-umezawa/chainer,jnishi/chainer,hvy/chainer,ktnyt/chainer,laysakura/chainer,masia02/chainer,wavelets/chainer,jnishi/chainer,jnishi/chainer,sinhrks/chainer,bayerj/chainer,cupy/cupy,tigerneil/chainer,sou81821/chainer,kuwa32/chainer,elviswf/chainer,chainer/chainer,chainer/chainer,chainer/chainer,1986ks/chainer,rezoo/chainer,ytoyama/yans_chainer_hackathon,cemoody/chainer,keisuke-umezawa/chainer,ktnyt/chainer,minhpqn/chainer,niboshi/chainer,niboshi/chainer,ronekko/chainer,ysekky/chainer,cupy/cupy,tscohen/chainer,kiyukuta/chainer,sinhrks/chainer,truongdq/chainer,benob/chainer,hvy/chainer,hvy/chainer,Kaisuke5/chainer,anaruse/chainer,woodshop/chainer,muupan/chainer,okuta/chainer,cupy/cupy,wkentaro/chainer,ktnyt/chainer,woodshop/complex-chainer,wkentaro/chainer,niboshi/chainer,delta2323/chainer,muupan/chainer,aonotas/chainer,truongdq/chainer,pfnet/chainer,kikusu/chainer,wkentaro/chainer,keisuke-umezawa/chainer,AlpacaDB/chainer,benob/chainer,tkerola/chainer,kashif/chainer,umitanuki/chainer,hvy/chainer,yanweifu/chainer,kikusu/chainer,ikasumi/chainer,okuta/chainer,hidenori-t/chainer,AlpacaDB/chainer,ktnyt/chainer,jfsantos/chainer
|
Add test to check if all tests are runnable
|
import glob
import os
import unittest
from chainer import testing
class TestRunnable(unittest.TestCase):
def test_runnable(self):
cwd = os.path.dirname(__file__)
for path in glob.iglob(os.path.join(cwd, '**', '*.py')):
with open(path) as f:
source = f.read()
self.assertIn('testing.run_module(__name__, __file__)',
source,
'''{0} is not runnable.
Call testing.run_module at the end of the test.'''.format(path))
testing.run_module(__name__, __file__)
|
<commit_before><commit_msg>Add test to check if all tests are runnable<commit_after>
|
import glob
import os
import unittest
from chainer import testing
class TestRunnable(unittest.TestCase):
def test_runnable(self):
cwd = os.path.dirname(__file__)
for path in glob.iglob(os.path.join(cwd, '**', '*.py')):
with open(path) as f:
source = f.read()
self.assertIn('testing.run_module(__name__, __file__)',
source,
'''{0} is not runnable.
Call testing.run_module at the end of the test.'''.format(path))
testing.run_module(__name__, __file__)
|
Add test to check if all tests are runnableimport glob
import os
import unittest
from chainer import testing
class TestRunnable(unittest.TestCase):
def test_runnable(self):
cwd = os.path.dirname(__file__)
for path in glob.iglob(os.path.join(cwd, '**', '*.py')):
with open(path) as f:
source = f.read()
self.assertIn('testing.run_module(__name__, __file__)',
source,
'''{0} is not runnable.
Call testing.run_module at the end of the test.'''.format(path))
testing.run_module(__name__, __file__)
|
<commit_before><commit_msg>Add test to check if all tests are runnable<commit_after>import glob
import os
import unittest
from chainer import testing
class TestRunnable(unittest.TestCase):
def test_runnable(self):
cwd = os.path.dirname(__file__)
for path in glob.iglob(os.path.join(cwd, '**', '*.py')):
with open(path) as f:
source = f.read()
self.assertIn('testing.run_module(__name__, __file__)',
source,
'''{0} is not runnable.
Call testing.run_module at the end of the test.'''.format(path))
testing.run_module(__name__, __file__)
|
|
4db37770ab2378822b91316e56ca0618912231ac
|
demo/scripts/random-build-graph.py
|
demo/scripts/random-build-graph.py
|
#!/usr/bin/env python3
# Copyright 2016 Codethink Ltd.
# Apache 2.0 license
'''Generate a random build graph in 'node-link' JSON format.'''
import networkx
import networkx.readwrite.json_graph
import json
import sys
INPUT_NAMES = '/usr/share/dict/words'
N_NODES = 1000
N_EDGES = 2000
# Return a random graph with a fixed number of nodes and edges.
g = networkx.gnm_random_graph(N_NODES, N_EDGES, directed=True)
# Assign names
with open(INPUT_NAMES) as f:
lines = f.readlines()
for i in g.nodes():
line_mult = len(lines) / g.number_of_nodes()
name = lines[int(i * line_mult)].strip()
g.node[i]['name'] = name
json.dump(networkx.readwrite.json_graph.node_link_data(g), sys.stdout)
|
Add random build graph generator
|
Add random build graph generator
|
Python
|
apache-2.0
|
ssssam/generic-concourse-ui,ssssam/generic-concourse-ui
|
Add random build graph generator
|
#!/usr/bin/env python3
# Copyright 2016 Codethink Ltd.
# Apache 2.0 license
'''Generate a random build graph in 'node-link' JSON format.'''
import networkx
import networkx.readwrite.json_graph
import json
import sys
INPUT_NAMES = '/usr/share/dict/words'
N_NODES = 1000
N_EDGES = 2000
# Return a random graph with a fixed number of nodes and edges.
g = networkx.gnm_random_graph(N_NODES, N_EDGES, directed=True)
# Assign names
with open(INPUT_NAMES) as f:
lines = f.readlines()
for i in g.nodes():
line_mult = len(lines) / g.number_of_nodes()
name = lines[int(i * line_mult)].strip()
g.node[i]['name'] = name
json.dump(networkx.readwrite.json_graph.node_link_data(g), sys.stdout)
|
<commit_before><commit_msg>Add random build graph generator<commit_after>
|
#!/usr/bin/env python3
# Copyright 2016 Codethink Ltd.
# Apache 2.0 license
'''Generate a random build graph in 'node-link' JSON format.'''
import networkx
import networkx.readwrite.json_graph
import json
import sys
INPUT_NAMES = '/usr/share/dict/words'
N_NODES = 1000
N_EDGES = 2000
# Return a random graph with a fixed number of nodes and edges.
g = networkx.gnm_random_graph(N_NODES, N_EDGES, directed=True)
# Assign names
with open(INPUT_NAMES) as f:
lines = f.readlines()
for i in g.nodes():
line_mult = len(lines) / g.number_of_nodes()
name = lines[int(i * line_mult)].strip()
g.node[i]['name'] = name
json.dump(networkx.readwrite.json_graph.node_link_data(g), sys.stdout)
|
Add random build graph generator#!/usr/bin/env python3
# Copyright 2016 Codethink Ltd.
# Apache 2.0 license
'''Generate a random build graph in 'node-link' JSON format.'''
import networkx
import networkx.readwrite.json_graph
import json
import sys
INPUT_NAMES = '/usr/share/dict/words'
N_NODES = 1000
N_EDGES = 2000
# Return a random graph with a fixed number of nodes and edges.
g = networkx.gnm_random_graph(N_NODES, N_EDGES, directed=True)
# Assign names
with open(INPUT_NAMES) as f:
lines = f.readlines()
for i in g.nodes():
line_mult = len(lines) / g.number_of_nodes()
name = lines[int(i * line_mult)].strip()
g.node[i]['name'] = name
json.dump(networkx.readwrite.json_graph.node_link_data(g), sys.stdout)
|
<commit_before><commit_msg>Add random build graph generator<commit_after>#!/usr/bin/env python3
# Copyright 2016 Codethink Ltd.
# Apache 2.0 license
'''Generate a random build graph in 'node-link' JSON format.'''
import networkx
import networkx.readwrite.json_graph
import json
import sys
INPUT_NAMES = '/usr/share/dict/words'
N_NODES = 1000
N_EDGES = 2000
# Return a random graph with a fixed number of nodes and edges.
g = networkx.gnm_random_graph(N_NODES, N_EDGES, directed=True)
# Assign names
with open(INPUT_NAMES) as f:
lines = f.readlines()
for i in g.nodes():
line_mult = len(lines) / g.number_of_nodes()
name = lines[int(i * line_mult)].strip()
g.node[i]['name'] = name
json.dump(networkx.readwrite.json_graph.node_link_data(g), sys.stdout)
|
|
0f06ade09a339f99789b3b4e9ae9ac7db2c1f22d
|
genoome/disease/migrations/0017_remove_allelecolor_color.py
|
genoome/disease/migrations/0017_remove_allelecolor_color.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('disease', '0016_auto_20151007_0824'),
]
operations = [
migrations.RemoveField(
model_name='allelecolor',
name='color',
),
]
|
Remove field color from allelecolor
|
Remove field color from allelecolor
|
Python
|
mit
|
jiivan/genoomy,jiivan/genoomy,jiivan/genoomy,jiivan/genoomy
|
Remove field color from allelecolor
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('disease', '0016_auto_20151007_0824'),
]
operations = [
migrations.RemoveField(
model_name='allelecolor',
name='color',
),
]
|
<commit_before><commit_msg>Remove field color from allelecolor<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('disease', '0016_auto_20151007_0824'),
]
operations = [
migrations.RemoveField(
model_name='allelecolor',
name='color',
),
]
|
Remove field color from allelecolor# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('disease', '0016_auto_20151007_0824'),
]
operations = [
migrations.RemoveField(
model_name='allelecolor',
name='color',
),
]
|
<commit_before><commit_msg>Remove field color from allelecolor<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('disease', '0016_auto_20151007_0824'),
]
operations = [
migrations.RemoveField(
model_name='allelecolor',
name='color',
),
]
|
|
6a889eea6953e8ecb89c0cbac0656f5d7e274669
|
project/creditor/management/commands/update_membershipfees.py
|
project/creditor/management/commands/update_membershipfees.py
|
# -*- coding: utf-8 -*-
import datetime
import dateutil.parser
from creditor.models import RecurringTransaction, TransactionTag
from creditor.tests.fixtures.recurring import MembershipfeeFactory
from django.core.management.base import BaseCommand, CommandError
from members.models import Member
class Command(BaseCommand):
help = 'Update membership fee RecurringTransactions'
def add_arguments(self, parser):
parser.add_argument('oldamount', type=int)
parser.add_argument('cutoffdate', type=str)
parser.add_argument('newamount', type=int)
def handle(self, *args, **options):
cutoff_dt = dateutil.parser.parse(options['cutoffdate'])
end_dt = cutoff_dt - datetime.timedelta(minutes=1)
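        # End the old fee one minute before the cutoff so it does not overlap
        # the replacement transaction that starts at cutoff_dt.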
tgt_tag = TransactionTag.objects.get(label='Membership fee', tmatch='1')
for rt in RecurringTransaction.objects.filter(
rtype=RecurringTransaction.YEARLY,
tag=tgt_tag,
end=None,
start__lt=cutoff_dt,
amount=options['oldamount']
):
rt.end = end_dt
rt.save()
newrt = MembershipfeeFactory.create(amount=options['newamount'], start=cutoff_dt, end=None, owner=rt.owner)
if options['verbosity'] > 0:
print("Generated RecurringTransaction %s" % newrt)
|
Add initial version of membership fee updater
|
Add initial version of membership fee updater
|
Python
|
mit
|
jautero/asylum,hacklab-fi/asylum,HelsinkiHacklab/asylum,jautero/asylum,HelsinkiHacklab/asylum,hacklab-fi/asylum,hacklab-fi/asylum,rambo/asylum,HelsinkiHacklab/asylum,jautero/asylum,jautero/asylum,HelsinkiHacklab/asylum,rambo/asylum,rambo/asylum,rambo/asylum,hacklab-fi/asylum
|
Add initial version of membership fee updater
|
# -*- coding: utf-8 -*-
import datetime
import dateutil.parser
from creditor.models import RecurringTransaction, TransactionTag
from creditor.tests.fixtures.recurring import MembershipfeeFactory
from django.core.management.base import BaseCommand, CommandError
from members.models import Member
class Command(BaseCommand):
help = 'Update membership fee RecurringTransactions'
def add_arguments(self, parser):
parser.add_argument('oldamount', type=int)
parser.add_argument('cutoffdate', type=str)
parser.add_argument('newamount', type=int)
def handle(self, *args, **options):
cutoff_dt = dateutil.parser.parse(options['cutoffdate'])
end_dt = cutoff_dt - datetime.timedelta(minutes=1)
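        # End the old fee one minute before the cutoff so it does not overlap
        # the replacement transaction that starts at cutoff_dt.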
tgt_tag = TransactionTag.objects.get(label='Membership fee', tmatch='1')
for rt in RecurringTransaction.objects.filter(
rtype=RecurringTransaction.YEARLY,
tag=tgt_tag,
end=None,
start__lt=cutoff_dt,
amount=options['oldamount']
):
rt.end = end_dt
rt.save()
newrt = MembershipfeeFactory.create(amount=options['newamount'], start=cutoff_dt, end=None, owner=rt.owner)
if options['verbosity'] > 0:
print("Generated RecurringTransaction %s" % newrt)
|
<commit_before><commit_msg>Add initial version of membership fee updater<commit_after>
|
# -*- coding: utf-8 -*-
import datetime
import dateutil.parser
from creditor.models import RecurringTransaction, TransactionTag
from creditor.tests.fixtures.recurring import MembershipfeeFactory
from django.core.management.base import BaseCommand, CommandError
from members.models import Member
class Command(BaseCommand):
help = 'Update membership fee RecurringTransactions'
def add_arguments(self, parser):
parser.add_argument('oldamount', type=int)
parser.add_argument('cutoffdate', type=str)
parser.add_argument('newamount', type=int)
def handle(self, *args, **options):
cutoff_dt = dateutil.parser.parse(options['cutoffdate'])
end_dt = cutoff_dt - datetime.timedelta(minutes=1)
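        # End the old fee one minute before the cutoff so it does not overlap
        # the replacement transaction that starts at cutoff_dt.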
tgt_tag = TransactionTag.objects.get(label='Membership fee', tmatch='1')
for rt in RecurringTransaction.objects.filter(
rtype=RecurringTransaction.YEARLY,
tag=tgt_tag,
end=None,
start__lt=cutoff_dt,
amount=options['oldamount']
):
rt.end = end_dt
rt.save()
newrt = MembershipfeeFactory.create(amount=options['newamount'], start=cutoff_dt, end=None, owner=rt.owner)
if options['verbosity'] > 0:
print("Generated RecurringTransaction %s" % newrt)
|
Add initial version of membership fee updater# -*- coding: utf-8 -*-
import datetime
import dateutil.parser
from creditor.models import RecurringTransaction, TransactionTag
from creditor.tests.fixtures.recurring import MembershipfeeFactory
from django.core.management.base import BaseCommand, CommandError
from members.models import Member
class Command(BaseCommand):
help = 'Update membership fee RecurringTransactions'
def add_arguments(self, parser):
parser.add_argument('oldamount', type=int)
parser.add_argument('cutoffdate', type=str)
parser.add_argument('newamount', type=int)
def handle(self, *args, **options):
cutoff_dt = dateutil.parser.parse(options['cutoffdate'])
end_dt = cutoff_dt - datetime.timedelta(minutes=1)
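        # End the old fee one minute before the cutoff so it does not overlap
        # the replacement transaction that starts at cutoff_dt.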
tgt_tag = TransactionTag.objects.get(label='Membership fee', tmatch='1')
for rt in RecurringTransaction.objects.filter(
rtype=RecurringTransaction.YEARLY,
tag=tgt_tag,
end=None,
start__lt=cutoff_dt,
amount=options['oldamount']
):
rt.end = end_dt
rt.save()
newrt = MembershipfeeFactory.create(amount=options['newamount'], start=cutoff_dt, end=None, owner=rt.owner)
if options['verbosity'] > 0:
print("Generated RecurringTransaction %s" % newrt)
|
<commit_before><commit_msg>Add initial version of membership fee updater<commit_after># -*- coding: utf-8 -*-
import datetime
import dateutil.parser
from creditor.models import RecurringTransaction, TransactionTag
from creditor.tests.fixtures.recurring import MembershipfeeFactory
from django.core.management.base import BaseCommand, CommandError
from members.models import Member
class Command(BaseCommand):
help = 'Update membership fee RecurringTransactions'
def add_arguments(self, parser):
parser.add_argument('oldamount', type=int)
parser.add_argument('cutoffdate', type=str)
parser.add_argument('newamount', type=int)
def handle(self, *args, **options):
cutoff_dt = dateutil.parser.parse(options['cutoffdate'])
end_dt = cutoff_dt - datetime.timedelta(minutes=1)
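        # End the old fee one minute before the cutoff so it does not overlap
        # the replacement transaction that starts at cutoff_dt.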
tgt_tag = TransactionTag.objects.get(label='Membership fee', tmatch='1')
for rt in RecurringTransaction.objects.filter(
rtype=RecurringTransaction.YEARLY,
tag=tgt_tag,
end=None,
start__lt=cutoff_dt,
amount=options['oldamount']
):
rt.end = end_dt
rt.save()
newrt = MembershipfeeFactory.create(amount=options['newamount'], start=cutoff_dt, end=None, owner=rt.owner)
if options['verbosity'] > 0:
print("Generated RecurringTransaction %s" % newrt)
|
|
71a3fc92f947aa4ae2041829f47b9dad617b3532
|
pylearn2/scripts/tests/test_show_weights.py
|
pylearn2/scripts/tests/test_show_weights.py
|
"""
Tests for the show_weights.py script
"""
import cPickle
import os
from pylearn2.testing.skip import skip_if_no_matplotlib
from pylearn2.models.mlp import MLP, Linear
from pylearn2.scripts.show_weights import show_weights
def test_show_weights():
"""
Create a pickled model and show the weights
"""
skip_if_no_matplotlib()
with open('model.pkl', 'wb') as f:
model = MLP(layers=[Linear(dim=1, layer_name='h0', irange=0.1)],
nvis=784)
model.dataset_yaml_src = """
!obj:pylearn2.datasets.mnist.MNIST {
which_set: 'train'
}
"""
cPickle.dump(model, f, protocol=cPickle.HIGHEST_PROTOCOL)
show_weights('model.pkl', rescale='individual',
border=True, out='garbage.png')
os.remove('model.pkl')
os.remove('garbage.png')
|
Add unit test for show_weights.py
|
Add unit test for show_weights.py
|
Python
|
bsd-3-clause
|
mclaughlin6464/pylearn2,lamblin/pylearn2,KennethPierce/pylearnk,lamblin/pylearn2,pkainz/pylearn2,caidongyun/pylearn2,hyqneuron/pylearn2-maxsom,woozzu/pylearn2,kastnerkyle/pylearn2,sandeepkbhat/pylearn2,matrogers/pylearn2,lunyang/pylearn2,alexjc/pylearn2,fishcorn/pylearn2,ddboline/pylearn2,pkainz/pylearn2,pombredanne/pylearn2,sandeepkbhat/pylearn2,cosmoharrigan/pylearn2,woozzu/pylearn2,fulmicoton/pylearn2,kastnerkyle/pylearn2,TNick/pylearn2,sandeepkbhat/pylearn2,lamblin/pylearn2,fyffyt/pylearn2,sandeepkbhat/pylearn2,lunyang/pylearn2,daemonmaker/pylearn2,theoryno3/pylearn2,CIFASIS/pylearn2,mclaughlin6464/pylearn2,msingh172/pylearn2,shiquanwang/pylearn2,fyffyt/pylearn2,shiquanwang/pylearn2,se4u/pylearn2,pombredanne/pylearn2,jeremyfix/pylearn2,kose-y/pylearn2,TNick/pylearn2,jeremyfix/pylearn2,lunyang/pylearn2,lancezlin/pylearn2,Refefer/pylearn2,fulmicoton/pylearn2,woozzu/pylearn2,matrogers/pylearn2,mkraemer67/pylearn2,daemonmaker/pylearn2,hantek/pylearn2,bartvm/pylearn2,lancezlin/pylearn2,CIFASIS/pylearn2,theoryno3/pylearn2,abergeron/pylearn2,TNick/pylearn2,bartvm/pylearn2,jamessergeant/pylearn2,jeremyfix/pylearn2,w1kke/pylearn2,w1kke/pylearn2,shiquanwang/pylearn2,mclaughlin6464/pylearn2,Refefer/pylearn2,hyqneuron/pylearn2-maxsom,se4u/pylearn2,nouiz/pylearn2,Refefer/pylearn2,abergeron/pylearn2,cosmoharrigan/pylearn2,fishcorn/pylearn2,ddboline/pylearn2,goodfeli/pylearn2,fishcorn/pylearn2,hantek/pylearn2,mclaughlin6464/pylearn2,junbochen/pylearn2,aalmah/pylearn2,junbochen/pylearn2,caidongyun/pylearn2,chrish42/pylearn,KennethPierce/pylearnk,aalmah/pylearn2,se4u/pylearn2,chrish42/pylearn,jeremyfix/pylearn2,kose-y/pylearn2,abergeron/pylearn2,fyffyt/pylearn2,theoryno3/pylearn2,nouiz/pylearn2,mkraemer67/pylearn2,matrogers/pylearn2,ashhher3/pylearn2,cosmoharrigan/pylearn2,fulmicoton/pylearn2,alexjc/pylearn2,alexjc/pylearn2,goodfeli/pylearn2,skearnes/pylearn2,TNick/pylearn2,pombredanne/pylearn2,chrish42/pylearn,theoryno3/pylearn2,fyffyt/pylearn2,kastnerkyle/pylearn2,KennethPierce/pylearnk,daemonmaker/pylearn2,w1kke/pylearn2,fulmicoton/pylearn2,hantek/pylearn2,lisa-lab/pylearn2,goodfeli/pylearn2,kastnerkyle/pylearn2,kose-y/pylearn2,lancezlin/pylearn2,ashhher3/pylearn2,pkainz/pylearn2,woozzu/pylearn2,pkainz/pylearn2,fishcorn/pylearn2,CIFASIS/pylearn2,JesseLivezey/plankton,ashhher3/pylearn2,junbochen/pylearn2,jamessergeant/pylearn2,alexjc/pylearn2,goodfeli/pylearn2,cosmoharrigan/pylearn2,msingh172/pylearn2,shiquanwang/pylearn2,nouiz/pylearn2,abergeron/pylearn2,JesseLivezey/plankton,daemonmaker/pylearn2,ddboline/pylearn2,JesseLivezey/plankton,msingh172/pylearn2,mkraemer67/pylearn2,JesseLivezey/pylearn2,Refefer/pylearn2,junbochen/pylearn2,lancezlin/pylearn2,skearnes/pylearn2,mkraemer67/pylearn2,hantek/pylearn2,KennethPierce/pylearnk,lamblin/pylearn2,JesseLivezey/pylearn2,matrogers/pylearn2,se4u/pylearn2,pombredanne/pylearn2,msingh172/pylearn2,w1kke/pylearn2,lisa-lab/pylearn2,jamessergeant/pylearn2,nouiz/pylearn2,bartvm/pylearn2,lisa-lab/pylearn2,hyqneuron/pylearn2-maxsom,lunyang/pylearn2,hyqneuron/pylearn2-maxsom,lisa-lab/pylearn2,JesseLivezey/plankton,skearnes/pylearn2,chrish42/pylearn,aalmah/pylearn2,caidongyun/pylearn2,skearnes/pylearn2,caidongyun/pylearn2,bartvm/pylearn2,ashhher3/pylearn2,JesseLivezey/pylearn2,aalmah/pylearn2,JesseLivezey/pylearn2,kose-y/pylearn2,ddboline/pylearn2,jamessergeant/pylearn2,CIFASIS/pylearn2
|
Add unit test for show_weights.py
|
"""
Tests for the show_weights.py script
"""
import cPickle
import os
from pylearn2.testing.skip import skip_if_no_matplotlib
from pylearn2.models.mlp import MLP, Linear
from pylearn2.scripts.show_weights import show_weights
def test_show_weights():
"""
Create a pickled model and show the weights
"""
skip_if_no_matplotlib()
with open('model.pkl', 'wb') as f:
model = MLP(layers=[Linear(dim=1, layer_name='h0', irange=0.1)],
nvis=784)
model.dataset_yaml_src = """
!obj:pylearn2.datasets.mnist.MNIST {
which_set: 'train'
}
"""
cPickle.dump(model, f, protocol=cPickle.HIGHEST_PROTOCOL)
show_weights('model.pkl', rescale='individual',
border=True, out='garbage.png')
os.remove('model.pkl')
os.remove('garbage.png')
|
<commit_before><commit_msg>Add unit test for show_weights.py<commit_after>
|
"""
Tests for the show_weights.py script
"""
import cPickle
import os
from pylearn2.testing.skip import skip_if_no_matplotlib
from pylearn2.models.mlp import MLP, Linear
from pylearn2.scripts.show_weights import show_weights
def test_show_weights():
"""
Create a pickled model and show the weights
"""
skip_if_no_matplotlib()
with open('model.pkl', 'wb') as f:
model = MLP(layers=[Linear(dim=1, layer_name='h0', irange=0.1)],
nvis=784)
model.dataset_yaml_src = """
!obj:pylearn2.datasets.mnist.MNIST {
which_set: 'train'
}
"""
cPickle.dump(model, f, protocol=cPickle.HIGHEST_PROTOCOL)
show_weights('model.pkl', rescale='individual',
border=True, out='garbage.png')
os.remove('model.pkl')
os.remove('garbage.png')
|
Add unit test for show_weights.py"""
Tests for the show_weights.py script
"""
import cPickle
import os
from pylearn2.testing.skip import skip_if_no_matplotlib
from pylearn2.models.mlp import MLP, Linear
from pylearn2.scripts.show_weights import show_weights
def test_show_weights():
"""
Create a pickled model and show the weights
"""
skip_if_no_matplotlib()
with open('model.pkl', 'wb') as f:
model = MLP(layers=[Linear(dim=1, layer_name='h0', irange=0.1)],
nvis=784)
model.dataset_yaml_src = """
!obj:pylearn2.datasets.mnist.MNIST {
which_set: 'train'
}
"""
cPickle.dump(model, f, protocol=cPickle.HIGHEST_PROTOCOL)
show_weights('model.pkl', rescale='individual',
border=True, out='garbage.png')
os.remove('model.pkl')
os.remove('garbage.png')
|
<commit_before><commit_msg>Add unit test for show_weights.py<commit_after>"""
Tests for the show_weights.py script
"""
import cPickle
import os
from pylearn2.testing.skip import skip_if_no_matplotlib
from pylearn2.models.mlp import MLP, Linear
from pylearn2.scripts.show_weights import show_weights
def test_show_weights():
"""
Create a pickled model and show the weights
"""
skip_if_no_matplotlib()
with open('model.pkl', 'wb') as f:
model = MLP(layers=[Linear(dim=1, layer_name='h0', irange=0.1)],
nvis=784)
model.dataset_yaml_src = """
!obj:pylearn2.datasets.mnist.MNIST {
which_set: 'train'
}
"""
cPickle.dump(model, f, protocol=cPickle.HIGHEST_PROTOCOL)
show_weights('model.pkl', rescale='individual',
border=True, out='garbage.png')
os.remove('model.pkl')
os.remove('garbage.png')
|
|
7c2b2fca21424dda2633b152a49d8b2350eff3de
|
moniker/tests/test_api/test_auth.py
|
moniker/tests/test_api/test_auth.py
|
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from moniker.tests.test_api import ApiTestCase
from moniker.api import auth
class KeystoneContextMiddlewareTest(ApiTestCase):
__test__ = True
def test_process_request(self):
class FakeRequest(object):
headers = {}
environ = {}
app = {}
middleware = auth.KeystoneContextMiddleware(app)
request = FakeRequest()
request.headers = {
'X-Auth-Token': 'AuthToken',
'X-User-ID': 'UserID',
'X-Tenant-ID': 'TenantID',
'X-Roles': 'admin,Member',
}
# Process the request
middleware.process_request(request)
self.assertIn('context', request.environ)
context = request.environ['context']
self.assertFalse(context.is_admin)
self.assertEqual('AuthToken', context.auth_tok)
self.assertEqual('UserID', context.user_id)
self.assertEqual('TenantID', context.tenant_id)
self.assertEqual(['admin', 'Member'], context.roles)
class NoAuthMiddlewareTest(ApiTestCase):
__test__ = True
def test_process_request(self):
class FakeRequest(object):
headers = {}
environ = {}
app = {}
middleware = auth.NoAuthMiddleware(app)
request = FakeRequest()
# Process the request
middleware.process_request(request)
self.assertIn('context', request.environ)
context = request.environ['context']
self.assertTrue(context.is_admin)
self.assertIsNone(context.auth_tok)
self.assertIsNone(context.user_id)
self.assertIsNone(context.tenant_id)
self.assertEqual([], context.roles)
|
Add tests for KeystoneContextMiddleware and NoAuthMiddleware
|
Add tests for KeystoneContextMiddleware and NoAuthMiddleware
Change-Id: I3fa40ae111c48810f1f2c5774925c1460c958163
|
Python
|
apache-2.0
|
ramsateesh/designate,cneill/designate,melodous/designate,ramsateesh/designate,richm/designate,NeCTAR-RC/designate,melodous/designate,openstack/designate,cneill/designate,richm/designate,kiall/designate-py3,kiall/designate-py3,cneill/designate-testing,ramsateesh/designate,grahamhayes/designate,muraliselva10/designate,grahamhayes/designate,kiall/designate-py3,muraliselva10/designate,cneill/designate-testing,kiall/designate-py3,cneill/designate-testing,kiall/designate-py3,NeCTAR-RC/designate,tonyli71/designate,openstack/designate,cneill/designate,melodous/designate,melodous/designate,cneill/designate,openstack/designate,grahamhayes/designate,tonyli71/designate,ionrock/designate,cneill/designate,ionrock/designate,muraliselva10/designate,ionrock/designate,tonyli71/designate
|
Add tests for KeystoneContextMiddleware and NoAuthMiddleware
Change-Id: I3fa40ae111c48810f1f2c5774925c1460c958163
|
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from moniker.tests.test_api import ApiTestCase
from moniker.api import auth
class KeystoneContextMiddlewareTest(ApiTestCase):
__test__ = True
def test_process_request(self):
class FakeRequest(object):
headers = {}
environ = {}
app = {}
middleware = auth.KeystoneContextMiddleware(app)
request = FakeRequest()
request.headers = {
'X-Auth-Token': 'AuthToken',
'X-User-ID': 'UserID',
'X-Tenant-ID': 'TenantID',
'X-Roles': 'admin,Member',
}
# Process the request
middleware.process_request(request)
self.assertIn('context', request.environ)
context = request.environ['context']
self.assertFalse(context.is_admin)
self.assertEqual('AuthToken', context.auth_tok)
self.assertEqual('UserID', context.user_id)
self.assertEqual('TenantID', context.tenant_id)
self.assertEqual(['admin', 'Member'], context.roles)
class NoAuthMiddlewareTest(ApiTestCase):
__test__ = True
def test_process_request(self):
class FakeRequest(object):
headers = {}
environ = {}
app = {}
middleware = auth.NoAuthMiddleware(app)
request = FakeRequest()
# Process the request
middleware.process_request(request)
self.assertIn('context', request.environ)
context = request.environ['context']
self.assertTrue(context.is_admin)
self.assertIsNone(context.auth_tok)
self.assertIsNone(context.user_id)
self.assertIsNone(context.tenant_id)
self.assertEqual([], context.roles)
|
<commit_before><commit_msg>Add tests for KeystoneContextMiddleware and NoAuthMiddleware
Change-Id: I3fa40ae111c48810f1f2c5774925c1460c958163<commit_after>
|
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from moniker.tests.test_api import ApiTestCase
from moniker.api import auth
class KeystoneContextMiddlewareTest(ApiTestCase):
__test__ = True
def test_process_request(self):
class FakeRequest(object):
headers = {}
environ = {}
app = {}
middleware = auth.KeystoneContextMiddleware(app)
request = FakeRequest()
request.headers = {
'X-Auth-Token': 'AuthToken',
'X-User-ID': 'UserID',
'X-Tenant-ID': 'TenantID',
'X-Roles': 'admin,Member',
}
# Process the request
middleware.process_request(request)
self.assertIn('context', request.environ)
context = request.environ['context']
self.assertFalse(context.is_admin)
self.assertEqual('AuthToken', context.auth_tok)
self.assertEqual('UserID', context.user_id)
self.assertEqual('TenantID', context.tenant_id)
self.assertEqual(['admin', 'Member'], context.roles)
class NoAuthMiddlewareTest(ApiTestCase):
__test__ = True
def test_process_request(self):
class FakeRequest(object):
headers = {}
environ = {}
app = {}
middleware = auth.NoAuthMiddleware(app)
request = FakeRequest()
# Process the request
middleware.process_request(request)
self.assertIn('context', request.environ)
context = request.environ['context']
self.assertTrue(context.is_admin)
self.assertIsNone(context.auth_tok)
self.assertIsNone(context.user_id)
self.assertIsNone(context.tenant_id)
self.assertEqual([], context.roles)
|
Add tests for KeystoneContextMiddleware and NoAuthMiddleware
Change-Id: I3fa40ae111c48810f1f2c5774925c1460c958163# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from moniker.tests.test_api import ApiTestCase
from moniker.api import auth
class KeystoneContextMiddlewareTest(ApiTestCase):
__test__ = True
def test_process_request(self):
class FakeRequest(object):
headers = {}
environ = {}
app = {}
middleware = auth.KeystoneContextMiddleware(app)
request = FakeRequest()
request.headers = {
'X-Auth-Token': 'AuthToken',
'X-User-ID': 'UserID',
'X-Tenant-ID': 'TenantID',
'X-Roles': 'admin,Member',
}
# Process the request
middleware.process_request(request)
self.assertIn('context', request.environ)
context = request.environ['context']
self.assertFalse(context.is_admin)
self.assertEqual('AuthToken', context.auth_tok)
self.assertEqual('UserID', context.user_id)
self.assertEqual('TenantID', context.tenant_id)
self.assertEqual(['admin', 'Member'], context.roles)
class NoAuthMiddlewareTest(ApiTestCase):
__test__ = True
def test_process_request(self):
class FakeRequest(object):
headers = {}
environ = {}
app = {}
middleware = auth.NoAuthMiddleware(app)
request = FakeRequest()
# Process the request
middleware.process_request(request)
self.assertIn('context', request.environ)
context = request.environ['context']
self.assertTrue(context.is_admin)
self.assertIsNone(context.auth_tok)
self.assertIsNone(context.user_id)
self.assertIsNone(context.tenant_id)
self.assertEqual([], context.roles)
|
<commit_before><commit_msg>Add tests for KeystoneContextMiddleware and NoAuthMiddleware
Change-Id: I3fa40ae111c48810f1f2c5774925c1460c958163<commit_after># Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from moniker.tests.test_api import ApiTestCase
from moniker.api import auth
class KeystoneContextMiddlewareTest(ApiTestCase):
__test__ = True
def test_process_request(self):
class FakeRequest(object):
headers = {}
environ = {}
app = {}
middleware = auth.KeystoneContextMiddleware(app)
request = FakeRequest()
request.headers = {
'X-Auth-Token': 'AuthToken',
'X-User-ID': 'UserID',
'X-Tenant-ID': 'TenantID',
'X-Roles': 'admin,Member',
}
# Process the request
middleware.process_request(request)
self.assertIn('context', request.environ)
context = request.environ['context']
self.assertFalse(context.is_admin)
self.assertEqual('AuthToken', context.auth_tok)
self.assertEqual('UserID', context.user_id)
self.assertEqual('TenantID', context.tenant_id)
self.assertEqual(['admin', 'Member'], context.roles)
class NoAuthMiddlewareTest(ApiTestCase):
__test__ = True
def test_process_request(self):
class FakeRequest(object):
headers = {}
environ = {}
app = {}
middleware = auth.NoAuthMiddleware(app)
request = FakeRequest()
# Process the request
middleware.process_request(request)
self.assertIn('context', request.environ)
context = request.environ['context']
self.assertTrue(context.is_admin)
self.assertIsNone(context.auth_tok)
self.assertIsNone(context.user_id)
self.assertIsNone(context.tenant_id)
self.assertEqual([], context.roles)
|
|
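The two test classes above pin down the expected behaviour of the middlewares without showing them. For orientation, here is a minimal sketch that would satisfy those assertions; the RequestContext class and its keyword arguments are assumptions made for illustration, not code taken from the moniker tree.

# Minimal, hypothetical sketch of the middlewares under test.
class RequestContext(object):
    def __init__(self, auth_tok=None, user_id=None, tenant_id=None,
                 roles=None, is_admin=False):
        self.auth_tok = auth_tok
        self.user_id = user_id
        self.tenant_id = tenant_id
        self.roles = roles if roles is not None else []
        self.is_admin = is_admin


class KeystoneContextMiddleware(object):
    def __init__(self, app):
        self.app = app

    def process_request(self, request):
        headers = request.headers
        # Build a per-request context from the headers added by Keystone's
        # auth_token middleware; a comma-separated X-Roles header becomes a list.
        request.environ['context'] = RequestContext(
            auth_tok=headers.get('X-Auth-Token'),
            user_id=headers.get('X-User-ID'),
            tenant_id=headers.get('X-Tenant-ID'),
            roles=[r.strip() for r in headers.get('X-Roles', '').split(',') if r.strip()],
        )


class NoAuthMiddleware(object):
    def __init__(self, app):
        self.app = app

    def process_request(self, request):
        # With no auth information available, grant an anonymous admin context.
        request.environ['context'] = RequestContext(is_admin=True)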
06294648da65a1303601a3bc69bc341c59eab9a9
|
setup.py
|
setup.py
|
from distutils.core import setup
setup(
name='udiskie',
version='0.4.1',
description='Removable disk automounter for udisks',
author='Byron Clark',
author_email='byron@theclarkfamily.name',
url='http://bitbucket.org/byronclark/udiskie',
license='MIT',
packages=[
'udiskie',
],
scripts=[
'bin/udiskie',
'bin/udiskie-umount',
],
)
|
from distutils.core import setup
setup(
name='udiskie',
version='0.4.2',
description='Removable disk automounter for udisks',
author='Byron Clark',
author_email='byron@theclarkfamily.name',
url='http://bitbucket.org/byronclark/udiskie',
license='MIT',
packages=[
'udiskie',
],
scripts=[
'bin/udiskie',
'bin/udiskie-umount',
],
)
|
Prepare for next development cycle.
|
Prepare for next development cycle.
|
Python
|
mit
|
coldfix/udiskie,khardix/udiskie,coldfix/udiskie,mathstuf/udiskie,pstray/udiskie,pstray/udiskie
|
from distutils.core import setup
setup(
name='udiskie',
version='0.4.1',
description='Removable disk automounter for udisks',
author='Byron Clark',
author_email='byron@theclarkfamily.name',
url='http://bitbucket.org/byronclark/udiskie',
license='MIT',
packages=[
'udiskie',
],
scripts=[
'bin/udiskie',
'bin/udiskie-umount',
],
)
Prepare for next development cycle.
|
from distutils.core import setup
setup(
name='udiskie',
version='0.4.2',
description='Removable disk automounter for udisks',
author='Byron Clark',
author_email='byron@theclarkfamily.name',
url='http://bitbucket.org/byronclark/udiskie',
license='MIT',
packages=[
'udiskie',
],
scripts=[
'bin/udiskie',
'bin/udiskie-umount',
],
)
|
<commit_before>from distutils.core import setup
setup(
name='udiskie',
version='0.4.1',
description='Removable disk automounter for udisks',
author='Byron Clark',
author_email='byron@theclarkfamily.name',
url='http://bitbucket.org/byronclark/udiskie',
license='MIT',
packages=[
'udiskie',
],
scripts=[
'bin/udiskie',
'bin/udiskie-umount',
],
)
<commit_msg>Prepare for next development cycle.<commit_after>
|
from distutils.core import setup
setup(
name='udiskie',
version='0.4.2',
description='Removable disk automounter for udisks',
author='Byron Clark',
author_email='byron@theclarkfamily.name',
url='http://bitbucket.org/byronclark/udiskie',
license='MIT',
packages=[
'udiskie',
],
scripts=[
'bin/udiskie',
'bin/udiskie-umount',
],
)
|
from distutils.core import setup
setup(
name='udiskie',
version='0.4.1',
description='Removable disk automounter for udisks',
author='Byron Clark',
author_email='byron@theclarkfamily.name',
url='http://bitbucket.org/byronclark/udiskie',
license='MIT',
packages=[
'udiskie',
],
scripts=[
'bin/udiskie',
'bin/udiskie-umount',
],
)
Prepare for next development cycle.
from distutils.core import setup
setup(
name='udiskie',
version='0.4.2',
description='Removable disk automounter for udisks',
author='Byron Clark',
author_email='byron@theclarkfamily.name',
url='http://bitbucket.org/byronclark/udiskie',
license='MIT',
packages=[
'udiskie',
],
scripts=[
'bin/udiskie',
'bin/udiskie-umount',
],
)
|
<commit_before>from distutils.core import setup
setup(
name='udiskie',
version='0.4.1',
description='Removable disk automounter for udisks',
author='Byron Clark',
author_email='byron@theclarkfamily.name',
url='http://bitbucket.org/byronclark/udiskie',
license='MIT',
packages=[
'udiskie',
],
scripts=[
'bin/udiskie',
'bin/udiskie-umount',
],
)
<commit_msg>Prepare for next development cycle.<commit_after>from distutils.core import setup
setup(
name='udiskie',
version='0.4.2',
description='Removable disk automounter for udisks',
author='Byron Clark',
author_email='byron@theclarkfamily.name',
url='http://bitbucket.org/byronclark/udiskie',
license='MIT',
packages=[
'udiskie',
],
scripts=[
'bin/udiskie',
'bin/udiskie-umount',
],
)
|
7468494e5a604c138c8a9ad777eed9c9550c354f
|
docker_test/network_test.py
|
docker_test/network_test.py
|
# coding=utf-8
import unittest
import json
from docker import session
from docker import network
from docker_test import base_test
class NetworkTest(unittest.TestCase):
def setUp(self):
self.c_session = session.get_session(base_test.session_url)
self.n = network.Network(self.c_session)
response = self.n.create(base_test.network_name, 'bridge')
status_code = response.get('status_code')
if status_code != 201:
self.fail('Create network FAIL : ' + str(status_code))
def tearDown(self):
response = self.n.remove(base_test.network_name)
status_code = response.get('status_code')
if status_code != 200:
self.fail('Remove network FAIL : ' + str(status_code))
def test_list(self):
response = self.n.list()
status_code = response.get('status_code')
if status_code == 200:
volume_list = response.get('content')
self.assertGreater(len(volume_list), 0)
else:
self.fail('list: list network fail, status_code : ' + str(status_code))
if base_test.print_json:
print 'list:' + json.dumps(response)
def test_inspect(self):
response = self.n.inspect(base_test.network_name)
status_code = response.get('status_code')
if status_code == 200:
volume_info = response.get('content')
self.assertEqual(volume_info.get('Name'), base_test.network_name)
self.assertEqual(volume_info.get('Driver'), 'bridge')
else:
self.fail('inspect : get network {0} fail, status_code : {1}'.format(base_test.network_name,
str(status_code)))
if base_test.print_json:
print 'inspect:' + json.dumps(response)
|
Add network api test case
|
Add network api test case
|
Python
|
apache-2.0
|
interhui/docker-api,PinaeCloud/docker-api
|
Add network api test case
|
# coding=utf-8
import unittest
import json
from docker import session
from docker import network
from docker_test import base_test
class NetworkTest(unittest.TestCase):
def setUp(self):
self.c_session = session.get_session(base_test.session_url)
self.n = network.Network(self.c_session)
response = self.n.create(base_test.network_name, 'bridge')
status_code = response.get('status_code')
if status_code != 201:
self.fail('Create network FAIL : ' + str(status_code))
def tearDown(self):
response = self.n.remove(base_test.network_name)
status_code = response.get('status_code')
if status_code != 200:
self.fail('Remove network FAIL : ' + str(status_code))
def test_list(self):
response = self.n.list()
status_code = response.get('status_code')
if status_code == 200:
volume_list = response.get('content')
self.assertGreater(len(volume_list), 0)
else:
self.fail('list: list network fail, status_code : ' + str(status_code))
if base_test.print_json:
print 'list:' + json.dumps(response)
def test_inspect(self):
response = self.n.inspect(base_test.network_name)
status_code = response.get('status_code')
if status_code == 200:
volume_info = response.get('content')
self.assertEqual(volume_info.get('Name'), base_test.network_name)
self.assertEqual(volume_info.get('Driver'), 'bridge')
else:
self.fail('inspect : get network {0} fail, status_code : {1}'.format(base_test.network_name,
str(status_code)))
if base_test.print_json:
print 'inspect:' + json.dumps(response)
|
<commit_before><commit_msg>Add network api test case<commit_after>
|
# coding=utf-8
import unittest
import json
from docker import session
from docker import network
from docker_test import base_test
class NetworkTest(unittest.TestCase):
def setUp(self):
self.c_session = session.get_session(base_test.session_url)
self.n = network.Network(self.c_session)
response = self.n.create(base_test.network_name, 'bridge')
status_code = response.get('status_code')
if status_code != 201:
self.fail('Create network FAIL : ' + str(status_code))
def tearDown(self):
response = self.n.remove(base_test.network_name)
status_code = response.get('status_code')
if status_code != 200:
self.fail('Remove network FAIL : ' + str(status_code))
def test_list(self):
response = self.n.list()
status_code = response.get('status_code')
if status_code == 200:
volume_list = response.get('content')
self.assertGreater(len(volume_list), 0)
else:
self.fail('list: list network fail, status_code : ' + str(status_code))
if base_test.print_json:
print 'list:' + json.dumps(response)
def test_inspect(self):
response = self.n.inspect(base_test.network_name)
status_code = response.get('status_code')
if status_code == 200:
volume_info = response.get('content')
self.assertEqual(volume_info.get('Name'), base_test.network_name)
self.assertEqual(volume_info.get('Driver'), 'bridge')
else:
self.fail('inspect : get network {0} fail, status_code : {1}'.format(base_test.network_name,
str(status_code)))
if base_test.print_json:
print 'inspect:' + json.dumps(response)
|
Add network api test case
# coding=utf-8
import unittest
import json
from docker import session
from docker import network
from docker_test import base_test
class NetworkTest(unittest.TestCase):
def setUp(self):
self.c_session = session.get_session(base_test.session_url)
self.n = network.Network(self.c_session)
response = self.n.create(base_test.network_name, 'bridge')
status_code = response.get('status_code')
if status_code != 201:
self.fail('Create network FAIL : ' + str(status_code))
def tearDown(self):
response = self.n.remove(base_test.network_name)
status_code = response.get('status_code')
if status_code != 200:
self.fail('Remove network FAIL : ' + str(status_code))
def test_list(self):
response = self.n.list()
status_code = response.get('status_code')
if status_code == 200:
volume_list = response.get('content')
self.assertGreater(len(volume_list), 0)
else:
self.fail('list: list network fail, status_code : ' + str(status_code))
if base_test.print_json:
print 'list:' + json.dumps(response)
def test_inspect(self):
response = self.n.inspect(base_test.network_name)
status_code = response.get('status_code')
if status_code == 200:
volume_info = response.get('content')
self.assertEqual(volume_info.get('Name'), base_test.network_name)
self.assertEqual(volume_info.get('Driver'), 'bridge')
else:
self.fail('inspect : get network {0} fail, status_code : {1}'.format(base_test.network_name,
str(status_code)))
if base_test.print_json:
print 'inspect:' + json.dumps(response)
|
<commit_before><commit_msg>Add network api test case<commit_after># coding=utf-8
import unittest
import json
from docker import session
from docker import network
from docker_test import base_test
class NetworkTest(unittest.TestCase):
def setUp(self):
self.c_session = session.get_session(base_test.session_url)
self.n = network.Network(self.c_session)
response = self.n.create(base_test.network_name, 'bridge')
status_code = response.get('status_code')
if status_code != 201:
self.fail('Create network FAIL : ' + str(status_code))
def tearDown(self):
response = self.n.remove(base_test.network_name)
status_code = response.get('status_code')
if status_code != 200:
self.fail('Remove network FAIL : ' + str(status_code))
def test_list(self):
response = self.n.list()
status_code = response.get('status_code')
if status_code == 200:
volume_list = response.get('content')
self.assertGreater(len(volume_list), 0)
else:
self.fail('list: list network fail, status_code : ' + str(status_code))
if base_test.print_json:
print 'list:' + json.dumps(response)
def test_inspect(self):
response = self.n.inspect(base_test.network_name)
status_code = response.get('status_code')
if status_code == 200:
volume_info = response.get('content')
self.assertEqual(volume_info.get('Name'), base_test.network_name)
self.assertEqual(volume_info.get('Driver'), 'bridge')
else:
self.fail('inspect : get network {0} fail, status_code : {1}'.format(base_test.network_name,
str(status_code)))
if base_test.print_json:
print 'inspect:' + json.dumps(response)
|
|
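The test treats network.Network as a thin wrapper whose methods return a plain dict carrying status_code and content. A hedged sketch of such a wrapper over the Docker Engine /networks REST endpoints is shown below; the endpoint paths follow the public Engine API, while the session handling, base URL and return shape are inferred from the tests rather than taken from this project.

# Hypothetical wrapper matching the shape the tests expect; it assumes the
# session behaves like a requests.Session pointed at a Docker Engine API host.
import requests


class Network(object):
    def __init__(self, session=None, base_url='http://localhost:2375'):
        self.session = session or requests.Session()
        self.base_url = base_url

    def _wrap(self, response):
        # The tests only ever read 'status_code' and 'content' from a dict.
        try:
            content = response.json()
        except ValueError:
            content = None
        return {'status_code': response.status_code, 'content': content}

    def create(self, name, driver):
        return self._wrap(self.session.post(
            self.base_url + '/networks/create',
            json={'Name': name, 'Driver': driver}))

    def list(self):
        return self._wrap(self.session.get(self.base_url + '/networks'))

    def inspect(self, name):
        return self._wrap(self.session.get(self.base_url + '/networks/' + name))

    def remove(self, name):
        return self._wrap(self.session.delete(self.base_url + '/networks/' + name))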
aa985e9a686c4f333f64f6f39759f1cd0cc0f8c2
|
oscar/apps/offer/managers.py
|
oscar/apps/offer/managers.py
|
from django.utils.timezone import now
from django.db import models
class ActiveOfferManager(models.Manager):
"""
For searching/creating offers within their date range
"""
def get_query_set(self):
cutoff = now()
return super(ActiveOfferManager, self).get_query_set().filter(
start_datetime__lte=cutoff, end_datetime__gte=cutoff)
|
from django.utils.timezone import now
from django.db import models
class ActiveOfferManager(models.Manager):
"""
For searching/creating offers within their date range
"""
def get_query_set(self):
today = now()
return super(ActiveOfferManager, self).get_query_set().filter(
models.Q(end_date__gte=today) | models.Q(end_date=None),
start_date__lte=today)
|
Fix bug with date filtering of offers
|
Fix bug with date filtering of offers
Offers with no end date were not being picked up.
|
Python
|
bsd-3-clause
|
kapt/django-oscar,itbabu/django-oscar,eddiep1101/django-oscar,dongguangming/django-oscar,manevant/django-oscar,ahmetdaglarbas/e-commerce,DrOctogon/unwash_ecom,anentropic/django-oscar,mexeniz/django-oscar,binarydud/django-oscar,taedori81/django-oscar,jinnykoo/wuyisj,WillisXChen/django-oscar,QLGu/django-oscar,itbabu/django-oscar,ahmetdaglarbas/e-commerce,vovanbo/django-oscar,amirrpp/django-oscar,nickpack/django-oscar,ademuk/django-oscar,saadatqadri/django-oscar,jinnykoo/wuyisj.com,pdonadeo/django-oscar,amirrpp/django-oscar,taedori81/django-oscar,bnprk/django-oscar,michaelkuty/django-oscar,jlmadurga/django-oscar,Idematica/django-oscar,ademuk/django-oscar,anentropic/django-oscar,Bogh/django-oscar,lijoantony/django-oscar,kapari/django-oscar,vovanbo/django-oscar,john-parton/django-oscar,machtfit/django-oscar,WillisXChen/django-oscar,DrOctogon/unwash_ecom,sonofatailor/django-oscar,spartonia/django-oscar,thechampanurag/django-oscar,MatthewWilkes/django-oscar,thechampanurag/django-oscar,pasqualguerrero/django-oscar,ahmetdaglarbas/e-commerce,WillisXChen/django-oscar,itbabu/django-oscar,ka7eh/django-oscar,machtfit/django-oscar,kapt/django-oscar,MatthewWilkes/django-oscar,mexeniz/django-oscar,bschuon/django-oscar,john-parton/django-oscar,solarissmoke/django-oscar,adamend/django-oscar,anentropic/django-oscar,WadeYuChen/django-oscar,monikasulik/django-oscar,pdonadeo/django-oscar,dongguangming/django-oscar,saadatqadri/django-oscar,Idematica/django-oscar,Jannes123/django-oscar,nfletton/django-oscar,rocopartners/django-oscar,nickpack/django-oscar,manevant/django-oscar,jinnykoo/wuyisj,WillisXChen/django-oscar,monikasulik/django-oscar,saadatqadri/django-oscar,kapari/django-oscar,QLGu/django-oscar,taedori81/django-oscar,jmt4/django-oscar,saadatqadri/django-oscar,adamend/django-oscar,bschuon/django-oscar,ademuk/django-oscar,monikasulik/django-oscar,nickpack/django-oscar,nfletton/django-oscar,binarydud/django-oscar,MatthewWilkes/django-oscar,makielab/django-oscar,faratro/django-oscar,faratro/django-oscar,marcoantoniooliveira/labweb,jlmadurga/django-oscar,rocopartners/django-oscar,django-oscar/django-oscar,vovanbo/django-oscar,makielab/django-oscar,jinnykoo/wuyisj.com,pasqualguerrero/django-oscar,lijoantony/django-oscar,sasha0/django-oscar,Bogh/django-oscar,sasha0/django-oscar,solarissmoke/django-oscar,makielab/django-oscar,bnprk/django-oscar,eddiep1101/django-oscar,jmt4/django-oscar,pdonadeo/django-oscar,adamend/django-oscar,rocopartners/django-oscar,pdonadeo/django-oscar,thechampanurag/django-oscar,WadeYuChen/django-oscar,spartonia/django-oscar,josesanch/django-oscar,manevant/django-oscar,elliotthill/django-oscar,binarydud/django-oscar,Idematica/django-oscar,DrOctogon/unwash_ecom,monikasulik/django-oscar,makielab/django-oscar,jinnykoo/christmas,john-parton/django-oscar,Bogh/django-oscar,spartonia/django-oscar,jlmadurga/django-oscar,ka7eh/django-oscar,pasqualguerrero/django-oscar,kapt/django-oscar,jinnykoo/wuyisj,elliotthill/django-oscar,sonofatailor/django-oscar,ademuk/django-oscar,WadeYuChen/django-oscar,jinnykoo/wuyisj.com,faratro/django-oscar,marcoantoniooliveira/labweb,rocopartners/django-oscar,mexeniz/django-oscar,WadeYuChen/django-oscar,adamend/django-oscar,bschuon/django-oscar,lijoantony/django-oscar,nfletton/django-oscar,lijoantony/django-oscar,faratro/django-oscar,marcoantoniooliveira/labweb,anentropic/django-oscar,ahmetdaglarbas/e-commerce,jmt4/django-oscar,eddiep1101/django-oscar,ka7eh/django-oscar,okfish/django-oscar,dongguangming/django-oscar,binarydud/django-oscar,jlmadurga/django-oscar,michaelkut
y/django-oscar,mexeniz/django-oscar,ka7eh/django-oscar,django-oscar/django-oscar,nfletton/django-oscar,Jannes123/django-oscar,WillisXChen/django-oscar,itbabu/django-oscar,okfish/django-oscar,vovanbo/django-oscar,Jannes123/django-oscar,bnprk/django-oscar,john-parton/django-oscar,sonofatailor/django-oscar,taedori81/django-oscar,jinnykoo/christmas,sasha0/django-oscar,thechampanurag/django-oscar,machtfit/django-oscar,WillisXChen/django-oscar,solarissmoke/django-oscar,jinnykoo/wuyisj.com,sonofatailor/django-oscar,manevant/django-oscar,django-oscar/django-oscar,jinnykoo/wuyisj,bnprk/django-oscar,okfish/django-oscar,Jannes123/django-oscar,QLGu/django-oscar,michaelkuty/django-oscar,spartonia/django-oscar,marcoantoniooliveira/labweb,bschuon/django-oscar,amirrpp/django-oscar,kapari/django-oscar,pasqualguerrero/django-oscar,sasha0/django-oscar,django-oscar/django-oscar,MatthewWilkes/django-oscar,dongguangming/django-oscar,QLGu/django-oscar,solarissmoke/django-oscar,nickpack/django-oscar,kapari/django-oscar,josesanch/django-oscar,jinnykoo/christmas,elliotthill/django-oscar,okfish/django-oscar,Bogh/django-oscar,josesanch/django-oscar,michaelkuty/django-oscar,jmt4/django-oscar,amirrpp/django-oscar,eddiep1101/django-oscar
|
from django.utils.timezone import now
from django.db import models
class ActiveOfferManager(models.Manager):
"""
For searching/creating offers within their date range
"""
def get_query_set(self):
cutoff = now()
return super(ActiveOfferManager, self).get_query_set().filter(
start_datetime__lte=cutoff, end_datetime__gte=cutoff)
Fix bug with date filtering of offers
Offers with no end date were not being picked up.
|
from django.utils.timezone import now
from django.db import models
class ActiveOfferManager(models.Manager):
"""
For searching/creating offers within their date range
"""
def get_query_set(self):
today = now()
return super(ActiveOfferManager, self).get_query_set().filter(
models.Q(end_date__gte=today) | models.Q(end_date=None),
start_date__lte=today)
|
<commit_before>from django.utils.timezone import now
from django.db import models
class ActiveOfferManager(models.Manager):
"""
For searching/creating offers within their date range
"""
def get_query_set(self):
cutoff = now()
return super(ActiveOfferManager, self).get_query_set().filter(
start_datetime__lte=cutoff, end_datetime__gte=cutoff)
<commit_msg>Fix bug with date filtering of offers
Offers with no end date were not being picked up.<commit_after>
|
from django.utils.timezone import now
from django.db import models
class ActiveOfferManager(models.Manager):
"""
For searching/creating offers within their date range
"""
def get_query_set(self):
today = now()
return super(ActiveOfferManager, self).get_query_set().filter(
models.Q(end_date__gte=today) | models.Q(end_date=None),
start_date__lte=today)
|
from django.utils.timezone import now
from django.db import models
class ActiveOfferManager(models.Manager):
"""
For searching/creating offers within their date range
"""
def get_query_set(self):
cutoff = now()
return super(ActiveOfferManager, self).get_query_set().filter(
start_datetime__lte=cutoff, end_datetime__gte=cutoff)
Fix bug with date filtering of offers
Offers with no end date were not being picked up.
from django.utils.timezone import now
from django.db import models
class ActiveOfferManager(models.Manager):
"""
For searching/creating offers within their date range
"""
def get_query_set(self):
today = now()
return super(ActiveOfferManager, self).get_query_set().filter(
models.Q(end_date__gte=today) | models.Q(end_date=None),
start_date__lte=today)
|
<commit_before>from django.utils.timezone import now
from django.db import models
class ActiveOfferManager(models.Manager):
"""
For searching/creating offers within their date range
"""
def get_query_set(self):
cutoff = now()
return super(ActiveOfferManager, self).get_query_set().filter(
start_datetime__lte=cutoff, end_datetime__gte=cutoff)
<commit_msg>Fix bug with date filtering of offers
Offers with no end date were not being picked up.<commit_after>from django.utils.timezone import now
from django.db import models
class ActiveOfferManager(models.Manager):
"""
For searching/creating offers within their date range
"""
def get_query_set(self):
today = now()
return super(ActiveOfferManager, self).get_query_set().filter(
models.Q(end_date__gte=today) | models.Q(end_date=None),
start_date__lte=today)
|
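The fix above illustrates a general pattern worth calling out: a NULL end date means the offer is open-ended, so the upper bound has to be an OR of "ends later than the cutoff" and "has no end at all"; a bare __gte lookup silently drops the NULL rows. A small illustration of the same pattern follows; the Promotion model and its field names are invented for the example, and it assumes a configured Django project.

# Hedged illustration of the open-ended date-range pattern with Q objects.
from django.db import models
from django.utils.timezone import now


class Promotion(models.Model):
    start = models.DateTimeField()
    end = models.DateTimeField(null=True, blank=True)  # NULL means "never expires"

    class Meta:
        app_label = 'example'


def active_promotions():
    cutoff = now()
    # end__gte alone would exclude rows where end is NULL -- exactly the bug
    # fixed above -- so the open-ended case is OR'ed in explicitly.
    return Promotion.objects.filter(
        models.Q(end__gte=cutoff) | models.Q(end__isnull=True),
        start__lte=cutoff,
    )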
4d47aef2b91e77cd8bc295d4166b49d3bfc78b8d
|
watson/apps.py
|
watson/apps.py
|
from django.apps import AppConfig
class WatsonAppConfig(AppConfig):
"""App configuration for watson."""
name = 'watson'
default_auto_field = 'django.db.models.AutoField'
|
Add AppConfig with default_auto_field set
|
Add AppConfig with default_auto_field set
|
Python
|
bsd-3-clause
|
etianen/django-watson,etianen/django-watson
|
Add AppConfig with default_auto_field set
|
from django.apps import AppConfig
class WatsonAppConfig(AppConfig):
"""App configuration for watson."""
name = 'watson'
default_auto_field = 'django.db.models.AutoField'
|
<commit_before><commit_msg>Add AppConfig with default_auto_field set<commit_after>
|
from django.apps import AppConfig
class WatsonAppConfig(AppConfig):
"""App configuration for watson."""
name = 'watson'
default_auto_field = 'django.db.models.AutoField'
|
Add AppConfig with default_auto_field set
from django.apps import AppConfig
class WatsonAppConfig(AppConfig):
"""App configuration for watson."""
name = 'watson'
default_auto_field = 'django.db.models.AutoField'
|
<commit_before><commit_msg>Add AppConfig with default_auto_field set<commit_after>from django.apps import AppConfig
class WatsonAppConfig(AppConfig):
"""App configuration for watson."""
name = 'watson'
default_auto_field = 'django.db.models.AutoField'
|
|
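For context, default_auto_field on an AppConfig (Django 3.2+) pins the implicit primary-key type used for that app's models, so a reusable app such as watson does not grow unwanted migrations in host projects that set a project-wide DEFAULT_AUTO_FIELD of BigAutoField. Below is a short sketch of how such a config is typically wired up; the myapp module path is illustrative only.

# myapp/apps.py -- a reusable app pinning its own pk type.
from django.apps import AppConfig


class MyAppConfig(AppConfig):
    name = 'myapp'
    # Keep plain AutoField pks for this app's models, regardless of the
    # host project's DEFAULT_AUTO_FIELD setting.
    default_auto_field = 'django.db.models.AutoField'


# settings.py of a host project -- either form picks up the config above.
INSTALLED_APPS = [
    'myapp.apps.MyAppConfig',
    # or simply 'myapp', since Django auto-discovers a lone AppConfig subclass
]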
28b49417a46659a2e64ee91eea497f674d42dde5
|
tests/test_integration.py
|
tests/test_integration.py
|
from pandarus import intersect
import fiona
import json
import numpy as np
import os
import tempfile
dirpath = os.path.abspath(os.path.join(os.path.dirname(__file__), "data"))
grid = os.path.join(dirpath, "grid.geojson")
square = os.path.join(dirpath, "square.geojson")
range_raster = os.path.join(dirpath, "range.tif")
dem = os.path.join(dirpath, "DEM.tif")
outside = os.path.join(dirpath, "outside.geojson")
remain_result = os.path.join(dirpath, "remaining.geojson")
def test_intersection_polygon():
area = 1/4 * (4e7 / 360) ** 2
with tempfile.TemporaryDirectory() as dirpath:
vector_fp, data_fp = intersect(
outside,
'name',
grid,
'name',
dirpath=dirpath,
compress=False
)
data = json.load(open(data_fp))
assert len(data['data']) == 2
print(data)
print(area)
for x, y, z in data['data']:
assert x == 'by-myself'
assert y in ('grid cell 1', 'grid cell 3')
assert np.allclose(z, area, rtol=1e-2)
assert data['metadata'].keys() == {'first', 'second', 'when'}
assert data['metadata']['first'].keys() == {'field', 'filename', 'path', 'sha256'}
assert data['metadata']['second'].keys() == {'field', 'filename', 'path', 'sha256'}
with fiona.open(vector_fp) as src:
meta = src.meta
assert meta['driver'] == 'GeoJSON'
assert meta['schema'] == {
'geometry': 'MultiPolygon',
'properties': dict([
('measure', 'float'),
('from_label', 'str'),
('id', 'int'),
('to_label', 'str')
])
}
assert meta['crs'] == {'init': 'epsg:4326'}
coords = [
[[[(0.5, 1.5), (0.5, 2.0), (1.0, 2.0), (1.0, 1.5), (0.5, 1.5)]]],
[[[(1.5, 2.0), (1.5, 1.5), (1.0, 1.5), (1.0, 2.0), (1.5, 2.0)]]]
]
for feature in src:
assert feature['geometry']['coordinates'] in coords
assert feature['geometry']['type'] == 'MultiPolygon'
assert feature['properties'].keys() == {'measure', 'from_label', 'to_label', 'id'}
assert np.allclose(feature['properties']['measure'], area, rtol=1e-2)
def test_intersection_polygon_projection():
pass
def test_intersection_line():
pass
def test_intersection_line_projection():
pass
def test_intersection_point():
pass
def test_intersection_point_projection():
pass
|
Add first intersection integration test
|
Add first intersection integration test
|
Python
|
bsd-3-clause
|
cmutel/pandarus
|
Add first intersection integration test
|
from pandarus import intersect
import fiona
import json
import numpy as np
import os
import tempfile
dirpath = os.path.abspath(os.path.join(os.path.dirname(__file__), "data"))
grid = os.path.join(dirpath, "grid.geojson")
square = os.path.join(dirpath, "square.geojson")
range_raster = os.path.join(dirpath, "range.tif")
dem = os.path.join(dirpath, "DEM.tif")
outside = os.path.join(dirpath, "outside.geojson")
remain_result = os.path.join(dirpath, "remaining.geojson")
def test_intersection_polygon():
area = 1/4 * (4e7 / 360) ** 2
with tempfile.TemporaryDirectory() as dirpath:
vector_fp, data_fp = intersect(
outside,
'name',
grid,
'name',
dirpath=dirpath,
compress=False
)
data = json.load(open(data_fp))
assert len(data['data']) == 2
print(data)
print(area)
for x, y, z in data['data']:
assert x == 'by-myself'
assert y in ('grid cell 1', 'grid cell 3')
assert np.allclose(z, area, rtol=1e-2)
assert data['metadata'].keys() == {'first', 'second', 'when'}
assert data['metadata']['first'].keys() == {'field', 'filename', 'path', 'sha256'}
assert data['metadata']['second'].keys() == {'field', 'filename', 'path', 'sha256'}
with fiona.open(vector_fp) as src:
meta = src.meta
assert meta['driver'] == 'GeoJSON'
assert meta['schema'] == {
'geometry': 'MultiPolygon',
'properties': dict([
('measure', 'float'),
('from_label', 'str'),
('id', 'int'),
('to_label', 'str')
])
}
assert meta['crs'] == {'init': 'epsg:4326'}
coords = [
[[[(0.5, 1.5), (0.5, 2.0), (1.0, 2.0), (1.0, 1.5), (0.5, 1.5)]]],
[[[(1.5, 2.0), (1.5, 1.5), (1.0, 1.5), (1.0, 2.0), (1.5, 2.0)]]]
]
for feature in src:
assert feature['geometry']['coordinates'] in coords
assert feature['geometry']['type'] == 'MultiPolygon'
assert feature['properties'].keys() == {'measure', 'from_label', 'to_label', 'id'}
assert np.allclose(feature['properties']['measure'], area, rtol=1e-2)
def test_intersection_polygon_projection():
pass
def test_intersection_line():
pass
def test_intersection_line_projection():
pass
def test_intersection_point():
pass
def test_intersection_point_projection():
pass
|
<commit_before><commit_msg>Add first intersection integration test<commit_after>
|
from pandarus import intersect
import fiona
import json
import numpy as np
import os
import tempfile
dirpath = os.path.abspath(os.path.join(os.path.dirname(__file__), "data"))
grid = os.path.join(dirpath, "grid.geojson")
square = os.path.join(dirpath, "square.geojson")
range_raster = os.path.join(dirpath, "range.tif")
dem = os.path.join(dirpath, "DEM.tif")
outside = os.path.join(dirpath, "outside.geojson")
remain_result = os.path.join(dirpath, "remaining.geojson")
def test_intersection_polygon():
area = 1/4 * (4e7 / 360) ** 2
with tempfile.TemporaryDirectory() as dirpath:
vector_fp, data_fp = intersect(
outside,
'name',
grid,
'name',
dirpath=dirpath,
compress=False
)
data = json.load(open(data_fp))
assert len(data['data']) == 2
print(data)
print(area)
for x, y, z in data['data']:
assert x == 'by-myself'
assert y in ('grid cell 1', 'grid cell 3')
assert np.allclose(z, area, rtol=1e-2)
assert data['metadata'].keys() == {'first', 'second', 'when'}
assert data['metadata']['first'].keys() == {'field', 'filename', 'path', 'sha256'}
assert data['metadata']['second'].keys() == {'field', 'filename', 'path', 'sha256'}
with fiona.open(vector_fp) as src:
meta = src.meta
assert meta['driver'] == 'GeoJSON'
assert meta['schema'] == {
'geometry': 'MultiPolygon',
'properties': dict([
('measure', 'float'),
('from_label', 'str'),
('id', 'int'),
('to_label', 'str')
])
}
assert meta['crs'] == {'init': 'epsg:4326'}
coords = [
[[[(0.5, 1.5), (0.5, 2.0), (1.0, 2.0), (1.0, 1.5), (0.5, 1.5)]]],
[[[(1.5, 2.0), (1.5, 1.5), (1.0, 1.5), (1.0, 2.0), (1.5, 2.0)]]]
]
for feature in src:
assert feature['geometry']['coordinates'] in coords
assert feature['geometry']['type'] == 'MultiPolygon'
assert feature['properties'].keys() == {'measure', 'from_label', 'to_label', 'id'}
assert np.allclose(feature['properties']['measure'], area, rtol=1e-2)
def test_intersection_polygon_projection():
pass
def test_intersection_line():
pass
def test_intersection_line_projection():
pass
def test_intersection_point():
pass
def test_intersection_point_projection():
pass
|
Add first intersection integration test
from pandarus import intersect
import fiona
import json
import numpy as np
import os
import tempfile
dirpath = os.path.abspath(os.path.join(os.path.dirname(__file__), "data"))
grid = os.path.join(dirpath, "grid.geojson")
square = os.path.join(dirpath, "square.geojson")
range_raster = os.path.join(dirpath, "range.tif")
dem = os.path.join(dirpath, "DEM.tif")
outside = os.path.join(dirpath, "outside.geojson")
remain_result = os.path.join(dirpath, "remaining.geojson")
def test_intersection_polygon():
area = 1/4 * (4e7 / 360) ** 2
with tempfile.TemporaryDirectory() as dirpath:
vector_fp, data_fp = intersect(
outside,
'name',
grid,
'name',
dirpath=dirpath,
compress=False
)
data = json.load(open(data_fp))
assert len(data['data']) == 2
print(data)
print(area)
for x, y, z in data['data']:
assert x == 'by-myself'
assert y in ('grid cell 1', 'grid cell 3')
assert np.allclose(z, area, rtol=1e-2)
assert data['metadata'].keys() == {'first', 'second', 'when'}
assert data['metadata']['first'].keys() == {'field', 'filename', 'path', 'sha256'}
assert data['metadata']['second'].keys() == {'field', 'filename', 'path', 'sha256'}
with fiona.open(vector_fp) as src:
meta = src.meta
assert meta['driver'] == 'GeoJSON'
assert meta['schema'] == {
'geometry': 'MultiPolygon',
'properties': dict([
('measure', 'float'),
('from_label', 'str'),
('id', 'int'),
('to_label', 'str')
])
}
assert meta['crs'] == {'init': 'epsg:4326'}
coords = [
[[[(0.5, 1.5), (0.5, 2.0), (1.0, 2.0), (1.0, 1.5), (0.5, 1.5)]]],
[[[(1.5, 2.0), (1.5, 1.5), (1.0, 1.5), (1.0, 2.0), (1.5, 2.0)]]]
]
for feature in src:
assert feature['geometry']['coordinates'] in coords
assert feature['geometry']['type'] == 'MultiPolygon'
assert feature['properties'].keys() == {'measure', 'from_label', 'to_label', 'id'}
assert np.allclose(feature['properties']['measure'], area, rtol=1e-2)
def test_intersection_polygon_projection():
pass
def test_intersection_line():
pass
def test_intersection_line_projection():
pass
def test_intersection_point():
pass
def test_intersection_point_projection():
pass
|
<commit_before><commit_msg>Add first intersection integration test<commit_after>from pandarus import intersect
import fiona
import json
import numpy as np
import os
import tempfile
dirpath = os.path.abspath(os.path.join(os.path.dirname(__file__), "data"))
grid = os.path.join(dirpath, "grid.geojson")
square = os.path.join(dirpath, "square.geojson")
range_raster = os.path.join(dirpath, "range.tif")
dem = os.path.join(dirpath, "DEM.tif")
outside = os.path.join(dirpath, "outside.geojson")
remain_result = os.path.join(dirpath, "remaining.geojson")
def test_intersection_polygon():
area = 1/4 * (4e7 / 360) ** 2
with tempfile.TemporaryDirectory() as dirpath:
vector_fp, data_fp = intersect(
outside,
'name',
grid,
'name',
dirpath=dirpath,
compress=False
)
data = json.load(open(data_fp))
assert len(data['data']) == 2
print(data)
print(area)
for x, y, z in data['data']:
assert x == 'by-myself'
assert y in ('grid cell 1', 'grid cell 3')
assert np.allclose(z, area, rtol=1e-2)
assert data['metadata'].keys() == {'first', 'second', 'when'}
assert data['metadata']['first'].keys() == {'field', 'filename', 'path', 'sha256'}
assert data['metadata']['second'].keys() == {'field', 'filename', 'path', 'sha256'}
with fiona.open(vector_fp) as src:
meta = src.meta
assert meta['driver'] == 'GeoJSON'
assert meta['schema'] == {
'geometry': 'MultiPolygon',
'properties': dict([
('measure', 'float'),
('from_label', 'str'),
('id', 'int'),
('to_label', 'str')
])
}
assert meta['crs'] == {'init': 'epsg:4326'}
coords = [
[[[(0.5, 1.5), (0.5, 2.0), (1.0, 2.0), (1.0, 1.5), (0.5, 1.5)]]],
[[[(1.5, 2.0), (1.5, 1.5), (1.0, 1.5), (1.0, 2.0), (1.5, 2.0)]]]
]
for feature in src:
assert feature['geometry']['coordinates'] in coords
assert feature['geometry']['type'] == 'MultiPolygon'
assert feature['properties'].keys() == {'measure', 'from_label', 'to_label', 'id'}
assert np.allclose(feature['properties']['measure'], area, rtol=1e-2)
def test_intersection_polygon_projection():
pass
def test_intersection_line():
pass
def test_intersection_line_projection():
pass
def test_intersection_point():
pass
def test_intersection_point_projection():
pass
|
|
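The expected-area constant in the first assertion is easier to read once spelled out: 4e7 / 360 is metres per degree on a 40,000 km sphere, and the grid fixture appears to use half-degree cells, so 1/4 * (4e7 / 360) ** 2 is the area of a 0.5 by 0.5 degree square near the equator. A short worked check follows (the half-degree cell size is inferred from the test, not documented pandarus behaviour).

# Worked check of the expected-area constant used in the assertions above.
METERS_PER_DEGREE = 4e7 / 360.0      # ~111 km, treating the earth as a 40,000 km sphere
CELL_SIZE_DEGREES = 0.5              # inferred half-degree grid cells
expected_area = (CELL_SIZE_DEGREES * METERS_PER_DEGREE) ** 2

assert abs(expected_area - 1 / 4 * (4e7 / 360) ** 2) < 1e-3
print('%.1f km^2' % (expected_area / 1e6))   # roughly 3086.4 km^2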
a6bd9c0b2b552347d540b2b05b7d7ed31d84a47b
|
remove_nth_node_from_end_of_list.py
|
remove_nth_node_from_end_of_list.py
|
'''
Given a linked list, remove the nth node from the end of list and return its head.
For example,
Given linked list: 1->2->3->4->5, and n = 2.
After removing the second node from the end, the linked list becomes 1->2->3->5.
Note:
Given n will always be valid.
Try to do this in one pass.
'''
'''
Classic two pointers problem. Edge case: delete the first Node
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
from listnode import ListNode
class Solution:
# @return a ListNode
def removeNthFromEnd(self, head, n):
p1 = head
p2 = head
pre = None
for i in xrange(n):
p2 = p2.next
#delete the first node
if p2 == None:
return head.next
while p2:
pre = p1
p1 = p1.next
p2 = p2.next
pre.next = p1.next if p1 else None
return head
if __name__ == '__main__':
s = Solution()
n1 = ListNode(1)
n2 = ListNode(2)
n3 = ListNode(3)
n4 = ListNode(4)
n5 = ListNode(5)
n1.next = n2
n2.next = n3
n3.next = n4
n4.next = n5
head = n1
head.printList()
s.removeNthFromEnd(head, 2).printList()
n1 = ListNode(1)
head = n1
print s.removeNthFromEnd(head,1)
n1 = ListNode(1)
n2 = ListNode(2)
n1.next = n2
head = n1
s.removeNthFromEnd(head, 2).printList()
|
Remove Nth Node From End of List problem
|
Remove Nth Node From End of List problem
|
Python
|
apache-2.0
|
zsmountain/leetcode,zsmountain/leetcode,zsmountain/leetcode
|
Remove Nth Node From End of List problem
|
'''
Given a linked list, remove the nth node from the end of list and return its head.
For example,
Given linked list: 1->2->3->4->5, and n = 2.
After removing the second node from the end, the linked list becomes 1->2->3->5.
Note:
Given n will always be valid.
Try to do this in one pass.
'''
'''
Classic two pointers problem. Edge case: delete the first Node
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
from listnode import ListNode
class Solution:
# @return a ListNode
def removeNthFromEnd(self, head, n):
p1 = head
p2 = head
pre = None
for i in xrange(n):
p2 = p2.next
#delete the first node
if p2 == None:
return head.next
while p2:
pre = p1
p1 = p1.next
p2 = p2.next
pre.next = p1.next if p1 else None
return head
if __name__ == '__main__':
s = Solution()
n1 = ListNode(1)
n2 = ListNode(2)
n3 = ListNode(3)
n4 = ListNode(4)
n5 = ListNode(5)
n1.next = n2
n2.next = n3
n3.next = n4
n4.next = n5
head = n1
head.printList()
s.removeNthFromEnd(head, 2).printList()
n1 = ListNode(1)
head = n1
print s.removeNthFromEnd(head,1)
n1 = ListNode(1)
n2 = ListNode(2)
n1.next = n2
head = n1
s.removeNthFromEnd(head, 2).printList()
|
<commit_before><commit_msg>Remove Nth Node From End of List problem<commit_after>
|
'''
Given a linked list, remove the nth node from the end of list and return its head.
For example,
Given linked list: 1->2->3->4->5, and n = 2.
After removing the second node from the end, the linked list becomes 1->2->3->5.
Note:
Given n will always be valid.
Try to do this in one pass.
'''
'''
Classic two pointers problem. Edge case: delete the first Node
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
from listnode import ListNode
class Solution:
# @return a ListNode
def removeNthFromEnd(self, head, n):
p1 = head
p2 = head
pre = None
for i in xrange(n):
p2 = p2.next
#delete the first node
if p2 == None:
return head.next
while p2:
pre = p1
p1 = p1.next
p2 = p2.next
pre.next = p1.next if p1 else None
return head
if __name__ == '__main__':
s = Solution()
n1 = ListNode(1)
n2 = ListNode(2)
n3 = ListNode(3)
n4 = ListNode(4)
n5 = ListNode(5)
n1.next = n2
n2.next = n3
n3.next = n4
n4.next = n5
head = n1
head.printList()
s.removeNthFromEnd(head, 2).printList()
n1 = ListNode(1)
head = n1
print s.removeNthFromEnd(head,1)
n1 = ListNode(1)
n2 = ListNode(2)
n1.next = n2
head = n1
s.removeNthFromEnd(head, 2).printList()
|
Remove Nth Node From End of List problem
'''
Given a linked list, remove the nth node from the end of list and return its head.
For example,
Given linked list: 1->2->3->4->5, and n = 2.
After removing the second node from the end, the linked list becomes 1->2->3->5.
Note:
Given n will always be valid.
Try to do this in one pass.
'''
'''
Classic two pointers problem. Edge case: delete the first Node
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
from listnode import ListNode
class Solution:
# @return a ListNode
def removeNthFromEnd(self, head, n):
p1 = head
p2 = head
pre = None
for i in xrange(n):
p2 = p2.next
#delete the first node
if p2 == None:
return head.next
while p2:
pre = p1
p1 = p1.next
p2 = p2.next
pre.next = p1.next if p1 else None
return head
if __name__ == '__main__':
s = Solution()
n1 = ListNode(1)
n2 = ListNode(2)
n3 = ListNode(3)
n4 = ListNode(4)
n5 = ListNode(5)
n1.next = n2
n2.next = n3
n3.next = n4
n4.next = n5
head = n1
head.printList()
s.removeNthFromEnd(head, 2).printList()
n1 = ListNode(1)
head = n1
print s.removeNthFromEnd(head,1)
n1 = ListNode(1)
n2 = ListNode(2)
n1.next = n2
head = n1
s.removeNthFromEnd(head, 2).printList()
|
<commit_before><commit_msg>Remove Nth Node From End of List problem<commit_after>'''
Given a linked list, remove the nth node from the end of list and return its head.
For example,
Given linked list: 1->2->3->4->5, and n = 2.
After removing the second node from the end, the linked list becomes 1->2->3->5.
Note:
Given n will always be valid.
Try to do this in one pass.
'''
'''
Classic two pointers problem. Edge case: delete the first Node
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
from listnode import ListNode
class Solution:
# @return a ListNode
def removeNthFromEnd(self, head, n):
p1 = head
p2 = head
pre = None
for i in xrange(n):
p2 = p2.next
#delete the first node
if p2 == None:
return head.next
while p2:
pre = p1
p1 = p1.next
p2 = p2.next
pre.next = p1.next if p1 else None
return head
if __name__ == '__main__':
s = Solution()
n1 = ListNode(1)
n2 = ListNode(2)
n3 = ListNode(3)
n4 = ListNode(4)
n5 = ListNode(5)
n1.next = n2
n2.next = n3
n3.next = n4
n4.next = n5
head = n1
head.printList()
s.removeNthFromEnd(head, 2).printList()
n1 = ListNode(1)
head = n1
print s.removeNthFromEnd(head,1)
n1 = ListNode(1)
n2 = ListNode(2)
n1.next = n2
head = n1
s.removeNthFromEnd(head, 2).printList()
|
|
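The head-deletion edge case handled explicitly above is often sidestepped with a dummy node placed in front of the list; both pointers then start at the dummy and no special case is needed. A hedged alternative sketch of the same one-pass, two-pointer idea, written for Python 3 unlike the snippet above:

# Alternative one-pass version using a dummy head so that removing the
# first node needs no special case.
class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None


def remove_nth_from_end(head, n):
    dummy = ListNode(0)
    dummy.next = head
    fast = slow = dummy
    for _ in range(n):              # put fast n steps ahead of slow
        fast = fast.next
    while fast.next:                # walk both until fast reaches the tail
        fast = fast.next
        slow = slow.next
    slow.next = slow.next.next      # unlink the nth node from the end
    return dummy.next


if __name__ == '__main__':
    nodes = [ListNode(i) for i in range(1, 6)]
    for a, b in zip(nodes, nodes[1:]):
        a.next = b
    head = remove_nth_from_end(nodes[0], 2)
    out = []
    while head:
        out.append(head.val)
        head = head.next
    print(out)                      # [1, 2, 3, 5]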
f25d4cc39d5a6b4f9463ad4abfbfc722b0ab74af
|
tests/kafka_consumer_manager/test_list_groups.py
|
tests/kafka_consumer_manager/test_list_groups.py
|
import contextlib
import sys
import mock
from kazoo.exceptions import NoNodeError
from yelp_kafka_tool.kafka_consumer_manager. \
commands.list_groups import ListGroups
class TestListGroups(object):
@contextlib.contextmanager
def mock_kafka_info(self, topics_partitions):
with mock.patch.object(
ListGroups,
"preprocess_args",
spec=ListGroups.preprocess_args,
return_value=topics_partitions,
) as mock_process_args, mock.patch(
"yelp_kafka_tool.kafka_consumer_manager."
"commands.list_groups.ZK",
autospec=True
) as mock_ZK:
mock_ZK.return_value.__enter__.return_value = mock_ZK
yield mock_process_args, mock_ZK
@mock.patch("yelp_kafka_tool.kafka_consumer_manager.commands.list_groups.print", create=True)
def test_run(self, mock_print):
topics_partitions = {
"topic1": [0, 1, 2],
"topic2": [0, 1]
}
with self.mock_kafka_info(
topics_partitions
) as (mock_process_args, mock_ZK):
obj = mock_ZK.return_value.__enter__.return_value
obj.get_children.return_value = [
'group1', 'group2', 'group3'
]
cluster_config = mock.Mock(zookeeper='some_ip', type='some_cluster_type')
cluster_config.configure_mock(name='some_cluster_name')
args = mock.Mock()
expected_print = [
mock.call("Consumer Groups:"),
mock.call("\tgroup1"),
mock.call("\tgroup2"),
mock.call("\tgroup3"),
mock.call("3 groups found for cluster some_cluster_name "
"of type some_cluster_type"),
]
ListGroups.run(args, cluster_config)
assert mock_print.call_args_list == expected_print
@mock.patch("yelp_kafka_tool.kafka_consumer_manager.commands.list_groups.print", create=True)
def test_run_zknode_error(self, mock_print):
topics_partitions = {
"topic1": [0, 1, 2],
"topic2": [0, 1]
}
with self.mock_kafka_info(
topics_partitions
) as (mock_process_args, mock_ZK):
obj = mock_ZK.return_value.__enter__.return_value
obj.__exit__.return_value = False
cluster_config = mock.Mock(zookeeper='some_ip')
args = mock.Mock()
obj.get_children.side_effect = NoNodeError("Boom!")
ListGroups.run(args, cluster_config)
mock_print.assert_called_with(
"Error: No consumers node found in zookeeper",
file=sys.stderr,
)
|
Add unit tests for list_groups command.
|
KAFKA-797: Add unit tests for list_groups command.
|
Python
|
apache-2.0
|
anthonysandrin/kafka-utils,Yelp/kafka-utils,Yelp/kafka-utils,anthonysandrin/kafka-utils
|
KAFKA-797: Add unit tests for list_groups command.
|
import contextlib
import sys
import mock
from kazoo.exceptions import NoNodeError
from yelp_kafka_tool.kafka_consumer_manager. \
commands.list_groups import ListGroups
class TestListGroups(object):
@contextlib.contextmanager
def mock_kafka_info(self, topics_partitions):
with mock.patch.object(
ListGroups,
"preprocess_args",
spec=ListGroups.preprocess_args,
return_value=topics_partitions,
) as mock_process_args, mock.patch(
"yelp_kafka_tool.kafka_consumer_manager."
"commands.list_groups.ZK",
autospec=True
) as mock_ZK:
mock_ZK.return_value.__enter__.return_value = mock_ZK
yield mock_process_args, mock_ZK
@mock.patch("yelp_kafka_tool.kafka_consumer_manager.commands.list_groups.print", create=True)
def test_run(self, mock_print):
topics_partitions = {
"topic1": [0, 1, 2],
"topic2": [0, 1]
}
with self.mock_kafka_info(
topics_partitions
) as (mock_process_args, mock_ZK):
obj = mock_ZK.return_value.__enter__.return_value
obj.get_children.return_value = [
'group1', 'group2', 'group3'
]
cluster_config = mock.Mock(zookeeper='some_ip', type='some_cluster_type')
cluster_config.configure_mock(name='some_cluster_name')
args = mock.Mock()
expected_print = [
mock.call("Consumer Groups:"),
mock.call("\tgroup1"),
mock.call("\tgroup2"),
mock.call("\tgroup3"),
mock.call("3 groups found for cluster some_cluster_name "
"of type some_cluster_type"),
]
ListGroups.run(args, cluster_config)
assert mock_print.call_args_list == expected_print
@mock.patch("yelp_kafka_tool.kafka_consumer_manager.commands.list_groups.print", create=True)
def test_run_zknode_error(self, mock_print):
topics_partitions = {
"topic1": [0, 1, 2],
"topic2": [0, 1]
}
with self.mock_kafka_info(
topics_partitions
) as (mock_process_args, mock_ZK):
obj = mock_ZK.return_value.__enter__.return_value
obj.__exit__.return_value = False
cluster_config = mock.Mock(zookeeper='some_ip')
args = mock.Mock()
obj.get_children.side_effect = NoNodeError("Boom!")
ListGroups.run(args, cluster_config)
mock_print.assert_called_with(
"Error: No consumers node found in zookeeper",
file=sys.stderr,
)
|
<commit_before><commit_msg>KAFKA-797: Add unit tests for list_groups command.<commit_after>
|
import contextlib
import sys
import mock
from kazoo.exceptions import NoNodeError
from yelp_kafka_tool.kafka_consumer_manager. \
commands.list_groups import ListGroups
class TestListGroups(object):
@contextlib.contextmanager
def mock_kafka_info(self, topics_partitions):
with mock.patch.object(
ListGroups,
"preprocess_args",
spec=ListGroups.preprocess_args,
return_value=topics_partitions,
) as mock_process_args, mock.patch(
"yelp_kafka_tool.kafka_consumer_manager."
"commands.list_groups.ZK",
autospec=True
) as mock_ZK:
mock_ZK.return_value.__enter__.return_value = mock_ZK
yield mock_process_args, mock_ZK
@mock.patch("yelp_kafka_tool.kafka_consumer_manager.commands.list_groups.print", create=True)
def test_run(self, mock_print):
topics_partitions = {
"topic1": [0, 1, 2],
"topic2": [0, 1]
}
with self.mock_kafka_info(
topics_partitions
) as (mock_process_args, mock_ZK):
obj = mock_ZK.return_value.__enter__.return_value
obj.get_children.return_value = [
'group1', 'group2', 'group3'
]
cluster_config = mock.Mock(zookeeper='some_ip', type='some_cluster_type')
cluster_config.configure_mock(name='some_cluster_name')
args = mock.Mock()
expected_print = [
mock.call("Consumer Groups:"),
mock.call("\tgroup1"),
mock.call("\tgroup2"),
mock.call("\tgroup3"),
mock.call("3 groups found for cluster some_cluster_name "
"of type some_cluster_type"),
]
ListGroups.run(args, cluster_config)
assert mock_print.call_args_list == expected_print
@mock.patch("yelp_kafka_tool.kafka_consumer_manager.commands.list_groups.print", create=True)
def test_run_zknode_error(self, mock_print):
topics_partitions = {
"topic1": [0, 1, 2],
"topic2": [0, 1]
}
with self.mock_kafka_info(
topics_partitions
) as (mock_process_args, mock_ZK):
obj = mock_ZK.return_value.__enter__.return_value
obj.__exit__.return_value = False
cluster_config = mock.Mock(zookeeper='some_ip')
args = mock.Mock()
obj.get_children.side_effect = NoNodeError("Boom!")
ListGroups.run(args, cluster_config)
mock_print.assert_called_with(
"Error: No consumers node found in zookeeper",
file=sys.stderr,
)
|
KAFKA-797: Add unit tests for list_groups command.
import contextlib
import sys
import mock
from kazoo.exceptions import NoNodeError
from yelp_kafka_tool.kafka_consumer_manager. \
commands.list_groups import ListGroups
class TestListGroups(object):
@contextlib.contextmanager
def mock_kafka_info(self, topics_partitions):
with mock.patch.object(
ListGroups,
"preprocess_args",
spec=ListGroups.preprocess_args,
return_value=topics_partitions,
) as mock_process_args, mock.patch(
"yelp_kafka_tool.kafka_consumer_manager."
"commands.list_groups.ZK",
autospec=True
) as mock_ZK:
mock_ZK.return_value.__enter__.return_value = mock_ZK
yield mock_process_args, mock_ZK
@mock.patch("yelp_kafka_tool.kafka_consumer_manager.commands.list_groups.print", create=True)
def test_run(self, mock_print):
topics_partitions = {
"topic1": [0, 1, 2],
"topic2": [0, 1]
}
with self.mock_kafka_info(
topics_partitions
) as (mock_process_args, mock_ZK):
obj = mock_ZK.return_value.__enter__.return_value
obj.get_children.return_value = [
'group1', 'group2', 'group3'
]
cluster_config = mock.Mock(zookeeper='some_ip', type='some_cluster_type')
cluster_config.configure_mock(name='some_cluster_name')
args = mock.Mock()
expected_print = [
mock.call("Consumer Groups:"),
mock.call("\tgroup1"),
mock.call("\tgroup2"),
mock.call("\tgroup3"),
mock.call("3 groups found for cluster some_cluster_name "
"of type some_cluster_type"),
]
ListGroups.run(args, cluster_config)
assert mock_print.call_args_list == expected_print
@mock.patch("yelp_kafka_tool.kafka_consumer_manager.commands.list_groups.print", create=True)
def test_run_zknode_error(self, mock_print):
topics_partitions = {
"topic1": [0, 1, 2],
"topic2": [0, 1]
}
with self.mock_kafka_info(
topics_partitions
) as (mock_process_args, mock_ZK):
obj = mock_ZK.return_value.__enter__.return_value
obj.__exit__.return_value = False
cluster_config = mock.Mock(zookeeper='some_ip')
args = mock.Mock()
obj.get_children.side_effect = NoNodeError("Boom!")
ListGroups.run(args, cluster_config)
mock_print.assert_called_with(
"Error: No consumers node found in zookeeper",
file=sys.stderr,
)
|
<commit_before><commit_msg>KAFKA-797: Add unit tests for list_groups command.<commit_after>import contextlib
import sys
import mock
from kazoo.exceptions import NoNodeError
from yelp_kafka_tool.kafka_consumer_manager. \
commands.list_groups import ListGroups
class TestListGroups(object):
@contextlib.contextmanager
def mock_kafka_info(self, topics_partitions):
with mock.patch.object(
ListGroups,
"preprocess_args",
spec=ListGroups.preprocess_args,
return_value=topics_partitions,
) as mock_process_args, mock.patch(
"yelp_kafka_tool.kafka_consumer_manager."
"commands.list_groups.ZK",
autospec=True
) as mock_ZK:
mock_ZK.return_value.__enter__.return_value = mock_ZK
yield mock_process_args, mock_ZK
@mock.patch("yelp_kafka_tool.kafka_consumer_manager.commands.list_groups.print", create=True)
def test_run(self, mock_print):
topics_partitions = {
"topic1": [0, 1, 2],
"topic2": [0, 1]
}
with self.mock_kafka_info(
topics_partitions
) as (mock_process_args, mock_ZK):
obj = mock_ZK.return_value.__enter__.return_value
obj.get_children.return_value = [
'group1', 'group2', 'group3'
]
cluster_config = mock.Mock(zookeeper='some_ip', type='some_cluster_type')
cluster_config.configure_mock(name='some_cluster_name')
args = mock.Mock()
expected_print = [
mock.call("Consumer Groups:"),
mock.call("\tgroup1"),
mock.call("\tgroup2"),
mock.call("\tgroup3"),
mock.call("3 groups found for cluster some_cluster_name "
"of type some_cluster_type"),
]
ListGroups.run(args, cluster_config)
assert mock_print.call_args_list == expected_print
@mock.patch("yelp_kafka_tool.kafka_consumer_manager.commands.list_groups.print", create=True)
def test_run_zknode_error(self, mock_print):
topics_partitions = {
"topic1": [0, 1, 2],
"topic2": [0, 1]
}
with self.mock_kafka_info(
topics_partitions
) as (mock_process_args, mock_ZK):
obj = mock_ZK.return_value.__enter__.return_value
obj.__exit__.return_value = False
cluster_config = mock.Mock(zookeeper='some_ip')
args = mock.Mock()
obj.get_children.side_effect = NoNodeError("Boom!")
ListGroups.run(args, cluster_config)
mock_print.assert_called_with(
"Error: No consumers node found in zookeeper",
file=sys.stderr,
)
|
|
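Two mocking tricks carry these tests: setting the value a context manager yields by assigning to return_value.__enter__.return_value, and patching print inside the module under test with create=True (a module only has a patchable module-level print name once it uses the print function rather than the statement). A stripped-down, self-contained illustration with made-up names:

# Hedged, minimal illustration of the mocking patterns used above.
from __future__ import print_function

import mock   # the third-party mock package, as in the tests above


def report(zk_factory):
    # Mirrors the shape of ListGroups.run: enter a ZK-like context manager,
    # read the consumer groups, print a summary.
    with zk_factory() as zk:
        print("groups: %s" % zk.get_children('/consumers'))


def test_report():
    fake_zk = mock.MagicMock()
    # Whatever `with zk_factory() as zk` yields is __enter__'s return value.
    fake_zk.return_value.__enter__.return_value.get_children.return_value = ['g1']
    with mock.patch('%s.print' % __name__, create=True) as fake_print:
        report(fake_zk)
    fake_print.assert_called_once_with("groups: ['g1']")


if __name__ == '__main__':
    test_report()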
30d26f76e76ee760ec72ce95f6845891cd6ed3b0
|
examples/asmleds.py
|
examples/asmleds.py
|
"""
This script uses the inline assembler to make the LEDs light up
in a pattern based on how they are multiplexed in rows/cols.
"""
# row pins: 13, 14, 15
# col pins: 4..12 inclusive
# GPIO words starting at 0x50000500:
# RESERVED, OUT, OUTSET, OUTCLR, IN, DIR, DIRSET, DIRCLR
@micropython.asm_thumb
def led_cycle():
b(START)
# DELAY routine
label(DELAY)
mov(r3, 0xa0)
lsl(r3, r3, 11)
label(delay_loop)
sub(r3, 1)
bne(delay_loop)
bx(lr)
label(START)
cpsid('i') # disable interrupts so we control the display
mov(r0, 0x50) # r0=0x50
lsl(r0, r0, 16) # r0=0x500000
add(r0, 0x05) # r0=0x500005
lsl(r0, r0, 8) # r0=0x50000500 -- this points to GPIO registers
mov(r1, 0b111)
lsl(r1, r1, 13) # r1=0xe000
str(r1, [r0, 8]) # pull all rows high
mov(r1, 1 << 4) # r1 holds current col bit
mov(r2, 9) # r2 holds number of cols left
label(loop_on)
str(r1, [r0, 12]) # pull col low to turn LEDs on
bl(DELAY) # wait
lsl(r1, r1, 1) # shift to next col
sub(r2, 1) # decrease col counter
bne(loop_on) # loop while there are still cols left
mov(r1, 1 << 4) # r1 holds current col bit
mov(r2, 9) # r2 holds number of cols left
label(loop_off)
str(r1, [r0, 8]) # pull col high to turn LEDs off
bl(DELAY) # wait
lsl(r1, r1, 1) # shift to next col
sub(r2, 1) # decrease col counter
bne(loop_off) # loop while there are still cols left
cpsie('i') # enable interrupts
for i in range(4):
led_cycle()
|
Add example using the inline assembler.
|
Add example using the inline assembler.
|
Python
|
mit
|
JoeGlancy/micropython,JoeGlancy/micropython,JoeGlancy/micropython
|
Add example using the inline assembler.
|
"""
This script uses the inline assembler to make the LEDs light up
in a pattern based on how they are multiplexed in rows/cols.
"""
# row pins: 13, 14, 15
# col pins: 4..12 inclusive
# GPIO words starting at 0x50000500:
# RESERVED, OUT, OUTSET, OUTCLR, IN, DIR, DIRSET, DIRCLR
@micropython.asm_thumb
def led_cycle():
b(START)
# DELAY routine
label(DELAY)
mov(r3, 0xa0)
lsl(r3, r3, 11)
label(delay_loop)
sub(r3, 1)
bne(delay_loop)
bx(lr)
label(START)
cpsid('i') # disable interrupts so we control the display
mov(r0, 0x50) # r0=0x50
lsl(r0, r0, 16) # r0=0x500000
add(r0, 0x05) # r0=0x500005
lsl(r0, r0, 8) # r0=0x50000500 -- this points to GPIO registers
mov(r1, 0b111)
lsl(r1, r1, 13) # r1=0xe000
str(r1, [r0, 8]) # pull all rows high
mov(r1, 1 << 4) # r1 holds current col bit
mov(r2, 9) # r2 holds number of cols left
label(loop_on)
str(r1, [r0, 12]) # pull col low to turn LEDs on
bl(DELAY) # wait
lsl(r1, r1, 1) # shift to next col
sub(r2, 1) # decrease col counter
bne(loop_on) # loop while there are still cols left
mov(r1, 1 << 4) # r1 holds current col bit
mov(r2, 9) # r2 holds number of cols left
label(loop_off)
str(r1, [r0, 8]) # pull col high to turn LEDs off
bl(DELAY) # wait
lsl(r1, r1, 1) # shift to next col
sub(r2, 1) # decrease col counter
bne(loop_off) # loop while there are still cols left
cpsie('i') # enable interrupts
for i in range(4):
led_cycle()
|
<commit_before><commit_msg>Add example using the inline assembler.<commit_after>
|
"""
This script uses the inline assembler to make the LEDs light up
in a pattern based on how they are multiplexed in rows/cols.
"""
# row pins: 13, 14, 15
# col pins: 4..12 inclusive
# GPIO words starting at 0x50000500:
# RESERVED, OUT, OUTSET, OUTCLR, IN, DIR, DIRSET, DIRCLR
@micropython.asm_thumb
def led_cycle():
b(START)
# DELAY routine
label(DELAY)
mov(r3, 0xa0)
lsl(r3, r3, 11)
label(delay_loop)
sub(r3, 1)
bne(delay_loop)
bx(lr)
label(START)
cpsid('i') # disable interrupts so we control the display
mov(r0, 0x50) # r0=0x50
lsl(r0, r0, 16) # r0=0x500000
add(r0, 0x05) # r0=0x500005
lsl(r0, r0, 8) # r0=0x50000500 -- this points to GPIO registers
mov(r1, 0b111)
lsl(r1, r1, 13) # r1=0xe000
str(r1, [r0, 8]) # pull all rows high
mov(r1, 1 << 4) # r1 holds current col bit
mov(r2, 9) # r2 holds number of cols left
label(loop_on)
str(r1, [r0, 12]) # pull col low to turn LEDs on
bl(DELAY) # wait
lsl(r1, r1, 1) # shift to next col
sub(r2, 1) # decrease col counter
bne(loop_on) # loop while there are still cols left
mov(r1, 1 << 4) # r1 holds current col bit
mov(r2, 9) # r2 holds number of cols left
label(loop_off)
str(r1, [r0, 8]) # pull col high to turn LEDs off
bl(DELAY) # wait
lsl(r1, r1, 1) # shift to next col
sub(r2, 1) # decrease col counter
bne(loop_off) # loop while there are still cols left
cpsie('i') # enable interrupts
for i in range(4):
led_cycle()
|
Add example using the inline assembler.
"""
This script uses the inline assembler to make the LEDs light up
in a pattern based on how they are multiplexed in rows/cols.
"""
# row pins: 13, 14, 15
# col pins: 4..12 inclusive
# GPIO words starting at 0x50000500:
# RESERVED, OUT, OUTSET, OUTCLR, IN, DIR, DIRSET, DIRCLR
@micropython.asm_thumb
def led_cycle():
b(START)
# DELAY routine
label(DELAY)
mov(r3, 0xa0)
lsl(r3, r3, 11)
label(delay_loop)
sub(r3, 1)
bne(delay_loop)
bx(lr)
label(START)
cpsid('i') # disable interrupts so we control the display
mov(r0, 0x50) # r0=0x50
lsl(r0, r0, 16) # r0=0x500000
add(r0, 0x05) # r0=0x500005
lsl(r0, r0, 8) # r0=0x50000500 -- this points to GPIO registers
mov(r1, 0b111)
lsl(r1, r1, 13) # r1=0xe000
str(r1, [r0, 8]) # pull all rows high
mov(r1, 1 << 4) # r1 holds current col bit
mov(r2, 9) # r2 holds number of cols left
label(loop_on)
str(r1, [r0, 12]) # pull col low to turn LEDs on
bl(DELAY) # wait
lsl(r1, r1, 1) # shift to next col
sub(r2, 1) # decrease col counter
bne(loop_on) # loop while there are still cols left
mov(r1, 1 << 4) # r1 holds current col bit
mov(r2, 9) # r2 holds number of cols left
label(loop_off)
str(r1, [r0, 8]) # pull col high to turn LEDs off
bl(DELAY) # wait
lsl(r1, r1, 1) # shift to next col
sub(r2, 1) # decrease col counter
bne(loop_off) # loop while there are still cols left
cpsie('i') # enable interrupts
for i in range(4):
led_cycle()
|
<commit_before><commit_msg>Add example using the inline assembler.<commit_after>"""
This script uses the inline assembler to make the LEDs light up
in a pattern based on how they are multiplexed in rows/cols.
"""
# row pins: 13, 14, 15
# col pins: 4..12 inclusive
# GPIO words starting at 0x50000500:
# RESERVED, OUT, OUTSET, OUTCLR, IN, DIR, DIRSET, DIRCLR
@micropython.asm_thumb
def led_cycle():
b(START)
# DELAY routine
label(DELAY)
mov(r3, 0xa0)
lsl(r3, r3, 11)
label(delay_loop)
sub(r3, 1)
bne(delay_loop)
bx(lr)
label(START)
cpsid('i') # disable interrupts so we control the display
mov(r0, 0x50) # r0=0x50
lsl(r0, r0, 16) # r0=0x500000
add(r0, 0x05) # r0=0x500005
lsl(r0, r0, 8) # r0=0x50000500 -- this points to GPIO registers
mov(r1, 0b111)
lsl(r1, r1, 13) # r1=0xe000
str(r1, [r0, 8]) # pull all rows high
mov(r1, 1 << 4) # r1 holds current col bit
mov(r2, 9) # r2 holds number of cols left
label(loop_on)
str(r1, [r0, 12]) # pull col low to turn LEDs on
bl(DELAY) # wait
lsl(r1, r1, 1) # shift to next col
sub(r2, 1) # decrease col counter
bne(loop_on) # loop while there are still cols left
mov(r1, 1 << 4) # r1 holds current col bit
mov(r2, 9) # r2 holds number of cols left
label(loop_off)
str(r1, [r0, 8]) # pull col high to turn LEDs off
bl(DELAY) # wait
lsl(r1, r1, 1) # shift to next col
sub(r2, 1) # decrease col counter
bne(loop_off) # loop while there are still cols left
cpsie('i') # enable interrupts
for i in range(4):
led_cycle()
|
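For comparison, a rough pure-Python sketch of the same column sweep follows. It is a hedged illustration, not part of the commit, and it assumes MicroPython's machine.mem32 accessor and time.sleep_ms are available on the board; the register offsets come from the comments above (OUTSET at +0x08, OUTCLR at +0x0C). Unlike the assembler version it does not disable interrupts, so the stock display driver may repaint over it.

import time
import machine

GPIO_BASE = 0x50000500
OUTSET = GPIO_BASE + 0x08   # write a 1 bit to drive that pin high
OUTCLR = GPIO_BASE + 0x0C   # write a 1 bit to drive that pin low

def led_cycle_py(delay_ms=20):
    machine.mem32[OUTSET] = 0b111 << 13      # rows 13..15 high
    for col in range(4, 13):                 # cols 4..12: pulling a col low lights it
        machine.mem32[OUTCLR] = 1 << col
        time.sleep_ms(delay_ms)
    for col in range(4, 13):                 # pulling it high again turns it off
        machine.mem32[OUTSET] = 1 << col
        time.sleep_ms(delay_ms)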
|
bae0360435a42b0298c0728f0c718d415b12938d
|
socket_server.py
|
socket_server.py
|
#!/usr/bin/env python
import socket
from datetime import datetime
def req_ok(content):
time= datetime.now()
length= len(bytearray(content))
response= "HTTP/1.1 200 OK\r\nDate: {t}\r\nContent-Length: {l}\r\n\r\n".format(t=time, l=length)
return bytearray(response)
def req_notok(content):
return bytearray("HTTP/1.1 400 Bad Request")
def parse_uri(request):
if request[:3] != 'GET':
return -1
elif request[-10:-2] != 'HTTP/1.1':
return -2
else:
return bytearray(request[4:-9])
def Main():
port= 50000
address= '127.0.0.1'
print "\nSocket server starting...\n"
s= socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_IP)
s.bind((address, port))
s.listen(1)
conn, client_addr= s.accept()
request= str(conn.recv(1024))
if parse_uri(request)== -1:
message= bytearray('Bad Request. Must be of the form GET\n')
elif parse_uri(request)== -2:
message= bytearray('Bad Request. HTTP protocol must be 1.1\n')
else:
message= req_ok(request)
conn.sendall(message)
conn.shutdown(socket.SHUT_WR)
conn.close()
s.close()
if __name__ == '__main__':
Main()
|
Add first attempt at http server assignment.
|
Add first attempt at http server assignment.
|
Python
|
mit
|
charlieRode/network_tools
|
Add first attempt at http server assignment.
|
#!/usr/bin/env python
import socket
from datetime import datetime
def req_ok(content):
time= datetime.now()
length= len(bytearray(content))
response= "HTTP/1.1 200 OK\r\nDate: {t}\r\nContent-Length: {l}\r\n\r\n".format(t=time, l=length)
return bytearray(response)
def req_notok(content):
return bytearray("HTTP/1.1 400 Bad Request")
def parse_uri(request):
if request[:3] != 'GET':
return -1
elif request[-10:-2] != 'HTTP/1.1':
return -2
else:
return bytearray(request[4:-9])
def Main():
port= 50000
address= '127.0.0.1'
print "\nSocket server starting...\n"
s= socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_IP)
s.bind((address, port))
s.listen(1)
conn, client_addr= s.accept()
request= str(conn.recv(1024))
if parse_uri(request)== -1:
message= bytearray('Bad Request. Must be of the form GET\n')
elif parse_uri(request)== -2:
message= bytearray('Bad Request. HTTP protocol must be 1.1\n')
else:
message= req_ok(request)
conn.sendall(message)
conn.shutdown(socket.SHUT_WR)
conn.close()
s.close()
if __name__ == '__main__':
Main()
|
<commit_before><commit_msg>Add first attempt at http server assignment.<commit_after>
|
#!/usr/bin/env python
import socket
from datetime import datetime
def req_ok(content):
time= datetime.now()
length= len(bytearray(content))
response= "HTTP/1.1 200 OK\r\nDate: {t}\r\nContent-Length: {l}\r\n\r\n".format(t=time, l=length)
return bytearray(response)
def req_notok(content):
return bytearray("HTTP/1.1 400 Bad Request")
def parse_uri(request):
if request[:3] != 'GET':
return -1
elif request[-10:-2] != 'HTTP/1.1':
return -2
else:
return bytearray(request[4:-9])
def Main():
port= 50000
address= '127.0.0.1'
print "\nSocket server starting...\n"
s= socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_IP)
s.bind((address, port))
s.listen(1)
conn, client_addr= s.accept()
request= str(conn.recv(1024))
if parse_uri(request)== -1:
message= bytearray('Bad Request. Must be of the form GET\n')
elif parse_uri(request)== -2:
message= bytearray('Bad Request. HTTP protocol must be 1.1\n')
else:
message= req_ok(request)
conn.sendall(message)
conn.shutdown(socket.SHUT_WR)
conn.close()
s.close()
if __name__ == '__main__':
Main()
|
Add first attempt at http server assignment.#!/usr/bin/env python
import socket
from datetime import datetime
def req_ok(content):
time= datetime.now()
length= len(bytearray(content))
response= "HTTP/1.1 200 OK\r\nDate: {t}\r\nContent-Length: {l}\r\n\r\n".format(t=time, l=length)
return bytearray(response)
def req_notok(content):
return bytearray("HTTP/1.1 400 Bad Request")
def parse_uri(request):
if request[:3] != 'GET':
return -1
elif request[-10:-2] != 'HTTP/1.1':
return -2
else:
return bytearray(request[4:-9])
def Main():
port= 50000
address= '127.0.0.1'
print "\nSocket server starting...\n"
s= socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_IP)
s.bind((address, port))
s.listen(1)
conn, client_addr= s.accept()
request= str(conn.recv(1024))
if parse_uri(request)== -1:
message= bytearray('Bad Request. Must be of the form GET\n')
elif parse_uri(request)== -2:
message= bytearray('Bad Request. HTTP protocol must be 1.1\n')
else:
message= req_ok(request)
conn.sendall(message)
conn.shutdown(socket.SHUT_WR)
conn.close()
s.close()
if __name__ == '__main__':
Main()
|
<commit_before><commit_msg>Add first attempt at http server assignment.<commit_after>#!/usr/bin/env python
import socket
from datetime import datetime
def req_ok(content):
time= datetime.now()
length= len(bytearray(content))
response= "HTTP/1.1 200 OK\r\nDate: {t}\r\nContent-Length: {l}\r\n\r\n".format(t=time, l=length)
return bytearray(response)
def req_notok(content):
return bytearray("HTTP/1.1 400 Bad Request")
def parse_uri(request):
if request[:3] != 'GET':
return -1
elif request[-10:-2] != 'HTTP/1.1':
return -2
else:
return bytearray(request[4:-9])
def Main():
port= 50000
address= '127.0.0.1'
print "\nSocket server starting...\n"
s= socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_IP)
s.bind((address, port))
s.listen(1)
conn, client_addr= s.accept()
request= str(conn.recv(1024))
if parse_uri(request)== -1:
message= bytearray('Bad Request. Must be of the form GET\n')
elif parse_uri(request)== -2:
message= bytearray('Bad Request. HTTP protocol must be 1.1\n')
else:
message= req_ok(request)
conn.sendall(message)
conn.shutdown(socket.SHUT_WR)
conn.close()
s.close()
if __name__ == '__main__':
Main()
|
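A throwaway client is enough to poke this first attempt; the sketch below is not from the repository, it simply mirrors the hard-coded address and port in Main() and sends a request line that passes the GET and HTTP/1.1 checks in parse_uri.

import socket

def smoke_test(address='127.0.0.1', port=50000):
    c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    c.connect((address, port))
    c.sendall("GET /index.html HTTP/1.1\r\n")   # ends in HTTP/1.1 plus CRLF, as parse_uri expects
    print c.recv(1024)                          # Python 2 print, matching the server code
    c.close()

if __name__ == '__main__':
    smoke_test()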
|
efd5fd08833d562d90f0dc8008af368665a054b8
|
worker.py
|
worker.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
zhihu_crawler.worker
~~~~~~~~~~~~~~~~
Use rq module to support distributed crawler task assignment,
deploy this file to a machine cluster.
"""
import os
import redis
from rq import Worker, Queue, Connection
listen = ['high', 'default', 'low']
redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
conn = redis.from_url(redis_url)
if __name__ == '__main__':
with Connection(conn):
worker = Worker(map(Queue, listen))
worker.work()
|
Add rq module to support distributed cluster.
|
Add rq module to support distributed cluster.
|
Python
|
mit
|
cpselvis/zhihu-crawler
|
Add rq module to support distributed cluster.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
zhihu_crawler.worker
~~~~~~~~~~~~~~~~
Use rq module to support distributed crawler task assignment,
deploy this file to a machine cluster.
"""
import os
import redis
from rq import Worker, Queue, Connection
listen = ['high', 'default', 'low']
redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
conn = redis.from_url(redis_url)
if __name__ == '__main__':
with Connection(conn):
worker = Worker(map(Queue, listen))
worker.work()
|
<commit_before><commit_msg>Add rq module to support distributed cluster.<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
zhihu_crawler.worker
~~~~~~~~~~~~~~~~
Use rq module to support distributed crawler task assignment,
deploy this file to a machine cluster.
"""
import os
import redis
from rq import Worker, Queue, Connection
listen = ['high', 'default', 'low']
redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
conn = redis.from_url(redis_url)
if __name__ == '__main__':
with Connection(conn):
worker = Worker(map(Queue, listen))
worker.work()
|
Add rq module to support distributed cluster.#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
zhihu_crawler.worker
~~~~~~~~~~~~~~~~
Use rq module to support distributed crawler task assignment,
deploy this file to a machine cluster.
"""
import os
import redis
from rq import Worker, Queue, Connection
listen = ['high', 'default', 'low']
redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
conn = redis.from_url(redis_url)
if __name__ == '__main__':
with Connection(conn):
worker = Worker(map(Queue, listen))
worker.work()
|
<commit_before><commit_msg>Add rq module to support distributed cluster.<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
zhihu_crawler.worker
~~~~~~~~~~~~~~~~
Use rq module to support distributed crawler task assignment,
deploy this file to a machine cluster.
"""
import os
import redis
from rq import Worker, Queue, Connection
listen = ['high', 'default', 'low']
redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
conn = redis.from_url(redis_url)
if __name__ == '__main__':
with Connection(conn):
worker = Worker(map(Queue, listen))
worker.work()
|
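The worker only consumes jobs; a producer elsewhere pushes them onto the same Redis-backed queues. A hedged sketch of that side follows: the dotted task path is made up for illustration, only the rq Queue and enqueue calls are standard.

import os
import redis
from rq import Queue

redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
conn = redis.from_url(redis_url)

q = Queue('default', connection=conn)                             # one of the queues the worker listens on
job = q.enqueue('zhihu_crawler.tasks.crawl_user', 'user-token')   # hypothetical task path, for illustration only
print job.id                                                      # job.result appears once a worker finishes it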
|
97e4d80bbd5b0b5ad03d51b72083174402f183f6
|
learning_automata.py
|
learning_automata.py
|
# Learning_automata.py is a module containing some learning automata.
# For now it only contains a Tsetlin 2N,2 automaton.
# Write a Tsetlin automaton.
# A Tsetlin automaton is defined by N memory states, R actions,
# and c penalties.
#
# N is expected to be a whole number indicating memory depth.
# R is expected to be a whole number indicating the number of
# actions.
# c is a vector quantity indicating the penalties for actions
# 1 to i.
def tsetlin(N, R, c):
'''Tsetlin automata. N, R positive integers. c a structure
of penalties from action 1 to action i.'''
pass
|
Add a module to contain learning automata with an empty function for the Tsetlin automata.
|
Add a module to contain learning automata with an empty function for the Tsetlin automata.
|
Python
|
mit
|
0xSteve/learning_automata_simulator
|
Add a module to contain learning automata with an empty function for the Tsetlin automata.
|
# Learning_automata.py is a module containing some learning automata.
# For now it only contains a Tsetlin 2N,2 automaton.
# Write a Tsetlin automaton.
# A Tsetlin automaton is defined by N memory states, R actions,
# and c penalties.
#
# N is expected to be a whole number indicating memory depth.
# R is expected to be a whole number indicating the number of
# actions.
# c is a vector quantity indicating the penalties for actions
# 1 to i.
def tsetlin(N, R, c):
'''Tsetlin automata. N, R positive integers. c a structure
of penalties from action 1 to action i.'''
pass
|
<commit_before><commit_msg>Add a module to contain learning automata with an empty function for the Tsetlin automata.<commit_after>
|
# Learning_automata.py is a module containing some learning automata.
# For now it only contains a Tsetlin 2N,2 automaton.
# Write a Tsetlin automaton.
# A Tsetlin automaton is defined by N memory states, R actions,
# and c penalties.
#
# N is expected to be a whole number indicating memory depth.
# R is expected to be a while number indicating the number of,
# actions.
# c is a vector quantity indicating the penalties for actions
# 1 to i.
def tsetlin(N, R, c):
'''Tsetlin automata. N, R positive integers. c a structure
of penalties from action 1 to action i.'''
pass
|
Add a module to contain learning automata with an empty function for the Tsetlin automata.# Learning_automata.py is a module containing some learning automata.
# For now it only contains a Tsetlin 2N,2 automaton.
# Write a Tsetlin automaton.
# A Tsetlin automaton is defined by N memory states, R actions,
# and c penalties.
#
# N is expected to be a whole number indicating memory depth.
# R is expected to be a whole number indicating the number of
# actions.
# c is a vector quantity indicating the penalties for actions
# 1 to i.
def tsetlin(N, R, c):
'''Tsetlin automata. N, R positive integers. c a structure
of penalties from action 1 to action i.'''
pass
|
<commit_before><commit_msg>Add a module to contain learning automata with an empty function for the Tsetlin automata.<commit_after># Learning_automata.py is a module containing some learning automata.
# For now it only contains a Tsetlin 2N,2 automaton.
# Write a Tsetlin automaton.
# A Tsetlin automaton is defined by N memory states, R actions,
# and c penalties.
#
# N is expected to be a whole number indicating memory depth.
# R is expected to be a whole number indicating the number of
# actions.
# c is a vector quantity indicating the penalties for actions
# 1 to i.
def tsetlin(N, R, c):
'''Tsetlin automata. N, R positive integers. c a structure
of penalties from action 1 to action i.'''
pass
|
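The stub leaves the automaton itself for later. As a hedged illustration of one classic formulation, and not the repository's design, a two-action Tsetlin automaton with memory depth N can be kept as a single integer state: 1..N selects action 0, N+1..2N selects action 1, a reward pushes the state deeper into its half, a penalty pushes it toward (and across) the boundary.

import random

class Tsetlin2(object):
    """Two-action Tsetlin automaton with memory depth N (2N states)."""
    def __init__(self, N, c):
        self.N = N
        self.c = c            # c[a]: probability that action a draws a penalty
        self.state = N        # start at the action-0 boundary

    def action(self):
        return 0 if self.state <= self.N else 1

    def step(self):
        a = self.action()
        if random.random() < self.c[a]:
            # penalty: drift toward the boundary; crossing it flips the action
            self.state += 1 if a == 0 else -1
        else:
            # reward: dig deeper into the current action's half
            self.state = max(1, self.state - 1) if a == 0 else min(2 * self.N, self.state + 1)
        return a

# quick demo: with penalties c=[0.7, 0.2], the automaton should mostly pick action 1
automaton = Tsetlin2(N=5, c=[0.7, 0.2])
picks = [automaton.step() for _ in range(1000)]
print(picks.count(1) / float(len(picks)))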
|
69884909800ac468b7ad7d1fc9491facec9e79b6
|
zephyr/management/commands/update_permissions.py
|
zephyr/management/commands/update_permissions.py
|
from __future__ import absolute_import
from django.core.management.base import BaseCommand
from django.db.models import get_app, get_models
from django.contrib.auth.management import create_permissions
class Command(BaseCommand):
help = "Sync newly created object permissions to the database"
def handle(self, *args, **options):
# From http://stackoverflow.com/a/11914435/90777
create_permissions(get_app("zephyr"), get_models(), 2)
|
Add a management command to create objects for newly defined permissions
|
Add a management command to create objects for newly defined permissions
This script does not remove permissions that already exist.
(imported from commit 15d18266a05a84b9cac6cc7d2104668b41b48f35)
|
Python
|
apache-2.0
|
nicholasbs/zulip,shrikrishnaholla/zulip,tiansiyuan/zulip,arpith/zulip,arpitpanwar/zulip,kokoar/zulip,brainwane/zulip,bastianh/zulip,qq1012803704/zulip,aliceriot/zulip,yuvipanda/zulip,vikas-parashar/zulip,natanovia/zulip,mahim97/zulip,he15his/zulip,dwrpayne/zulip,hengqujushi/zulip,Batterfii/zulip,EasonYi/zulip,hafeez3000/zulip,gigawhitlocks/zulip,schatt/zulip,Diptanshu8/zulip,jerryge/zulip,pradiptad/zulip,moria/zulip,kaiyuanheshang/zulip,levixie/zulip,wweiradio/zulip,bluesea/zulip,codeKonami/zulip,susansls/zulip,brockwhittaker/zulip,jonesgithub/zulip,peiwei/zulip,thomasboyt/zulip,he15his/zulip,he15his/zulip,rht/zulip,noroot/zulip,bowlofstew/zulip,fw1121/zulip,ufosky-server/zulip,hj3938/zulip,xuanhan863/zulip,sharmaeklavya2/zulip,codeKonami/zulip,zhaoweigg/zulip,MayB/zulip,udxxabp/zulip,yuvipanda/zulip,ikasumiwt/zulip,huangkebo/zulip,proliming/zulip,christi3k/zulip,andersk/zulip,voidException/zulip,gigawhitlocks/zulip,KingxBanana/zulip,so0k/zulip,umkay/zulip,noroot/zulip,karamcnair/zulip,udxxabp/zulip,esander91/zulip,alliejones/zulip,dxq-git/zulip,hackerkid/zulip,amyliu345/zulip,MariaFaBella85/zulip,arpith/zulip,dotcool/zulip,tiansiyuan/zulip,LeeRisk/zulip,kou/zulip,dhcrzf/zulip,DazWorrall/zulip,Jianchun1/zulip,vaidap/zulip,aps-sids/zulip,zhaoweigg/zulip,joshisa/zulip,esander91/zulip,EasonYi/zulip,TigorC/zulip,adnanh/zulip,Galexrt/zulip,Suninus/zulip,jainayush975/zulip,grave-w-grave/zulip,tiansiyuan/zulip,shaunstanislaus/zulip,umkay/zulip,developerfm/zulip,PaulPetring/zulip,noroot/zulip,RobotCaleb/zulip,jphilipsen05/zulip,voidException/zulip,zwily/zulip,bssrdf/zulip,bssrdf/zulip,luyifan/zulip,hafeez3000/zulip,amanharitsh123/zulip,proliming/zulip,levixie/zulip,babbage/zulip,mahim97/zulip,mdavid/zulip,joyhchen/zulip,Vallher/zulip,zhaoweigg/zulip,jimmy54/zulip,jainayush975/zulip,ufosky-server/zulip,christi3k/zulip,babbage/zulip,Galexrt/zulip,vabs22/zulip,suxinde2009/zulip,proliming/zulip,qq1012803704/zulip,mohsenSy/zulip,hj3938/zulip,avastu/zulip,dnmfarrell/zulip,jphilipsen05/zulip,timabbott/zulip,niftynei/zulip,deer-hope/zulip,he15his/zulip,aliceriot/zulip,arpitpanwar/zulip,noroot/zulip,so0k/zulip,LAndreas/zulip,Drooids/zulip,PhilSk/zulip,zulip/zulip,tommyip/zulip,showell/zulip,RobotCaleb/zulip,armooo/zulip,AZtheAsian/zulip,eeshangarg/zulip,armooo/zulip,isht3/zulip,DazWorrall/zulip,showell/zulip,glovebx/zulip,luyifan/zulip,showell/zulip,babbage/zulip,j831/zulip,zacps/zulip,jackrzhang/zulip,swinghu/zulip,dotcool/zulip,arpitpanwar/zulip,vaidap/zulip,vaidap/zulip,EasonYi/zulip,tdr130/zulip,fw1121/zulip,esander91/zulip,aakash-cr7/zulip,Frouk/zulip,ApsOps/zulip,peguin40/zulip,RobotCaleb/zulip,Vallher/zulip,showell/zulip,dxq-git/zulip,willingc/zulip,hackerkid/zulip,andersk/zulip,littledogboy/zulip,so0k/zulip,deer-hope/zulip,shubhamdhama/zulip,m1ssou/zulip,mohsenSy/zulip,punchagan/zulip,Vallher/zulip,PaulPetring/zulip,codeKonami/zulip,tdr130/zulip,karamcnair/zulip,KJin99/zulip,bluesea/zulip,developerfm/zulip,dawran6/zulip,hayderimran7/zulip,dwrpayne/zulip,jrowan/zulip,Drooids/zulip,JanzTam/zulip,technicalpickles/zulip,zachallaun/zulip,wweiradio/zulip,Batterfii/zulip,praveenaki/zulip,amanharitsh123/zulip,lfranchi/zulip,amallia/zulip,yocome/zulip,wdaher/zulip,firstblade/zulip,stamhe/zulip,shrikrishnaholla/zulip,verma-varsha/zulip,udxxabp/zulip,shaunstanislaus/zulip,rht/zulip,zhaoweigg/zulip,huangkebo/zulip,Juanvulcano/zulip,PaulPetring/zulip,fw1121/zulip,ipernet/zulip,PhilSk/zulip,eeshangarg/zulip,brockwhittaker/zulip,wangdeshui/zulip,grave-w-grave/zulip,alliejones/zulip,paxapy/zulip,bastianh/zulip,ikas
umiwt/zulip,aliceriot/zulip,wweiradio/zulip,Diptanshu8/zulip,alliejones/zulip,hayderimran7/zulip,Juanvulcano/zulip,itnihao/zulip,reyha/zulip,Qgap/zulip,moria/zulip,jessedhillon/zulip,vakila/zulip,KJin99/zulip,ipernet/zulip,reyha/zulip,he15his/zulip,KJin99/zulip,atomic-labs/zulip,timabbott/zulip,fw1121/zulip,Cheppers/zulip,samatdav/zulip,xuxiao/zulip,mahim97/zulip,amallia/zulip,kaiyuanheshang/zulip,technicalpickles/zulip,gkotian/zulip,yocome/zulip,RobotCaleb/zulip,easyfmxu/zulip,dawran6/zulip,he15his/zulip,shubhamdhama/zulip,pradiptad/zulip,Galexrt/zulip,MariaFaBella85/zulip,DazWorrall/zulip,aps-sids/zulip,aakash-cr7/zulip,hustlzp/zulip,firstblade/zulip,wavelets/zulip,reyha/zulip,tommyip/zulip,rishig/zulip,hackerkid/zulip,peiwei/zulip,JPJPJPOPOP/zulip,avastu/zulip,mohsenSy/zulip,suxinde2009/zulip,codeKonami/zulip,developerfm/zulip,TigorC/zulip,isht3/zulip,moria/zulip,KingxBanana/zulip,hj3938/zulip,dattatreya303/zulip,hj3938/zulip,zachallaun/zulip,qq1012803704/zulip,bowlofstew/zulip,kokoar/zulip,christi3k/zulip,ikasumiwt/zulip,dwrpayne/zulip,amyliu345/zulip,gigawhitlocks/zulip,willingc/zulip,susansls/zulip,Suninus/zulip,paxapy/zulip,huangkebo/zulip,Batterfii/zulip,Jianchun1/zulip,shubhamdhama/zulip,jeffcao/zulip,vakila/zulip,tiansiyuan/zulip,natanovia/zulip,he15his/zulip,krtkmj/zulip,levixie/zulip,showell/zulip,hackerkid/zulip,hengqujushi/zulip,ashwinirudrappa/zulip,sharmaeklavya2/zulip,calvinleenyc/zulip,brainwane/zulip,vikas-parashar/zulip,lfranchi/zulip,praveenaki/zulip,SmartPeople/zulip,cosmicAsymmetry/zulip,KingxBanana/zulip,johnny9/zulip,timabbott/zulip,yuvipanda/zulip,kokoar/zulip,bitemyapp/zulip,bitemyapp/zulip,AZtheAsian/zulip,grave-w-grave/zulip,zulip/zulip,willingc/zulip,dnmfarrell/zulip,mdavid/zulip,cosmicAsymmetry/zulip,natanovia/zulip,jerryge/zulip,rht/zulip,dattatreya303/zulip,Cheppers/zulip,brainwane/zulip,armooo/zulip,j831/zulip,brainwane/zulip,firstblade/zulip,littledogboy/zulip,developerfm/zulip,ikasumiwt/zulip,reyha/zulip,joshisa/zulip,jrowan/zulip,verma-varsha/zulip,huangkebo/zulip,mohsenSy/zulip,ufosky-server/zulip,verma-varsha/zulip,jonesgithub/zulip,nicholasbs/zulip,littledogboy/zulip,littledogboy/zulip,hustlzp/zulip,jimmy54/zulip,shubhamdhama/zulip,wavelets/zulip,mdavid/zulip,armooo/zulip,moria/zulip,niftynei/zulip,AZtheAsian/zulip,JPJPJPOPOP/zulip,hafeez3000/zulip,kokoar/zulip,udxxabp/zulip,hustlzp/zulip,suxinde2009/zulip,dwrpayne/zulip,willingc/zulip,tommyip/zulip,bowlofstew/zulip,codeKonami/zulip,sup95/zulip,thomasboyt/zulip,xuanhan863/zulip,MayB/zulip,ryanbackman/zulip,jimmy54/zulip,kokoar/zulip,seapasulli/zulip,ufosky-server/zulip,tdr130/zulip,lfranchi/zulip,bitemyapp/zulip,ahmadassaf/zulip,mansilladev/zulip,aps-sids/zulip,xuanhan863/zulip,synicalsyntax/zulip,mansilladev/zulip,arpitpanwar/zulip,zofuthan/zulip,zhaoweigg/zulip,wangdeshui/zulip,voidException/zulip,Gabriel0402/zulip,dnmfarrell/zulip,lfranchi/zulip,hengqujushi/zulip,johnnygaddarr/zulip,sup95/zulip,RobotCaleb/zulip,dxq-git/zulip,johnnygaddarr/zulip,hayderimran7/zulip,zwily/zulip,mahim97/zulip,ufosky-server/zulip,synicalsyntax/zulip,Diptanshu8/zulip,jackrzhang/zulip,bastianh/zulip,vakila/zulip,Diptanshu8/zulip,gkotian/zulip,amallia/zulip,tdr130/zulip,zofuthan/zulip,joshisa/zulip,mdavid/zulip,kou/zulip,amallia/zulip,easyfmxu/zulip,seapasulli/zulip,brockwhittaker/zulip,brockwhittaker/zulip,jimmy54/zulip,KingxBanana/zulip,andersk/zulip,punchagan/zulip,punchagan/zulip,zorojean/zulip,mansilladev/zulip,johnny9/zulip,peiwei/zulip,aakash-cr7/zulip,eastlhu/zulip,ryansnowboarder/zulip,hengqujushi/zulip,j831/zulip
,natanovia/zulip,shrikrishnaholla/zulip,ApsOps/zulip,hayderimran7/zulip,bssrdf/zulip,shrikrishnaholla/zulip,Drooids/zulip,joyhchen/zulip,easyfmxu/zulip,Qgap/zulip,akuseru/zulip,dotcool/zulip,zwily/zulip,ryansnowboarder/zulip,vakila/zulip,noroot/zulip,swinghu/zulip,gkotian/zulip,LeeRisk/zulip,grave-w-grave/zulip,zachallaun/zulip,atomic-labs/zulip,blaze225/zulip,SmartPeople/zulip,themass/zulip,ashwinirudrappa/zulip,brainwane/zulip,peiwei/zulip,babbage/zulip,jeffcao/zulip,zachallaun/zulip,stamhe/zulip,AZtheAsian/zulip,ryanbackman/zulip,rht/zulip,amyliu345/zulip,m1ssou/zulip,KJin99/zulip,Cheppers/zulip,amallia/zulip,Galexrt/zulip,adnanh/zulip,firstblade/zulip,Gabriel0402/zulip,jimmy54/zulip,umkay/zulip,JanzTam/zulip,thomasboyt/zulip,christi3k/zulip,proliming/zulip,dhcrzf/zulip,guiquanz/zulip,bowlofstew/zulip,schatt/zulip,kou/zulip,zofuthan/zulip,kokoar/zulip,codeKonami/zulip,themass/zulip,jackrzhang/zulip,LeeRisk/zulip,glovebx/zulip,atomic-labs/zulip,souravbadami/zulip,souravbadami/zulip,zachallaun/zulip,peguin40/zulip,shubhamdhama/zulip,Suninus/zulip,jessedhillon/zulip,umkay/zulip,themass/zulip,JanzTam/zulip,synicalsyntax/zulip,Juanvulcano/zulip,jainayush975/zulip,tbutter/zulip,vabs22/zulip,dotcool/zulip,krtkmj/zulip,shubhamdhama/zulip,qq1012803704/zulip,jackrzhang/zulip,joyhchen/zulip,suxinde2009/zulip,verma-varsha/zulip,aliceriot/zulip,vaidap/zulip,rishig/zulip,zachallaun/zulip,andersk/zulip,Gabriel0402/zulip,shaunstanislaus/zulip,cosmicAsymmetry/zulip,hj3938/zulip,punchagan/zulip,hengqujushi/zulip,susansls/zulip,Frouk/zulip,PaulPetring/zulip,vabs22/zulip,wangdeshui/zulip,sharmaeklavya2/zulip,LeeRisk/zulip,hackerkid/zulip,Galexrt/zulip,atomic-labs/zulip,yocome/zulip,dattatreya303/zulip,bitemyapp/zulip,JPJPJPOPOP/zulip,SmartPeople/zulip,samatdav/zulip,seapasulli/zulip,kaiyuanheshang/zulip,ericzhou2008/zulip,hengqujushi/zulip,DazWorrall/zulip,tbutter/zulip,praveenaki/zulip,tbutter/zulip,akuseru/zulip,adnanh/zulip,PhilSk/zulip,avastu/zulip,EasonYi/zulip,peguin40/zulip,swinghu/zulip,Drooids/zulip,Cheppers/zulip,pradiptad/zulip,amallia/zulip,bastianh/zulip,ipernet/zulip,amanharitsh123/zulip,mansilladev/zulip,thomasboyt/zulip,rht/zulip,peiwei/zulip,susansls/zulip,m1ssou/zulip,shrikrishnaholla/zulip,jessedhillon/zulip,niftynei/zulip,aliceriot/zulip,zachallaun/zulip,KJin99/zulip,niftynei/zulip,m1ssou/zulip,Drooids/zulip,ryanbackman/zulip,udxxabp/zulip,suxinde2009/zulip,hustlzp/zulip,LAndreas/zulip,kaiyuanheshang/zulip,jonesgithub/zulip,ikasumiwt/zulip,JPJPJPOPOP/zulip,luyifan/zulip,ipernet/zulip,jeffcao/zulip,wavelets/zulip,babbage/zulip,ryanbackman/zulip,fw1121/zulip,developerfm/zulip,swinghu/zulip,sonali0901/zulip,aps-sids/zulip,wangdeshui/zulip,ericzhou2008/zulip,LAndreas/zulip,MariaFaBella85/zulip,krtkmj/zulip,dnmfarrell/zulip,MayB/zulip,souravbadami/zulip,bssrdf/zulip,so0k/zulip,LAndreas/zulip,itnihao/zulip,blaze225/zulip,vakila/zulip,fw1121/zulip,Gabriel0402/zulip,Cheppers/zulip,sonali0901/zulip,themass/zulip,dotcool/zulip,rishig/zulip,ericzhou2008/zulip,dhcrzf/zulip,grave-w-grave/zulip,Vallher/zulip,vabs22/zulip,bssrdf/zulip,wweiradio/zulip,TigorC/zulip,hj3938/zulip,wavelets/zulip,Galexrt/zulip,peguin40/zulip,dawran6/zulip,shaunstanislaus/zulip,dawran6/zulip,tbutter/zulip,joshisa/zulip,bastianh/zulip,PaulPetring/zulip,Vallher/zulip,souravbadami/zulip,ApsOps/zulip,huangkebo/zulip,ufosky-server/zulip,shrikrishnaholla/zulip,yuvipanda/zulip,pradiptad/zulip,vabs22/zulip,zacps/zulip,MayB/zulip,zofuthan/zulip,dxq-git/zulip,j831/zulip,adnanh/zulip,tbutter/zulip,sharmaeklavya2/zulip,jainayush975/zulip
,ApsOps/zulip,calvinleenyc/zulip,aakash-cr7/zulip,Frouk/zulip,tommyip/zulip,akuseru/zulip,itnihao/zulip,bluesea/zulip,punchagan/zulip,johnny9/zulip,ahmadassaf/zulip,kaiyuanheshang/zulip,ashwinirudrappa/zulip,eastlhu/zulip,proliming/zulip,guiquanz/zulip,MariaFaBella85/zulip,thomasboyt/zulip,Frouk/zulip,dxq-git/zulip,rishig/zulip,jessedhillon/zulip,JanzTam/zulip,dwrpayne/zulip,yocome/zulip,Jianchun1/zulip,Cheppers/zulip,bssrdf/zulip,tdr130/zulip,DazWorrall/zulip,Drooids/zulip,so0k/zulip,DazWorrall/zulip,swinghu/zulip,timabbott/zulip,vikas-parashar/zulip,gkotian/zulip,mohsenSy/zulip,Gabriel0402/zulip,swinghu/zulip,eastlhu/zulip,dawran6/zulip,eastlhu/zulip,nicholasbs/zulip,bastianh/zulip,synicalsyntax/zulip,developerfm/zulip,xuxiao/zulip,pradiptad/zulip,kaiyuanheshang/zulip,dnmfarrell/zulip,dwrpayne/zulip,amanharitsh123/zulip,dhcrzf/zulip,SmartPeople/zulip,zulip/zulip,avastu/zulip,isht3/zulip,swinghu/zulip,wangdeshui/zulip,ashwinirudrappa/zulip,yuvipanda/zulip,reyha/zulip,themass/zulip,paxapy/zulip,bluesea/zulip,zorojean/zulip,levixie/zulip,vaidap/zulip,avastu/zulip,hafeez3000/zulip,ipernet/zulip,luyifan/zulip,amanharitsh123/zulip,dotcool/zulip,Batterfii/zulip,SmartPeople/zulip,kou/zulip,sonali0901/zulip,moria/zulip,ryansnowboarder/zulip,stamhe/zulip,rht/zulip,bastianh/zulip,moria/zulip,sup95/zulip,alliejones/zulip,tiansiyuan/zulip,xuxiao/zulip,bluesea/zulip,johnnygaddarr/zulip,hj3938/zulip,dhcrzf/zulip,amanharitsh123/zulip,jerryge/zulip,deer-hope/zulip,pradiptad/zulip,jphilipsen05/zulip,babbage/zulip,JanzTam/zulip,aakash-cr7/zulip,MariaFaBella85/zulip,Juanvulcano/zulip,timabbott/zulip,alliejones/zulip,suxinde2009/zulip,jainayush975/zulip,jackrzhang/zulip,krtkmj/zulip,seapasulli/zulip,mdavid/zulip,wangdeshui/zulip,akuseru/zulip,jerryge/zulip,peguin40/zulip,deer-hope/zulip,arpith/zulip,eastlhu/zulip,nicholasbs/zulip,LeeRisk/zulip,brainwane/zulip,littledogboy/zulip,paxapy/zulip,joshisa/zulip,saitodisse/zulip,ApsOps/zulip,dawran6/zulip,armooo/zulip,krtkmj/zulip,xuxiao/zulip,esander91/zulip,nicholasbs/zulip,niftynei/zulip,susansls/zulip,qq1012803704/zulip,aps-sids/zulip,ashwinirudrappa/zulip,saitodisse/zulip,TigorC/zulip,jessedhillon/zulip,eeshangarg/zulip,ryansnowboarder/zulip,Frouk/zulip,thomasboyt/zulip,Jianchun1/zulip,voidException/zulip,developerfm/zulip,ashwinirudrappa/zulip,vakila/zulip,jimmy54/zulip,blaze225/zulip,peguin40/zulip,guiquanz/zulip,schatt/zulip,ashwinirudrappa/zulip,hustlzp/zulip,jrowan/zulip,littledogboy/zulip,sup95/zulip,Qgap/zulip,fw1121/zulip,esander91/zulip,tdr130/zulip,atomic-labs/zulip,natanovia/zulip,yocome/zulip,easyfmxu/zulip,LeeRisk/zulip,johnnygaddarr/zulip,LAndreas/zulip,willingc/zulip,AZtheAsian/zulip,firstblade/zulip,vaidap/zulip,jessedhillon/zulip,andersk/zulip,mahim97/zulip,tbutter/zulip,wdaher/zulip,voidException/zulip,EasonYi/zulip,wdaher/zulip,ApsOps/zulip,zacps/zulip,johnnygaddarr/zulip,joyhchen/zulip,punchagan/zulip,jerryge/zulip,natanovia/zulip,alliejones/zulip,eeshangarg/zulip,johnny9/zulip,samatdav/zulip,johnnygaddarr/zulip,shaunstanislaus/zulip,karamcnair/zulip,jerryge/zulip,stamhe/zulip,so0k/zulip,proliming/zulip,mansilladev/zulip,shrikrishnaholla/zulip,jimmy54/zulip,hafeez3000/zulip,blaze225/zulip,kaiyuanheshang/zulip,isht3/zulip,luyifan/zulip,sonali0901/zulip,dhcrzf/zulip,easyfmxu/zulip,xuanhan863/zulip,arpitpanwar/zulip,MayB/zulip,johnny9/zulip,KingxBanana/zulip,praveenaki/zulip,easyfmxu/zulip,lfranchi/zulip,arpitpanwar/zulip,bluesea/zulip,sonali0901/zulip,Batterfii/zulip,glovebx/zulip,zacps/zulip,babbage/zulip,niftynei/zulip,vakila/zulip,verma-vars
ha/zulip,Qgap/zulip,peiwei/zulip,ryanbackman/zulip,Qgap/zulip,jrowan/zulip,amyliu345/zulip,dnmfarrell/zulip,huangkebo/zulip,bitemyapp/zulip,kokoar/zulip,karamcnair/zulip,JanzTam/zulip,esander91/zulip,wdaher/zulip,dattatreya303/zulip,proliming/zulip,ipernet/zulip,seapasulli/zulip,guiquanz/zulip,firstblade/zulip,guiquanz/zulip,hayderimran7/zulip,levixie/zulip,xuxiao/zulip,hustlzp/zulip,dattatreya303/zulip,avastu/zulip,tiansiyuan/zulip,gkotian/zulip,jonesgithub/zulip,bitemyapp/zulip,zhaoweigg/zulip,hengqujushi/zulip,esander91/zulip,aliceriot/zulip,willingc/zulip,synicalsyntax/zulip,wweiradio/zulip,tbutter/zulip,blaze225/zulip,zorojean/zulip,gigawhitlocks/zulip,zorojean/zulip,hayderimran7/zulip,cosmicAsymmetry/zulip,zwily/zulip,tdr130/zulip,mdavid/zulip,brainwane/zulip,punchagan/zulip,LeeRisk/zulip,ikasumiwt/zulip,qq1012803704/zulip,zwily/zulip,souravbadami/zulip,vikas-parashar/zulip,amyliu345/zulip,eeshangarg/zulip,DazWorrall/zulip,zwily/zulip,TigorC/zulip,bitemyapp/zulip,mahim97/zulip,stamhe/zulip,hustlzp/zulip,technicalpickles/zulip,technicalpickles/zulip,wangdeshui/zulip,adnanh/zulip,schatt/zulip,suxinde2009/zulip,jainayush975/zulip,jphilipsen05/zulip,itnihao/zulip,Qgap/zulip,zacps/zulip,jonesgithub/zulip,saitodisse/zulip,jphilipsen05/zulip,EasonYi/zulip,Drooids/zulip,TigorC/zulip,qq1012803704/zulip,saitodisse/zulip,synicalsyntax/zulip,schatt/zulip,jeffcao/zulip,souravbadami/zulip,akuseru/zulip,MariaFaBella85/zulip,technicalpickles/zulip,m1ssou/zulip,ahmadassaf/zulip,LAndreas/zulip,yuvipanda/zulip,saitodisse/zulip,PhilSk/zulip,sonali0901/zulip,LAndreas/zulip,kou/zulip,itnihao/zulip,AZtheAsian/zulip,tiansiyuan/zulip,voidException/zulip,so0k/zulip,jeffcao/zulip,glovebx/zulip,dxq-git/zulip,calvinleenyc/zulip,mohsenSy/zulip,calvinleenyc/zulip,ryansnowboarder/zulip,gkotian/zulip,dotcool/zulip,hackerkid/zulip,atomic-labs/zulip,vabs22/zulip,calvinleenyc/zulip,RobotCaleb/zulip,bssrdf/zulip,grave-w-grave/zulip,brockwhittaker/zulip,avastu/zulip,xuxiao/zulip,Jianchun1/zulip,brockwhittaker/zulip,wdaher/zulip,sharmaeklavya2/zulip,wdaher/zulip,rishig/zulip,JPJPJPOPOP/zulip,aakash-cr7/zulip,zofuthan/zulip,jonesgithub/zulip,ahmadassaf/zulip,adnanh/zulip,mansilladev/zulip,luyifan/zulip,alliejones/zulip,pradiptad/zulip,Juanvulcano/zulip,KingxBanana/zulip,zorojean/zulip,luyifan/zulip,isht3/zulip,gigawhitlocks/zulip,kou/zulip,bowlofstew/zulip,yocome/zulip,shubhamdhama/zulip,jackrzhang/zulip,PhilSk/zulip,johnnygaddarr/zulip,umkay/zulip,mansilladev/zulip,andersk/zulip,saitodisse/zulip,j831/zulip,umkay/zulip,arpitpanwar/zulip,Suninus/zulip,RobotCaleb/zulip,synicalsyntax/zulip,eeshangarg/zulip,paxapy/zulip,glovebx/zulip,glovebx/zulip,KJin99/zulip,wavelets/zulip,sup95/zulip,Gabriel0402/zulip,dxq-git/zulip,aps-sids/zulip,wweiradio/zulip,Suninus/zulip,zulip/zulip,samatdav/zulip,jonesgithub/zulip,Frouk/zulip,mdavid/zulip,seapasulli/zulip,firstblade/zulip,Vallher/zulip,littledogboy/zulip,gigawhitlocks/zulip,ericzhou2008/zulip,zwily/zulip,MayB/zulip,Galexrt/zulip,christi3k/zulip,zofuthan/zulip,umkay/zulip,PhilSk/zulip,Jianchun1/zulip,guiquanz/zulip,ufosky-server/zulip,karamcnair/zulip,joshisa/zulip,amyliu345/zulip,itnihao/zulip,showell/zulip,xuxiao/zulip,zhaoweigg/zulip,joshisa/zulip,verma-varsha/zulip,SmartPeople/zulip,eastlhu/zulip,Batterfii/zulip,seapasulli/zulip,hackerkid/zulip,schatt/zulip,stamhe/zulip,jessedhillon/zulip,Diptanshu8/zulip,susansls/zulip,calvinleenyc/zulip,ahmadassaf/zulip,isht3/zulip,dhcrzf/zulip,johnny9/zulip,reyha/zulip,themass/zulip,lfranchi/zulip,easyfmxu/zulip,armooo/zulip,m1ssou/zulip,huangkeb
o/zulip,christi3k/zulip,thomasboyt/zulip,wavelets/zulip,arpith/zulip,adnanh/zulip,showell/zulip,themass/zulip,Batterfii/zulip,zulip/zulip,MayB/zulip,deer-hope/zulip,ahmadassaf/zulip,technicalpickles/zulip,dwrpayne/zulip,xuanhan863/zulip,andersk/zulip,JanzTam/zulip,amallia/zulip,praveenaki/zulip,Gabriel0402/zulip,xuanhan863/zulip,levixie/zulip,deer-hope/zulip,armooo/zulip,technicalpickles/zulip,timabbott/zulip,tommyip/zulip,nicholasbs/zulip,ericzhou2008/zulip,arpith/zulip,shaunstanislaus/zulip,levixie/zulip,aliceriot/zulip,codeKonami/zulip,praveenaki/zulip,Suninus/zulip,xuanhan863/zulip,ipernet/zulip,jrowan/zulip,Juanvulcano/zulip,vikas-parashar/zulip,praveenaki/zulip,KJin99/zulip,JPJPJPOPOP/zulip,krtkmj/zulip,tommyip/zulip,zacps/zulip,ikasumiwt/zulip,ryansnowboarder/zulip,itnihao/zulip,EasonYi/zulip,Diptanshu8/zulip,kou/zulip,moria/zulip,j831/zulip,hafeez3000/zulip,samatdav/zulip,arpith/zulip,yuvipanda/zulip,rht/zulip,joyhchen/zulip,bowlofstew/zulip,wweiradio/zulip,ahmadassaf/zulip,zofuthan/zulip,zorojean/zulip,udxxabp/zulip,blaze225/zulip,natanovia/zulip,bluesea/zulip,hayderimran7/zulip,ryansnowboarder/zulip,peiwei/zulip,jeffcao/zulip,stamhe/zulip,akuseru/zulip,krtkmj/zulip,jeffcao/zulip,guiquanz/zulip,akuseru/zulip,MariaFaBella85/zulip,samatdav/zulip,karamcnair/zulip,cosmicAsymmetry/zulip,shaunstanislaus/zulip,glovebx/zulip,willingc/zulip,jphilipsen05/zulip,Cheppers/zulip,zorojean/zulip,ApsOps/zulip,nicholasbs/zulip,Frouk/zulip,paxapy/zulip,zulip/zulip,dattatreya303/zulip,cosmicAsymmetry/zulip,schatt/zulip,yocome/zulip,eastlhu/zulip,wavelets/zulip,rishig/zulip,noroot/zulip,atomic-labs/zulip,rishig/zulip,voidException/zulip,jackrzhang/zulip,noroot/zulip,gkotian/zulip,zulip/zulip,vikas-parashar/zulip,Qgap/zulip,m1ssou/zulip,sharmaeklavya2/zulip,gigawhitlocks/zulip,johnny9/zulip,jrowan/zulip,Vallher/zulip,bowlofstew/zulip,eeshangarg/zulip,saitodisse/zulip,timabbott/zulip,joyhchen/zulip,aps-sids/zulip,lfranchi/zulip,dnmfarrell/zulip,tommyip/zulip,ericzhou2008/zulip,karamcnair/zulip,sup95/zulip,udxxabp/zulip,wdaher/zulip,deer-hope/zulip,PaulPetring/zulip,PaulPetring/zulip,hafeez3000/zulip,Suninus/zulip,jerryge/zulip,ericzhou2008/zulip,ryanbackman/zulip
|
Add a management command to create objects for newly defined permissions
This script does not remove permissions that already exist.
(imported from commit 15d18266a05a84b9cac6cc7d2104668b41b48f35)
|
from __future__ import absolute_import
from django.core.management.base import BaseCommand
from django.db.models import get_app, get_models
from django.contrib.auth.management import create_permissions
class Command(BaseCommand):
help = "Sync newly created object permissions to the database"
def handle(self, *args, **options):
# From http://stackoverflow.com/a/11914435/90777
create_permissions(get_app("zephyr"), get_models(), 2)
|
<commit_before><commit_msg>Add a management command to create objects for newly defined permissions
This script does not remove permissions that already exist.
(imported from commit 15d18266a05a84b9cac6cc7d2104668b41b48f35)<commit_after>
|
from __future__ import absolute_import
from django.core.management.base import BaseCommand
from django.db.models import get_app, get_models
from django.contrib.auth.management import create_permissions
class Command(BaseCommand):
help = "Sync newly created object permissions to the database"
def handle(self, *args, **options):
# From http://stackoverflow.com/a/11914435/90777
create_permissions(get_app("zephyr"), get_models(), 2)
|
Add a management command to create objects for newly defined permissions
This script does not remove permissions that already exist.
(imported from commit 15d18266a05a84b9cac6cc7d2104668b41b48f35)from __future__ import absolute_import
from django.core.management.base import BaseCommand
from django.db.models import get_app, get_models
from django.contrib.auth.management import create_permissions
class Command(BaseCommand):
help = "Sync newly created object permissions to the database"
def handle(self, *args, **options):
# From http://stackoverflow.com/a/11914435/90777
create_permissions(get_app("zephyr"), get_models(), 2)
|
<commit_before><commit_msg>Add a management command to create objects for newly defined permissions
This script does not remove permissions that already exist.
(imported from commit 15d18266a05a84b9cac6cc7d2104668b41b48f35)<commit_after>from __future__ import absolute_import
from django.core.management.base import BaseCommand
from django.db.models import get_app, get_models
from django.contrib.auth.management import create_permissions
class Command(BaseCommand):
help = "Sync newly created object permissions to the database"
def handle(self, *args, **options):
# From http://stackoverflow.com/a/11914435/90777
create_permissions(get_app("zephyr"), get_models(), 2)
|
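get_app and get_models belong to the old pre-1.7 app-loading API, which matches the Django of that era. A small usage sketch, assumed rather than taken from the repository: the command can be invoked from code as well as from the shell.

from django.core.management import call_command

# equivalent to running "./manage.py update_permissions" after adding
# new entries to a model's Meta.permissions
call_command("update_permissions")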
|
ecced598c7048739dd25bfa027939975f94d6950
|
src/import_to_elastic.py
|
src/import_to_elastic.py
|
import sys
import os
def get_file_names(dir_path):
return None
def transform_data(data):
return data
def import_json(dir_path):
file_names = get_file_names(dir_path)
for file in file_names:
# TODO: Read json files and do manipulations
# Maybe have a passed in function to do this? transform_data(?)
continue
return None
def export_to_elastic(json):
# TODO: Read elastic config from somewhere
# TODO: Push the data to elastic
return None
def main():
    if len(sys.argv) > 1 and os.path.isdir(sys.argv[1]):
json = import_json(sys.argv[1])
success = export_to_elastic(json)
else:
print "None"
if __name__ == "__main__":
main()
|
Add initial dummy skeleton structure
|
Add initial dummy skeleton structure
|
Python
|
mit
|
PinPinIre/slack-scripts
|
Add initial dummy skeleton structure
|
import sys
import os
def get_file_names(dir_path):
return None
def transform_data(data):
return data
def import_json(dir_path):
file_names = get_file_names(dir_path)
for file in file_names:
# TODO: Read json files and do manipulations
# Maybe have a passed in function to do this? transform_data(?)
continue
return None
def export_to_elastic(json):
# TODO: Read elastic config from somewhere
# TODO: Push the data to elastic
return None
def main():
    if len(sys.argv) > 1 and os.path.isdir(sys.argv[1]):
json = import_json(sys.argv[1])
success = export_to_elastic(json)
else:
print "None"
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add initial dummy skeleton structure<commit_after>
|
import sys
import os
def get_file_names(dir_path):
return None
def transform_data(data):
return data
def import_json(dir_path):
file_names = get_file_names(dir_path)
for file in file_names:
# TODO: Read json files and do manipulations
# Maybe have a passed in function to do this? transform_data(?)
continue
return None
def export_to_elastic(json):
# TODO: Read elastic config from somewhere
# TODO: Push the data to elastic
return None
def main():
    if len(sys.argv) > 1 and os.path.isdir(sys.argv[1]):
json = import_json(sys.argv[1])
success = export_to_elastic(json)
else:
print "None"
if __name__ == "__main__":
main()
|
Add initial dummy skeleton structureimport sys
import os
def get_file_names(dir_path):
return None
def transform_data(data):
return data
def import_json(dir_path):
file_names = get_file_names(dir_path)
for file in file_names:
# TODO: Read json files and do manipulations
# Maybe have a passed in function to do this? transform_data(?)
continue
return None
def export_to_elastic(json):
# TODO: Read elastic config from somewhere
# TODO: Push the data to elastic
return None
def main():
    if len(sys.argv) > 1 and os.path.isdir(sys.argv[1]):
json = import_json(sys.argv[1])
success = export_to_elastic(json)
else:
print "None"
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add initial dummy skeleton structure<commit_after>import sys
import os
def get_file_names(dir_path):
return None
def transform_data(data):
return data
def import_json(dir_path):
file_names = get_file_names(dir_path)
for file in file_names:
# TODO: Read json files and do manipulations
# Maybe have a passed in function to do this? transform_data(?)
continue
return None
def export_to_elastic(json):
# TODO: Read elastic config from somewhere
# TODO: Push the data to elastic
return None
def main():
    if len(sys.argv) > 1 and os.path.isdir(sys.argv[1]):
json = import_json(sys.argv[1])
success = export_to_elastic(json)
else:
print "None"
if __name__ == "__main__":
main()
|
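One way the first TODO could be filled in, as an assumption about intent rather than the author's plan, is to glob the export directory for JSON files and run each parsed document through transform_data before indexing.

import glob
import json
import os

def transform_data(data):
    return data                      # placeholder, as in the skeleton above

def get_file_names(dir_path):
    return glob.glob(os.path.join(dir_path, "*.json"))

def import_json(dir_path):
    records = []
    for file_name in get_file_names(dir_path):
        with open(file_name) as fh:
            records.append(transform_data(json.load(fh)))
    return records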
|
ec9ed86353070a6523c3bc6833a708422e28664e
|
CodeFights/phoneCall.py
|
CodeFights/phoneCall.py
|
#!/usr/local/bin/python
# Code Fights Phone Call Problem
def phoneCall(min1, min2_10, min11, s):
money_left = s
talking = 0
while money_left > 0:
if talking < 1:
if money_left - min1 < 0:
return talking
else:
money_left -= min1
talking += 1
elif 1 <= talking < 10:
if money_left - min2_10 < 0:
return talking
else:
money_left -= min2_10
talking += 1
else:
if money_left - min11 < 0:
return talking
else:
money_left -= min11
talking += 1
return talking
def main():
tests = [
[3, 1, 2, 20, 14],
[2, 2, 1, 2, 1],
[10, 1, 2, 22, 11],
[2, 2, 1, 24, 14],
[1, 2, 1, 6, 3]
]
for t in tests:
res = phoneCall(t[0], t[1], t[2], t[3])
if t[4] == res:
print("PASSED: phoneCall({}, {}, {}, {}) returned {}"
.format(t[0], t[1], t[2], t[3], res))
else:
print("FAILED: phoneCall({}, {}, {}, {}) returned {}, answer: {}"
.format(t[0], t[1], t[2], t[3], res, t[4]))
if __name__ == '__main__':
main()
|
Solve Code Fights phone call problem
|
Solve Code Fights phone call problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights phone call problem
|
#!/usr/local/bin/python
# Code Fights Phone Call Problem
def phoneCall(min1, min2_10, min11, s):
money_left = s
talking = 0
while money_left > 0:
if talking < 1:
if money_left - min1 < 0:
return talking
else:
money_left -= min1
talking += 1
elif 1 <= talking < 10:
if money_left - min2_10 < 0:
return talking
else:
money_left -= min2_10
talking += 1
else:
if money_left - min11 < 0:
return talking
else:
money_left -= min11
talking += 1
return talking
def main():
tests = [
[3, 1, 2, 20, 14],
[2, 2, 1, 2, 1],
[10, 1, 2, 22, 11],
[2, 2, 1, 24, 14],
[1, 2, 1, 6, 3]
]
for t in tests:
res = phoneCall(t[0], t[1], t[2], t[3])
if t[4] == res:
print("PASSED: phoneCall({}, {}, {}, {}) returned {}"
.format(t[0], t[1], t[2], t[3], res))
else:
print("FAILED: phoneCall({}, {}, {}, {}) returned {}, answer: {}"
.format(t[0], t[1], t[2], t[3], res, t[4]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights phone call problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Phone Call Problem
def phoneCall(min1, min2_10, min11, s):
money_left = s
talking = 0
while money_left > 0:
if talking < 1:
if money_left - min1 < 0:
return talking
else:
money_left -= min1
talking += 1
elif 1 <= talking < 10:
if money_left - min2_10 < 0:
return talking
else:
money_left -= min2_10
talking += 1
else:
if money_left - min11 < 0:
return talking
else:
money_left -= min11
talking += 1
return talking
def main():
tests = [
[3, 1, 2, 20, 14],
[2, 2, 1, 2, 1],
[10, 1, 2, 22, 11],
[2, 2, 1, 24, 14],
[1, 2, 1, 6, 3]
]
for t in tests:
res = phoneCall(t[0], t[1], t[2], t[3])
if t[4] == res:
print("PASSED: phoneCall({}, {}, {}, {}) returned {}"
.format(t[0], t[1], t[2], t[3], res))
else:
print("FAILED: phoneCall({}, {}, {}, {}) returned {}, answer: {}"
.format(t[0], t[1], t[2], t[3], res, t[4]))
if __name__ == '__main__':
main()
|
Solve Code Fights phone call problem#!/usr/local/bin/python
# Code Fights Phone Call Problem
def phoneCall(min1, min2_10, min11, s):
money_left = s
talking = 0
while money_left > 0:
if talking < 1:
if money_left - min1 < 0:
return talking
else:
money_left -= min1
talking += 1
elif 1 <= talking < 10:
if money_left - min2_10 < 0:
return talking
else:
money_left -= min2_10
talking += 1
else:
if money_left - min11 < 0:
return talking
else:
money_left -= min11
talking += 1
return talking
def main():
tests = [
[3, 1, 2, 20, 14],
[2, 2, 1, 2, 1],
[10, 1, 2, 22, 11],
[2, 2, 1, 24, 14],
[1, 2, 1, 6, 3]
]
for t in tests:
res = phoneCall(t[0], t[1], t[2], t[3])
if t[4] == res:
print("PASSED: phoneCall({}, {}, {}, {}) returned {}"
.format(t[0], t[1], t[2], t[3], res))
else:
print("FAILED: phoneCall({}, {}, {}, {}) returned {}, answer: {}"
.format(t[0], t[1], t[2], t[3], res, t[4]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights phone call problem<commit_after>#!/usr/local/bin/python
# Code Fights Phone Call Problem
def phoneCall(min1, min2_10, min11, s):
money_left = s
talking = 0
while money_left > 0:
if talking < 1:
if money_left - min1 < 0:
return talking
else:
money_left -= min1
talking += 1
elif 1 <= talking < 10:
if money_left - min2_10 < 0:
return talking
else:
money_left -= min2_10
talking += 1
else:
if money_left - min11 < 0:
return talking
else:
money_left -= min11
talking += 1
return talking
def main():
tests = [
[3, 1, 2, 20, 14],
[2, 2, 1, 2, 1],
[10, 1, 2, 22, 11],
[2, 2, 1, 24, 14],
[1, 2, 1, 6, 3]
]
for t in tests:
res = phoneCall(t[0], t[1], t[2], t[3])
if t[4] == res:
print("PASSED: phoneCall({}, {}, {}, {}) returned {}"
.format(t[0], t[1], t[2], t[3], res))
else:
print("FAILED: phoneCall({}, {}, {}, {}) returned {}, answer: {}"
.format(t[0], t[1], t[2], t[3], res, t[4]))
if __name__ == '__main__':
main()
|
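The submitted solution simulates the call minute by minute; the same answer also falls out of a little arithmetic over the three tariff bands. The sketch below is a cross-check, not a replacement.

def phone_call_direct(min1, min2_10, min11, s):
    if s < min1:
        return 0                       # cannot afford the first minute
    s -= min1
    mid = min(9, s // min2_10)         # minutes 2..10 at the second rate
    s -= mid * min2_10
    return 1 + mid + (s // min11 if mid == 9 else 0)

# agrees with the looped version on the listed cases, e.g.
# phone_call_direct(3, 1, 2, 20) == 14 and phone_call_direct(1, 2, 1, 6) == 3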
|
56ce986d4f203f1b8187615a42a6ac0ecf25f9f8
|
tests/test_db.py
|
tests/test_db.py
|
from socket import inet_aton
import tempfile
from whip.db import Database
from whip.json import loads as json_loads
from whip.util import ipv4_str_to_int
def test_db_loading():
snapshot_1 = [
dict(begin='1.0.0.0', end='1.255.255.255', x=1, datetime='2010'),
dict(begin='3.0.0.0', end='3.255.255.255', x=2, datetime='2010'),
dict(begin='8.0.0.0', end='9.255.255.255', x=3, datetime='2010'),
]
snapshot_2 = [
dict(begin='1.0.0.0', end='1.255.255.255', x=4, datetime='2011'),
dict(begin='3.0.0.0', end='3.255.255.255', x=5, datetime='2011'),
dict(begin='8.0.0.0', end='9.255.255.255', x=6, datetime='2011'),
]
snapshots = [
snapshot_1,
snapshot_2,
]
def iter_snapshot(snapshot):
for d in snapshot:
yield ipv4_str_to_int(d['begin']), ipv4_str_to_int(d['end']), d
with tempfile.TemporaryDirectory() as db_dir:
db = Database(db_dir, create_if_missing=True)
iters = [iter_snapshot(s) for s in snapshots]
db.load(*iters)
def lookup(ip, datetime=None):
ret = db.lookup(inet_aton(ip), datetime=datetime) or b'{}'
return json_loads(ret)
# Latest version
assert lookup('1.0.0.0')['x'] == 4
assert lookup('1.255.255.255')['x'] == 4
assert lookup('7.0.0.0') == {}
assert lookup('8.1.2.3')['x'] == 6
assert lookup('12.0.0.0') == {}
# Older date
assert lookup('1.2.3.3', '2010')['x'] == 1
# No hit for really old dates
assert lookup('1.2.3.4', '2009') == {}
# Future date
assert lookup('1.2.3.4', '2038')['x'] == 4
# All versions
assert [d['x'] for d in lookup('1.2.3.4', 'all')['history']] == [4, 1]
|
Add some tests for the whip.db module
|
Add some tests for the whip.db module
|
Python
|
bsd-3-clause
|
wbolster/whip
|
Add some tests for the whip.db module
|
from socket import inet_aton
import tempfile
from whip.db import Database
from whip.json import loads as json_loads
from whip.util import ipv4_str_to_int
def test_db_loading():
snapshot_1 = [
dict(begin='1.0.0.0', end='1.255.255.255', x=1, datetime='2010'),
dict(begin='3.0.0.0', end='3.255.255.255', x=2, datetime='2010'),
dict(begin='8.0.0.0', end='9.255.255.255', x=3, datetime='2010'),
]
snapshot_2 = [
dict(begin='1.0.0.0', end='1.255.255.255', x=4, datetime='2011'),
dict(begin='3.0.0.0', end='3.255.255.255', x=5, datetime='2011'),
dict(begin='8.0.0.0', end='9.255.255.255', x=6, datetime='2011'),
]
snapshots = [
snapshot_1,
snapshot_2,
]
def iter_snapshot(snapshot):
for d in snapshot:
yield ipv4_str_to_int(d['begin']), ipv4_str_to_int(d['end']), d
with tempfile.TemporaryDirectory() as db_dir:
db = Database(db_dir, create_if_missing=True)
iters = [iter_snapshot(s) for s in snapshots]
db.load(*iters)
def lookup(ip, datetime=None):
ret = db.lookup(inet_aton(ip), datetime=datetime) or b'{}'
return json_loads(ret)
# Latest version
assert lookup('1.0.0.0')['x'] == 4
assert lookup('1.255.255.255')['x'] == 4
assert lookup('7.0.0.0') == {}
assert lookup('8.1.2.3')['x'] == 6
assert lookup('12.0.0.0') == {}
# Older date
assert lookup('1.2.3.3', '2010')['x'] == 1
# No hit for really old dates
assert lookup('1.2.3.4', '2009') == {}
# Future date
assert lookup('1.2.3.4', '2038')['x'] == 4
# All versions
assert [d['x'] for d in lookup('1.2.3.4', 'all')['history']] == [4, 1]
|
<commit_before><commit_msg>Add some tests for the whip.db module<commit_after>
|
from socket import inet_aton
import tempfile
from whip.db import Database
from whip.json import loads as json_loads
from whip.util import ipv4_str_to_int
def test_db_loading():
snapshot_1 = [
dict(begin='1.0.0.0', end='1.255.255.255', x=1, datetime='2010'),
dict(begin='3.0.0.0', end='3.255.255.255', x=2, datetime='2010'),
dict(begin='8.0.0.0', end='9.255.255.255', x=3, datetime='2010'),
]
snapshot_2 = [
dict(begin='1.0.0.0', end='1.255.255.255', x=4, datetime='2011'),
dict(begin='3.0.0.0', end='3.255.255.255', x=5, datetime='2011'),
dict(begin='8.0.0.0', end='9.255.255.255', x=6, datetime='2011'),
]
snapshots = [
snapshot_1,
snapshot_2,
]
def iter_snapshot(snapshot):
for d in snapshot:
yield ipv4_str_to_int(d['begin']), ipv4_str_to_int(d['end']), d
with tempfile.TemporaryDirectory() as db_dir:
db = Database(db_dir, create_if_missing=True)
iters = [iter_snapshot(s) for s in snapshots]
db.load(*iters)
def lookup(ip, datetime=None):
ret = db.lookup(inet_aton(ip), datetime=datetime) or b'{}'
return json_loads(ret)
# Latest version
assert lookup('1.0.0.0')['x'] == 4
assert lookup('1.255.255.255')['x'] == 4
assert lookup('7.0.0.0') == {}
assert lookup('8.1.2.3')['x'] == 6
assert lookup('12.0.0.0') == {}
# Older date
assert lookup('1.2.3.3', '2010')['x'] == 1
# No hit for really old dates
assert lookup('1.2.3.4', '2009') == {}
# Future date
assert lookup('1.2.3.4', '2038')['x'] == 4
# All versions
assert [d['x'] for d in lookup('1.2.3.4', 'all')['history']] == [4, 1]
|
Add some tests for the whip.db module
from socket import inet_aton
import tempfile
from whip.db import Database
from whip.json import loads as json_loads
from whip.util import ipv4_str_to_int
def test_db_loading():
snapshot_1 = [
dict(begin='1.0.0.0', end='1.255.255.255', x=1, datetime='2010'),
dict(begin='3.0.0.0', end='3.255.255.255', x=2, datetime='2010'),
dict(begin='8.0.0.0', end='9.255.255.255', x=3, datetime='2010'),
]
snapshot_2 = [
dict(begin='1.0.0.0', end='1.255.255.255', x=4, datetime='2011'),
dict(begin='3.0.0.0', end='3.255.255.255', x=5, datetime='2011'),
dict(begin='8.0.0.0', end='9.255.255.255', x=6, datetime='2011'),
]
snapshots = [
snapshot_1,
snapshot_2,
]
def iter_snapshot(snapshot):
for d in snapshot:
yield ipv4_str_to_int(d['begin']), ipv4_str_to_int(d['end']), d
with tempfile.TemporaryDirectory() as db_dir:
db = Database(db_dir, create_if_missing=True)
iters = [iter_snapshot(s) for s in snapshots]
db.load(*iters)
def lookup(ip, datetime=None):
ret = db.lookup(inet_aton(ip), datetime=datetime) or b'{}'
return json_loads(ret)
# Latest version
assert lookup('1.0.0.0')['x'] == 4
assert lookup('1.255.255.255')['x'] == 4
assert lookup('7.0.0.0') == {}
assert lookup('8.1.2.3')['x'] == 6
assert lookup('12.0.0.0') == {}
# Older date
assert lookup('1.2.3.3', '2010')['x'] == 1
# No hit for really old dates
assert lookup('1.2.3.4', '2009') == {}
# Future date
assert lookup('1.2.3.4', '2038')['x'] == 4
# All versions
assert [d['x'] for d in lookup('1.2.3.4', 'all')['history']] == [4, 1]
|
<commit_before><commit_msg>Add some tests for the whip.db module<commit_after>
from socket import inet_aton
import tempfile
from whip.db import Database
from whip.json import loads as json_loads
from whip.util import ipv4_str_to_int
def test_db_loading():
snapshot_1 = [
dict(begin='1.0.0.0', end='1.255.255.255', x=1, datetime='2010'),
dict(begin='3.0.0.0', end='3.255.255.255', x=2, datetime='2010'),
dict(begin='8.0.0.0', end='9.255.255.255', x=3, datetime='2010'),
]
snapshot_2 = [
dict(begin='1.0.0.0', end='1.255.255.255', x=4, datetime='2011'),
dict(begin='3.0.0.0', end='3.255.255.255', x=5, datetime='2011'),
dict(begin='8.0.0.0', end='9.255.255.255', x=6, datetime='2011'),
]
snapshots = [
snapshot_1,
snapshot_2,
]
def iter_snapshot(snapshot):
for d in snapshot:
yield ipv4_str_to_int(d['begin']), ipv4_str_to_int(d['end']), d
with tempfile.TemporaryDirectory() as db_dir:
db = Database(db_dir, create_if_missing=True)
iters = [iter_snapshot(s) for s in snapshots]
db.load(*iters)
def lookup(ip, datetime=None):
ret = db.lookup(inet_aton(ip), datetime=datetime) or b'{}'
return json_loads(ret)
# Latest version
assert lookup('1.0.0.0')['x'] == 4
assert lookup('1.255.255.255')['x'] == 4
assert lookup('7.0.0.0') == {}
assert lookup('8.1.2.3')['x'] == 6
assert lookup('12.0.0.0') == {}
# Older date
assert lookup('1.2.3.3', '2010')['x'] == 1
# No hit for really old dates
assert lookup('1.2.3.4', '2009') == {}
# Future date
assert lookup('1.2.3.4', '2038')['x'] == 4
# All versions
assert [d['x'] for d in lookup('1.2.3.4', 'all')['history']] == [4, 1]
|
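The fixtures key every range by integer IPv4 address. The helper's expected behaviour, inferred from how the test pairs it with socket.inet_aton rather than read from whip.util, is the usual big-endian conversion, which the standard library reproduces directly.

import struct
from socket import inet_aton

def ipv4_str_to_int(ip):
    # presumed equivalent of whip.util.ipv4_str_to_int
    return struct.unpack('>I', inet_aton(ip))[0]

assert ipv4_str_to_int('1.0.0.0') == 1 << 24
assert ipv4_str_to_int('1.255.255.255') == (1 << 24) + 0xFFFFFF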
|
1755f2bc99b2e0e42496fbfc2cb439db0251608a
|
tsa/lib/cache.py
|
tsa/lib/cache.py
|
import os
import cPickle
import logging
logger = logging.getLogger(__name__)
def pickleable(file_pattern):
'''A function helper. Use like:
    @pickleable('tmp/longrunner-%(hashtag)s-%(limit)d.pyckle')
def get_tweets(hashtag='hcr', limit=1000):
... go get some tweets and return them as a plain dict or list ...
For all we care, the function should return the same thing
everytime it is run with any particular combination of keyword arguments.
Only supports functions with keyword arguments.
'''
def decorator(func):
# print 'pickleable decorator', func
# *args,
def wrapper(**kw):
# print 'pickleable wrapper', kw
file_path = file_pattern % kw
if os.path.exists(file_path):
logger.info('Reading pickled object from file: %s', file_path)
pickle_fp = open(file_path, 'rb')
result = cPickle.load(pickle_fp)
else:
logger.info('Executing pickle-able function')
result = func(**kw)
logger.info('Writing pickled object to file: %s', file_path)
pickle_fp = open(file_path, 'wb')
cPickle.dump(result, pickle_fp)
return result
return wrapper
return decorator
|
Add general pickle decorator that interpolates a string with the function's **kw
|
Add general pickle decorator that interpolates a string with the function's **kw
|
Python
|
mit
|
chbrown/tsa,chbrown/tsa,chbrown/tsa
|
Add general pickle decorator that interpolates a string with the function's **kw
|
import os
import cPickle
import logging
logger = logging.getLogger(__name__)
def pickleable(file_pattern):
'''A function helper. Use like:
    @pickleable('tmp/longrunner-%(hashtag)s-%(limit)d.pyckle')
def get_tweets(hashtag='hcr', limit=1000):
... go get some tweets and return them as a plain dict or list ...
For all we care, the function should return the same thing
everytime it is run with any particular combination of keyword arguments.
Only supports functions with keyword arguments.
'''
def decorator(func):
# print 'pickleable decorator', func
# *args,
def wrapper(**kw):
# print 'pickleable wrapper', kw
file_path = file_pattern % kw
if os.path.exists(file_path):
logger.info('Reading pickled object from file: %s', file_path)
pickle_fp = open(file_path, 'rb')
result = cPickle.load(pickle_fp)
else:
logger.info('Executing pickle-able function')
result = func(**kw)
logger.info('Writing pickled object to file: %s', file_path)
pickle_fp = open(file_path, 'wb')
cPickle.dump(result, pickle_fp)
return result
return wrapper
return decorator
|
<commit_before><commit_msg>Add general pickle decorator that interpolates a string with the function's **kw<commit_after>
|
import os
import cPickle
import logging
logger = logging.getLogger(__name__)
def pickleable(file_pattern):
'''A function helper. Use like:
    @pickleable('tmp/longrunner-%(hashtag)s-%(limit)d.pyckle')
def get_tweets(hashtag='hcr', limit=1000):
... go get some tweets and return them as a plain dict or list ...
For all we care, the function should return the same thing
everytime it is run with any particular combination of keyword arguments.
Only supports functions with keyword arguments.
'''
def decorator(func):
# print 'pickleable decorator', func
# *args,
def wrapper(**kw):
# print 'pickleable wrapper', kw
file_path = file_pattern % kw
if os.path.exists(file_path):
logger.info('Reading pickled object from file: %s', file_path)
pickle_fp = open(file_path, 'rb')
result = cPickle.load(pickle_fp)
else:
logger.info('Executing pickle-able function')
result = func(**kw)
logger.info('Writing pickled object to file: %s', file_path)
pickle_fp = open(file_path, 'wb')
cPickle.dump(result, pickle_fp)
return result
return wrapper
return decorator
|
Add general pickle decorator that interpolates a string with the function's **kwimport os
import cPickle
import logging
logger = logging.getLogger(__name__)
def pickleable(file_pattern):
'''A function helper. Use like:
@pickle('tmp/longrunner-%(hashtag)s-%(limit)d.pyckle')
def get_tweets(hashtag='hcr', limit=1000):
... go get some tweets and return them as a plain dict or list ...
For all we care, the function should return the same thing
everytime it is run with any particular combination of keyword arguments.
Only supports functions with keyword arguments.
'''
def decorator(func):
# print 'pickleable decorator', func
# *args,
def wrapper(**kw):
# print 'pickleable wrapper', kw
file_path = file_pattern % kw
if os.path.exists(file_path):
logger.info('Reading pickled object from file: %s', file_path)
pickle_fp = open(file_path, 'rb')
result = cPickle.load(pickle_fp)
else:
logger.info('Executing pickle-able function')
result = func(**kw)
logger.info('Writing pickled object to file: %s', file_path)
pickle_fp = open(file_path, 'wb')
cPickle.dump(result, pickle_fp)
return result
return wrapper
return decorator
|
<commit_before><commit_msg>Add general pickle decorator that interpolates a string with the function's **kw<commit_after>import os
import cPickle
import logging
logger = logging.getLogger(__name__)
def pickleable(file_pattern):
'''A function helper. Use like:
@pickle('tmp/longrunner-%(hashtag)s-%(limit)d.pyckle')
def get_tweets(hashtag='hcr', limit=1000):
... go get some tweets and return them as a plain dict or list ...
For all we care, the function should return the same thing
everytime it is run with any particular combination of keyword arguments.
Only supports functions with keyword arguments.
'''
def decorator(func):
# print 'pickleable decorator', func
# *args,
def wrapper(**kw):
# print 'pickleable wrapper', kw
file_path = file_pattern % kw
if os.path.exists(file_path):
logger.info('Reading pickled object from file: %s', file_path)
pickle_fp = open(file_path, 'rb')
result = cPickle.load(pickle_fp)
else:
logger.info('Executing pickle-able function')
result = func(**kw)
logger.info('Writing pickled object to file: %s', file_path)
pickle_fp = open(file_path, 'wb')
cPickle.dump(result, pickle_fp)
return result
return wrapper
return decorator
|
|
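An illustrative, hedged sketch of how the pickleable decorator in the record above is meant to be used. It is a self-contained Python 3 rendering of the same idea — pickle in place of cPickle, with a temporary cache directory and an expensive_sum function invented purely for the demo — so the cache-hit behaviour can be tried locally; it is not part of the tsa repository.

import os
import pickle
import tempfile

def pickleable(file_pattern):
    """Cache the return value of a keyword-only function in a pickle file."""
    def decorator(func):
        def wrapper(**kw):
            file_path = file_pattern % kw           # interpolate kwargs into the path
            if os.path.exists(file_path):
                with open(file_path, 'rb') as fp:   # cache hit: read the pickled result
                    return pickle.load(fp)
            result = func(**kw)                     # cache miss: run the function once
            with open(file_path, 'wb') as fp:
                pickle.dump(result, fp)
            return result
        return wrapper
    return decorator

cache_dir = tempfile.mkdtemp()

@pickleable(os.path.join(cache_dir, 'sum-%(n)d.pickle'))
def expensive_sum(n=10):
    print('computing...')            # printed only on the cache miss
    return sum(range(n))

print(expensive_sum(n=1000))         # computes and writes the pickle file
print(expensive_sum(n=1000))         # served from the cache file on the second call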
294e3251cff159e08bcb28820720c1de41534ea1
|
passenger_wsgi.py
|
passenger_wsgi.py
|
import sys, os
sys.path.append(os.getcwd())
sys.path.append(os.getcwd() + '/huxley')
INTERP = os.path.join(os.getcwd(), 'env/bin/python')
if sys.executable != INTERP: os.execl(INTERP, INTERP, *sys.argv)
sys.path.insert(0, os.path.join(os.getcwd(), 'env/bin'))
sys.path.insert(0, os.path.join(os.getcwd(), 'env/lib/python2.7/site-packages/django'))
sys.path.insert(0, os.path.join(os.getcwd(), 'env/lib/python2.7/site-packages'))
os.environ['DJANGO_SETTINGS_MODULE'] = "huxley.settings"
import django
django.setup()
from django.core.handlers.wsgi import WSGIHandler
from paste.exceptions.errormiddleware import ErrorMiddleware
application = ErrorMiddleware(WSGIHandler(), debug=True)
|
Add production-specific passenger wsgi file
|
Add production-specific passenger wsgi file
|
Python
|
bsd-3-clause
|
ctmunwebmaster/huxley,ctmunwebmaster/huxley,ctmunwebmaster/huxley,ctmunwebmaster/huxley
|
Add production-specific passenger wsgi file
|
import sys, os
sys.path.append(os.getcwd())
sys.path.append(os.getcwd() + '/huxley')
INTERP = os.path.join(os.getcwd(), 'env/bin/python')
if sys.executable != INTERP: os.execl(INTERP, INTERP, *sys.argv)
sys.path.insert(0, os.path.join(os.getcwd(), 'env/bin'))
sys.path.insert(0, os.path.join(os.getcwd(), 'env/lib/python2.7/site-packages/django'))
sys.path.insert(0, os.path.join(os.getcwd(), 'env/lib/python2.7/site-packages'))
os.environ['DJANGO_SETTINGS_MODULE'] = "huxley.settings"
import django
django.setup()
from django.core.handlers.wsgi import WSGIHandler
from paste.exceptions.errormiddleware import ErrorMiddleware
application = ErrorMiddleware(WSGIHandler(), debug=True)
|
<commit_before><commit_msg>Add production-specific passenger wsgi file<commit_after>
|
import sys, os
sys.path.append(os.getcwd())
sys.path.append(os.getcwd() + '/huxley')
INTERP = os.path.join(os.getcwd(), 'env/bin/python')
if sys.executable != INTERP: os.execl(INTERP, INTERP, *sys.argv)
sys.path.insert(0, os.path.join(os.getcwd(), 'env/bin'))
sys.path.insert(0, os.path.join(os.getcwd(), 'env/lib/python2.7/site-packages/django'))
sys.path.insert(0, os.path.join(os.getcwd(), 'env/lib/python2.7/site-packages'))
os.environ['DJANGO_SETTINGS_MODULE'] = "huxley.settings"
import django
django.setup()
from django.core.handlers.wsgi import WSGIHandler
from paste.exceptions.errormiddleware import ErrorMiddleware
application = ErrorMiddleware(WSGIHandler(), debug=True)
|
Add production-specific passenger wsgi fileimport sys, os
sys.path.append(os.getcwd())
sys.path.append(os.getcwd() + '/huxley')
INTERP = os.path.join(os.getcwd(), 'env/bin/python')
if sys.executable != INTERP: os.execl(INTERP, INTERP, *sys.argv)
sys.path.insert(0, os.path.join(os.getcwd(), 'env/bin'))
sys.path.insert(0, os.path.join(os.getcwd(), 'env/lib/python2.7/site-packages/django'))
sys.path.insert(0, os.path.join(os.getcwd(), 'env/lib/python2.7/site-packages'))
os.environ['DJANGO_SETTINGS_MODULE'] = "huxley.settings"
import django
django.setup()
from django.core.handlers.wsgi import WSGIHandler
from paste.exceptions.errormiddleware import ErrorMiddleware
application = ErrorMiddleware(WSGIHandler(), debug=True)
|
<commit_before><commit_msg>Add production-specific passenger wsgi file<commit_after>import sys, os
sys.path.append(os.getcwd())
sys.path.append(os.getcwd() + '/huxley')
INTERP = os.path.join(os.getcwd(), 'env/bin/python')
if sys.executable != INTERP: os.execl(INTERP, INTERP, *sys.argv)
sys.path.insert(0, os.path.join(os.getcwd(), 'env/bin'))
sys.path.insert(0, os.path.join(os.getcwd(), 'env/lib/python2.7/site-packages/django'))
sys.path.insert(0, os.path.join(os.getcwd(), 'env/lib/python2.7/site-packages'))
os.environ['DJANGO_SETTINGS_MODULE'] = "huxley.settings"
import django
django.setup()
from django.core.handlers.wsgi import WSGIHandler
from paste.exceptions.errormiddleware import ErrorMiddleware
application = ErrorMiddleware(WSGIHandler(), debug=True)
|
|
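A hedged illustration of the interpreter re-exec check that passenger_wsgi.py above relies on. The virtualenv path is hypothetical and the exec call is only reported, so the snippet is safe to run anywhere; the real file replaces the current process with os.execl(INTERP, INTERP, *sys.argv). Note also that ErrorMiddleware(..., debug=True) shows tracebacks to visitors, which is usually switched off once the deployment is verified.

import os
import sys

INTERP = os.path.join(os.getcwd(), 'env/bin/python')   # hypothetical venv interpreter
if sys.executable != INTERP:
    # passenger_wsgi.py would call: os.execl(INTERP, INTERP, *sys.argv)
    print('would re-exec under', INTERP)
else:
    print('already running under the virtualenv interpreter')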
8ef5a7105f23a3c3050aa0df0ec5aca5b738dc7d
|
directions.py
|
directions.py
|
#!/usr/bin/python
import googlemaps
#api_key = "AIzaSyBhOIJ_Ta2QrnO2jllAy4sd5dGCzUOA4Hw"
class Directions(object):
"""
"""
api_key = "AIzaSyBhOIJ_Ta2QrnO2jllAy4sd5dGCzUOA4Hw"
def __init__(self):
self.gmaps = googlemaps.Client(self.api_key)
pass
def getData(self, orig, dest):
directions = self.gmaps.directions(orig, dest)
distance = (directions[0]['legs'][0]['distance']['text']) #value is meter, text is formatted with mile
time = (directions[0]['legs'][0]['duration']['text']) #value is seconds, text is formatted
output = {distance, time}
return output
|
Create new Direction class. Calculates distance and time from two addresses with Google Maps API
|
Create new Direction class. Calculates distance and time from two addresses with Google Maps API
|
Python
|
mit
|
LibriCerule/Cerulean_Tracking,LibriCerule/Cerulean_Tracking,LibriCerule/Cerulean_Tracking,LibriCerule/Cerulean_Tracking,LibriCerule/Cerulean_Tracking
|
Create new Direction class. Calculates distance and time from two addresses with Google Maps API
|
#!/usr/bin/python
import googlemaps
#api_key = "AIzaSyBhOIJ_Ta2QrnO2jllAy4sd5dGCzUOA4Hw"
class Directions(object):
"""
"""
api_key = "AIzaSyBhOIJ_Ta2QrnO2jllAy4sd5dGCzUOA4Hw"
def __init__(self):
self.gmaps = googlemaps.Client(self.api_key)
pass
def getData(self, orig, dest):
directions = self.gmaps.directions(orig, dest)
distance = (directions[0]['legs'][0]['distance']['text']) #value is meter, text is formatted with mile
time = (directions[0]['legs'][0]['duration']['text']) #value is seconds, text is formatted
output = {distance, time}
return output
|
<commit_before><commit_msg>Create new Direction class. Calculates distance and time from two addresses with Google Maps API<commit_after>
|
#!/usr/bin/python
import googlemaps
#api_key = "AIzaSyBhOIJ_Ta2QrnO2jllAy4sd5dGCzUOA4Hw"
class Directions(object):
"""
"""
api_key = "AIzaSyBhOIJ_Ta2QrnO2jllAy4sd5dGCzUOA4Hw"
def __init__(self):
self.gmaps = googlemaps.Client(self.api_key)
pass
def getData(self, orig, dest):
directions = self.gmaps.directions(orig, dest)
distance = (directions[0]['legs'][0]['distance']['text']) #value is meter, text is formatted with mile
time = (directions[0]['legs'][0]['duration']['text']) #value is seconds, text is formatted
output = {distance, time}
return output
|
Create new Direction class. Calculates distance and time from two addresses with Google Maps API#!/usr/bin/python
import googlemaps
#api_key = "AIzaSyBhOIJ_Ta2QrnO2jllAy4sd5dGCzUOA4Hw"
class Directions(object):
"""
"""
api_key = "AIzaSyBhOIJ_Ta2QrnO2jllAy4sd5dGCzUOA4Hw"
def __init__(self):
self.gmaps = googlemaps.Client(self.api_key)
pass
def getData(self, orig, dest):
directions = self.gmaps.directions(orig, dest)
distance = (directions[0]['legs'][0]['distance']['text']) #value is meter, text is formatted with mile
time = (directions[0]['legs'][0]['duration']['text']) #value is seconds, text is formatted
output = {distance, time}
return output
|
<commit_before><commit_msg>Create new Direction class. Calculates distance and time from two addresses with Google Maps API<commit_after>#!/usr/bin/python
import googlemaps
#api_key = "AIzaSyBhOIJ_Ta2QrnO2jllAy4sd5dGCzUOA4Hw"
class Directions(object):
"""
"""
api_key = "AIzaSyBhOIJ_Ta2QrnO2jllAy4sd5dGCzUOA4Hw"
def __init__(self):
self.gmaps = googlemaps.Client(self.api_key)
pass
def getData(self, orig, dest):
directions = self.gmaps.directions(orig, dest)
distance = (directions[0]['legs'][0]['distance']['text']) #value is meter, text is formatted with mile
time = (directions[0]['legs'][0]['duration']['text']) #value is seconds, text is formatted
output = {distance, time}
return output
|
|
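Illustrative only: the payload below is a hand-written stand-in for what googlemaps.Client.directions() returns, so the indexing done in Directions.getData() can be exercised without an API key or network access (the key hard-coded in the record would normally be kept out of source control). It also shows why returning {distance, time} — an unordered set — loses track of which value is which; a dict or tuple is easier on callers.

sample_directions = [{
    'legs': [{
        'distance': {'text': '2.5 mi', 'value': 4023},   # value is metres, text is formatted
        'duration': {'text': '9 mins', 'value': 540},    # value is seconds, text is formatted
    }]
}]

distance = sample_directions[0]['legs'][0]['distance']['text']
time = sample_directions[0]['legs'][0]['duration']['text']
print({'distance': distance, 'time': time})   # a dict keeps the two values distinguishable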
998303eff5fc4fecfa26d32a9920a6726a275ae3
|
tests/test_billion.py
|
tests/test_billion.py
|
from numpy.testing import assert_raises
from fuel.datasets.billion import OneBillionWord
class TestOneBillionWord(object):
def setUp(self):
all_chars = ([chr(ord('a') + i) for i in range(26)] +
[chr(ord('0') + i) for i in range(10)] +
[',', '.', '!', '?', '<UNK>'] +
[' ', '<S>', '</S>'])
code2char = dict(enumerate(all_chars))
self.char2code = {v: k for k, v in code2char.items()}
def test_value_error_wrong_set(self):
assert_raises(
ValueError, OneBillionWord, 'dummy', [0, 1], self.char2code)
def test_value_error_training_partition(self):
assert_raises(
ValueError, OneBillionWord, 'training', [101], self.char2code)
def test_value_error_heldout_partition(self):
assert_raises(
ValueError, OneBillionWord, 'heldout', [101], self.char2code)
|
Increase test coverage for OneBillonWord
|
Increase test coverage for OneBillonWord
|
Python
|
mit
|
glewis17/fuel,bouthilx/fuel,dhruvparamhans/fuel,hantek/fuel,lamblin/fuel,udibr/fuel,bouthilx/fuel,dwf/fuel,laurent-dinh/fuel,hantek/fuel,dribnet/fuel,capybaralet/fuel,dmitriy-serdyuk/fuel,janchorowski/fuel,markusnagel/fuel,dmitriy-serdyuk/fuel,aalmah/fuel,harmdevries89/fuel,rodrigob/fuel,EderSantana/fuel,jbornschein/fuel,mila-udem/fuel,rizar/fuel,orhanf/fuel,glewis17/fuel,lamblin/fuel,mila-udem/fuel,jbornschein/fuel,chrishokamp/fuel,codeaudit/fuel,EderSantana/fuel,codeaudit/fuel,dhruvparamhans/fuel,markusnagel/fuel,ejls/fuel,udibr/fuel,orhanf/fuel,janchorowski/fuel,laurent-dinh/fuel,ejls/fuel,aalmah/fuel,vdumoulin/fuel,mjwillson/fuel,rizar/fuel,capybaralet/fuel,dribnet/fuel,vdumoulin/fuel,dwf/fuel,mjwillson/fuel,harmdevries89/fuel,rodrigob/fuel,chrishokamp/fuel
|
Increase test coverage for OneBillonWord
|
from numpy.testing import assert_raises
from fuel.datasets.billion import OneBillionWord
class TestOneBillionWord(object):
def setUp(self):
all_chars = ([chr(ord('a') + i) for i in range(26)] +
[chr(ord('0') + i) for i in range(10)] +
[',', '.', '!', '?', '<UNK>'] +
[' ', '<S>', '</S>'])
code2char = dict(enumerate(all_chars))
self.char2code = {v: k for k, v in code2char.items()}
def test_value_error_wrong_set(self):
assert_raises(
ValueError, OneBillionWord, 'dummy', [0, 1], self.char2code)
def test_value_error_training_partition(self):
assert_raises(
ValueError, OneBillionWord, 'training', [101], self.char2code)
def test_value_error_heldout_partition(self):
assert_raises(
ValueError, OneBillionWord, 'heldout', [101], self.char2code)
|
<commit_before><commit_msg>Increase test coverage for OneBillonWord<commit_after>
|
from numpy.testing import assert_raises
from fuel.datasets.billion import OneBillionWord
class TestOneBillionWord(object):
def setUp(self):
all_chars = ([chr(ord('a') + i) for i in range(26)] +
[chr(ord('0') + i) for i in range(10)] +
[',', '.', '!', '?', '<UNK>'] +
[' ', '<S>', '</S>'])
code2char = dict(enumerate(all_chars))
self.char2code = {v: k for k, v in code2char.items()}
def test_value_error_wrong_set(self):
assert_raises(
ValueError, OneBillionWord, 'dummy', [0, 1], self.char2code)
def test_value_error_training_partition(self):
assert_raises(
ValueError, OneBillionWord, 'training', [101], self.char2code)
def test_value_error_heldout_partition(self):
assert_raises(
ValueError, OneBillionWord, 'heldout', [101], self.char2code)
|
Increase test coverage for OneBillonWordfrom numpy.testing import assert_raises
from fuel.datasets.billion import OneBillionWord
class TestOneBillionWord(object):
def setUp(self):
all_chars = ([chr(ord('a') + i) for i in range(26)] +
[chr(ord('0') + i) for i in range(10)] +
[',', '.', '!', '?', '<UNK>'] +
[' ', '<S>', '</S>'])
code2char = dict(enumerate(all_chars))
self.char2code = {v: k for k, v in code2char.items()}
def test_value_error_wrong_set(self):
assert_raises(
ValueError, OneBillionWord, 'dummy', [0, 1], self.char2code)
def test_value_error_training_partition(self):
assert_raises(
ValueError, OneBillionWord, 'training', [101], self.char2code)
def test_value_error_heldout_partition(self):
assert_raises(
ValueError, OneBillionWord, 'heldout', [101], self.char2code)
|
<commit_before><commit_msg>Increase test coverage for OneBillonWord<commit_after>from numpy.testing import assert_raises
from fuel.datasets.billion import OneBillionWord
class TestOneBillionWord(object):
def setUp(self):
all_chars = ([chr(ord('a') + i) for i in range(26)] +
[chr(ord('0') + i) for i in range(10)] +
[',', '.', '!', '?', '<UNK>'] +
[' ', '<S>', '</S>'])
code2char = dict(enumerate(all_chars))
self.char2code = {v: k for k, v in code2char.items()}
def test_value_error_wrong_set(self):
assert_raises(
ValueError, OneBillionWord, 'dummy', [0, 1], self.char2code)
def test_value_error_training_partition(self):
assert_raises(
ValueError, OneBillionWord, 'training', [101], self.char2code)
def test_value_error_heldout_partition(self):
assert_raises(
ValueError, OneBillionWord, 'heldout', [101], self.char2code)
|
|
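For readers without fuel installed, here is a hedged, self-contained sketch of the pattern the test above relies on: build the character vocabulary and assert that a constructor rejects bad arguments via numpy.testing.assert_raises. FakeOneBillionWord is a made-up stand-in, not the real dataset class.

from numpy.testing import assert_raises

all_chars = ([chr(ord('a') + i) for i in range(26)] +
             [chr(ord('0') + i) for i in range(10)] +
             [',', '.', '!', '?', '<UNK>', ' ', '<S>', '</S>'])
char2code = {ch: code for code, ch in enumerate(all_chars)}

class FakeOneBillionWord(object):
    # stand-in for fuel.datasets.billion.OneBillionWord, just enough to show the idiom
    def __init__(self, which_set, which_partitions, dictionary):
        if which_set not in ('training', 'heldout'):
            raise ValueError('which_set must be training or heldout')

assert_raises(ValueError, FakeOneBillionWord, 'dummy', [0, 1], char2code)
print('ValueError raised for the bogus set name, as the test expects')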
b084ca332c34103139078e2e9956b757bfed190f
|
tests/test_filters.py
|
tests/test_filters.py
|
import pytest
import vtki
from vtki import examples
def test_uniform_grid_filters():
"""This tests all avaialble filters"""
dataset = examples.load_uniform()
dataset.set_active_scalar('Spatial Point Data')
# Threshold
thresh = dataset.threshold([100, 500])
assert thresh is not None
# Slice
slc = dataset.slice()
assert slc is not None
# Clip
clp = dataset.clip(invert=True)
assert clp is not None
# Contour
iso = dataset.contour()
assert iso is not None
|
Add simple test case to make sure the filters work
|
Add simple test case to make sure the filters work
|
Python
|
mit
|
akaszynski/vtkInterface
|
Add simple test case to make sure the filters work
|
import pytest
import vtki
from vtki import examples
def test_uniform_grid_filters():
"""This tests all avaialble filters"""
dataset = examples.load_uniform()
dataset.set_active_scalar('Spatial Point Data')
# Threshold
thresh = dataset.threshold([100, 500])
assert thresh is not None
# Slice
slc = dataset.slice()
assert slc is not None
# Clip
clp = dataset.clip(invert=True)
assert clp is not None
# Contour
iso = dataset.contour()
assert iso is not None
|
<commit_before><commit_msg>Add simple test case to make sure the filters work<commit_after>
|
import pytest
import vtki
from vtki import examples
def test_uniform_grid_filters():
"""This tests all avaialble filters"""
dataset = examples.load_uniform()
dataset.set_active_scalar('Spatial Point Data')
# Threshold
thresh = dataset.threshold([100, 500])
assert thresh is not None
# Slice
slc = dataset.slice()
assert slc is not None
# Clip
clp = dataset.clip(invert=True)
assert clp is not None
# Contour
iso = dataset.contour()
assert iso is not None
|
Add simple test case to make sure the filters workimport pytest
import vtki
from vtki import examples
def test_uniform_grid_filters():
"""This tests all avaialble filters"""
dataset = examples.load_uniform()
dataset.set_active_scalar('Spatial Point Data')
# Threshold
thresh = dataset.threshold([100, 500])
assert thresh is not None
# Slice
slc = dataset.slice()
assert slc is not None
# Clip
clp = dataset.clip(invert=True)
assert clp is not None
# Contour
iso = dataset.contour()
assert iso is not None
|
<commit_before><commit_msg>Add simple test case to make sure the filters work<commit_after>import pytest
import vtki
from vtki import examples
def test_uniform_grid_filters():
"""This tests all avaialble filters"""
dataset = examples.load_uniform()
dataset.set_active_scalar('Spatial Point Data')
# Threshold
thresh = dataset.threshold([100, 500])
assert thresh is not None
# Slice
slc = dataset.slice()
assert slc is not None
# Clip
clp = dataset.clip(invert=True)
assert clp is not None
# Contour
iso = dataset.contour()
assert iso is not None
|
|
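A hedged follow-on sketch (requires vtki and its example data, so treat it as illustrative rather than CI-ready): each filter exercised in the test above returns a new dataset rather than mutating the input, which can be checked by comparing cell counts. Since vtki objects subclass the underlying VTK classes, the native GetNumberOfCells() call is assumed to work on both objects.

import vtki
from vtki import examples

dataset = examples.load_uniform()                 # small built-in sample grid
dataset.set_active_scalar('Spatial Point Data')
thresh = dataset.threshold([100, 500])            # returns a new dataset; the input is untouched
print(dataset.GetNumberOfCells(), thresh.GetNumberOfCells())   # thresholding drops cells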
777b7d32ea71a09429caef492ace61074b84dd91
|
private_storage/storage/s3boto3.py
|
private_storage/storage/s3boto3.py
|
from storages.backends.s3boto3 import S3Boto3Storage
from storages.utils import setting
class PrivateS3BotoStorage(S3Boto3Storage):
"""
Private storage bucket for S3
"""
# Since this class inherits the default storage, it shares many parameters with the base class.
# Thus, redefine the setting name that is used to read these values, so almost all settings are not shared.
access_key = setting('AWS_PRIVATE_S3_ACCESS_KEY_ID', setting('AWS_PRIVATE_ACCESS_KEY_ID', S3Boto3Storage.access_key))
secret_key = setting('AWS_PRIVATE_S3_SECRET_ACCESS_KEY', setting('AWS_PRIVATE_SECRET_ACCESS_KEY', S3Boto3Storage.secret_key))
file_overwrite = setting('AWS_PRIVATE_S3_FILE_OVERWRITE', False) # false, differ from base class
object_parameters = setting('AWS_PRIVATE_S3_OBJECT_PARAMETERS', {})
bucket_name = setting('AWS_PRIVATE_STORAGE_BUCKET_NAME', strict=True)
auto_create_bucket = setting('AWS_PRIVATE_AUTO_CREATE_BUCKET', False)
default_acl = setting('AWS_PRIVATE_DEFAULT_ACL', 'private') # differ from base class
bucket_acl = setting('AWS_PRIVATE_BUCKET_ACL', default_acl)
querystring_auth = setting('AWS_PRIVATE_QUERYSTRING_AUTH', True)
querystring_expire = setting('AWS_PRIVATE_QUERYSTRING_EXPIRE', 3600)
signature_version = setting('AWS_PRIVATE_S3_SIGNATURE_VERSION')
reduced_redundancy = setting('AWS_PRIVATE_REDUCED_REDUNDANCY', False)
location = setting('AWS_PRIVATE_LOCATION', '')
encryption = setting('AWS_PRIVATE_S3_ENCRYPTION', False)
custom_domain = setting('AWS_PRIVATE_S3_CUSTOM_DOMAIN')
addressing_style = setting('AWS_PRIVATE_S3_ADDRESSING_STYLE')
secure_urls = setting('AWS_PRIVATE_S3_SECURE_URLS', True)
file_name_charset = setting('AWS_PRIVATE_S3_FILE_NAME_CHARSET', 'utf-8')
gzip = setting('AWS_PRIVATE_IS_GZIPPED', S3Boto3Storage.gzip) # fallback to default
preload_metadata = setting('AWS_PRIVATE_PRELOAD_METADATA', False)
url_protocol = setting('AWS_PRIVATE_S3_URL_PROTOCOL', S3Boto3Storage.url_protocol) # fallback to default
endpoint_url = setting('AWS_PRIVATE_S3_ENDPOINT_URL', None)
region_name = setting('AWS_PRIVATE_S3_REGION_NAME', S3Boto3Storage.region_name) # fallback to default
use_ssl = setting('AWS_PRIVATE_S3_USE_SSL', True)
class PrivateEncryptedS3BotoStorage(PrivateS3BotoStorage):
"""
Enforced encryption for private storage on S3.
This is a convience option, it can also be implemented
through :class:`PrivateS3BotoStorage` by using the proper settings.
"""
encryption = True
signature_version = PrivateS3BotoStorage.signature_version or 's3v4'
|
Add new S3 storage class, PrivateS3BotoStorage and PrivateEncryptedS3BotoStorage
|
Add new S3 storage class, PrivateS3BotoStorage and PrivateEncryptedS3BotoStorage
This class can now be selected with the new `PRIVATE_STORAGE_CLASS` setting.
|
Python
|
apache-2.0
|
edoburu/django-private-storage
|
Add new S3 storage class, PrivateS3BotoStorage and PrivateEncryptedS3BotoStorage
This class can now be selected with the new `PRIVATE_STORAGE_CLASS` setting.
|
from storages.backends.s3boto3 import S3Boto3Storage
from storages.utils import setting
class PrivateS3BotoStorage(S3Boto3Storage):
"""
Private storage bucket for S3
"""
# Since this class inherits the default storage, it shares many parameters with the base class.
# Thus, redefine the setting name that is used to read these values, so almost all settings are not shared.
access_key = setting('AWS_PRIVATE_S3_ACCESS_KEY_ID', setting('AWS_PRIVATE_ACCESS_KEY_ID', S3Boto3Storage.access_key))
secret_key = setting('AWS_PRIVATE_S3_SECRET_ACCESS_KEY', setting('AWS_PRIVATE_SECRET_ACCESS_KEY', S3Boto3Storage.secret_key))
file_overwrite = setting('AWS_PRIVATE_S3_FILE_OVERWRITE', False) # false, differ from base class
object_parameters = setting('AWS_PRIVATE_S3_OBJECT_PARAMETERS', {})
bucket_name = setting('AWS_PRIVATE_STORAGE_BUCKET_NAME', strict=True)
auto_create_bucket = setting('AWS_PRIVATE_AUTO_CREATE_BUCKET', False)
default_acl = setting('AWS_PRIVATE_DEFAULT_ACL', 'private') # differ from base class
bucket_acl = setting('AWS_PRIVATE_BUCKET_ACL', default_acl)
querystring_auth = setting('AWS_PRIVATE_QUERYSTRING_AUTH', True)
querystring_expire = setting('AWS_PRIVATE_QUERYSTRING_EXPIRE', 3600)
signature_version = setting('AWS_PRIVATE_S3_SIGNATURE_VERSION')
reduced_redundancy = setting('AWS_PRIVATE_REDUCED_REDUNDANCY', False)
location = setting('AWS_PRIVATE_LOCATION', '')
encryption = setting('AWS_PRIVATE_S3_ENCRYPTION', False)
custom_domain = setting('AWS_PRIVATE_S3_CUSTOM_DOMAIN')
addressing_style = setting('AWS_PRIVATE_S3_ADDRESSING_STYLE')
secure_urls = setting('AWS_PRIVATE_S3_SECURE_URLS', True)
file_name_charset = setting('AWS_PRIVATE_S3_FILE_NAME_CHARSET', 'utf-8')
gzip = setting('AWS_PRIVATE_IS_GZIPPED', S3Boto3Storage.gzip) # fallback to default
preload_metadata = setting('AWS_PRIVATE_PRELOAD_METADATA', False)
url_protocol = setting('AWS_PRIVATE_S3_URL_PROTOCOL', S3Boto3Storage.url_protocol) # fallback to default
endpoint_url = setting('AWS_PRIVATE_S3_ENDPOINT_URL', None)
region_name = setting('AWS_PRIVATE_S3_REGION_NAME', S3Boto3Storage.region_name) # fallback to default
use_ssl = setting('AWS_PRIVATE_S3_USE_SSL', True)
class PrivateEncryptedS3BotoStorage(PrivateS3BotoStorage):
"""
Enforced encryption for private storage on S3.
This is a convience option, it can also be implemented
through :class:`PrivateS3BotoStorage` by using the proper settings.
"""
encryption = True
signature_version = PrivateS3BotoStorage.signature_version or 's3v4'
|
<commit_before><commit_msg>Add new S3 storage class, PrivateS3BotoStorage and PrivateEncryptedS3BotoStorage
This class can now be selected with the new `PRIVATE_STORAGE_CLASS` setting.<commit_after>
|
from storages.backends.s3boto3 import S3Boto3Storage
from storages.utils import setting
class PrivateS3BotoStorage(S3Boto3Storage):
"""
Private storage bucket for S3
"""
# Since this class inherits the default storage, it shares many parameters with the base class.
# Thus, redefine the setting name that is used to read these values, so almost all settings are not shared.
access_key = setting('AWS_PRIVATE_S3_ACCESS_KEY_ID', setting('AWS_PRIVATE_ACCESS_KEY_ID', S3Boto3Storage.access_key))
secret_key = setting('AWS_PRIVATE_S3_SECRET_ACCESS_KEY', setting('AWS_PRIVATE_SECRET_ACCESS_KEY', S3Boto3Storage.secret_key))
file_overwrite = setting('AWS_PRIVATE_S3_FILE_OVERWRITE', False) # false, differ from base class
object_parameters = setting('AWS_PRIVATE_S3_OBJECT_PARAMETERS', {})
bucket_name = setting('AWS_PRIVATE_STORAGE_BUCKET_NAME', strict=True)
auto_create_bucket = setting('AWS_PRIVATE_AUTO_CREATE_BUCKET', False)
default_acl = setting('AWS_PRIVATE_DEFAULT_ACL', 'private') # differ from base class
bucket_acl = setting('AWS_PRIVATE_BUCKET_ACL', default_acl)
querystring_auth = setting('AWS_PRIVATE_QUERYSTRING_AUTH', True)
querystring_expire = setting('AWS_PRIVATE_QUERYSTRING_EXPIRE', 3600)
signature_version = setting('AWS_PRIVATE_S3_SIGNATURE_VERSION')
reduced_redundancy = setting('AWS_PRIVATE_REDUCED_REDUNDANCY', False)
location = setting('AWS_PRIVATE_LOCATION', '')
encryption = setting('AWS_PRIVATE_S3_ENCRYPTION', False)
custom_domain = setting('AWS_PRIVATE_S3_CUSTOM_DOMAIN')
addressing_style = setting('AWS_PRIVATE_S3_ADDRESSING_STYLE')
secure_urls = setting('AWS_PRIVATE_S3_SECURE_URLS', True)
file_name_charset = setting('AWS_PRIVATE_S3_FILE_NAME_CHARSET', 'utf-8')
gzip = setting('AWS_PRIVATE_IS_GZIPPED', S3Boto3Storage.gzip) # fallback to default
preload_metadata = setting('AWS_PRIVATE_PRELOAD_METADATA', False)
url_protocol = setting('AWS_PRIVATE_S3_URL_PROTOCOL', S3Boto3Storage.url_protocol) # fallback to default
endpoint_url = setting('AWS_PRIVATE_S3_ENDPOINT_URL', None)
region_name = setting('AWS_PRIVATE_S3_REGION_NAME', S3Boto3Storage.region_name) # fallback to default
use_ssl = setting('AWS_PRIVATE_S3_USE_SSL', True)
class PrivateEncryptedS3BotoStorage(PrivateS3BotoStorage):
"""
Enforced encryption for private storage on S3.
This is a convience option, it can also be implemented
through :class:`PrivateS3BotoStorage` by using the proper settings.
"""
encryption = True
signature_version = PrivateS3BotoStorage.signature_version or 's3v4'
|
Add new S3 storage class, PrivateS3BotoStorage and PrivateEncryptedS3BotoStorage
This class can now be selected with the new `PRIVATE_STORAGE_CLASS` setting.from storages.backends.s3boto3 import S3Boto3Storage
from storages.utils import setting
class PrivateS3BotoStorage(S3Boto3Storage):
"""
Private storage bucket for S3
"""
# Since this class inherits the default storage, it shares many parameters with the base class.
# Thus, redefine the setting name that is used to read these values, so almost all settings are not shared.
access_key = setting('AWS_PRIVATE_S3_ACCESS_KEY_ID', setting('AWS_PRIVATE_ACCESS_KEY_ID', S3Boto3Storage.access_key))
secret_key = setting('AWS_PRIVATE_S3_SECRET_ACCESS_KEY', setting('AWS_PRIVATE_SECRET_ACCESS_KEY', S3Boto3Storage.secret_key))
file_overwrite = setting('AWS_PRIVATE_S3_FILE_OVERWRITE', False) # false, differ from base class
object_parameters = setting('AWS_PRIVATE_S3_OBJECT_PARAMETERS', {})
bucket_name = setting('AWS_PRIVATE_STORAGE_BUCKET_NAME', strict=True)
auto_create_bucket = setting('AWS_PRIVATE_AUTO_CREATE_BUCKET', False)
default_acl = setting('AWS_PRIVATE_DEFAULT_ACL', 'private') # differ from base class
bucket_acl = setting('AWS_PRIVATE_BUCKET_ACL', default_acl)
querystring_auth = setting('AWS_PRIVATE_QUERYSTRING_AUTH', True)
querystring_expire = setting('AWS_PRIVATE_QUERYSTRING_EXPIRE', 3600)
signature_version = setting('AWS_PRIVATE_S3_SIGNATURE_VERSION')
reduced_redundancy = setting('AWS_PRIVATE_REDUCED_REDUNDANCY', False)
location = setting('AWS_PRIVATE_LOCATION', '')
encryption = setting('AWS_PRIVATE_S3_ENCRYPTION', False)
custom_domain = setting('AWS_PRIVATE_S3_CUSTOM_DOMAIN')
addressing_style = setting('AWS_PRIVATE_S3_ADDRESSING_STYLE')
secure_urls = setting('AWS_PRIVATE_S3_SECURE_URLS', True)
file_name_charset = setting('AWS_PRIVATE_S3_FILE_NAME_CHARSET', 'utf-8')
gzip = setting('AWS_PRIVATE_IS_GZIPPED', S3Boto3Storage.gzip) # fallback to default
preload_metadata = setting('AWS_PRIVATE_PRELOAD_METADATA', False)
url_protocol = setting('AWS_PRIVATE_S3_URL_PROTOCOL', S3Boto3Storage.url_protocol) # fallback to default
endpoint_url = setting('AWS_PRIVATE_S3_ENDPOINT_URL', None)
region_name = setting('AWS_PRIVATE_S3_REGION_NAME', S3Boto3Storage.region_name) # fallback to default
use_ssl = setting('AWS_PRIVATE_S3_USE_SSL', True)
class PrivateEncryptedS3BotoStorage(PrivateS3BotoStorage):
"""
Enforced encryption for private storage on S3.
This is a convience option, it can also be implemented
through :class:`PrivateS3BotoStorage` by using the proper settings.
"""
encryption = True
signature_version = PrivateS3BotoStorage.signature_version or 's3v4'
|
<commit_before><commit_msg>Add new S3 storage class, PrivateS3BotoStorage and PrivateEncryptedS3BotoStorage
This class can now be selected with the new `PRIVATE_STORAGE_CLASS` setting.<commit_after>from storages.backends.s3boto3 import S3Boto3Storage
from storages.utils import setting
class PrivateS3BotoStorage(S3Boto3Storage):
"""
Private storage bucket for S3
"""
# Since this class inherits the default storage, it shares many parameters with the base class.
# Thus, redefine the setting name that is used to read these values, so almost all settings are not shared.
access_key = setting('AWS_PRIVATE_S3_ACCESS_KEY_ID', setting('AWS_PRIVATE_ACCESS_KEY_ID', S3Boto3Storage.access_key))
secret_key = setting('AWS_PRIVATE_S3_SECRET_ACCESS_KEY', setting('AWS_PRIVATE_SECRET_ACCESS_KEY', S3Boto3Storage.secret_key))
file_overwrite = setting('AWS_PRIVATE_S3_FILE_OVERWRITE', False) # false, differ from base class
object_parameters = setting('AWS_PRIVATE_S3_OBJECT_PARAMETERS', {})
bucket_name = setting('AWS_PRIVATE_STORAGE_BUCKET_NAME', strict=True)
auto_create_bucket = setting('AWS_PRIVATE_AUTO_CREATE_BUCKET', False)
default_acl = setting('AWS_PRIVATE_DEFAULT_ACL', 'private') # differ from base class
bucket_acl = setting('AWS_PRIVATE_BUCKET_ACL', default_acl)
querystring_auth = setting('AWS_PRIVATE_QUERYSTRING_AUTH', True)
querystring_expire = setting('AWS_PRIVATE_QUERYSTRING_EXPIRE', 3600)
signature_version = setting('AWS_PRIVATE_S3_SIGNATURE_VERSION')
reduced_redundancy = setting('AWS_PRIVATE_REDUCED_REDUNDANCY', False)
location = setting('AWS_PRIVATE_LOCATION', '')
encryption = setting('AWS_PRIVATE_S3_ENCRYPTION', False)
custom_domain = setting('AWS_PRIVATE_S3_CUSTOM_DOMAIN')
addressing_style = setting('AWS_PRIVATE_S3_ADDRESSING_STYLE')
secure_urls = setting('AWS_PRIVATE_S3_SECURE_URLS', True)
file_name_charset = setting('AWS_PRIVATE_S3_FILE_NAME_CHARSET', 'utf-8')
gzip = setting('AWS_PRIVATE_IS_GZIPPED', S3Boto3Storage.gzip) # fallback to default
preload_metadata = setting('AWS_PRIVATE_PRELOAD_METADATA', False)
url_protocol = setting('AWS_PRIVATE_S3_URL_PROTOCOL', S3Boto3Storage.url_protocol) # fallback to default
endpoint_url = setting('AWS_PRIVATE_S3_ENDPOINT_URL', None)
region_name = setting('AWS_PRIVATE_S3_REGION_NAME', S3Boto3Storage.region_name) # fallback to default
use_ssl = setting('AWS_PRIVATE_S3_USE_SSL', True)
class PrivateEncryptedS3BotoStorage(PrivateS3BotoStorage):
"""
Enforced encryption for private storage on S3.
This is a convience option, it can also be implemented
through :class:`PrivateS3BotoStorage` by using the proper settings.
"""
encryption = True
signature_version = PrivateS3BotoStorage.signature_version or 's3v4'
|
|
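A hypothetical Django settings fragment showing how a project might opt into the backend above; per the commit message, the class is selected through the new PRIVATE_STORAGE_CLASS setting. The dotted path is inferred from the file location, the bucket name and credentials are placeholders, and every AWS_PRIVATE_* name mirrors a class attribute in the record.

PRIVATE_STORAGE_CLASS = 'private_storage.storage.s3boto3.PrivateS3BotoStorage'
AWS_PRIVATE_STORAGE_BUCKET_NAME = 'my-private-bucket'   # required: setting(..., strict=True)
AWS_PRIVATE_S3_ACCESS_KEY_ID = 'AKIA...'                # placeholder credentials
AWS_PRIVATE_S3_SECRET_ACCESS_KEY = '...'
AWS_PRIVATE_QUERYSTRING_EXPIRE = 300                    # signed URLs valid for five minutes
# For enforced server-side encryption, point PRIVATE_STORAGE_CLASS at
# PrivateEncryptedS3BotoStorage instead; it forces encryption and defaults the
# signature version to s3v4 when none is configured.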
b736ae17f17aa4034f1722a6a3c449aed07fd8cd
|
hanzifreqs.py
|
hanzifreqs.py
|
import sys
from hanzidefs import get_all_hanzi
if __name__ == "__main__":
chars = get_all_hanzi(sys.stdin.read())
for ch, ct in chars:
print("%s\t%s" % (ch, ct))
|
Add separate script to print out frequencies only
|
Add separate script to print out frequencies only
|
Python
|
agpl-3.0
|
erjiang/hanzidefs
|
Add separate script to print out frequencies only
|
import sys
from hanzidefs import get_all_hanzi
if __name__ == "__main__":
chars = get_all_hanzi(sys.stdin.read())
for ch, ct in chars:
print("%s\t%s" % (ch, ct))
|
<commit_before><commit_msg>Add separate script to print out frequencies only<commit_after>
|
import sys
from hanzidefs import get_all_hanzi
if __name__ == "__main__":
chars = get_all_hanzi(sys.stdin.read())
for ch, ct in chars:
print("%s\t%s" % (ch, ct))
|
Add separate script to print out frequencies onlyimport sys
from hanzidefs import get_all_hanzi
if __name__ == "__main__":
chars = get_all_hanzi(sys.stdin.read())
for ch, ct in chars:
print("%s\t%s" % (ch, ct))
|
<commit_before><commit_msg>Add separate script to print out frequencies only<commit_after>import sys
from hanzidefs import get_all_hanzi
if __name__ == "__main__":
chars = get_all_hanzi(sys.stdin.read())
for ch, ct in chars:
print("%s\t%s" % (ch, ct))
|
|
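hanzidefs.get_all_hanzi is not shown in the record, so here is a minimal, self-contained stand-in that reproduces the same tab-separated output format using collections.Counter over the CJK Unified Ideographs range; the sample string is made up for the demo.

from collections import Counter

text = "汉字汉语测试汉"                                   # made-up sample input
counts = Counter(ch for ch in text if '\u4e00' <= ch <= '\u9fff')
for ch, ct in counts.most_common():
    print("%s\t%s" % (ch, ct))                           # same "char<TAB>count" format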
f962333ab52c0041f4873ff27da4185a51df7795
|
demo/apps/catalogue/migrations/0011_remove_category_name.py
|
demo/apps/catalogue/migrations/0011_remove_category_name.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0010_auto_20160616_1048'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='name',
),
]
|
Add remove cat name migration
|
Add remove cat name migration
|
Python
|
mit
|
pgovers/oscar-wagtail-demo,pgovers/oscar-wagtail-demo
|
Add remove cat name migration
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0010_auto_20160616_1048'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='name',
),
]
|
<commit_before><commit_msg>Add remove cat name migration<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0010_auto_20160616_1048'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='name',
),
]
|
Add remove cat name migration# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0010_auto_20160616_1048'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='name',
),
]
|
<commit_before><commit_msg>Add remove cat name migration<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0010_auto_20160616_1048'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='name',
),
]
|
|
5cb11884e953a734afab55a4548753fe99a16d5b
|
{{cookiecutter.repo_name}}/tests/test_{{cookiecutter.repo_name}}.py
|
{{cookiecutter.repo_name}}/tests/test_{{cookiecutter.repo_name}}.py
|
# -*- coding: utf-8 -*-
import pytest
@pytest.fixture
def basic_app():
"""Fixture for a default app.
Returns:
:class:`{{cookiecutter.app_class_name}}`: App instance
"""
from {{cookiecutter.repo_name}} import {{cookiecutter.app_class_name}}
return {{cookiecutter.app_class_name}}()
def test_app_title(basic_app):
"""Simply tests if the default app title meets our expectations.
Args:
basic_app (:class:`{{cookiecutter.app_class_name}}`): Default app instance
Raises:
AssertionError: If the title does not match
"""
assert basic_app.title == "{{cookiecutter.app_title}}"
|
Create simple test module for the app title
|
Create simple test module for the app title
|
Python
|
mit
|
hackebrot/cookiedozer,hackebrot/cookiedozer
|
Create simple test module for the app title
|
# -*- coding: utf-8 -*-
import pytest
@pytest.fixture
def basic_app():
"""Fixture for a default app.
Returns:
:class:`{{cookiecutter.app_class_name}}`: App instance
"""
from {{cookiecutter.repo_name}} import {{cookiecutter.app_class_name}}
return {{cookiecutter.app_class_name}}()
def test_app_title(basic_app):
"""Simply tests if the default app title meets our expectations.
Args:
basic_app (:class:`{{cookiecutter.app_class_name}}`): Default app instance
Raises:
AssertionError: If the title does not match
"""
assert basic_app.title == "{{cookiecutter.app_title}}"
|
<commit_before><commit_msg>Create simple test module for the app title<commit_after>
|
# -*- coding: utf-8 -*-
import pytest
@pytest.fixture
def basic_app():
"""Fixture for a default app.
Returns:
:class:`{{cookiecutter.app_class_name}}`: App instance
"""
from {{cookiecutter.repo_name}} import {{cookiecutter.app_class_name}}
return {{cookiecutter.app_class_name}}()
def test_app_title(basic_app):
"""Simply tests if the default app title meets our expectations.
Args:
basic_app (:class:`{{cookiecutter.app_class_name}}`): Default app instance
Raises:
AssertionError: If the title does not match
"""
assert basic_app.title == "{{cookiecutter.app_title}}"
|
Create simple test module for the app title# -*- coding: utf-8 -*-
import pytest
@pytest.fixture
def basic_app():
"""Fixture for a default app.
Returns:
:class:`{{cookiecutter.app_class_name}}`: App instance
"""
from {{cookiecutter.repo_name}} import {{cookiecutter.app_class_name}}
return {{cookiecutter.app_class_name}}()
def test_app_title(basic_app):
"""Simply tests if the default app title meets our expectations.
Args:
basic_app (:class:`{{cookiecutter.app_class_name}}`): Default app instance
Raises:
AssertionError: If the title does not match
"""
assert basic_app.title == "{{cookiecutter.app_title}}"
|
<commit_before><commit_msg>Create simple test module for the app title<commit_after># -*- coding: utf-8 -*-
import pytest
@pytest.fixture
def basic_app():
"""Fixture for a default app.
Returns:
:class:`{{cookiecutter.app_class_name}}`: App instance
"""
from {{cookiecutter.repo_name}} import {{cookiecutter.app_class_name}}
return {{cookiecutter.app_class_name}}()
def test_app_title(basic_app):
"""Simply tests if the default app title meets our expectations.
Args:
basic_app (:class:`{{cookiecutter.app_class_name}}`): Default app instance
Raises:
AssertionError: If the title does not match
"""
assert basic_app.title == "{{cookiecutter.app_title}}"
|
|
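The record above is a cookiecutter template, so the {{cookiecutter.*}} placeholders keep it from running as-is. Below is a hedged sketch of what the rendered test might look like for a hypothetical app class DozerApp with title "Dozer", just to show the fixture/test shape under pytest.

import pytest

class DozerApp(object):            # stand-in for the generated app class
    title = "Dozer"

@pytest.fixture
def basic_app():
    return DozerApp()

def test_app_title(basic_app):
    assert basic_app.title == "Dozer"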
c245bd90dd8f1fd90da45e737ff3cbfba43707fa
|
run_tests.py
|
run_tests.py
|
#!/usr/bin/env python
#
# Copyright 2012 Ezox Systems LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Setup paths and App Engine's stubs, then run tests."""
import os
import sys
import argparse
import tempfile
CURRENT_PATH = os.getcwdu()
# Setup your paths here...
paths = [
#os.path.join(CURRENT_PATH, 'lib'),
]
try:
from dev_appserver import fix_sys_path
except ImportError:
# Something is not setup right, maybe they're using a local .pth file.
import site
site.addsitedir('.')
# Now, try again.
from dev_appserver import fix_sys_path
fix_sys_path()
sys.path.extend(paths)
import unittest
stub_config = {
'login_url': None,
'require_indexes': True,
'clear_datastore': False,
'save_changes': False,
}
def run():
parser = argparse.ArgumentParser(description='Run tests')
parser.add_argument('tests', nargs='+', default='', help="Path to tests to be run.")
parser.add_argument('--failfast', action='store_true', default=False)
parser.add_argument('--verbosity', '-v', type=int, default=2)
args = parser.parse_args()
suite = _build_suite(args.tests)
_setup_environment()
_run_suite(suite, args)
def _run_suite(suite, options):
runner = unittest.TextTestRunner(
verbosity=options.verbosity,
failfast=options.failfast)
return runner.run(suite)
def _build_suite(tests):
loader = unittest.defaultTestLoader
suite = unittest.TestSuite()
if not tests:
suite.addTests(loader.discover(CURRENT_PATH))
else:
for label in tests:
rel_root = label.replace('.', os.path.sep)
if os.path.exists(rel_root):
suite.addTests(loader.discover(rel_root, top_level_dir=os.getcwdu()))
else:
suite.addTests(loader.loadTestsFromName(label))
return suite
def _setup_environment():
from google.appengine.tools import dev_appserver
config = stub_config.copy()
config['root_path'] = os.getcwd()
config['blobstore_path'] = tempfile.mkdtemp()
config['datastore_path'] = tempfile.mktemp()
config['high_replication'] = True
dev_appserver.SetupStubs('unittest', **config)
import logging
logging.getLogger().setLevel(logging.DEBUG)
if __name__ == '__main__':
run()
|
Add simple unit test runner to setup environment.
|
Add simple unit test runner to setup environment.
The test runner performs setup like configuring paths and setting up App
Engine stubs needed to run tests.
|
Python
|
apache-2.0
|
andreleblanc-wf/furious,beaulyddon-wf/furious,rosshendrickson-wf/furious,mattsanders-wf/furious,Workiva/furious,beaulyddon-wf/furious,rosshendrickson-wf/furious,mattsanders-wf/furious,Workiva/furious,andreleblanc-wf/furious,robertkluin/furious
|
Add simple unit test runner to setup environment.
The test runner performs setup like configuring paths and setting up App
Engine stubs needed to run tests.
|
#!/usr/bin/env python
#
# Copyright 2012 Ezox Systems LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Setup paths and App Engine's stubs, then run tests."""
import os
import sys
import argparse
import tempfile
CURRENT_PATH = os.getcwdu()
# Setup your paths here...
paths = [
#os.path.join(CURRENT_PATH, 'lib'),
]
try:
from dev_appserver import fix_sys_path
except ImportError:
# Something is not setup right, maybe they're using a local .pth file.
import site
site.addsitedir('.')
# Now, try again.
from dev_appserver import fix_sys_path
fix_sys_path()
sys.path.extend(paths)
import unittest
stub_config = {
'login_url': None,
'require_indexes': True,
'clear_datastore': False,
'save_changes': False,
}
def run():
parser = argparse.ArgumentParser(description='Run tests')
parser.add_argument('tests', nargs='+', default='', help="Path to tests to be run.")
parser.add_argument('--failfast', action='store_true', default=False)
parser.add_argument('--verbosity', '-v', type=int, default=2)
args = parser.parse_args()
suite = _build_suite(args.tests)
_setup_environment()
_run_suite(suite, args)
def _run_suite(suite, options):
runner = unittest.TextTestRunner(
verbosity=options.verbosity,
failfast=options.failfast)
return runner.run(suite)
def _build_suite(tests):
loader = unittest.defaultTestLoader
suite = unittest.TestSuite()
if not tests:
suite.addTests(loader.discover(CURRENT_PATH))
else:
for label in tests:
rel_root = label.replace('.', os.path.sep)
if os.path.exists(rel_root):
suite.addTests(loader.discover(rel_root, top_level_dir=os.getcwdu()))
else:
suite.addTests(loader.loadTestsFromName(label))
return suite
def _setup_environment():
from google.appengine.tools import dev_appserver
config = stub_config.copy()
config['root_path'] = os.getcwd()
config['blobstore_path'] = tempfile.mkdtemp()
config['datastore_path'] = tempfile.mktemp()
config['high_replication'] = True
dev_appserver.SetupStubs('unittest', **config)
import logging
logging.getLogger().setLevel(logging.DEBUG)
if __name__ == '__main__':
run()
|
<commit_before><commit_msg>Add simple unit test runner to setup environment.
The test runner performs setup like configuring paths and setting up App
Engine stubs needed to run tests.<commit_after>
|
#!/usr/bin/env python
#
# Copyright 2012 Ezox Systems LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Setup paths and App Engine's stubs, then run tests."""
import os
import sys
import argparse
import tempfile
CURRENT_PATH = os.getcwdu()
# Setup your paths here...
paths = [
#os.path.join(CURRENT_PATH, 'lib'),
]
try:
from dev_appserver import fix_sys_path
except ImportError:
# Something is not setup right, maybe they're using a local .pth file.
import site
site.addsitedir('.')
# Now, try again.
from dev_appserver import fix_sys_path
fix_sys_path()
sys.path.extend(paths)
import unittest
stub_config = {
'login_url': None,
'require_indexes': True,
'clear_datastore': False,
'save_changes': False,
}
def run():
parser = argparse.ArgumentParser(description='Run tests')
parser.add_argument('tests', nargs='+', default='', help="Path to tests to be run.")
parser.add_argument('--failfast', action='store_true', default=False)
parser.add_argument('--verbosity', '-v', type=int, default=2)
args = parser.parse_args()
suite = _build_suite(args.tests)
_setup_environment()
_run_suite(suite, args)
def _run_suite(suite, options):
runner = unittest.TextTestRunner(
verbosity=options.verbosity,
failfast=options.failfast)
return runner.run(suite)
def _build_suite(tests):
loader = unittest.defaultTestLoader
suite = unittest.TestSuite()
if not tests:
suite.addTests(loader.discover(CURRENT_PATH))
else:
for label in tests:
rel_root = label.replace('.', os.path.sep)
if os.path.exists(rel_root):
suite.addTests(loader.discover(rel_root, top_level_dir=os.getcwdu()))
else:
suite.addTests(loader.loadTestsFromName(label))
return suite
def _setup_environment():
from google.appengine.tools import dev_appserver
config = stub_config.copy()
config['root_path'] = os.getcwd()
config['blobstore_path'] = tempfile.mkdtemp()
config['datastore_path'] = tempfile.mktemp()
config['high_replication'] = True
dev_appserver.SetupStubs('unittest', **config)
import logging
logging.getLogger().setLevel(logging.DEBUG)
if __name__ == '__main__':
run()
|
Add simple unit test runner to setup environment.
The test runner performs setup like configuring paths and setting up App
Engine stubs needed to run tests.#!/usr/bin/env python
#
# Copyright 2012 Ezox Systems LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Setup paths and App Engine's stubs, then run tests."""
import os
import sys
import argparse
import tempfile
CURRENT_PATH = os.getcwdu()
# Setup your paths here...
paths = [
#os.path.join(CURRENT_PATH, 'lib'),
]
try:
from dev_appserver import fix_sys_path
except ImportError:
# Something is not setup right, maybe they're using a local .pth file.
import site
site.addsitedir('.')
# Now, try again.
from dev_appserver import fix_sys_path
fix_sys_path()
sys.path.extend(paths)
import unittest
stub_config = {
'login_url': None,
'require_indexes': True,
'clear_datastore': False,
'save_changes': False,
}
def run():
parser = argparse.ArgumentParser(description='Run tests')
parser.add_argument('tests', nargs='+', default='', help="Path to tests to be run.")
parser.add_argument('--failfast', action='store_true', default=False)
parser.add_argument('--verbosity', '-v', type=int, default=2)
args = parser.parse_args()
suite = _build_suite(args.tests)
_setup_environment()
_run_suite(suite, args)
def _run_suite(suite, options):
runner = unittest.TextTestRunner(
verbosity=options.verbosity,
failfast=options.failfast)
return runner.run(suite)
def _build_suite(tests):
loader = unittest.defaultTestLoader
suite = unittest.TestSuite()
if not tests:
suite.addTests(loader.discover(CURRENT_PATH))
else:
for label in tests:
rel_root = label.replace('.', os.path.sep)
if os.path.exists(rel_root):
suite.addTests(loader.discover(rel_root, top_level_dir=os.getcwdu()))
else:
suite.addTests(loader.loadTestsFromName(label))
return suite
def _setup_environment():
from google.appengine.tools import dev_appserver
config = stub_config.copy()
config['root_path'] = os.getcwd()
config['blobstore_path'] = tempfile.mkdtemp()
config['datastore_path'] = tempfile.mktemp()
config['high_replication'] = True
dev_appserver.SetupStubs('unittest', **config)
import logging
logging.getLogger().setLevel(logging.DEBUG)
if __name__ == '__main__':
run()
|
<commit_before><commit_msg>Add simple unit test runner to setup environment.
The test runner performs setup like configuring paths and setting up App
Engine stubs needed to run tests.<commit_after>#!/usr/bin/env python
#
# Copyright 2012 Ezox Systems LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Setup paths and App Engine's stubs, then run tests."""
import os
import sys
import argparse
import tempfile
CURRENT_PATH = os.getcwdu()
# Setup your paths here...
paths = [
#os.path.join(CURRENT_PATH, 'lib'),
]
try:
from dev_appserver import fix_sys_path
except ImportError:
# Something is not setup right, maybe they're using a local .pth file.
import site
site.addsitedir('.')
# Now, try again.
from dev_appserver import fix_sys_path
fix_sys_path()
sys.path.extend(paths)
import unittest
stub_config = {
'login_url': None,
'require_indexes': True,
'clear_datastore': False,
'save_changes': False,
}
def run():
parser = argparse.ArgumentParser(description='Run tests')
parser.add_argument('tests', nargs='+', default='', help="Path to tests to be run.")
parser.add_argument('--failfast', action='store_true', default=False)
parser.add_argument('--verbosity', '-v', type=int, default=2)
args = parser.parse_args()
suite = _build_suite(args.tests)
_setup_environment()
_run_suite(suite, args)
def _run_suite(suite, options):
runner = unittest.TextTestRunner(
verbosity=options.verbosity,
failfast=options.failfast)
return runner.run(suite)
def _build_suite(tests):
loader = unittest.defaultTestLoader
suite = unittest.TestSuite()
if not tests:
suite.addTests(loader.discover(CURRENT_PATH))
else:
for label in tests:
rel_root = label.replace('.', os.path.sep)
if os.path.exists(rel_root):
suite.addTests(loader.discover(rel_root, top_level_dir=os.getcwdu()))
else:
suite.addTests(loader.loadTestsFromName(label))
return suite
def _setup_environment():
from google.appengine.tools import dev_appserver
config = stub_config.copy()
config['root_path'] = os.getcwd()
config['blobstore_path'] = tempfile.mkdtemp()
config['datastore_path'] = tempfile.mktemp()
config['high_replication'] = True
dev_appserver.SetupStubs('unittest', **config)
import logging
logging.getLogger().setLevel(logging.DEBUG)
if __name__ == '__main__':
run()
|
|
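Hedged usage notes for the runner above (test labels and paths are hypothetical): python run_tests.py discovers every test under the working directory, while python run_tests.py tests.test_async -v 1 --failfast narrows the run. The snippet reproduces only the label handling from _build_suite, its least obvious part: a dotted label is first tried as a directory and only then as an importable test name.

import os

def label_to_rel_root(label):
    # mirrors _build_suite: 'tests.test_async' -> 'tests/test_async'
    return label.replace('.', os.path.sep)

print(label_to_rel_root('tests.test_async'))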
aa36823abcd2371519ab2e6c195c6ffb2f7af27a
|
abstract_soup.py
|
abstract_soup.py
|
# import sys
from bs4 import BeautifulSoup
fn = "/Users/ajh/Code/openssr-parser/sample-data/wb-abstract-2770053.html"
soup = BeautifulSoup(
open(fn),
"html.parser")
print("Ready. Beautiful Soup object available as soup.")
|
Add interactive script for parsing abstracts
|
Add interactive script for parsing abstracts
Run with `python3 -i abstract_soup.py`
|
Python
|
agpl-3.0
|
OpenSSR/openssr-parser,OpenSSR/openssr-parser
|
Add interactive script for parsing abstracts
Run with `python3 -i abstract_soup.py`
|
# import sys
from bs4 import BeautifulSoup
fn = "/Users/ajh/Code/openssr-parser/sample-data/wb-abstract-2770053.html"
soup = BeautifulSoup(
open(fn),
"html.parser")
print("Ready. Beautiful Soup object available as soup.")
|
<commit_before><commit_msg>Add interactive script for parsing abstracts
Run with `python3 -i abstract_soup.py`<commit_after>
|
# import sys
from bs4 import BeautifulSoup
fn = "/Users/ajh/Code/openssr-parser/sample-data/wb-abstract-2770053.html"
soup = BeautifulSoup(
open(fn),
"html.parser")
print("Ready. Beautiful Soup object available as soup.")
|
Add interactive script for parsing abstracts
Run with `python3 -i abstract_soup.py`# import sys
from bs4 import BeautifulSoup
fn = "/Users/ajh/Code/openssr-parser/sample-data/wb-abstract-2770053.html"
soup = BeautifulSoup(
open(fn),
"html.parser")
print("Ready. Beautiful Soup object available as soup.")
|
<commit_before><commit_msg>Add interactive script for parsing abstracts
Run with `python3 -i abstract_soup.py`<commit_after># import sys
from bs4 import BeautifulSoup
fn = "/Users/ajh/Code/openssr-parser/sample-data/wb-abstract-2770053.html"
soup = BeautifulSoup(
open(fn),
"html.parser")
print("Ready. Beautiful Soup object available as soup.")
|
|
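The absolute path in the record only exists on the author's machine; the same interactive workflow works with any HTML handle. A self-contained variant with a made-up abstract:

from bs4 import BeautifulSoup

html = "<html><body><p id='abstract'>Example abstract text.</p></body></html>"
soup = BeautifulSoup(html, "html.parser")
print(soup.find("p", id="abstract").get_text())   # -> Example abstract text.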
6eeaaedb0a15dbecac93e649b456a7adfdbbc919
|
tests/thrift/test_multiple_services.py
|
tests/thrift/test_multiple_services.py
|
# Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import (
absolute_import, print_function, unicode_literals, division
)
import pytest
from tchannel import TChannel, thrift
@pytest.mark.gen_test
def test_inherited_method_names(tmpdir):
thrift_file = tmpdir.join('service.thrift')
thrift_file.write('''
service Base { string hello() }
service Foo extends Base {}
service Bar extends Base {}
''')
service = thrift.load(str(thrift_file), 'myservice')
server = TChannel('server')
@server.thrift.register(service.Foo, method='hello')
def foo_hello(request):
return 'foo'
@server.thrift.register(service.Bar, method='hello')
def bar_hello(request):
return 'bar'
server.listen()
client = TChannel('client')
res = yield client.thrift(service.Foo.hello(), hostport=server.hostport)
assert res.body == 'foo'
res = yield client.thrift(service.Bar.hello(), hostport=server.hostport)
assert res.body == 'bar'
|
Verify Thrift service name used for inherited methods
|
Test: Verify Thrift service name used for inherited methods
|
Python
|
mit
|
uber/tchannel-python,uber/tchannel-python
|
Test: Verify Thrift service name used for inherited methods
|
# Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import (
absolute_import, print_function, unicode_literals, division
)
import pytest
from tchannel import TChannel, thrift
@pytest.mark.gen_test
def test_inherited_method_names(tmpdir):
thrift_file = tmpdir.join('service.thrift')
thrift_file.write('''
service Base { string hello() }
service Foo extends Base {}
service Bar extends Base {}
''')
service = thrift.load(str(thrift_file), 'myservice')
server = TChannel('server')
@server.thrift.register(service.Foo, method='hello')
def foo_hello(request):
return 'foo'
@server.thrift.register(service.Bar, method='hello')
def bar_hello(request):
return 'bar'
server.listen()
client = TChannel('client')
res = yield client.thrift(service.Foo.hello(), hostport=server.hostport)
assert res.body == 'foo'
res = yield client.thrift(service.Bar.hello(), hostport=server.hostport)
assert res.body == 'bar'
|
<commit_before><commit_msg>Test: Verify Thrift service name used for inherited methods<commit_after>
|
# Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import (
absolute_import, print_function, unicode_literals, division
)
import pytest
from tchannel import TChannel, thrift
@pytest.mark.gen_test
def test_inherited_method_names(tmpdir):
thrift_file = tmpdir.join('service.thrift')
thrift_file.write('''
service Base { string hello() }
service Foo extends Base {}
service Bar extends Base {}
''')
service = thrift.load(str(thrift_file), 'myservice')
server = TChannel('server')
@server.thrift.register(service.Foo, method='hello')
def foo_hello(request):
return 'foo'
@server.thrift.register(service.Bar, method='hello')
def bar_hello(request):
return 'bar'
server.listen()
client = TChannel('client')
res = yield client.thrift(service.Foo.hello(), hostport=server.hostport)
assert res.body == 'foo'
res = yield client.thrift(service.Bar.hello(), hostport=server.hostport)
assert res.body == 'bar'
|
Test: Verify Thrift service name used for inherited methods# Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import (
absolute_import, print_function, unicode_literals, division
)
import pytest
from tchannel import TChannel, thrift
@pytest.mark.gen_test
def test_inherited_method_names(tmpdir):
thrift_file = tmpdir.join('service.thrift')
thrift_file.write('''
service Base { string hello() }
service Foo extends Base {}
service Bar extends Base {}
''')
service = thrift.load(str(thrift_file), 'myservice')
server = TChannel('server')
@server.thrift.register(service.Foo, method='hello')
def foo_hello(request):
return 'foo'
@server.thrift.register(service.Bar, method='hello')
def bar_hello(request):
return 'bar'
server.listen()
client = TChannel('client')
res = yield client.thrift(service.Foo.hello(), hostport=server.hostport)
assert res.body == 'foo'
res = yield client.thrift(service.Bar.hello(), hostport=server.hostport)
assert res.body == 'bar'
|
<commit_before><commit_msg>Test: Verify Thrift service name used for inherited methods<commit_after># Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import (
absolute_import, print_function, unicode_literals, division
)
import pytest
from tchannel import TChannel, thrift
@pytest.mark.gen_test
def test_inherited_method_names(tmpdir):
thrift_file = tmpdir.join('service.thrift')
thrift_file.write('''
service Base { string hello() }
service Foo extends Base {}
service Bar extends Base {}
''')
service = thrift.load(str(thrift_file), 'myservice')
server = TChannel('server')
@server.thrift.register(service.Foo, method='hello')
def foo_hello(request):
return 'foo'
@server.thrift.register(service.Bar, method='hello')
def bar_hello(request):
return 'bar'
server.listen()
client = TChannel('client')
res = yield client.thrift(service.Foo.hello(), hostport=server.hostport)
assert res.body == 'foo'
res = yield client.thrift(service.Bar.hello(), hostport=server.hostport)
assert res.body == 'bar'
|
|
db1b12b26ee6136198cdc51ca12c7ab17977bc05
|
rajab_roza/__init__.py
|
rajab_roza/__init__.py
|
from datetime import timedelta
import yaml
from hijri_date import HijriDate
from usno_data import USNO_Data
class RajabRoza:
def __init__(self, lat, lng, start_year, end_year):
self.lat = lat
self.lng = lng
self.start_year = start_year
self.end_year = end_year
self.usno_data = USNO_Data(lat, lng)
def get_roza_durations_for_year(self, hijri_year):
durations = (timedelta.max, timedelta.min)
start_date = HijriDate(hijri_year, 7, 1).to_gregorian()
self.usno_data.get_data(start_date.year)
for day in range(0, 30):
date = start_date + timedelta(day)
if date.year != start_date.year:
self.usno_data.get_data(date.year)
sunrise = self.usno_data.sunrise(date.month, date.day)
sunset = self.usno_data.sunset(date.month, date.day)
duration = sunset - sunrise
durations = (
min(durations[0], duration), max(durations[1], duration)
)
return durations
def get_roza_durations(self):
self.durations = [
self.get_roza_durations_for_year(year)
for year in range(self.start_year, self.end_year + 1)
]
def save_to_yaml(self, filename):
stream = open(filename, 'w')
yaml.safe_dump({
'lat': self.lat,
'lng': self.lng,
'start_year': self.start_year,
'end_year': self.end_year,
'durations': [
[duration[0].total_seconds(), duration[1].total_seconds()]
for duration in self.durations
]
}, stream, allow_unicode=True)
stream.close()
@staticmethod
def load_from_yaml(filename):
stream = open(filename, 'r')
data = yaml.load(stream)
stream.close()
rajab_roza = RajabRoza(
data['lat'], data['lng'], data['start_year'], data['end_year']
)
rajab_roza.durations = [
(timedelta(seconds=duration[0]), timedelta(seconds=duration[1]))
for duration in data['durations']
]
return rajab_roza
|
Add class to accumulate and load/save roza durations.
|
Add class to accumulate and load/save roza durations.
|
Python
|
mit
|
mygulamali/rajab_roza
|
Add class to accumulate and load/save roza durations.
|
from datetime import timedelta
import yaml
from hijri_date import HijriDate
from usno_data import USNO_Data
class RajabRoza:
def __init__(self, lat, lng, start_year, end_year):
self.lat = lat
self.lng = lng
self.start_year = start_year
self.end_year = end_year
self.usno_data = USNO_Data(lat, lng)
def get_roza_durations_for_year(self, hijri_year):
durations = (timedelta.max, timedelta.min)
start_date = HijriDate(hijri_year, 7, 1).to_gregorian()
self.usno_data.get_data(start_date.year)
for day in range(0, 30):
date = start_date + timedelta(day)
if date.year != start_date.year:
self.usno_data.get_data(date.year)
sunrise = self.usno_data.sunrise(date.month, date.day)
sunset = self.usno_data.sunset(date.month, date.day)
duration = sunset - sunrise
durations = (
min(durations[0], duration), max(durations[1], duration)
)
return durations
def get_roza_durations(self):
self.durations = [
self.get_roza_durations_for_year(year)
for year in range(self.start_year, self.end_year + 1)
]
def save_to_yaml(self, filename):
stream = open(filename, 'w')
yaml.safe_dump({
'lat': self.lat,
'lng': self.lng,
'start_year': self.start_year,
'end_year': self.end_year,
'durations': [
[duration[0].total_seconds(), duration[1].total_seconds()]
for duration in self.durations
]
}, stream, allow_unicode=True)
stream.close()
@staticmethod
def load_from_yaml(filename):
stream = open(filename, 'r')
data = yaml.load(stream)
stream.close()
rajab_roza = RajabRoza(
data['lat'], data['lng'], data['start_year'], data['end_year']
)
rajab_roza.durations = [
(timedelta(seconds=duration[0]), timedelta(seconds=duration[1]))
for duration in data['durations']
]
return rajab_roza
|
<commit_before><commit_msg>Add class to accumulate and load/save roza durations.<commit_after>
|
from datetime import timedelta
import yaml
from hijri_date import HijriDate
from usno_data import USNO_Data
class RajabRoza:
def __init__(self, lat, lng, start_year, end_year):
self.lat = lat
self.lng = lng
self.start_year = start_year
self.end_year = end_year
self.usno_data = USNO_Data(lat, lng)
def get_roza_durations_for_year(self, hijri_year):
durations = (timedelta.max, timedelta.min)
start_date = HijriDate(hijri_year, 7, 1).to_gregorian()
self.usno_data.get_data(start_date.year)
for day in range(0, 30):
date = start_date + timedelta(day)
if date.year != start_date.year:
self.usno_data.get_data(date.year)
sunrise = self.usno_data.sunrise(date.month, date.day)
sunset = self.usno_data.sunset(date.month, date.day)
duration = sunset - sunrise
durations = (
min(durations[0], duration), max(durations[1], duration)
)
return durations
def get_roza_durations(self):
self.durations = [
self.get_roza_durations_for_year(year)
for year in range(self.start_year, self.end_year + 1)
]
def save_to_yaml(self, filename):
stream = open(filename, 'w')
yaml.safe_dump({
'lat': self.lat,
'lng': self.lng,
'start_year': self.start_year,
'end_year': self.end_year,
'durations': [
[duration[0].total_seconds(), duration[1].total_seconds()]
for duration in self.durations
]
}, stream, allow_unicode=True)
stream.close()
@staticmethod
def load_from_yaml(filename):
stream = open(filename, 'r')
data = yaml.load(stream)
stream.close()
rajab_roza = RajabRoza(
data['lat'], data['lng'], data['start_year'], data['end_year']
)
rajab_roza.durations = [
(timedelta(seconds=duration[0]), timedelta(seconds=duration[1]))
for duration in data['durations']
]
return rajab_roza
|
Add class to accumulate and load/save roza durations.from datetime import timedelta
import yaml
from hijri_date import HijriDate
from usno_data import USNO_Data
class RajabRoza:
def __init__(self, lat, lng, start_year, end_year):
self.lat = lat
self.lng = lng
self.start_year = start_year
self.end_year = end_year
self.usno_data = USNO_Data(lat, lng)
def get_roza_durations_for_year(self, hijri_year):
durations = (timedelta.max, timedelta.min)
start_date = HijriDate(hijri_year, 7, 1).to_gregorian()
self.usno_data.get_data(start_date.year)
for day in range(0, 30):
date = start_date + timedelta(day)
if date.year != start_date.year:
self.usno_data.get_data(date.year)
sunrise = self.usno_data.sunrise(date.month, date.day)
sunset = self.usno_data.sunset(date.month, date.day)
duration = sunset - sunrise
durations = (
min(durations[0], duration), max(durations[1], duration)
)
return durations
def get_roza_durations(self):
self.durations = [
self.get_roza_durations_for_year(year)
for year in range(self.start_year, self.end_year + 1)
]
def save_to_yaml(self, filename):
stream = open(filename, 'w')
yaml.safe_dump({
'lat': self.lat,
'lng': self.lng,
'start_year': self.start_year,
'end_year': self.end_year,
'durations': [
[duration[0].total_seconds(), duration[1].total_seconds()]
for duration in self.durations
]
}, stream, allow_unicode=True)
stream.close()
@staticmethod
def load_from_yaml(filename):
stream = open(filename, 'r')
data = yaml.load(stream)
stream.close()
rajab_roza = RajabRoza(
data['lat'], data['lng'], data['start_year'], data['end_year']
)
rajab_roza.durations = [
(timedelta(seconds=duration[0]), timedelta(seconds=duration[1]))
for duration in data['durations']
]
return rajab_roza
|
<commit_before><commit_msg>Add class to accumulate and load/save roza durations.<commit_after>from datetime import timedelta
import yaml
from hijri_date import HijriDate
from usno_data import USNO_Data
class RajabRoza:
def __init__(self, lat, lng, start_year, end_year):
self.lat = lat
self.lng = lng
self.start_year = start_year
self.end_year = end_year
self.usno_data = USNO_Data(lat, lng)
def get_roza_durations_for_year(self, hijri_year):
durations = (timedelta.max, timedelta.min)
start_date = HijriDate(hijri_year, 7, 1).to_gregorian()
self.usno_data.get_data(start_date.year)
for day in range(0, 30):
date = start_date + timedelta(day)
if date.year != start_date.year:
self.usno_data.get_data(date.year)
sunrise = self.usno_data.sunrise(date.month, date.day)
sunset = self.usno_data.sunset(date.month, date.day)
duration = sunset - sunrise
durations = (
min(durations[0], duration), max(durations[1], duration)
)
return durations
def get_roza_durations(self):
self.durations = [
self.get_roza_durations_for_year(year)
for year in range(self.start_year, self.end_year + 1)
]
def save_to_yaml(self, filename):
stream = open(filename, 'w')
yaml.safe_dump({
'lat': self.lat,
'lng': self.lng,
'start_year': self.start_year,
'end_year': self.end_year,
'durations': [
[duration[0].total_seconds(), duration[1].total_seconds()]
for duration in self.durations
]
}, stream, allow_unicode=True)
stream.close()
@staticmethod
def load_from_yaml(filename):
stream = open(filename, 'r')
data = yaml.load(stream)
stream.close()
rajab_roza = RajabRoza(
data['lat'], data['lng'], data['start_year'], data['end_year']
)
rajab_roza.durations = [
(timedelta(seconds=duration[0]), timedelta(seconds=duration[1]))
for duration in data['durations']
]
return rajab_roza
|
|
e94f83ea8f409b83f27fc4682c81706f3003bbba
|
bin/merge_apis.py
|
bin/merge_apis.py
|
#!/usr/bin/env python
import sys
import json
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
data = sys.argv[1:]
merged_data = {'data': []}
for path, tag in zip(data[0::2], data[1::2]):
with open(path, 'r') as handle:
ldata = json.load(handle)
for element in ldata['data']:
element['tag'] = tag
merged_data['data'].append(element)
json.dump(merged_data, sys.stdout)
|
Add script to merge json files
|
Add script to merge json files
|
Python
|
mit
|
gregvonkuster/cargo-port,gregvonkuster/cargo-port,erasche/community-package-cache,erasche/community-package-cache,erasche/community-package-cache,galaxyproject/cargo-port,gregvonkuster/cargo-port,galaxyproject/cargo-port
|
Add script to merge json files
|
#!/usr/bin/env python
import sys
import json
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
data = sys.argv[1:]
merged_data = {'data': []}
for path, tag in zip(data[0::2], data[1::2]):
with open(path, 'r') as handle:
ldata = json.load(handle)
for element in ldata['data']:
element['tag'] = tag
merged_data['data'].append(element)
json.dump(merged_data, sys.stdout)
|
<commit_before><commit_msg>Add script to merge json files<commit_after>
|
#!/usr/bin/env python
import sys
import json
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
data = sys.argv[1:]
merged_data = {'data': []}
for path, tag in zip(data[0::2], data[1::2]):
with open(path, 'r') as handle:
ldata = json.load(handle)
for element in ldata['data']:
element['tag'] = tag
merged_data['data'].append(element)
json.dump(merged_data, sys.stdout)
|
Add script to merge json files#!/usr/bin/env python
import sys
import json
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
data = sys.argv[1:]
merged_data = {'data': []}
for path, tag in zip(data[0::2], data[1::2]):
with open(path, 'r') as handle:
ldata = json.load(handle)
for element in ldata['data']:
element['tag'] = tag
merged_data['data'].append(element)
json.dump(merged_data, sys.stdout)
|
<commit_before><commit_msg>Add script to merge json files<commit_after>#!/usr/bin/env python
import sys
import json
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
data = sys.argv[1:]
merged_data = {'data': []}
for path, tag in zip(data[0::2], data[1::2]):
with open(path, 'r') as handle:
ldata = json.load(handle)
for element in ldata['data']:
element['tag'] = tag
merged_data['data'].append(element)
json.dump(merged_data, sys.stdout)
|
|
c5c4f619fcc6052782e7f68968656ef3de8b5489
|
shoop/core/migrations/0007_product_media.py
|
shoop/core/migrations/0007_product_media.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('shoop', '0006_shop_add_logo_and_public_name'),
]
operations = [
migrations.AlterField(
model_name='productmedia',
name='external_url',
field=models.URLField(help_text="Enter URL to external file. If this field is filled, the selected media doesn't apply.", null=True, verbose_name='URL', blank=True),
),
]
|
Add missing migration for product media external_url
|
Core: Add missing migration for product media external_url
|
Python
|
agpl-3.0
|
suutari/shoop,hrayr-artunyan/shuup,jorge-marques/shoop,suutari/shoop,hrayr-artunyan/shuup,suutari-ai/shoop,taedori81/shoop,shoopio/shoop,shawnadelic/shuup,shoopio/shoop,suutari-ai/shoop,shawnadelic/shuup,suutari-ai/shoop,taedori81/shoop,jorge-marques/shoop,jorge-marques/shoop,suutari/shoop,akx/shoop,akx/shoop,taedori81/shoop,akx/shoop,hrayr-artunyan/shuup,shoopio/shoop,shawnadelic/shuup
|
Core: Add missing migration for product media external_url
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('shoop', '0006_shop_add_logo_and_public_name'),
]
operations = [
migrations.AlterField(
model_name='productmedia',
name='external_url',
field=models.URLField(help_text="Enter URL to external file. If this field is filled, the selected media doesn't apply.", null=True, verbose_name='URL', blank=True),
),
]
|
<commit_before><commit_msg>Core: Add missing migration for product media external_url<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('shoop', '0006_shop_add_logo_and_public_name'),
]
operations = [
migrations.AlterField(
model_name='productmedia',
name='external_url',
field=models.URLField(help_text="Enter URL to external file. If this field is filled, the selected media doesn't apply.", null=True, verbose_name='URL', blank=True),
),
]
|
Core: Add missing migration for product media external_url# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('shoop', '0006_shop_add_logo_and_public_name'),
]
operations = [
migrations.AlterField(
model_name='productmedia',
name='external_url',
field=models.URLField(help_text="Enter URL to external file. If this field is filled, the selected media doesn't apply.", null=True, verbose_name='URL', blank=True),
),
]
|
<commit_before><commit_msg>Core: Add missing migration for product media external_url<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('shoop', '0006_shop_add_logo_and_public_name'),
]
operations = [
migrations.AlterField(
model_name='productmedia',
name='external_url',
field=models.URLField(help_text="Enter URL to external file. If this field is filled, the selected media doesn't apply.", null=True, verbose_name='URL', blank=True),
),
]
|
|
95ca8cdbd46dba984774cf5eb484642f2c007de3
|
src/p2.py
|
src/p2.py
|
from itertools import takewhile
def fib(n):
if n == 0:
return 1
elif n == 1:
return 2
else:
return fib(n-2) + fib(n-1)
def fib_iter():
n = 0
while True:
yield fib(n)
n += 1
def calc():
values = takewhile(lambda x: x <= 4000000, fib_iter())
return sum(x for x in values if (x % 2) == 0)
if __name__ == "__main__":
print(calc())
|
Add solution to second problem
|
Add solution to second problem
|
Python
|
mit
|
gsnedders/projecteuler
|
Add solution to second problem
|
from itertools import takewhile
def fib(n):
if n == 0:
return 1
elif n == 1:
return 2
else:
return fib(n-2) + fib(n-1)
def fib_iter():
n = 0
while True:
yield fib(n)
n += 1
def calc():
values = takewhile(lambda x: x <= 4000000, fib_iter())
return sum(x for x in values if (x % 2) == 0)
if __name__ == "__main__":
print(calc())
|
<commit_before><commit_msg>Add solution to second problem<commit_after>
|
from itertools import takewhile
def fib(n):
if n == 0:
return 1
elif n == 1:
return 2
else:
return fib(n-2) + fib(n-1)
def fib_iter():
n = 0
while True:
yield fib(n)
n += 1
def calc():
values = takewhile(lambda x: x <= 4000000, fib_iter())
return sum(x for x in values if (x % 2) == 0)
if __name__ == "__main__":
print(calc())
|
Add solution to second problemfrom itertools import takewhile
def fib(n):
if n == 0:
return 1
elif n == 1:
return 2
else:
return fib(n-2) + fib(n-1)
def fib_iter():
n = 0
while True:
yield fib(n)
n += 1
def calc():
values = takewhile(lambda x: x <= 4000000, fib_iter())
return sum(x for x in values if (x % 2) == 0)
if __name__ == "__main__":
print(calc())
|
<commit_before><commit_msg>Add solution to second problem<commit_after>from itertools import takewhile
def fib(n):
if n == 0:
return 1
elif n == 1:
return 2
else:
return fib(n-2) + fib(n-1)
def fib_iter():
n = 0
while True:
yield fib(n)
n += 1
def calc():
values = takewhile(lambda x: x <= 4000000, fib_iter())
return sum(x for x in values if (x % 2) == 0)
if __name__ == "__main__":
print(calc())
|
|
0b5d508f0c8c04443de7858b1c3af2f05ad8a6f0
|
traittypes/tests/test_validators.py
|
traittypes/tests/test_validators.py
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import pytest
from traitlets import HasTraits, TraitError
from ..traittypes import SciType
def test_coercion_validator():
# Test with a squeeze coercion
def truncate(trait, value):
return value[:10]
class Foo(HasTraits):
bar = SciType().valid(truncate)
foo = Foo(bar=list(range(20)))
assert foo.bar == list(range(10))
foo.bar = list(range(10, 40))
assert foo.bar == list(range(10, 20))
def test_validation_error():
# Test with a squeeze coercion
def maxlen(trait, value):
if len(value) > 10:
raise ValueError('Too long sequence!')
return value
class Foo(HasTraits):
bar = SciType().valid(maxlen)
# Check that it works as expected:
foo = Foo(bar=list(range(5)))
assert foo.bar == list(range(5))
# Check that it fails as expected:
with pytest.raises(TraitError): # Should convert ValueError to TraitError
foo.bar = list(range(10, 40))
assert foo.bar == list(range(5))
# Check that it can again be set correctly
foo = Foo(bar=list(range(5, 10)))
assert foo.bar == list(range(5, 10))
|
Add tests for SciType validators
|
Add tests for SciType validators
|
Python
|
bsd-3-clause
|
jupyter-incubator/traittypes,SylvainCorlay/traittypes
|
Add tests for SciType validators
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import pytest
from traitlets import HasTraits, TraitError
from ..traittypes import SciType
def test_coercion_validator():
# Test with a squeeze coercion
def truncate(trait, value):
return value[:10]
class Foo(HasTraits):
bar = SciType().valid(truncate)
foo = Foo(bar=list(range(20)))
assert foo.bar == list(range(10))
foo.bar = list(range(10, 40))
assert foo.bar == list(range(10, 20))
def test_validation_error():
# Test with a squeeze coercion
def maxlen(trait, value):
if len(value) > 10:
raise ValueError('Too long sequence!')
return value
class Foo(HasTraits):
bar = SciType().valid(maxlen)
# Check that it works as expected:
foo = Foo(bar=list(range(5)))
assert foo.bar == list(range(5))
# Check that it fails as expected:
with pytest.raises(TraitError): # Should convert ValueError to TraitError
foo.bar = list(range(10, 40))
assert foo.bar == list(range(5))
# Check that it can again be set correctly
foo = Foo(bar=list(range(5, 10)))
assert foo.bar == list(range(5, 10))
|
<commit_before><commit_msg>Add tests for SciType validators<commit_after>
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import pytest
from traitlets import HasTraits, TraitError
from ..traittypes import SciType
def test_coercion_validator():
# Test with a squeeze coercion
def truncate(trait, value):
return value[:10]
class Foo(HasTraits):
bar = SciType().valid(truncate)
foo = Foo(bar=list(range(20)))
assert foo.bar == list(range(10))
foo.bar = list(range(10, 40))
assert foo.bar == list(range(10, 20))
def test_validation_error():
# Test with a squeeze coercion
def maxlen(trait, value):
if len(value) > 10:
raise ValueError('Too long sequence!')
return value
class Foo(HasTraits):
bar = SciType().valid(maxlen)
# Check that it works as expected:
foo = Foo(bar=list(range(5)))
assert foo.bar == list(range(5))
# Check that it fails as expected:
with pytest.raises(TraitError): # Should convert ValueError to TraitError
foo.bar = list(range(10, 40))
assert foo.bar == list(range(5))
# Check that it can again be set correctly
foo = Foo(bar=list(range(5, 10)))
assert foo.bar == list(range(5, 10))
|
Add tests for SciType validators#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import pytest
from traitlets import HasTraits, TraitError
from ..traittypes import SciType
def test_coercion_validator():
# Test with a squeeze coercion
def truncate(trait, value):
return value[:10]
class Foo(HasTraits):
bar = SciType().valid(truncate)
foo = Foo(bar=list(range(20)))
assert foo.bar == list(range(10))
foo.bar = list(range(10, 40))
assert foo.bar == list(range(10, 20))
def test_validation_error():
# Test with a squeeze coercion
def maxlen(trait, value):
if len(value) > 10:
raise ValueError('Too long sequence!')
return value
class Foo(HasTraits):
bar = SciType().valid(maxlen)
# Check that it works as expected:
foo = Foo(bar=list(range(5)))
assert foo.bar == list(range(5))
# Check that it fails as expected:
with pytest.raises(TraitError): # Should convert ValueError to TraitError
foo.bar = list(range(10, 40))
assert foo.bar == list(range(5))
# Check that it can again be set correctly
foo = Foo(bar=list(range(5, 10)))
assert foo.bar == list(range(5, 10))
|
<commit_before><commit_msg>Add tests for SciType validators<commit_after>#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import pytest
from traitlets import HasTraits, TraitError
from ..traittypes import SciType
def test_coercion_validator():
# Test with a squeeze coercion
def truncate(trait, value):
return value[:10]
class Foo(HasTraits):
bar = SciType().valid(truncate)
foo = Foo(bar=list(range(20)))
assert foo.bar == list(range(10))
foo.bar = list(range(10, 40))
assert foo.bar == list(range(10, 20))
def test_validation_error():
# Test with a squeeze coercion
def maxlen(trait, value):
if len(value) > 10:
raise ValueError('Too long sequence!')
return value
class Foo(HasTraits):
bar = SciType().valid(maxlen)
# Check that it works as expected:
foo = Foo(bar=list(range(5)))
assert foo.bar == list(range(5))
# Check that it fails as expected:
with pytest.raises(TraitError): # Should convert ValueError to TraitError
foo.bar = list(range(10, 40))
assert foo.bar == list(range(5))
# Check that it can again be set correctly
foo = Foo(bar=list(range(5, 10)))
assert foo.bar == list(range(5, 10))
|
|
ace3ea15bc99cb384a59cc27d38a98d7aa7a2948
|
tests/pytests/unit/auth/test_rest.py
|
tests/pytests/unit/auth/test_rest.py
|
import pytest
import salt.auth.rest as rest
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
"""
Rest module configuration
"""
return {
rest: {
"__opts__": {
"external_auth": {
"rest": {"^url": "https://test_url/rest", "fred": [".*", "@runner"]}
}
}
}
}
def test_rest_auth_config():
ret = rest._rest_auth_setup()
assert ret == "https://test_url/rest"
def test_fetch_call_failed():
with patch("salt.utils.http.query", MagicMock(return_value={"status": 401})):
ret = rest.fetch("foo", None)
assert ret is False
def test_fetch_call_success_dict_none():
with patch(
"salt.utils.http.query", MagicMock(return_value={"status": 200, "dict": None})
):
ret = rest.fetch("foo", None)
assert ret == []
def test_fetch_call_success_dict_acl():
with patch(
"salt.utils.http.query",
MagicMock(return_value={"status": 200, "dict": {"foo": ["@wheel"]}}),
):
ret = rest.fetch("foo", None)
assert ret == {"foo": ["@wheel"]}
def test_auth_nopass():
ret = rest.auth("foo", None)
assert ret is False
def test_auth_nouser():
ret = rest.auth(None, "foo")
assert ret is False
def test_auth_nouserandpass():
ret = rest.auth(None, None)
assert ret is False
def test_auth_ok():
with patch(
"salt.utils.http.query",
MagicMock(return_value={"status": 200, "dict": ["@wheel"]}),
):
ret = rest.auth("foo", None)
assert ret is True
def test_acl_without_merge():
ret = rest.acl("fred", password="password")
assert ret == [".*", "@runner"]
def test_acl_unauthorized():
with patch("salt.utils.http.query", MagicMock(return_value={"status": 400})):
ret = rest.acl("foo", password="password")
assert ret is None
def test_acl_no_merge():
with patch(
"salt.utils.http.query", MagicMock(return_value={"status": 200, "dict": None})
):
ret = rest.acl("fred", password="password")
assert ret == [".*", "@runner"]
def test_acl_merge():
with patch(
"salt.utils.http.query",
MagicMock(return_value={"status": 200, "dict": ["@wheel"]}),
):
ret = rest.acl("fred", password="password")
assert ret == [".*", "@runner", "@wheel"]
|
Add tests for REST eauth
|
Add tests for REST eauth
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add tests for REST eauth
|
import pytest
import salt.auth.rest as rest
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
"""
Rest module configuration
"""
return {
rest: {
"__opts__": {
"external_auth": {
"rest": {"^url": "https://test_url/rest", "fred": [".*", "@runner"]}
}
}
}
}
def test_rest_auth_config():
ret = rest._rest_auth_setup()
assert ret == "https://test_url/rest"
def test_fetch_call_failed():
with patch("salt.utils.http.query", MagicMock(return_value={"status": 401})):
ret = rest.fetch("foo", None)
assert ret is False
def test_fetch_call_success_dict_none():
with patch(
"salt.utils.http.query", MagicMock(return_value={"status": 200, "dict": None})
):
ret = rest.fetch("foo", None)
assert ret == []
def test_fetch_call_success_dict_acl():
with patch(
"salt.utils.http.query",
MagicMock(return_value={"status": 200, "dict": {"foo": ["@wheel"]}}),
):
ret = rest.fetch("foo", None)
assert ret == {"foo": ["@wheel"]}
def test_auth_nopass():
ret = rest.auth("foo", None)
assert ret is False
def test_auth_nouser():
ret = rest.auth(None, "foo")
assert ret is False
def test_auth_nouserandpass():
ret = rest.auth(None, None)
assert ret is False
def test_auth_ok():
with patch(
"salt.utils.http.query",
MagicMock(return_value={"status": 200, "dict": ["@wheel"]}),
):
ret = rest.auth("foo", None)
assert ret is True
def test_acl_without_merge():
ret = rest.acl("fred", password="password")
assert ret == [".*", "@runner"]
def test_acl_unauthorized():
with patch("salt.utils.http.query", MagicMock(return_value={"status": 400})):
ret = rest.acl("foo", password="password")
assert ret is None
def test_acl_no_merge():
with patch(
"salt.utils.http.query", MagicMock(return_value={"status": 200, "dict": None})
):
ret = rest.acl("fred", password="password")
assert ret == [".*", "@runner"]
def test_acl_merge():
with patch(
"salt.utils.http.query",
MagicMock(return_value={"status": 200, "dict": ["@wheel"]}),
):
ret = rest.acl("fred", password="password")
assert ret == [".*", "@runner", "@wheel"]
|
<commit_before><commit_msg>Add tests for REST eauth<commit_after>
|
import pytest
import salt.auth.rest as rest
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
"""
Rest module configuration
"""
return {
rest: {
"__opts__": {
"external_auth": {
"rest": {"^url": "https://test_url/rest", "fred": [".*", "@runner"]}
}
}
}
}
def test_rest_auth_config():
ret = rest._rest_auth_setup()
assert ret == "https://test_url/rest"
def test_fetch_call_failed():
with patch("salt.utils.http.query", MagicMock(return_value={"status": 401})):
ret = rest.fetch("foo", None)
assert ret is False
def test_fetch_call_success_dict_none():
with patch(
"salt.utils.http.query", MagicMock(return_value={"status": 200, "dict": None})
):
ret = rest.fetch("foo", None)
assert ret == []
def test_fetch_call_success_dict_acl():
with patch(
"salt.utils.http.query",
MagicMock(return_value={"status": 200, "dict": {"foo": ["@wheel"]}}),
):
ret = rest.fetch("foo", None)
assert ret == {"foo": ["@wheel"]}
def test_auth_nopass():
ret = rest.auth("foo", None)
assert ret is False
def test_auth_nouser():
ret = rest.auth(None, "foo")
assert ret is False
def test_auth_nouserandpass():
ret = rest.auth(None, None)
assert ret is False
def test_auth_ok():
with patch(
"salt.utils.http.query",
MagicMock(return_value={"status": 200, "dict": ["@wheel"]}),
):
ret = rest.auth("foo", None)
assert ret is True
def test_acl_without_merge():
ret = rest.acl("fred", password="password")
assert ret == [".*", "@runner"]
def test_acl_unauthorized():
with patch("salt.utils.http.query", MagicMock(return_value={"status": 400})):
ret = rest.acl("foo", password="password")
assert ret is None
def test_acl_no_merge():
with patch(
"salt.utils.http.query", MagicMock(return_value={"status": 200, "dict": None})
):
ret = rest.acl("fred", password="password")
assert ret == [".*", "@runner"]
def test_acl_merge():
with patch(
"salt.utils.http.query",
MagicMock(return_value={"status": 200, "dict": ["@wheel"]}),
):
ret = rest.acl("fred", password="password")
assert ret == [".*", "@runner", "@wheel"]
|
Add tests for REST eauthimport pytest
import salt.auth.rest as rest
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
"""
Rest module configuration
"""
return {
rest: {
"__opts__": {
"external_auth": {
"rest": {"^url": "https://test_url/rest", "fred": [".*", "@runner"]}
}
}
}
}
def test_rest_auth_config():
ret = rest._rest_auth_setup()
assert ret == "https://test_url/rest"
def test_fetch_call_failed():
with patch("salt.utils.http.query", MagicMock(return_value={"status": 401})):
ret = rest.fetch("foo", None)
assert ret is False
def test_fetch_call_success_dict_none():
with patch(
"salt.utils.http.query", MagicMock(return_value={"status": 200, "dict": None})
):
ret = rest.fetch("foo", None)
assert ret == []
def test_fetch_call_success_dict_acl():
with patch(
"salt.utils.http.query",
MagicMock(return_value={"status": 200, "dict": {"foo": ["@wheel"]}}),
):
ret = rest.fetch("foo", None)
assert ret == {"foo": ["@wheel"]}
def test_auth_nopass():
ret = rest.auth("foo", None)
assert ret is False
def test_auth_nouser():
ret = rest.auth(None, "foo")
assert ret is False
def test_auth_nouserandpass():
ret = rest.auth(None, None)
assert ret is False
def test_auth_ok():
with patch(
"salt.utils.http.query",
MagicMock(return_value={"status": 200, "dict": ["@wheel"]}),
):
ret = rest.auth("foo", None)
assert ret is True
def test_acl_without_merge():
ret = rest.acl("fred", password="password")
assert ret == [".*", "@runner"]
def test_acl_unauthorized():
with patch("salt.utils.http.query", MagicMock(return_value={"status": 400})):
ret = rest.acl("foo", password="password")
assert ret is None
def test_acl_no_merge():
with patch(
"salt.utils.http.query", MagicMock(return_value={"status": 200, "dict": None})
):
ret = rest.acl("fred", password="password")
assert ret == [".*", "@runner"]
def test_acl_merge():
with patch(
"salt.utils.http.query",
MagicMock(return_value={"status": 200, "dict": ["@wheel"]}),
):
ret = rest.acl("fred", password="password")
assert ret == [".*", "@runner", "@wheel"]
|
<commit_before><commit_msg>Add tests for REST eauth<commit_after>import pytest
import salt.auth.rest as rest
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
"""
Rest module configuration
"""
return {
rest: {
"__opts__": {
"external_auth": {
"rest": {"^url": "https://test_url/rest", "fred": [".*", "@runner"]}
}
}
}
}
def test_rest_auth_config():
ret = rest._rest_auth_setup()
assert ret == "https://test_url/rest"
def test_fetch_call_failed():
with patch("salt.utils.http.query", MagicMock(return_value={"status": 401})):
ret = rest.fetch("foo", None)
assert ret is False
def test_fetch_call_success_dict_none():
with patch(
"salt.utils.http.query", MagicMock(return_value={"status": 200, "dict": None})
):
ret = rest.fetch("foo", None)
assert ret == []
def test_fetch_call_success_dict_acl():
with patch(
"salt.utils.http.query",
MagicMock(return_value={"status": 200, "dict": {"foo": ["@wheel"]}}),
):
ret = rest.fetch("foo", None)
assert ret == {"foo": ["@wheel"]}
def test_auth_nopass():
ret = rest.auth("foo", None)
assert ret is False
def test_auth_nouser():
ret = rest.auth(None, "foo")
assert ret is False
def test_auth_nouserandpass():
ret = rest.auth(None, None)
assert ret is False
def test_auth_ok():
with patch(
"salt.utils.http.query",
MagicMock(return_value={"status": 200, "dict": ["@wheel"]}),
):
ret = rest.auth("foo", None)
assert ret is True
def test_acl_without_merge():
ret = rest.acl("fred", password="password")
assert ret == [".*", "@runner"]
def test_acl_unauthorized():
with patch("salt.utils.http.query", MagicMock(return_value={"status": 400})):
ret = rest.acl("foo", password="password")
assert ret is None
def test_acl_no_merge():
with patch(
"salt.utils.http.query", MagicMock(return_value={"status": 200, "dict": None})
):
ret = rest.acl("fred", password="password")
assert ret == [".*", "@runner"]
def test_acl_merge():
with patch(
"salt.utils.http.query",
MagicMock(return_value={"status": 200, "dict": ["@wheel"]}),
):
ret = rest.acl("fred", password="password")
assert ret == [".*", "@runner", "@wheel"]
|
|
2c17cb829516d0a3856d870b12f25271efb296ce
|
examples/kde_joyplot.py
|
examples/kde_joyplot.py
|
"""
Overlapping KDEs ('Joy Division plot')
======================================
"""
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white", rc={"axes.facecolor": (0, 0, 0, 0)})
# Create the data
rs = np.random.RandomState(1979)
x = rs.randn(500)
g = np.tile(list("ABCDEFGHIJ"), 50)
df = pd.DataFrame(dict(x=x, g=g))
m = df.g.map(ord)
df["x"] += m
# Initialize the FacetGrid object
g = sns.FacetGrid(df, row="g", hue="g", aspect=12, size=.5, palette="tab10")
# Draw the densities in a few steps
g.map(sns.kdeplot, "x", clip_on=False, shade=True, alpha=1, lw=1.5, bw=.2)
g.map(sns.kdeplot, "x", clip_on=False, color="w", lw=2, bw=.2)
g.map(plt.axhline, y=0, lw=2, clip_on=False)
# Define and use a simple function to label the plot in axes coordinates
def label(x, color, label):
ax = plt.gca()
ax.text(0, .2, label, fontweight="bold", color=color,
ha="left", va="center", transform=ax.transAxes)
g.map(label, "x")
# Set the subplots to overlap
g.fig.subplots_adjust(hspace=-.25)
# Remove axes details that don't play well with overlap
g.set_titles("")
g.set(yticks=[])
g.despine(bottom=True, left=True)
|
Add an example script to make a Joy Division plot
|
Add an example script to make a Joy Division plot
|
Python
|
bsd-3-clause
|
phobson/seaborn,lukauskas/seaborn,anntzer/seaborn,arokem/seaborn,phobson/seaborn,anntzer/seaborn,arokem/seaborn,mwaskom/seaborn,lukauskas/seaborn,petebachant/seaborn,mwaskom/seaborn,sauliusl/seaborn
|
Add an example script to make a Joy Division plot
|
"""
Overlapping KDEs ('Joy Division plot')
======================================
"""
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white", rc={"axes.facecolor": (0, 0, 0, 0)})
# Create the data
rs = np.random.RandomState(1979)
x = rs.randn(500)
g = np.tile(list("ABCDEFGHIJ"), 50)
df = pd.DataFrame(dict(x=x, g=g))
m = df.g.map(ord)
df["x"] += m
# Initialize the FacetGrid object
g = sns.FacetGrid(df, row="g", hue="g", aspect=12, size=.5, palette="tab10")
# Draw the densities in a few steps
g.map(sns.kdeplot, "x", clip_on=False, shade=True, alpha=1, lw=1.5, bw=.2)
g.map(sns.kdeplot, "x", clip_on=False, color="w", lw=2, bw=.2)
g.map(plt.axhline, y=0, lw=2, clip_on=False)
# Define and use a simple function to label the plot in axes coordinates
def label(x, color, label):
ax = plt.gca()
ax.text(0, .2, label, fontweight="bold", color=color,
ha="left", va="center", transform=ax.transAxes)
g.map(label, "x")
# Set the subplots to overlap
g.fig.subplots_adjust(hspace=-.25)
# Remove axes details that don't play well with overlap
g.set_titles("")
g.set(yticks=[])
g.despine(bottom=True, left=True)
|
<commit_before><commit_msg>Add an example script to make a Joy Division plot<commit_after>
|
"""
Overlapping KDEs ('Joy Division plot')
======================================
"""
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white", rc={"axes.facecolor": (0, 0, 0, 0)})
# Create the data
rs = np.random.RandomState(1979)
x = rs.randn(500)
g = np.tile(list("ABCDEFGHIJ"), 50)
df = pd.DataFrame(dict(x=x, g=g))
m = df.g.map(ord)
df["x"] += m
# Initialize the FacetGrid object
g = sns.FacetGrid(df, row="g", hue="g", aspect=12, size=.5, palette="tab10")
# Draw the densities in a few steps
g.map(sns.kdeplot, "x", clip_on=False, shade=True, alpha=1, lw=1.5, bw=.2)
g.map(sns.kdeplot, "x", clip_on=False, color="w", lw=2, bw=.2)
g.map(plt.axhline, y=0, lw=2, clip_on=False)
# Define and use a simple function to label the plot in axes coordinates
def label(x, color, label):
ax = plt.gca()
ax.text(0, .2, label, fontweight="bold", color=color,
ha="left", va="center", transform=ax.transAxes)
g.map(label, "x")
# Set the subplots to overlap
g.fig.subplots_adjust(hspace=-.25)
# Remove axes details that don't play well with overlap
g.set_titles("")
g.set(yticks=[])
g.despine(bottom=True, left=True)
|
Add an example script to make a Joy Division plot"""
Overlapping KDEs ('Joy Division plot')
======================================
"""
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white", rc={"axes.facecolor": (0, 0, 0, 0)})
# Create the data
rs = np.random.RandomState(1979)
x = rs.randn(500)
g = np.tile(list("ABCDEFGHIJ"), 50)
df = pd.DataFrame(dict(x=x, g=g))
m = df.g.map(ord)
df["x"] += m
# Initialize the FacetGrid object
g = sns.FacetGrid(df, row="g", hue="g", aspect=12, size=.5, palette="tab10")
# Draw the densities in a few steps
g.map(sns.kdeplot, "x", clip_on=False, shade=True, alpha=1, lw=1.5, bw=.2)
g.map(sns.kdeplot, "x", clip_on=False, color="w", lw=2, bw=.2)
g.map(plt.axhline, y=0, lw=2, clip_on=False)
# Define and use a simple function to label the plot in axes coordinates
def label(x, color, label):
ax = plt.gca()
ax.text(0, .2, label, fontweight="bold", color=color,
ha="left", va="center", transform=ax.transAxes)
g.map(label, "x")
# Set the subplots to overlap
g.fig.subplots_adjust(hspace=-.25)
# Remove axes details that don't play well with overlap
g.set_titles("")
g.set(yticks=[])
g.despine(bottom=True, left=True)
|
<commit_before><commit_msg>Add an example script to make a Joy Division plot<commit_after>"""
Overlapping KDEs ('Joy Division plot')
======================================
"""
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white", rc={"axes.facecolor": (0, 0, 0, 0)})
# Create the data
rs = np.random.RandomState(1979)
x = rs.randn(500)
g = np.tile(list("ABCDEFGHIJ"), 50)
df = pd.DataFrame(dict(x=x, g=g))
m = df.g.map(ord)
df["x"] += m
# Initialize the FacetGrid object
g = sns.FacetGrid(df, row="g", hue="g", aspect=12, size=.5, palette="tab10")
# Draw the densities in a few steps
g.map(sns.kdeplot, "x", clip_on=False, shade=True, alpha=1, lw=1.5, bw=.2)
g.map(sns.kdeplot, "x", clip_on=False, color="w", lw=2, bw=.2)
g.map(plt.axhline, y=0, lw=2, clip_on=False)
# Define and use a simple function to label the plot in axes coordinates
def label(x, color, label):
ax = plt.gca()
ax.text(0, .2, label, fontweight="bold", color=color,
ha="left", va="center", transform=ax.transAxes)
g.map(label, "x")
# Set the subplots to overlap
g.fig.subplots_adjust(hspace=-.25)
# Remove axes details that don't play well with overlap
g.set_titles("")
g.set(yticks=[])
g.despine(bottom=True, left=True)
|
|
cdfc702741a0209fe55c8aff5ac6c61ed81cb332
|
scripts/patches/fsx.py
|
scripts/patches/fsx.py
|
patches = [
# Rename AWS::FSx::Volume.OntapConfiguration to AWS::FSx::Volume.VolumeOntapConfiguration - duplicate property name
{
"op": "move",
"from": "/PropertyTypes/AWS::FSx::Volume.OntapConfiguration",
"path": "/PropertyTypes/AWS::FSx::Volume.VolumeOntapConfiguration",
},
{
"op": "replace",
"path": "/ResourceTypes/AWS::FSx::Volume/Properties/OntapConfiguration/Type",
"value": "VolumeOntapConfiguration",
},
# Rename AWS::FSx::Volume.OpenZFSConfiguration to AWS::FSx::Volume.VolumeOpenZFSConfiguration - duplicate property name
{
"op": "move",
"from": "/PropertyTypes/AWS::FSx::Volume.OpenZFSConfiguration",
"path": "/PropertyTypes/AWS::FSx::Volume.VolumeOpenZFSConfiguration",
},
{
"op": "replace",
"path": "/ResourceTypes/AWS::FSx::Volume/Properties/OpenZFSConfiguration/Type",
"value": "VolumeOpenZFSConfiguration",
},
]
|
Fix duplicate resource names due to FSx::Volume
|
Fix duplicate resource names due to FSx::Volume
|
Python
|
bsd-2-clause
|
cloudtools/troposphere,cloudtools/troposphere
|
Fix duplicate resource names due to FSx::Volume
|
patches = [
# Rename AWS::FSx::Volume.OntapConfiguration to AWS::FSx::Volume.VolumeOntapConfiguration - duplicate property name
{
"op": "move",
"from": "/PropertyTypes/AWS::FSx::Volume.OntapConfiguration",
"path": "/PropertyTypes/AWS::FSx::Volume.VolumeOntapConfiguration",
},
{
"op": "replace",
"path": "/ResourceTypes/AWS::FSx::Volume/Properties/OntapConfiguration/Type",
"value": "VolumeOntapConfiguration",
},
# Rename AWS::FSx::Volume.OpenZFSConfiguration to AWS::FSx::Volume.VolumeOpenZFSConfiguration - duplicate property name
{
"op": "move",
"from": "/PropertyTypes/AWS::FSx::Volume.OpenZFSConfiguration",
"path": "/PropertyTypes/AWS::FSx::Volume.VolumeOpenZFSConfiguration",
},
{
"op": "replace",
"path": "/ResourceTypes/AWS::FSx::Volume/Properties/OpenZFSConfiguration/Type",
"value": "VolumeOpenZFSConfiguration",
},
]
|
<commit_before><commit_msg>Fix duplicate resource names due to FSx::Volume<commit_after>
|
patches = [
# Rename AWS::FSx::Volume.OntapConfiguration to AWS::FSx::Volume.VolumeOntapConfiguration - duplicate property name
{
"op": "move",
"from": "/PropertyTypes/AWS::FSx::Volume.OntapConfiguration",
"path": "/PropertyTypes/AWS::FSx::Volume.VolumeOntapConfiguration",
},
{
"op": "replace",
"path": "/ResourceTypes/AWS::FSx::Volume/Properties/OntapConfiguration/Type",
"value": "VolumeOntapConfiguration",
},
# Rename AWS::FSx::Volume.OpenZFSConfiguration to AWS::FSx::Volume.VolumeOpenZFSConfiguration - duplicate property name
{
"op": "move",
"from": "/PropertyTypes/AWS::FSx::Volume.OpenZFSConfiguration",
"path": "/PropertyTypes/AWS::FSx::Volume.VolumeOpenZFSConfiguration",
},
{
"op": "replace",
"path": "/ResourceTypes/AWS::FSx::Volume/Properties/OpenZFSConfiguration/Type",
"value": "VolumeOpenZFSConfiguration",
},
]
|
Fix duplicate resource names due to FSx::Volumepatches = [
# Rename AWS::FSx::Volume.OntapConfiguration to AWS::FSx::Volume.VolumeOntapConfiguration - duplicate property name
{
"op": "move",
"from": "/PropertyTypes/AWS::FSx::Volume.OntapConfiguration",
"path": "/PropertyTypes/AWS::FSx::Volume.VolumeOntapConfiguration",
},
{
"op": "replace",
"path": "/ResourceTypes/AWS::FSx::Volume/Properties/OntapConfiguration/Type",
"value": "VolumeOntapConfiguration",
},
# Rename AWS::FSx::Volume.OpenZFSConfiguration to AWS::FSx::Volume.VolumeOpenZFSConfiguration - duplicate property name
{
"op": "move",
"from": "/PropertyTypes/AWS::FSx::Volume.OpenZFSConfiguration",
"path": "/PropertyTypes/AWS::FSx::Volume.VolumeOpenZFSConfiguration",
},
{
"op": "replace",
"path": "/ResourceTypes/AWS::FSx::Volume/Properties/OpenZFSConfiguration/Type",
"value": "VolumeOpenZFSConfiguration",
},
]
|
<commit_before><commit_msg>Fix duplicate resource names due to FSx::Volume<commit_after>patches = [
# Rename AWS::FSx::Volume.OntapConfiguration to AWS::FSx::Volume.VolumeOntapConfiguration - duplicate property name
{
"op": "move",
"from": "/PropertyTypes/AWS::FSx::Volume.OntapConfiguration",
"path": "/PropertyTypes/AWS::FSx::Volume.VolumeOntapConfiguration",
},
{
"op": "replace",
"path": "/ResourceTypes/AWS::FSx::Volume/Properties/OntapConfiguration/Type",
"value": "VolumeOntapConfiguration",
},
# Rename AWS::FSx::Volume.OpenZFSConfiguration to AWS::FSx::Volume.VolumeOpenZFSConfiguration - duplicate property name
{
"op": "move",
"from": "/PropertyTypes/AWS::FSx::Volume.OpenZFSConfiguration",
"path": "/PropertyTypes/AWS::FSx::Volume.VolumeOpenZFSConfiguration",
},
{
"op": "replace",
"path": "/ResourceTypes/AWS::FSx::Volume/Properties/OpenZFSConfiguration/Type",
"value": "VolumeOpenZFSConfiguration",
},
]
|
|
7893e7c268a8ae144b47cf6d2d9eed44696b17a2
|
tests/test_widgets_simple.py
|
tests/test_widgets_simple.py
|
from controlcenter.widgets.contrib import simple
from . import TestCase
FAKE_VALUE_LIST = ['Label 1', 'Label 2']
FAKE_KEY_VALUE_LIST = {'Key 1': 'Value 1', 'Key 2': 'Value 2'}
class SimpleWidgetTest(TestCase):
def setUp(self):
self.widget = simple.SimpleWidget(request=None)
def test_get_data_raises(self):
with self.assertRaises(NotImplementedError):
self.widget.get_data()
class ValueListTest(TestCase):
def setUp(self):
self.widget = ExampleValueList(request=None)
def test_basic(self):
self.assertIsNotNone(self.widget.template_name)
def test_default_not_sortable(self):
self.assertFalse(self.widget.show_column_headers())
def test_get_data(self):
self.assertItemsEqual(self.widget.items(), FAKE_VALUE_LIST)
class KeyValueListTest(TestCase):
def setUp(self):
self.widget = ExampleKeyValueList(request=None)
def test_basic(self):
self.assertIsNotNone(self.widget.template_name)
def test_default_not_sortable(self):
self.assertFalse(self.widget.show_column_headers())
def test_get_data(self):
self.assertItemsEqual(self.widget.items(), FAKE_KEY_VALUE_LIST.items())
class ExampleValueList(simple.ValueList):
title = 'Value list widget'
def get_data(self):
return FAKE_VALUE_LIST
class ExampleKeyValueList(simple.KeyValueList):
title = 'Key-value list widget'
def get_data(self):
return FAKE_KEY_VALUE_LIST
|
Add tests for simple data widgets
|
Add tests for simple data widgets
|
Python
|
bsd-3-clause
|
byashimov/django-controlcenter,byashimov/django-controlcenter,byashimov/django-controlcenter
|
Add tests for simple data widgets
|
from controlcenter.widgets.contrib import simple
from . import TestCase
FAKE_VALUE_LIST = ['Label 1', 'Label 2']
FAKE_KEY_VALUE_LIST = {'Key 1': 'Value 1', 'Key 2': 'Value 2'}
class SimpleWidgetTest(TestCase):
def setUp(self):
self.widget = simple.SimpleWidget(request=None)
def test_get_data_raises(self):
with self.assertRaises(NotImplementedError):
self.widget.get_data()
class ValueListTest(TestCase):
def setUp(self):
self.widget = ExampleValueList(request=None)
def test_basic(self):
self.assertIsNotNone(self.widget.template_name)
def test_default_not_sortable(self):
self.assertFalse(self.widget.show_column_headers())
def test_get_data(self):
self.assertItemsEqual(self.widget.items(), FAKE_VALUE_LIST)
class KeyValueListTest(TestCase):
def setUp(self):
self.widget = ExampleKeyValueList(request=None)
def test_basic(self):
self.assertIsNotNone(self.widget.template_name)
def test_default_not_sortable(self):
self.assertFalse(self.widget.show_column_headers())
def test_get_data(self):
self.assertItemsEqual(self.widget.items(), FAKE_KEY_VALUE_LIST.items())
class ExampleValueList(simple.ValueList):
title = 'Value list widget'
def get_data(self):
return FAKE_VALUE_LIST
class ExampleKeyValueList(simple.KeyValueList):
title = 'Key-value list widget'
def get_data(self):
return FAKE_KEY_VALUE_LIST
|
<commit_before><commit_msg>Add tests for simple data widgets<commit_after>
|
from controlcenter.widgets.contrib import simple
from . import TestCase
FAKE_VALUE_LIST = ['Label 1', 'Label 2']
FAKE_KEY_VALUE_LIST = {'Key 1': 'Value 1', 'Key 2': 'Value 2'}
class SimpleWidgetTest(TestCase):
def setUp(self):
self.widget = simple.SimpleWidget(request=None)
def test_get_data_raises(self):
with self.assertRaises(NotImplementedError):
self.widget.get_data()
class ValueListTest(TestCase):
def setUp(self):
self.widget = ExampleValueList(request=None)
def test_basic(self):
self.assertIsNotNone(self.widget.template_name)
def test_default_not_sortable(self):
self.assertFalse(self.widget.show_column_headers())
def test_get_data(self):
self.assertItemsEqual(self.widget.items(), FAKE_VALUE_LIST)
class KeyValueListTest(TestCase):
def setUp(self):
self.widget = ExampleKeyValueList(request=None)
def test_basic(self):
self.assertIsNotNone(self.widget.template_name)
def test_default_not_sortable(self):
self.assertFalse(self.widget.show_column_headers())
def test_get_data(self):
self.assertItemsEqual(self.widget.items(), FAKE_KEY_VALUE_LIST.items())
class ExampleValueList(simple.ValueList):
title = 'Value list widget'
def get_data(self):
return FAKE_VALUE_LIST
class ExampleKeyValueList(simple.KeyValueList):
title = 'Key-value list widget'
def get_data(self):
return FAKE_KEY_VALUE_LIST
|
Add tests for simple data widgetsfrom controlcenter.widgets.contrib import simple
from . import TestCase
FAKE_VALUE_LIST = ['Label 1', 'Label 2']
FAKE_KEY_VALUE_LIST = {'Key 1': 'Value 1', 'Key 2': 'Value 2'}
class SimpleWidgetTest(TestCase):
def setUp(self):
self.widget = simple.SimpleWidget(request=None)
def test_get_data_raises(self):
with self.assertRaises(NotImplementedError):
self.widget.get_data()
class ValueListTest(TestCase):
def setUp(self):
self.widget = ExampleValueList(request=None)
def test_basic(self):
self.assertIsNotNone(self.widget.template_name)
def test_default_not_sortable(self):
self.assertFalse(self.widget.show_column_headers())
def test_get_data(self):
self.assertItemsEqual(self.widget.items(), FAKE_VALUE_LIST)
class KeyValueListTest(TestCase):
def setUp(self):
self.widget = ExampleKeyValueList(request=None)
def test_basic(self):
self.assertIsNotNone(self.widget.template_name)
def test_default_not_sortable(self):
self.assertFalse(self.widget.show_column_headers())
def test_get_data(self):
self.assertItemsEqual(self.widget.items(), FAKE_KEY_VALUE_LIST.items())
class ExampleValueList(simple.ValueList):
title = 'Value list widget'
def get_data(self):
return FAKE_VALUE_LIST
class ExampleKeyValueList(simple.KeyValueList):
title = 'Key-value list widget'
def get_data(self):
return FAKE_KEY_VALUE_LIST
|
<commit_before><commit_msg>Add tests for simple data widgets<commit_after>from controlcenter.widgets.contrib import simple
from . import TestCase
FAKE_VALUE_LIST = ['Label 1', 'Label 2']
FAKE_KEY_VALUE_LIST = {'Key 1': 'Value 1', 'Key 2': 'Value 2'}
class SimpleWidgetTest(TestCase):
def setUp(self):
self.widget = simple.SimpleWidget(request=None)
def test_get_data_raises(self):
with self.assertRaises(NotImplementedError):
self.widget.get_data()
class ValueListTest(TestCase):
def setUp(self):
self.widget = ExampleValueList(request=None)
def test_basic(self):
self.assertIsNotNone(self.widget.template_name)
def test_default_not_sortable(self):
self.assertFalse(self.widget.show_column_headers())
def test_get_data(self):
self.assertItemsEqual(self.widget.items(), FAKE_VALUE_LIST)
class KeyValueListTest(TestCase):
def setUp(self):
self.widget = ExampleKeyValueList(request=None)
def test_basic(self):
self.assertIsNotNone(self.widget.template_name)
def test_default_not_sortable(self):
self.assertFalse(self.widget.show_column_headers())
def test_get_data(self):
self.assertItemsEqual(self.widget.items(), FAKE_KEY_VALUE_LIST.items())
class ExampleValueList(simple.ValueList):
title = 'Value list widget'
def get_data(self):
return FAKE_VALUE_LIST
class ExampleKeyValueList(simple.KeyValueList):
title = 'Key-value list widget'
def get_data(self):
return FAKE_KEY_VALUE_LIST
|
|
5b94733fb2983a923e31ec00fe05ec8614bd56f4
|
tests/buildurl_test.py
|
tests/buildurl_test.py
|
from ass2m.storage import Storage
from ass2m.server import Server
from ass2m.template import build_url, build_root_url
from ass2m.filters import quote_url
from unittest import TestCase
from webtest import TestApp
from tempfile import mkdtemp
import shutil
class BuildURLTest(TestCase):
def setUp(self):
self.root = mkdtemp(prefix='ass2m_test_root')
self.storage = Storage.create(self.root)
server = Server(self.root)
app = TestApp(server.process)
# fill root_url
app.get('http://penguin:42/')
self.root_url = build_root_url(self.storage)
def tearDown(self):
if self.root:
shutil.rmtree(self.root)
def test_buildRootUrl(self):
assert self.root_url.url == 'http://penguin:42/'
assert build_root_url(None) is None
def test_buildSimpleUrl(self):
assert quote_url(build_url(self.root_url, self.storage.get_file(''))) \
== 'http://penguin:42/'
assert quote_url(build_url(self.root_url, self.storage.get_file('/penguin'))) \
== 'http://penguin:42/penguin'
|
Add tests for URL building
|
Add tests for URL building
|
Python
|
agpl-3.0
|
laurentb/assnet,laurentb/assnet
|
Add tests for URL building
|
from ass2m.storage import Storage
from ass2m.server import Server
from ass2m.template import build_url, build_root_url
from ass2m.filters import quote_url
from unittest import TestCase
from webtest import TestApp
from tempfile import mkdtemp
import shutil
class BuildURLTest(TestCase):
def setUp(self):
self.root = mkdtemp(prefix='ass2m_test_root')
self.storage = Storage.create(self.root)
server = Server(self.root)
app = TestApp(server.process)
# fill root_url
app.get('http://penguin:42/')
self.root_url = build_root_url(self.storage)
def tearDown(self):
if self.root:
shutil.rmtree(self.root)
def test_buildRootUrl(self):
assert self.root_url.url == 'http://penguin:42/'
assert build_root_url(None) is None
def test_buildSimpleUrl(self):
assert quote_url(build_url(self.root_url, self.storage.get_file(''))) \
== 'http://penguin:42/'
assert quote_url(build_url(self.root_url, self.storage.get_file('/penguin'))) \
== 'http://penguin:42/penguin'
|
<commit_before><commit_msg>Add tests for URL building<commit_after>
|
from ass2m.storage import Storage
from ass2m.server import Server
from ass2m.template import build_url, build_root_url
from ass2m.filters import quote_url
from unittest import TestCase
from webtest import TestApp
from tempfile import mkdtemp
import shutil
class BuildURLTest(TestCase):
def setUp(self):
self.root = mkdtemp(prefix='ass2m_test_root')
self.storage = Storage.create(self.root)
server = Server(self.root)
app = TestApp(server.process)
# fill root_url
app.get('http://penguin:42/')
self.root_url = build_root_url(self.storage)
def tearDown(self):
if self.root:
shutil.rmtree(self.root)
def test_buildRootUrl(self):
assert self.root_url.url == 'http://penguin:42/'
assert build_root_url(None) is None
def test_buildSimpleUrl(self):
assert quote_url(build_url(self.root_url, self.storage.get_file(''))) \
== 'http://penguin:42/'
assert quote_url(build_url(self.root_url, self.storage.get_file('/penguin'))) \
== 'http://penguin:42/penguin'
|
Add tests for URL buildingfrom ass2m.storage import Storage
from ass2m.server import Server
from ass2m.template import build_url, build_root_url
from ass2m.filters import quote_url
from unittest import TestCase
from webtest import TestApp
from tempfile import mkdtemp
import shutil
class BuildURLTest(TestCase):
def setUp(self):
self.root = mkdtemp(prefix='ass2m_test_root')
self.storage = Storage.create(self.root)
server = Server(self.root)
app = TestApp(server.process)
# fill root_url
app.get('http://penguin:42/')
self.root_url = build_root_url(self.storage)
def tearDown(self):
if self.root:
shutil.rmtree(self.root)
def test_buildRootUrl(self):
assert self.root_url.url == 'http://penguin:42/'
assert build_root_url(None) is None
def test_buildSimpleUrl(self):
assert quote_url(build_url(self.root_url, self.storage.get_file(''))) \
== 'http://penguin:42/'
assert quote_url(build_url(self.root_url, self.storage.get_file('/penguin'))) \
== 'http://penguin:42/penguin'
|
<commit_before><commit_msg>Add tests for URL building<commit_after>from ass2m.storage import Storage
from ass2m.server import Server
from ass2m.template import build_url, build_root_url
from ass2m.filters import quote_url
from unittest import TestCase
from webtest import TestApp
from tempfile import mkdtemp
import shutil
class BuildURLTest(TestCase):
def setUp(self):
self.root = mkdtemp(prefix='ass2m_test_root')
self.storage = Storage.create(self.root)
server = Server(self.root)
app = TestApp(server.process)
# fill root_url
app.get('http://penguin:42/')
self.root_url = build_root_url(self.storage)
def tearDown(self):
if self.root:
shutil.rmtree(self.root)
def test_buildRootUrl(self):
assert self.root_url.url == 'http://penguin:42/'
assert build_root_url(None) is None
def test_buildSimpleUrl(self):
assert quote_url(build_url(self.root_url, self.storage.get_file(''))) \
== 'http://penguin:42/'
assert quote_url(build_url(self.root_url, self.storage.get_file('/penguin'))) \
== 'http://penguin:42/penguin'
|
|
e81f0407a5514a0216b731077d8def7e30d708b0
|
ovp_users/migrations/0016_auto_20170216_1930.py
|
ovp_users/migrations/0016_auto_20170216_1930.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-16 19:30
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ovp_users', '0015_userprofile'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='user',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
Set UserProfile.user to OneToOne instead of ForeignKey
|
Set UserProfile.user to OneToOne instead of ForeignKey
|
Python
|
agpl-3.0
|
OpenVolunteeringPlatform/django-ovp-users,OpenVolunteeringPlatform/django-ovp-users
|
Set UserProfile.user to OneToOne instead of ForeignKey
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-16 19:30
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ovp_users', '0015_userprofile'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='user',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
<commit_before><commit_msg>Set UserProfile.user to OneToOne instead of ForeignKey<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-16 19:30
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ovp_users', '0015_userprofile'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='user',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
Set UserProfile.user to OneToOne instead of ForeignKey# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-16 19:30
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ovp_users', '0015_userprofile'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='user',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
<commit_before><commit_msg>Set UserProfile.user to OneToOne instead of ForeignKey<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-16 19:30
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ovp_users', '0015_userprofile'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='user',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
|
299ab440ff8b783d66d67d02c3510eccac4ccbf9
|
server/cli.py
|
server/cli.py
|
# coding=UTF-8
#
# thickishstring server
# Copyright © 2013 David Given
#
# This software is redistributable under the terms of the Simplified BSD
# open source license. Please see the COPYING file in the distribution for
# the full text.
from ws4py.client.threadedclient import WebSocketClient
import anyjson as json
socket = WebSocketClient("ws://localhost:8086")
print("connecting")
socket.connect()
def sendMsg(msg):
j = json.serialize(msg)
socket.send(j)
def recvMsg():
j = socket.receive()
return json.deserialize(j)
print("authenticating")
sendMsg(
{
"command": "connect",
"username": "thoth",
"password": "testpassword"
}
)
|
Add very very basic CLI test app.
|
Add very very basic CLI test app.
|
Python
|
bsd-2-clause
|
davidgiven/gruntle,davidgiven/gruntle
|
Add very very basic CLI test app.
|
# coding=UTF-8
#
# thickishstring server
# Copyright © 2013 David Given
#
# This software is redistributable under the terms of the Simplified BSD
# open source license. Please see the COPYING file in the distribution for
# the full text.
from ws4py.client.threadedclient import WebSocketClient
import anyjson as json
socket = WebSocketClient("ws://localhost:8086")
print("connecting")
socket.connect()
def sendMsg(msg):
j = json.serialize(msg)
socket.send(j)
def recvMsg():
j = socket.receive()
return json.deserialize(j)
print("authenticating")
sendMsg(
{
"command": "connect",
"username": "thoth",
"password": "testpassword"
}
)
|
<commit_before><commit_msg>Add very very basic CLI test app.<commit_after>
|
# coding=UTF-8
#
# thickishstring server
# Copyright © 2013 David Given
#
# This software is redistributable under the terms of the Simplified BSD
# open source license. Please see the COPYING file in the distribution for
# the full text.
from ws4py.client.threadedclient import WebSocketClient
import anyjson as json
socket = WebSocketClient("ws://localhost:8086")
print("connecting")
socket.connect()
def sendMsg(msg):
j = json.serialize(msg)
socket.send(j)
def recvMsg():
j = socket.receive()
return json.deserialize(j)
print("authenticating")
sendMsg(
{
"command": "connect",
"username": "thoth",
"password": "testpassword"
}
)
|
Add very very basic CLI test app.# coding=UTF-8
#
# thickishstring server
# Copyright © 2013 David Given
#
# This software is redistributable under the terms of the Simplified BSD
# open source license. Please see the COPYING file in the distribution for
# the full text.
from ws4py.client.threadedclient import WebSocketClient
import anyjson as json
socket = WebSocketClient("ws://localhost:8086")
print("connecting")
socket.connect()
def sendMsg(msg):
j = json.serialize(msg)
socket.send(j)
def recvMsg():
j = socket.receive()
return json.deserialize(j)
print("authenticating")
sendMsg(
{
"command": "connect",
"username": "thoth",
"password": "testpassword"
}
)
|
<commit_before><commit_msg>Add very very basic CLI test app.<commit_after># coding=UTF-8
#
# thickishstring server
# Copyright © 2013 David Given
#
# This software is redistributable under the terms of the Simplified BSD
# open source license. Please see the COPYING file in the distribution for
# the full text.
from ws4py.client.threadedclient import WebSocketClient
import anyjson as json
socket = WebSocketClient("ws://localhost:8086")
print("connecting")
socket.connect()
def sendMsg(msg):
j = json.serialize(msg)
socket.send(j)
def recvMsg():
j = socket.receive()
return json.deserialize(j)
print("authenticating")
sendMsg(
{
"command": "connect",
"username": "thoth",
"password": "testpassword"
}
)
|
|
416a12561ffb65705d80a62de22ddeac3c46d8ec
|
devicecloud/test/test_conditions.py
|
devicecloud/test/test_conditions.py
|
import unittest
import datetime
from devicecloud.conditions import Attribute
class TestConditions(unittest.TestCase):
def test_gt(self):
a = Attribute("a")
self.assertEqual((a > 21).compile(), "a>'21'")
def test_lt(self):
a = Attribute("a")
self.assertEqual((a < 25).compile(), "a<'25'")
def test_eq(self):
a = Attribute("a")
self.assertEqual((a == "a string").compile(), "a='a%20string'")
def test_like(self):
a = Attribute("a")
self.assertEqual(a.like(r"%.txt").compile(), "a like '%25.txt'")
def test_and(self):
a = Attribute("a")
b = Attribute("b")
expr = (a > 21) & (b == "Amsterdam")
self.assertEqual(expr.compile(), "a>'21' and b='Amsterdam'")
def test_or(self):
a = Attribute("a")
b = Attribute("b")
expr = (a.like("%.csv")) | (b < 1024)
self.assertEqual(expr.compile(), "a like '%25.csv' or b<'1024'")
def test_datacmp(self):
a = Attribute("a")
self.assertEqual((a < datetime.datetime(2014, 7, 7)).compile(),
"a<'2014-07-07T00%3A00%3A00'")
def test_multi_combination(self):
a = Attribute("a")
self.assertEqual(((a > 1) & (a > 2) & (a > 3)).compile(),
"a>'1' and a>'2' and a>'3'")
if __name__ == '__main__':
unittest.main()
|
Add test coverage for conditions logic
|
PYTHONDC-6: Add test coverage for conditions logic
|
Python
|
mpl-2.0
|
michaelcho/python-devicecloud,ctrlaltdel/python-devicecloud,ctrlaltdel/python-devicecloud,michaelcho/python-devicecloud,brucetsao/python-devicecloud,brucetsao/python-devicecloud,digidotcom/python-devicecloud,digidotcom/python-devicecloud
|
PYTHONDC-6: Add test coverage for conditions logic
|
import unittest
import datetime
from devicecloud.conditions import Attribute
class TestConditions(unittest.TestCase):
def test_gt(self):
a = Attribute("a")
self.assertEqual((a > 21).compile(), "a>'21'")
def test_lt(self):
a = Attribute("a")
self.assertEqual((a < 25).compile(), "a<'25'")
def test_eq(self):
a = Attribute("a")
self.assertEqual((a == "a string").compile(), "a='a%20string'")
def test_like(self):
a = Attribute("a")
self.assertEqual(a.like(r"%.txt").compile(), "a like '%25.txt'")
def test_and(self):
a = Attribute("a")
b = Attribute("b")
expr = (a > 21) & (b == "Amsterdam")
self.assertEqual(expr.compile(), "a>'21' and b='Amsterdam'")
def test_or(self):
a = Attribute("a")
b = Attribute("b")
expr = (a.like("%.csv")) | (b < 1024)
self.assertEqual(expr.compile(), "a like '%25.csv' or b<'1024'")
def test_datacmp(self):
a = Attribute("a")
self.assertEqual((a < datetime.datetime(2014, 7, 7)).compile(),
"a<'2014-07-07T00%3A00%3A00'")
def test_multi_combination(self):
a = Attribute("a")
self.assertEqual(((a > 1) & (a > 2) & (a > 3)).compile(),
"a>'1' and a>'2' and a>'3'")
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>PYTHONDC-6: Add test coverage for conditions logic<commit_after>
|
import unittest
import datetime
from devicecloud.conditions import Attribute
class TestConditions(unittest.TestCase):
def test_gt(self):
a = Attribute("a")
self.assertEqual((a > 21).compile(), "a>'21'")
def test_lt(self):
a = Attribute("a")
self.assertEqual((a < 25).compile(), "a<'25'")
def test_eq(self):
a = Attribute("a")
self.assertEqual((a == "a string").compile(), "a='a%20string'")
def test_like(self):
a = Attribute("a")
self.assertEqual(a.like(r"%.txt").compile(), "a like '%25.txt'")
def test_and(self):
a = Attribute("a")
b = Attribute("b")
expr = (a > 21) & (b == "Amsterdam")
self.assertEqual(expr.compile(), "a>'21' and b='Amsterdam'")
def test_or(self):
a = Attribute("a")
b = Attribute("b")
expr = (a.like("%.csv")) | (b < 1024)
self.assertEqual(expr.compile(), "a like '%25.csv' or b<'1024'")
def test_datacmp(self):
a = Attribute("a")
self.assertEqual((a < datetime.datetime(2014, 7, 7)).compile(),
"a<'2014-07-07T00%3A00%3A00'")
def test_multi_combination(self):
a = Attribute("a")
self.assertEqual(((a > 1) & (a > 2) & (a > 3)).compile(),
"a>'1' and a>'2' and a>'3'")
if __name__ == '__main__':
unittest.main()
|
PYTHONDC-6: Add test coverage for conditions logicimport unittest
import datetime
from devicecloud.conditions import Attribute
class TestConditions(unittest.TestCase):
def test_gt(self):
a = Attribute("a")
self.assertEqual((a > 21).compile(), "a>'21'")
def test_lt(self):
a = Attribute("a")
self.assertEqual((a < 25).compile(), "a<'25'")
def test_eq(self):
a = Attribute("a")
self.assertEqual((a == "a string").compile(), "a='a%20string'")
def test_like(self):
a = Attribute("a")
self.assertEqual(a.like(r"%.txt").compile(), "a like '%25.txt'")
def test_and(self):
a = Attribute("a")
b = Attribute("b")
expr = (a > 21) & (b == "Amsterdam")
self.assertEqual(expr.compile(), "a>'21' and b='Amsterdam'")
def test_or(self):
a = Attribute("a")
b = Attribute("b")
expr = (a.like("%.csv")) | (b < 1024)
self.assertEqual(expr.compile(), "a like '%25.csv' or b<'1024'")
def test_datacmp(self):
a = Attribute("a")
self.assertEqual((a < datetime.datetime(2014, 7, 7)).compile(),
"a<'2014-07-07T00%3A00%3A00'")
def test_multi_combination(self):
a = Attribute("a")
self.assertEqual(((a > 1) & (a > 2) & (a > 3)).compile(),
"a>'1' and a>'2' and a>'3'")
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>PYTHONDC-6: Add test coverage for conditions logic<commit_after>import unittest
import datetime
from devicecloud.conditions import Attribute
class TestConditions(unittest.TestCase):
def test_gt(self):
a = Attribute("a")
self.assertEqual((a > 21).compile(), "a>'21'")
def test_lt(self):
a = Attribute("a")
self.assertEqual((a < 25).compile(), "a<'25'")
def test_eq(self):
a = Attribute("a")
self.assertEqual((a == "a string").compile(), "a='a%20string'")
def test_like(self):
a = Attribute("a")
self.assertEqual(a.like(r"%.txt").compile(), "a like '%25.txt'")
def test_and(self):
a = Attribute("a")
b = Attribute("b")
expr = (a > 21) & (b == "Amsterdam")
self.assertEqual(expr.compile(), "a>'21' and b='Amsterdam'")
def test_or(self):
a = Attribute("a")
b = Attribute("b")
expr = (a.like("%.csv")) | (b < 1024)
self.assertEqual(expr.compile(), "a like '%25.csv' or b<'1024'")
def test_datacmp(self):
a = Attribute("a")
self.assertEqual((a < datetime.datetime(2014, 7, 7)).compile(),
"a<'2014-07-07T00%3A00%3A00'")
def test_multi_combination(self):
a = Attribute("a")
self.assertEqual(((a > 1) & (a > 2) & (a > 3)).compile(),
"a>'1' and a>'2' and a>'3'")
if __name__ == '__main__':
unittest.main()
|
|
ce799933ef7a15bfb70f9ab681f7ba47270cbed8
|
docs/src/examples/over_available.py
|
docs/src/examples/over_available.py
|
from scikits.audiolab import available_file_formats, available_encodings
for format in available_file_formats():
print "File format %s is supported; available encodings are:" % format
for enc in available_encodings(format):
print "\t%s" % enc
print ""
|
Add example of usage for available_* funcs.
|
Add example of usage for available_* funcs.
|
Python
|
lgpl-2.1
|
cournape/audiolab,cournape/audiolab,cournape/audiolab
|
Add example of usage for available_* funcs.
|
from scikits.audiolab import available_file_formats, available_encodings
for format in available_file_formats():
print "File format %s is supported; available encodings are:" % format
for enc in available_encodings(format):
print "\t%s" % enc
print ""
|
<commit_before><commit_msg>Add example of usage for available_* funcs.<commit_after>
|
from scikits.audiolab import available_file_formats, available_encodings
for format in available_file_formats():
print "File format %s is supported; available encodings are:" % format
for enc in available_encodings(format):
print "\t%s" % enc
print ""
|
Add example of usage for available_* funcs.from scikits.audiolab import available_file_formats, available_encodings
for format in available_file_formats():
print "File format %s is supported; available encodings are:" % format
for enc in available_encodings(format):
print "\t%s" % enc
print ""
|
<commit_before><commit_msg>Add example of usage for available_* funcs.<commit_after>from scikits.audiolab import available_file_formats, available_encodings
for format in available_file_formats():
print "File format %s is supported; available encodings are:" % format
for enc in available_encodings(format):
print "\t%s" % enc
print ""
|
|
0e0105d6e5a5583432d39d019569483bf28dc860
|
imageio/plugins/feisem.py
|
imageio/plugins/feisem.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016, imageio contributors
# imageio is distributed under the terms of the (new) BSD License.
from __future__ import absolute_import, unicode_literals
from .tifffile import TiffFormat
class FEISEMFormat(TiffFormat):
"""Provide read support for TIFFs produced by an FEI SEM microscope."""
class Reader(TiffFormat.Reader):
def _get_meta_data(self, index=None):
"""Read the metadata from an FEI SEM TIFF.
This metadata is included as ASCII text at the end of the file.
The index, if provided, is ignored.
Returns
-------
metadata : dict
Dictionary of metadata.
"""
md = {'root': {}}
current_tag = 'root'
reading_metadata = False
filename = self.request.get_local_filename()
with open(filename, 'rb') as fin:
for line in fin:
if not reading_metadata:
if not line.startswith(b'Date='):
continue
else:
reading_metadata = True
line = line.rstrip().decode()
if line.startswith('['):
current_tag = line.lstrip('[').rstrip(']')
md[current_tag] = {}
else:
if line and line != '\x00': # ignore blank lines
key, val = line.split('=')
md[current_tag][key] = val
if not md['root'] and len(md) == 1:
raise ValueError(
'Input file %s contains no FEI metadata.' % filename)
self._meta.update(md)
return md
|
Add FEI-SEM plugin based on TIFF
|
Add FEI-SEM plugin based on TIFF
|
Python
|
bsd-2-clause
|
imageio/imageio
|
Add FEI-SEM plugin based on TIFF
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016, imageio contributors
# imageio is distributed under the terms of the (new) BSD License.
from __future__ import absolute_import, unicode_literals
from .tifffile import TiffFormat
class FEISEMFormat(TiffFormat):
"""Provide read support for TIFFs produced by an FEI SEM microscope."""
class Reader(TiffFormat.Reader):
def _get_meta_data(self, index=None):
"""Read the metadata from an FEI SEM TIFF.
This metadata is included as ASCII text at the end of the file.
The index, if provided, is ignored.
Returns
-------
metadata : dict
Dictionary of metadata.
"""
md = {'root': {}}
current_tag = 'root'
reading_metadata = False
filename = self.request.get_local_filename()
with open(filename, 'rb') as fin:
for line in fin:
if not reading_metadata:
if not line.startswith(b'Date='):
continue
else:
reading_metadata = True
line = line.rstrip().decode()
if line.startswith('['):
current_tag = line.lstrip('[').rstrip(']')
md[current_tag] = {}
else:
if line and line != '\x00': # ignore blank lines
key, val = line.split('=')
md[current_tag][key] = val
if not md['root'] and len(md) == 1:
raise ValueError(
'Input file %s contains no FEI metadata.' % filename)
self._meta.update(md)
return md
|
<commit_before><commit_msg>Add FEI-SEM plugin based on TIFF<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016, imageio contributors
# imageio is distributed under the terms of the (new) BSD License.
from __future__ import absolute_import, unicode_literals
from .tifffile import TiffFormat
class FEISEMFormat(TiffFormat):
"""Provide read support for TIFFs produced by an FEI SEM microscope."""
class Reader(TiffFormat.Reader):
def _get_meta_data(self, index=None):
"""Read the metadata from an FEI SEM TIFF.
This metadata is included as ASCII text at the end of the file.
The index, if provided, is ignored.
Returns
-------
metadata : dict
Dictionary of metadata.
"""
md = {'root': {}}
current_tag = 'root'
reading_metadata = False
filename = self.request.get_local_filename()
with open(filename, 'rb') as fin:
for line in fin:
if not reading_metadata:
if not line.startswith(b'Date='):
continue
else:
reading_metadata = True
line = line.rstrip().decode()
if line.startswith('['):
current_tag = line.lstrip('[').rstrip(']')
md[current_tag] = {}
else:
if line and line != '\x00': # ignore blank lines
key, val = line.split('=')
md[current_tag][key] = val
if not md['root'] and len(md) == 1:
raise ValueError(
'Input file %s contains no FEI metadata.' % filename)
self._meta.update(md)
return md
|
Add FEI-SEM plugin based on TIFF# -*- coding: utf-8 -*-
# Copyright (c) 2016, imageio contributors
# imageio is distributed under the terms of the (new) BSD License.
from __future__ import absolute_import, unicode_literals
from .tifffile import TiffFormat
class FEISEMFormat(TiffFormat):
"""Provide read support for TIFFs produced by an FEI SEM microscope."""
class Reader(TiffFormat.Reader):
def _get_meta_data(self, index=None):
"""Read the metadata from an FEI SEM TIFF.
This metadata is included as ASCII text at the end of the file.
The index, if provided, is ignored.
Returns
-------
metadata : dict
Dictionary of metadata.
"""
md = {'root': {}}
current_tag = 'root'
reading_metadata = False
filename = self.request.get_local_filename()
with open(filename, 'rb') as fin:
for line in fin:
if not reading_metadata:
if not line.startswith(b'Date='):
continue
else:
reading_metadata = True
line = line.rstrip().decode()
if line.startswith('['):
current_tag = line.lstrip('[').rstrip(']')
md[current_tag] = {}
else:
if line and line != '\x00': # ignore blank lines
key, val = line.split('=')
md[current_tag][key] = val
if not md['root'] and len(md) == 1:
raise ValueError(
'Input file %s contains no FEI metadata.' % filename)
self._meta.update(md)
return md
|
<commit_before><commit_msg>Add FEI-SEM plugin based on TIFF<commit_after># -*- coding: utf-8 -*-
# Copyright (c) 2016, imageio contributors
# imageio is distributed under the terms of the (new) BSD License.
from __future__ import absolute_import, unicode_literals
from .tifffile import TiffFormat
class FEISEMFormat(TiffFormat):
"""Provide read support for TIFFs produced by an FEI SEM microscope."""
class Reader(TiffFormat.Reader):
def _get_meta_data(self, index=None):
"""Read the metadata from an FEI SEM TIFF.
This metadata is included as ASCII text at the end of the file.
The index, if provided, is ignored.
Returns
-------
metadata : dict
Dictionary of metadata.
"""
md = {'root': {}}
current_tag = 'root'
reading_metadata = False
filename = self.request.get_local_filename()
with open(filename, 'rb') as fin:
for line in fin:
if not reading_metadata:
if not line.startswith(b'Date='):
continue
else:
reading_metadata = True
line = line.rstrip().decode()
if line.startswith('['):
current_tag = line.lstrip('[').rstrip(']')
md[current_tag] = {}
else:
if line and line != '\x00': # ignore blank lines
key, val = line.split('=')
md[current_tag][key] = val
if not md['root'] and len(md) == 1:
raise ValueError(
'Input file %s contains no FEI metadata.' % filename)
self._meta.update(md)
return md
|
|
69c449b0bd90f59e578e35ab9475b54ce6c8f0ce
|
backend/breach/tests/test_sniffer.py
|
backend/breach/tests/test_sniffer.py
|
from mock import patch
from django.test import TestCase
from breach.sniffer import Sniffer
class SnifferTest(TestCase):
def setUp(self):
self.endpoint = 'http://localhost'
self.sniffer = Sniffer(self.endpoint)
self.source_ip = '147.102.239.229'
self.destination_host = 'dionyziz.com'
@patch('breach.sniffer.requests')
def test_sniffer_start(self, requests):
self.sniffer.start(self.source_ip, self.destination_host)
self.assertTrue(requests.post.called)
@patch('breach.sniffer.requests')
def test_sniffer_read(self, requests):
self.sniffer.read(self.source_ip, self.destination_host)
self.assertTrue(requests.get.called)
@patch('breach.sniffer.requests')
def test_sniffer_stop(self, requests):
self.sniffer.stop(self.source_ip, self.destination_host)
self.assertTrue(requests.post.called)
|
Add basic sniffer client test
|
Add basic sniffer client test
|
Python
|
mit
|
dionyziz/rupture,dionyziz/rupture,esarafianou/rupture,dimkarakostas/rupture,dimriou/rupture,dimriou/rupture,dimkarakostas/rupture,esarafianou/rupture,dimriou/rupture,esarafianou/rupture,dionyziz/rupture,dimkarakostas/rupture,dionyziz/rupture,dimriou/rupture,dionyziz/rupture,dimkarakostas/rupture,dimkarakostas/rupture,esarafianou/rupture,dimriou/rupture
|
Add basic sniffer client test
|
from mock import patch
from django.test import TestCase
from breach.sniffer import Sniffer
class SnifferTest(TestCase):
def setUp(self):
self.endpoint = 'http://localhost'
self.sniffer = Sniffer(self.endpoint)
self.source_ip = '147.102.239.229'
self.destination_host = 'dionyziz.com'
@patch('breach.sniffer.requests')
def test_sniffer_start(self, requests):
self.sniffer.start(self.source_ip, self.destination_host)
self.assertTrue(requests.post.called)
@patch('breach.sniffer.requests')
def test_sniffer_read(self, requests):
self.sniffer.read(self.source_ip, self.destination_host)
self.assertTrue(requests.get.called)
@patch('breach.sniffer.requests')
def test_sniffer_stop(self, requests):
self.sniffer.stop(self.source_ip, self.destination_host)
self.assertTrue(requests.post.called)
|
<commit_before><commit_msg>Add basic sniffer client test<commit_after>
|
from mock import patch
from django.test import TestCase
from breach.sniffer import Sniffer
class SnifferTest(TestCase):
def setUp(self):
self.endpoint = 'http://localhost'
self.sniffer = Sniffer(self.endpoint)
self.source_ip = '147.102.239.229'
self.destination_host = 'dionyziz.com'
@patch('breach.sniffer.requests')
def test_sniffer_start(self, requests):
self.sniffer.start(self.source_ip, self.destination_host)
self.assertTrue(requests.post.called)
@patch('breach.sniffer.requests')
def test_sniffer_read(self, requests):
self.sniffer.read(self.source_ip, self.destination_host)
self.assertTrue(requests.get.called)
@patch('breach.sniffer.requests')
def test_sniffer_stop(self, requests):
self.sniffer.stop(self.source_ip, self.destination_host)
self.assertTrue(requests.post.called)
|
Add basic sniffer client testfrom mock import patch
from django.test import TestCase
from breach.sniffer import Sniffer
class SnifferTest(TestCase):
def setUp(self):
self.endpoint = 'http://localhost'
self.sniffer = Sniffer(self.endpoint)
self.source_ip = '147.102.239.229'
self.destination_host = 'dionyziz.com'
@patch('breach.sniffer.requests')
def test_sniffer_start(self, requests):
self.sniffer.start(self.source_ip, self.destination_host)
self.assertTrue(requests.post.called)
@patch('breach.sniffer.requests')
def test_sniffer_read(self, requests):
self.sniffer.read(self.source_ip, self.destination_host)
self.assertTrue(requests.get.called)
@patch('breach.sniffer.requests')
def test_sniffer_stop(self, requests):
self.sniffer.stop(self.source_ip, self.destination_host)
self.assertTrue(requests.post.called)
|
<commit_before><commit_msg>Add basic sniffer client test<commit_after>from mock import patch
from django.test import TestCase
from breach.sniffer import Sniffer
class SnifferTest(TestCase):
def setUp(self):
self.endpoint = 'http://localhost'
self.sniffer = Sniffer(self.endpoint)
self.source_ip = '147.102.239.229'
self.destination_host = 'dionyziz.com'
@patch('breach.sniffer.requests')
def test_sniffer_start(self, requests):
self.sniffer.start(self.source_ip, self.destination_host)
self.assertTrue(requests.post.called)
@patch('breach.sniffer.requests')
def test_sniffer_read(self, requests):
self.sniffer.read(self.source_ip, self.destination_host)
self.assertTrue(requests.get.called)
@patch('breach.sniffer.requests')
def test_sniffer_stop(self, requests):
self.sniffer.stop(self.source_ip, self.destination_host)
self.assertTrue(requests.post.called)
|
|
be75470198065f6d66f179e52d96908f11275222
|
tests/rules_tests/grammarManipulation_tests/__init__.py
|
tests/rules_tests/grammarManipulation_tests/__init__.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
|
Add directory for tests responsible for rule - grammar manipulations
|
Add directory for tests responsible for rule - grammar manipulations
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add directory for tests responsible for rule - grammar manipulations
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
|
<commit_before><commit_msg>Add directory for tests responsible for rule - grammar manipulations<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
|
Add directory for tests responsible for rule - grammar manipulations#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
|
<commit_before><commit_msg>Add directory for tests responsible for rule - grammar manipulations<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
|
|
fba553b19585c26ba6eed73c71519b882a57add5
|
test_hash.py
|
test_hash.py
|
from hash import HashTable
import io
words = []
with io.open('/usr/share/dict/words', 'r') as word_file:
words = word_file.readlines()
def test_hash():
t = HashTable()
t.set('coffee', 'coffee')
assert t.get('coffee') == 'coffee'
def test_duplicate_hash_val():
t = HashTable()
t.set('bob', 'bob')
t.set('obb', 'obb')
assert t.get('bob') == 'bob'
assert t.get('obb') == 'obb'
def test_word_file():
t = HashTable()
for word in words:
t.set(word, word)
assert t.get(words[654]) == words[654]
assert t.get(words[3541]) == words[3541]
assert t.get(words[6541]) == words[6541]
|
Add tests for hash table
|
Add tests for hash table
|
Python
|
mit
|
nbeck90/data_structures_2
|
Add tests for hash table
|
from hash import HashTable
import io
words = []
with io.open('/usr/share/dict/words', 'r') as word_file:
words = word_file.readlines()
def test_hash():
t = HashTable()
t.set('coffee', 'coffee')
assert t.get('coffee') == 'coffee'
def test_duplicate_hash_val():
t = HashTable()
t.set('bob', 'bob')
t.set('obb', 'obb')
assert t.get('bob') == 'bob'
assert t.get('obb') == 'obb'
def test_word_file():
t = HashTable()
for word in words:
t.set(word, word)
assert t.get(words[654]) == words[654]
assert t.get(words[3541]) == words[3541]
assert t.get(words[6541]) == words[6541]
|
<commit_before><commit_msg>Add tests for hash table<commit_after>
|
from hash import HashTable
import io
words = []
with io.open('/usr/share/dict/words', 'r') as word_file:
words = word_file.readlines()
def test_hash():
t = HashTable()
t.set('coffee', 'coffee')
assert t.get('coffee') == 'coffee'
def test_duplicate_hash_val():
t = HashTable()
t.set('bob', 'bob')
t.set('obb', 'obb')
assert t.get('bob') == 'bob'
assert t.get('obb') == 'obb'
def test_word_file():
t = HashTable()
for word in words:
t.set(word, word)
assert t.get(words[654]) == words[654]
assert t.get(words[3541]) == words[3541]
assert t.get(words[6541]) == words[6541]
|
Add tests for hash tablefrom hash import HashTable
import io
words = []
with io.open('/usr/share/dict/words', 'r') as word_file:
words = word_file.readlines()
def test_hash():
t = HashTable()
t.set('coffee', 'coffee')
assert t.get('coffee') == 'coffee'
def test_duplicate_hash_val():
t = HashTable()
t.set('bob', 'bob')
t.set('obb', 'obb')
assert t.get('bob') == 'bob'
assert t.get('obb') == 'obb'
def test_word_file():
t = HashTable()
for word in words:
t.set(word, word)
assert t.get(words[654]) == words[654]
assert t.get(words[3541]) == words[3541]
assert t.get(words[6541]) == words[6541]
|
<commit_before><commit_msg>Add tests for hash table<commit_after>from hash import HashTable
import io
words = []
with io.open('/usr/share/dict/words', 'r') as word_file:
words = word_file.readlines()
def test_hash():
t = HashTable()
t.set('coffee', 'coffee')
assert t.get('coffee') == 'coffee'
def test_duplicate_hash_val():
t = HashTable()
t.set('bob', 'bob')
t.set('obb', 'obb')
assert t.get('bob') == 'bob'
assert t.get('obb') == 'obb'
def test_word_file():
t = HashTable()
for word in words:
t.set(word, word)
assert t.get(words[654]) == words[654]
assert t.get(words[3541]) == words[3541]
assert t.get(words[6541]) == words[6541]
|
|
678dd502ef3d8c044c2915ed6a55bb10857f653a
|
zephyr/management/commands/profile_request.py
|
zephyr/management/commands/profile_request.py
|
from __future__ import absolute_import
from optparse import make_option
from django.core.management.base import BaseCommand
from confirmation.models import Confirmation
from zephyr.models import get_user_profile_by_email, UserMessage
from zephyr.views import get_old_messages_backend
import cProfile
import time
import logging
from zephyr.middleware import LogRequests
request_logger = LogRequests()
class MockSession(object):
def __init__(self):
self.modified = False
class MockRequest(object):
def __init__(self, email):
self.user = get_user_profile_by_email(email)
self.path = '/'
self.method = "POST"
self.META = {"REMOTE_ADDR": "127.0.0.1"}
self.REQUEST = {"anchor": UserMessage.objects.filter(user_profile=self.user).order_by("-message")[200].message_id,
"num_before": 1200,
"num_after": 200}
self.GET = {}
self.session = MockSession()
def get_full_path(self):
return self.path
def profile_request(request):
request_logger.process_request(request)
prof = cProfile.Profile()
prof.enable()
ret = get_old_messages_backend(request, request.user,
apply_markdown=True)
prof.disable()
prof.dump_stats("/tmp/profile.data")
request_logger.process_response(request, ret)
logging.info("Profiling data written to /tmp/profile.data")
return ret
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--email', action='store'),
)
def handle(self, *args, **options):
profile_request(MockRequest(options["email"]))
|
Add command-line tool to profile get_old_messages requests.
|
Add command-line tool to profile get_old_messages requests.
(imported from commit bd7fc27b0c6fc1ae4f82bb74763736f9163b90bf)
|
Python
|
apache-2.0
|
LeeRisk/zulip,mansilladev/zulip,guiquanz/zulip,swinghu/zulip,stamhe/zulip,adnanh/zulip,mahim97/zulip,tbutter/zulip,johnny9/zulip,isht3/zulip,so0k/zulip,dxq-git/zulip,mdavid/zulip,samatdav/zulip,guiquanz/zulip,wangdeshui/zulip,Batterfii/zulip,dhcrzf/zulip,mohsenSy/zulip,dattatreya303/zulip,zulip/zulip,bowlofstew/zulip,calvinleenyc/zulip,zhaoweigg/zulip,babbage/zulip,RobotCaleb/zulip,Suninus/zulip,punchagan/zulip,j831/zulip,atomic-labs/zulip,nicholasbs/zulip,jerryge/zulip,m1ssou/zulip,jeffcao/zulip,levixie/zulip,Cheppers/zulip,hayderimran7/zulip,dattatreya303/zulip,amanharitsh123/zulip,JPJPJPOPOP/zulip,jphilipsen05/zulip,punchagan/zulip,tiansiyuan/zulip,MayB/zulip,yuvipanda/zulip,hafeez3000/zulip,brockwhittaker/zulip,dawran6/zulip,vakila/zulip,kaiyuanheshang/zulip,dwrpayne/zulip,jrowan/zulip,verma-varsha/zulip,Batterfii/zulip,stamhe/zulip,SmartPeople/zulip,KingxBanana/zulip,qq1012803704/zulip,rht/zulip,hafeez3000/zulip,samatdav/zulip,dawran6/zulip,blaze225/zulip,KingxBanana/zulip,timabbott/zulip,stamhe/zulip,LeeRisk/zulip,zachallaun/zulip,willingc/zulip,he15his/zulip,dnmfarrell/zulip,hayderimran7/zulip,seapasulli/zulip,zachallaun/zulip,zofuthan/zulip,natanovia/zulip,sharmaeklavya2/zulip,wdaher/zulip,jphilipsen05/zulip,akuseru/zulip,ahmadassaf/zulip,dxq-git/zulip,proliming/zulip,bitemyapp/zulip,Frouk/zulip,DazWorrall/zulip,atomic-labs/zulip,kou/zulip,littledogboy/zulip,dwrpayne/zulip,susansls/zulip,AZtheAsian/zulip,tdr130/zulip,jphilipsen05/zulip,Batterfii/zulip,aliceriot/zulip,AZtheAsian/zulip,brockwhittaker/zulip,ericzhou2008/zulip,udxxabp/zulip,ipernet/zulip,m1ssou/zulip,andersk/zulip,hackerkid/zulip,JanzTam/zulip,gigawhitlocks/zulip,Drooids/zulip,praveenaki/zulip,itnihao/zulip,zwily/zulip,xuanhan863/zulip,Qgap/zulip,swinghu/zulip,TigorC/zulip,bluesea/zulip,xuxiao/zulip,jessedhillon/zulip,zwily/zulip,jonesgithub/zulip,glovebx/zulip,babbage/zulip,PhilSk/zulip,jonesgithub/zulip,akuseru/zulip,ryanbackman/zulip,andersk/zulip,arpith/zulip,eastlhu/zulip,cosmicAsymmetry/zulip,susansls/zulip,sonali0901/zulip,gigawhitlocks/zulip,bitemyapp/zulip,showell/zulip,yocome/zulip,Drooids/zulip,hustlzp/zulip,ahmadassaf/zulip,LAndreas/zulip,adnanh/zulip,bssrdf/zulip,kaiyuanheshang/zulip,deer-hope/zulip,JanzTam/zulip,bitemyapp/zulip,cosmicAsymmetry/zulip,wavelets/zulip,sharmaeklavya2/zulip,EasonYi/zulip,LAndreas/zulip,moria/zulip,alliejones/zulip,rishig/zulip,synicalsyntax/zulip,Juanvulcano/zulip,Cheppers/zulip,huangkebo/zulip,jackrzhang/zulip,zwily/zulip,kokoar/zulip,eastlhu/zulip,huangkebo/zulip,codeKonami/zulip,karamcnair/zulip,jeffcao/zulip,ApsOps/zulip,j831/zulip,he15his/zulip,brainwane/zulip,jimmy54/zulip,hustlzp/zulip,dwrpayne/zulip,avastu/zulip,Drooids/zulip,blaze225/zulip,zofuthan/zulip,tbutter/zulip,so0k/zulip,Suninus/zulip,KingxBanana/zulip,dhcrzf/zulip,sonali0901/zulip,jphilipsen05/zulip,aakash-cr7/zulip,qq1012803704/zulip,mohsenSy/zulip,jerryge/zulip,Juanvulcano/zulip,mdavid/zulip,ipernet/zulip,bssrdf/zulip,MariaFaBella85/zulip,reyha/zulip,karamcnair/zulip,Qgap/zulip,voidException/zulip,dhcrzf/zulip,JanzTam/zulip,EasonYi/zulip,noroot/zulip,sup95/zulip,armooo/zulip,RobotCaleb/zulip,brockwhittaker/zulip,bastianh/zulip,DazWorrall/zulip,vabs22/zulip,umkay/zulip,zwily/zulip,EasonYi/zulip,saitodisse/zulip,aps-sids/zulip,easyfmxu/zulip,itnihao/zulip,kaiyuanheshang/zulip,krtkmj/zulip,mansilladev/zulip,xuxiao/zulip,itnihao/zulip,LAndreas/zulip,arpitpanwar/zulip,jrowan/zulip,hackerkid/zulip,hj3938/zulip,moria/zulip,tiansiyuan/zulip,easyfmxu/zulip,ryanbackman/zulip,technicalpickles/zulip,yocome/zulip,luyifan/
zulip,brockwhittaker/zulip,qq1012803704/zulip,amanharitsh123/zulip,hayderimran7/zulip,jrowan/zulip,Cheppers/zulip,niftynei/zulip,sonali0901/zulip,shubhamdhama/zulip,johnnygaddarr/zulip,hackerkid/zulip,hustlzp/zulip,yuvipanda/zulip,aliceriot/zulip,fw1121/zulip,souravbadami/zulip,PaulPetring/zulip,niftynei/zulip,akuseru/zulip,jessedhillon/zulip,lfranchi/zulip,cosmicAsymmetry/zulip,proliming/zulip,tiansiyuan/zulip,ikasumiwt/zulip,niftynei/zulip,verma-varsha/zulip,stamhe/zulip,pradiptad/zulip,udxxabp/zulip,eastlhu/zulip,developerfm/zulip,jrowan/zulip,bastianh/zulip,jeffcao/zulip,tdr130/zulip,dotcool/zulip,ahmadassaf/zulip,brockwhittaker/zulip,bluesea/zulip,jimmy54/zulip,timabbott/zulip,technicalpickles/zulip,developerfm/zulip,tbutter/zulip,LAndreas/zulip,krtkmj/zulip,yocome/zulip,willingc/zulip,moria/zulip,avastu/zulip,joyhchen/zulip,zofuthan/zulip,Qgap/zulip,TigorC/zulip,willingc/zulip,jessedhillon/zulip,TigorC/zulip,bluesea/zulip,vakila/zulip,xuanhan863/zulip,isht3/zulip,seapasulli/zulip,andersk/zulip,krtkmj/zulip,christi3k/zulip,themass/zulip,proliming/zulip,kaiyuanheshang/zulip,Batterfii/zulip,easyfmxu/zulip,SmartPeople/zulip,themass/zulip,schatt/zulip,mansilladev/zulip,wavelets/zulip,souravbadami/zulip,joyhchen/zulip,bssrdf/zulip,avastu/zulip,bowlofstew/zulip,akuseru/zulip,dawran6/zulip,mansilladev/zulip,karamcnair/zulip,voidException/zulip,cosmicAsymmetry/zulip,zofuthan/zulip,johnny9/zulip,MariaFaBella85/zulip,ApsOps/zulip,ryansnowboarder/zulip,gigawhitlocks/zulip,hayderimran7/zulip,luyifan/zulip,willingc/zulip,sonali0901/zulip,levixie/zulip,joyhchen/zulip,dotcool/zulip,hj3938/zulip,ericzhou2008/zulip,ApsOps/zulip,Galexrt/zulip,shubhamdhama/zulip,Suninus/zulip,schatt/zulip,zorojean/zulip,MayB/zulip,sharmaeklavya2/zulip,ahmadassaf/zulip,Jianchun1/zulip,vakila/zulip,dotcool/zulip,ericzhou2008/zulip,j831/zulip,amallia/zulip,hengqujushi/zulip,mdavid/zulip,susansls/zulip,moria/zulip,calvinleenyc/zulip,dnmfarrell/zulip,glovebx/zulip,showell/zulip,guiquanz/zulip,suxinde2009/zulip,zorojean/zulip,MayB/zulip,EasonYi/zulip,bastianh/zulip,mdavid/zulip,littledogboy/zulip,wdaher/zulip,RobotCaleb/zulip,gigawhitlocks/zulip,saitodisse/zulip,calvinleenyc/zulip,Jianchun1/zulip,levixie/zulip,Cheppers/zulip,amyliu345/zulip,qq1012803704/zulip,jerryge/zulip,jphilipsen05/zulip,dxq-git/zulip,avastu/zulip,dhcrzf/zulip,shrikrishnaholla/zulip,calvinleenyc/zulip,bowlofstew/zulip,umkay/zulip,DazWorrall/zulip,Cheppers/zulip,peiwei/zulip,saitodisse/zulip,technicalpickles/zulip,wweiradio/zulip,easyfmxu/zulip,dnmfarrell/zulip,bssrdf/zulip,lfranchi/zulip,timabbott/zulip,ryanbackman/zulip,alliejones/zulip,blaze225/zulip,luyifan/zulip,thomasboyt/zulip,calvinleenyc/zulip,kaiyuanheshang/zulip,amallia/zulip,Frouk/zulip,amanharitsh123/zulip,mdavid/zulip,johnnygaddarr/zulip,esander91/zulip,hj3938/zulip,blaze225/zulip,wdaher/zulip,suxinde2009/zulip,gkotian/zulip,ryanbackman/zulip,ApsOps/zulip,wangdeshui/zulip,huangkebo/zulip,bowlofstew/zulip,akuseru/zulip,jainayush975/zulip,Diptanshu8/zulip,vakila/zulip,bluesea/zulip,christi3k/zulip,tommyip/zulip,aakash-cr7/zulip,xuanhan863/zulip,JPJPJPOPOP/zulip,grave-w-grave/zulip,esander91/zulip,zachallaun/zulip,shrikrishnaholla/zulip,j831/zulip,grave-w-grave/zulip,esander91/zulip,glovebx/zulip,isht3/zulip,EasonYi/zulip,zulip/zulip,JPJPJPOPOP/zulip,KJin99/zulip,babbage/zulip,ryanbackman/zulip,dotcool/zulip,susansls/zulip,littledogboy/zulip,shaunstanislaus/zulip,Galexrt/zulip,ryansnowboarder/zulip,alliejones/zulip,alliejones/zulip,proliming/zulip,esander91/zulip,PaulPetring/zulip,DazWorrall/zuli
p,vabs22/zulip,nicholasbs/zulip,wavelets/zulip,schatt/zulip,kokoar/zulip,brainwane/zulip,sharmaeklavya2/zulip,dxq-git/zulip,praveenaki/zulip,he15his/zulip,atomic-labs/zulip,blaze225/zulip,ApsOps/zulip,Vallher/zulip,showell/zulip,aps-sids/zulip,guiquanz/zulip,ufosky-server/zulip,gkotian/zulip,Vallher/zulip,SmartPeople/zulip,joshisa/zulip,bastianh/zulip,hj3938/zulip,eastlhu/zulip,shrikrishnaholla/zulip,dotcool/zulip,gkotian/zulip,paxapy/zulip,dhcrzf/zulip,peguin40/zulip,Juanvulcano/zulip,grave-w-grave/zulip,mahim97/zulip,sup95/zulip,rishig/zulip,deer-hope/zulip,itnihao/zulip,kou/zulip,showell/zulip,so0k/zulip,umkay/zulip,LeeRisk/zulip,souravbadami/zulip,wweiradio/zulip,mahim97/zulip,ashwinirudrappa/zulip,noroot/zulip,rishig/zulip,mansilladev/zulip,themass/zulip,LeeRisk/zulip,KJin99/zulip,bitemyapp/zulip,zacps/zulip,arpith/zulip,jimmy54/zulip,jrowan/zulip,jerryge/zulip,tiansiyuan/zulip,shaunstanislaus/zulip,brainwane/zulip,ikasumiwt/zulip,ufosky-server/zulip,swinghu/zulip,PaulPetring/zulip,pradiptad/zulip,glovebx/zulip,Gabriel0402/zulip,dxq-git/zulip,lfranchi/zulip,fw1121/zulip,xuxiao/zulip,Vallher/zulip,amanharitsh123/zulip,luyifan/zulip,itnihao/zulip,karamcnair/zulip,Batterfii/zulip,littledogboy/zulip,arpith/zulip,sup95/zulip,swinghu/zulip,tdr130/zulip,moria/zulip,samatdav/zulip,Gabriel0402/zulip,grave-w-grave/zulip,kaiyuanheshang/zulip,dhcrzf/zulip,he15his/zulip,wweiradio/zulip,m1ssou/zulip,KJin99/zulip,MariaFaBella85/zulip,hackerkid/zulip,timabbott/zulip,armooo/zulip,wangdeshui/zulip,voidException/zulip,eastlhu/zulip,proliming/zulip,peiwei/zulip,shubhamdhama/zulip,dattatreya303/zulip,eeshangarg/zulip,hafeez3000/zulip,pradiptad/zulip,showell/zulip,joshisa/zulip,armooo/zulip,MayB/zulip,EasonYi/zulip,shubhamdhama/zulip,aakash-cr7/zulip,armooo/zulip,shubhamdhama/zulip,mahim97/zulip,natanovia/zulip,SmartPeople/zulip,jackrzhang/zulip,wavelets/zulip,he15his/zulip,reyha/zulip,themass/zulip,zwily/zulip,lfranchi/zulip,jackrzhang/zulip,dxq-git/zulip,JanzTam/zulip,aps-sids/zulip,bowlofstew/zulip,praveenaki/zulip,natanovia/zulip,joshisa/zulip,wdaher/zulip,m1ssou/zulip,ryansnowboarder/zulip,firstblade/zulip,timabbott/zulip,jrowan/zulip,dwrpayne/zulip,mdavid/zulip,developerfm/zulip,eastlhu/zulip,dnmfarrell/zulip,atomic-labs/zulip,guiquanz/zulip,PhilSk/zulip,synicalsyntax/zulip,JanzTam/zulip,zofuthan/zulip,Diptanshu8/zulip,niftynei/zulip,johnny9/zulip,luyifan/zulip,moria/zulip,arpitpanwar/zulip,christi3k/zulip,vikas-parashar/zulip,zofuthan/zulip,Jianchun1/zulip,Juanvulcano/zulip,verma-varsha/zulip,tommyip/zulip,ashwinirudrappa/zulip,nicholasbs/zulip,souravbadami/zulip,zachallaun/zulip,sup95/zulip,johnnygaddarr/zulip,wdaher/zulip,noroot/zulip,levixie/zulip,ashwinirudrappa/zulip,tiansiyuan/zulip,arpith/zulip,wangdeshui/zulip,shrikrishnaholla/zulip,bitemyapp/zulip,noroot/zulip,fw1121/zulip,arpitpanwar/zulip,bitemyapp/zulip,themass/zulip,noroot/zulip,aps-sids/zulip,willingc/zulip,zhaoweigg/zulip,tdr130/zulip,alliejones/zulip,avastu/zulip,hustlzp/zulip,Qgap/zulip,huangkebo/zulip,rishig/zulip,tbutter/zulip,grave-w-grave/zulip,jonesgithub/zulip,armooo/zulip,cosmicAsymmetry/zulip,paxapy/zulip,hafeez3000/zulip,kokoar/zulip,zorojean/zulip,zulip/zulip,pradiptad/zulip,peguin40/zulip,joyhchen/zulip,jeffcao/zulip,zorojean/zulip,rishig/zulip,KJin99/zulip,jeffcao/zulip,LeeRisk/zulip,aliceriot/zulip,hackerkid/zulip,jackrzhang/zulip,synicalsyntax/zulip,ahmadassaf/zulip,dwrpayne/zulip,hj3938/zulip,AZtheAsian/zulip,thomasboyt/zulip,gigawhitlocks/zulip,blaze225/zulip,verma-varsha/zulip,synicalsyntax/zulip,udxxabp/zulip,zachal
laun/zulip,xuanhan863/zulip,esander91/zulip,xuanhan863/zulip,LeeRisk/zulip,natanovia/zulip,ApsOps/zulip,vaidap/zulip,TigorC/zulip,rht/zulip,punchagan/zulip,amanharitsh123/zulip,peguin40/zulip,arpitpanwar/zulip,bastianh/zulip,alliejones/zulip,dawran6/zulip,schatt/zulip,Galexrt/zulip,levixie/zulip,arpitpanwar/zulip,dwrpayne/zulip,Qgap/zulip,Suninus/zulip,mahim97/zulip,krtkmj/zulip,PaulPetring/zulip,jainayush975/zulip,AZtheAsian/zulip,kokoar/zulip,PhilSk/zulip,shaunstanislaus/zulip,souravbadami/zulip,LAndreas/zulip,babbage/zulip,luyifan/zulip,wavelets/zulip,Gabriel0402/zulip,mansilladev/zulip,xuxiao/zulip,zulip/zulip,arpith/zulip,synicalsyntax/zulip,so0k/zulip,Frouk/zulip,fw1121/zulip,mohsenSy/zulip,RobotCaleb/zulip,AZtheAsian/zulip,glovebx/zulip,codeKonami/zulip,ahmadassaf/zulip,jainayush975/zulip,firstblade/zulip,peiwei/zulip,ryansnowboarder/zulip,babbage/zulip,rishig/zulip,ryanbackman/zulip,synicalsyntax/zulip,jonesgithub/zulip,KJin99/zulip,zorojean/zulip,jeffcao/zulip,avastu/zulip,codeKonami/zulip,littledogboy/zulip,paxapy/zulip,Drooids/zulip,Drooids/zulip,voidException/zulip,DazWorrall/zulip,sonali0901/zulip,suxinde2009/zulip,souravbadami/zulip,sharmaeklavya2/zulip,punchagan/zulip,aliceriot/zulip,umkay/zulip,joshisa/zulip,suxinde2009/zulip,firstblade/zulip,ericzhou2008/zulip,PaulPetring/zulip,huangkebo/zulip,Diptanshu8/zulip,swinghu/zulip,xuxiao/zulip,stamhe/zulip,RobotCaleb/zulip,levixie/zulip,ericzhou2008/zulip,technicalpickles/zulip,zhaoweigg/zulip,vaidap/zulip,hustlzp/zulip,Drooids/zulip,ipernet/zulip,dawran6/zulip,zorojean/zulip,peguin40/zulip,johnny9/zulip,ryansnowboarder/zulip,hayderimran7/zulip,Cheppers/zulip,PhilSk/zulip,punchagan/zulip,saitodisse/zulip,andersk/zulip,johnny9/zulip,Suninus/zulip,brainwane/zulip,codeKonami/zulip,ashwinirudrappa/zulip,hj3938/zulip,mohsenSy/zulip,synicalsyntax/zulip,LAndreas/zulip,christi3k/zulip,littledogboy/zulip,Jianchun1/zulip,peiwei/zulip,easyfmxu/zulip,proliming/zulip,jonesgithub/zulip,jessedhillon/zulip,bssrdf/zulip,umkay/zulip,Batterfii/zulip,zhaoweigg/zulip,kou/zulip,firstblade/zulip,susansls/zulip,joshisa/zulip,amyliu345/zulip,aliceriot/zulip,bitemyapp/zulip,JPJPJPOPOP/zulip,aliceriot/zulip,codeKonami/zulip,wangdeshui/zulip,grave-w-grave/zulip,niftynei/zulip,stamhe/zulip,dnmfarrell/zulip,isht3/zulip,j831/zulip,samatdav/zulip,isht3/zulip,samatdav/zulip,karamcnair/zulip,vakila/zulip,Galexrt/zulip,developerfm/zulip,bluesea/zulip,peiwei/zulip,niftynei/zulip,JanzTam/zulip,dxq-git/zulip,natanovia/zulip,Vallher/zulip,praveenaki/zulip,moria/zulip,amallia/zulip,ipernet/zulip,amallia/zulip,voidException/zulip,ikasumiwt/zulip,dattatreya303/zulip,ryansnowboarder/zulip,ashwinirudrappa/zulip,Drooids/zulip,seapasulli/zulip,shaunstanislaus/zulip,yuvipanda/zulip,sup95/zulip,ipernet/zulip,levixie/zulip,tommyip/zulip,akuseru/zulip,shaunstanislaus/zulip,reyha/zulip,kou/zulip,Galexrt/zulip,wdaher/zulip,AZtheAsian/zulip,PhilSk/zulip,nicholasbs/zulip,voidException/zulip,jessedhillon/zulip,lfranchi/zulip,dawran6/zulip,firstblade/zulip,willingc/zulip,bastianh/zulip,dwrpayne/zulip,aliceriot/zulip,krtkmj/zulip,bowlofstew/zulip,udxxabp/zulip,kokoar/zulip,aakash-cr7/zulip,johnny9/zulip,Qgap/zulip,PhilSk/zulip,PaulPetring/zulip,gigawhitlocks/zulip,wweiradio/zulip,aakash-cr7/zulip,mdavid/zulip,nicholasbs/zulip,LAndreas/zulip,luyifan/zulip,hafeez3000/zulip,thomasboyt/zulip,saitodisse/zulip,natanovia/zulip,noroot/zulip,seapasulli/zulip,shaunstanislaus/zulip,guiquanz/zulip,m1ssou/zulip,eeshangarg/zulip,so0k/zulip,kou/zulip,paxapy/zulip,susansls/zulip,KingxBanana/zulip,vikas
-parashar/zulip,jeffcao/zulip,zorojean/zulip,babbage/zulip,ashwinirudrappa/zulip,fw1121/zulip,Frouk/zulip,bluesea/zulip,amyliu345/zulip,hengqujushi/zulip,arpith/zulip,pradiptad/zulip,easyfmxu/zulip,gigawhitlocks/zulip,kou/zulip,hengqujushi/zulip,armooo/zulip,seapasulli/zulip,Qgap/zulip,verma-varsha/zulip,wangdeshui/zulip,deer-hope/zulip,zacps/zulip,huangkebo/zulip,codeKonami/zulip,Gabriel0402/zulip,joyhchen/zulip,zachallaun/zulip,wweiradio/zulip,rht/zulip,ikasumiwt/zulip,itnihao/zulip,tdr130/zulip,karamcnair/zulip,eeshangarg/zulip,wangdeshui/zulip,jonesgithub/zulip,hustlzp/zulip,vabs22/zulip,ikasumiwt/zulip,thomasboyt/zulip,zacps/zulip,paxapy/zulip,codeKonami/zulip,PaulPetring/zulip,brainwane/zulip,jerryge/zulip,tommyip/zulip,jainayush975/zulip,seapasulli/zulip,adnanh/zulip,rht/zulip,jainayush975/zulip,johnny9/zulip,ipernet/zulip,hengqujushi/zulip,tiansiyuan/zulip,ahmadassaf/zulip,vikas-parashar/zulip,vaidap/zulip,hackerkid/zulip,tdr130/zulip,johnnygaddarr/zulip,ikasumiwt/zulip,eeshangarg/zulip,brainwane/zulip,zachallaun/zulip,easyfmxu/zulip,jimmy54/zulip,Vallher/zulip,zulip/zulip,qq1012803704/zulip,mohsenSy/zulip,shaunstanislaus/zulip,qq1012803704/zulip,jerryge/zulip,zofuthan/zulip,joyhchen/zulip,he15his/zulip,hackerkid/zulip,deer-hope/zulip,armooo/zulip,KingxBanana/zulip,amallia/zulip,andersk/zulip,SmartPeople/zulip,esander91/zulip,yuvipanda/zulip,jackrzhang/zulip,bssrdf/zulip,amallia/zulip,arpitpanwar/zulip,punchagan/zulip,Diptanshu8/zulip,so0k/zulip,technicalpickles/zulip,shrikrishnaholla/zulip,he15his/zulip,schatt/zulip,tbutter/zulip,itnihao/zulip,dattatreya303/zulip,qq1012803704/zulip,aps-sids/zulip,eeshangarg/zulip,Diptanshu8/zulip,praveenaki/zulip,sonali0901/zulip,nicholasbs/zulip,pradiptad/zulip,xuanhan863/zulip,zwily/zulip,brockwhittaker/zulip,bssrdf/zulip,KJin99/zulip,Suninus/zulip,showell/zulip,zhaoweigg/zulip,brainwane/zulip,eeshangarg/zulip,MayB/zulip,saitodisse/zulip,rht/zulip,peiwei/zulip,hayderimran7/zulip,yuvipanda/zulip,schatt/zulip,yocome/zulip,hengqujushi/zulip,dattatreya303/zulip,ApsOps/zulip,thomasboyt/zulip,Vallher/zulip,amallia/zulip,TigorC/zulip,developerfm/zulip,vikas-parashar/zulip,MariaFaBella85/zulip,tbutter/zulip,jerryge/zulip,andersk/zulip,hj3938/zulip,ufosky-server/zulip,vakila/zulip,alliejones/zulip,deer-hope/zulip,deer-hope/zulip,atomic-labs/zulip,MayB/zulip,ryansnowboarder/zulip,joshisa/zulip,ipernet/zulip,verma-varsha/zulip,mohsenSy/zulip,dotcool/zulip,technicalpickles/zulip,saitodisse/zulip,shubhamdhama/zulip,aps-sids/zulip,firstblade/zulip,zhaoweigg/zulip,swinghu/zulip,JPJPJPOPOP/zulip,avastu/zulip,yocome/zulip,Galexrt/zulip,technicalpickles/zulip,hengqujushi/zulip,johnnygaddarr/zulip,Gabriel0402/zulip,vabs22/zulip,Gabriel0402/zulip,mansilladev/zulip,reyha/zulip,MariaFaBella85/zulip,DazWorrall/zulip,vaidap/zulip,tommyip/zulip,DazWorrall/zulip,lfranchi/zulip,Jianchun1/zulip,TigorC/zulip,Suninus/zulip,Cheppers/zulip,udxxabp/zulip,Juanvulcano/zulip,zulip/zulip,ikasumiwt/zulip,vabs22/zulip,gkotian/zulip,gkotian/zulip,vikas-parashar/zulip,fw1121/zulip,RobotCaleb/zulip,eastlhu/zulip,lfranchi/zulip,SmartPeople/zulip,schatt/zulip,swinghu/zulip,praveenaki/zulip,ericzhou2008/zulip,aakash-cr7/zulip,developerfm/zulip,jackrzhang/zulip,vabs22/zulip,eeshangarg/zulip,wweiradio/zulip,LeeRisk/zulip,Batterfii/zulip,ufosky-server/zulip,amyliu345/zulip,calvinleenyc/zulip,bastianh/zulip,Diptanshu8/zulip,krtkmj/zulip,thomasboyt/zulip,timabbott/zulip,hustlzp/zulip,willingc/zulip,developerfm/zulip,Juanvulcano/zulip,atomic-labs/zulip,umkay/zulip,hafeez3000/zulip,amyliu345/zulip
,isht3/zulip,stamhe/zulip,noroot/zulip,kokoar/zulip,umkay/zulip,wavelets/zulip,peguin40/zulip,dhcrzf/zulip,peiwei/zulip,amyliu345/zulip,ufosky-server/zulip,zhaoweigg/zulip,kokoar/zulip,m1ssou/zulip,tommyip/zulip,kaiyuanheshang/zulip,glovebx/zulip,jimmy54/zulip,arpitpanwar/zulip,esander91/zulip,Gabriel0402/zulip,nicholasbs/zulip,sharmaeklavya2/zulip,yuvipanda/zulip,paxapy/zulip,thomasboyt/zulip,udxxabp/zulip,EasonYi/zulip,shubhamdhama/zulip,praveenaki/zulip,mahim97/zulip,proliming/zulip,MayB/zulip,MariaFaBella85/zulip,KJin99/zulip,kou/zulip,xuxiao/zulip,hengqujushi/zulip,krtkmj/zulip,rht/zulip,MariaFaBella85/zulip,peguin40/zulip,JanzTam/zulip,adnanh/zulip,johnnygaddarr/zulip,zwily/zulip,Vallher/zulip,littledogboy/zulip,sup95/zulip,akuseru/zulip,yuvipanda/zulip,dnmfarrell/zulip,udxxabp/zulip,RobotCaleb/zulip,christi3k/zulip,christi3k/zulip,babbage/zulip,karamcnair/zulip,gkotian/zulip,so0k/zulip,zulip/zulip,reyha/zulip,rishig/zulip,bluesea/zulip,voidException/zulip,jimmy54/zulip,Jianchun1/zulip,jphilipsen05/zulip,andersk/zulip,vikas-parashar/zulip,suxinde2009/zulip,Frouk/zulip,zacps/zulip,gkotian/zulip,hayderimran7/zulip,dotcool/zulip,jimmy54/zulip,yocome/zulip,glovebx/zulip,j831/zulip,dnmfarrell/zulip,reyha/zulip,ufosky-server/zulip,ashwinirudrappa/zulip,xuxiao/zulip,Galexrt/zulip,yocome/zulip,atomic-labs/zulip,showell/zulip,Frouk/zulip,pradiptad/zulip,suxinde2009/zulip,themass/zulip,adnanh/zulip,ufosky-server/zulip,adnanh/zulip,jackrzhang/zulip,jessedhillon/zulip,aps-sids/zulip,jessedhillon/zulip,wdaher/zulip,KingxBanana/zulip,JPJPJPOPOP/zulip,hafeez3000/zulip,amanharitsh123/zulip,Frouk/zulip,vaidap/zulip,cosmicAsymmetry/zulip,suxinde2009/zulip,seapasulli/zulip,ericzhou2008/zulip,timabbott/zulip,rht/zulip,zacps/zulip,samatdav/zulip,deer-hope/zulip,wweiradio/zulip,johnnygaddarr/zulip,tommyip/zulip,firstblade/zulip,punchagan/zulip,xuanhan863/zulip,vaidap/zulip,jainayush975/zulip,tbutter/zulip,fw1121/zulip,wavelets/zulip,themass/zulip,jonesgithub/zulip,tdr130/zulip,shrikrishnaholla/zulip,zacps/zulip,natanovia/zulip,m1ssou/zulip,joshisa/zulip,huangkebo/zulip,guiquanz/zulip,adnanh/zulip,shrikrishnaholla/zulip,tiansiyuan/zulip,bowlofstew/zulip,vakila/zulip
|
Add command-line tool to profile get_old_messages requests.
(imported from commit bd7fc27b0c6fc1ae4f82bb74763736f9163b90bf)
|
from __future__ import absolute_import
from optparse import make_option
from django.core.management.base import BaseCommand
from confirmation.models import Confirmation
from zephyr.models import get_user_profile_by_email, UserMessage
from zephyr.views import get_old_messages_backend
import cProfile
import time
import logging
from zephyr.middleware import LogRequests
request_logger = LogRequests()
class MockSession(object):
def __init__(self):
self.modified = False
class MockRequest(object):
def __init__(self, email):
self.user = get_user_profile_by_email(email)
self.path = '/'
self.method = "POST"
self.META = {"REMOTE_ADDR": "127.0.0.1"}
self.REQUEST = {"anchor": UserMessage.objects.filter(user_profile=self.user).order_by("-message")[200].message_id,
"num_before": 1200,
"num_after": 200}
self.GET = {}
self.session = MockSession()
def get_full_path(self):
return self.path
def profile_request(request):
request_logger.process_request(request)
prof = cProfile.Profile()
prof.enable()
ret = get_old_messages_backend(request, request.user,
apply_markdown=True)
prof.disable()
prof.dump_stats("/tmp/profile.data")
request_logger.process_response(request, ret)
logging.info("Profiling data written to /tmp/profile.data")
return ret
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--email', action='store'),
)
def handle(self, *args, **options):
profile_request(MockRequest(options["email"]))
|
<commit_before><commit_msg>Add command-line tool to profile get_old_messages requests.
(imported from commit bd7fc27b0c6fc1ae4f82bb74763736f9163b90bf)<commit_after>
|
from __future__ import absolute_import
from optparse import make_option
from django.core.management.base import BaseCommand
from confirmation.models import Confirmation
from zephyr.models import get_user_profile_by_email, UserMessage
from zephyr.views import get_old_messages_backend
import cProfile
import time
import logging
from zephyr.middleware import LogRequests
request_logger = LogRequests()
class MockSession(object):
def __init__(self):
self.modified = False
class MockRequest(object):
def __init__(self, email):
self.user = get_user_profile_by_email(email)
self.path = '/'
self.method = "POST"
self.META = {"REMOTE_ADDR": "127.0.0.1"}
self.REQUEST = {"anchor": UserMessage.objects.filter(user_profile=self.user).order_by("-message")[200].message_id,
"num_before": 1200,
"num_after": 200}
self.GET = {}
self.session = MockSession()
def get_full_path(self):
return self.path
def profile_request(request):
request_logger.process_request(request)
prof = cProfile.Profile()
prof.enable()
ret = get_old_messages_backend(request, request.user,
apply_markdown=True)
prof.disable()
prof.dump_stats("/tmp/profile.data")
request_logger.process_response(request, ret)
logging.info("Profiling data written to /tmp/profile.data")
return ret
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--email', action='store'),
)
def handle(self, *args, **options):
profile_request(MockRequest(options["email"]))
|
Add command-line tool to profile get_old_messages requests.
(imported from commit bd7fc27b0c6fc1ae4f82bb74763736f9163b90bf)from __future__ import absolute_import
from optparse import make_option
from django.core.management.base import BaseCommand
from confirmation.models import Confirmation
from zephyr.models import get_user_profile_by_email, UserMessage
from zephyr.views import get_old_messages_backend
import cProfile
import time
import logging
from zephyr.middleware import LogRequests
request_logger = LogRequests()
class MockSession(object):
def __init__(self):
self.modified = False
class MockRequest(object):
def __init__(self, email):
self.user = get_user_profile_by_email(email)
self.path = '/'
self.method = "POST"
self.META = {"REMOTE_ADDR": "127.0.0.1"}
self.REQUEST = {"anchor": UserMessage.objects.filter(user_profile=self.user).order_by("-message")[200].message_id,
"num_before": 1200,
"num_after": 200}
self.GET = {}
self.session = MockSession()
def get_full_path(self):
return self.path
def profile_request(request):
request_logger.process_request(request)
prof = cProfile.Profile()
prof.enable()
ret = get_old_messages_backend(request, request.user,
apply_markdown=True)
prof.disable()
prof.dump_stats("/tmp/profile.data")
request_logger.process_response(request, ret)
logging.info("Profiling data written to /tmp/profile.data")
return ret
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--email', action='store'),
)
def handle(self, *args, **options):
profile_request(MockRequest(options["email"]))
|
<commit_before><commit_msg>Add command-line tool to profile get_old_messages requests.
(imported from commit bd7fc27b0c6fc1ae4f82bb74763736f9163b90bf)<commit_after>from __future__ import absolute_import
from optparse import make_option
from django.core.management.base import BaseCommand
from confirmation.models import Confirmation
from zephyr.models import get_user_profile_by_email, UserMessage
from zephyr.views import get_old_messages_backend
import cProfile
import time
import logging
from zephyr.middleware import LogRequests
request_logger = LogRequests()
class MockSession(object):
def __init__(self):
self.modified = False
class MockRequest(object):
def __init__(self, email):
self.user = get_user_profile_by_email(email)
self.path = '/'
self.method = "POST"
self.META = {"REMOTE_ADDR": "127.0.0.1"}
self.REQUEST = {"anchor": UserMessage.objects.filter(user_profile=self.user).order_by("-message")[200].message_id,
"num_before": 1200,
"num_after": 200}
self.GET = {}
self.session = MockSession()
def get_full_path(self):
return self.path
def profile_request(request):
request_logger.process_request(request)
prof = cProfile.Profile()
prof.enable()
ret = get_old_messages_backend(request, request.user,
apply_markdown=True)
prof.disable()
prof.dump_stats("/tmp/profile.data")
request_logger.process_response(request, ret)
logging.info("Profiling data written to /tmp/profile.data")
return ret
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--email', action='store'),
)
def handle(self, *args, **options):
profile_request(MockRequest(options["email"]))
|
|
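A note on consuming the output of the profiling command above: prof.dump_stats() writes a binary stats file, so the usual way to read /tmp/profile.data afterwards is the standard-library pstats module. A minimal sketch (the sort key and row count are illustrative choices, not part of the original tool):

import pstats

# Load the file written by prof.dump_stats() and show the 20 most
# expensive calls ordered by cumulative time.
stats = pstats.Stats("/tmp/profile.data")
stats.sort_stats("cumulative").print_stats(20)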
bba6f8aad17719ff909281a62f6d449ebb08d859
|
tests/cmp_mkpy.py
|
tests/cmp_mkpy.py
|
#!/usr/bin/env python
"""Determine which tests are in the directory which should be added to the makefile."""
import os
import sys
import re
import glob
def main():
"""Compare the tests in this directory to the tests in the makefile."""
tests_cwd = set(glob.glob('*.py'))
tests_mk = _get_makefile_tests()
_prt_tests_to_add_to_mk(tests_cwd, tests_mk)
def _prt_tests_to_add_to_mk(tests_cwd, tests_mk):
"""Print tests to add to the makefile."""
tests = tests_cwd.symmetric_difference(tests_mk)
exclude = set([__file__[2:], "test_goea_errors.py"])
if tests:
sys.stdout.write("ADD THESE TESTS TO THE MAKEFILE:\n")
for test in tests:
if test not in exclude:
sys.stdout.write("\t$(PYTHON) {TEST}\n".format(TEST=test))
def _get_makefile_tests():
"""Get the list of tests in the makefile."""
fin_makefile = "makefile"
cwd = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(cwd, fin_makefile)) as ifstrm:
tests = set()
for line in ifstrm:
if line[:11] == "\t$(PYTHON) ":
mtch = re.match(r'(\S+.py)', line[11:])
if mtch:
tests.add(mtch.group(1))
sys.stdout.write("READ: {MK}\n".format(MK=fin_makefile))
return tests
if __name__ == '__main__':
main()
|
Determine if tests are in test dir, but not in test/makefile
|
Determine if tests are in test dir, but not in test/makefile
|
Python
|
bsd-2-clause
|
lileiting/goatools,tanghaibao/goatools,tanghaibao/goatools,lileiting/goatools
|
Determine if tests are in test dir, but not in test/makefile
|
#!/usr/bin/env python
"""Determine which tests are in the directory which should be added to the makefile."""
import os
import sys
import re
import glob
def main():
"""Compare the tests in this directory to the tests in the makefile."""
tests_cwd = set(glob.glob('*.py'))
tests_mk = _get_makefile_tests()
_prt_tests_to_add_to_mk(tests_cwd, tests_mk)
def _prt_tests_to_add_to_mk(tests_cwd, tests_mk):
"""Print tests to add to the makefile."""
tests = tests_cwd.symmetric_difference(tests_mk)
exclude = set([__file__[2:], "test_goea_errors.py"])
if tests:
sys.stdout.write("ADD THESE TESTS TO THE MAKEFILE:\n")
for test in tests:
if test not in exclude:
sys.stdout.write("\t$(PYTHON) {TEST}\n".format(TEST=test))
def _get_makefile_tests():
"""Get the list of tests in the makefile."""
fin_makefile = "makefile"
cwd = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(cwd, fin_makefile)) as ifstrm:
tests = set()
for line in ifstrm:
if line[:11] == "\t$(PYTHON) ":
mtch = re.match(r'(\S+.py)', line[11:])
if mtch:
tests.add(mtch.group(1))
sys.stdout.write("READ: {MK}\n".format(MK=fin_makefile))
return tests
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Determine if tests are in test dir, but not in test/makefile<commit_after>
|
#!/usr/bin/env python
"""Determine which tests are in the directory which should be added to the makefile."""
import os
import sys
import re
import glob
def main():
"""Compare the tests in this directory to the tests in the makefile."""
tests_cwd = set(glob.glob('*.py'))
tests_mk = _get_makefile_tests()
_prt_tests_to_add_to_mk(tests_cwd, tests_mk)
def _prt_tests_to_add_to_mk(tests_cwd, tests_mk):
"""Print tests to add to the makefile."""
tests = tests_cwd.symmetric_difference(tests_mk)
exclude = set([__file__[2:], "test_goea_errors.py"])
if tests:
sys.stdout.write("ADD THESE TESTS TO THE MAKEFILE:\n")
for test in tests:
if test not in exclude:
sys.stdout.write("\t$(PYTHON) {TEST}\n".format(TEST=test))
def _get_makefile_tests():
"""Get the list of tests in the makefile."""
fin_makefile = "makefile"
cwd = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(cwd, fin_makefile)) as ifstrm:
tests = set()
for line in ifstrm:
if line[:11] == "\t$(PYTHON) ":
mtch = re.match(r'(\S+.py)', line[11:])
if mtch:
tests.add(mtch.group(1))
sys.stdout.write("READ: {MK}\n".format(MK=fin_makefile))
return tests
if __name__ == '__main__':
main()
|
Determine if tests are in test dir, but not in test/makefile#!/usr/bin/env python
"""Determine which tests are in the directory which should be added to the makefile."""
import os
import sys
import re
import glob
def main():
"""Compare the tests in this directory to the tests in the makefile."""
tests_cwd = set(glob.glob('*.py'))
tests_mk = _get_makefile_tests()
_prt_tests_to_add_to_mk(tests_cwd, tests_mk)
def _prt_tests_to_add_to_mk(tests_cwd, tests_mk):
"""Print tests to add to the makefile."""
tests = tests_cwd.symmetric_difference(tests_mk)
exclude = set([__file__[2:], "test_goea_errors.py"])
if tests:
sys.stdout.write("ADD THESE TESTS TO THE MAKEFILE:\n")
for test in tests:
if test not in exclude:
sys.stdout.write("\t$(PYTHON) {TEST}\n".format(TEST=test))
def _get_makefile_tests():
"""Get the list of tests in the makefile."""
fin_makefile = "makefile"
cwd = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(cwd, fin_makefile)) as ifstrm:
tests = set()
for line in ifstrm:
if line[:11] == "\t$(PYTHON) ":
mtch = re.match(r'(\S+.py)', line[11:])
if mtch:
tests.add(mtch.group(1))
sys.stdout.write("READ: {MK}\n".format(MK=fin_makefile))
return tests
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Determine if tests are in test dir, but not in test/makefile<commit_after>#!/usr/bin/env python
"""Determine which tests are in the directory which should be added to the makefile."""
import os
import sys
import re
import glob
def main():
"""Compare the tests in this directory to the tests in the makefile."""
tests_cwd = set(glob.glob('*.py'))
tests_mk = _get_makefile_tests()
_prt_tests_to_add_to_mk(tests_cwd, tests_mk)
def _prt_tests_to_add_to_mk(tests_cwd, tests_mk):
"""Print tests to add to the makefile."""
tests = tests_cwd.symmetric_difference(tests_mk)
exclude = set([__file__[2:], "test_goea_errors.py"])
if tests:
sys.stdout.write("ADD THESE TESTS TO THE MAKEFILE:\n")
for test in tests:
if test not in exclude:
sys.stdout.write("\t$(PYTHON) {TEST}\n".format(TEST=test))
def _get_makefile_tests():
"""Get the list of tests in the makefile."""
fin_makefile = "makefile"
cwd = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(cwd, fin_makefile)) as ifstrm:
tests = set()
for line in ifstrm:
if line[:11] == "\t$(PYTHON) ":
mtch = re.match(r'(\S+.py)', line[11:])
if mtch:
tests.add(mtch.group(1))
sys.stdout.write("READ: {MK}\n".format(MK=fin_makefile))
return tests
if __name__ == '__main__':
main()
|
|
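The makefile parser in the record above only counts lines that start with a tab followed by "$(PYTHON) ". A small sketch of that match logic on an illustrative makefile line (the test name and flag are made up):

import re

line = "\t$(PYTHON) test_example.py --verbose\n"  # hypothetical makefile entry
if line[:11] == "\t$(PYTHON) ":
    mtch = re.match(r'(\S+.py)', line[11:])
    if mtch:
        print(mtch.group(1))  # -> test_example.py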
039001c71ce5b4eb4eb7796e5ee56e0ee459687b
|
tests/conftest.py
|
tests/conftest.py
|
def pytest_configure():
from django.conf import settings
settings.configure(
DEBUG_PROPAGATE_EXCEPTIONS=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
},
SITE_ID=1,
SECRET_KEY='not very secret in tests',
USE_I18N=True,
USE_L10N=True,
STATIC_URL='/static/',
ROOT_URLCONF='tests.urls',
TEMPLATE_LOADERS=(
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
),
MIDDLEWARE_CLASSES=(
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
),
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'tests',
),
PASSWORD_HASHERS=(
'django.contrib.auth.hashers.MD5PasswordHasher',
),
)
try:
import django
django.setup()
except AttributeError:
pass
|
Add conf test for django settings
|
Add conf test for django settings
|
Python
|
mit
|
NorakGithub/django-excel-tools
|
Add conf test for django settings
|
def pytest_configure():
from django.conf import settings
settings.configure(
DEBUG_PROPAGATE_EXCEPTIONS=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
},
SITE_ID=1,
SECRET_KEY='not very secret in tests',
USE_I18N=True,
USE_L10N=True,
STATIC_URL='/static/',
ROOT_URLCONF='tests.urls',
TEMPLATE_LOADERS=(
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
),
MIDDLEWARE_CLASSES=(
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
),
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'tests',
),
PASSWORD_HASHERS=(
'django.contrib.auth.hashers.MD5PasswordHasher',
),
)
try:
import django
django.setup()
except AttributeError:
pass
|
<commit_before><commit_msg>Add conf test for django settings<commit_after>
|
def pytest_configure():
from django.conf import settings
settings.configure(
DEBUG_PROPAGATE_EXCEPTIONS=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
},
SITE_ID=1,
SECRET_KEY='not very secret in tests',
USE_I18N=True,
USE_L10N=True,
STATIC_URL='/static/',
ROOT_URLCONF='tests.urls',
TEMPLATE_LOADERS=(
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
),
MIDDLEWARE_CLASSES=(
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
),
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'tests',
),
PASSWORD_HASHERS=(
'django.contrib.auth.hashers.MD5PasswordHasher',
),
)
try:
import django
django.setup()
except AttributeError:
pass
|
Add conf test for django settingsdef pytest_configure():
from django.conf import settings
settings.configure(
DEBUG_PROPAGATE_EXCEPTIONS=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
},
SITE_ID=1,
SECRET_KEY='not very secret in tests',
USE_I18N=True,
USE_L10N=True,
STATIC_URL='/static/',
ROOT_URLCONF='tests.urls',
TEMPLATE_LOADERS=(
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
),
MIDDLEWARE_CLASSES=(
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
),
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'tests',
),
PASSWORD_HASHERS=(
'django.contrib.auth.hashers.MD5PasswordHasher',
),
)
try:
import django
django.setup()
except AttributeError:
pass
|
<commit_before><commit_msg>Add conf test for django settings<commit_after>def pytest_configure():
from django.conf import settings
settings.configure(
DEBUG_PROPAGATE_EXCEPTIONS=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
},
SITE_ID=1,
SECRET_KEY='not very secret in tests',
USE_I18N=True,
USE_L10N=True,
STATIC_URL='/static/',
ROOT_URLCONF='tests.urls',
TEMPLATE_LOADERS=(
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
),
MIDDLEWARE_CLASSES=(
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
),
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'tests',
),
PASSWORD_HASHERS=(
'django.contrib.auth.hashers.MD5PasswordHasher',
),
)
try:
import django
django.setup()
except AttributeError:
pass
|
|
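Because pytest_configure runs before test modules are imported, ordinary Django settings access works inside the test files without a DJANGO_SETTINGS_MODULE. A minimal sketch of a test that relies on that (the assertions simply mirror values set in the conftest above):

from django.conf import settings

def test_settings_are_configured():
    # settings.configure() in conftest.py ran during pytest start-up,
    # so this access does not raise ImproperlyConfigured.
    assert settings.SITE_ID == 1
    assert "tests" in settings.INSTALLED_APPS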
a3d35b7e654a3cfd84a442396b470d19212d9b26
|
src/proposals/management/commands/loadproposals.py
|
src/proposals/management/commands/loadproposals.py
|
import json
from django.apps import apps
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
User = get_user_model()
class Command(BaseCommand):
help = 'Load talk proposals from data dumped by `manage.py dumpdata`.'
def add_arguments(self, parser):
parser.add_argument('filename', help='Name of file to load data from')
def handle(self, *args, filename, **options):
with open(filename) as f:
data = json.load(f)
for dataset in data:
model = apps.get_model(*dataset['model'].split('.'))
fields = dataset.pop('fields', {})
submitter = fields.pop('submitter', None)
if submitter is not None:
try:
submitter = User.objects.get(pk=submitter)
except User.DoesNotExist:
submitter = User.objects.first()
fields['submitter'] = submitter
model.objects.update_or_create(fields, pk=dataset['pk'])
|
Add command to load proposals from dumped data
|
Add command to load proposals from dumped data
|
Python
|
mit
|
pycontw/pycontw2016,pycontw/pycontw2016,pycontw/pycontw2016,pycontw/pycontw2016
|
Add command to load proposals from dumped data
|
import json
from django.apps import apps
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
User = get_user_model()
class Command(BaseCommand):
help = 'Load talk proposals from data dumped by `manage.py dumpdata`.'
def add_arguments(self, parser):
parser.add_argument('filename', help='Name of file to load data from')
def handle(self, *args, filename, **options):
with open(filename) as f:
data = json.load(f)
for dataset in data:
model = apps.get_model(*dataset['model'].split('.'))
fields = dataset.pop('fields', {})
submitter = fields.pop('submitter', None)
if submitter is not None:
try:
submitter = User.objects.get(pk=submitter)
except User.DoesNotExist:
submitter = User.objects.first()
fields['submitter'] = submitter
model.objects.update_or_create(fields, pk=dataset['pk'])
|
<commit_before><commit_msg>Add command to load proposals from dumped data<commit_after>
|
import json
from django.apps import apps
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
User = get_user_model()
class Command(BaseCommand):
help = 'Load talk proposals from data dumped by `manage.py dumpdata`.'
def add_arguments(self, parser):
parser.add_argument('filename', help='Name of file to load data from')
def handle(self, *args, filename, **options):
with open(filename) as f:
data = json.load(f)
for dataset in data:
model = apps.get_model(*dataset['model'].split('.'))
fields = dataset.pop('fields', {})
submitter = fields.pop('submitter', None)
if submitter is not None:
try:
submitter = User.objects.get(pk=submitter)
except User.DoesNotExist:
submitter = User.objects.first()
fields['submitter'] = submitter
model.objects.update_or_create(fields, pk=dataset['pk'])
|
Add command to load proposals from dumped dataimport json
from django.apps import apps
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
User = get_user_model()
class Command(BaseCommand):
help = 'Load talk proposals from data dumped by `manage.py dumpdata`.'
def add_arguments(self, parser):
parser.add_argument('filename', help='Name of file to load data from')
def handle(self, *args, filename, **options):
with open(filename) as f:
data = json.load(f)
for dataset in data:
model = apps.get_model(*dataset['model'].split('.'))
fields = dataset.pop('fields', {})
submitter = fields.pop('submitter', None)
if submitter is not None:
try:
submitter = User.objects.get(pk=submitter)
except User.DoesNotExist:
submitter = User.objects.first()
fields['submitter'] = submitter
model.objects.update_or_create(fields, pk=dataset['pk'])
|
<commit_before><commit_msg>Add command to load proposals from dumped data<commit_after>import json
from django.apps import apps
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
User = get_user_model()
class Command(BaseCommand):
help = 'Load talk proposals from data dumped by `manage.py dumpdata`.'
def add_arguments(self, parser):
parser.add_argument('filename', help='Name of file to load data from')
def handle(self, *args, filename, **options):
with open(filename) as f:
data = json.load(f)
for dataset in data:
model = apps.get_model(*dataset['model'].split('.'))
fields = dataset.pop('fields', {})
submitter = fields.pop('submitter', None)
if submitter is not None:
try:
submitter = User.objects.get(pk=submitter)
except User.DoesNotExist:
submitter = User.objects.first()
fields['submitter'] = submitter
model.objects.update_or_create(fields, pk=dataset['pk'])
|
|
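The command above expects the JSON layout produced by manage.py dumpdata: a list of objects carrying "model", "pk" and "fields" keys. A sketch of a matching input file (the model label and field values are illustrative, not taken from the project):

import json

sample = [
    {
        "model": "proposals.talkproposal",   # hypothetical app.model label
        "pk": 42,
        "fields": {"title": "Example talk", "submitter": 7},
    },
]
with open("proposals.json", "w") as f:
    json.dump(sample, f)
# then: python manage.py loadproposals proposals.json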
ce5fbcfdac8e8ba5bf85f48ed9a87553a621b34a
|
scripts/remove_duplicate_preprint_logs.py
|
scripts/remove_duplicate_preprint_logs.py
|
import sys
import logging
from modularodm import Q
from framework.transactions.context import TokuTransaction
from scripts import utils as script_utils
from website.app import init_app
from website.project.model import Node, NodeLog
logger = logging.getLogger(__name__)
# This is where all your migration log will go
def do_migration():
dupe_nodes = [n for n in Node.find(Q('_id', 'in', list(set([l.node._id for l in NodeLog.find(Q('action', 'eq', 'preprint_license_updated'))])))) if NodeLog.find(Q('action', 'eq', 'preprint_license_updated') & Q('node', 'eq', n._id)).count() > 1]
logger.info('Found {} nodes with multiple preprint_license_updated logs'.format(len(dupe_nodes)))
for node in dupe_nodes:
preprint_license_updated_logs = [log for log in node.logs if log.action == 'preprint_license_updated']
log = preprint_license_updated_logs.pop()
while(preprint_license_updated_logs):
next_log = preprint_license_updated_logs.pop()
timedelta = log.date - next_log.date
if timedelta.seconds < 1:
logger.info(
'Hiding duplicate preprint_license_updated log with ID {} from node {}, timedelta was {}'.format(
log._id, node._id, timedelta
)
)
log.should_hide = True
log.save()
else:
logger.info(
'Skipping preprint_license_updated log with ID {} from node {}, timedelta was {}'.format(
log._id, node._id, timedelta
)
)
log = next_log
def main(dry=True):
init_app(set_backends=True, routes=False) # Sets the storage backends on all models
# Start a transaction that will be rolled back if any exceptions are un
with TokuTransaction():
do_migration()
if dry:
# When running in dry mode force the transaction to rollback
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
# If we're not running in dry mode log everything to a file
script_utils.add_file_logger(logger, __file__)
# Allow setting the log level just by appending the level to the command
if 'debug' in sys.argv:
logger.setLevel(logging.DEBUG)
elif 'warning' in sys.argv:
logger.setLevel(logging.WARNING)
elif 'info' in sys.argv:
logger.setLevel(logging.INFO)
elif 'error' in sys.argv:
logger.setLevel(logging.ERROR)
# Finally run the migration
main(dry=dry)
|
Add script to remove duplicate preprint logs.
|
Add script to remove duplicate preprint logs.
|
Python
|
apache-2.0
|
mluo613/osf.io,mfraezz/osf.io,Nesiehr/osf.io,pattisdr/osf.io,mattclark/osf.io,felliott/osf.io,cwisecarver/osf.io,HalcyonChimera/osf.io,icereval/osf.io,baylee-d/osf.io,mfraezz/osf.io,caseyrollins/osf.io,cslzchen/osf.io,mfraezz/osf.io,TomBaxter/osf.io,acshi/osf.io,laurenrevere/osf.io,mattclark/osf.io,Nesiehr/osf.io,pattisdr/osf.io,HalcyonChimera/osf.io,felliott/osf.io,acshi/osf.io,sloria/osf.io,chrisseto/osf.io,mfraezz/osf.io,brianjgeiger/osf.io,erinspace/osf.io,caseyrollins/osf.io,cslzchen/osf.io,caneruguz/osf.io,alexschiller/osf.io,erinspace/osf.io,alexschiller/osf.io,sloria/osf.io,CenterForOpenScience/osf.io,CenterForOpenScience/osf.io,hmoco/osf.io,aaxelb/osf.io,cslzchen/osf.io,laurenrevere/osf.io,chennan47/osf.io,chrisseto/osf.io,crcresearch/osf.io,adlius/osf.io,brianjgeiger/osf.io,alexschiller/osf.io,aaxelb/osf.io,crcresearch/osf.io,monikagrabowska/osf.io,cwisecarver/osf.io,cwisecarver/osf.io,monikagrabowska/osf.io,leb2dg/osf.io,crcresearch/osf.io,icereval/osf.io,binoculars/osf.io,Nesiehr/osf.io,icereval/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,CenterForOpenScience/osf.io,mattclark/osf.io,alexschiller/osf.io,caseyrollins/osf.io,saradbowman/osf.io,monikagrabowska/osf.io,brianjgeiger/osf.io,caneruguz/osf.io,hmoco/osf.io,cslzchen/osf.io,baylee-d/osf.io,acshi/osf.io,mluo613/osf.io,acshi/osf.io,monikagrabowska/osf.io,sloria/osf.io,aaxelb/osf.io,adlius/osf.io,binoculars/osf.io,chrisseto/osf.io,mluo613/osf.io,saradbowman/osf.io,hmoco/osf.io,alexschiller/osf.io,cwisecarver/osf.io,mluo613/osf.io,caneruguz/osf.io,leb2dg/osf.io,Johnetordoff/osf.io,pattisdr/osf.io,TomBaxter/osf.io,aaxelb/osf.io,chennan47/osf.io,baylee-d/osf.io,hmoco/osf.io,erinspace/osf.io,HalcyonChimera/osf.io,acshi/osf.io,adlius/osf.io,felliott/osf.io,caneruguz/osf.io,leb2dg/osf.io,leb2dg/osf.io,binoculars/osf.io,Johnetordoff/osf.io,monikagrabowska/osf.io,TomBaxter/osf.io,felliott/osf.io,laurenrevere/osf.io,chrisseto/osf.io,CenterForOpenScience/osf.io,chennan47/osf.io,Johnetordoff/osf.io,adlius/osf.io,Nesiehr/osf.io,brianjgeiger/osf.io,mluo613/osf.io
|
Add script to remove duplicate preprint logs.
|
import sys
import logging
from modularodm import Q
from framework.transactions.context import TokuTransaction
from scripts import utils as script_utils
from website.app import init_app
from website.project.model import Node, NodeLog
logger = logging.getLogger(__name__)
# This is where all your migration log will go
def do_migration():
dupe_nodes = [n for n in Node.find(Q('_id', 'in', list(set([l.node._id for l in NodeLog.find(Q('action', 'eq', 'preprint_license_updated'))])))) if NodeLog.find(Q('action', 'eq', 'preprint_license_updated') & Q('node', 'eq', n._id)).count() > 1]
logger.info('Found {} nodes with multiple preprint_license_updated logs'.format(len(dupe_nodes)))
for node in dupe_nodes:
preprint_license_updated_logs = [log for log in node.logs if log.action == 'preprint_license_updated']
log = preprint_license_updated_logs.pop()
while(preprint_license_updated_logs):
next_log = preprint_license_updated_logs.pop()
timedelta = log.date - next_log.date
if timedelta.seconds < 1:
logger.info(
'Hiding duplicate preprint_license_updated log with ID {} from node {}, timedelta was {}'.format(
log._id, node._id, timedelta
)
)
log.should_hide = True
log.save()
else:
logger.info(
'Skipping preprint_license_updated log with ID {} from node {}, timedelta was {}'.format(
log._id, node._id, timedelta
)
)
log = next_log
def main(dry=True):
init_app(set_backends=True, routes=False) # Sets the storage backends on all models
# Start a transaction that will be rolled back if any exceptions are un
with TokuTransaction():
do_migration()
if dry:
# When running in dry mode force the transaction to rollback
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
# If we're not running in dry mode log everything to a file
script_utils.add_file_logger(logger, __file__)
# Allow setting the log level just by appending the level to the command
if 'debug' in sys.argv:
logger.setLevel(logging.DEBUG)
elif 'warning' in sys.argv:
logger.setLevel(logging.WARNING)
elif 'info' in sys.argv:
logger.setLevel(logging.INFO)
elif 'error' in sys.argv:
logger.setLevel(logging.ERROR)
# Finally run the migration
main(dry=dry)
|
<commit_before><commit_msg>Add script to remove duplicate preprint logs.<commit_after>
|
import sys
import logging
from modularodm import Q
from framework.transactions.context import TokuTransaction
from scripts import utils as script_utils
from website.app import init_app
from website.project.model import Node, NodeLog
logger = logging.getLogger(__name__)
# This is where all your migration log will go
def do_migration():
dupe_nodes = [n for n in Node.find(Q('_id', 'in', list(set([l.node._id for l in NodeLog.find(Q('action', 'eq', 'preprint_license_updated'))])))) if NodeLog.find(Q('action', 'eq', 'preprint_license_updated') & Q('node', 'eq', n._id)).count() > 1]
logger.info('Found {} nodes with multiple preprint_license_updated logs'.format(len(dupe_nodes)))
for node in dupe_nodes:
preprint_license_updated_logs = [log for log in node.logs if log.action == 'preprint_license_updated']
log = preprint_license_updated_logs.pop()
while(preprint_license_updated_logs):
next_log = preprint_license_updated_logs.pop()
timedelta = log.date - next_log.date
if timedelta.seconds < 1:
logger.info(
'Hiding duplicate preprint_license_updated log with ID {} from node {}, timedelta was {}'.format(
log._id, node._id, timedelta
)
)
log.should_hide = True
log.save()
else:
logger.info(
'Skipping preprint_license_updated log with ID {} from node {}, timedelta was {}'.format(
log._id, node._id, timedelta
)
)
log = next_log
def main(dry=True):
init_app(set_backends=True, routes=False) # Sets the storage backends on all models
# Start a transaction that will be rolled back if any exceptions are un
with TokuTransaction():
do_migration()
if dry:
# When running in dry mode force the transaction to rollback
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
# If we're not running in dry mode log everything to a file
script_utils.add_file_logger(logger, __file__)
# Allow setting the log level just by appending the level to the command
if 'debug' in sys.argv:
logger.setLevel(logging.DEBUG)
elif 'warning' in sys.argv:
logger.setLevel(logging.WARNING)
elif 'info' in sys.argv:
logger.setLevel(logging.INFO)
elif 'error' in sys.argv:
logger.setLevel(logging.ERROR)
# Finally run the migration
main(dry=dry)
|
Add script to remove duplicate preprint logs.import sys
import logging
from modularodm import Q
from framework.transactions.context import TokuTransaction
from scripts import utils as script_utils
from website.app import init_app
from website.project.model import Node, NodeLog
logger = logging.getLogger(__name__)
# This is where all your migration log will go
def do_migration():
dupe_nodes = [n for n in Node.find(Q('_id', 'in', list(set([l.node._id for l in NodeLog.find(Q('action', 'eq', 'preprint_license_updated'))])))) if NodeLog.find(Q('action', 'eq', 'preprint_license_updated') & Q('node', 'eq', n._id)).count() > 1]
logger.info('Found {} nodes with multiple preprint_license_updated logs'.format(len(dupe_nodes)))
for node in dupe_nodes:
preprint_license_updated_logs = [log for log in node.logs if log.action == 'preprint_license_updated']
log = preprint_license_updated_logs.pop()
while(preprint_license_updated_logs):
next_log = preprint_license_updated_logs.pop()
timedelta = log.date - next_log.date
if timedelta.seconds < 1:
logger.info(
'Hiding duplicate preprint_license_updated log with ID {} from node {}, timedelta was {}'.format(
log._id, node._id, timedelta
)
)
log.should_hide = True
log.save()
else:
logger.info(
'Skipping preprint_license_updated log with ID {} from node {}, timedelta was {}'.format(
log._id, node._id, timedelta
)
)
log = next_log
def main(dry=True):
init_app(set_backends=True, routes=False) # Sets the storage backends on all models
# Start a transaction that will be rolled back if any exceptions are un
with TokuTransaction():
do_migration()
if dry:
# When running in dry mode force the transaction to rollback
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
# If we're not running in dry mode log everything to a file
script_utils.add_file_logger(logger, __file__)
# Allow setting the log level just by appending the level to the command
if 'debug' in sys.argv:
logger.setLevel(logging.DEBUG)
elif 'warning' in sys.argv:
logger.setLevel(logging.WARNING)
elif 'info' in sys.argv:
logger.setLevel(logging.INFO)
elif 'error' in sys.argv:
logger.setLevel(logging.ERROR)
# Finally run the migration
main(dry=dry)
|
<commit_before><commit_msg>Add script to remove duplicate preprint logs.<commit_after>import sys
import logging
from modularodm import Q
from framework.transactions.context import TokuTransaction
from scripts import utils as script_utils
from website.app import init_app
from website.project.model import Node, NodeLog
logger = logging.getLogger(__name__)
# This is where all your migration log will go
def do_migration():
dupe_nodes = [n for n in Node.find(Q('_id', 'in', list(set([l.node._id for l in NodeLog.find(Q('action', 'eq', 'preprint_license_updated'))])))) if NodeLog.find(Q('action', 'eq', 'preprint_license_updated') & Q('node', 'eq', n._id)).count() > 1]
logger.info('Found {} nodes with multiple preprint_license_updated logs'.format(len(dupe_nodes)))
for node in dupe_nodes:
preprint_license_updated_logs = [log for log in node.logs if log.action == 'preprint_license_updated']
log = preprint_license_updated_logs.pop()
while(preprint_license_updated_logs):
next_log = preprint_license_updated_logs.pop()
timedelta = log.date - next_log.date
if timedelta.seconds < 1:
logger.info(
'Hiding duplicate preprint_license_updated log with ID {} from node {}, timedelta was {}'.format(
log._id, node._id, timedelta
)
)
log.should_hide = True
log.save()
else:
logger.info(
'Skipping preprint_license_updated log with ID {} from node {}, timedelta was {}'.format(
log._id, node._id, timedelta
)
)
log = next_log
def main(dry=True):
init_app(set_backends=True, routes=False) # Sets the storage backends on all models
# Start a transaction that will be rolled back if any exceptions are un
with TokuTransaction():
do_migration()
if dry:
# When running in dry mode force the transaction to rollback
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
# If we're not running in dry mode log everything to a file
script_utils.add_file_logger(logger, __file__)
# Allow setting the log level just by appending the level to the command
if 'debug' in sys.argv:
logger.setLevel(logging.DEBUG)
elif 'warning' in sys.argv:
logger.setLevel(logging.WARNING)
elif 'info' in sys.argv:
logger.setLevel(logging.INFO)
elif 'error' in sys.argv:
logger.setLevel(logging.ERROR)
# Finally run the migration
main(dry=dry)
|
|
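One subtlety in the de-duplication script above: timedelta.seconds is only the seconds component of the difference, not the total elapsed time, so the "< 1" check behaves differently from total_seconds() once two logs are more than a day apart (or the delta is negative). A short illustration:

from datetime import timedelta

delta = timedelta(days=1)
print(delta.seconds)          # 0 -- the day component is not included
print(delta.total_seconds())  # 86400.0 -- the full duration in seconds

# "delta.seconds < 1" is therefore True here even though the two dates are
# a full day apart; total_seconds() avoids that edge case if it matters.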
ff459ece8deb05e141cc1055421f88721b16f7d1
|
securedrop/tests/test_unit_crypto_util.py
|
securedrop/tests/test_unit_crypto_util.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
# Set environment variable so config.py uses a test environment
os.environ['SECUREDROP_ENV'] = 'test'
import config
import common
import crypto_util
class TestCryptoUtil(unittest.TestCase):
"""The set of tests for crypto_util.py."""
def setUp(self):
common.shared_setup()
def tearDown(self):
common.shared_teardown()
def test_clean(self):
with self.assertRaises(crypto_util.CryptoException):
crypto_util.clean('foo bar`') # backtick is not currently allowed
with self.assertRaises(crypto_util.CryptoException):
crypto_util.clean('bar baz~') # tilde is not currently allowed
if __name__ == "__main__":
unittest.main(verbosity=2)
|
Add coverage of clean function in crypto_util
|
Add coverage of clean function in crypto_util
|
Python
|
agpl-3.0
|
conorsch/securedrop,heartsucker/securedrop,conorsch/securedrop,micahflee/securedrop,ehartsuyker/securedrop,micahflee/securedrop,ehartsuyker/securedrop,heartsucker/securedrop,garrettr/securedrop,micahflee/securedrop,ageis/securedrop,heartsucker/securedrop,garrettr/securedrop,ageis/securedrop,garrettr/securedrop,conorsch/securedrop,heartsucker/securedrop,ehartsuyker/securedrop,micahflee/securedrop,ageis/securedrop,ageis/securedrop,ehartsuyker/securedrop,conorsch/securedrop,ehartsuyker/securedrop,ehartsuyker/securedrop,garrettr/securedrop,heartsucker/securedrop,conorsch/securedrop
|
Add coverage of clean function in crypto_util
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
# Set environment variable so config.py uses a test environment
os.environ['SECUREDROP_ENV'] = 'test'
import config
import common
import crypto_util
class TestCryptoUtil(unittest.TestCase):
"""The set of tests for crypto_util.py."""
def setUp(self):
common.shared_setup()
def tearDown(self):
common.shared_teardown()
def test_clean(self):
with self.assertRaises(crypto_util.CryptoException):
crypto_util.clean('foo bar`') # backtick is not currently allowed
with self.assertRaises(crypto_util.CryptoException):
crypto_util.clean('bar baz~') # tilde is not currently allowed
if __name__ == "__main__":
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add coverage of clean function in crypto_util<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
# Set environment variable so config.py uses a test environment
os.environ['SECUREDROP_ENV'] = 'test'
import config
import common
import crypto_util
class TestCryptoUtil(unittest.TestCase):
"""The set of tests for crypto_util.py."""
def setUp(self):
common.shared_setup()
def tearDown(self):
common.shared_teardown()
def test_clean(self):
with self.assertRaises(crypto_util.CryptoException):
crypto_util.clean('foo bar`') # backtick is not currently allowed
with self.assertRaises(crypto_util.CryptoException):
crypto_util.clean('bar baz~') # tilde is not currently allowed
if __name__ == "__main__":
unittest.main(verbosity=2)
|
Add coverage of clean function in crypto_util#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
# Set environment variable so config.py uses a test environment
os.environ['SECUREDROP_ENV'] = 'test'
import config
import common
import crypto_util
class TestCryptoUtil(unittest.TestCase):
"""The set of tests for crypto_util.py."""
def setUp(self):
common.shared_setup()
def tearDown(self):
common.shared_teardown()
def test_clean(self):
with self.assertRaises(crypto_util.CryptoException):
crypto_util.clean('foo bar`') # backtick is not currently allowed
with self.assertRaises(crypto_util.CryptoException):
crypto_util.clean('bar baz~') # tilde is not currently allowed
if __name__ == "__main__":
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add coverage of clean function in crypto_util<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
# Set environment variable so config.py uses a test environment
os.environ['SECUREDROP_ENV'] = 'test'
import config
import common
import crypto_util
class TestCryptoUtil(unittest.TestCase):
"""The set of tests for crypto_util.py."""
def setUp(self):
common.shared_setup()
def tearDown(self):
common.shared_teardown()
def test_clean(self):
with self.assertRaises(crypto_util.CryptoException):
crypto_util.clean('foo bar`') # backtick is not currently allowed
with self.assertRaises(crypto_util.CryptoException):
crypto_util.clean('bar baz~') # tilde is not currently allowed
if __name__ == "__main__":
unittest.main(verbosity=2)
|
|
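The test above only pins down the contract that clean() raises CryptoException for a backtick and a tilde. As a toy sketch of that contract (explicitly not SecureDrop's real implementation), a whitelist check would satisfy it:

import string

class CryptoException(Exception):
    pass

ALLOWED = set(string.ascii_letters + string.digits + " ")  # assumed whitelist

def clean(s):
    # Reject any character outside the assumed whitelist, e.g. ` or ~.
    for char in s:
        if char not in ALLOWED:
            raise CryptoException("invalid input: %s" % s)
    return s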
3ac4d3aeee4308d7db09151e340ec02ab4da1403
|
challenge1.py
|
challenge1.py
|
#!/usr/bin/python
file = open("story.txt", "r+")
words = 0
unique = 0
paragraphs = 0
sentences = 0
for word in file.read().split():
words += 1
print words
|
Add challenge word count fragment
|
Add challenge word count fragment
|
Python
|
mit
|
Stahpware/rlp_weekly_challenge_1
|
Add challenge word count fragment
|
#!/usr/bin/python
file = open("story.txt", "r+")
words = 0
unique = 0
paragraphs = 0
sentences = 0
for word in file.read().split():
words += 1
print words
|
<commit_before><commit_msg>Add challenge word count fragment<commit_after>
|
#!/usr/bin/python
file = open("story.txt", "r+")
words = 0
unique = 0
paragraphs = 0
sentences = 0
for word in file.read().split():
words += 1
print words
|
Add challenge word count fragment#!/usr/bin/python
file = open("story.txt", "r+")
words = 0
unique = 0
paragraphs = 0
sentences = 0
for word in file.read().split():
words += 1
print words
|
<commit_before><commit_msg>Add challenge word count fragment<commit_after>#!/usr/bin/python
file = open("story.txt", "r+")
words = 0
unique = 0
paragraphs = 0
sentences = 0
for word in file.read().split():
words += 1
print words
|
|
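The fragment above only fills in the word count; unique, paragraphs and sentences stay at zero. A hypothetical way to complete the remaining counters in the same Python 2 style (the paragraph and sentence splitting rules are assumptions, not part of the challenge text):

#!/usr/bin/python
text = open("story.txt").read()
words = len(text.split())
unique = len(set(word.lower().strip('.,!?;:"') for word in text.split()))
paragraphs = len([p for p in text.split("\n\n") if p.strip()])
sentences = sum(text.count(mark) for mark in ".!?")
print words, unique, paragraphs, sentences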
e87864550eb6d4cee1dc2149e89274d7c6c63a29
|
aids/strings/is_anagram.py
|
aids/strings/is_anagram.py
|
'''
In this module, we determine if two given strings are anagrams
'''
def is_anagram_sort(string_1, string_2):
'''
Return True if the two given strings are anagrams using sorting
'''
return sorted(string_1) == sorted(string_2)
def is_anagram_counter(string_1, string_2):
'''
Return True if the two given strings are anagrams using Counter
'''
from collections import Counter
return Counter(string_1) == Counter(string_2)
def is_anagram(string_1, string_2):
'''
    Return True if the two given strings are anagrams using dictionaries
'''
from collections import defaultdict
if len(string_1) != len(string_2):
return False
char_count = defaultdict(int)
for char in string_1:
char_count[char] += 1
for char in string_2:
char_count[char] -= 1
if char_count[char] < 0:
return False
return True
|
Add function to determine if two strings are anagrams
|
Add function to determine if two strings are anagrams
|
Python
|
mit
|
ueg1990/aids
|
Add function to determine if two strings are anagrams
|
'''
In this module, we determine if two given strings are anagrams
'''
def is_anagram_sort(string_1, string_2):
'''
Return True if the two given strings are anagrams using sorting
'''
return sorted(string_1) == sorted(string_2)
def is_anagram_counter(string_1, string_2):
'''
Return True if the two given strings are anagrams using Counter
'''
from collections import Counter
return Counter(string_1) == Counter(string_2)
def is_anagram(string_1, string_2):
'''
    Return True if the two given strings are anagrams using dictionaries
'''
from collections import defaultdict
if len(string_1) != len(string_2):
return False
char_count = defaultdict(int)
for char in string_1:
char_count[char] += 1
for char in string_2:
char_count[char] -= 1
if char_count[char] < 0:
return False
return True
|
<commit_before><commit_msg>Add function to determine if two strings are anagrams<commit_after>
|
'''
In this module, we determine if two given strings are anagrams
'''
def is_anagram_sort(string_1, string_2):
'''
Return True if the two given strings are anagrams using sorting
'''
return sorted(string_1) == sorted(string_2)
def is_anagram_counter(string_1, string_2):
'''
Return True if the two given strings are anagrams using Counter
'''
from collections import Counter
return Counter(string_1) == Counter(string_2)
def is_anagram(string_1, string_2):
'''
    Return True if the two given strings are anagrams using dictionaries
'''
from collections import defaultdict
if len(string_1) != len(string_2):
return False
char_count = defaultdict(int)
for char in string_1:
char_count[char] += 1
for char in string_2:
char_count[char] -= 1
if char_count[char] < 0:
return False
return True
|
Add function to determine if two strings are anagrams'''
In this module, we determine if two given strings are anagrams
'''
def is_anagram_sort(string_1, string_2):
'''
Return True if the two given strings are anagrams using sorting
'''
return sorted(string_1) == sorted(string_2)
def is_anagram_counter(string_1, string_2):
'''
Return True if the two given strings are anagrams using Counter
'''
from collections import Counter
return Counter(string_1) == Counter(string_2)
def is_anagram(string_1, string_2):
'''
    Return True if the two given strings are anagrams using dictionaries
'''
from collections import defaultdict
if len(string_1) != len(string_2):
return False
char_count = defaultdict(int)
for char in string_1:
char_count[char] += 1
for char in string_2:
char_count[char] -= 1
if char_count[char] < 0:
return False
return True
|
<commit_before><commit_msg>Add function to determine if two strings are anagrams<commit_after>'''
In this module, we determine if two given strings are anagrams
'''
def is_anagram_sort(string_1, string_2):
'''
Return True if the two given strings are anagrams using sorting
'''
return sorted(string_1) == sorted(string_2)
def is_anagram_counter(string_1, string_2):
'''
Return True if the two given strings are anagrams using Counter
'''
from collections import Counter
return Counter(string_1) == Counter(string_2)
def is_anagram(string_1, string_2):
'''
Return True if the two given strings are anagrams using dictonaries
'''
from collections import defaultdict
if len(string_1) != len(string_2):
return False
char_count = defaultdict(int)
for char in string_1:
char_count[char] += 1
for char in string_2:
char_count[char] -= 1
if char_count[char] < 0:
return False
return True
|
|
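A quick usage sketch for the three variants defined above (the import path follows the aids/strings/is_anagram.py location; the sample words are illustrative):

from aids.strings.is_anagram import is_anagram, is_anagram_counter, is_anagram_sort

pairs = [("listen", "silent"), ("binary", "brainy"), ("python", "typhon"), ("foo", "bar")]
for first, second in pairs:
    # All three implementations should agree on every pair.
    assert is_anagram_sort(first, second) == is_anagram_counter(first, second) == is_anagram(first, second)

print(is_anagram("listen", "silent"))  # True
print(is_anagram("foo", "bar"))        # False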
cd24a6de4d7b17105370bf142b5237f9ab90aa09
|
candidates/management/commands/candidates_find_max_person_id.py
|
candidates/management/commands/candidates_find_max_person_id.py
|
from candidates.popit import PopItApiMixin, popit_unwrap_pagination
from django.core.management.base import BaseCommand
class Command(PopItApiMixin, BaseCommand):
def handle(self, **options):
max_person_id = -1
for person in popit_unwrap_pagination(
self.api.persons,
per_page=100
):
person_id = int(person['id'])
max_person_id = max(person_id, max_person_id)
print "Maximum person ID is:", max_person_id
|
Add a helper command to find the maximum person ID
|
Add a helper command to find the maximum person ID
This is useful if by deleting and reimporting data directly in MongoDB
the maximum person ID in the YNMP database gets out of sync: this
command returns the maximum ID, and it's then your reponsibility what to
do about that. (i.e. It doesn't update it in the database for you.)
|
Python
|
agpl-3.0
|
DemocracyClub/yournextrepresentative,neavouli/yournextrepresentative,YoQuieroSaber/yournextrepresentative,openstate/yournextrepresentative,YoQuieroSaber/yournextrepresentative,openstate/yournextrepresentative,YoQuieroSaber/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextmp-popit,mysociety/yournextrepresentative,mysociety/yournextmp-popit,YoQuieroSaber/yournextrepresentative,mhl/yournextmp-popit,datamade/yournextmp-popit,neavouli/yournextrepresentative,mysociety/yournextmp-popit,neavouli/yournextrepresentative,mysociety/yournextrepresentative,datamade/yournextmp-popit,openstate/yournextrepresentative,datamade/yournextmp-popit,mysociety/yournextmp-popit,neavouli/yournextrepresentative,datamade/yournextmp-popit,openstate/yournextrepresentative,mhl/yournextmp-popit,mhl/yournextmp-popit,DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative,YoQuieroSaber/yournextrepresentative,mysociety/yournextrepresentative,datamade/yournextmp-popit,neavouli/yournextrepresentative,mysociety/yournextrepresentative,openstate/yournextrepresentative
|
Add a helper command to find the maximum person ID
This is useful if by deleting and reimporting data directly in MongoDB
the maximum person ID in the YNMP database gets out of sync: this
command returns the maximum ID, and it's then your responsibility what to
do about that. (i.e. It doesn't update it in the database for you.)
|
from candidates.popit import PopItApiMixin, popit_unwrap_pagination
from django.core.management.base import BaseCommand
class Command(PopItApiMixin, BaseCommand):
def handle(self, **options):
max_person_id = -1
for person in popit_unwrap_pagination(
self.api.persons,
per_page=100
):
person_id = int(person['id'])
max_person_id = max(person_id, max_person_id)
print "Maximum person ID is:", max_person_id
|
<commit_before><commit_msg>Add a helper command to find the maximum person ID
This is useful if by deleting and reimporting data directly in MongoDB
the maximum person ID in the YNMP database gets out of sync: this
command returns the maximum ID, and it's then your responsibility what to
do about that. (i.e. It doesn't update it in the database for you.)<commit_after>
|
from candidates.popit import PopItApiMixin, popit_unwrap_pagination
from django.core.management.base import BaseCommand
class Command(PopItApiMixin, BaseCommand):
def handle(self, **options):
max_person_id = -1
for person in popit_unwrap_pagination(
self.api.persons,
per_page=100
):
person_id = int(person['id'])
max_person_id = max(person_id, max_person_id)
print "Maximum person ID is:", max_person_id
|
Add a helper command to find the maximum person ID
This is useful if by deleting and reimporting data directly in MongoDB
the maximum person ID in the YNMP database gets out of sync: this
command returns the maximum ID, and it's then your responsibility what to
do about that. (i.e. It doesn't update it in the database for you.)from candidates.popit import PopItApiMixin, popit_unwrap_pagination
from django.core.management.base import BaseCommand
class Command(PopItApiMixin, BaseCommand):
def handle(self, **options):
max_person_id = -1
for person in popit_unwrap_pagination(
self.api.persons,
per_page=100
):
person_id = int(person['id'])
max_person_id = max(person_id, max_person_id)
print "Maximum person ID is:", max_person_id
|
<commit_before><commit_msg>Add a helper command to find the maximum person ID
This is useful if by deleting and reimporting data directly in MongoDB
the maximum person ID in the YNMP database gets out of sync: this
command returns the maximum ID, and it's then your reponsibility what to
do about that. (i.e. It doesn't update it in the database for you.)<commit_after>from candidates.popit import PopItApiMixin, popit_unwrap_pagination
from django.core.management.base import BaseCommand
class Command(PopItApiMixin, BaseCommand):
def handle(self, **options):
max_person_id = -1
for person in popit_unwrap_pagination(
self.api.persons,
per_page=100
):
person_id = int(person['id'])
max_person_id = max(person_id, max_person_id)
print "Maximum person ID is:", max_person_id
|
|
124b0ae0b1eb1e7e1e4e4dec9b5af8870c0de270
|
oedb_datamodels/versions/46fb02acc3b1_add_meta_tables.py
|
oedb_datamodels/versions/46fb02acc3b1_add_meta_tables.py
|
"""Add meta tables
Revision ID: 46fb02acc3b1
Revises:
Create Date: 2017-11-23 11:08:50.199160
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '46fb02acc3b1'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'_edit_base',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('name', sa.String(50), nullable=False),
sa.Column('description', sa.Unicode(200)),
sa.Column('_id', sa.BigInteger, nullable=False),
sa.Column('_message', sa.Text),
sa.Column('_user', sa.String(50)),
sa.Column('_submitted', sa.TIMESTAMP, default=sa.func.now),
sa.Column('_autocheck', sa.Boolean, default=False),
sa.Column('_humancheck', sa.Boolean, default=False),
sa.Column('_type', sa.String(8)),
sa.Column('_applied', sa.Boolean, default=False),
)
op.create_table(
'api_columns',
sa.Column('column_name', sa.String(50)),
sa.Column('not_null', sa.Boolean),
sa.Column('data_type', sa.String(50)),
sa.Column('new_name', sa.String(50)),
sa.Column('reviewed', sa.Boolean, default=False),
sa.Column('changed', sa.Boolean, default=False),
sa.Column('id', sa.BigInteger, nullable=False, primary_key=True),
sa.Column('c_schema', sa.String(50)),
sa.Column('c_table', sa.String(50))
)
def downgrade():
op.drop_table('_edit_base')
|
Add meta tables to alembic migrations
|
Add meta tables to alembic migrations
|
Python
|
agpl-3.0
|
openego/oeplatform,openego/oeplatform,openego/oeplatform,openego/oeplatform
|
Add meta tables to alembic migrations
|
"""Add meta tables
Revision ID: 46fb02acc3b1
Revises:
Create Date: 2017-11-23 11:08:50.199160
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '46fb02acc3b1'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'_edit_base',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('name', sa.String(50), nullable=False),
sa.Column('description', sa.Unicode(200)),
sa.Column('_id', sa.BigInteger, nullable=False),
sa.Column('_message', sa.Text),
sa.Column('_user', sa.String(50)),
sa.Column('_submitted', sa.TIMESTAMP, default=sa.func.now),
sa.Column('_autocheck', sa.Boolean, default=False),
sa.Column('_humancheck', sa.Boolean, default=False),
sa.Column('_type', sa.String(8)),
sa.Column('_applied', sa.Boolean, default=False),
)
op.create_table(
'api_columns',
sa.Column('column_name', sa.String(50)),
sa.Column('not_null', sa.Boolean),
sa.Column('data_type', sa.String(50)),
sa.Column('new_name', sa.String(50)),
sa.Column('reviewed', sa.Boolean, default=False),
sa.Column('changed', sa.Boolean, default=False),
sa.Column('id', sa.BigInteger, nullable=False, primary_key=True),
sa.Column('c_schema', sa.String(50)),
sa.Column('c_table', sa.String(50))
)
def downgrade():
op.drop_table('_edit_base')
|
<commit_before><commit_msg>Add meta tables to alembic migrations<commit_after>
|
"""Add meta tables
Revision ID: 46fb02acc3b1
Revises:
Create Date: 2017-11-23 11:08:50.199160
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '46fb02acc3b1'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'_edit_base',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('name', sa.String(50), nullable=False),
sa.Column('description', sa.Unicode(200)),
sa.Column('_id', sa.BigInteger, nullable=False),
sa.Column('_message', sa.Text),
sa.Column('_user', sa.String(50)),
sa.Column('_submitted', sa.TIMESTAMP, default=sa.func.now),
sa.Column('_autocheck', sa.Boolean, default=False),
sa.Column('_humancheck', sa.Boolean, default=False),
sa.Column('_type', sa.String(8)),
sa.Column('_applied', sa.Boolean, default=False),
)
op.create_table(
'api_columns',
sa.Column('column_name', sa.String(50)),
sa.Column('not_null', sa.Boolean),
sa.Column('data_type', sa.String(50)),
sa.Column('new_name', sa.String(50)),
sa.Column('reviewed', sa.Boolean, default=False),
sa.Column('changed', sa.Boolean, default=False),
sa.Column('id', sa.BigInteger, nullable=False, primary_key=True),
sa.Column('c_schema', sa.String(50)),
sa.Column('c_table', sa.String(50))
)
def downgrade():
op.drop_table('_edit_base')
|
Add meta tables to alembic migrations"""Add meta tables
Revision ID: 46fb02acc3b1
Revises:
Create Date: 2017-11-23 11:08:50.199160
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '46fb02acc3b1'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'_edit_base',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('name', sa.String(50), nullable=False),
sa.Column('description', sa.Unicode(200)),
sa.Column('_id', sa.BigInteger, nullable=False),
sa.Column('_message', sa.Text),
sa.Column('_user', sa.String(50)),
sa.Column('_submitted', sa.TIMESTAMP, default=sa.func.now),
sa.Column('_autocheck', sa.Boolean, default=False),
sa.Column('_humancheck', sa.Boolean, default=False),
sa.Column('_type', sa.String(8)),
sa.Column('_applied', sa.Boolean, default=False),
)
op.create_table(
'api_columns',
sa.Column('column_name', sa.String(50)),
sa.Column('not_null', sa.Boolean),
sa.Column('data_type', sa.String(50)),
sa.Column('new_name', sa.String(50)),
sa.Column('reviewed', sa.Boolean, default=False),
sa.Column('changed', sa.Boolean, default=False),
sa.Column('id', sa.BigInteger, nullable=False, primary_key=True),
sa.Column('c_schema', sa.String(50)),
sa.Column('c_table', sa.String(50))
)
def downgrade():
op.drop_table('_edit_base')
|
<commit_before><commit_msg>Add meta tables to alembic migrations<commit_after>"""Add meta tables
Revision ID: 46fb02acc3b1
Revises:
Create Date: 2017-11-23 11:08:50.199160
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '46fb02acc3b1'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'_edit_base',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('name', sa.String(50), nullable=False),
sa.Column('description', sa.Unicode(200)),
sa.Column('_id', sa.BigInteger, nullable=False),
sa.Column('_message', sa.Text),
sa.Column('_user', sa.String(50)),
sa.Column('_submitted', sa.TIMESTAMP, default=sa.func.now),
sa.Column('_autocheck', sa.Boolean, default=False),
sa.Column('_humancheck', sa.Boolean, default=False),
sa.Column('_type', sa.String(8)),
sa.Column('_applied', sa.Boolean, default=False),
)
op.create_table(
'api_columns',
sa.Column('column_name', sa.String(50)),
sa.Column('not_null', sa.Boolean),
sa.Column('data_type', sa.String(50)),
sa.Column('new_name', sa.String(50)),
sa.Column('reviewed', sa.Boolean, default=False),
sa.Column('changed', sa.Boolean, default=False),
sa.Column('id', sa.BigInteger, nullable=False, primary_key=True),
sa.Column('c_schema', sa.String(50)),
sa.Column('c_table', sa.String(50))
)
def downgrade():
op.drop_table('_edit_base')
|
|
1b90326eac82dc73195363d4c57c3095ab3e90bc
|
dynamic_dynamodb/tests/test_dynamodb.py
|
dynamic_dynamodb/tests/test_dynamodb.py
|
# -*- coding: utf-8 -*-
""" Test dynamodb utils """
import unittest
from moto import mock_dynamodb2
from boto.dynamodb2.fields import HashKey
from boto.dynamodb2.table import Table
from dynamic_dynamodb.aws import dynamodb
class TestDynamodb(unittest.TestCase):
@mock_dynamodb2
def setUp(self):
super(TestDynamodb, self).setUp()
@mock_dynamodb2
def test_list_no_tables(self):
tables = dynamodb.list_tables()
self.assertEquals([], tables)
@mock_dynamodb2
def test_list_many_tables(self):
for i in range(0,50):
Table.create('test_%s' % i, schema=[HashKey('key'),])
tables = dynamodb.list_tables()
self.assertEquals(50, len(tables))
|
Add basic test to check moto is working as expected
|
Add basic test to check moto is working as expected
|
Python
|
apache-2.0
|
tellybug/dynamic-dynamodb
|
Add basic test to check moto is working as expected
|
# -*- coding: utf-8 -*-
""" Test dynamodb utils """
import unittest
from moto import mock_dynamodb2
from boto.dynamodb2.fields import HashKey
from boto.dynamodb2.table import Table
from dynamic_dynamodb.aws import dynamodb
class TestDynamodb(unittest.TestCase):
@mock_dynamodb2
def setUp(self):
super(TestDynamodb, self).setUp()
@mock_dynamodb2
def test_list_no_tables(self):
tables = dynamodb.list_tables()
self.assertEquals([], tables)
@mock_dynamodb2
def test_list_many_tables(self):
for i in range(0,50):
Table.create('test_%s' % i, schema=[HashKey('key'),])
tables = dynamodb.list_tables()
self.assertEquals(50, len(tables))
|
<commit_before><commit_msg>Add basic test to check moto is working as expected<commit_after>
|
# -*- coding: utf-8 -*-
""" Test dynamodb utils """
import unittest
from moto import mock_dynamodb2
from boto.dynamodb2.fields import HashKey
from boto.dynamodb2.table import Table
from dynamic_dynamodb.aws import dynamodb
class TestDynamodb(unittest.TestCase):
@mock_dynamodb2
def setUp(self):
super(TestDynamodb, self).setUp()
@mock_dynamodb2
def test_list_no_tables(self):
tables = dynamodb.list_tables()
self.assertEquals([], tables)
@mock_dynamodb2
def test_list_many_tables(self):
for i in range(0,50):
Table.create('test_%s' % i, schema=[HashKey('key'),])
tables = dynamodb.list_tables()
self.assertEquals(50, len(tables))
|
Add basic test to check moto is working as expected# -*- coding: utf-8 -*-
""" Test dynamodb utils """
import unittest
from moto import mock_dynamodb2
from boto.dynamodb2.fields import HashKey
from boto.dynamodb2.table import Table
from dynamic_dynamodb.aws import dynamodb
class TestDynamodb(unittest.TestCase):
@mock_dynamodb2
def setUp(self):
super(TestDynamodb, self).setUp()
@mock_dynamodb2
def test_list_no_tables(self):
tables = dynamodb.list_tables()
self.assertEquals([], tables)
@mock_dynamodb2
def test_list_many_tables(self):
for i in range(0,50):
Table.create('test_%s' % i, schema=[HashKey('key'),])
tables = dynamodb.list_tables()
self.assertEquals(50, len(tables))
|
<commit_before><commit_msg>Add basic test to check moto is working as expected<commit_after># -*- coding: utf-8 -*-
""" Test dynamodb utils """
import unittest
from moto import mock_dynamodb2
from boto.dynamodb2.fields import HashKey
from boto.dynamodb2.table import Table
from dynamic_dynamodb.aws import dynamodb
class TestDynamodb(unittest.TestCase):
@mock_dynamodb2
def setUp(self):
super(TestDynamodb, self).setUp()
@mock_dynamodb2
def test_list_no_tables(self):
tables = dynamodb.list_tables()
self.assertEquals([], tables)
@mock_dynamodb2
def test_list_many_tables(self):
for i in range(0,50):
Table.create('test_%s' % i, schema=[HashKey('key'),])
tables = dynamodb.list_tables()
self.assertEquals(50, len(tables))
|
|
ed96c1c5d0e80f86e18c5fd555bad4cfafbd8e5e
|
kolibri/core/content/migrations/0020_le_utils_0_20_upgrade_migration.py
|
kolibri/core/content/migrations/0020_le_utils_0_20_upgrade_migration.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-08-22 17:32
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [("content", "0019_contentnode_slideshow_options")]
operations = [
migrations.AlterField(
model_name="file",
name="preset",
field=models.CharField(
blank=True,
choices=[
("high_res_video", "High Resolution"),
("low_res_video", "Low Resolution"),
("video_thumbnail", "Thumbnail"),
("video_subtitle", "Subtitle"),
("video_dependency", "Video (dependency)"),
("audio", "Audio"),
("audio_thumbnail", "Thumbnail"),
("document", "Document"),
("epub", "ePub Document"),
("document_thumbnail", "Thumbnail"),
("exercise", "Exercise"),
("exercise_thumbnail", "Thumbnail"),
("exercise_image", "Exercise Image"),
("exercise_graphie", "Exercise Graphie"),
("channel_thumbnail", "Channel Thumbnail"),
("topic_thumbnail", "Thumbnail"),
("html5_zip", "HTML5 Zip"),
("html5_dependency", "HTML5 Dependency (Zip format)"),
("html5_thumbnail", "HTML5 Thumbnail"),
("h5p", "H5P Zip"),
("h5p_thumbnail", "H5P Thumbnail"),
("slideshow_image", "Slideshow Image"),
("slideshow_thumbnail", "Slideshow Thumbnail"),
("slideshow_manifest", "Slideshow Manifest"),
],
max_length=150,
),
),
migrations.AlterField(
model_name="localfile",
name="extension",
field=models.CharField(
blank=True,
choices=[
("mp4", "MP4 Video"),
("vtt", "VTT Subtitle"),
("mp3", "MP3 Audio"),
("pdf", "PDF Document"),
("jpg", "JPG Image"),
("jpeg", "JPEG Image"),
("png", "PNG Image"),
("gif", "GIF Image"),
("json", "JSON"),
("svg", "SVG Image"),
("perseus", "Perseus Exercise"),
("graphie", "Graphie Exercise"),
("zip", "HTML5 Zip"),
("h5p", "H5P"),
("epub", "ePub Document"),
],
max_length=40,
),
),
]
|
Add content migrations for latest le_utils update.
|
Add content migrations for latest le_utils update.
|
Python
|
mit
|
mrpau/kolibri,learningequality/kolibri,indirectlylit/kolibri,indirectlylit/kolibri,mrpau/kolibri,mrpau/kolibri,learningequality/kolibri,indirectlylit/kolibri,mrpau/kolibri,learningequality/kolibri,indirectlylit/kolibri,learningequality/kolibri
|
Add content migrations for latest le_utils update.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-08-22 17:32
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [("content", "0019_contentnode_slideshow_options")]
operations = [
migrations.AlterField(
model_name="file",
name="preset",
field=models.CharField(
blank=True,
choices=[
("high_res_video", "High Resolution"),
("low_res_video", "Low Resolution"),
("video_thumbnail", "Thumbnail"),
("video_subtitle", "Subtitle"),
("video_dependency", "Video (dependency)"),
("audio", "Audio"),
("audio_thumbnail", "Thumbnail"),
("document", "Document"),
("epub", "ePub Document"),
("document_thumbnail", "Thumbnail"),
("exercise", "Exercise"),
("exercise_thumbnail", "Thumbnail"),
("exercise_image", "Exercise Image"),
("exercise_graphie", "Exercise Graphie"),
("channel_thumbnail", "Channel Thumbnail"),
("topic_thumbnail", "Thumbnail"),
("html5_zip", "HTML5 Zip"),
("html5_dependency", "HTML5 Dependency (Zip format)"),
("html5_thumbnail", "HTML5 Thumbnail"),
("h5p", "H5P Zip"),
("h5p_thumbnail", "H5P Thumbnail"),
("slideshow_image", "Slideshow Image"),
("slideshow_thumbnail", "Slideshow Thumbnail"),
("slideshow_manifest", "Slideshow Manifest"),
],
max_length=150,
),
),
migrations.AlterField(
model_name="localfile",
name="extension",
field=models.CharField(
blank=True,
choices=[
("mp4", "MP4 Video"),
("vtt", "VTT Subtitle"),
("mp3", "MP3 Audio"),
("pdf", "PDF Document"),
("jpg", "JPG Image"),
("jpeg", "JPEG Image"),
("png", "PNG Image"),
("gif", "GIF Image"),
("json", "JSON"),
("svg", "SVG Image"),
("perseus", "Perseus Exercise"),
("graphie", "Graphie Exercise"),
("zip", "HTML5 Zip"),
("h5p", "H5P"),
("epub", "ePub Document"),
],
max_length=40,
),
),
]
|
<commit_before><commit_msg>Add content migrations for latest le_utils update.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-08-22 17:32
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [("content", "0019_contentnode_slideshow_options")]
operations = [
migrations.AlterField(
model_name="file",
name="preset",
field=models.CharField(
blank=True,
choices=[
("high_res_video", "High Resolution"),
("low_res_video", "Low Resolution"),
("video_thumbnail", "Thumbnail"),
("video_subtitle", "Subtitle"),
("video_dependency", "Video (dependency)"),
("audio", "Audio"),
("audio_thumbnail", "Thumbnail"),
("document", "Document"),
("epub", "ePub Document"),
("document_thumbnail", "Thumbnail"),
("exercise", "Exercise"),
("exercise_thumbnail", "Thumbnail"),
("exercise_image", "Exercise Image"),
("exercise_graphie", "Exercise Graphie"),
("channel_thumbnail", "Channel Thumbnail"),
("topic_thumbnail", "Thumbnail"),
("html5_zip", "HTML5 Zip"),
("html5_dependency", "HTML5 Dependency (Zip format)"),
("html5_thumbnail", "HTML5 Thumbnail"),
("h5p", "H5P Zip"),
("h5p_thumbnail", "H5P Thumbnail"),
("slideshow_image", "Slideshow Image"),
("slideshow_thumbnail", "Slideshow Thumbnail"),
("slideshow_manifest", "Slideshow Manifest"),
],
max_length=150,
),
),
migrations.AlterField(
model_name="localfile",
name="extension",
field=models.CharField(
blank=True,
choices=[
("mp4", "MP4 Video"),
("vtt", "VTT Subtitle"),
("mp3", "MP3 Audio"),
("pdf", "PDF Document"),
("jpg", "JPG Image"),
("jpeg", "JPEG Image"),
("png", "PNG Image"),
("gif", "GIF Image"),
("json", "JSON"),
("svg", "SVG Image"),
("perseus", "Perseus Exercise"),
("graphie", "Graphie Exercise"),
("zip", "HTML5 Zip"),
("h5p", "H5P"),
("epub", "ePub Document"),
],
max_length=40,
),
),
]
|
Add content migrations for latest le_utils update.# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-08-22 17:32
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [("content", "0019_contentnode_slideshow_options")]
operations = [
migrations.AlterField(
model_name="file",
name="preset",
field=models.CharField(
blank=True,
choices=[
("high_res_video", "High Resolution"),
("low_res_video", "Low Resolution"),
("video_thumbnail", "Thumbnail"),
("video_subtitle", "Subtitle"),
("video_dependency", "Video (dependency)"),
("audio", "Audio"),
("audio_thumbnail", "Thumbnail"),
("document", "Document"),
("epub", "ePub Document"),
("document_thumbnail", "Thumbnail"),
("exercise", "Exercise"),
("exercise_thumbnail", "Thumbnail"),
("exercise_image", "Exercise Image"),
("exercise_graphie", "Exercise Graphie"),
("channel_thumbnail", "Channel Thumbnail"),
("topic_thumbnail", "Thumbnail"),
("html5_zip", "HTML5 Zip"),
("html5_dependency", "HTML5 Dependency (Zip format)"),
("html5_thumbnail", "HTML5 Thumbnail"),
("h5p", "H5P Zip"),
("h5p_thumbnail", "H5P Thumbnail"),
("slideshow_image", "Slideshow Image"),
("slideshow_thumbnail", "Slideshow Thumbnail"),
("slideshow_manifest", "Slideshow Manifest"),
],
max_length=150,
),
),
migrations.AlterField(
model_name="localfile",
name="extension",
field=models.CharField(
blank=True,
choices=[
("mp4", "MP4 Video"),
("vtt", "VTT Subtitle"),
("mp3", "MP3 Audio"),
("pdf", "PDF Document"),
("jpg", "JPG Image"),
("jpeg", "JPEG Image"),
("png", "PNG Image"),
("gif", "GIF Image"),
("json", "JSON"),
("svg", "SVG Image"),
("perseus", "Perseus Exercise"),
("graphie", "Graphie Exercise"),
("zip", "HTML5 Zip"),
("h5p", "H5P"),
("epub", "ePub Document"),
],
max_length=40,
),
),
]
|
<commit_before><commit_msg>Add content migrations for latest le_utils update.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-08-22 17:32
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [("content", "0019_contentnode_slideshow_options")]
operations = [
migrations.AlterField(
model_name="file",
name="preset",
field=models.CharField(
blank=True,
choices=[
("high_res_video", "High Resolution"),
("low_res_video", "Low Resolution"),
("video_thumbnail", "Thumbnail"),
("video_subtitle", "Subtitle"),
("video_dependency", "Video (dependency)"),
("audio", "Audio"),
("audio_thumbnail", "Thumbnail"),
("document", "Document"),
("epub", "ePub Document"),
("document_thumbnail", "Thumbnail"),
("exercise", "Exercise"),
("exercise_thumbnail", "Thumbnail"),
("exercise_image", "Exercise Image"),
("exercise_graphie", "Exercise Graphie"),
("channel_thumbnail", "Channel Thumbnail"),
("topic_thumbnail", "Thumbnail"),
("html5_zip", "HTML5 Zip"),
("html5_dependency", "HTML5 Dependency (Zip format)"),
("html5_thumbnail", "HTML5 Thumbnail"),
("h5p", "H5P Zip"),
("h5p_thumbnail", "H5P Thumbnail"),
("slideshow_image", "Slideshow Image"),
("slideshow_thumbnail", "Slideshow Thumbnail"),
("slideshow_manifest", "Slideshow Manifest"),
],
max_length=150,
),
),
migrations.AlterField(
model_name="localfile",
name="extension",
field=models.CharField(
blank=True,
choices=[
("mp4", "MP4 Video"),
("vtt", "VTT Subtitle"),
("mp3", "MP3 Audio"),
("pdf", "PDF Document"),
("jpg", "JPG Image"),
("jpeg", "JPEG Image"),
("png", "PNG Image"),
("gif", "GIF Image"),
("json", "JSON"),
("svg", "SVG Image"),
("perseus", "Perseus Exercise"),
("graphie", "Graphie Exercise"),
("zip", "HTML5 Zip"),
("h5p", "H5P"),
("epub", "ePub Document"),
],
max_length=40,
),
),
]
|
|
dd01830cf9be3672d4223cdb37ed8bb410730b62
|
devil/devil/android/tools/adb_run_shell_cmd.py
|
devil/devil/android/tools/adb_run_shell_cmd.py
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import logging
import sys
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_utils
from devil.utils import run_tests_helper
def main():
parser = argparse.ArgumentParser(
'Run an adb shell command on selected devices')
parser.add_argument('cmd', help='Adb shell command to run.', nargs="+")
parser.add_argument('-d', '--device', action='append', dest='devices',
help='Device to run cmd on. Runs on all devices if not '
'specified. Set multiple times for multiple devices')
parser.add_argument('-v', '--verbose', default=0, action='count',
help='Verbose level (multiple times for more)')
parser.add_argument('--blacklist-file', help='Device blacklist file.')
parser.add_argument('--as-root', action='store_true', help='Run as root.')
parser.add_argument('--json-output',
help='File to dump json output to.')
args = parser.parse_args()
run_tests_helper.SetLogLevel(args.verbose)
args.blacklist_file = device_blacklist.Blacklist(
args.blacklist_file) if args.blacklist_file else None
attached_devices = device_utils.DeviceUtils.HealthyDevices(
blacklist=args.blacklist_file)
if args.devices:
selected_devices = []
attached_devices = {str(d): d for d in attached_devices}
for serial in args.devices:
if serial in attached_devices:
selected_devices.append(attached_devices[serial])
else:
logging.warning('Specified device %s not found.', serial)
else:
selected_devices = attached_devices
if not selected_devices:
raise device_errors.NoDevicesError
p_out = (device_utils.DeviceUtils.parallel(selected_devices).RunShellCommand(
args.cmd, large_output=True, as_root=args.as_root, check_return=True)
.pGet(None))
data = {}
for device, output in zip(selected_devices, p_out):
for line in output:
print '%s: %s' %(device, line)
data[str(device)] = output
if args.json_output:
with open(args.json_output, 'w') as f:
json.dump(data, f)
return 0
if __name__ == '__main__':
sys.exit(main())
|
Add util for running adb shell commands on device.
|
[Android] Add util for running adb shell commands on device.
BUG=543257
Review URL: https://codereview.chromium.org/1498113002
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#365301}
|
Python
|
bsd-3-clause
|
sahiljain/catapult,benschmaus/catapult,catapult-project/catapult-csm,catapult-project/catapult,catapult-project/catapult-csm,catapult-project/catapult-csm,sahiljain/catapult,benschmaus/catapult,catapult-project/catapult,catapult-project/catapult-csm,sahiljain/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult,catapult-project/catapult-csm,benschmaus/catapult,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,SummerLW/Perf-Insight-Report,SummerLW/Perf-Insight-Report,benschmaus/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult,sahiljain/catapult,sahiljain/catapult,sahiljain/catapult,catapult-project/catapult-csm,benschmaus/catapult,catapult-project/catapult,catapult-project/catapult,benschmaus/catapult,catapult-project/catapult,SummerLW/Perf-Insight-Report,benschmaus/catapult
|
[Android] Add util for running adb shell commands on device.
BUG=543257
Review URL: https://codereview.chromium.org/1498113002
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#365301}
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import logging
import sys
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_utils
from devil.utils import run_tests_helper
def main():
parser = argparse.ArgumentParser(
'Run an adb shell command on selected devices')
parser.add_argument('cmd', help='Adb shell command to run.', nargs="+")
parser.add_argument('-d', '--device', action='append', dest='devices',
help='Device to run cmd on. Runs on all devices if not '
'specified. Set multiple times for multiple devices')
parser.add_argument('-v', '--verbose', default=0, action='count',
help='Verbose level (multiple times for more)')
parser.add_argument('--blacklist-file', help='Device blacklist file.')
parser.add_argument('--as-root', action='store_true', help='Run as root.')
parser.add_argument('--json-output',
help='File to dump json output to.')
args = parser.parse_args()
run_tests_helper.SetLogLevel(args.verbose)
args.blacklist_file = device_blacklist.Blacklist(
args.blacklist_file) if args.blacklist_file else None
attached_devices = device_utils.DeviceUtils.HealthyDevices(
blacklist=args.blacklist_file)
if args.devices:
selected_devices = []
attached_devices = {str(d): d for d in attached_devices}
for serial in args.devices:
if serial in attached_devices:
selected_devices.append(attached_devices[serial])
else:
logging.warning('Specified device %s not found.', serial)
else:
selected_devices = attached_devices
if not selected_devices:
raise device_errors.NoDevicesError
p_out = (device_utils.DeviceUtils.parallel(selected_devices).RunShellCommand(
args.cmd, large_output=True, as_root=args.as_root, check_return=True)
.pGet(None))
data = {}
for device, output in zip(selected_devices, p_out):
for line in output:
print '%s: %s' %(device, line)
data[str(device)] = output
if args.json_output:
with open(args.json_output, 'w') as f:
json.dump(data, f)
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>[Android] Add util for running adb shell commands on device.
BUG=543257
Review URL: https://codereview.chromium.org/1498113002
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#365301}<commit_after>
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import logging
import sys
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_utils
from devil.utils import run_tests_helper
def main():
parser = argparse.ArgumentParser(
'Run an adb shell command on selected devices')
parser.add_argument('cmd', help='Adb shell command to run.', nargs="+")
parser.add_argument('-d', '--device', action='append', dest='devices',
help='Device to run cmd on. Runs on all devices if not '
'specified. Set multiple times for multiple devices')
parser.add_argument('-v', '--verbose', default=0, action='count',
help='Verbose level (multiple times for more)')
parser.add_argument('--blacklist-file', help='Device blacklist file.')
parser.add_argument('--as-root', action='store_true', help='Run as root.')
parser.add_argument('--json-output',
help='File to dump json output to.')
args = parser.parse_args()
run_tests_helper.SetLogLevel(args.verbose)
args.blacklist_file = device_blacklist.Blacklist(
args.blacklist_file) if args.blacklist_file else None
attached_devices = device_utils.DeviceUtils.HealthyDevices(
blacklist=args.blacklist_file)
if args.devices:
selected_devices = []
attached_devices = {str(d): d for d in attached_devices}
for serial in args.devices:
if serial in attached_devices:
selected_devices.append(attached_devices[serial])
else:
logging.warning('Specified device %s not found.', serial)
else:
selected_devices = attached_devices
if not selected_devices:
raise device_errors.NoDevicesError
p_out = (device_utils.DeviceUtils.parallel(selected_devices).RunShellCommand(
args.cmd, large_output=True, as_root=args.as_root, check_return=True)
.pGet(None))
data = {}
for device, output in zip(selected_devices, p_out):
for line in output:
print '%s: %s' %(device, line)
data[str(device)] = output
if args.json_output:
with open(args.json_output, 'w') as f:
json.dump(data, f)
return 0
if __name__ == '__main__':
sys.exit(main())
|
[Android] Add util for running adb shell commands on device.
BUG=543257
Review URL: https://codereview.chromium.org/1498113002
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#365301}#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import logging
import sys
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_utils
from devil.utils import run_tests_helper
def main():
parser = argparse.ArgumentParser(
'Run an adb shell command on selected devices')
parser.add_argument('cmd', help='Adb shell command to run.', nargs="+")
parser.add_argument('-d', '--device', action='append', dest='devices',
help='Device to run cmd on. Runs on all devices if not '
'specified. Set multiple times for multiple devices')
parser.add_argument('-v', '--verbose', default=0, action='count',
help='Verbose level (multiple times for more)')
parser.add_argument('--blacklist-file', help='Device blacklist file.')
parser.add_argument('--as-root', action='store_true', help='Run as root.')
parser.add_argument('--json-output',
help='File to dump json output to.')
args = parser.parse_args()
run_tests_helper.SetLogLevel(args.verbose)
args.blacklist_file = device_blacklist.Blacklist(
args.blacklist_file) if args.blacklist_file else None
attached_devices = device_utils.DeviceUtils.HealthyDevices(
blacklist=args.blacklist_file)
if args.devices:
selected_devices = []
attached_devices = {str(d): d for d in attached_devices}
for serial in args.devices:
if serial in attached_devices:
selected_devices.append(attached_devices[serial])
else:
logging.warning('Specified device %s not found.', serial)
else:
selected_devices = attached_devices
if not selected_devices:
raise device_errors.NoDevicesError
p_out = (device_utils.DeviceUtils.parallel(selected_devices).RunShellCommand(
args.cmd, large_output=True, as_root=args.as_root, check_return=True)
.pGet(None))
data = {}
for device, output in zip(selected_devices, p_out):
for line in output:
print '%s: %s' %(device, line)
data[str(device)] = output
if args.json_output:
with open(args.json_output, 'w') as f:
json.dump(data, f)
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>[Android] Add util for running adb shell commands on device.
BUG=543257
Review URL: https://codereview.chromium.org/1498113002
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#365301}<commit_after>#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import logging
import sys
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_utils
from devil.utils import run_tests_helper
def main():
parser = argparse.ArgumentParser(
'Run an adb shell command on selected devices')
parser.add_argument('cmd', help='Adb shell command to run.', nargs="+")
parser.add_argument('-d', '--device', action='append', dest='devices',
help='Device to run cmd on. Runs on all devices if not '
'specified. Set multiple times for multiple devices')
parser.add_argument('-v', '--verbose', default=0, action='count',
help='Verbose level (multiple times for more)')
parser.add_argument('--blacklist-file', help='Device blacklist file.')
parser.add_argument('--as-root', action='store_true', help='Run as root.')
parser.add_argument('--json-output',
help='File to dump json output to.')
args = parser.parse_args()
run_tests_helper.SetLogLevel(args.verbose)
args.blacklist_file = device_blacklist.Blacklist(
args.blacklist_file) if args.blacklist_file else None
attached_devices = device_utils.DeviceUtils.HealthyDevices(
blacklist=args.blacklist_file)
if args.devices:
selected_devices = []
attached_devices = {str(d): d for d in attached_devices}
for serial in args.devices:
if serial in attached_devices:
selected_devices.append(attached_devices[serial])
else:
logging.warning('Specified device %s not found.', serial)
else:
selected_devices = attached_devices
if not selected_devices:
raise device_errors.NoDevicesError
p_out = (device_utils.DeviceUtils.parallel(selected_devices).RunShellCommand(
args.cmd, large_output=True, as_root=args.as_root, check_return=True)
.pGet(None))
data = {}
for device, output in zip(selected_devices, p_out):
for line in output:
print '%s: %s' %(device, line)
data[str(device)] = output
if args.json_output:
with open(args.json_output, 'w') as f:
json.dump(data, f)
return 0
if __name__ == '__main__':
sys.exit(main())
|
|
308d5dec656a17c2dba97be0ad641fe8d390636d
|
thinc/neural/tests/unit/Params/test_params.py
|
thinc/neural/tests/unit/Params/test_params.py
|
import pytest
from ....params import Params
from ....ops import NumpyOps
@pytest.fixture
def ops():
return NumpyOps()
@pytest.mark.parametrize('size', [0, 10, 1000, 7, 12])
def test_init_allocates_mem(ops, size):
params = Params(ops, size)
assert params._mem.size == size
assert params._i == 0
@pytest.mark.parametrize('size', [-10, -1000, -1, -7, -12])
def test_init_rejects_negative_sizes(ops, size):
with pytest.raises(ValueError):
params = Params(ops, size)
def test_add_param_within_size(ops):
model = Params(ops, size=128)
model.add('W', (5, 10))
assert model._offsets['W'] == (0, (5, 10))
model.add('b', (5,))
assert model._offsets['b'] == (5*10, (5,))
def test_add_param_realloc(ops):
model = Params(ops, size=10)
model.add('b', (5,))
assert model._offsets['b'] == (0, (5,))
model.add('W', (5, 10))
assert model._offsets['W'] == (5, (5, 10))
assert model._offsets['b'] == (0, (5,))
def test_get_param_present(ops):
model = Params(ops, size=10)
b = model.add('b', (5,))
b2 = model.get('b')
b[0] = 100
assert b[0] == b2[0]
def test_get_param_absent(ops):
model = Params(ops, size=10)
b = model.get('b')
assert b is None
def test_get_first_gradient(ops):
model = Params(ops, size=10)
b = model.add('b', (5,))
b2 = model.get('d_b')
b[0] = 100
assert b2[0] == 0
def test_get_existing_gradient(ops):
model = Params(ops, size=10)
b = model.add('b', (5,))
b2 = model.get('d_b')
b[0] = 100
assert b2[0] == 0
b2[0] = 20.
b3 = model.get('d_b')
assert b3[0] == b2[0]
def test_get_gradient_absent_parameter(ops):
model = Params(ops, size=10)
d_b = model.get('d_b')
assert d_b is None
|
Add unit tests for Params class
|
Add unit tests for Params class
|
Python
|
mit
|
explosion/thinc,spacy-io/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc,spacy-io/thinc,explosion/thinc
|
Add unit tests for Params class
|
import pytest
from ....params import Params
from ....ops import NumpyOps
@pytest.fixture
def ops():
return NumpyOps()
@pytest.mark.parametrize('size', [0, 10, 1000, 7, 12])
def test_init_allocates_mem(ops, size):
params = Params(ops, size)
assert params._mem.size == size
assert params._i == 0
@pytest.mark.parametrize('size', [-10, -1000, -1, -7, -12])
def test_init_rejects_negative_sizes(ops, size):
with pytest.raises(ValueError):
params = Params(ops, size)
def test_add_param_within_size(ops):
model = Params(ops, size=128)
model.add('W', (5, 10))
assert model._offsets['W'] == (0, (5, 10))
model.add('b', (5,))
assert model._offsets['b'] == (5*10, (5,))
def test_add_param_realloc(ops):
model = Params(ops, size=10)
model.add('b', (5,))
assert model._offsets['b'] == (0, (5,))
model.add('W', (5, 10))
assert model._offsets['W'] == (5, (5, 10))
assert model._offsets['b'] == (0, (5,))
def test_get_param_present(ops):
model = Params(ops, size=10)
b = model.add('b', (5,))
b2 = model.get('b')
b[0] = 100
assert b[0] == b2[0]
def test_get_param_absent(ops):
model = Params(ops, size=10)
b = model.get('b')
assert b is None
def test_get_first_gradient(ops):
model = Params(ops, size=10)
b = model.add('b', (5,))
b2 = model.get('d_b')
b[0] = 100
assert b2[0] == 0
def test_get_existing_gradient(ops):
model = Params(ops, size=10)
b = model.add('b', (5,))
b2 = model.get('d_b')
b[0] = 100
assert b2[0] == 0
b2[0] = 20.
b3 = model.get('d_b')
assert b3[0] == b2[0]
def test_get_gradient_absent_parameter(ops):
model = Params(ops, size=10)
d_b = model.get('d_b')
assert d_b is None
|
<commit_before><commit_msg>Add unit tests for Params class<commit_after>
|
import pytest
from ....params import Params
from ....ops import NumpyOps
@pytest.fixture
def ops():
return NumpyOps()
@pytest.mark.parametrize('size', [0, 10, 1000, 7, 12])
def test_init_allocates_mem(ops, size):
params = Params(ops, size)
assert params._mem.size == size
assert params._i == 0
@pytest.mark.parametrize('size', [-10, -1000, -1, -7, -12])
def test_init_rejects_negative_sizes(ops, size):
with pytest.raises(ValueError):
params = Params(ops, size)
def test_add_param_within_size(ops):
model = Params(ops, size=128)
model.add('W', (5, 10))
assert model._offsets['W'] == (0, (5, 10))
model.add('b', (5,))
assert model._offsets['b'] == (5*10, (5,))
def test_add_param_realloc(ops):
model = Params(ops, size=10)
model.add('b', (5,))
assert model._offsets['b'] == (0, (5,))
model.add('W', (5, 10))
assert model._offsets['W'] == (5, (5, 10))
assert model._offsets['b'] == (0, (5,))
def test_get_param_present(ops):
model = Params(ops, size=10)
b = model.add('b', (5,))
b2 = model.get('b')
b[0] = 100
assert b[0] == b2[0]
def test_get_param_absent(ops):
model = Params(ops, size=10)
b = model.get('b')
assert b is None
def test_get_first_gradient(ops):
model = Params(ops, size=10)
b = model.add('b', (5,))
b2 = model.get('d_b')
b[0] = 100
assert b2[0] == 0
def test_get_existing_gradient(ops):
model = Params(ops, size=10)
b = model.add('b', (5,))
b2 = model.get('d_b')
b[0] = 100
assert b2[0] == 0
b2[0] = 20.
b3 = model.get('d_b')
assert b3[0] == b2[0]
def test_get_gradient_absent_parameter(ops):
model = Params(ops, size=10)
d_b = model.get('d_b')
assert d_b is None
|
Add unit tests for Params classimport pytest
from ....params import Params
from ....ops import NumpyOps
@pytest.fixture
def ops():
return NumpyOps()
@pytest.mark.parametrize('size', [0, 10, 1000, 7, 12])
def test_init_allocates_mem(ops, size):
params = Params(ops, size)
assert params._mem.size == size
assert params._i == 0
@pytest.mark.parametrize('size', [-10, -1000, -1, -7, -12])
def test_init_rejects_negative_sizes(ops, size):
with pytest.raises(ValueError):
params = Params(ops, size)
def test_add_param_within_size(ops):
model = Params(ops, size=128)
model.add('W', (5, 10))
assert model._offsets['W'] == (0, (5, 10))
model.add('b', (5,))
assert model._offsets['b'] == (5*10, (5,))
def test_add_param_realloc(ops):
model = Params(ops, size=10)
model.add('b', (5,))
assert model._offsets['b'] == (0, (5,))
model.add('W', (5, 10))
assert model._offsets['W'] == (5, (5, 10))
assert model._offsets['b'] == (0, (5,))
def test_get_param_present(ops):
model = Params(ops, size=10)
b = model.add('b', (5,))
b2 = model.get('b')
b[0] = 100
assert b[0] == b2[0]
def test_get_param_absent(ops):
model = Params(ops, size=10)
b = model.get('b')
assert b is None
def test_get_first_gradient(ops):
model = Params(ops, size=10)
b = model.add('b', (5,))
b2 = model.get('d_b')
b[0] = 100
assert b2[0] == 0
def test_get_existing_gradient(ops):
model = Params(ops, size=10)
b = model.add('b', (5,))
b2 = model.get('d_b')
b[0] = 100
assert b2[0] == 0
b2[0] = 20.
b3 = model.get('d_b')
assert b3[0] == b2[0]
def test_get_gradient_absent_parameter(ops):
model = Params(ops, size=10)
d_b = model.get('d_b')
assert d_b is None
|
<commit_before><commit_msg>Add unit tests for Params class<commit_after>import pytest
from ....params import Params
from ....ops import NumpyOps
@pytest.fixture
def ops():
return NumpyOps()
@pytest.mark.parametrize('size', [0, 10, 1000, 7, 12])
def test_init_allocates_mem(ops, size):
params = Params(ops, size)
assert params._mem.size == size
assert params._i == 0
@pytest.mark.parametrize('size', [-10, -1000, -1, -7, -12])
def test_init_rejects_negative_sizes(ops, size):
with pytest.raises(ValueError):
params = Params(ops, size)
def test_add_param_within_size(ops):
model = Params(ops, size=128)
model.add('W', (5, 10))
assert model._offsets['W'] == (0, (5, 10))
model.add('b', (5,))
assert model._offsets['b'] == (5*10, (5,))
def test_add_param_realloc(ops):
model = Params(ops, size=10)
model.add('b', (5,))
assert model._offsets['b'] == (0, (5,))
model.add('W', (5, 10))
assert model._offsets['W'] == (5, (5, 10))
assert model._offsets['b'] == (0, (5,))
def test_get_param_present(ops):
model = Params(ops, size=10)
b = model.add('b', (5,))
b2 = model.get('b')
b[0] = 100
assert b[0] == b2[0]
def test_get_param_absent(ops):
model = Params(ops, size=10)
b = model.get('b')
assert b is None
def test_get_first_gradient(ops):
model = Params(ops, size=10)
b = model.add('b', (5,))
b2 = model.get('d_b')
b[0] = 100
assert b2[0] == 0
def test_get_existing_gradient(ops):
model = Params(ops, size=10)
b = model.add('b', (5,))
b2 = model.get('d_b')
b[0] = 100
assert b2[0] == 0
b2[0] = 20.
b3 = model.get('d_b')
assert b3[0] == b2[0]
def test_get_gradient_absent_parameter(ops):
model = Params(ops, size=10)
d_b = model.get('d_b')
assert d_b is None
|
|
77d4dcd43fbc8caf3d9b727fea75b35339ed936e
|
utilities/src/d1_util/strip_xml_whitespace.py
|
utilities/src/d1_util/strip_xml_whitespace.py
|
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Strip whitespace that might interfere with XSD schema validation
Overall formatting is maintained. Note that pretty printing the doc is likely to add
the stripped whitespace back in.
This is an example of how to use the DataONE Science Metadata library for Python. It
shows how to:
- Deserialize, process and serialize XML docs.
- Apply an XSLT transform which strips potentially problematic whitespace.
- Download a Science Object from a MN or CN.
"""
import argparse
import logging
import d1_scimeta.xml_schema
import d1_client.command_line
log = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("xml_path", help="Path to XML file to check")
parser.add_argument("--debug", action="store_true", help="Debug level logging")
args = parser.parse_args()
d1_client.command_line.log_setup(is_debug=args.debug)
xml_tree = d1_scimeta.xml_schema.parse_xml_file(args.xml_path)
stripped_xml_tree = d1_scimeta.xml_schema.strip_whitespace(xml_tree)
d1_scimeta.xml_schema.dump_pretty_tree(stripped_xml_tree)
d1_scimeta.xml_schema.save_tree_to_file(stripped_xml_tree, args.xml_path)
def _log(msg, indent=0, log_=log.info, extra_indent=False, extra_line=False):
if extra_line:
log_("")
log_("{}{}".format(" " * (indent + (1 if extra_indent else 0)), msg))
class ResolveError(Exception):
pass
if __name__ == "__main__":
main()
|
Add utility/example that strips problematic whitespace from XML doc
|
Add utility/example that strips problematic whitespace from XML doc
|
Python
|
apache-2.0
|
DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python
|
Add utility/example that strips problematic whitespace from XML doc
|
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Strip whitespace that might interfere with XSD schema validation
Overall formatting is maintained. Note that pretty printing the doc is likely to add
the stripped whitespace back in.
This is an example of how to use the DataONE Science Metadata library for Python. It
shows how to:
- Deserialize, process and serialize XML docs.
- Apply an XSLT transform which strips potentially problematic whitespace.
- Download a Science Object from a MN or CN.
"""
import argparse
import logging
import d1_scimeta.xml_schema
import d1_client.command_line
log = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("xml_path", help="Path to XML file to check")
parser.add_argument("--debug", action="store_true", help="Debug level logging")
args = parser.parse_args()
d1_client.command_line.log_setup(is_debug=args.debug)
xml_tree = d1_scimeta.xml_schema.parse_xml_file(args.xml_path)
stripped_xml_tree = d1_scimeta.xml_schema.strip_whitespace(xml_tree)
d1_scimeta.xml_schema.dump_pretty_tree(stripped_xml_tree)
d1_scimeta.xml_schema.save_tree_to_file(stripped_xml_tree, args.xml_path)
def _log(msg, indent=0, log_=log.info, extra_indent=False, extra_line=False):
if extra_line:
log_("")
log_("{}{}".format(" " * (indent + (1 if extra_indent else 0)), msg))
class ResolveError(Exception):
pass
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add utility/example that strips problematic whitespace from XML doc<commit_after>
|
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Strip whitespace that might interfere with XSD schema validation
Overall formatting is maintained. Note that pretty printing the doc is likely to add
the stripped whitespace back in.
This is an example of how to use the DataONE Science Metadata library for Python. It
shows how to:
- Deserialize, process and serialize XML docs.
- Apply an XSLT transform which strips potentially problematic whitespace.
- Download a Science Object from a MN or CN.
"""
import argparse
import logging
import d1_scimeta.xml_schema
import d1_client.command_line
log = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("xml_path", help="Path to XML file to check")
parser.add_argument("--debug", action="store_true", help="Debug level logging")
args = parser.parse_args()
d1_client.command_line.log_setup(is_debug=args.debug)
xml_tree = d1_scimeta.xml_schema.parse_xml_file(args.xml_path)
stripped_xml_tree = d1_scimeta.xml_schema.strip_whitespace(xml_tree)
d1_scimeta.xml_schema.dump_pretty_tree(stripped_xml_tree)
d1_scimeta.xml_schema.save_tree_to_file(stripped_xml_tree, args.xml_path)
def _log(msg, indent=0, log_=log.info, extra_indent=False, extra_line=False):
if extra_line:
log_("")
log_("{}{}".format(" " * (indent + (1 if extra_indent else 0)), msg))
class ResolveError(Exception):
pass
if __name__ == "__main__":
main()
|
Add utility/example that strips problematic whitespace from XML doc#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Strip whitespace that might interfere with XSD schema validation
Overall formatting is maintained. Note that pretty printing the doc is likely to add
the stripped whitespace back in.
This is an example of how to use the DataONE Science Metadata library for Python. It
shows how to:
- Deserialize, process and serialize XML docs.
- Apply an XSLT transform which strips potentially problematic whitespace.
- Download a Science Object from a MN or CN.
"""
import argparse
import logging
import d1_scimeta.xml_schema
import d1_client.command_line
log = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("xml_path", help="Path to XML file to check")
parser.add_argument("--debug", action="store_true", help="Debug level logging")
args = parser.parse_args()
d1_client.command_line.log_setup(is_debug=args.debug)
xml_tree = d1_scimeta.xml_schema.parse_xml_file(args.xml_path)
stripped_xml_tree = d1_scimeta.xml_schema.strip_whitespace(xml_tree)
d1_scimeta.xml_schema.dump_pretty_tree(stripped_xml_tree)
d1_scimeta.xml_schema.save_tree_to_file(stripped_xml_tree, args.xml_path)
def _log(msg, indent=0, log_=log.info, extra_indent=False, extra_line=False):
if extra_line:
log_("")
log_("{}{}".format(" " * (indent + (1 if extra_indent else 0)), msg))
class ResolveError(Exception):
pass
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add utility/example that strips problematic whitespace from XML doc<commit_after>#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Strip whitespace that might interfere with XSD schema validation
Overall formatting is maintained. Note that pretty printing the doc is likely to add
the stripped whitespace back in.
This is an example of how to use the DataONE Science Metadata library for Python. It
shows how to:
- Deserialize, process and serialize XML docs.
- Apply an XSLT transform which strips potentially problematic whitespace.
- Download a Science Object from a MN or CN.
"""
import argparse
import logging
import d1_scimeta.xml_schema
import d1_client.command_line
log = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("xml_path", help="Path to XML file to check")
parser.add_argument("--debug", action="store_true", help="Debug level logging")
args = parser.parse_args()
d1_client.command_line.log_setup(is_debug=args.debug)
xml_tree = d1_scimeta.xml_schema.parse_xml_file(args.xml_path)
stripped_xml_tree = d1_scimeta.xml_schema.strip_whitespace(xml_tree)
d1_scimeta.xml_schema.dump_pretty_tree(stripped_xml_tree)
d1_scimeta.xml_schema.save_tree_to_file(stripped_xml_tree, args.xml_path)
def _log(msg, indent=0, log_=log.info, extra_indent=False, extra_line=False):
if extra_line:
log_("")
log_("{}{}".format(" " * (indent + (1 if extra_indent else 0)), msg))
class ResolveError(Exception):
pass
if __name__ == "__main__":
main()
|
|
74a4023c3d9e02d13456fca285e8f64eb8358434
|
elasticsearch_flex/analysis_utils.py
|
elasticsearch_flex/analysis_utils.py
|
import logging
from elasticsearch_dsl.analysis import CustomAnalyzer
logger = logging.getLogger(__name__)
class AnalysisDefinition(object):
'''
This defines a helper class for registering search analyzers.
Analyzers can be defined as callables, hence ensuring io/cpu bound analysis
configuration can be deferred until necessary.
An example use case of this is in defining Synonym Token Filter, where
the Synonyms may be loaded from a network file. This is only necessary when
the index is actually being set up, hence a callable would ensure that
the synonyms file is not downloaded all the time.
'''
def __init__(self, params):
self.definition = self._get_definition(params)
def _get_definition(self, anz):
if callable(anz):
# A callable definition may return either a CustomAnalyzer or a dict.
# We recursively process the return value.
return self._get_definition(anz())
elif isinstance(anz, CustomAnalyzer):
# For CustomAnalyzer, we use the definition.
return anz.get_analysis_definition()
elif isinstance(anz, dict):
# Use dicts as it is.
return anz
raise ValueError('Analyzer can be a callable, DSL CustomAnalyzer, or a dict.')
def register(self, index):
body = {'analysis': self.definition}
with index.ensure_closed_and_reopened() as ix:
conn = ix.get_connection()
conn.indices.put_settings(body=body, index=ix.index_name)
|
Add AnalysisDefinition helper for configuring analyzers
|
Add AnalysisDefinition helper for configuring analyzers
|
Python
|
mit
|
prashnts/dj-elasticsearch-flex,prashnts/dj-elasticsearch-flex
|
Add AnalysisDefinition helper for configuring analyzers
|
import logging
from elasticsearch_dsl.analysis import CustomAnalyzer
logger = logging.getLogger(__name__)
class AnalysisDefinition(object):
'''
This defines a helper class for registering search analyzers.
Analyzers can be defined as callables, hence ensuring io/cpu bound analysis
configuration can be deferred until necessary.
An example use case of this is in defining Synonym Token Filter, where
the Synonyms may be loaded from a network file. This is only necessary when
the index is actually being set up, hence a callable would ensure that
the synonyms file is not downloaded all the time.
'''
def __init__(self, params):
self.definition = self._get_definition(params)
def _get_definition(self, anz):
if callable(anz):
# A callable definition may return either a CustomAnalyzer or a dict.
# We recursively process the return value.
return self._get_definition(anz())
elif isinstance(anz, CustomAnalyzer):
# For CustomAnalyzer, we use the definition.
return anz.get_analysis_definition()
elif isinstance(anz, dict):
# Use dicts as it is.
return anz
raise ValueError('Analyzer can be a callable, DSL CustomAnalyzer, or a dict.')
def register(self, index):
body = {'analysis': self.definition}
with index.ensure_closed_and_reopened() as ix:
conn = ix.get_connection()
conn.indices.put_settings(body=body, index=ix.index_name)
|
<commit_before><commit_msg>Add AnalysisDefinition helper for configuring analyzers<commit_after>
|
import logging
from elasticsearch_dsl.analysis import CustomAnalyzer
logger = logging.getLogger(__name__)
class AnalysisDefinition(object):
'''
This defines a helper class for registering search analyzers.
Analyzers can be defined as callables, hence ensuring io/cpu bound analysis
configuration can be deferred until necessary.
An example use case of this is in defining Synonym Token Filter, where
the Synonyms may be loaded from a network file. This is only necessary when
the index is actually being set up, hence a callable would ensure that
the synonyms file is not downloaded all the time.
'''
def __init__(self, params):
self.definition = self._get_definition(params)
def _get_definition(self, anz):
if callable(anz):
# A callable definition may return either a CustomAnalyzer or a dict.
# We recursively process the return value.
return self._get_definition(anz())
elif isinstance(anz, CustomAnalyzer):
# For CustomAnalyzer, we use the definition.
return anz.get_analysis_definition()
elif isinstance(anz, dict):
# Use dicts as it is.
return anz
raise ValueError('Analyzer can be a callable, DSL CustomAnalyzer, or a dict.')
def register(self, index):
body = {'analysis': self.definition}
with index.ensure_closed_and_reopened() as ix:
conn = ix.get_connection()
conn.indices.put_settings(body=body, index=ix.index_name)
|
Add AnalysisDefinition helper for configuring analyzersimport logging
from elasticsearch_dsl.analysis import CustomAnalyzer
logger = logging.getLogger(__name__)
class AnalysisDefinition(object):
'''
This defines a helper class for registering search analyzers.
Analyzers can be defined as callables, hence ensuring io/cpu bound analysis
configuration can be deferred until necessary.
An example use case of this is in defining Synonym Token Filter, where
the Synonyms may be loaded from a network file. This is only necessary when
the index is actually being set up, hence a callable would ensure that
the synonyms file is not downloaded all the time.
'''
def __init__(self, params):
self.definition = self._get_definition(params)
def _get_definition(self, anz):
if callable(anz):
# A callable definition may return either a CustomAnalyzer or a dict.
# We recursively process the return value.
return self._get_definition(anz())
elif isinstance(anz, CustomAnalyzer):
# For CustomAnalyzer, we use the definition.
return anz.get_analysis_definition()
elif isinstance(anz, dict):
# Use dicts as it is.
return anz
raise ValueError('Analyzer can be a callable, DSL CustomAnalyzer, or a dict.')
def register(self, index):
body = {'analysis': self.definition}
with index.ensure_closed_and_reopened() as ix:
conn = ix.get_connection()
conn.indices.put_settings(body=body, index=ix.index_name)
|
<commit_before><commit_msg>Add AnalysisDefinition helper for configuring analyzers<commit_after>import logging
from elasticsearch_dsl.analysis import CustomAnalyzer
logger = logging.getLogger(__name__)
class AnalysisDefinition(object):
'''
This defines a helper class for registering search analyzers.
Analyzers can be defined as callables, hence ensuring io/cpu bound analysis
configuration can be deferred until necessary.
An example use case of this is in defining Synonym Token Filter, where
the Synonyms may be loaded from a network file. This is only necessary when
the index is actually being set up, hence a callable would ensure that
the synonyms file is not downloaded all the time.
'''
def __init__(self, params):
self.definition = self._get_definition(params)
def _get_definition(self, anz):
if callable(anz):
# A callable definition may return either a CustomAnalyzer or a dict.
# We recursively process the return value.
return self._get_definition(anz())
elif isinstance(anz, CustomAnalyzer):
# For CustomAnalyzer, we use the definition.
return anz.get_analysis_definition()
elif isinstance(anz, dict):
# Use dicts as it is.
return anz
raise ValueError('Analyzer can be a callable, DSL CustomAnalyzer, or a dict.')
def register(self, index):
body = {'analysis': self.definition}
with index.ensure_closed_and_reopened() as ix:
conn = ix.get_connection()
conn.indices.put_settings(body=body, index=ix.index_name)
|