Each record has 16 string columns. The schema, with value-length ranges (or number of distinct classes) as reported by the dataset viewer:

| column | stats |
|---|---|
| commit | stringlengths 40–40 |
| old_file | stringlengths 4–118 |
| new_file | stringlengths 4–118 |
| old_contents | stringlengths 0–2.94k |
| new_contents | stringlengths 1–4.43k |
| subject | stringlengths 15–444 |
| message | stringlengths 16–3.45k |
| lang | stringclasses 1 value |
| license | stringclasses 13 values |
| repos | stringlengths 5–43.2k |
| prompt | stringlengths 17–4.58k |
| response | stringlengths 1–4.43k |
| prompt_tagged | stringlengths 58–4.62k |
| response_tagged | stringlengths 1–4.43k |
| text | stringlengths 132–7.29k |
| text_tagged | stringlengths 173–7.33k |

Sample records follow, one cell per block, with `|` lines separating cells.
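If this preview corresponds to a dataset hosted on the Hugging Face Hub, individual records can be loaded with the `datasets` library. The sketch below is illustrative only: the dataset identifier is a placeholder (the real repository name is not given in this preview), and the column names are the ones listed in the schema above.

```python
# Minimal loading sketch. "some-org/python-commits" is a placeholder id,
# not the actual dataset name, which is not shown in this preview.
from datasets import load_dataset

ds = load_dataset("some-org/python-commits", split="train")

row = ds[0]
# Column names assumed from the schema table above.
print(row["commit"], row["new_file"], row["subject"])
print(row["new_contents"][:200])  # first 200 characters of the committed file
```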
c7fa4500b22104b34b50bbcacc3b64923d6da294
|
trex/parsers.py
|
trex/parsers.py
|
# -*- coding: utf-8 -*-
#
# (c) 2014 Bjoern Ricks <bjoern.ricks@gmail.com>
#
# See LICENSE comming with the source of 'trex' for details.
#
from io import TextIOWrapper
from rest_framework.parsers import BaseParser
class PlainTextParser(BaseParser):
media_type = "text/plain"
def parse(self, stream, media_type=None, parser_context=None):
print "Running PlainTextParser"
charset = self.get_charset(media_type)
if charset:
stream = TextIOWrapper(stream, encoding=charset)
return stream
def get_charset(self, media_type):
if not media_type:
return None
charset = None
msplit = media_type.split(" ");
for m in msplit:
m = m.strip()
if "charset" in m:
csplit = m.split("=")
if len(csplit) > 1:
charset = csplit[1]
return charset.strip().lower()
return None
|
Add a parser for plain text
|
Add a parser for plain text
|
Python
|
mit
|
bjoernricks/trex,bjoernricks/trex
|
Add a parser for plain text
|
<commit_before><commit_msg>Add a parser for plain text<commit_after>
|
|
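Judging from the preview, the derived columns appear to be straightforward concatenations of the base fields: prompt mirrors the commit message, response mirrors new_contents, and the tagged variants wrap them in `<commit_before>`, `<commit_msg>`, and `<commit_after>` markers. A rough sketch of that composition follows; it is inferred from the visible rows, not taken from the dataset's own build script.

```python
# Inferred composition of the derived columns. Field names come from the
# schema above; the concatenation pattern is an assumption based on the
# preview rows, not the dataset's actual generation code.
def derive_columns(row):
    prompt_tagged = (
        "<commit_before>" + row["old_contents"]
        + "<commit_msg>" + row["message"]
        + "<commit_after>"
    )
    return {
        "prompt": row["message"],
        "response": row["new_contents"],
        "prompt_tagged": prompt_tagged,
        "response_tagged": row["new_contents"],
        "text": row["message"] + row["new_contents"],
        "text_tagged": prompt_tagged + row["new_contents"],
    }
```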
e8560c42e3ae73f1753073b8ad6aef7d564e6d65
|
Host/original.py
|
Host/original.py
|
import sys
from functools import reduce
tempVmId = -1
def enhancedActiveVMLoadBalancer(vmStateList, currentAllocationCounts):
'''
vmStateList: Dict<vmId, vmState>
currentAllocationCounts: Dict<vmId, currentActiveAllocationCount>
'''
global tempVmId
vmId = -1
totalAllocations = reduce(lambda x, y: x + y, currentAllocationCounts)
if(totalAllocations < len(vmStateList)):
for i, vm in enumerate(vmStateList):
if(currentAllocationCounts[i] == 0):
vmId = i
break
else:
minCount = sys.maxint
for i, vm in enumerate(vmStateList):
curCount = currentAllocationCounts[i]
if(curCount < minCount):
if(i != tempVmId):
vmId = i
break
tempVmId = vmId
print("Returning, ", vmId)
return vmId
enhancedActiveVMLoadBalancer([
{'cpu': 10, 'mem': 10},
{'cpu': 17, 'mem': 40},
{'cpu': 40, 'mem': 20},
{'cpu': 80, 'mem': 15}
], [1, 4, 1, 1])
|
Implement basic active monitoring algorithm
|
Implement basic active monitoring algorithm
|
Python
|
mit
|
kaushikSarma/VM-Load-balancing,kaushikSarma/VM-Load-balancing,kaushikSarma/VM-Load-balancing,kaushikSarma/VM-Load-balancing
|
Implement basic active monitoring algorithm
|
<commit_before><commit_msg>Implement basic active monitoring algorithm<commit_after>
|
|
3693b1aea769af1e0fbe31007a00f3e33bcec622
|
aids/sorting_and_searching/pair_sum.py
|
aids/sorting_and_searching/pair_sum.py
|
'''
Given an integer array, output all pairs that sum up to a specific value k
'''
from binary_search import binary_search_iterative
def pair_sum_sorting(arr, k):
'''
Using sorting - O(n logn)
'''
number_of_items = len(arr)
if number_of_items < 2:
return
arr.sort()
for index, item in enumerate(arr):
index_pair = binary_search_iterative(arr, index, number_of_items - 1, k - item)
if index_pair and index_pair > index:
print item, arr[index_pair]
def pair_sum_set(arr, k):
'''
Using set - O(n) (time - average case), O(n) (space)
'''
if len(arr) < 2:
return
seen = set()
output = set()
for item in arr:
target = k - item
if target not in seen:
seen.add(target)
else:
output.add(item, target) # print item, target
# for output with non-duplicate i.e. (1,3) and (3,1) are the samw thing
# output.add((min(num, target), max(num, target)))
print '\n'.join([str(item) for item in output])
|
Add function to solve two pair sum
|
Add function to solve two pair sum
|
Python
|
mit
|
ueg1990/aids
|
Add function to solve two pair sum
|
<commit_before><commit_msg>Add function to solve two pair sum<commit_after>
|
|
9a67d63650b751c7b876f248bb3d82e619b37725
|
frequenciesToWords.py
|
frequenciesToWords.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Spell corrector - http://www.chiodini.org/
# Copyright © 2015 Luca Chiodini <luca@chiodini.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import codecs
import sys
def main():
parser = argparse.ArgumentParser(
description="Script to get pure words from unigrams frequencies.")
parser.add_argument("-f", "--file", help="source file to be processed",
required=True)
parser.add_argument("-o", "--output", help="output file with results",
required=True)
args = parser.parse_args()
words = set()
# Process input file and save keys.
with codecs.open(args.file, 'r', 'utf8') as f:
idx = 0
for line in f:
if idx > 0: # skip first line (header)
vals = line.rsplit(' ', 1)
words.add(vals[0])
idx += 1
# Write keys to output file.
with codecs.open(args.output, 'w', 'utf8') as f:
for w in words:
f.write("%s\n" % w)
if __name__ == '__main__':
sys.exit(main())
|
Add new script to create a list of words from frequencies
|
Add new script to create a list of words from frequencies
|
Python
|
agpl-3.0
|
lucach/spellcorrect,lucach/spellcorrect,lucach/spellcorrect,lucach/spellcorrect
|
Add new script to create a list of words from frequencies
|
<commit_before><commit_msg>Add new script to create a list of words from frequencies<commit_after>
|
|
4535d6c41e17031b943e7016fc7de6f76b890f17
|
test/lib/test_inputsource.py
|
test/lib/test_inputsource.py
|
########################################################################
# test/xslt/test_inputsource.py
import os
from amara.lib import inputsource, iri, treecompare
module_dir = os.path.dirname(os.path.abspath(__file__))
rlimit_nofile = 300
try:
import resource
except ImportError:
pass
else:
rlimit_nofile = resource.getrlimit(resource.RLIMIT_NOFILE)[0] + 10
def test_many_inputsources():
assert rlimit_nofile < 20000, "is your file limit really that large?"
# Amara's inputsource consumes a filehandle, in the 'stream' attribute
# See what happens if we run out of file handles.
sources = []
filename = os.path.join(module_dir, "borrowed", "da_20000714_02.xslt")
for i in range(rlimit_nofile):
try:
sources.append(inputsource(filename))
except:
print "Failed after", i, "files"
raise
|
Put the test into the correct directory.
|
Put the test into the correct directory.
--HG--
rename : test/xslt/test_inputsource.py => test/lib/test_inputsource.py
|
Python
|
apache-2.0
|
zepheira/amara,zepheira/amara,zepheira/amara,zepheira/amara,zepheira/amara,zepheira/amara
|
Put the test into the correct directory.
--HG--
rename : test/xslt/test_inputsource.py => test/lib/test_inputsource.py
|
<commit_before><commit_msg>Put the test into the correct directory.
--HG--
rename : test/xslt/test_inputsource.py => test/lib/test_inputsource.py<commit_after>
|
|
732898dc4858ae5cfc7eac3e470069ac702f6c12
|
mapit/management/commands/mapit_generation_deactivate.py
|
mapit/management/commands/mapit_generation_deactivate.py
|
# This script deactivates a particular generation
from optparse import make_option
from django.core.management.base import BaseCommand
from mapit.models import Generation
class Command(BaseCommand):
help = 'Deactivate a generation'
args = '<GENERATION-ID>'
option_list = BaseCommand.option_list + (
make_option('--commit', action='store_true', dest='commit',
help='Actually update the database'),
make_option('--force', action='store_true', dest='force',
help='Force deactivation, even if it would leave no active generations'))
def handle(self, generation_id, **options):
generation_to_deactivate = Generation.objects.get(id=int(generation_id, 10))
if not generation_to_deactivate.active:
raise CommandError, "The generation %s wasn't active" % (generation_id,)
active_generations = Generation.objects.filter(active=True).count()
if active_generations <= 1 and not options['force']:
raise CommandError, "You're trying to deactivate the only active generation. If this is what you intended, please re-run the command with --force"
generation_to_deactivate.active = False
if options['commit']:
generation_to_deactivate.save()
print "%s - deactivated" % generation_to_deactivate
else:
print "%s - not deactivated, dry run" % generation_to_deactivate
|
Add a command for deactivating a generation
|
Add a command for deactivating a generation
|
Python
|
agpl-3.0
|
Sinar/mapit,chris48s/mapit,New-Bamboo/mapit,Sinar/mapit,opencorato/mapit,chris48s/mapit,chris48s/mapit,Code4SA/mapit,opencorato/mapit,opencorato/mapit,Code4SA/mapit,Code4SA/mapit,New-Bamboo/mapit
|
Add a command for deactivating a generation
|
<commit_before><commit_msg>Add a command for deactivating a generation<commit_after>
|
|
98fbfe6e65c4cb32ea0f4f6ce6cba77f7fadcb7b
|
app/api/tests/test_vendor_api.py
|
app/api/tests/test_vendor_api.py
|
from django.test import Client, TestCase
from .utils import obtain_api_key, create_admin_account
class VendorApiTest(TestCase):
"""Test for Vendor API."""
def setUp(self):
self.client = Client()
self.endpoint = '/api'
self.admin_test_credentials = ('admin', 'admin@taverna.com', 'qwerty123')
create_admin_account(*self.admin_test_credentials)
self.header = {
'HTTP_X_TAVERNATOKEN': obtain_api_key(
self.client, *self.admin_test_credentials
)
}
self.vendors = (
('vendor1', 'info1'),
('vendor2', 'info2')
)
def make_request(self, query, method='GET'):
if method == 'GET':
return self.client.get(self.endpoint,
data={'query': query},
**self.header
).json()
if method == 'POST':
return self.client.post(self.endpoint,
data={'query': query},
**self.header
).json()
def create_vendor(self, name, info):
query = '''
mutation{
createVendor(input: {name: "%s", info: "%s"}){
vendor{
id,
originalId,
name,
info
}
}
}
''' % (name, info)
return self.make_request(query, 'POST')
def retrieve_vendor(self, vendor_id):
query = 'query {vendor(id: "%s") {name}}' % (vendor_id)
return self.make_request(query)
def create_multiple_vendors(self):
return [self.create_vendor(name, info) for name, info in self.vendors]
def test_creation_of_vendor_object(self):
# For new vendor record
response = self.create_vendor('vendor4', 'info4')
created_vendor = response['vendor']
expected = {
'vendor': {
'id': created_vendor['id'],
'originalId': created_vendor['originalId'],
'name': 'vendor4',
'info': 'info4'
}
}
self.assertEqual(expected, response)
|
Add test for vendor object creation
|
Add test for vendor object creation
|
Python
|
mit
|
teamtaverna/core
|
Add test for vendor object creation
|
<commit_before><commit_msg>Add test for vendor object creation<commit_after>
|
|
7c33e8c7a386e911d835f81e637515d40dfc4e62
|
benchmarks/bench_laplace.py
|
benchmarks/bench_laplace.py
|
"""
Benchmark Laplace equation solving.
From the Numpy benchmark suite, original code at
https://github.com/yarikoptic/numpy-vbench/commit/a192bfd43043d413cc5d27526a9b28ad343b2499
"""
import numpy as np
from numba import jit
dx = 0.1
dy = 0.1
dx2 = (dx * dx)
dy2 = (dy * dy)
@jit(nopython=True)
def laplace(N, Niter):
u = np.zeros((N, N))
u[0] = 1
for i in range(Niter):
u[1:(-1), 1:(-1)] = ((((u[2:, 1:(-1)] + u[:(-2), 1:(-1)]) * dy2) +
((u[1:(-1), 2:] + u[1:(-1), :(-2)]) * dx2))
/ (2 * (dx2 + dy2)))
return u
class Laplace:
N = 150
Niter = 200
def setup(self):
# Warm up
self.run_laplace(10, 10)
def run_laplace(self, N, Niter):
u = laplace(N, Niter)
def time_laplace(self):
self.run_laplace(self.N, self.Niter)
|
Add a Laplace equation solving benchmark (from Numpy)
|
Add a Laplace equation solving benchmark (from Numpy)
|
Python
|
bsd-2-clause
|
numba/numba-benchmark
|
Add a Laplace equation solving benchmark (from Numpy)
|
<commit_before><commit_msg>Add a Laplace equation solving benchmark (from Numpy)<commit_after>
|
|
9115628cf10e194f1975e01142d8ae08ab5c4b06
|
joommf/test_odtreader.py
|
joommf/test_odtreader.py
|
def test_odtreader_dynamics_example():
from joommf.sim import Sim
from joommf.mesh import Mesh
from joommf.energies.exchange import Exchange
from joommf.energies.demag import Demag
from joommf.energies.zeeman import FixedZeeman
from joommf.drivers import evolver
# Mesh specification.
lx = ly = lz = 50e-9 # x, y, and z dimensions (m)
dx = dy = dz = 5e-9 # x, y, and z cell dimensions (m)
Ms = 8e5 # saturation magnetisation (A/m)
A = 1e-11 # exchange energy constant (J/m)
H = (1e3, 0, 0) # external magnetic field (A/m)
m_init = (0, 0, 1) # initial magnetisation
t_sim = 0.5e-9 # simulation time (s)
gamma = 2.21e5
alpha = 0.1
# Create a mesh.
mesh = Mesh((lx, ly, lz), (dx, dy, dz))
# Create a simulation object.
sim = Sim(mesh, Ms, name='dynamics_example', debug=True)
# Add energies.
sim.add_energy(Exchange(A))
sim.add_energy(Demag())
sim.add_energy(FixedZeeman(H))
sim.set_evolver(
evolver.LLG(t_sim, m_init, Ms, alpha, gamma, name='evolver'))
# Set initial magnetisation.
sim.set_m(m_init)
# Run simulation.
sim.run()
assert sim.df.time.values[-1] == 0.5e-09
|
Add test for pandas dataframe loading
|
Add test for pandas dataframe loading
|
Python
|
bsd-2-clause
|
fangohr/oommf-python,ryanpepper/oommf-python,ryanpepper/oommf-python,fangohr/oommf-python,ryanpepper/oommf-python,fangohr/oommf-python,ryanpepper/oommf-python
|
Add test for pandas dataframe loading
|
<commit_before><commit_msg>Add test for pandas dataframe loading<commit_after>
|
|
5f47cf46c82d9a48a9efe5ad11c6c3a55896da12
|
cupy/sparse/compressed.py
|
cupy/sparse/compressed.py
|
from cupy import cusparse
from cupy.sparse import base
from cupy.sparse import data as sparse_data
class _compressed_sparse_matrix(sparse_data._data_matrix):
def __init__(self, arg1, shape=None, dtype=None, copy=False):
if isinstance(arg1, tuple) and len(arg1) == 3:
data, indices, indptr = arg1
if shape is not None and len(shape) != 2:
raise ValueError(
'Only two-dimensional sparse arrays are supported.')
if not(base.isdense(data) and data.ndim == 1 and
base.isdense(indices) and indices.ndim == 1 and
base.isdense(indptr) and indptr.ndim == 1):
raise ValueError(
'data, indices, and indptr should be 1-D')
if len(data) != len(indices):
raise ValueError('indices and data should have the same size')
if dtype is None:
dtype = data.dtype
if dtype != 'f' and dtype != 'd':
raise ValueError('Only float32 and float64 are supported')
sparse_data._data_matrix.__init__(self, data)
self.indices = indices.astype('i', copy=copy)
self.indptr = indptr.astype('i', copy=copy)
if shape is None:
shape = self._swap(len(indptr) - 1, int(indices.max()) + 1)
else:
raise ValueError(
'Only (data, indices, indptr) format is supported')
major, minor = self._swap(*shape)
if len(indptr) != major + 1:
raise ValueError('index pointer size (%d) should be (%d)'
% (len(indptr), major + 1))
self._descr = cusparse.MatDescriptor.create()
self._shape = shape
def _with_data(self, data):
return self.__class__(
(data, self.indices.copy(), self.indptr.copy()), shape=self.shape)
def _swap(self, x, y):
raise NotImplementedError
def get_shape(self):
"""Shape of the matrix.
Returns:
tuple: Shape of the matrix.
"""
return self._shape
def getnnz(self, axis=None):
"""Number of stored values, including explicit zeros."""
if axis is None:
return self.data.size
else:
raise ValueError
def sorted_indices(self):
"""Returns a copy of the matrix with sorted indices."""
x = self.copy()
x.sort_indices()
return x
|
Implement abstract class for csc and csr matrix
|
Implement abstract class for csc and csr matrix
|
Python
|
mit
|
cupy/cupy,cupy/cupy,cupy/cupy,cupy/cupy
|
Implement abstract class for csc and csr matrix
|
<commit_before><commit_msg>Implement abstract class for csc and csr matrix<commit_after>
else:
raise ValueError
def sorted_indices(self):
"""Returns a copy of the matrix with sorted indices."""
x = self.copy()
x.sort_indices()
return x
|
<commit_before><commit_msg>Implement abstract class for csc and csr matrix<commit_after>from cupy import cusparse
from cupy.sparse import base
from cupy.sparse import data as sparse_data
class _compressed_sparse_matrix(sparse_data._data_matrix):
def __init__(self, arg1, shape=None, dtype=None, copy=False):
if isinstance(arg1, tuple) and len(arg1) == 3:
data, indices, indptr = arg1
if shape is not None and len(shape) != 2:
raise ValueError(
'Only two-dimensional sparse arrays are supported.')
if not(base.isdense(data) and data.ndim == 1 and
base.isdense(indices) and indices.ndim == 1 and
base.isdense(indptr) and indptr.ndim == 1):
raise ValueError(
'data, indices, and indptr should be 1-D')
if len(data) != len(indices):
raise ValueError('indices and data should have the same size')
if dtype is None:
dtype = data.dtype
if dtype != 'f' and dtype != 'd':
raise ValueError('Only float32 and float64 are supported')
sparse_data._data_matrix.__init__(self, data)
self.indices = indices.astype('i', copy=copy)
self.indptr = indptr.astype('i', copy=copy)
if shape is None:
shape = self._swap(len(indptr) - 1, int(indices.max()) + 1)
else:
raise ValueError(
'Only (data, indices, indptr) format is supported')
major, minor = self._swap(*shape)
if len(indptr) != major + 1:
raise ValueError('index pointer size (%d) should be (%d)'
% (len(indptr), major + 1))
self._descr = cusparse.MatDescriptor.create()
self._shape = shape
def _with_data(self, data):
return self.__class__(
(data, self.indices.copy(), self.indptr.copy()), shape=self.shape)
def _swap(self, x, y):
raise NotImplementedError
def get_shape(self):
"""Shape of the matrix.
Returns:
tuple: Shape of the matrix.
"""
return self._shape
def getnnz(self, axis=None):
"""Number of stored values, including explicit zeros."""
if axis is None:
return self.data.size
else:
raise ValueError
def sorted_indices(self):
"""Returns a copy of the matrix with sorted indices."""
x = self.copy()
x.sort_indices()
return x
|
|
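A minimal usage sketch for the abstract class in the commit above. The csr-style subclass, the array values and the shape are illustrative assumptions, not part of the commit; a concrete class only has to supply _swap(), and _compressed_sparse_matrix is assumed to be in scope as defined above.
import cupy
class csr_matrix(_compressed_sparse_matrix):
    # CSR: the major axis is rows, so (major, minor) maps straight to (rows, cols)
    def _swap(self, x, y):
        return x, y
data = cupy.array([1.0, 2.0, 3.0], dtype='f')   # stored non-zero values
indices = cupy.array([0, 2, 1], dtype='i')      # column index of each value
indptr = cupy.array([0, 2, 3], dtype='i')       # row i owns data[indptr[i]:indptr[i+1]]
m = csr_matrix((data, indices, indptr), shape=(2, 3))
print(m.get_shape())   # (2, 3)
print(m.getnnz())      # 3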
36333c275f4d3a66c8f14383c3ada5a42a197bea
|
bumblebee/modules/memory.py
|
bumblebee/modules/memory.py
|
import bumblebee.module
import psutil
def fmt(num, suffix='B'):
for unit in [ "", "Ki", "Mi", "Gi" ]:
if num < 1024.0:
return "{:.2f}{}{}".format(num, unit, suffix)
num /= 1024.0
return "{:05.2f%}{}{}".format(num, "Gi", suffix)
class Module(bumblebee.module.Module):
def __init__(self, args):
super(Module, self).__init__(args)
self._mem = psutil.virtual_memory()
def data(self):
self._mem = psutil.virtual_memory()
free = self._mem.available
total = self._mem.total
return "{}/{} ({:05.02f}%)".format(fmt(self._mem.available), fmt(self._mem.total), 100.0 - self._mem.percent)
def warning(self):
return self._mem.percent < 20
def critical(self):
return self._mem.percent < 10
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
Add module for displaying RAM usage
|
[modules] Add module for displaying RAM usage
Shows free RAM, total RAM, free RAM percentage
|
Python
|
mit
|
tobi-wan-kenobi/bumblebee-status,tobi-wan-kenobi/bumblebee-status
|
[modules] Add module for displaying RAM usage
Shows free RAM, total RAM, free RAM percentage
|
import bumblebee.module
import psutil
def fmt(num, suffix='B'):
for unit in [ "", "Ki", "Mi", "Gi" ]:
if num < 1024.0:
return "{:.2f}{}{}".format(num, unit, suffix)
num /= 1024.0
return "{:05.2f%}{}{}".format(num, "Gi", suffix)
class Module(bumblebee.module.Module):
def __init__(self, args):
super(Module, self).__init__(args)
self._mem = psutil.virtual_memory()
def data(self):
self._mem = psutil.virtual_memory()
free = self._mem.available
total = self._mem.total
return "{}/{} ({:05.02f}%)".format(fmt(self._mem.available), fmt(self._mem.total), 100.0 - self._mem.percent)
def warning(self):
return self._mem.percent < 20
def critical(self):
return self._mem.percent < 10
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
<commit_before><commit_msg>[modules] Add module for displaying RAM usage
Shows free RAM, total RAM, free RAM percentage<commit_after>
|
import bumblebee.module
import psutil
def fmt(num, suffix='B'):
for unit in [ "", "Ki", "Mi", "Gi" ]:
if num < 1024.0:
return "{:.2f}{}{}".format(num, unit, suffix)
num /= 1024.0
return "{:05.2f%}{}{}".format(num, "Gi", suffix)
class Module(bumblebee.module.Module):
def __init__(self, args):
super(Module, self).__init__(args)
self._mem = psutil.virtual_memory()
def data(self):
self._mem = psutil.virtual_memory()
free = self._mem.available
total = self._mem.total
return "{}/{} ({:05.02f}%)".format(fmt(self._mem.available), fmt(self._mem.total), 100.0 - self._mem.percent)
def warning(self):
return self._mem.percent < 20
def critical(self):
return self._mem.percent < 10
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
[modules] Add module for displaying RAM usage
Shows free RAM, total RAM, free RAM percentageimport bumblebee.module
import psutil
def fmt(num, suffix='B'):
for unit in [ "", "Ki", "Mi", "Gi" ]:
if num < 1024.0:
return "{:.2f}{}{}".format(num, unit, suffix)
num /= 1024.0
return "{:05.2f%}{}{}".format(num, "Gi", suffix)
class Module(bumblebee.module.Module):
def __init__(self, args):
super(Module, self).__init__(args)
self._mem = psutil.virtual_memory()
def data(self):
self._mem = psutil.virtual_memory()
free = self._mem.available
total = self._mem.total
return "{}/{} ({:05.02f}%)".format(fmt(self._mem.available), fmt(self._mem.total), 100.0 - self._mem.percent)
def warning(self):
return self._mem.percent < 20
def critical(self):
return self._mem.percent < 10
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
<commit_before><commit_msg>[modules] Add module for displaying RAM usage
Shows free RAM, total RAM, free RAM percentage<commit_after>import bumblebee.module
import psutil
def fmt(num, suffix='B'):
for unit in [ "", "Ki", "Mi", "Gi" ]:
if num < 1024.0:
return "{:.2f}{}{}".format(num, unit, suffix)
num /= 1024.0
return "{:05.2f%}{}{}".format(num, "Gi", suffix)
class Module(bumblebee.module.Module):
def __init__(self, args):
super(Module, self).__init__(args)
self._mem = psutil.virtual_memory()
def data(self):
self._mem = psutil.virtual_memory()
free = self._mem.available
total = self._mem.total
return "{}/{} ({:05.02f}%)".format(fmt(self._mem.available), fmt(self._mem.total), 100.0 - self._mem.percent)
def warning(self):
return self._mem.percent < 20
def critical(self):
return self._mem.percent < 10
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
|
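A standalone check of the fmt() byte-formatting helper from the module above; the sample sizes are illustrative and neither psutil nor the bumblebee framework is needed for this.
def fmt(num, suffix='B'):
    for unit in ["", "Ki", "Mi", "Gi"]:
        if num < 1024.0:
            return "{:.2f}{}{}".format(num, unit, suffix)
        num /= 1024.0
    # fallthrough for values of 1024 GiB and above (the module keeps the "Gi" label here)
    return "{:05.2f}{}{}".format(num, "Gi", suffix)
print(fmt(512))              # 512.00B
print(fmt(2048))             # 2.00KiB
print(fmt(3.5 * 1024 ** 3))  # 3.50GiB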
eda3e6c005c1115a039f394d6f00baabebd39fee
|
calaccess_website/management/commands/updatebuildpublish.py
|
calaccess_website/management/commands/updatebuildpublish.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Update to the latest available CAL-ACCESS snapshot and publish the files to the
website.
"""
import logging
from django.core.management import call_command
from calaccess_raw.management.commands.updatecalaccessrawdata import Command as updatecommand
logger = logging.getLogger(__name__)
class Command(updatecommand):
"""
Update to the latest available CAL-ACCESS snapshot and publish the files to
the website.
"""
help = 'Update to the latest available CAL-ACCESS snapshot and publish the\
files to the website.'
def handle(self, *args, **options):
"""
Make it happen.
"""
super(Command, self).handle(*args, **options)
self.header('Creating latest file links')
call_command('createlatestlinks')
self.header('Baking downloads-website content')
call_command('build')
self.header('Publishing baked content to S3 bucket.')
call_command('publish')
self.success("Done!")
|
Add command for full daily build process
|
Add command for full daily build process
|
Python
|
mit
|
california-civic-data-coalition/django-calaccess-downloads-website,california-civic-data-coalition/django-calaccess-downloads-website,california-civic-data-coalition/django-calaccess-downloads-website
|
Add command for full daily build process
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Update to the latest available CAL-ACCESS snapshot and publish the files to the
website.
"""
import logging
from django.core.management import call_command
from calaccess_raw.management.commands.updatecalaccessrawdata import Command as updatecommand
logger = logging.getLogger(__name__)
class Command(updatecommand):
"""
Update to the latest available CAL-ACCESS snapshot and publish the files to
the website.
"""
help = 'Update to the latest available CAL-ACCESS snapshot and publish the\
files to the website.'
def handle(self, *args, **options):
"""
Make it happen.
"""
super(Command, self).handle(*args, **options)
self.header('Creating latest file links')
call_command('createlatestlinks')
self.header('Baking downloads-website content')
call_command('build')
self.header('Publishing baked content to S3 bucket.')
call_command('publish')
self.success("Done!")
|
<commit_before><commit_msg>Add command for full daily build process<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Update to the latest available CAL-ACCESS snapshot and publish the files to the
website.
"""
import logging
from django.core.management import call_command
from calaccess_raw.management.commands.updatecalaccessrawdata import Command as updatecommand
logger = logging.getLogger(__name__)
class Command(updatecommand):
"""
Update to the latest available CAL-ACCESS snapshot and publish the files to
the website.
"""
help = 'Update to the latest available CAL-ACCESS snapshot and publish the\
files to the website.'
def handle(self, *args, **options):
"""
Make it happen.
"""
super(Command, self).handle(*args, **options)
self.header('Creating latest file links')
call_command('createlatestlinks')
self.header('Baking downloads-website content')
call_command('build')
self.header('Publishing baked content to S3 bucket.')
call_command('publish')
self.success("Done!")
|
Add command for full daily build process#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Update to the latest available CAL-ACCESS snapshot and publish the files to the
website.
"""
import logging
from django.core.management import call_command
from calaccess_raw.management.commands.updatecalaccessrawdata import Command as updatecommand
logger = logging.getLogger(__name__)
class Command(updatecommand):
"""
Update to the latest available CAL-ACCESS snapshot and publish the files to
the website.
"""
help = 'Update to the latest available CAL-ACCESS snapshot and publish the\
files to the website.'
def handle(self, *args, **options):
"""
Make it happen.
"""
super(Command, self).handle(*args, **options)
self.header('Creating latest file links')
call_command('createlatestlinks')
self.header('Baking downloads-website content')
call_command('build')
self.header('Publishing baked content to S3 bucket.')
call_command('publish')
self.success("Done!")
|
<commit_before><commit_msg>Add command for full daily build process<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Update to the latest available CAL-ACCESS snapshot and publish the files to the
website.
"""
import logging
from django.core.management import call_command
from calaccess_raw.management.commands.updatecalaccessrawdata import Command as updatecommand
logger = logging.getLogger(__name__)
class Command(updatecommand):
"""
Update to the latest available CAL-ACCESS snapshot and publish the files to
the website.
"""
help = 'Update to the latest available CAL-ACCESS snapshot and publish the\
files to the website.'
def handle(self, *args, **options):
"""
Make it happen.
"""
super(Command, self).handle(*args, **options)
self.header('Creating latest file links')
call_command('createlatestlinks')
self.header('Baking downloads-website content')
call_command('build')
self.header('Publishing baked content to S3 bucket.')
call_command('publish')
self.success("Done!")
|
|
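The command above extends an existing management command and chains further commands once the parent finishes; a stripped-down sketch of that pattern follows (the chained command name is an arbitrary example, not taken from the repository).
from django.core.management import call_command
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    help = "Toy command that chains other management commands."
    def handle(self, *args, **options):
        # any installed command can be chained once the main work is done
        call_command('check')
        self.stdout.write("chained commands finished")
In the repository above, the full pipeline would presumably be invoked as python manage.py updatebuildpublish from the project that installs the app.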
b8acaf64187f5626ef6755ef00d2b2a1471d4914
|
numba/tests/closures/test_closure_type_inference.py
|
numba/tests/closures/test_closure_type_inference.py
|
import numpy as np
from numba import *
from numba.tests.test_support import *
@autojit
def test_cellvar_promotion(a):
"""
>>> inner = test_cellvar_promotion(10)
200.0
>>> inner.__name__
'inner'
>>> inner()
1000.0
"""
b = int(a) * 2
@jit(void())
def inner():
print a * b
inner()
a = float(a)
b = a * a # + 1j # Promotion issue
return inner
testmod()
|
Add closure type inference test
|
Add closure type inference test
|
Python
|
bsd-2-clause
|
pombredanne/numba,pombredanne/numba,cpcloud/numba,seibert/numba,numba/numba,gmarkall/numba,gmarkall/numba,numba/numba,jriehl/numba,stefanseefeld/numba,sklam/numba,GaZ3ll3/numba,stefanseefeld/numba,gmarkall/numba,sklam/numba,stuartarchibald/numba,gdementen/numba,ssarangi/numba,GaZ3ll3/numba,GaZ3ll3/numba,sklam/numba,stonebig/numba,numba/numba,stuartarchibald/numba,ssarangi/numba,seibert/numba,numba/numba,shiquanwang/numba,stefanseefeld/numba,pitrou/numba,jriehl/numba,ssarangi/numba,seibert/numba,ssarangi/numba,jriehl/numba,stefanseefeld/numba,cpcloud/numba,pitrou/numba,sklam/numba,gmarkall/numba,IntelLabs/numba,shiquanwang/numba,gdementen/numba,seibert/numba,pitrou/numba,ssarangi/numba,sklam/numba,stuartarchibald/numba,seibert/numba,IntelLabs/numba,cpcloud/numba,GaZ3ll3/numba,gdementen/numba,stonebig/numba,stonebig/numba,IntelLabs/numba,numba/numba,pombredanne/numba,stuartarchibald/numba,cpcloud/numba,shiquanwang/numba,gdementen/numba,pitrou/numba,stonebig/numba,gmarkall/numba,IntelLabs/numba,stuartarchibald/numba,pombredanne/numba,gdementen/numba,cpcloud/numba,jriehl/numba,GaZ3ll3/numba,stefanseefeld/numba,IntelLabs/numba,pitrou/numba,stonebig/numba,jriehl/numba,pombredanne/numba
|
Add closure type inference test
|
import numpy as np
from numba import *
from numba.tests.test_support import *
@autojit
def test_cellvar_promotion(a):
"""
>>> inner = test_cellvar_promotion(10)
200.0
>>> inner.__name__
'inner'
>>> inner()
1000.0
"""
b = int(a) * 2
@jit(void())
def inner():
print a * b
inner()
a = float(a)
b = a * a # + 1j # Promotion issue
return inner
testmod()
|
<commit_before><commit_msg>Add closure type inference test<commit_after>
|
import numpy as np
from numba import *
from numba.tests.test_support import *
@autojit
def test_cellvar_promotion(a):
"""
>>> inner = test_cellvar_promotion(10)
200.0
>>> inner.__name__
'inner'
>>> inner()
1000.0
"""
b = int(a) * 2
@jit(void())
def inner():
print a * b
inner()
a = float(a)
b = a * a # + 1j # Promotion issue
return inner
testmod()
|
Add closure type inference testimport numpy as np
from numba import *
from numba.tests.test_support import *
@autojit
def test_cellvar_promotion(a):
"""
>>> inner = test_cellvar_promotion(10)
200.0
>>> inner.__name__
'inner'
>>> inner()
1000.0
"""
b = int(a) * 2
@jit(void())
def inner():
print a * b
inner()
a = float(a)
b = a * a # + 1j # Promotion issue
return inner
testmod()
|
<commit_before><commit_msg>Add closure type inference test<commit_after>import numpy as np
from numba import *
from numba.tests.test_support import *
@autojit
def test_cellvar_promotion(a):
"""
>>> inner = test_cellvar_promotion(10)
200.0
>>> inner.__name__
'inner'
>>> inner()
1000.0
"""
b = int(a) * 2
@jit(void())
def inner():
print a * b
inner()
a = float(a)
b = a * a # + 1j # Promotion issue
return inner
testmod()
|
|
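A plain-Python analogue of the cell-variable behaviour the doctest above exercises (no numba involved; under @autojit the promoted values print as floats, e.g. 200.0 rather than 200).
def make_inner(a):
    b = int(a) * 2
    def inner():
        print(a * b)
    inner()          # uses the current cell values: 10 * 20 -> prints 200
    a = float(a)
    b = a * a        # rebinding the enclosing variables changes what inner() sees
    return inner
f = make_inner(10)   # prints 200
f()                  # prints 1000.0 (10.0 * 100.0)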
5f503f0b9ab51ca2b1985fe88d5e84ff63b7d745
|
addplaylists.py
|
addplaylists.py
|
#!/usr/bin/env python2
from datetime import datetime
from datetime import timedelta
import random
from wuvt.trackman.lib import perdelta
from wuvt import db
from wuvt.trackman.models import DJSet, DJ
today = datetime.now()
print("adding dj")
dj = DJ(u"Johnny 5", u"John")
db.session.add(dj)
db.session.commit()
print("djadded")
for show in perdelta(today - timedelta(days=500), today, timedelta(hours=4)):
if random.randint(0,99) < 40:
djset = DJSet(dj.id)
djset.dtstart = show
djset.dtend = show + timedelta(4)
db.session.add(djset)
db.session.commit()
|
Add sample playlists for testing features.
|
Add sample playlists for testing features.
|
Python
|
agpl-3.0
|
wuvt/wuvt-site,wuvt/wuvt-site,wuvt/wuvt-site,wuvt/wuvt-site
|
Add sample playlists for testing features.
|
#!/usr/bin/env python2
from datetime import datetime
from datetime import timedelta
import random
from wuvt.trackman.lib import perdelta
from wuvt import db
from wuvt.trackman.models import DJSet, DJ
today = datetime.now()
print("adding dj")
dj = DJ(u"Johnny 5", u"John")
db.session.add(dj)
db.session.commit()
print("djadded")
for show in perdelta(today - timedelta(days=500), today, timedelta(hours=4)):
if random.randint(0,99) < 40:
djset = DJSet(dj.id)
djset.dtstart = show
djset.dtend = show + timedelta(4)
db.session.add(djset)
db.session.commit()
|
<commit_before><commit_msg>Add sample playlists for testing features.<commit_after>
|
#!/usr/bin/env python2
from datetime import datetime
from datetime import timedelta
import random
from wuvt.trackman.lib import perdelta
from wuvt import db
from wuvt.trackman.models import DJSet, DJ
today = datetime.now()
print("adding dj")
dj = DJ(u"Johnny 5", u"John")
db.session.add(dj)
db.session.commit()
print("djadded")
for show in perdelta(today - timedelta(days=500), today, timedelta(hours=4)):
if random.randint(0,99) < 40:
djset = DJSet(dj.id)
djset.dtstart = show
djset.dtend = show + timedelta(4)
db.session.add(djset)
db.session.commit()
|
Add sample playlists for testing features.#!/usr/bin/env python2
from datetime import datetime
from datetime import timedelta
import random
from wuvt.trackman.lib import perdelta
from wuvt import db
from wuvt.trackman.models import DJSet, DJ
today = datetime.now()
print("adding dj")
dj = DJ(u"Johnny 5", u"John")
db.session.add(dj)
db.session.commit()
print("djadded")
for show in perdelta(today - timedelta(days=500), today, timedelta(hours=4)):
if random.randint(0,99) < 40:
djset = DJSet(dj.id)
djset.dtstart = show
djset.dtend = show + timedelta(4)
db.session.add(djset)
db.session.commit()
|
<commit_before><commit_msg>Add sample playlists for testing features.<commit_after>#!/usr/bin/env python2
from datetime import datetime
from datetime import timedelta
import random
from wuvt.trackman.lib import perdelta
from wuvt import db
from wuvt.trackman.models import DJSet, DJ
today = datetime.now()
print("adding dj")
dj = DJ(u"Johnny 5", u"John")
db.session.add(dj)
db.session.commit()
print("djadded")
for show in perdelta(today - timedelta(days=500), today, timedelta(hours=4)):
if random.randint(0,99) < 40:
djset = DJSet(dj.id)
djset.dtstart = show
djset.dtend = show + timedelta(4)
db.session.add(djset)
db.session.commit()
|
|
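The script relies on perdelta() from wuvt.trackman.lib to step through show slots; below is a sketch of what such a helper typically looks like (an assumption, the real implementation may differ).
from datetime import datetime, timedelta
def perdelta(start, end, delta):
    # yield datetimes from start (inclusive) to end (exclusive) in fixed steps
    current = start
    while current < end:
        yield current
        current += delta
start = datetime(2015, 1, 1)
slots = list(perdelta(start, start + timedelta(days=1), timedelta(hours=4)))
print(len(slots))   # 6 show slots per day at 4-hour spacing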
4fab31eef9ad80230b36039b66c70d94456e5f9b
|
tests/monad.py
|
tests/monad.py
|
'''Test case for monads and monoidic functions
'''
import unittest
from lighty import monads
class MonadTestCase(unittest.TestCase):
'''Test case for partial template execution
'''
def testNumberComparision(self):
monad = monads.ValueMonad(10)
assert monad == 10, 'Number __eq__ error: %s' % monad
assert monad > 9, 'Number __gt__ error: %s' % monad
assert monad >= 10, 'Number __ge__ error: %s' % monad
assert monad < 11, 'Number __lt__ error: %s' % monad
assert monad <= 10, 'Number __le__ error: %s' % monad
def testNumberActions(self):
monad = monads.ValueMonad(10)
assert monad + 10 == 20, 'Number + error: %s' % (monad + 10)
assert monad - 5 == 5, 'Number - error: %s' % (monad - 5)
assert monad / 2 == 5, 'Number / error: %s' % (monad / 2)
assert monad * 2 == 20, 'Number * error: %s' % (monad * 2)
assert monad ** 2 == 100, 'Number pow error: %s' % (monad ** 2)
assert monad << 1 == 10 << 1, 'Number << error: %s' % (monad << 1)
assert monad >> 1 == 10 >> 1, 'Number >> error: %s' % (monad >> 1)
def testNumberSeq(self):
monad = monads.ValueMonad(10)
assert len(monad) == 1, 'Number len error: %s' % len(monad)
assert monad[0] == 10, 'Number [0] error: %s' % monad[0]
assert isinstance(monad[1], monads.NoneMonad), ('Number [1] error: %s' %
monad[1])
assert not 10 in monad, 'Number in error: %s' % (10 in monad)
def test():
suite = unittest.TestSuite()
suite.addTest(MonadTestCase('testNumberComparision'))
suite.addTest(MonadTestCase('testNumberActions'))
suite.addTest(MonadTestCase('testNumberSeq'))
return suite
|
Add missing tests file from previous commit.
|
Add missing tests file from previous commit.
|
Python
|
bsd-3-clause
|
GrAndSE/lighty
|
Add missing tests file from previous commit.
|
'''Test case for monads and monoidic functions
'''
import unittest
from lighty import monads
class MonadTestCase(unittest.TestCase):
'''Test case for partial template execution
'''
def testNumberComparision(self):
monad = monads.ValueMonad(10)
assert monad == 10, 'Number __eq__ error: %s' % monad
assert monad > 9, 'Number __gt__ error: %s' % monad
assert monad >= 10, 'Number __ge__ error: %s' % monad
assert monad < 11, 'Number __lt__ error: %s' % monad
assert monad <= 10, 'Number __le__ error: %s' % monad
def testNumberActions(self):
monad = monads.ValueMonad(10)
assert monad + 10 == 20, 'Number + error: %s' % (monad + 10)
assert monad - 5 == 5, 'Number - error: %s' % (monad - 5)
assert monad / 2 == 5, 'Number / error: %s' % (monad / 2)
assert monad * 2 == 20, 'Number * error: %s' % (monad * 2)
assert monad ** 2 == 100, 'Number pow error: %s' % (monad ** 2)
assert monad << 1 == 10 << 1, 'Number << error: %s' % (monad << 1)
assert monad >> 1 == 10 >> 1, 'Number >> error: %s' % (monad >> 1)
def testNumberSeq(self):
monad = monads.ValueMonad(10)
assert len(monad) == 1, 'Number len error: %s' % len(monad)
assert monad[0] == 10, 'Number [0] error: %s' % monad[0]
assert isinstance(monad[1], monads.NoneMonad), ('Number [1] error: %s' %
monad[1])
assert not 10 in monad, 'Number in error: %s' % (10 in monad)
def test():
suite = unittest.TestSuite()
suite.addTest(MonadTestCase('testNumberComparision'))
suite.addTest(MonadTestCase('testNumberActions'))
suite.addTest(MonadTestCase('testNumberSeq'))
return suite
|
<commit_before><commit_msg>Add missing tests file from previous commit.<commit_after>
|
'''Test case for monads and monoidic functions
'''
import unittest
from lighty import monads
class MonadTestCase(unittest.TestCase):
'''Test case for partial template execution
'''
def testNumberComparision(self):
monad = monads.ValueMonad(10)
assert monad == 10, 'Number __eq__ error: %s' % monad
assert monad > 9, 'Number __gt__ error: %s' % monad
assert monad >= 10, 'Number __ge__ error: %s' % monad
assert monad < 11, 'Number __lt__ error: %s' % monad
assert monad <= 10, 'Number __le__ error: %s' % monad
def testNumberActions(self):
monad = monads.ValueMonad(10)
assert monad + 10 == 20, 'Number + error: %s' % (monad + 10)
assert monad - 5 == 5, 'Number - error: %s' % (monad - 5)
assert monad / 2 == 5, 'Number / error: %s' % (monad / 2)
assert monad * 2 == 20, 'Number * error: %s' % (monad * 2)
assert monad ** 2 == 100, 'Number pow error: %s' % (monad ** 2)
assert monad << 1 == 10 << 1, 'Number << error: %s' % (monad << 1)
assert monad >> 1 == 10 >> 1, 'Number >> error: %s' % (monad >> 1)
def testNumberSeq(self):
monad = monads.ValueMonad(10)
assert len(monad) == 1, 'Number len error: %s' % len(monad)
assert monad[0] == 10, 'Number [0] error: %s' % monad[0]
assert isinstance(monad[1], monads.NoneMonad), ('Number [1] error: %s' %
monad[1])
assert not 10 in monad, 'Number in error: %s' % (10 in monad)
def test():
suite = unittest.TestSuite()
suite.addTest(MonadTestCase('testNumberComparision'))
suite.addTest(MonadTestCase('testNumberActions'))
suite.addTest(MonadTestCase('testNumberSeq'))
return suite
|
Add missing tests file from previous commit.'''Test case for monads and monoidic functions
'''
import unittest
from lighty import monads
class MonadTestCase(unittest.TestCase):
'''Test case for partial template execution
'''
def testNumberComparision(self):
monad = monads.ValueMonad(10)
assert monad == 10, 'Number __eq__ error: %s' % monad
assert monad > 9, 'Number __gt__ error: %s' % monad
assert monad >= 10, 'Number __ge__ error: %s' % monad
assert monad < 11, 'Number __lt__ error: %s' % monad
assert monad <= 10, 'Number __le__ error: %s' % monad
def testNumberActions(self):
monad = monads.ValueMonad(10)
assert monad + 10 == 20, 'Number + error: %s' % (monad + 10)
assert monad - 5 == 5, 'Number - error: %s' % (monad - 5)
assert monad / 2 == 5, 'Number / error: %s' % (monad / 2)
assert monad * 2 == 20, 'Number * error: %s' % (monad * 2)
assert monad ** 2 == 100, 'Number pow error: %s' % (monad ** 2)
assert monad << 1 == 10 << 1, 'Number << error: %s' % (monad << 1)
assert monad >> 1 == 10 >> 1, 'Number >> error: %s' % (monad >> 1)
def testNumberSeq(self):
monad = monads.ValueMonad(10)
assert len(monad) == 1, 'Number len error: %s' % len(monad)
assert monad[0] == 10, 'Number [0] error: %s' % monad[0]
assert isinstance(monad[1], monads.NoneMonad), ('Number [1] error: %s' %
monad[1])
assert not 10 in monad, 'Number in error: %s' % (10 in monad)
def test():
suite = unittest.TestSuite()
suite.addTest(MonadTestCase('testNumberComparision'))
suite.addTest(MonadTestCase('testNumberActions'))
suite.addTest(MonadTestCase('testNumberSeq'))
return suite
|
<commit_before><commit_msg>Add missing tests file from previous commit.<commit_after>'''Test case for monads and monoidic functions
'''
import unittest
from lighty import monads
class MonadTestCase(unittest.TestCase):
'''Test case for partial template execution
'''
def testNumberComparision(self):
monad = monads.ValueMonad(10)
assert monad == 10, 'Number __eq__ error: %s' % monad
assert monad > 9, 'Number __gt__ error: %s' % monad
assert monad >= 10, 'Number __ge__ error: %s' % monad
assert monad < 11, 'Number __lt__ error: %s' % monad
assert monad <= 10, 'Number __le__ error: %s' % monad
def testNumberActions(self):
monad = monads.ValueMonad(10)
assert monad + 10 == 20, 'Number + error: %s' % (monad + 10)
assert monad - 5 == 5, 'Number - error: %s' % (monad - 5)
assert monad / 2 == 5, 'Number / error: %s' % (monad / 2)
assert monad * 2 == 20, 'Number * error: %s' % (monad * 2)
assert monad ** 2 == 100, 'Number pow error: %s' % (monad ** 2)
assert monad << 1 == 10 << 1, 'Number << error: %s' % (monad << 1)
assert monad >> 1 == 10 >> 1, 'Number >> error: %s' % (monad >> 1)
def testNumberSeq(self):
monad = monads.ValueMonad(10)
assert len(monad) == 1, 'Number len error: %s' % len(monad)
assert monad[0] == 10, 'Number [0] error: %s' % monad[0]
assert isinstance(monad[1], monads.NoneMonad), ('Number [1] error: %s' %
monad[1])
assert not 10 in monad, 'Number in error: %s' % (10 in monad)
def test():
suite = unittest.TestSuite()
suite.addTest(MonadTestCase('testNumberComparision'))
suite.addTest(MonadTestCase('testNumberActions'))
suite.addTest(MonadTestCase('testNumberSeq'))
return suite
|
|
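A rough sketch of the kind of wrapper these assertions target; the real ValueMonad and NoneMonad live in lighty.monads and implement many more operators, so everything below is an illustrative assumption rather than the library's API.
class ValueMonad(object):
    """Minimal stand-in: wraps a value and forwards comparisons and arithmetic."""
    def __init__(self, value):
        self.value = value
    def __eq__(self, other):
        return self.value == other
    def __lt__(self, other):
        return self.value < other
    def __add__(self, other):
        return ValueMonad(self.value + other)
    def __len__(self):
        return 1
m = ValueMonad(10)
assert m == 10
assert m < 11
assert (m + 10) == 20
assert len(m) == 1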
1a7fa8080d19909ccf8e8e89aa19c92c1413f1c1
|
apps/pyjob_submite_jobs_again.py
|
apps/pyjob_submite_jobs_again.py
|
#!/usr/bin/env python3
import os
import sys
import subprocess
right_inputs = False
if len(sys.argv) > 2 :
tp = sys.argv[1]
rms = [int(x) for x in sys.argv[2:]]
if tp in ['ma', 'ex', 'xy']: right_inputs = True
curdir = os.getcwd()
if right_inputs:
if curdir.endswith('trackcpp'):
flatfile = 'flatfile.txt'
input_file = 'input_' + tp.lower() + '.py'
exec_file = 'runjob_' + tp.lower() + '.sh'
dirs = curdir.split(os.sep)
label = '-'.join(dirs[-5:]) + '-submitting_again.'
for m in rms:
mlabel = 'rms%02i'%m
os.chdir(os.path.join(curdir, mlabel))
files = os.listdir(os.getcwd())
kicktable_files = ','.join([f for f in files if f.endswith('_kicktable.txt')])
if len(kicktable_files) != 0:
inputs = ','.join([kicktable_files, flatfile,input_file])
else:
inputs = ','.join([flatfile,input_file])
description = ': '.join([mlabel, tp.upper(), label])
p = subprocess.Popen(['pyjob_qsub.py', '--inputFiles', inputs, '--exec', exec_file, '--description', description])
p.wait()
os.chdir(curdir)
else:
print('Change the current working directory to trackcpp directory.')
else:
print('Invalid inputs')
|
Add script to submite jobs again
|
Add script to submite jobs again
|
Python
|
mit
|
lnls-fac/job_manager
|
Add script to submite jobs again
|
#!/usr/bin/env python3
import os
import sys
import subprocess
right_inputs = False
if len(sys.argv) > 2 :
tp = sys.argv[1]
rms = [int(x) for x in sys.argv[2:]]
if tp in ['ma', 'ex', 'xy']: right_inputs = True
curdir = os.getcwd()
if right_inputs:
if curdir.endswith('trackcpp'):
flatfile = 'flatfile.txt'
input_file = 'input_' + tp.lower() + '.py'
exec_file = 'runjob_' + tp.lower() + '.sh'
dirs = curdir.split(os.sep)
label = '-'.join(dirs[-5:]) + '-submitting_again.'
for m in rms:
mlabel = 'rms%02i'%m
os.chdir(os.path.join(curdir, mlabel))
files = os.listdir(os.getcwd())
kicktable_files = ','.join([f for f in files if f.endswith('_kicktable.txt')])
if len(kicktable_files) != 0:
inputs = ','.join([kicktable_files, flatfile,input_file])
else:
inputs = ','.join([flatfile,input_file])
description = ': '.join([mlabel, tp.upper(), label])
p = subprocess.Popen(['pyjob_qsub.py', '--inputFiles', inputs, '--exec', exec_file, '--description', description])
p.wait()
os.chdir(curdir)
else:
print('Change the current working directory to trackcpp directory.')
else:
print('Invalid inputs')
|
<commit_before><commit_msg>Add script to submite jobs again<commit_after>
|
#!/usr/bin/env python3
import os
import sys
import subprocess
right_inputs = False
if len(sys.argv) > 2 :
tp = sys.argv[1]
rms = [int(x) for x in sys.argv[2:]]
if tp in ['ma', 'ex', 'xy']: right_inputs = True
curdir = os.getcwd()
if right_inputs:
if curdir.endswith('trackcpp'):
flatfile = 'flatfile.txt'
input_file = 'input_' + tp.lower() + '.py'
exec_file = 'runjob_' + tp.lower() + '.sh'
dirs = curdir.split(os.sep)
label = '-'.join(dirs[-5:]) + '-submitting_again.'
for m in rms:
mlabel = 'rms%02i'%m
os.chdir(os.path.join(curdir, mlabel))
files = os.listdir(os.getcwd())
kicktable_files = ','.join([f for f in files if f.endswith('_kicktable.txt')])
if len(kicktable_files) != 0:
inputs = ','.join([kicktable_files, flatfile,input_file])
else:
inputs = ','.join([flatfile,input_file])
description = ': '.join([mlabel, tp.upper(), label])
p = subprocess.Popen(['pyjob_qsub.py', '--inputFiles', inputs, '--exec', exec_file, '--description', description])
p.wait()
os.chdir(curdir)
else:
print('Change the current working directory to trackcpp directory.')
else:
print('Invalid inputs')
|
Add script to submite jobs again#!/usr/bin/env python3
import os
import sys
import subprocess
right_inputs = False
if len(sys.argv) > 2 :
tp = sys.argv[1]
rms = [int(x) for x in sys.argv[2:]]
if tp in ['ma', 'ex', 'xy']: right_inputs = True
curdir = os.getcwd()
if right_inputs:
if curdir.endswith('trackcpp'):
flatfile = 'flatfile.txt'
input_file = 'input_' + tp.lower() + '.py'
exec_file = 'runjob_' + tp.lower() + '.sh'
dirs = curdir.split(os.sep)
label = '-'.join(dirs[-5:]) + '-submitting_again.'
for m in rms:
mlabel = 'rms%02i'%m
os.chdir(os.path.join(curdir, mlabel))
files = os.listdir(os.getcwd())
kicktable_files = ','.join([f for f in files if f.endswith('_kicktable.txt')])
if len(kicktable_files) != 0:
inputs = ','.join([kicktable_files, flatfile,input_file])
else:
inputs = ','.join([flatfile,input_file])
description = ': '.join([mlabel, tp.upper(), label])
p = subprocess.Popen(['pyjob_qsub.py', '--inputFiles', inputs, '--exec', exec_file, '--description', description])
p.wait()
os.chdir(curdir)
else:
print('Change the current working directory to trackcpp directory.')
else:
print('Invalid inputs')
|
<commit_before><commit_msg>Add script to submite jobs again<commit_after>#!/usr/bin/env python3
import os
import sys
import subprocess
right_inputs = False
if len(sys.argv) > 2 :
tp = sys.argv[1]
rms = [int(x) for x in sys.argv[2:]]
if tp in ['ma', 'ex', 'xy']: right_inputs = True
curdir = os.getcwd()
if right_inputs:
if curdir.endswith('trackcpp'):
flatfile = 'flatfile.txt'
input_file = 'input_' + tp.lower() + '.py'
exec_file = 'runjob_' + tp.lower() + '.sh'
dirs = curdir.split(os.sep)
label = '-'.join(dirs[-5:]) + '-submitting_again.'
for m in rms:
mlabel = 'rms%02i'%m
os.chdir(os.path.join(curdir, mlabel))
files = os.listdir(os.getcwd())
kicktable_files = ','.join([f for f in files if f.endswith('_kicktable.txt')])
if len(kicktable_files) != 0:
inputs = ','.join([kicktable_files, flatfile,input_file])
else:
inputs = ','.join([flatfile,input_file])
description = ': '.join([mlabel, tp.upper(), label])
p = subprocess.Popen(['pyjob_qsub.py', '--inputFiles', inputs, '--exec', exec_file, '--description', description])
p.wait()
os.chdir(curdir)
else:
print('Change the current working directory to trackcpp directory.')
else:
print('Invalid inputs')
|
|
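A quick check of the naming conventions the script above relies on (the values are illustrative):
rms = [1, 2, 5]
print(['rms%02i' % m for m in rms])    # ['rms01', 'rms02', 'rms05']
tp = 'ma'
print('input_' + tp.lower() + '.py')   # input_ma.py
print('runjob_' + tp.lower() + '.sh')  # runjob_ma.sh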
884ae74bb75e5a0c60da74791a2e6fad9e4b83e5
|
py/find-right-interval.py
|
py/find-right-interval.py
|
from operator import itemgetter
# Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution(object):
def findRightInterval(self, intervals):
"""
:type intervals: List[Interval]
:rtype: List[int]
"""
sorted_itv = map(itemgetter(1, 2), sorted((x.start, i, x) for i, x in enumerate(intervals)))
size = len(intervals)
ans = []
for itv in intervals:
L, U = -1, size
while L + 1 < U:
mid = (L + U) / 2
if sorted_itv[mid][1].start >= itv.end:
U = mid
else:
L = mid
if U == size:
ans.append(-1)
else:
ans.append(sorted_itv[U][0])
return ans
|
Add py solution for 436. Find Right Interval
|
Add py solution for 436. Find Right Interval
436. Find Right Interval: https://leetcode.com/problems/find-right-interval/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 436. Find Right Interval
436. Find Right Interval: https://leetcode.com/problems/find-right-interval/
|
from operator import itemgetter
# Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution(object):
def findRightInterval(self, intervals):
"""
:type intervals: List[Interval]
:rtype: List[int]
"""
sorted_itv = map(itemgetter(1, 2), sorted((x.start, i, x) for i, x in enumerate(intervals)))
size = len(intervals)
ans = []
for itv in intervals:
L, U = -1, size
while L + 1 < U:
mid = (L + U) / 2
if sorted_itv[mid][1].start >= itv.end:
U = mid
else:
L = mid
if U == size:
ans.append(-1)
else:
ans.append(sorted_itv[U][0])
return ans
|
<commit_before><commit_msg>Add py solution for 436. Find Right Interval
436. Find Right Interval: https://leetcode.com/problems/find-right-interval/<commit_after>
|
from operator import itemgetter
# Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution(object):
def findRightInterval(self, intervals):
"""
:type intervals: List[Interval]
:rtype: List[int]
"""
sorted_itv = map(itemgetter(1, 2), sorted((x.start, i, x) for i, x in enumerate(intervals)))
size = len(intervals)
ans = []
for itv in intervals:
L, U = -1, size
while L + 1 < U:
mid = (L + U) / 2
if sorted_itv[mid][1].start >= itv.end:
U = mid
else:
L = mid
if U == size:
ans.append(-1)
else:
ans.append(sorted_itv[U][0])
return ans
|
Add py solution for 436. Find Right Interval
436. Find Right Interval: https://leetcode.com/problems/find-right-interval/from operator import itemgetter
# Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution(object):
def findRightInterval(self, intervals):
"""
:type intervals: List[Interval]
:rtype: List[int]
"""
sorted_itv = map(itemgetter(1, 2), sorted((x.start, i, x) for i, x in enumerate(intervals)))
size = len(intervals)
ans = []
for itv in intervals:
L, U = -1, size
while L + 1 < U:
mid = (L + U) / 2
if sorted_itv[mid][1].start >= itv.end:
U = mid
else:
L = mid
if U == size:
ans.append(-1)
else:
ans.append(sorted_itv[U][0])
return ans
|
<commit_before><commit_msg>Add py solution for 436. Find Right Interval
436. Find Right Interval: https://leetcode.com/problems/find-right-interval/<commit_after>from operator import itemgetter
# Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution(object):
def findRightInterval(self, intervals):
"""
:type intervals: List[Interval]
:rtype: List[int]
"""
sorted_itv = map(itemgetter(1, 2), sorted((x.start, i, x) for i, x in enumerate(intervals)))
size = len(intervals)
ans = []
for itv in intervals:
L, U = -1, size
while L + 1 < U:
mid = (L + U) / 2
if sorted_itv[mid][1].start >= itv.end:
U = mid
else:
L = mid
if U == size:
ans.append(-1)
else:
ans.append(sorted_itv[U][0])
return ans
|
|
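A tiny Python 2 driver for the solution above (the interval values are illustrative); Python 2 matters because the solution relies on list-returning map() and integer division, and Solution is assumed to be the class defined above.
class Interval(object):
    def __init__(self, s=0, e=0):
        self.start = s
        self.end = e
intervals = [Interval(3, 4), Interval(2, 3), Interval(1, 2)]
print Solution().findRightInterval(intervals)   # [-1, 0, 1]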
07f8fd56ab366a2d1365278c3310ade4b1d30c57
|
heat_integrationtests/functional/test_versionnegotiation.py
|
heat_integrationtests/functional/test_versionnegotiation.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
from heat_integrationtests.functional import functional_base
expected_version_dict = {
"versions": [
{"links": [{"href": None, "rel": "self"}],
"status": "CURRENT", "id": "v1.0"}
]
}
class VersionNegotiationTestCase(functional_base.FunctionalTestsBase):
def test_authless_version_negotiation(self):
# NOTE(pas-ha): this will grab the public endpoint by default
heat_url = self.identity_client.get_endpoint_url(
'orchestration', region=self.conf.region)
heat_api_root = heat_url.split('/v1')[0]
expected_version_dict[
'versions'][0]['links'][0]['href'] = heat_api_root + '/v1/'
r = requests.get(heat_api_root)
self.assertEqual(300, r.status_code, 'got response %s' % r.text)
self.assertEqual(expected_version_dict, r.json())
|
Add functional test for version negotiation
|
Add functional test for version negotiation
the test attempts to make an unauthenticated request to the Heat API
root.
Change-Id: Ib14628927efe561744cda683ca4dcf27b0524e20
Story: 2002531
Task: 22077
|
Python
|
apache-2.0
|
openstack/heat,noironetworks/heat,noironetworks/heat,openstack/heat
|
Add functional test for version negotiation
the test attempts to make an unauthenticated request to the Heat API
root.
Change-Id: Ib14628927efe561744cda683ca4dcf27b0524e20
Story: 2002531
Task: 22077
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
from heat_integrationtests.functional import functional_base
expected_version_dict = {
"versions": [
{"links": [{"href": None, "rel": "self"}],
"status": "CURRENT", "id": "v1.0"}
]
}
class VersionNegotiationTestCase(functional_base.FunctionalTestsBase):
def test_authless_version_negotiation(self):
# NOTE(pas-ha): this will grab the public endpoint by default
heat_url = self.identity_client.get_endpoint_url(
'orchestration', region=self.conf.region)
heat_api_root = heat_url.split('/v1')[0]
expected_version_dict[
'versions'][0]['links'][0]['href'] = heat_api_root + '/v1/'
r = requests.get(heat_api_root)
self.assertEqual(300, r.status_code, 'got response %s' % r.text)
self.assertEqual(expected_version_dict, r.json())
|
<commit_before><commit_msg>Add functional test for version negotiation
the test attempts to make an unauthenticated request to the Heat API
root.
Change-Id: Ib14628927efe561744cda683ca4dcf27b0524e20
Story: 2002531
Task: 22077<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
from heat_integrationtests.functional import functional_base
expected_version_dict = {
"versions": [
{"links": [{"href": None, "rel": "self"}],
"status": "CURRENT", "id": "v1.0"}
]
}
class VersionNegotiationTestCase(functional_base.FunctionalTestsBase):
def test_authless_version_negotiation(self):
# NOTE(pas-ha): this will grab the public endpoint by default
heat_url = self.identity_client.get_endpoint_url(
'orchestration', region=self.conf.region)
heat_api_root = heat_url.split('/v1')[0]
expected_version_dict[
'versions'][0]['links'][0]['href'] = heat_api_root + '/v1/'
r = requests.get(heat_api_root)
self.assertEqual(300, r.status_code, 'got response %s' % r.text)
self.assertEqual(expected_version_dict, r.json())
|
Add functional test for version negotiation
the test attempts to make an unauthenticated request to the Heat API
root.
Change-Id: Ib14628927efe561744cda683ca4dcf27b0524e20
Story: 2002531
Task: 22077# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
from heat_integrationtests.functional import functional_base
expected_version_dict = {
"versions": [
{"links": [{"href": None, "rel": "self"}],
"status": "CURRENT", "id": "v1.0"}
]
}
class VersionNegotiationTestCase(functional_base.FunctionalTestsBase):
def test_authless_version_negotiation(self):
# NOTE(pas-ha): this will grab the public endpoint by default
heat_url = self.identity_client.get_endpoint_url(
'orchestration', region=self.conf.region)
heat_api_root = heat_url.split('/v1')[0]
expected_version_dict[
'versions'][0]['links'][0]['href'] = heat_api_root + '/v1/'
r = requests.get(heat_api_root)
self.assertEqual(300, r.status_code, 'got response %s' % r.text)
self.assertEqual(expected_version_dict, r.json())
|
<commit_before><commit_msg>Add functional test for version negotiation
the test attempts to make an unauthenticated request to the Heat API
root.
Change-Id: Ib14628927efe561744cda683ca4dcf27b0524e20
Story: 2002531
Task: 22077<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
from heat_integrationtests.functional import functional_base
expected_version_dict = {
"versions": [
{"links": [{"href": None, "rel": "self"}],
"status": "CURRENT", "id": "v1.0"}
]
}
class VersionNegotiationTestCase(functional_base.FunctionalTestsBase):
def test_authless_version_negotiation(self):
# NOTE(pas-ha): this will grab the public endpoint by default
heat_url = self.identity_client.get_endpoint_url(
'orchestration', region=self.conf.region)
heat_api_root = heat_url.split('/v1')[0]
expected_version_dict[
'versions'][0]['links'][0]['href'] = heat_api_root + '/v1/'
r = requests.get(heat_api_root)
self.assertEqual(300, r.status_code, 'got response %s' % r.text)
self.assertEqual(expected_version_dict, r.json())
|
|
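For a hypothetical public endpoint such as http://heat.example.com:8004, the expectation the test fills in looks like the dictionary below; the test then asserts an HTTP 300 (Multiple Choices) response carrying exactly this body.
expected = {
    "versions": [
        {"links": [{"href": "http://heat.example.com:8004/v1/", "rel": "self"}],
         "status": "CURRENT", "id": "v1.0"}
    ]
}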
47dff2561be481ff067c22ed98d9ea6a9cf8ae10
|
test/test_notebook.py
|
test/test_notebook.py
|
import os
import glob
import contextlib
import subprocess
import pytest
notebooks = list(glob.glob("*.ipynb", recursive=True))
@contextlib.contextmanager
def cleanup(notebook):
name, __ = os.path.splitext(notebook)
yield
fname = name + ".html"
if os.path.isfile(fname):
os.remove(fname)
@pytest.mark.parametrize("notebook", notebooks)
def test_notebook(notebook):
with cleanup(notebook):
# hack to execute the notebook from commandline
assert 0 == subprocess.call(["jupyter", "nbconvert", "--to=html",
"--ExecutePreprocessor.enabled=True",
notebook])
|
Add test to execute notebooks
|
Add test to execute notebooks
|
Python
|
mit
|
adicu/AccessibleML,alanhdu/AccessibleML
|
Add test to execute notebooks
|
import os
import glob
import contextlib
import subprocess
import pytest
notebooks = list(glob.glob("*.ipynb", recursive=True))
@contextlib.contextmanager
def cleanup(notebook):
name, __ = os.path.splitext(notebook)
yield
fname = name + ".html"
if os.path.isfile(fname):
os.remove(fname)
@pytest.mark.parametrize("notebook", notebooks)
def test_notebook(notebook):
with cleanup(notebook):
# hack to execute the notebook from commandline
assert 0 == subprocess.call(["jupyter", "nbconvert", "--to=html",
"--ExecutePreprocessor.enabled=True",
notebook])
|
<commit_before><commit_msg>Add test to execute notebooks<commit_after>
|
import os
import glob
import contextlib
import subprocess
import pytest
notebooks = list(glob.glob("*.ipynb", recursive=True))
@contextlib.contextmanager
def cleanup(notebook):
name, __ = os.path.splitext(notebook)
yield
fname = name + ".html"
if os.path.isfile(fname):
os.remove(fname)
@pytest.mark.parametrize("notebook", notebooks)
def test_notebook(notebook):
with cleanup(notebook):
# hack to execute the notebook from commandline
assert 0 == subprocess.call(["jupyter", "nbconvert", "--to=html",
"--ExecutePreprocessor.enabled=True",
notebook])
|
Add test to execute notebooksimport os
import glob
import contextlib
import subprocess
import pytest
notebooks = list(glob.glob("*.ipynb", recursive=True))
@contextlib.contextmanager
def cleanup(notebook):
name, __ = os.path.splitext(notebook)
yield
fname = name + ".html"
if os.path.isfile(fname):
os.remove(fname)
@pytest.mark.parametrize("notebook", notebooks)
def test_notebook(notebook):
with cleanup(notebook):
# hack to execute the notebook from commandline
assert 0 == subprocess.call(["jupyter", "nbconvert", "--to=html",
"--ExecutePreprocessor.enabled=True",
notebook])
|
<commit_before><commit_msg>Add test to execute notebooks<commit_after>import os
import glob
import contextlib
import subprocess
import pytest
notebooks = list(glob.glob("*.ipynb", recursive=True))
@contextlib.contextmanager
def cleanup(notebook):
name, __ = os.path.splitext(notebook)
yield
fname = name + ".html"
if os.path.isfile(fname):
os.remove(fname)
@pytest.mark.parametrize("notebook", notebooks)
def test_notebook(notebook):
with cleanup(notebook):
# hack to execute the notebook from commandline
assert 0 == subprocess.call(["jupyter", "nbconvert", "--to=html",
"--ExecutePreprocessor.enabled=True",
notebook])
|
|
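The same conversion can be run by hand for a single notebook, which is all the parametrized test does per file before deleting the generated HTML; the notebook name below is hypothetical.
import subprocess
# one-off equivalent of what the test runs for each discovered notebook
subprocess.call(["jupyter", "nbconvert", "--to=html",
                 "--ExecutePreprocessor.enabled=True", "analysis.ipynb"])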
0b8d5794d2c5a1ae46659e02b65d1c21ffe8881d
|
babyonboard/api/tests/test_views.py
|
babyonboard/api/tests/test_views.py
|
import json
from rest_framework import status
from django.test import TestCase, Client
from django.urls import reverse
from ..models import Temperature
from ..serializers import TemperatureSerializer
client = Client()
class GetCurrentTemperatureTest(TestCase):
""" Test class for GET current temperature from API """
def setUp(self):
Temperature.objects.create(temperature=35)
def test_get_current_temperature(self):
response = client.get(reverse('temperature'))
temperature = Temperature.objects.order_by('date', 'time').last()
serializer = TemperatureSerializer(temperature)
self.assertEqual(response.data, serializer.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
class CreateNewTemperatureTest(TestCase):
""" Test class for saving a new temperature registry """
def setUp(self):
self.valid_payload = {
'temperature': 27.2
}
self.invalid_payload = {
'temperature': ''
}
def test_creat_valid_temperature(self):
response = client.post(
reverse('temperature'),
data=json.dumps(self.valid_payload),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_invalid_temperature(self):
response = client.post(
reverse('temperature'),
data=json.dumps(self.invalid_payload),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
|
Implement tests for temperature endpoint
|
Implement tests for temperature endpoint
|
Python
|
mit
|
BabyOnBoard/BabyOnBoard-API,BabyOnBoard/BabyOnBoard-API
|
Implement tests for temperature endpoint
|
import json
from rest_framework import status
from django.test import TestCase, Client
from django.urls import reverse
from ..models import Temperature
from ..serializers import TemperatureSerializer
client = Client()
class GetCurrentTemperatureTest(TestCase):
""" Test class for GET current temperature from API """
def setUp(self):
Temperature.objects.create(temperature=35)
def test_get_current_temperature(self):
response = client.get(reverse('temperature'))
temperature = Temperature.objects.order_by('date', 'time').last()
serializer = TemperatureSerializer(temperature)
self.assertEqual(response.data, serializer.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
class CreateNewTemperatureTest(TestCase):
""" Test class for saving a new temperature registry """
def setUp(self):
self.valid_payload = {
'temperature': 27.2
}
self.invalid_payload = {
'temperature': ''
}
def test_creat_valid_temperature(self):
response = client.post(
reverse('temperature'),
data=json.dumps(self.valid_payload),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_invalid_temperature(self):
response = client.post(
reverse('temperature'),
data=json.dumps(self.invalid_payload),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
|
<commit_before><commit_msg>Implement tests for temperature endpoint<commit_after>
|
import json
from rest_framework import status
from django.test import TestCase, Client
from django.urls import reverse
from ..models import Temperature
from ..serializers import TemperatureSerializer
client = Client()
class GetCurrentTemperatureTest(TestCase):
""" Test class for GET current temperature from API """
def setUp(self):
Temperature.objects.create(temperature=35)
def test_get_current_temperature(self):
response = client.get(reverse('temperature'))
temperature = Temperature.objects.order_by('date', 'time').last()
serializer = TemperatureSerializer(temperature)
self.assertEqual(response.data, serializer.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
class CreateNewTemperatureTest(TestCase):
""" Test class for saving a new temperature registry """
def setUp(self):
self.valid_payload = {
'temperature': 27.2
}
self.invalid_payload = {
'temperature': ''
}
def test_creat_valid_temperature(self):
response = client.post(
reverse('temperature'),
data=json.dumps(self.valid_payload),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_invalid_temperature(self):
response = client.post(
reverse('temperature'),
data=json.dumps(self.invalid_payload),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
|
Implement tests for temperature endpointimport json
from rest_framework import status
from django.test import TestCase, Client
from django.urls import reverse
from ..models import Temperature
from ..serializers import TemperatureSerializer
client = Client()
class GetCurrentTemperatureTest(TestCase):
""" Test class for GET current temperature from API """
def setUp(self):
Temperature.objects.create(temperature=35)
def test_get_current_temperature(self):
response = client.get(reverse('temperature'))
temperature = Temperature.objects.order_by('date', 'time').last()
serializer = TemperatureSerializer(temperature)
self.assertEqual(response.data, serializer.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
class CreateNewTemperatureTest(TestCase):
""" Test class for saving a new temperature registry """
def setUp(self):
self.valid_payload = {
'temperature': 27.2
}
self.invalid_payload = {
'temperature': ''
}
def test_creat_valid_temperature(self):
response = client.post(
reverse('temperature'),
data=json.dumps(self.valid_payload),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_invalid_temperature(self):
response = client.post(
reverse('temperature'),
data=json.dumps(self.invalid_payload),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
|
<commit_before><commit_msg>Implement tests for temperature endpoint<commit_after>import json
from rest_framework import status
from django.test import TestCase, Client
from django.urls import reverse
from ..models import Temperature
from ..serializers import TemperatureSerializer
client = Client()
class GetCurrentTemperatureTest(TestCase):
""" Test class for GET current temperature from API """
def setUp(self):
Temperature.objects.create(temperature=35)
def test_get_current_temperature(self):
response = client.get(reverse('temperature'))
temperature = Temperature.objects.order_by('date', 'time').last()
serializer = TemperatureSerializer(temperature)
self.assertEqual(response.data, serializer.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
class CreateNewTemperatureTest(TestCase):
""" Test class for saving a new temperature registry """
def setUp(self):
self.valid_payload = {
'temperature': 27.2
}
self.invalid_payload = {
'temperature': ''
}
def test_create_valid_temperature(self):
response = client.post(
reverse('temperature'),
data=json.dumps(self.valid_payload),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_invalid_temperature(self):
response = client.post(
reverse('temperature'),
data=json.dumps(self.invalid_payload),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
|
|
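The tests in the record above assume a Temperature model ordered by date and time plus a TemperatureSerializer, neither of which is shown. A minimal sketch of what those pieces might look like, assuming auto-populated date/time fields and a stock DRF ModelSerializer; the field names come from the ordering used in the test, everything else is an assumption:

# models.py (hypothetical sketch)
from django.db import models

class Temperature(models.Model):
    temperature = models.FloatField()
    date = models.DateField(auto_now_add=True)   # filled in automatically on create
    time = models.TimeField(auto_now_add=True)

# serializers.py (hypothetical sketch)
from rest_framework import serializers

class TemperatureSerializer(serializers.ModelSerializer):
    class Meta:
        model = Temperature
        fields = ('temperature', 'date', 'time')

With a required FloatField, posting an empty temperature string fails validation, which is what the 400 assertion in the invalid-payload test relies on.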
aa6837e14e520f5917cf1c452bd0c9a8ce2a27dd
|
module/others/plugins.py
|
module/others/plugins.py
|
from maya import cmds
class Commands(object):
""" class name must be 'Commands' """
commandDict = {}
def _loadObjPlugin(self):
if not cmds.pluginInfo("objExport", q=True, loaded=True):
cmds.loadPlugin("objExport")
commandDict['sampleCommand'] = "sphere.png"
# ^ Don't forget to add the command to the dictionary.
|
Add new module for plugin loading
|
Add new module for plugin loading
|
Python
|
mit
|
minoue/miExecutor
|
Add new module for plugin loading
|
from maya import cmds
class Commands(object):
""" class name must be 'Commands' """
commandDict = {}
def _loadObjPlugin(self):
if not cmds.pluginInfo("objExport", q=True, loaded=True):
cmds.loadPlugin("objExport")
commandDict['sampleCommand'] = "sphere.png"
# ^ Don't forget to add the command to the dictionary.
|
<commit_before><commit_msg>Add new module for plugin loading<commit_after>
|
from maya import cmds
class Commands(object):
""" class name must be 'Commands' """
commandDict = {}
def _loadObjPlugin(self):
if not cmds.pluginInfo("objExport", q=True, loaded=True):
cmds.loadPlugin("objExport")
commandDict['sampleCommand'] = "sphere.png"
# ^ Don't forget to add the command to the dictionary.
|
Add new module for plugin loadingfrom maya import cmds
class Commands(object):
""" class name must be 'Commands' """
commandDict = {}
def _loadObjPlugin(self):
if not cmds.pluginInfo("objExport", q=True, loaded=True):
cmds.loadPlugin("objExport")
commandDict['sampleCommand'] = "sphere.png"
# ^ Don't forget to add the command to the dictionary.
|
<commit_before><commit_msg>Add new module for plugin loading<commit_after>from maya import cmds
class Commands(object):
""" class name must be 'Commands' """
commandDict = {}
def _loadObjPlugin(self):
if not cmds.pluginInfo("objExport", q=True, loaded=True):
cmds.loadPlugin("objExport")
commandDict['sampleCommand'] = "sphere.png"
# ^ Don't forget to add the command to the dictionary.
|
|
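The Commands class in this record is meant to be discovered by the host tool by name, with commandDict mapping a command to its icon. A small sketch of exercising it by hand from Maya's script editor, assuming the import path follows the file location module/others/plugins.py; the dispatch shown here is illustrative, not the tool's real lookup code:

# run inside Maya's Python script editor (sketch)
from module.others import plugins

cmd = plugins.Commands()
cmd._loadObjPlugin()                      # loads objExport only if it is not already loaded
print(plugins.Commands.commandDict)       # {'sampleCommand': 'sphere.png'}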
67350e9ac3f2dc0fceb1899c8692adcd9cdd4213
|
frappe/tests/test_boot.py
|
frappe/tests/test_boot.py
|
import unittest
import frappe
from frappe.boot import get_unseen_notes
from frappe.desk.doctype.note.note import mark_as_seen
class TestBootData(unittest.TestCase):
def test_get_unseen_notes(self):
frappe.db.delete("Note")
frappe.db.delete("Note Seen By")
note = frappe.get_doc(
{
"doctype": "Note",
"title": "Test Note",
"notify_on_login": 1,
"content": "Test Note 1",
"public": 1,
}
)
note.insert()
frappe.set_user("test@example.com")
unseen_notes = [d.title for d in get_unseen_notes()]
self.assertListEqual(unseen_notes, ["Test Note"])
mark_as_seen(note.name)
unseen_notes = [d.title for d in get_unseen_notes()]
self.assertListEqual(unseen_notes, [])
|
Add a test case to validate `get_unseen_notes`
|
test: Add a test case to validate `get_unseen_notes`
|
Python
|
mit
|
yashodhank/frappe,StrellaGroup/frappe,frappe/frappe,yashodhank/frappe,yashodhank/frappe,yashodhank/frappe,frappe/frappe,frappe/frappe,StrellaGroup/frappe,StrellaGroup/frappe
|
test: Add a test case to validate `get_unseen_notes`
|
import unittest
import frappe
from frappe.boot import get_unseen_notes
from frappe.desk.doctype.note.note import mark_as_seen
class TestBootData(unittest.TestCase):
def test_get_unseen_notes(self):
frappe.db.delete("Note")
frappe.db.delete("Note Seen By")
note = frappe.get_doc(
{
"doctype": "Note",
"title": "Test Note",
"notify_on_login": 1,
"content": "Test Note 1",
"public": 1,
}
)
note.insert()
frappe.set_user("test@example.com")
unseen_notes = [d.title for d in get_unseen_notes()]
self.assertListEqual(unseen_notes, ["Test Note"])
mark_as_seen(note.name)
unseen_notes = [d.title for d in get_unseen_notes()]
self.assertListEqual(unseen_notes, [])
|
<commit_before><commit_msg>test: Add a test case to validate `get_unseen_notes`<commit_after>
|
import unittest
import frappe
from frappe.boot import get_unseen_notes
from frappe.desk.doctype.note.note import mark_as_seen
class TestBootData(unittest.TestCase):
def test_get_unseen_notes(self):
frappe.db.delete("Note")
frappe.db.delete("Note Seen By")
note = frappe.get_doc(
{
"doctype": "Note",
"title": "Test Note",
"notify_on_login": 1,
"content": "Test Note 1",
"public": 1,
}
)
note.insert()
frappe.set_user("test@example.com")
unseen_notes = [d.title for d in get_unseen_notes()]
self.assertListEqual(unseen_notes, ["Test Note"])
mark_as_seen(note.name)
unseen_notes = [d.title for d in get_unseen_notes()]
self.assertListEqual(unseen_notes, [])
|
test: Add a test case to validate `get_unseen_notes`import unittest
import frappe
from frappe.boot import get_unseen_notes
from frappe.desk.doctype.note.note import mark_as_seen
class TestBootData(unittest.TestCase):
def test_get_unseen_notes(self):
frappe.db.delete("Note")
frappe.db.delete("Note Seen By")
note = frappe.get_doc(
{
"doctype": "Note",
"title": "Test Note",
"notify_on_login": 1,
"content": "Test Note 1",
"public": 1,
}
)
note.insert()
frappe.set_user("test@example.com")
unseen_notes = [d.title for d in get_unseen_notes()]
self.assertListEqual(unseen_notes, ["Test Note"])
mark_as_seen(note.name)
unseen_notes = [d.title for d in get_unseen_notes()]
self.assertListEqual(unseen_notes, [])
|
<commit_before><commit_msg>test: Add a test case to validate `get_unseen_notes`<commit_after>import unittest
import frappe
from frappe.boot import get_unseen_notes
from frappe.desk.doctype.note.note import mark_as_seen
class TestBootData(unittest.TestCase):
def test_get_unseen_notes(self):
frappe.db.delete("Note")
frappe.db.delete("Note Seen By")
note = frappe.get_doc(
{
"doctype": "Note",
"title": "Test Note",
"notify_on_login": 1,
"content": "Test Note 1",
"public": 1,
}
)
note.insert()
frappe.set_user("test@example.com")
unseen_notes = [d.title for d in get_unseen_notes()]
self.assertListEqual(unseen_notes, ["Test Note"])
mark_as_seen(note.name)
unseen_notes = [d.title for d in get_unseen_notes()]
self.assertListEqual(unseen_notes, [])
|
|
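Outside the unit test, the same helpers can be exercised from a Frappe console as a quick sanity check. A sketch assuming a working bench site; the site name is a placeholder:

import frappe
from frappe.boot import get_unseen_notes

frappe.init(site="example.localhost")     # placeholder site
frappe.connect()
frappe.set_user("test@example.com")

for note in get_unseen_notes():
    print(note.title)                     # notes flagged notify_on_login that this user has not seen yet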
f7f25876d3398cacc822faf2b16cc156e88c7fd3
|
misc/jp2_kakadu_pillow.py
|
misc/jp2_kakadu_pillow.py
|
# This is the basic flow for getting from a JP2 to a jpg w/ kdu_expand and Pillow
# Useful for debugging the scenario independent of the server.
from PIL import Image
from PIL.ImageFile import Parser
from os import makedirs, path, unlink
import subprocess
import sys
KDU_EXPAND='/usr/local/bin/kdu_expand'
LIB_KDU='/usr/local/lib/libkdu_v72R.so'
TMP='/tmp'
INPUT_JP2='/home/jstroop/Desktop/nanteuil.jp2'
OUT_JPG='/tmp/test.jpg'
REDUCE=0
### cmds, etc.
pipe_fp = '%s/mypipe.bmp' % (TMP,)
kdu_cmd = '%s -i %s -o %s -num_threads 4 -reduce %d' % (KDU_EXPAND, INPUT_JP2, pipe_fp, REDUCE)
mkfifo_cmd = '/usr/bin/mkfifo %s' % (pipe_fp,)
rmfifo_cmd = '/bin/rm %s' % (pipe_fp,)
# make a named pipe
mkfifo_resp = subprocess.check_call(mkfifo_cmd, shell=True)
if mkfifo_resp == 0:
print 'mkfifo OK'
# write kdu_expand's output to the named pipe
kdu_expand_proc = subprocess.Popen(kdu_cmd, shell=True,
bufsize=-1, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
env={ 'LD_LIBRARY_PATH' : KDU_EXPAND })
# open the named pipe and parse the stream
with open(pipe_fp, 'rb') as f:
p = Parser()
while True:
s = f.read(1024)
if not s:
break
p.feed(s)
im = p.close()
# finish kdu
kdu_exit = kdu_expand_proc.wait()
if kdu_exit != 0:
map(sys.stderr.write, kdu_expand_proc.stderr)
else:
# if kdu was successful, save to a jpg
map(sys.stdout.write, kdu_expand_proc.stdout)
im = im.resize((719,900), resample=Image.ANTIALIAS)
im.save(OUT_JPG, quality=95)
# remove the named pipe
rmfifo_resp = subprocess.check_call(rmfifo_cmd, shell=True)
if rmfifo_resp == 0:
print 'rm fifo OK'
|
Use this enough, might as well add it.
|
Use this enough, might as well add it.
|
Python
|
bsd-2-clause
|
ehenneken/loris,medusa-project/loris,rlskoeser/loris,medusa-project/loris,rlskoeser/loris,ehenneken/loris
|
Use this enough, might as well add it.
|
# This is the basic flow for getting from a JP2 to a jpg w/ kdu_expand and Pillow
# Useful for debugging the scenario independent of the server.
from PIL import Image
from PIL.ImageFile import Parser
from os import makedirs, path, unlink
import subprocess
import sys
KDU_EXPAND='/usr/local/bin/kdu_expand'
LIB_KDU='/usr/local/lib/libkdu_v72R.so'
TMP='/tmp'
INPUT_JP2='/home/jstroop/Desktop/nanteuil.jp2'
OUT_JPG='/tmp/test.jpg'
REDUCE=0
### cmds, etc.
pipe_fp = '%s/mypipe.bmp' % (TMP,)
kdu_cmd = '%s -i %s -o %s -num_threads 4 -reduce %d' % (KDU_EXPAND, INPUT_JP2, pipe_fp, REDUCE)
mkfifo_cmd = '/usr/bin/mkfifo %s' % (pipe_fp,)
rmfifo_cmd = '/bin/rm %s' % (pipe_fp,)
# make a named pipe
mkfifo_resp = subprocess.check_call(mkfifo_cmd, shell=True)
if mkfifo_resp == 0:
print 'mkfifo OK'
# write kdu_expand's output to the named pipe
kdu_expand_proc = subprocess.Popen(kdu_cmd, shell=True,
bufsize=-1, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
env={ 'LD_LIBRARY_PATH' : KDU_EXPAND })
# open the named pipe and parse the stream
with open(pipe_fp, 'rb') as f:
p = Parser()
while True:
s = f.read(1024)
if not s:
break
p.feed(s)
im = p.close()
# finish kdu
kdu_exit = kdu_expand_proc.wait()
if kdu_exit != 0:
map(sys.stderr.write, kdu_expand_proc.stderr)
else:
# if kdu was successful, save to a jpg
map(sys.stdout.write, kdu_expand_proc.stdout)
im = im.resize((719,900), resample=Image.ANTIALIAS)
im.save(OUT_JPG, quality=95)
# remove the named pipe
rmfifo_resp = subprocess.check_call(rmfifo_cmd, shell=True)
if rmfifo_resp == 0:
print 'rm fifo OK'
|
<commit_before><commit_msg>Use this enough, might as well add it.<commit_after>
|
# This is the basic flow for getting from a JP2 to a jpg w/ kdu_expand and Pillow
# Useful for debugging the scenario independent of the server.
from PIL import Image
from PIL.ImageFile import Parser
from os import makedirs, path, unlink
import subprocess
import sys
KDU_EXPAND='/usr/local/bin/kdu_expand'
LIB_KDU='/usr/local/lib/libkdu_v72R.so'
TMP='/tmp'
INPUT_JP2='/home/jstroop/Desktop/nanteuil.jp2'
OUT_JPG='/tmp/test.jpg'
REDUCE=0
### cmds, etc.
pipe_fp = '%s/mypipe.bmp' % (TMP,)
kdu_cmd = '%s -i %s -o %s -num_threads 4 -reduce %d' % (KDU_EXPAND, INPUT_JP2, pipe_fp, REDUCE)
mkfifo_cmd = '/usr/bin/mkfifo %s' % (pipe_fp,)
rmfifo_cmd = '/bin/rm %s' % (pipe_fp,)
# make a named pipe
mkfifo_resp = subprocess.check_call(mkfifo_cmd, shell=True)
if mkfifo_resp == 0:
print 'mkfifo OK'
# write kdu_expand's output to the named pipe
kdu_expand_proc = subprocess.Popen(kdu_cmd, shell=True,
bufsize=-1, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
env={ 'LD_LIBRARY_PATH' : KDU_EXPAND })
# open the named pipe and parse the stream
with open(pipe_fp, 'rb') as f:
p = Parser()
while True:
s = f.read(1024)
if not s:
break
p.feed(s)
im = p.close()
# finish kdu
kdu_exit = kdu_expand_proc.wait()
if kdu_exit != 0:
map(sys.stderr.write, kdu_expand_proc.stderr)
else:
# if kdu was successful, save to a jpg
map(sys.stdout.write, kdu_expand_proc.stdout)
im = im.resize((719,900), resample=Image.ANTIALIAS)
im.save(OUT_JPG, quality=95)
# remove the named pipe
rmfifo_resp = subprocess.check_call(rmfifo_cmd, shell=True)
if rmfifo_resp == 0:
print 'rm fifo OK'
|
Use this enough, might as well add it.# This is the basic flow for getting from a JP2 to a jpg w/ kdu_expand and Pillow
# Useful for debugging the scenario independent of the server.
from PIL import Image
from PIL.ImageFile import Parser
from os import makedirs, path, unlink
import subprocess
import sys
KDU_EXPAND='/usr/local/bin/kdu_expand'
LIB_KDU='/usr/local/lib/libkdu_v72R.so'
TMP='/tmp'
INPUT_JP2='/home/jstroop/Desktop/nanteuil.jp2'
OUT_JPG='/tmp/test.jpg'
REDUCE=0
### cmds, etc.
pipe_fp = '%s/mypipe.bmp' % (TMP,)
kdu_cmd = '%s -i %s -o %s -num_threads 4 -reduce %d' % (KDU_EXPAND, INPUT_JP2, pipe_fp, REDUCE)
mkfifo_cmd = '/usr/bin/mkfifo %s' % (pipe_fp,)
rmfifo_cmd = '/bin/rm %s' % (pipe_fp,)
# make a named pipe
mkfifo_resp = subprocess.check_call(mkfifo_cmd, shell=True)
if mkfifo_resp == 0:
print 'mkfifo OK'
# write kdu_expand's output to the named pipe
kdu_expand_proc = subprocess.Popen(kdu_cmd, shell=True,
bufsize=-1, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
env={ 'LD_LIBRARY_PATH' : KDU_EXPAND })
# open the named pipe and parse the stream
with open(pipe_fp, 'rb') as f:
p = Parser()
while True:
s = f.read(1024)
if not s:
break
p.feed(s)
im = p.close()
# finish kdu
kdu_exit = kdu_expand_proc.wait()
if kdu_exit != 0:
map(sys.stderr.write, kdu_expand_proc.stderr)
else:
# if kdu was successful, save to a jpg
map(sys.stdout.write, kdu_expand_proc.stdout)
im = im.resize((719,900), resample=Image.ANTIALIAS)
im.save(OUT_JPG, quality=95)
# remove the named pipe
rmfifo_resp = subprocess.check_call(rmfifo_cmd, shell=True)
if rmfifo_resp == 0:
print 'rm fifo OK'
|
<commit_before><commit_msg>Use this enough, might as well add it.<commit_after># This is the basic flow for getting from a JP2 to a jpg w/ kdu_expand and Pillow
# Useful for debugging the scenario independent of the server.
from PIL import Image
from PIL.ImageFile import Parser
from os import makedirs, path, unlink
import subprocess
import sys
KDU_EXPAND='/usr/local/bin/kdu_expand'
LIB_KDU='/usr/local/lib/libkdu_v72R.so'
TMP='/tmp'
INPUT_JP2='/home/jstroop/Desktop/nanteuil.jp2'
OUT_JPG='/tmp/test.jpg'
REDUCE=0
### cmds, etc.
pipe_fp = '%s/mypipe.bmp' % (TMP,)
kdu_cmd = '%s -i %s -o %s -num_threads 4 -reduce %d' % (KDU_EXPAND, INPUT_JP2, pipe_fp, REDUCE)
mkfifo_cmd = '/usr/bin/mkfifo %s' % (pipe_fp,)
rmfifo_cmd = '/bin/rm %s' % (pipe_fp,)
# make a named pipe
mkfifo_resp = subprocess.check_call(mkfifo_cmd, shell=True)
if mkfifo_resp == 0:
print 'mkfifo OK'
# write kdu_expand's output to the named pipe
kdu_expand_proc = subprocess.Popen(kdu_cmd, shell=True,
bufsize=-1, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
env={ 'LD_LIBRARY_PATH' : KDU_EXPAND })
# open the named pipe and parse the stream
with open(pipe_fp, 'rb') as f:
p = Parser()
while True:
s = f.read(1024)
if not s:
break
p.feed(s)
im = p.close()
# finish kdu
kdu_exit = kdu_expand_proc.wait()
if kdu_exit != 0:
map(sys.stderr.write, kdu_expand_proc.stderr)
else:
# if kdu was successful, save to a jpg
map(sys.stdout.write, kdu_expand_proc.stdout)
im = im.resize((719,900), resample=Image.ANTIALIAS)
im.save(OUT_JPG, quality=95)
# remove the named pipe
rmfifo_resp = subprocess.check_call(rmfifo_cmd, shell=True)
if rmfifo_resp == 0:
print 'rm fifo OK'
|
|
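The script above targets Python 2 (bare print statements, map() used for its side effects) and shells out for mkfifo/rm. A Python 3 sketch of the same named-pipe flow using os.mkfifo directly; the paths are placeholders and LD_LIBRARY_PATH is assumed to point at the directory holding the Kakadu shared library:

import os
import subprocess
from PIL.ImageFile import Parser

KDU_EXPAND = '/usr/local/bin/kdu_expand'
LIB_DIR = '/usr/local/lib'                 # directory containing libkdu_*.so
INPUT_JP2 = 'input.jp2'                    # placeholder input file
PIPE = '/tmp/mypipe.bmp'

os.mkfifo(PIPE)                            # named pipe instead of a temp file on disk
try:
    proc = subprocess.Popen(
        [KDU_EXPAND, '-i', INPUT_JP2, '-o', PIPE, '-num_threads', '4'],
        env=dict(os.environ, LD_LIBRARY_PATH=LIB_DIR),
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    parser = Parser()
    with open(PIPE, 'rb') as f:            # blocks until kdu_expand opens the pipe for writing
        while True:
            chunk = f.read(1024)
            if not chunk:
                break
            parser.feed(chunk)
    im = parser.close()
    if proc.wait() == 0:
        im.save('/tmp/test.jpg', quality=95)
finally:
    os.unlink(PIPE)                        # always remove the fifo, even on failure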
d5e67563f23acb11fe0e4641d48b67fe3509822f
|
apps/companyprofile/migrations/0002_auto_20151014_2132.py
|
apps/companyprofile/migrations/0002_auto_20151014_2132.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('companyprofile', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='company',
old_name='image',
new_name='old_image',
),
]
|
Add test migration removing ref to old company image
|
Add test migration removing ref to old company image
|
Python
|
mit
|
dotKom/onlineweb4,dotKom/onlineweb4,dotKom/onlineweb4,dotKom/onlineweb4
|
Add test migration removing ref to old company image
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('companyprofile', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='company',
old_name='image',
new_name='old_image',
),
]
|
<commit_before><commit_msg>Add test migration removing ref to old company image<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('companyprofile', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='company',
old_name='image',
new_name='old_image',
),
]
|
Add test migration removing ref to old company image# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('companyprofile', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='company',
old_name='image',
new_name='old_image',
),
]
|
<commit_before><commit_msg>Add test migration removing ref to old company image<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('companyprofile', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='company',
old_name='image',
new_name='old_image',
),
]
|
|
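Renaming a field in a migration changes the model attribute as well as the column, so any code still reading the old name has to be updated alongside it. A small sketch of applying the migration and reading the renamed field from a configured Django shell; the import path follows the repo layout and the query is illustrative:

from django.core.management import call_command
from apps.companyprofile.models import Company    # path assumed from apps/companyprofile/

call_command("migrate", "companyprofile")          # applies 0002_auto_20151014_2132

company = Company.objects.first()
if company is not None:
    print(company.old_image)                       # the field formerly exposed as 'image'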
d5b622e9fb855753630cd3a6fae1a315b4be1a08
|
examples/dominant_eigenvector_pytorch.py
|
examples/dominant_eigenvector_pytorch.py
|
import numpy as np
import numpy.random as rnd
import numpy.linalg as la
import torch
from pymanopt import Problem
from pymanopt.tools import decorators
from pymanopt.manifolds import Sphere
from pymanopt.solvers import TrustRegions
def dominant_eigenvector(A):
"""
Returns the dominant eigenvector of the symmetric matrix A.
Note: For the same A, this should yield the same as the dominant invariant
subspace example with p = 1.
"""
m, n = A.shape
assert m == n, "matrix must be square"
assert np.allclose(np.sum(A - A.T), 0), "matrix must be symmetric"
manifold = Sphere(n)
solver = TrustRegions()
A_ = torch.from_numpy(A)
@decorators.pytorch
def cost(x):
return -x.matmul(A_.matmul(x))
problem = Problem(manifold=manifold, cost=cost)
xopt = solver.solve(problem)
return xopt.squeeze()
if __name__ == "__main__":
# Generate random problem data.
n = 128
A = rnd.randn(n, n)
A = 0.5 * (A + A.T)
# Calculate the actual solution by a conventional eigenvalue decomposition.
w, v = la.eig(A)
x = v[:, np.argmax(w)]
# Solve the problem with pymanopt.
xopt = dominant_eigenvector(A)
# Make sure both vectors have the same direction. Both are valid
# eigenvectors, of course, but for comparison we need to get rid of the
# ambiguity.
if np.sign(x[0]) != np.sign(xopt[0]):
xopt = -xopt
# Print information about the solution.
print('')
print("l2-norm of x: %f" % la.norm(x))
print("l2-norm of xopt: %f" % la.norm(xopt))
print("solution found: %s" % np.allclose(x, xopt, rtol=1e-3))
print("l2-error: %f" % la.norm(x - xopt))
|
Add example using new pytorch backend
|
Add example using new pytorch backend
Signed-off-by: Niklas Koep <342d5290239d9c5264c8f98185afedb99596601a@gmail.com>
|
Python
|
bsd-3-clause
|
nkoep/pymanopt,nkoep/pymanopt,pymanopt/pymanopt,nkoep/pymanopt,pymanopt/pymanopt
|
Add example using new pytorch backend
Signed-off-by: Niklas Koep <342d5290239d9c5264c8f98185afedb99596601a@gmail.com>
|
import numpy as np
import numpy.random as rnd
import numpy.linalg as la
import torch
from pymanopt import Problem
from pymanopt.tools import decorators
from pymanopt.manifolds import Sphere
from pymanopt.solvers import TrustRegions
def dominant_eigenvector(A):
"""
Returns the dominant eigenvector of the symmetric matrix A.
Note: For the same A, this should yield the same as the dominant invariant
subspace example with p = 1.
"""
m, n = A.shape
assert m == n, "matrix must be square"
assert np.allclose(np.sum(A - A.T), 0), "matrix must be symmetric"
manifold = Sphere(n)
solver = TrustRegions()
A_ = torch.from_numpy(A)
@decorators.pytorch
def cost(x):
return -x.matmul(A_.matmul(x))
problem = Problem(manifold=manifold, cost=cost)
xopt = solver.solve(problem)
return xopt.squeeze()
if __name__ == "__main__":
# Generate random problem data.
n = 128
A = rnd.randn(n, n)
A = 0.5 * (A + A.T)
# Calculate the actual solution by a conventional eigenvalue decomposition.
w, v = la.eig(A)
x = v[:, np.argmax(w)]
# Solve the problem with pymanopt.
xopt = dominant_eigenvector(A)
# Make sure both vectors have the same direction. Both are valid
# eigenvectors, of course, but for comparison we need to get rid of the
# ambiguity.
if np.sign(x[0]) != np.sign(xopt[0]):
xopt = -xopt
# Print information about the solution.
print('')
print("l2-norm of x: %f" % la.norm(x))
print("l2-norm of xopt: %f" % la.norm(xopt))
print("solution found: %s" % np.allclose(x, xopt, rtol=1e-3))
print("l2-error: %f" % la.norm(x - xopt))
|
<commit_before><commit_msg>Add example using new pytorch backend
Signed-off-by: Niklas Koep <342d5290239d9c5264c8f98185afedb99596601a@gmail.com><commit_after>
|
import numpy as np
import numpy.random as rnd
import numpy.linalg as la
import torch
from pymanopt import Problem
from pymanopt.tools import decorators
from pymanopt.manifolds import Sphere
from pymanopt.solvers import TrustRegions
def dominant_eigenvector(A):
"""
Returns the dominant eigenvector of the symmetric matrix A.
Note: For the same A, this should yield the same as the dominant invariant
subspace example with p = 1.
"""
m, n = A.shape
assert m == n, "matrix must be square"
assert np.allclose(np.sum(A - A.T), 0), "matrix must be symmetric"
manifold = Sphere(n)
solver = TrustRegions()
A_ = torch.from_numpy(A)
@decorators.pytorch
def cost(x):
return -x.matmul(A_.matmul(x))
problem = Problem(manifold=manifold, cost=cost)
xopt = solver.solve(problem)
return xopt.squeeze()
if __name__ == "__main__":
# Generate random problem data.
n = 128
A = rnd.randn(n, n)
A = 0.5 * (A + A.T)
# Calculate the actual solution by a conventional eigenvalue decomposition.
w, v = la.eig(A)
x = v[:, np.argmax(w)]
# Solve the problem with pymanopt.
xopt = dominant_eigenvector(A)
# Make sure both vectors have the same direction. Both are valid
# eigenvectors, of course, but for comparison we need to get rid of the
# ambiguity.
if np.sign(x[0]) != np.sign(xopt[0]):
xopt = -xopt
# Print information about the solution.
print('')
print("l2-norm of x: %f" % la.norm(x))
print("l2-norm of xopt: %f" % la.norm(xopt))
print("solution found: %s" % np.allclose(x, xopt, rtol=1e-3))
print("l2-error: %f" % la.norm(x - xopt))
|
Add example using new pytorch backend
Signed-off-by: Niklas Koep <342d5290239d9c5264c8f98185afedb99596601a@gmail.com>import numpy as np
import numpy.random as rnd
import numpy.linalg as la
import torch
from pymanopt import Problem
from pymanopt.tools import decorators
from pymanopt.manifolds import Sphere
from pymanopt.solvers import TrustRegions
def dominant_eigenvector(A):
"""
Returns the dominant eigenvector of the symmetric matrix A.
Note: For the same A, this should yield the same as the dominant invariant
subspace example with p = 1.
"""
m, n = A.shape
assert m == n, "matrix must be square"
assert np.allclose(np.sum(A - A.T), 0), "matrix must be symmetric"
manifold = Sphere(n)
solver = TrustRegions()
A_ = torch.from_numpy(A)
@decorators.pytorch
def cost(x):
return -x.matmul(A_.matmul(x))
problem = Problem(manifold=manifold, cost=cost)
xopt = solver.solve(problem)
return xopt.squeeze()
if __name__ == "__main__":
# Generate random problem data.
n = 128
A = rnd.randn(n, n)
A = 0.5 * (A + A.T)
# Calculate the actual solution by a conventional eigenvalue decomposition.
w, v = la.eig(A)
x = v[:, np.argmax(w)]
# Solve the problem with pymanopt.
xopt = dominant_eigenvector(A)
# Make sure both vectors have the same direction. Both are valid
# eigenvectors, of course, but for comparison we need to get rid of the
# ambiguity.
if np.sign(x[0]) != np.sign(xopt[0]):
xopt = -xopt
# Print information about the solution.
print('')
print("l2-norm of x: %f" % la.norm(x))
print("l2-norm of xopt: %f" % la.norm(xopt))
print("solution found: %s" % np.allclose(x, xopt, rtol=1e-3))
print("l2-error: %f" % la.norm(x - xopt))
|
<commit_before><commit_msg>Add example using new pytorch backend
Signed-off-by: Niklas Koep <342d5290239d9c5264c8f98185afedb99596601a@gmail.com><commit_after>import numpy as np
import numpy.random as rnd
import numpy.linalg as la
import torch
from pymanopt import Problem
from pymanopt.tools import decorators
from pymanopt.manifolds import Sphere
from pymanopt.solvers import TrustRegions
def dominant_eigenvector(A):
"""
Returns the dominant eigenvector of the symmetric matrix A.
Note: For the same A, this should yield the same as the dominant invariant
subspace example with p = 1.
"""
m, n = A.shape
assert m == n, "matrix must be square"
assert np.allclose(np.sum(A - A.T), 0), "matrix must be symmetric"
manifold = Sphere(n)
solver = TrustRegions()
A_ = torch.from_numpy(A)
@decorators.pytorch
def cost(x):
return -x.matmul(A_.matmul(x))
problem = Problem(manifold=manifold, cost=cost)
xopt = solver.solve(problem)
return xopt.squeeze()
if __name__ == "__main__":
# Generate random problem data.
n = 128
A = rnd.randn(n, n)
A = 0.5 * (A + A.T)
# Calculate the actual solution by a conventional eigenvalue decomposition.
w, v = la.eig(A)
x = v[:, np.argmax(w)]
# Solve the problem with pymanopt.
xopt = dominant_eigenvector(A)
# Make sure both vectors have the same direction. Both are valid
# eigenvectors, of course, but for comparison we need to get rid of the
# ambiguity.
if np.sign(x[0]) != np.sign(xopt[0]):
xopt = -xopt
# Print information about the solution.
print('')
print("l2-norm of x: %f" % la.norm(x))
print("l2-norm of xopt: %f" % la.norm(xopt))
print("solution found: %s" % np.allclose(x, xopt, rtol=1e-3))
print("l2-error: %f" % la.norm(x - xopt))
|
|
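Besides comparing against numpy's eigenvector, the solver output can be checked directly: for a unit-norm x the Rayleigh quotient x.T A x should equal the largest eigenvalue, and A x should be parallel to x. A short check along those lines, reusing A and xopt from the example above:

import numpy as np
import numpy.linalg as la

rayleigh = xopt @ A @ xopt                  # should match the dominant eigenvalue
lam_max = np.max(la.eigvalsh(A))            # eigvalsh is fine here, A is symmetric by construction
print(abs(rayleigh - lam_max))              # close to zero if the solver converged
print(la.norm(A @ xopt - rayleigh * xopt))  # residual of the eigenvalue equation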
9fbde5b8dd4d2555e03bc0b7915fc4e55f8333d9
|
numba/tests/test_help.py
|
numba/tests/test_help.py
|
from __future__ import print_function
import builtins
import types as pytypes
import numpy as np
from numba import types
from .support import TestCase
from numba.help.inspector import inspect_function, inspect_module
class TestInspector(TestCase):
def check_function_descriptor(self, info, must_be_defined=False):
self.assertIsInstance(info, dict)
self.assertIn('numba_type', info)
numba_type = info['numba_type']
if numba_type is None:
self.assertFalse(must_be_defined)
else:
self.assertIsInstance(numba_type, types.Type)
self.assertIn('explained', info)
self.assertIsInstance(info['explained'], str)
self.assertIn('source_infos', info)
self.assertIsInstance(info['source_infos'], dict)
def test_inspect_function_on_range(self):
info = inspect_function(range)
self.check_function_descriptor(info, must_be_defined=True)
def test_inspect_function_on_np_all(self):
info = inspect_function(np.all)
self.check_function_descriptor(info, must_be_defined=True)
source_infos = info['source_infos']
self.assertGreater(len(source_infos), 0)
c = 0
for srcinfo in source_infos.values():
self.assertIsInstance(srcinfo['kind'], str)
self.assertIsInstance(srcinfo['name'], str)
self.assertIsInstance(srcinfo['sig'], str)
self.assertIsInstance(srcinfo['filename'], str)
self.assertIsInstance(srcinfo['lines'], tuple)
self.assertIn('docstring', srcinfo)
c += 1
self.assertEqual(c, len(source_infos))
def test_inspect_module(self):
c = 0
for it in inspect_module(builtins):
self.assertIsInstance(it['module'], pytypes.ModuleType)
self.assertIsInstance(it['name'], str)
self.assertTrue(callable(it['obj']))
self.check_function_descriptor(it)
c += 1
self.assertGreater(c, 0)
|
Add test to help module
|
Add test to help module
|
Python
|
bsd-2-clause
|
numba/numba,stonebig/numba,numba/numba,stonebig/numba,seibert/numba,stuartarchibald/numba,cpcloud/numba,sklam/numba,gmarkall/numba,stuartarchibald/numba,IntelLabs/numba,seibert/numba,IntelLabs/numba,IntelLabs/numba,gmarkall/numba,gmarkall/numba,seibert/numba,numba/numba,cpcloud/numba,gmarkall/numba,stuartarchibald/numba,stonebig/numba,sklam/numba,stonebig/numba,seibert/numba,stuartarchibald/numba,gmarkall/numba,IntelLabs/numba,cpcloud/numba,sklam/numba,numba/numba,seibert/numba,IntelLabs/numba,sklam/numba,stuartarchibald/numba,stonebig/numba,sklam/numba,cpcloud/numba,cpcloud/numba,numba/numba
|
Add test to help module
|
from __future__ import print_function
import builtins
import types as pytypes
import numpy as np
from numba import types
from .support import TestCase
from numba.help.inspector import inspect_function, inspect_module
class TestInspector(TestCase):
def check_function_descriptor(self, info, must_be_defined=False):
self.assertIsInstance(info, dict)
self.assertIn('numba_type', info)
numba_type = info['numba_type']
if numba_type is None:
self.assertFalse(must_be_defined)
else:
self.assertIsInstance(numba_type, types.Type)
self.assertIn('explained', info)
self.assertIsInstance(info['explained'], str)
self.assertIn('source_infos', info)
self.assertIsInstance(info['source_infos'], dict)
def test_inspect_function_on_range(self):
info = inspect_function(range)
self.check_function_descriptor(info, must_be_defined=True)
def test_inspect_function_on_np_all(self):
info = inspect_function(np.all)
self.check_function_descriptor(info, must_be_defined=True)
source_infos = info['source_infos']
self.assertGreater(len(source_infos), 0)
c = 0
for srcinfo in source_infos.values():
self.assertIsInstance(srcinfo['kind'], str)
self.assertIsInstance(srcinfo['name'], str)
self.assertIsInstance(srcinfo['sig'], str)
self.assertIsInstance(srcinfo['filename'], str)
self.assertIsInstance(srcinfo['lines'], tuple)
self.assertIn('docstring', srcinfo)
c += 1
self.assertEqual(c, len(source_infos))
def test_inspect_module(self):
c = 0
for it in inspect_module(builtins):
self.assertIsInstance(it['module'], pytypes.ModuleType)
self.assertIsInstance(it['name'], str)
self.assertTrue(callable(it['obj']))
self.check_function_descriptor(it)
c += 1
self.assertGreater(c, 0)
|
<commit_before><commit_msg>Add test to help module<commit_after>
|
from __future__ import print_function
import builtins
import types as pytypes
import numpy as np
from numba import types
from .support import TestCase
from numba.help.inspector import inspect_function, inspect_module
class TestInspector(TestCase):
def check_function_descriptor(self, info, must_be_defined=False):
self.assertIsInstance(info, dict)
self.assertIn('numba_type', info)
numba_type = info['numba_type']
if numba_type is None:
self.assertFalse(must_be_defined)
else:
self.assertIsInstance(numba_type, types.Type)
self.assertIn('explained', info)
self.assertIsInstance(info['explained'], str)
self.assertIn('source_infos', info)
self.assertIsInstance(info['source_infos'], dict)
def test_inspect_function_on_range(self):
info = inspect_function(range)
self.check_function_descriptor(info, must_be_defined=True)
def test_inspect_function_on_np_all(self):
info = inspect_function(np.all)
self.check_function_descriptor(info, must_be_defined=True)
source_infos = info['source_infos']
self.assertGreater(len(source_infos), 0)
c = 0
for srcinfo in source_infos.values():
self.assertIsInstance(srcinfo['kind'], str)
self.assertIsInstance(srcinfo['name'], str)
self.assertIsInstance(srcinfo['sig'], str)
self.assertIsInstance(srcinfo['filename'], str)
self.assertIsInstance(srcinfo['lines'], tuple)
self.assertIn('docstring', srcinfo)
c += 1
self.assertEqual(c, len(source_infos))
def test_inspect_module(self):
c = 0
for it in inspect_module(builtins):
self.assertIsInstance(it['module'], pytypes.ModuleType)
self.assertIsInstance(it['name'], str)
self.assertTrue(callable(it['obj']))
self.check_function_descriptor(it)
c += 1
self.assertGreater(c, 0)
|
Add test to help modulefrom __future__ import print_function
import builtins
import types as pytypes
import numpy as np
from numba import types
from .support import TestCase
from numba.help.inspector import inspect_function, inspect_module
class TestInspector(TestCase):
def check_function_descriptor(self, info, must_be_defined=False):
self.assertIsInstance(info, dict)
self.assertIn('numba_type', info)
numba_type = info['numba_type']
if numba_type is None:
self.assertFalse(must_be_defined)
else:
self.assertIsInstance(numba_type, types.Type)
self.assertIn('explained', info)
self.assertIsInstance(info['explained'], str)
self.assertIn('source_infos', info)
self.assertIsInstance(info['source_infos'], dict)
def test_inspect_function_on_range(self):
info = inspect_function(range)
self.check_function_descriptor(info, must_be_defined=True)
def test_inspect_function_on_np_all(self):
info = inspect_function(np.all)
self.check_function_descriptor(info, must_be_defined=True)
source_infos = info['source_infos']
self.assertGreater(len(source_infos), 0)
c = 0
for srcinfo in source_infos.values():
self.assertIsInstance(srcinfo['kind'], str)
self.assertIsInstance(srcinfo['name'], str)
self.assertIsInstance(srcinfo['sig'], str)
self.assertIsInstance(srcinfo['filename'], str)
self.assertIsInstance(srcinfo['lines'], tuple)
self.assertIn('docstring', srcinfo)
c += 1
self.assertEqual(c, len(source_infos))
def test_inspect_module(self):
c = 0
for it in inspect_module(builtins):
self.assertIsInstance(it['module'], pytypes.ModuleType)
self.assertIsInstance(it['name'], str)
self.assertTrue(callable(it['obj']))
self.check_function_descriptor(it)
c += 1
self.assertGreater(c, 0)
|
<commit_before><commit_msg>Add test to help module<commit_after>from __future__ import print_function
import builtins
import types as pytypes
import numpy as np
from numba import types
from .support import TestCase
from numba.help.inspector import inspect_function, inspect_module
class TestInspector(TestCase):
def check_function_descriptor(self, info, must_be_defined=False):
self.assertIsInstance(info, dict)
self.assertIn('numba_type', info)
numba_type = info['numba_type']
if numba_type is None:
self.assertFalse(must_be_defined)
else:
self.assertIsInstance(numba_type, types.Type)
self.assertIn('explained', info)
self.assertIsInstance(info['explained'], str)
self.assertIn('source_infos', info)
self.assertIsInstance(info['source_infos'], dict)
def test_inspect_function_on_range(self):
info = inspect_function(range)
self.check_function_descriptor(info, must_be_defined=True)
def test_inspect_function_on_np_all(self):
info = inspect_function(np.all)
self.check_function_descriptor(info, must_be_defined=True)
source_infos = info['source_infos']
self.assertGreater(len(source_infos), 0)
c = 0
for srcinfo in source_infos.values():
self.assertIsInstance(srcinfo['kind'], str)
self.assertIsInstance(srcinfo['name'], str)
self.assertIsInstance(srcinfo['sig'], str)
self.assertIsInstance(srcinfo['filename'], str)
self.assertIsInstance(srcinfo['lines'], tuple)
self.assertIn('docstring', srcinfo)
c += 1
self.assertEqual(c, len(source_infos))
def test_inspect_module(self):
c = 0
for it in inspect_module(builtins):
self.assertIsInstance(it['module'], pytypes.ModuleType)
self.assertIsInstance(it['name'], str)
self.assertTrue(callable(it['obj']))
self.check_function_descriptor(it)
c += 1
self.assertGreater(c, 0)
|
|
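The descriptor these tests assert on can also be inspected interactively; the keys used below are exactly the ones the test checks ('numba_type', 'explained', 'source_infos'). A sketch assuming the numba.help.inspector module from this commit is importable:

import numpy as np
from numba.help.inspector import inspect_function

info = inspect_function(np.all)
print(info['numba_type'])                   # Numba type registered for np.all
print(info['explained'])                    # human-readable explanation string
for src in info['source_infos'].values():
    print(src['kind'], src['name'], src['sig'], src['filename'])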
e8607fce01bfe17c08de0702c4041d98504bc159
|
reunition/apps/alumni/migrations/0006_auto_20150823_2030.py
|
reunition/apps/alumni/migrations/0006_auto_20150823_2030.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('alumni', '0005_note'),
]
operations = [
migrations.AlterField(
model_name='note',
name='contacted',
field=models.CharField(blank=True, max_length=10, null=True, choices=[(b'', b'No contact made'), (b'', b'---'), (b'email', b'Sent email'), (b'fb', b'Sent Facebook message'), (b'phone', b'Made phone call'), (b'text', b'Sent text message'), (b'other', b'Made other contact'), (b'', b'---'), (b'email-in', b'Received email'), (b'fb-in', b'Received Facebook message'), (b'phone-in', b'Received phone call'), (b'text-in', b'Received text message'), (b'other', b'Received other contact')]),
),
]
|
Add migration for changing CONTACTED_CHOICES
|
Add migration for changing CONTACTED_CHOICES
|
Python
|
mit
|
reunition/reunition,reunition/reunition,reunition/reunition
|
Add migration for changing CONTACTED_CHOICES
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('alumni', '0005_note'),
]
operations = [
migrations.AlterField(
model_name='note',
name='contacted',
field=models.CharField(blank=True, max_length=10, null=True, choices=[(b'', b'No contact made'), (b'', b'---'), (b'email', b'Sent email'), (b'fb', b'Sent Facebook message'), (b'phone', b'Made phone call'), (b'text', b'Sent text message'), (b'other', b'Made other contact'), (b'', b'---'), (b'email-in', b'Received email'), (b'fb-in', b'Received Facebook message'), (b'phone-in', b'Received phone call'), (b'text-in', b'Received text message'), (b'other', b'Received other contact')]),
),
]
|
<commit_before><commit_msg>Add migration for changing CONTACTED_CHOICES<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('alumni', '0005_note'),
]
operations = [
migrations.AlterField(
model_name='note',
name='contacted',
field=models.CharField(blank=True, max_length=10, null=True, choices=[(b'', b'No contact made'), (b'', b'---'), (b'email', b'Sent email'), (b'fb', b'Sent Facebook message'), (b'phone', b'Made phone call'), (b'text', b'Sent text message'), (b'other', b'Made other contact'), (b'', b'---'), (b'email-in', b'Received email'), (b'fb-in', b'Received Facebook message'), (b'phone-in', b'Received phone call'), (b'text-in', b'Received text message'), (b'other', b'Received other contact')]),
),
]
|
Add migration for changing CONTACTED_CHOICES# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('alumni', '0005_note'),
]
operations = [
migrations.AlterField(
model_name='note',
name='contacted',
field=models.CharField(blank=True, max_length=10, null=True, choices=[(b'', b'No contact made'), (b'', b'---'), (b'email', b'Sent email'), (b'fb', b'Sent Facebook message'), (b'phone', b'Made phone call'), (b'text', b'Sent text message'), (b'other', b'Made other contact'), (b'', b'---'), (b'email-in', b'Received email'), (b'fb-in', b'Received Facebook message'), (b'phone-in', b'Received phone call'), (b'text-in', b'Received text message'), (b'other', b'Received other contact')]),
),
]
|
<commit_before><commit_msg>Add migration for changing CONTACTED_CHOICES<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('alumni', '0005_note'),
]
operations = [
migrations.AlterField(
model_name='note',
name='contacted',
field=models.CharField(blank=True, max_length=10, null=True, choices=[(b'', b'No contact made'), (b'', b'---'), (b'email', b'Sent email'), (b'fb', b'Sent Facebook message'), (b'phone', b'Made phone call'), (b'text', b'Sent text message'), (b'other', b'Made other contact'), (b'', b'---'), (b'email-in', b'Received email'), (b'fb-in', b'Received Facebook message'), (b'phone-in', b'Received phone call'), (b'text-in', b'Received text message'), (b'other', b'Received other contact')]),
),
]
|
|
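The altered field only widens the set of accepted choices; existing rows keep whatever value they already had. A small sketch of querying on the new incoming-contact values once the migration is applied; the import path is assumed from the file location and the query is illustrative:

from reunition.apps.alumni.models import Note

incoming = Note.objects.filter(contacted__endswith='-in')   # received email/fb/phone/text contact
for note in incoming:
    print(note.contacted)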
3367f9d1e394bf686bc6bbd6316265c9feef4f03
|
test/on_yubikey/test_cli_config.py
|
test/on_yubikey/test_cli_config.py
|
from .util import (DestructiveYubikeyTestCase, ykman_cli)
class TestConfigUSB(DestructiveYubikeyTestCase):
def setUp(self):
ykman_cli('config', 'usb', '--enable-all', '-f')
def tearDown(self):
ykman_cli('config', 'usb', '--enable-all', '-f')
def test_disable_otp(self):
ykman_cli('config', 'usb', '--disable', 'OTP', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('OTP', output)
def test_disable_u2f(self):
ykman_cli('config', 'usb', '--disable', 'U2F', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('FIDO U2F', output)
def test_disable_openpgp(self):
ykman_cli('config', 'usb', '--disable', 'OPGP', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('OpenPGP', output)
def test_disable_piv(self):
ykman_cli('config', 'usb', '--disable', 'PIV', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('PIV', output)
def test_disable_oath(self):
ykman_cli('config', 'usb', '--disable', 'OATH', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('OATH', output)
def test_disable_fido2(self):
ykman_cli('config', 'usb', '--disable', 'FIDO2', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('FIDO2', output)
|
Add basic tests for config usb
|
Add basic tests for config usb
|
Python
|
bsd-2-clause
|
Yubico/yubikey-manager,Yubico/yubikey-manager
|
Add basic tests for config usb
|
from .util import (DestructiveYubikeyTestCase, ykman_cli)
class TestConfigUSB(DestructiveYubikeyTestCase):
def setUp(self):
ykman_cli('config', 'usb', '--enable-all', '-f')
def tearDown(self):
ykman_cli('config', 'usb', '--enable-all', '-f')
def test_disable_otp(self):
ykman_cli('config', 'usb', '--disable', 'OTP', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('OTP', output)
def test_disable_u2f(self):
ykman_cli('config', 'usb', '--disable', 'U2F', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('FIDO U2F', output)
def test_disable_openpgp(self):
ykman_cli('config', 'usb', '--disable', 'OPGP', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('OpenPGP', output)
def test_disable_piv(self):
ykman_cli('config', 'usb', '--disable', 'PIV', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('PIV', output)
def test_disable_oath(self):
ykman_cli('config', 'usb', '--disable', 'OATH', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('OATH', output)
def test_disable_fido2(self):
ykman_cli('config', 'usb', '--disable', 'FIDO2', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('FIDO2', output)
|
<commit_before><commit_msg>Add basic tests for config usb<commit_after>
|
from .util import (DestructiveYubikeyTestCase, ykman_cli)
class TestConfigUSB(DestructiveYubikeyTestCase):
def setUp(self):
ykman_cli('config', 'usb', '--enable-all', '-f')
def tearDown(self):
ykman_cli('config', 'usb', '--enable-all', '-f')
def test_disable_otp(self):
ykman_cli('config', 'usb', '--disable', 'OTP', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('OTP', output)
def test_disable_u2f(self):
ykman_cli('config', 'usb', '--disable', 'U2F', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('FIDO U2F', output)
def test_disable_openpgp(self):
ykman_cli('config', 'usb', '--disable', 'OPGP', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('OpenPGP', output)
def test_disable_piv(self):
ykman_cli('config', 'usb', '--disable', 'PIV', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('PIV', output)
def test_disable_oath(self):
ykman_cli('config', 'usb', '--disable', 'OATH', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('OATH', output)
def test_disable_fido2(self):
ykman_cli('config', 'usb', '--disable', 'FIDO2', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('FIDO2', output)
|
Add basic tests for config usbfrom .util import (DestructiveYubikeyTestCase, ykman_cli)
class TestConfigUSB(DestructiveYubikeyTestCase):
def setUp(self):
ykman_cli('config', 'usb', '--enable-all', '-f')
def tearDown(self):
ykman_cli('config', 'usb', '--enable-all', '-f')
def test_disable_otp(self):
ykman_cli('config', 'usb', '--disable', 'OTP', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('OTP', output)
def test_disable_u2f(self):
ykman_cli('config', 'usb', '--disable', 'U2F', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('FIDO U2F', output)
def test_disable_openpgp(self):
ykman_cli('config', 'usb', '--disable', 'OPGP', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('OpenPGP', output)
def test_disable_piv(self):
ykman_cli('config', 'usb', '--disable', 'PIV', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('PIV', output)
def test_disable_oath(self):
ykman_cli('config', 'usb', '--disable', 'OATH', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('OATH', output)
def test_disable_fido2(self):
ykman_cli('config', 'usb', '--disable', 'FIDO2', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('FIDO2', output)
|
<commit_before><commit_msg>Add basic tests for config usb<commit_after>from .util import (DestructiveYubikeyTestCase, ykman_cli)
class TestConfigUSB(DestructiveYubikeyTestCase):
def setUp(self):
ykman_cli('config', 'usb', '--enable-all', '-f')
def tearDown(self):
ykman_cli('config', 'usb', '--enable-all', '-f')
def test_disable_otp(self):
ykman_cli('config', 'usb', '--disable', 'OTP', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('OTP', output)
def test_disable_u2f(self):
ykman_cli('config', 'usb', '--disable', 'U2F', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('FIDO U2F', output)
def test_disable_openpgp(self):
ykman_cli('config', 'usb', '--disable', 'OPGP', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('OpenPGP', output)
def test_disable_piv(self):
ykman_cli('config', 'usb', '--disable', 'PIV', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('PIV', output)
def test_disable_oath(self):
ykman_cli('config', 'usb', '--disable', 'OATH', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('OATH', output)
def test_disable_fido2(self):
ykman_cli('config', 'usb', '--disable', 'FIDO2', '-f')
output = ykman_cli('config', 'usb', '--list')
self.assertNotIn('FIDO2', output)
|
|
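The ykman_cli helper used by these tests wraps the command-line tool, so the same disable/list/restore cycle can be reproduced against a disposable key with plain subprocess calls, assuming ykman is on PATH. This is only a sketch of the pattern the tests exercise, not the project's test harness:

import subprocess

def ykman(*args):
    return subprocess.run(['ykman', *args], check=True,
                          capture_output=True, text=True).stdout

ykman('config', 'usb', '--disable', 'OTP', '-f')
print(ykman('config', 'usb', '--list'))           # 'OTP' should no longer appear
ykman('config', 'usb', '--enable-all', '-f')      # restore everything, mirroring tearDown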
28b5fef57580640cd78775d6c0544bc633e5958a
|
generate-key.py
|
generate-key.py
|
#!/usr/bin/python
import os
import sqlite3
import sys
import time
if len(sys.argv) < 3:
raise ValueError('Usage: %s "Firstname Lastname" email@example.com' % sys.argv[0])
db = sqlite3.connect('/var/lib/zon-api/data.db')
api_key = str(os.urandom(26).encode('hex'))
tier = 'free'
name = sys.argv[1]
email = sys.argv[2]
requests = 0
reset = int(time.time())
query = 'INSERT INTO client VALUES (?, ?, ?, ?, ?, ?)'
db.execute(query, (api_key, tier, name, email, requests, reset))
db.commit()
db.close()
print api_key
|
Add helper script to generate API keys.
|
Add helper script to generate API keys.
|
Python
|
bsd-3-clause
|
ZeitOnline/content-api,ZeitOnline/content-api
|
Add helper script to generate API keys.
|
#!/usr/bin/python
import os
import sqlite3
import sys
import time
if len(sys.argv) < 3:
raise ValueError('Usage: %s "Firstname Lastname" email@example.com' % sys.argv[0])
db = sqlite3.connect('/var/lib/zon-api/data.db')
api_key = str(os.urandom(26).encode('hex'))
tier = 'free'
name = sys.argv[1]
email = sys.argv[2]
requests = 0
reset = int(time.time())
query = 'INSERT INTO client VALUES (?, ?, ?, ?, ?, ?)'
db.execute(query, (api_key, tier, name, email, requests, reset))
db.commit()
db.close()
print api_key
|
<commit_before><commit_msg>Add helper script to generate API keys.<commit_after>
|
#!/usr/bin/python
import os
import sqlite3
import sys
import time
if len(sys.argv) < 3:
raise ValueError('Usage: %s "Firstname Lastname" email@example.com' % sys.argv[0])
db = sqlite3.connect('/var/lib/zon-api/data.db')
api_key = str(os.urandom(26).encode('hex'))
tier = 'free'
name = sys.argv[1]
email = sys.argv[2]
requests = 0
reset = int(time.time())
query = 'INSERT INTO client VALUES (?, ?, ?, ?, ?, ?)'
db.execute(query, (api_key, tier, name, email, requests, reset))
db.commit()
db.close()
print api_key
|
Add helper script to generate API keys.#!/usr/bin/python
import os
import sqlite3
import sys
import time
if len(sys.argv) < 3:
raise ValueError('Usage: %s "Firstname Lastname" email@example.com' % sys.argv[0])
db = sqlite3.connect('/var/lib/zon-api/data.db')
api_key = str(os.urandom(26).encode('hex'))
tier = 'free'
name = sys.argv[1]
email = sys.argv[2]
requests = 0
reset = int(time.time())
query = 'INSERT INTO client VALUES (?, ?, ?, ?, ?, ?)'
db.execute(query, (api_key, tier, name, email, requests, reset))
db.commit()
db.close()
print api_key
|
<commit_before><commit_msg>Add helper script to generate API keys.<commit_after>#!/usr/bin/python
import os
import sqlite3
import sys
import time
if len(sys.argv) < 3:
raise ValueError('Usage: %s "Firstname Lastname" email@example.com' % sys.argv[0])
db = sqlite3.connect('/var/lib/zon-api/data.db')
api_key = str(os.urandom(26).encode('hex'))
tier = 'free'
name = sys.argv[1]
email = sys.argv[2]
requests = 0
reset = int(time.time())
query = 'INSERT INTO client VALUES (?, ?, ?, ?, ?, ?)'
db.execute(query, (api_key, tier, name, email, requests, reset))
db.commit()
db.close()
print api_key
|
|
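The script is written for Python 2 (print statement, str.encode('hex')). On Python 3 the key generation maps onto the secrets module; a sketch of an equivalent, assuming the same database path and client table schema:

#!/usr/bin/env python3
import secrets
import sqlite3
import sys
import time

if len(sys.argv) < 3:
    raise ValueError('Usage: %s "Firstname Lastname" email@example.com' % sys.argv[0])

api_key = secrets.token_hex(26)                    # 26 random bytes, hex-encoded
with sqlite3.connect('/var/lib/zon-api/data.db') as db:
    db.execute('INSERT INTO client VALUES (?, ?, ?, ?, ?, ?)',
               (api_key, 'free', sys.argv[1], sys.argv[2], 0, int(time.time())))
print(api_key)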
3b27b1d6b1c4739b8d456703542ec8182ce12277
|
heat/tests/functional/test_WordPress_Composed_Instances.py
|
heat/tests/functional/test_WordPress_Composed_Instances.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import util
import verify
import nose
from nose.plugins.attrib import attr
import unittest
@attr(speed='slow')
@attr(tag=['func', 'wordpress', 'composed', 'WordPressComposedInstances'])
class WordPressComposedInstancesFunctionalTest(unittest.TestCase):
def setUp(self):
template = 'WordPress_Composed_Instances.template'
self.func_utils = util.FuncUtils()
self.func_utils.prepare_jeos('F17', 'x86_64', 'cfntools')
self.func_utils.create_stack(template, 'F17')
self.func_utils.check_cfntools()
self.func_utils.wait_for_provisioning()
self.func_utils.check_user_data(template)
self.ssh = self.func_utils.get_ssh_client()
def test_instance(self):
# ensure wordpress was installed by checking for expected
# configuration file over ssh
wp_file = '/etc/wordpress/wp-config.php'
stdin, stdout, stderr = self.ssh.exec_command('ls ' + wp_file)
result = stdout.readlines().pop().rstrip()
assert result == wp_file
print "Wordpress installation detected"
# Verify the output URL parses as expected, ie check that
# the wordpress installation is operational
stack_url = self.func_utils.get_stack_output("WebsiteURL")
print "Got stack output WebsiteURL=%s, verifying" % stack_url
ver = verify.VerifyStack()
assert True == ver.verify_wordpress(stack_url)
self.func_utils.cleanup()
|
Add a Wordpress+MySQL composed instance functional test case
|
Add a Wordpress+MySQL composed instance functional test case
Change-Id: I6a905b186be59c929e530519414e46d222b4ea08
Signed-off-by: Steven Dake <8638f3fce5db0278cfbc239bd581dfc00c29ec9d@redhat.com>
|
Python
|
apache-2.0
|
gonzolino/heat,noironetworks/heat,steveb/heat,cwolferh/heat-scratch,noironetworks/heat,gonzolino/heat,maestro-hybrid-cloud/heat,citrix-openstack-build/heat,pshchelo/heat,cryptickp/heat,dragorosson/heat,rh-s/heat,Triv90/Heat,steveb/heat,rh-s/heat,srznew/heat,srznew/heat,openstack/heat,redhat-openstack/heat,rickerc/heat_audit,jasondunsmore/heat,maestro-hybrid-cloud/heat,redhat-openstack/heat,pratikmallya/heat,rdo-management/heat,varunarya10/heat,pshchelo/heat,citrix-openstack-build/heat,rdo-management/heat,Triv90/Heat,dims/heat,takeshineshiro/heat,miguelgrinberg/heat,Triv90/Heat,rickerc/heat_audit,JioCloud/heat,miguelgrinberg/heat,NeCTAR-RC/heat,dragorosson/heat,ntt-sic/heat,cwolferh/heat-scratch,JioCloud/heat,openstack/heat,NeCTAR-RC/heat,dims/heat,jasondunsmore/heat,takeshineshiro/heat,cryptickp/heat,pratikmallya/heat,varunarya10/heat,ntt-sic/heat
|
Add a Wordpress+MySQL composed instance functional test case
Change-Id: I6a905b186be59c929e530519414e46d222b4ea08
Signed-off-by: Steven Dake <8638f3fce5db0278cfbc239bd581dfc00c29ec9d@redhat.com>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import util
import verify
import nose
from nose.plugins.attrib import attr
import unittest
@attr(speed='slow')
@attr(tag=['func', 'wordpress', 'composed', 'WordPressComposedInstances'])
class WordPressComposedInstancesFunctionalTest(unittest.TestCase):
def setUp(self):
template = 'WordPress_Composed_Instances.template'
self.func_utils = util.FuncUtils()
self.func_utils.prepare_jeos('F17', 'x86_64', 'cfntools')
self.func_utils.create_stack(template, 'F17')
self.func_utils.check_cfntools()
self.func_utils.wait_for_provisioning()
self.func_utils.check_user_data(template)
self.ssh = self.func_utils.get_ssh_client()
def test_instance(self):
# ensure wordpress was installed by checking for expected
# configuration file over ssh
wp_file = '/etc/wordpress/wp-config.php'
stdin, stdout, stderr = self.ssh.exec_command('ls ' + wp_file)
result = stdout.readlines().pop().rstrip()
assert result == wp_file
print "Wordpress installation detected"
# Verify the output URL parses as expected, ie check that
# the wordpress installation is operational
stack_url = self.func_utils.get_stack_output("WebsiteURL")
print "Got stack output WebsiteURL=%s, verifying" % stack_url
ver = verify.VerifyStack()
assert True == ver.verify_wordpress(stack_url)
self.func_utils.cleanup()
|
<commit_before><commit_msg>Add a Wordpress+MySQL composed instance functional test case
Change-Id: I6a905b186be59c929e530519414e46d222b4ea08
Signed-off-by: Steven Dake <8638f3fce5db0278cfbc239bd581dfc00c29ec9d@redhat.com><commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import util
import verify
import nose
from nose.plugins.attrib import attr
import unittest
@attr(speed='slow')
@attr(tag=['func', 'wordpress', 'composed', 'WordPressComposedInstances'])
class WordPressComposedInstancesFunctionalTest(unittest.TestCase):
def setUp(self):
template = 'WordPress_Composed_Instances.template'
self.func_utils = util.FuncUtils()
self.func_utils.prepare_jeos('F17', 'x86_64', 'cfntools')
self.func_utils.create_stack(template, 'F17')
self.func_utils.check_cfntools()
self.func_utils.wait_for_provisioning()
self.func_utils.check_user_data(template)
self.ssh = self.func_utils.get_ssh_client()
def test_instance(self):
# ensure wordpress was installed by checking for expected
# configuration file over ssh
wp_file = '/etc/wordpress/wp-config.php'
stdin, stdout, stderr = self.ssh.exec_command('ls ' + wp_file)
result = stdout.readlines().pop().rstrip()
assert result == wp_file
print "Wordpress installation detected"
# Verify the output URL parses as expected, ie check that
# the wordpress installation is operational
stack_url = self.func_utils.get_stack_output("WebsiteURL")
print "Got stack output WebsiteURL=%s, verifying" % stack_url
ver = verify.VerifyStack()
assert True == ver.verify_wordpress(stack_url)
self.func_utils.cleanup()
|
Add a Wordpress+MySQL composed instance functional test case
Change-Id: I6a905b186be59c929e530519414e46d222b4ea08
Signed-off-by: Steven Dake <8638f3fce5db0278cfbc239bd581dfc00c29ec9d@redhat.com># vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import util
import verify
import nose
from nose.plugins.attrib import attr
import unittest
@attr(speed='slow')
@attr(tag=['func', 'wordpress', 'composed', 'WordPressComposedInstances'])
class WordPressComposedInstancesFunctionalTest(unittest.TestCase):
def setUp(self):
template = 'WordPress_Composed_Instances.template'
self.func_utils = util.FuncUtils()
self.func_utils.prepare_jeos('F17', 'x86_64', 'cfntools')
self.func_utils.create_stack(template, 'F17')
self.func_utils.check_cfntools()
self.func_utils.wait_for_provisioning()
self.func_utils.check_user_data(template)
self.ssh = self.func_utils.get_ssh_client()
def test_instance(self):
# ensure wordpress was installed by checking for expected
# configuration file over ssh
wp_file = '/etc/wordpress/wp-config.php'
stdin, stdout, sterr = self.ssh.exec_command('ls ' + wp_file)
result = stdout.readlines().pop().rstrip()
assert result == wp_file
print "Wordpress installation detected"
# Verify the output URL parses as expected, ie check that
# the wordpress installation is operational
stack_url = self.func_utils.get_stack_output("WebsiteURL")
print "Got stack output WebsiteURL=%s, verifying" % stack_url
ver = verify.VerifyStack()
assert True == ver.verify_wordpress(stack_url)
self.func_utils.cleanup()
|
<commit_before><commit_msg>Add a Wordpress+MySQL composed instance functional test case
Change-Id: I6a905b186be59c929e530519414e46d222b4ea08
Signed-off-by: Steven Dake <8638f3fce5db0278cfbc239bd581dfc00c29ec9d@redhat.com><commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import util
import verify
import nose
from nose.plugins.attrib import attr
import unittest
@attr(speed='slow')
@attr(tag=['func', 'wordpress', 'composed', 'WordPressComposedInstances'])
class WordPressComposedInstancesFunctionalTest(unittest.TestCase):
def setUp(self):
template = 'WordPress_Composed_Instances.template'
self.func_utils = util.FuncUtils()
self.func_utils.prepare_jeos('F17', 'x86_64', 'cfntools')
self.func_utils.create_stack(template, 'F17')
self.func_utils.check_cfntools()
self.func_utils.wait_for_provisioning()
self.func_utils.check_user_data(template)
self.ssh = self.func_utils.get_ssh_client()
def test_instance(self):
# ensure wordpress was installed by checking for expected
# configuration file over ssh
wp_file = '/etc/wordpress/wp-config.php'
stdin, stdout, sterr = self.ssh.exec_command('ls ' + wp_file)
result = stdout.readlines().pop().rstrip()
assert result == wp_file
print "Wordpress installation detected"
# Verify the output URL parses as expected, ie check that
# the wordpress installation is operational
stack_url = self.func_utils.get_stack_output("WebsiteURL")
print "Got stack output WebsiteURL=%s, verifying" % stack_url
ver = verify.VerifyStack()
assert True == ver.verify_wordpress(stack_url)
self.func_utils.cleanup()
|
|
3bd95d8789871246fb90c6eb0487d9746ef5cb27
|
bluebottle/cms/migrations/0056_auto_20191106_1041.py
|
bluebottle/cms/migrations/0056_auto_20191106_1041.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-06 09:41
from __future__ import unicode_literals
from django.db import migrations
def migrate_project_blocks(apps, schema_editor):
ProjectsContent = apps.get_model('cms', 'ProjectsContent')
ActivitiesContent = apps.get_model('cms', 'ActivitiesContent')
Initiative = apps.get_model('initiatives', 'Initiative')
ContentType = apps.get_model('contenttypes', 'ContentType')
activity_content_ctype = ContentType.objects.get_for_model(ActivitiesContent)
for projects_content in ProjectsContent.objects.all():
activities_content = ActivitiesContent.objects.create(
title=projects_content.title,
sub_title=projects_content.sub_title,
sort_order=projects_content.sort_order,
placeholder=projects_content.placeholder,
parent_id=projects_content.parent_id,
language_code=projects_content.language_code,
polymorphic_ctype_id=activity_content_ctype.pk,
parent_type_id=projects_content.parent_type_id,
highlighted=projects_content.from_homepage
)
for project in projects_content.projects.all():
initiative = Initiative.objects.get(slug=project.slug)
for activity in initiative.activities.all():
activities_content.activities.add(activity)
activities_content.save()
projects_content.delete()
class Migration(migrations.Migration):
dependencies = [
('cms', '0055_migrate_statistics'),
]
operations = [
migrations.RunPython(migrate_project_blocks)
]
|
Migrate all project contents blocks to activity contents blocks
|
Migrate all project contents blocks to activity contents blocks
BB-15606 #resolve
|
Python
|
bsd-3-clause
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
Migrate all project contents blocks to activity contents blocks
BB-15606 #resolve
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-06 09:41
from __future__ import unicode_literals
from django.db import migrations
def migrate_project_blocks(apps, schema_editor):
ProjectsContent = apps.get_model('cms', 'ProjectsContent')
ActivitiesContent = apps.get_model('cms', 'ActivitiesContent')
Initiative = apps.get_model('initiatives', 'Initiative')
ContentType = apps.get_model('contenttypes', 'ContentType')
activity_content_ctype = ContentType.objects.get_for_model(ActivitiesContent)
for projects_content in ProjectsContent.objects.all():
activities_content = ActivitiesContent.objects.create(
title=projects_content.title,
sub_title=projects_content.sub_title,
sort_order=projects_content.sort_order,
placeholder=projects_content.placeholder,
parent_id=projects_content.parent_id,
language_code=projects_content.language_code,
polymorphic_ctype_id=activity_content_ctype.pk,
parent_type_id=projects_content.parent_type_id,
highlighted=projects_content.from_homepage
)
for project in projects_content.projects.all():
initiative = Initiative.objects.get(slug=project.slug)
for activity in initiative.activities.all():
activities_content.activities.add(activity)
activities_content.save()
projects_content.delete()
class Migration(migrations.Migration):
dependencies = [
('cms', '0055_migrate_statistics'),
]
operations = [
migrations.RunPython(migrate_project_blocks)
]
|
<commit_before><commit_msg>Migrate all project contents blocks to activity contents blocks
BB-15606 #resolve<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-06 09:41
from __future__ import unicode_literals
from django.db import migrations
def migrate_project_blocks(apps, schema_editor):
ProjectsContent = apps.get_model('cms', 'ProjectsContent')
ActivitiesContent = apps.get_model('cms', 'ActivitiesContent')
Initiative = apps.get_model('initiatives', 'Initiative')
ContentType = apps.get_model('contenttypes', 'ContentType')
activity_content_ctype = ContentType.objects.get_for_model(ActivitiesContent)
for projects_content in ProjectsContent.objects.all():
activities_content = ActivitiesContent.objects.create(
title=projects_content.title,
sub_title=projects_content.sub_title,
sort_order=projects_content.sort_order,
placeholder=projects_content.placeholder,
parent_id=projects_content.parent_id,
language_code=projects_content.language_code,
polymorphic_ctype_id=activity_content_ctype.pk,
parent_type_id=projects_content.parent_type_id,
highlighted=projects_content.from_homepage
)
for project in projects_content.projects.all():
initiative = Initiative.objects.get(slug=project.slug)
for activity in initiative.activities.all():
activities_content.activities.add(activity)
activities_content.save()
projects_content.delete()
class Migration(migrations.Migration):
dependencies = [
('cms', '0055_migrate_statistics'),
]
operations = [
migrations.RunPython(migrate_project_blocks)
]
|
Migrate all project contents blocks to activity contents blocks
BB-15606 #resolve# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-06 09:41
from __future__ import unicode_literals
from django.db import migrations
def migrate_project_blocks(apps, schema_editor):
ProjectsContent = apps.get_model('cms', 'ProjectsContent')
ActivitiesContent = apps.get_model('cms', 'ActivitiesContent')
Initiative = apps.get_model('initiatives', 'Initiative')
ContentType = apps.get_model('contenttypes', 'ContentType')
activity_content_ctype = ContentType.objects.get_for_model(ActivitiesContent)
for projects_content in ProjectsContent.objects.all():
activities_content = ActivitiesContent.objects.create(
title=projects_content.title,
sub_title=projects_content.sub_title,
sort_order=projects_content.sort_order,
placeholder=projects_content.placeholder,
parent_id=projects_content.parent_id,
language_code=projects_content.language_code,
polymorphic_ctype_id=activity_content_ctype.pk,
parent_type_id=projects_content.parent_type_id,
highlighted=projects_content.from_homepage
)
for project in projects_content.projects.all():
initiative = Initiative.objects.get(slug=project.slug)
for activity in initiative.activities.all():
activities_content.activities.add(activity)
activities_content.save()
projects_content.delete()
class Migration(migrations.Migration):
dependencies = [
('cms', '0055_migrate_statistics'),
]
operations = [
migrations.RunPython(migrate_project_blocks)
]
|
<commit_before><commit_msg>Migrate all project contents blocks to activity contents blocks
BB-15606 #resolve<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-06 09:41
from __future__ import unicode_literals
from django.db import migrations
def migrate_project_blocks(apps, schema_editor):
ProjectsContent = apps.get_model('cms', 'ProjectsContent')
ActivitiesContent = apps.get_model('cms', 'ActivitiesContent')
Initiative = apps.get_model('initiatives', 'Initiative')
ContentType = apps.get_model('contenttypes', 'ContentType')
activity_content_ctype = ContentType.objects.get_for_model(ActivitiesContent)
for projects_content in ProjectsContent.objects.all():
activities_content = ActivitiesContent.objects.create(
title=projects_content.title,
sub_title=projects_content.sub_title,
sort_order=projects_content.sort_order,
placeholder=projects_content.placeholder,
parent_id=projects_content.parent_id,
language_code=projects_content.language_code,
polymorphic_ctype_id=activity_content_ctype.pk,
parent_type_id=projects_content.parent_type_id,
highlighted=projects_content.from_homepage
)
for project in projects_content.projects.all():
initiative = Initiative.objects.get(slug=project.slug)
for activity in initiative.activities.all():
activities_content.activities.add(activity)
activities_content.save()
projects_content.delete()
class Migration(migrations.Migration):
dependencies = [
('cms', '0055_migrate_statistics'),
]
operations = [
migrations.RunPython(migrate_project_blocks)
]
|
|
06570a926bde2ea10730062b05a2348c3020745c
|
examples/filter_ensemble_average.py
|
examples/filter_ensemble_average.py
|
import numpy as np
import matplotlib.pyplot as plt
import atomic
from ensemble_average import time_dependent_power
if __name__ == '__main__':
times = np.logspace(-7, 0, 50)
temperature = np.logspace(0, 3, 50)
density = 1e19
from atomic.pec import TransitionPool
ad = atomic.element('argon')
tp = TransitionPool.from_adf15('adas_data/pec/*ar*.dat')
ad = tp.filter_energy(2e3, 20e3, 'eV').create_atomic_data(ad)
rt = atomic.RateEquations(ad)
y = rt.solve(times, temperature, density)
taus = np.array([ 1e14, 1e15, 1e16, 1e17, 1e18])/density
plt.figure(1); plt.clf()
from filter_construction import plot_coeffs
plot_coeffs(ad, temperature, 5)
plt.ylim(1e-35, 1e-30)
plt.draw()
plt.figure(2); plt.clf()
time_dependent_power(y, taus)
plt.draw()
plt.figure(3); plt.clf()
time_dependent_power(y, taus, ensemble_average=True)
plt.draw()
plt.show()
|
Add example: filtered ensemble average.
|
Add example: filtered ensemble average.
|
Python
|
mit
|
cfe316/atomic,ezekial4/atomic_neu,ezekial4/atomic_neu
|
Add example: filtered ensemble average.
|
import numpy as np
import matplotlib.pyplot as plt
import atomic
from ensemble_average import time_dependent_power
if __name__ == '__main__':
times = np.logspace(-7, 0, 50)
temperature = np.logspace(0, 3, 50)
density = 1e19
from atomic.pec import TransitionPool
ad = atomic.element('argon')
tp = TransitionPool.from_adf15('adas_data/pec/*ar*.dat')
ad = tp.filter_energy(2e3, 20e3, 'eV').create_atomic_data(ad)
rt = atomic.RateEquations(ad)
y = rt.solve(times, temperature, density)
taus = np.array([ 1e14, 1e15, 1e16, 1e17, 1e18])/density
plt.figure(1); plt.clf()
from filter_construction import plot_coeffs
plot_coeffs(ad, temperature, 5)
plt.ylim(1e-35, 1e-30)
plt.draw()
plt.figure(2); plt.clf()
time_dependent_power(y, taus)
plt.draw()
plt.figure(3); plt.clf()
time_dependent_power(y, taus, ensemble_average=True)
plt.draw()
plt.show()
|
<commit_before><commit_msg>Add example: filtered ensemble average.<commit_after>
|
import numpy as np
import matplotlib.pyplot as plt
import atomic
from ensemble_average import time_dependent_power
if __name__ == '__main__':
times = np.logspace(-7, 0, 50)
temperature = np.logspace(0, 3, 50)
density = 1e19
from atomic.pec import TransitionPool
ad = atomic.element('argon')
tp = TransitionPool.from_adf15('adas_data/pec/*ar*.dat')
ad = tp.filter_energy(2e3, 20e3, 'eV').create_atomic_data(ad)
rt = atomic.RateEquations(ad)
y = rt.solve(times, temperature, density)
taus = np.array([ 1e14, 1e15, 1e16, 1e17, 1e18])/density
plt.figure(1); plt.clf()
from filter_construction import plot_coeffs
plot_coeffs(ad, temperature, 5)
plt.ylim(1e-35, 1e-30)
plt.draw()
plt.figure(2); plt.clf()
time_dependent_power(y, taus)
plt.draw()
plt.figure(3); plt.clf()
time_dependent_power(y, taus, ensemble_average=True)
plt.draw()
plt.show()
|
Add example: filtered ensemble average.import numpy as np
import matplotlib.pyplot as plt
import atomic
from ensemble_average import time_dependent_power
if __name__ == '__main__':
times = np.logspace(-7, 0, 50)
temperature = np.logspace(0, 3, 50)
density = 1e19
from atomic.pec import TransitionPool
ad = atomic.element('argon')
tp = TransitionPool.from_adf15('adas_data/pec/*ar*.dat')
ad = tp.filter_energy(2e3, 20e3, 'eV').create_atomic_data(ad)
rt = atomic.RateEquations(ad)
y = rt.solve(times, temperature, density)
taus = np.array([ 1e14, 1e15, 1e16, 1e17, 1e18])/density
plt.figure(1); plt.clf()
from filter_construction import plot_coeffs
plot_coeffs(ad, temperature, 5)
plt.ylim(1e-35, 1e-30)
plt.draw()
plt.figure(2); plt.clf()
time_dependent_power(y, taus)
plt.draw()
plt.figure(3); plt.clf()
time_dependent_power(y, taus, ensemble_average=True)
plt.draw()
plt.show()
|
<commit_before><commit_msg>Add example: filtered ensemble average.<commit_after>import numpy as np
import matplotlib.pyplot as plt
import atomic
from ensemble_average import time_dependent_power
if __name__ == '__main__':
times = np.logspace(-7, 0, 50)
temperature = np.logspace(0, 3, 50)
density = 1e19
from atomic.pec import TransitionPool
ad = atomic.element('argon')
tp = TransitionPool.from_adf15('adas_data/pec/*ar*.dat')
ad = tp.filter_energy(2e3, 20e3, 'eV').create_atomic_data(ad)
rt = atomic.RateEquations(ad)
y = rt.solve(times, temperature, density)
taus = np.array([ 1e14, 1e15, 1e16, 1e17, 1e18])/density
plt.figure(1); plt.clf()
from filter_construction import plot_coeffs
plot_coeffs(ad, temperature, 5)
plt.ylim(1e-35, 1e-30)
plt.draw()
plt.figure(2); plt.clf()
time_dependent_power(y, taus)
plt.draw()
plt.figure(3); plt.clf()
time_dependent_power(y, taus, ensemble_average=True)
plt.draw()
plt.show()
|
|
38b4ec7164f07af7135c41c401c4f403c1061d66
|
app/main.py
|
app/main.py
|
"""lazy
Usage:
lazy (new|n)
lazy (show|s) [<id>]
lazy (delete|d) [<id>]
lazy (import|i) <path>
lazy (export|e) <path> [<id>]
Options:
-h, --help: Show this help message.
"""
from docopt import docopt
def main():
# Parse commandline arguments.
args = docopt(__doc__)
if args['new'] or args['n']:
# Insert a new task.
pass
elif args['show'] or args['s']:
if args['<id>']:
# Show the task whose ID most closely matches the given ID.
pass
else:
# Show all tasks for the current user.
pass
elif args['delete'] or args['d']:
if args['<id>']:
# Delete the task with the ID that most closely matches the given
# ID.
pass
else:
# Prompt the user to input the ID of the task to delete.
# Then delete the task with the ID that matches the given one best.
pass
elif args['import'] or args['i']:
# Check if the given path exists and if so, import from it.
pass
elif args['export'] or args['e']:
# Check if it is possible to write to the given path.
if args['<id>']:
# Write only the task with the ID that matches the given one best.
pass
else:
# Write all tasks the current user has to the file.
pass
|
Add skeleton for parsing commands
|
Add skeleton for parsing commands
|
Python
|
mit
|
Zillolo/lazy-todo
|
Add skeleton for parsing commands
|
"""lazy
Usage:
lazy (new|n)
lazy (show|s) [<id>]
lazy (delete|d) [<id>]
lazy (import|i) <path>
lazy (export|e) <path> [<id>]
Options:
-h, --help: Show this help message.
"""
from docopt import docopt
def main():
# Parse commandline arguments.
args = docopt(__doc__)
if args['new'] or args['n']:
# Insert a new task.
pass
elif args['show'] or args['s']:
if args['<id>']:
# Show the task whose ID most closely matches the given ID.
pass
else:
# Show all tasks for the current user.
pass
elif args['delete'] or args['d']:
if args['<id>']:
# Delete the task with the ID that most closely matches the given
# ID.
pass
else:
# Prompt the user to input the ID of the task to delete.
# Then delete the task with the ID that matches the given one best.
pass
elif args['import'] or args['i']:
# Check if the given path exists and if so, import from it.
pass
elif args['export'] or args['e']:
# Check if it is possible to write to the given path.
if args['<id>']:
# Write only the task with the ID that matches the given one best.
pass
else:
# Write all tasks the current user has to the file.
pass
|
<commit_before><commit_msg>Add skeleton for parsing commands<commit_after>
|
"""lazy
Usage:
lazy (new|n)
lazy (show|s) [<id>]
lazy (delete|d) [<id>]
lazy (import|i) <path>
lazy (export|e) <path> [<id>]
Options:
-h, --help: Show this help message.
"""
from docopt import docopt
def main():
# Parse commandline arguments.
args = docopt(__doc__)
if args['new'] or args['n']:
# Insert a new task.
pass
elif args['show'] or args['s']:
if args['<id>']:
# Show the task whose ID most closely matches the given ID.
pass
else:
# Show all tasks for the current user.
pass
elif args['delete'] or args['d']:
if args['<id>']:
# Delete the task with the ID that most closely matches the given
# ID.
pass
else:
# Prompt the user to input the ID of the task to delete.
# Then delete the task with the ID that matches the given one best.
pass
elif args['import'] or args['i']:
# Check if the given path exists and if so, import from it.
pass
elif args['export'] or args['e']:
# Check if it is possible to write to the given path.
if args['<id>']:
# Write only the task with the ID that matches the given one best.
pass
else:
# Write all tasks the current user has to the file.
pass
|
Add skeleton for parsing commands"""lazy
Usage:
lazy (new|n)
lazy (show|s) [<id>]
lazy (delete|d) [<id>]
lazy (import|i) <path>
lazy (export|e) <path> [<id>]
Options:
-h, --help: Show this help message.
"""
from docopt import docopt
def main():
# Parse commandline arguments.
args = docopt(__doc__)
if args['new'] or args['n']:
# Insert a new task.
pass
elif args['show'] or args['s']:
if args['<id>']:
# Show the task whose ID most closely matches the given ID.
pass
else:
# Show all tasks for the current user.
pass
elif args['delete'] or args['d']:
if args['<id>']:
# Delete the task with the ID that most closely matches the given
# ID.
pass
else:
# Prompt the user to input the ID of the task to delete.
# Then delete the task with the ID that matches the given one best.
pass
elif args['import'] or args['i']:
# Check if the given path exists and if so, import from it.
pass
elif args['export'] or args['e']:
# Check if it is possible to write to the given path.
if args['<id>']:
# Write only the task with the ID that matches the given one best.
pass
else:
# Write all tasks the current user has to the file.
pass
|
<commit_before><commit_msg>Add skeleton for parsing commands<commit_after>"""lazy
Usage:
lazy (new|n)
lazy (show|s) [<id>]
lazy (delete|d) [<id>]
lazy (import|i) <path>
lazy (export|e) <path> [<id>]
Options:
-h, --help: Show this help message.
"""
from docopt import docopt
def main():
# Parse commandline arguments.
args = docopt(__doc__)
if args['new'] or args['n']:
# Insert a new task.
pass
elif args['show'] or args['s']:
if args['<id>']:
# Show the task whose ID most closely matches the given ID.
pass
else:
# Show all tasks for the current user.
pass
elif args['delete'] or args['d']:
if args['<id>']:
# Delete the task with the ID that most closely matches the given
# ID.
pass
else:
# Prompt the user to input the ID of the task to delete.
# Then delete the task with the ID that matches the given one best.
pass
elif args['import'] or args['i']:
# Check if the given path exists and if so, import from it.
pass
elif args['export'] or args['e']:
# Check if it is possible to write to the given path.
if args['<id>']:
# Write only the task with the ID that matches the given one best.
pass
else:
# Write all tasks the current user has to the file.
pass
|
|
3ed9dd0ca03216311771cda5f9cd3eb954a14d4f
|
telemeta/management/commands/telemeta-test-boilerplate.py
|
telemeta/management/commands/telemeta-test-boilerplate.py
|
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
import os
from telemeta.models import *
from timeside.core.tools.test_samples import generateSamples
class Command(BaseCommand):
help = "Setup and run a boilerplate for testing"
code = 'Tests'
def handle(self, *args, **options):
# NOT for production
# self.processor_cleanup()
# self.result_cleanup()
media_dir = 'items' + os.sep + 'tests'
samples_dir = settings.MEDIA_ROOT + media_dir
samples = generateSamples(samples_dir=samples_dir)
collection, c = MediaCollection.objects.get_or_create(title=self.code,
code=self.code)
for sample in samples.iteritems():
filename, path = sample
title = os.path.splitext(filename)[0]
path = media_dir + os.sep + filename
item, c = MediaItem.objects.get_or_create(title=title,
code=self.code + '-' + slugify(filename),
file=path, collection=collection)
|
Add boilerplate with simple test sounds
|
Add boilerplate with simple test sounds
|
Python
|
agpl-3.0
|
Parisson/Telemeta,Parisson/Telemeta,ANR-kamoulox/Telemeta,ANR-kamoulox/Telemeta,Parisson/Telemeta,ANR-kamoulox/Telemeta,ANR-kamoulox/Telemeta,Parisson/Telemeta
|
Add boilerplate with simple test sounds
|
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
import os
from telemeta.models import *
from timeside.core.tools.test_samples import generateSamples
class Command(BaseCommand):
help = "Setup and run a boilerplate for testing"
code = 'Tests'
def handle(self, *args, **options):
# NOT for production
# self.processor_cleanup()
# self.result_cleanup()
media_dir = 'items' + os.sep + 'tests'
samples_dir = settings.MEDIA_ROOT + media_dir
samples = generateSamples(samples_dir=samples_dir)
collection, c = MediaCollection.objects.get_or_create(title=self.code,
code=self.code)
for sample in samples.iteritems():
filename, path = sample
title = os.path.splitext(filename)[0]
path = media_dir + os.sep + filename
item, c = MediaItem.objects.get_or_create(title=title,
code=self.code + '-' + slugify(filename),
file=path, collection=collection)
|
<commit_before><commit_msg>Add boilerplate with simple test sounds<commit_after>
|
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
import os
from telemeta.models import *
from timeside.core.tools.test_samples import generateSamples
class Command(BaseCommand):
help = "Setup and run a boilerplate for testing"
code = 'Tests'
def handle(self, *args, **options):
# NOT for production
# self.processor_cleanup()
# self.result_cleanup()
media_dir = 'items' + os.sep + 'tests'
samples_dir = settings.MEDIA_ROOT + media_dir
samples = generateSamples(samples_dir=samples_dir)
collection, c = MediaCollection.objects.get_or_create(title=self.code,
code=self.code)
for sample in samples.iteritems():
filename, path = sample
title = os.path.splitext(filename)[0]
path = media_dir + os.sep + filename
item, c = MediaItem.objects.get_or_create(title=title,
code=self.code + '-' + slugify(filename),
file=path, collection=collection)
|
Add boilerplate with simple test soundsfrom optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
import os
from telemeta.models import *
from timeside.core.tools.test_samples import generateSamples
class Command(BaseCommand):
help = "Setup and run a boilerplate for testing"
code = 'Tests'
def handle(self, *args, **options):
# NOT for production
# self.processor_cleanup()
# self.result_cleanup()
media_dir = 'items' + os.sep + 'tests'
samples_dir = settings.MEDIA_ROOT + media_dir
samples = generateSamples(samples_dir=samples_dir)
collection, c = MediaCollection.objects.get_or_create(title=self.code,
code=self.code)
for sample in samples.iteritems():
filename, path = sample
title = os.path.splitext(filename)[0]
path = media_dir + os.sep + filename
item, c = MediaItem.objects.get_or_create(title=title,
code=self.code + '-' + slugify(filename),
file=path, collection=collection)
|
<commit_before><commit_msg>Add boilerplate with simple test sounds<commit_after>from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
import os
from telemeta.models import *
from timeside.core.tools.test_samples import generateSamples
class Command(BaseCommand):
help = "Setup and run a boilerplate for testing"
code = 'Tests'
def handle(self, *args, **options):
# NOT for production
# self.processor_cleanup()
# self.result_cleanup()
media_dir = 'items' + os.sep + 'tests'
samples_dir = settings.MEDIA_ROOT + media_dir
samples = generateSamples(samples_dir=samples_dir)
collection, c = MediaCollection.objects.get_or_create(title=self.code,
code=self.code)
for sample in samples.iteritems():
filename, path = sample
title = os.path.splitext(filename)[0]
path = media_dir + os.sep + filename
item, c = MediaItem.objects.get_or_create(title=title,
code=self.code + '-' + slugify(filename),
file=path, collection=collection)
|
|
3e0ababfeb0e22d33853d4bad68a29a0249e1a60
|
other/iterate_deadlock.py
|
other/iterate_deadlock.py
|
"""
Demonstrates deadlock related to attribute iteration.
"""
from threading import Thread
import h5py
FNAME = "deadlock.hdf5"
def make_file():
with h5py.File(FNAME,'w') as f:
for idx in xrange(1000):
f.attrs['%d'%idx] = 1
def list_attributes():
with h5py.File(FNAME, 'r') as f:
names = list(f.attrs)
if __name__ == '__main__':
make_file()
thread = Thread(target=list_attributes)
thread.start()
list_attributes()
thread.join()
|
Add script demonstrating thread deadlock
|
Add script demonstrating thread deadlock
|
Python
|
bsd-3-clause
|
h5py/h5py,h5py/h5py,h5py/h5py
|
Add script demonstrating thread deadlock
|
"""
Demonstrates deadlock related to attribute iteration.
"""
from threading import Thread
import h5py
FNAME = "deadlock.hdf5"
def make_file():
with h5py.File(FNAME,'w') as f:
for idx in xrange(1000):
f.attrs['%d'%idx] = 1
def list_attributes():
with h5py.File(FNAME, 'r') as f:
names = list(f.attrs)
if __name__ == '__main__':
make_file()
thread = Thread(target=list_attributes)
thread.start()
list_attributes()
thread.join()
|
<commit_before><commit_msg>Add script demonstrating thread deadlock<commit_after>
|
"""
Demonstrates deadlock related to attribute iteration.
"""
from threading import Thread
import h5py
FNAME = "deadlock.hdf5"
def make_file():
with h5py.File(FNAME,'w') as f:
for idx in xrange(1000):
f.attrs['%d'%idx] = 1
def list_attributes():
with h5py.File(FNAME, 'r') as f:
names = list(f.attrs)
if __name__ == '__main__':
make_file()
thread = Thread(target=list_attributes)
thread.start()
list_attributes()
thread.join()
|
Add script demonstrating thread deadlock
"""
Demonstrates deadlock related to attribute iteration.
"""
from threading import Thread
import h5py
FNAME = "deadlock.hdf5"
def make_file():
with h5py.File(FNAME,'w') as f:
for idx in xrange(1000):
f.attrs['%d'%idx] = 1
def list_attributes():
with h5py.File(FNAME, 'r') as f:
names = list(f.attrs)
if __name__ == '__main__':
make_file()
thread = Thread(target=list_attributes)
thread.start()
list_attributes()
thread.join()
|
<commit_before><commit_msg>Add script demonstrating thread deadlock<commit_after>
"""
Demonstrates deadlock related to attribute iteration.
"""
from threading import Thread
import h5py
FNAME = "deadlock.hdf5"
def make_file():
with h5py.File(FNAME,'w') as f:
for idx in xrange(1000):
f.attrs['%d'%idx] = 1
def list_attributes():
with h5py.File(FNAME, 'r') as f:
names = list(f.attrs)
if __name__ == '__main__':
make_file()
thread = Thread(target=list_attributes)
thread.start()
list_attributes()
thread.join()
|
|
b1517f63c3aa549170d77c6fb3546901fdbe744b
|
candidates/migrations/0017_remove_cv_and_program_fields.py
|
candidates/migrations/0017_remove_cv_and_program_fields.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('candidates', '0016_migrate_data_to_extra_fields'),
]
operations = [
migrations.RemoveField(
model_name='personextra',
name='cv',
),
migrations.RemoveField(
model_name='personextra',
name='program',
),
]
|
Remove the hard-coded extra 'cv' and 'program' fields
|
Remove the hard-coded extra 'cv' and 'program' fields
|
Python
|
agpl-3.0
|
datamade/yournextmp-popit,datamade/yournextmp-popit,datamade/yournextmp-popit,datamade/yournextmp-popit,datamade/yournextmp-popit
|
Remove the hard-coded extra 'cv' and 'program' fields
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('candidates', '0016_migrate_data_to_extra_fields'),
]
operations = [
migrations.RemoveField(
model_name='personextra',
name='cv',
),
migrations.RemoveField(
model_name='personextra',
name='program',
),
]
|
<commit_before><commit_msg>Remove the hard-coded extra 'cv' and 'program' fields<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('candidates', '0016_migrate_data_to_extra_fields'),
]
operations = [
migrations.RemoveField(
model_name='personextra',
name='cv',
),
migrations.RemoveField(
model_name='personextra',
name='program',
),
]
|
Remove the hard-coded extra 'cv' and 'program' fields# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('candidates', '0016_migrate_data_to_extra_fields'),
]
operations = [
migrations.RemoveField(
model_name='personextra',
name='cv',
),
migrations.RemoveField(
model_name='personextra',
name='program',
),
]
|
<commit_before><commit_msg>Remove the hard-coded extra 'cv' and 'program' fields<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('candidates', '0016_migrate_data_to_extra_fields'),
]
operations = [
migrations.RemoveField(
model_name='personextra',
name='cv',
),
migrations.RemoveField(
model_name='personextra',
name='program',
),
]
|
|
72738366fa074b457021faab0c21c3b89070b5ad
|
nautilus/wizbit-extension.py
|
nautilus/wizbit-extension.py
|
from urlparse import urlparse
from os.path import exists, split, isdir
import nautilus
from lxml import etree
WIZ_CONTROLLED = "wiz-controlled"
WIZ_CONFLICT = "wiz-conflict"
YES = "Yes"
NO = "No"
class WizbitExtension(nautilus.ColumnProvider, nautilus.InfoProvider):
def __init__(self):
pass
def get_columns(self):
return [nautilus.Column("NautilusWizbit::is_controlled",
WIZ_CONTROLLED,
"Wizbit Controlled",
"File may be syncronized by Wizbit"),
nautilus.Column("NautilusWizbit::has_conflict",
WIZ_CONFLICT,
"Wizbit Conflict",
"File may have multiple versions that need to be resolved")]
def update_file_info(self, file):
controlled = False
conflict = False
(scheme, netloc, path, params, query, fragment) = urlparse(file.get_uri())
if scheme != 'file':
return
wizpath = self.get_wizpath(path)
if wizpath:
if isdir(path):
controlled = True
else:
try:
repos = etree.parse (wizpath + "/.wizbit/repos")
except IOError:
pass
else:
#Find if file is controlled
files = [f.text for f in repos.getroot().xpath("/wizbit/repo/file")]
(path, filename) = split(path)
if filename in files:
controlled = True
#Find if file is conflicting
repel = repos.getroot().xpath("/wizbit/repo")
for r in repel:
if r.get("name") == filename + ".git":
heads = [h for h in r if h.tag == "head"]
if len(heads) > 1:
conflict = True
if controlled:
file.add_emblem("cvs-controlled")
file.add_string_attribute(WIZ_CONTROLLED, YES)
else:
file.add_string_attribute(WIZ_CONTROLLED, NO)
if conflict:
file.add_emblem("cvs-conflict")
file.add_string_attribute(WIZ_CONFLICT, YES)
else:
file.add_string_attribute(WIZ_CONFLICT, NO)
def get_wizpath(self, path):
if exists(path + "/.wizbit/repos"):
return path
else:
(head, tail) = split(path)
if head != '/':
return self.get_wizpath(head)
else:
if exists("/.wizbit/repos"):
return head
else:
return ""
|
Add first revision of Nautilus extension.
|
Add first revision of Nautilus extension.
Features: Adds file properties for wizbit controlled
and wizbit conflicting. Adds emblems to the file for
these states.
|
Python
|
lgpl-2.1
|
wizbit-archive/wizbit,wizbit-archive/wizbit
|
Add first revision of Nautilus extension.
Features: Adds file properties for wizbit controlled
and wizbit conflicting. Adds emblems to the file for
these states.
|
from urlparse import urlparse
from os.path import exists, split, isdir
import nautilus
from lxml import etree
WIZ_CONTROLLED = "wiz-controlled"
WIZ_CONFLICT = "wiz-conflict"
YES = "Yes"
NO = "No"
class WizbitExtension(nautilus.ColumnProvider, nautilus.InfoProvider):
def __init__(self):
pass
def get_columns(self):
return [nautilus.Column("NautilusWizbit::is_controlled",
WIZ_CONTROLLED,
"Wizbit Controlled",
"File may be syncronized by Wizbit"),
nautilus.Column("NautilusWizbit::has_conflict",
WIZ_CONFLICT,
"Wizbit Conflict",
"File may have multiple versions that need to be resolved")]
def update_file_info(self, file):
controlled = False
conflict = False
(scheme, netloc, path, params, query, fragment) = urlparse(file.get_uri())
if scheme != 'file':
return
wizpath = self.get_wizpath(path)
if wizpath:
if isdir(path):
controlled = True
else:
try:
repos = etree.parse (wizpath + "/.wizbit/repos")
except IOError:
pass
else:
#Find if file is controlled
files = [f.text for f in repos.getroot().xpath("/wizbit/repo/file")]
(path, filename) = split(path)
if filename in files:
controlled = True
#Find if file is conflicting
repel = repos.getroot().xpath("/wizbit/repo")
for r in repel:
if r.get("name") == filename + ".git":
heads = [h for h in r if h.tag == "head"]
if len(heads) > 1:
conflict = True
if controlled:
file.add_emblem("cvs-controlled")
file.add_string_attribute(WIZ_CONTROLLED, YES)
else:
file.add_string_attribute(WIZ_CONTROLLED, NO)
if conflict:
file.add_emblem("cvs-conflict")
file.add_string_attribute(WIZ_CONFLICT, YES)
else:
file.add_string_attribute(WIZ_CONFLICT, NO)
def get_wizpath(self, path):
if exists(path + "/.wizbit/repos"):
return path
else:
(head, tail) = split(path)
if head != '/':
return self.get_wizpath(head)
else:
if exists("/.wizbit/repos"):
return head
else:
return ""
|
<commit_before><commit_msg>Add first revision of Nautilus extension.
Features: Adds file properties for wizbit controlled
and wizbit conflicting. Adds emblems to the file for
these states.<commit_after>
|
from urlparse import urlparse
from os.path import exists, split, isdir
import nautilus
from lxml import etree
WIZ_CONTROLLED = "wiz-controlled"
WIZ_CONFLICT = "wiz-conflict"
YES = "Yes"
NO = "No"
class WizbitExtension(nautilus.ColumnProvider, nautilus.InfoProvider):
def __init__(self):
pass
def get_columns(self):
return [nautilus.Column("NautilusWizbit::is_controlled",
WIZ_CONTROLLED,
"Wizbit Controlled",
"File may be syncronized by Wizbit"),
nautilus.Column("NautilusWizbit::has_conflict",
WIZ_CONFLICT,
"Wizbit Conflict",
"File may have multiple versions that need to be resolved")]
def update_file_info(self, file):
controlled = False
conflict = False
(scheme, netloc, path, params, query, fragment) = urlparse(file.get_uri())
if scheme != 'file':
return
wizpath = self.get_wizpath(path)
if wizpath:
if isdir(path):
controlled = True
else:
try:
repos = etree.parse (wizpath + "/.wizbit/repos")
except IOError:
pass
else:
#Find if file is controlled
files = [f.text for f in repos.getroot().xpath("/wizbit/repo/file")]
(path, filename) = split(path)
if filename in files:
controlled = True
#Find if file is conflicting
repel = repos.getroot().xpath("/wizbit/repo")
for r in repel:
if r.get("name") == filename + ".git":
heads = [h for h in r if h.tag == "head"]
if len(heads) > 1:
conflict = True
if controlled:
file.add_emblem("cvs-controlled")
file.add_string_attribute(WIZ_CONTROLLED, YES)
else:
file.add_string_attribute(WIZ_CONTROLLED, NO)
if conflict:
file.add_emblem("cvs-conflict")
file.add_string_attribute(WIZ_CONFLICT, YES)
else:
file.add_string_attribute(WIZ_CONFLICT, NO)
def get_wizpath(self, path):
if exists(path + "/.wizbit/repos"):
return path
else:
(head, tail) = split(path)
if head != '/':
return self.get_wizpath(head)
else:
if exists("/.wizbit/repos"):
return head
else:
return ""
|
Add first revision of Nautilus extension.
Features: Adds file properties for wizbit controlled
and wizbit conflicting. Adds emblems to the file for
these states.from urlparse import urlparse
from os.path import exists, split, isdir
import nautilus
from lxml import etree
WIZ_CONTROLLED = "wiz-controlled"
WIZ_CONFLICT = "wiz-conflict"
YES = "Yes"
NO = "No"
class WizbitExtension(nautilus.ColumnProvider, nautilus.InfoProvider):
def __init__(self):
pass
def get_columns(self):
return [nautilus.Column("NautilusWizbit::is_controlled",
WIZ_CONTROLLED,
"Wizbit Controlled",
"File may be syncronized by Wizbit"),
nautilus.Column("NautilusWizbit::has_conflict",
WIZ_CONFLICT,
"Wizbit Conflict",
"File may have multiple versions that need to be resolved")]
def update_file_info(self, file):
controlled = False
conflict = False
(scheme, netloc, path, params, query, fragment) = urlparse(file.get_uri())
if scheme != 'file':
return
wizpath = self.get_wizpath(path)
if wizpath:
if isdir(path):
controlled = True
else:
try:
repos = etree.parse (wizpath + "/.wizbit/repos")
except IOError:
pass
else:
#Find if file is controlled
files = [f.text for f in repos.getroot().xpath("/wizbit/repo/file")]
(path, filename) = split(path)
if filename in files:
controlled = True
#Find if file is conflicting
repel = repos.getroot().xpath("/wizbit/repo")
for r in repel:
if r.get("name") == filename + ".git":
heads = [h for h in r if h.tag == "head"]
if len(heads) > 1:
conflict = True
if controlled:
file.add_emblem("cvs-controlled")
file.add_string_attribute(WIZ_CONTROLLED, YES)
else:
file.add_string_attribute(WIZ_CONTROLLED, NO)
if conflict:
file.add_emblem("cvs-conflict")
file.add_string_attribute(WIZ_CONFLICT, YES)
else:
file.add_string_attribute(WIZ_CONFLICT, NO)
def get_wizpath(self, path):
if exists(path + "/.wizbit/repos"):
return path
else:
(head, tail) = split(path)
if head != '/':
return self.get_wizpath(head)
else:
if exists("/.wizbit/repos"):
return head
else:
return ""
|
<commit_before><commit_msg>Add first revision of Nautilus extension.
Features: Adds file properties for wizbit controlled
and wizbit conflicting. Adds emblems to the file for
these states.<commit_after>from urlparse import urlparse
from os.path import exists, split, isdir
import nautilus
from lxml import etree
WIZ_CONTROLLED = "wiz-controlled"
WIZ_CONFLICT = "wiz-conflict"
YES = "Yes"
NO = "No"
class WizbitExtension(nautilus.ColumnProvider, nautilus.InfoProvider):
def __init__(self):
pass
def get_columns(self):
return [nautilus.Column("NautilusWizbit::is_controlled",
WIZ_CONTROLLED,
"Wizbit Controlled",
"File may be syncronized by Wizbit"),
nautilus.Column("NautilusWizbit::has_conflict",
WIZ_CONFLICT,
"Wizbit Conflict",
"File may have multiple versions that need to be resolved")]
def update_file_info(self, file):
controlled = False
conflict = False
(scheme, netloc, path, params, query, fragment) = urlparse(file.get_uri())
if scheme != 'file':
return
wizpath = self.get_wizpath(path)
if wizpath:
if isdir(path):
controlled = True
else:
try:
repos = etree.parse (wizpath + "/.wizbit/repos")
except IOError:
pass
else:
#Find if file is controlled
files = [f.text for f in repos.getroot().xpath("/wizbit/repo/file")]
(path, filename) = split(path)
if filename in files:
controlled = True
#Find if file is conflicting
repel = repos.getroot().xpath("/wizbit/repo")
for r in repel:
if r.get("name") == filename + ".git":
heads = [h for h in r if h.tag == "head"]
if len(heads) > 1:
conflict = True
if controlled:
file.add_emblem("cvs-controlled")
file.add_string_attribute(WIZ_CONTROLLED, YES)
else:
file.add_string_attribute(WIZ_CONTROLLED, NO)
if conflict:
file.add_emblem("cvs-conflict")
file.add_string_attribute(WIZ_CONFLICT, YES)
else:
file.add_string_attribute(WIZ_CONFLICT, NO)
def get_wizpath(self, path):
if exists(path + "/.wizbit/repos"):
return path
else:
(head, tail) = split(path)
if head != '/':
return self.get_wizpath(head)
else:
if exists("/.wizbit/repos"):
return head
else:
return ""
|
|
24cf3c2676e4ea7342e95e6a37857c6fa687865e
|
src/submission/migrations/0058_auto_20210812_1254.py
|
src/submission/migrations/0058_auto_20210812_1254.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-08-12 12:54
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('submission', '0057_merge_20210811_1506'),
]
operations = [
migrations.AlterModelManagers(
name='article',
managers=[
],
),
]
|
Remove managers for article obj.
|
Remove managers for article obj.
|
Python
|
agpl-3.0
|
BirkbeckCTP/janeway,BirkbeckCTP/janeway,BirkbeckCTP/janeway,BirkbeckCTP/janeway
|
Remove managers for article obj.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-08-12 12:54
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('submission', '0057_merge_20210811_1506'),
]
operations = [
migrations.AlterModelManagers(
name='article',
managers=[
],
),
]
|
<commit_before><commit_msg>Remove managers for article obj.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-08-12 12:54
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('submission', '0057_merge_20210811_1506'),
]
operations = [
migrations.AlterModelManagers(
name='article',
managers=[
],
),
]
|
Remove managers for article obj.# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-08-12 12:54
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('submission', '0057_merge_20210811_1506'),
]
operations = [
migrations.AlterModelManagers(
name='article',
managers=[
],
),
]
|
<commit_before><commit_msg>Remove managers for article obj.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-08-12 12:54
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('submission', '0057_merge_20210811_1506'),
]
operations = [
migrations.AlterModelManagers(
name='article',
managers=[
],
),
]
|
|
68a7f9faf1933bb224113d9fa5d0ddd362b2e5ea
|
SizeDocGenerator.py
|
SizeDocGenerator.py
|
import os, re;
# I got the actual size of the binary code wrong on the site once - this script should help prevent that.
dsDoc_by_sArch = {"w32": "x86", "w64": "x64", "win": "x86+x64"};
with open("build_info.txt", "rb") as oFile:
iBuildNumber = int(re.search(r"build number\: (\d+)", oFile.read(), re.M).group(1));
print "Sizes (build %d)" % iBuildNumber;
for sArch in sorted(dsDoc_by_sArch.keys()):
sDoc = dsDoc_by_sArch[sArch];
iBinSize = os.path.getsize(r"build\bin\%s-exec-calc-shellcode.bin" % sArch);
iBinESPSize = os.path.getsize(r"build\bin\%s-exec-calc-shellcode-esp.bin" % sArch);
print " * %s: %d bytes (%d with stack allignment)" % (sDoc, iBinSize, iBinESPSize);
|
Add script to generate the site documentation containing the sizes of the binary shellcodes.
|
Add script to generate the site documentation containing the sizes of the binary shellcodes.
|
Python
|
bsd-3-clause
|
computerline1z/win-exec-calc-shellcode,computerline1z/win-exec-calc-shellcode,ohio813/win-exec-calc-shellcode,ohio813/win-exec-calc-shellcode
|
Add script to generate the site documentation containing the sizes of the binary shellcodes.
|
import os, re;
# I got the actual size of the binary code wrong on the site once - this script should help prevent that.
dsDoc_by_sArch = {"w32": "x86", "w64": "x64", "win": "x86+x64"};
with open("build_info.txt", "rb") as oFile:
iBuildNumber = int(re.search(r"build number\: (\d+)", oFile.read(), re.M).group(1));
print "Sizes (build %d)" % iBuildNumber;
for sArch in sorted(dsDoc_by_sArch.keys()):
sDoc = dsDoc_by_sArch[sArch];
iBinSize = os.path.getsize(r"build\bin\%s-exec-calc-shellcode.bin" % sArch);
iBinESPSize = os.path.getsize(r"build\bin\%s-exec-calc-shellcode-esp.bin" % sArch);
print " * %s: %d bytes (%d with stack allignment)" % (sDoc, iBinSize, iBinESPSize);
|
<commit_before><commit_msg>Add script to generate the site documentation containing the sizes of the binary shellcodes.<commit_after>
|
import os, re;
# I got the actual size of the binary code wrong on the site once - this script should help prevent that.
dsDoc_by_sArch = {"w32": "x86", "w64": "x64", "win": "x86+x64"};
with open("build_info.txt", "rb") as oFile:
iBuildNumber = int(re.search(r"build number\: (\d+)", oFile.read(), re.M).group(1));
print "Sizes (build %d)" % iBuildNumber;
for sArch in sorted(dsDoc_by_sArch.keys()):
sDoc = dsDoc_by_sArch[sArch];
iBinSize = os.path.getsize(r"build\bin\%s-exec-calc-shellcode.bin" % sArch);
iBinESPSize = os.path.getsize(r"build\bin\%s-exec-calc-shellcode-esp.bin" % sArch);
print " * %s: %d bytes (%d with stack allignment)" % (sDoc, iBinSize, iBinESPSize);
|
Add script to generate the site documentation containing the sizes of the binary shellcodes.import os, re;
# I got the actual size of the binary code wrong on the site once - this script should help prevent that.
dsDoc_by_sArch = {"w32": "x86", "w64": "x64", "win": "x86+x64"};
with open("build_info.txt", "rb") as oFile:
iBuildNumber = int(re.search(r"build number\: (\d+)", oFile.read(), re.M).group(1));
print "Sizes (build %d)" % iBuildNumber;
for sArch in sorted(dsDoc_by_sArch.keys()):
sDoc = dsDoc_by_sArch[sArch];
iBinSize = os.path.getsize(r"build\bin\%s-exec-calc-shellcode.bin" % sArch);
iBinESPSize = os.path.getsize(r"build\bin\%s-exec-calc-shellcode-esp.bin" % sArch);
print " * %s: %d bytes (%d with stack allignment)" % (sDoc, iBinSize, iBinESPSize);
|
<commit_before><commit_msg>Add script to generate the site documentation containing the sizes of the binary shellcodes.<commit_after>import os, re;
# I got the actual size of the binary code wrong on the site once - this script should help prevent that.
dsDoc_by_sArch = {"w32": "x86", "w64": "x64", "win": "x86+x64"};
with open("build_info.txt", "rb") as oFile:
iBuildNumber = int(re.search(r"build number\: (\d+)", oFile.read(), re.M).group(1));
print "Sizes (build %d)" % iBuildNumber;
for sArch in sorted(dsDoc_by_sArch.keys()):
sDoc = dsDoc_by_sArch[sArch];
iBinSize = os.path.getsize(r"build\bin\%s-exec-calc-shellcode.bin" % sArch);
iBinESPSize = os.path.getsize(r"build\bin\%s-exec-calc-shellcode-esp.bin" % sArch);
print " * %s: %d bytes (%d with stack allignment)" % (sDoc, iBinSize, iBinESPSize);
|
|
bbc208548f0dd381f3045d24db3c21c4c8ee004e
|
grovepi/scan.py
|
grovepi/scan.py
|
import time
import grove_i2c_temp_hum_mini # temp + humidity
import hp206c # altitude + temp + pressure
import grovepi # used by air sensor and dust sensor
import atexit # used for the dust sensor
import json
# Initialize the sensors
t= grove_i2c_temp_hum_mini.th02()
h= hp206c.hp206c()
grovepi.dust_sensor_en()
air_sensor = 0
grovepi.pinMode(air_sensor,"INPUT")
atexit.register(grovepi.dust_sensor_dis)
ret=h.isAvailable()
if h.OK_HP20X_DEV == ret:
print "HP20x_dev is available."
else:
print "HP20x_dev isn't available."
while True:
temp = h.ReadTemperature()
temp2 = t.getTemperature()
pressure = h.ReadPressure()
altitude = h.ReadAltitude()
humidity = t.getHumidity()
air_quality = "--"
# try:
# # Get dust
# [new_val,lowpulseoccupancy] = grovepi.dustSensorRead()
# if new_val:
# print lowpulseoccupancy
# except IOError:
# print ("Error")
try:
# Get air quality
air_quality = grovepi.analogRead(air_sensor)
if air_quality > 700:
print ("High pollution")
elif air_quality > 300:
print ("Low pollution")
else:
print ("Air fresh")
print ("air_quality =", air_quality)
except IOError:
print ("Error")
# Send result
data = {
"air_quality": air_quality,
"humidity": humidity,
"temperature": (temp + temp2) / 2,
"pressure": pressure,
"altitude": altitude
}
print json.dumps(data)
# with open('./json/hsk1.json', 'wb') as f:
# f.write(json.dumps(voc))
time.sleep(.5)
|
Test all sensors at once
|
Test all sensors at once
|
Python
|
mit
|
mmewen/UTSEUS-Binky,mmewen/UTSEUS-Binky,mmewen/UTSEUS-Binky,mmewen/UTSEUS-Binky,mmewen/UTSEUS-Binky
|
Test all sensors at once
|
import time
import grove_i2c_temp_hum_mini # temp + humidity
import hp206c # altitude + temp + pressure
import grovepi # used by air sensor and dust sensor
import atexit # used for the dust sensor
import json
# Initialize the sensors
t= grove_i2c_temp_hum_mini.th02()
h= hp206c.hp206c()
grovepi.dust_sensor_en()
air_sensor = 0
grovepi.pinMode(air_sensor,"INPUT")
atexit.register(grovepi.dust_sensor_dis)
ret=h.isAvailable()
if h.OK_HP20X_DEV == ret:
print "HP20x_dev is available."
else:
print "HP20x_dev isn't available."
while True:
temp = h.ReadTemperature()
temp2 = t.getTemperature()
pressure = h.ReadPressure()
altitude = h.ReadAltitude()
humidity = t.getHumidity()
air_quality = "--"
# try:
# # Get dust
# [new_val,lowpulseoccupancy] = grovepi.dustSensorRead()
# if new_val:
# print lowpulseoccupancy
# except IOError:
# print ("Error")
try:
# Get air quality
air_quality = grovepi.analogRead(air_sensor)
if air_quality > 700:
print ("High pollution")
elif air_quality > 300:
print ("Low pollution")
else:
print ("Air fresh")
print ("air_quality =", air_quality)
except IOError:
print ("Error")
# Send result
data = {
"air_quality": air_quality,
"humidity": humidity,
"temperature": (temp + temp2) / 2,
"pressure": pressure,
"altitude": altitude
}
print json.dumps(data)
# with open('./json/hsk1.json', 'wb') as f:
# f.write(json.dumps(voc))
time.sleep(.5)
|
<commit_before><commit_msg>Test all sensors at once<commit_after>
|
import time
import grove_i2c_temp_hum_mini # temp + humidity
import hp206c # altitude + temp + pressure
import grovepi # used by air sensor and dust sensor
import atexit # used for the dust sensor
import json
# Initialize the sensors
t= grove_i2c_temp_hum_mini.th02()
h= hp206c.hp206c()
grovepi.dust_sensor_en()
air_sensor = 0
grovepi.pinMode(air_sensor,"INPUT")
atexit.register(grovepi.dust_sensor_dis)
ret=h.isAvailable()
if h.OK_HP20X_DEV == ret:
print "HP20x_dev is available."
else:
print "HP20x_dev isn't available."
while True:
temp = h.ReadTemperature()
temp2 = t.getTemperature()
pressure = h.ReadPressure()
altitude = h.ReadAltitude()
humidity = t.getHumidity()
air_quality = "--"
# try:
# # Get dust
# [new_val,lowpulseoccupancy] = grovepi.dustSensorRead()
# if new_val:
# print lowpulseoccupancy
# except IOError:
# print ("Error")
try:
# Get air quality
air_quality = grovepi.analogRead(air_sensor)
if air_quality > 700:
print ("High pollution")
elif air_quality > 300:
print ("Low pollution")
else:
print ("Air fresh")
print ("air_quality =", air_quality)
except IOError:
print ("Error")
# Send result
data = {
"air_quality": air_quality,
"humidity": humidity,
"temperature": (temp + temp2) / 2,
"pressure": pressure,
"altitude": altitude
}
print json.dumps(data)
# with open('./json/hsk1.json', 'wb') as f:
# f.write(json.dumps(voc))
time.sleep(.5)
|
Test all sensors at onceimport time
import grove_i2c_temp_hum_mini # temp + humidity
import hp206c # altitude + temp + pressure
import grovepi # used by air sensor and dust sensor
import atexit # used for the dust sensor
import json
# Initialize the sensors
t= grove_i2c_temp_hum_mini.th02()
h= hp206c.hp206c()
grovepi.dust_sensor_en()
air_sensor = 0
grovepi.pinMode(air_sensor,"INPUT")
atexit.register(grovepi.dust_sensor_dis)
ret=h.isAvailable()
if h.OK_HP20X_DEV == ret:
print "HP20x_dev is available."
else:
print "HP20x_dev isn't available."
while True:
temp = h.ReadTemperature()
temp2 = t.getTemperature()
pressure = h.ReadPressure()
altitude = h.ReadAltitude()
humidity = t.getHumidity()
air_quality = "--"
# try:
# # Get dust
# [new_val,lowpulseoccupancy] = grovepi.dustSensorRead()
# if new_val:
# print lowpulseoccupancy
# except IOError:
# print ("Error")
try:
# Get air quality
air_quality = grovepi.analogRead(air_sensor)
if air_quality > 700:
print ("High pollution")
elif air_quality > 300:
print ("Low pollution")
else:
print ("Air fresh")
print ("air_quality =", air_quality)
except IOError:
print ("Error")
# Send result
data = {
"air_quality": air_quality,
"humidity": humidity,
"temperature": (temp + temp2) / 2,
"pressure": pressure,
"altitude": altitude
}
print json.dumps(data)
# with open('./json/hsk1.json', 'wb') as f:
# f.write(json.dumps(voc))
time.sleep(.5)
|
<commit_before><commit_msg>Test all sensors at once<commit_after>import time
import grove_i2c_temp_hum_mini # temp + humidity
import hp206c # altitude + temp + pressure
import grovepi # used by air sensor and dust sensor
import atexit # used for the dust sensor
import json
# Initialize the sensors
t= grove_i2c_temp_hum_mini.th02()
h= hp206c.hp206c()
grovepi.dust_sensor_en()
air_sensor = 0
grovepi.pinMode(air_sensor,"INPUT")
atexit.register(grovepi.dust_sensor_dis)
ret=h.isAvailable()
if h.OK_HP20X_DEV == ret:
print "HP20x_dev is available."
else:
print "HP20x_dev isn't available."
while True:
temp = h.ReadTemperature()
temp2 = t.getTemperature()
pressure = h.ReadPressure()
altitude = h.ReadAltitude()
humidity = t.getHumidity()
air_quality = "--"
# try:
# # Get dust
# [new_val,lowpulseoccupancy] = grovepi.dustSensorRead()
# if new_val:
# print lowpulseoccupancy
# except IOError:
# print ("Error")
try:
# Get air quality
air_quality = grovepi.analogRead(air_sensor)
if air_quality > 700:
print ("High pollution")
elif air_quality > 300:
print ("Low pollution")
else:
print ("Air fresh")
print ("air_quality =", air_quality)
except IOError:
print ("Error")
# Send result
data = {
"air_quality": air_quality,
"humidity": humidity,
"temperature": (temp + temp2) / 2,
"pressure": pressure,
"altitude": altitude
}
print json.dumps(data)
# with open('./json/hsk1.json', 'wb') as f:
# f.write(json.dumps(voc))
time.sleep(.5)
|
|
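A minimal consumer sketch for the sensor loop above, assuming its stdout is piped into another process one JSON object per line; the field names come from the dict the script prints, and the thresholds simply mirror its own pollution bands.

import json
import sys

def classify(line):
    # Parse one JSON line emitted by the sensor loop and label the air-quality band.
    reading = json.loads(line)
    quality = reading["air_quality"]
    if quality == "--":          # the script falls back to "--" when the analog read fails
        return "unknown"
    if quality > 700:
        return "high pollution"
    if quality > 300:
        return "low pollution"
    return "air fresh"

if __name__ == "__main__":
    for raw in sys.stdin:
        raw = raw.strip()
        if raw:
            print(classify(raw))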
c834082c59abe6ae6d2e065e1a5afac2d399a612
|
lib/bridgedb/test/test_crypto.py
|
lib/bridgedb/test/test_crypto.py
|
# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2013, Isis Lovecruft
# (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-Clause BSD, see LICENSE for licensing information
"""Unittests for :mod:`bridgedb.crypto`."""
from __future__ import print_function
from __future__ import unicode_literals
import os
from twisted.trial import unittest
from bridgedb import crypto
SEKRIT_KEY = b'v\x16Xm\xfc\x1b}\x063\x85\xaa\xa5\xf9\xad\x18\xb2P\x93\xc6k\xf9'
SEKRIT_KEY += b'\x8bI\xd9\xb8xw\xf5\xec\x1b\x7f\xa8'
class CryptoTest(unittest.TestCase):
def test_getKey_nokey(self):
"""Test retrieving the secret_key from an empty file."""
filename = os.path.join(os.getcwd(), 'sekrit')
key = crypto.getKey(filename)
self.failUnlessIsInstance(key, basestring,
"key isn't a string! type=%r" % type(key))
def test_getKey_tmpfile(self):
"""Test retrieving the secret_key from a new tmpfile."""
filename = self.mktemp()
key = crypto.getKey(filename)
self.failUnlessIsInstance(key, basestring,
"key isn't a string! type=%r" % type(key))
def test_getKey_keyexists(self):
"""Write the example key to a file and test reading it back."""
filename = self.mktemp()
with open(filename, 'wb') as fh:
fh.write(SEKRIT_KEY)
fh.flush()
key = crypto.getKey(filename)
self.failUnlessIsInstance(key, basestring,
"key isn't a string! type=%r" % type(key))
self.assertEqual(SEKRIT_KEY, key,
"""The example key and the one read from file differ!
key (in hex): %s
SEKRIT_KEY (in hex): %s"""
% (key.encode('hex'), SEKRIT_KEY.encode('hex')))
|
Add unittests for the bridgedb.crypto module.
|
Add unittests for the bridgedb.crypto module.
|
Python
|
bsd-3-clause
|
mmaker/bridgedb,pagea/bridgedb,mmaker/bridgedb,wfn/bridgedb,pagea/bridgedb,wfn/bridgedb
|
Add unittests for the bridgedb.crypto module.
|
# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2013, Isis Lovecruft
# (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-Clause BSD, see LICENSE for licensing information
"""Unittests for :mod:`bridgedb.crypto`."""
from __future__ import print_function
from __future__ import unicode_literals
import os
from twisted.trial import unittest
from bridgedb import crypto
SEKRIT_KEY = b'v\x16Xm\xfc\x1b}\x063\x85\xaa\xa5\xf9\xad\x18\xb2P\x93\xc6k\xf9'
SEKRIT_KEY += b'\x8bI\xd9\xb8xw\xf5\xec\x1b\x7f\xa8'
class CryptoTest(unittest.TestCase):
def test_getKey_nokey(self):
"""Test retrieving the secret_key from an empty file."""
filename = os.path.join(os.getcwd(), 'sekrit')
key = crypto.getKey(filename)
self.failUnlessIsInstance(key, basestring,
"key isn't a string! type=%r" % type(key))
def test_getKey_tmpfile(self):
"""Test retrieving the secret_key from a new tmpfile."""
filename = self.mktemp()
key = crypto.getKey(filename)
self.failUnlessIsInstance(key, basestring,
"key isn't a string! type=%r" % type(key))
def test_getKey_keyexists(self):
"""Write the example key to a file and test reading it back."""
filename = self.mktemp()
with open(filename, 'wb') as fh:
fh.write(SEKRIT_KEY)
fh.flush()
key = crypto.getKey(filename)
self.failUnlessIsInstance(key, basestring,
"key isn't a string! type=%r" % type(key))
self.assertEqual(SEKRIT_KEY, key,
"""The example key and the one read from file differ!
key (in hex): %s
SEKRIT_KEY (in hex): %s"""
% (key.encode('hex'), SEKRIT_KEY.encode('hex')))
|
<commit_before><commit_msg>Add unittests for the bridgedb.crypto module.<commit_after>
|
# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2013, Isis Lovecruft
# (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-Clause BSD, see LICENSE for licensing information
"""Unittests for :mod:`bridgedb.crypto`."""
from __future__ import print_function
from __future__ import unicode_literals
import os
from twisted.trial import unittest
from bridgedb import crypto
SEKRIT_KEY = b'v\x16Xm\xfc\x1b}\x063\x85\xaa\xa5\xf9\xad\x18\xb2P\x93\xc6k\xf9'
SEKRIT_KEY += b'\x8bI\xd9\xb8xw\xf5\xec\x1b\x7f\xa8'
class CryptoTest(unittest.TestCase):
def test_getKey_nokey(self):
"""Test retrieving the secret_key from an empty file."""
filename = os.path.join(os.getcwd(), 'sekrit')
key = crypto.getKey(filename)
self.failUnlessIsInstance(key, basestring,
"key isn't a string! type=%r" % type(key))
def test_getKey_tmpfile(self):
"""Test retrieving the secret_key from a new tmpfile."""
filename = self.mktemp()
key = crypto.getKey(filename)
self.failUnlessIsInstance(key, basestring,
"key isn't a string! type=%r" % type(key))
def test_getKey_keyexists(self):
"""Write the example key to a file and test reading it back."""
filename = self.mktemp()
with open(filename, 'wb') as fh:
fh.write(SEKRIT_KEY)
fh.flush()
key = crypto.getKey(filename)
self.failUnlessIsInstance(key, basestring,
"key isn't a string! type=%r" % type(key))
self.assertEqual(SEKRIT_KEY, key,
"""The example key and the one read from file differ!
key (in hex): %s
SEKRIT_KEY (in hex): %s"""
% (key.encode('hex'), SEKRIT_KEY.encode('hex')))
|
Add unittests for the bridgedb.crypto module.# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2013, Isis Lovecruft
# (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-Clause BSD, see LICENSE for licensing information
"""Unittests for :mod:`bridgedb.crypto`."""
from __future__ import print_function
from __future__ import unicode_literals
import os
from twisted.trial import unittest
from bridgedb import crypto
SEKRIT_KEY = b'v\x16Xm\xfc\x1b}\x063\x85\xaa\xa5\xf9\xad\x18\xb2P\x93\xc6k\xf9'
SEKRIT_KEY += b'\x8bI\xd9\xb8xw\xf5\xec\x1b\x7f\xa8'
class CryptoTest(unittest.TestCase):
def test_getKey_nokey(self):
"""Test retrieving the secret_key from an empty file."""
filename = os.path.join(os.getcwd(), 'sekrit')
key = crypto.getKey(filename)
self.failUnlessIsInstance(key, basestring,
"key isn't a string! type=%r" % type(key))
def test_getKey_tmpfile(self):
"""Test retrieving the secret_key from a new tmpfile."""
filename = self.mktemp()
key = crypto.getKey(filename)
self.failUnlessIsInstance(key, basestring,
"key isn't a string! type=%r" % type(key))
def test_getKey_keyexists(self):
"""Write the example key to a file and test reading it back."""
filename = self.mktemp()
with open(filename, 'wb') as fh:
fh.write(SEKRIT_KEY)
fh.flush()
key = crypto.getKey(filename)
self.failUnlessIsInstance(key, basestring,
"key isn't a string! type=%r" % type(key))
self.assertEqual(SEKRIT_KEY, key,
"""The example key and the one read from file differ!
key (in hex): %s
SEKRIT_KEY (in hex): %s"""
% (key.encode('hex'), SEKRIT_KEY.encode('hex')))
|
<commit_before><commit_msg>Add unittests for the bridgedb.crypto module.<commit_after># -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2013, Isis Lovecruft
# (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-Clause BSD, see LICENSE for licensing information
"""Unittests for :mod:`bridgedb.crypto`."""
from __future__ import print_function
from __future__ import unicode_literals
import os
from twisted.trial import unittest
from bridgedb import crypto
SEKRIT_KEY = b'v\x16Xm\xfc\x1b}\x063\x85\xaa\xa5\xf9\xad\x18\xb2P\x93\xc6k\xf9'
SEKRIT_KEY += b'\x8bI\xd9\xb8xw\xf5\xec\x1b\x7f\xa8'
class CryptoTest(unittest.TestCase):
def test_getKey_nokey(self):
"""Test retrieving the secret_key from an empty file."""
filename = os.path.join(os.getcwd(), 'sekrit')
key = crypto.getKey(filename)
self.failUnlessIsInstance(key, basestring,
"key isn't a string! type=%r" % type(key))
def test_getKey_tmpfile(self):
"""Test retrieving the secret_key from a new tmpfile."""
filename = self.mktemp()
key = crypto.getKey(filename)
self.failUnlessIsInstance(key, basestring,
"key isn't a string! type=%r" % type(key))
def test_getKey_keyexists(self):
"""Write the example key to a file and test reading it back."""
filename = self.mktemp()
with open(filename, 'wb') as fh:
fh.write(SEKRIT_KEY)
fh.flush()
key = crypto.getKey(filename)
self.failUnlessIsInstance(key, basestring,
"key isn't a string! type=%r" % type(key))
self.assertEqual(SEKRIT_KEY, key,
"""The example key and the one read from file differ!
key (in hex): %s
SEKRIT_KEY (in hex): %s"""
% (key.encode('hex'), SEKRIT_KEY.encode('hex')))
|
|
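A rough stand-in for the behaviour those tests exercise, assuming getKey returns the file's bytes when the key file already exists and otherwise writes and returns a fresh random key; the real implementation lives in bridgedb.crypto and may differ in detail.

import os

def get_key_standin(filename, length=32):
    # Reuse an existing, non-empty key file verbatim (mirrors test_getKey_keyexists).
    if os.path.exists(filename) and os.path.getsize(filename) > 0:
        with open(filename, 'rb') as fh:
            return fh.read()
    # Otherwise create one with fresh random bytes (mirrors test_getKey_tmpfile).
    key = os.urandom(length)
    with open(filename, 'wb') as fh:
        fh.write(key)
    return key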
955bca3beb7808636a586bed43c37e5f74fba17f
|
kino/functions/weather.py
|
kino/functions/weather.py
|
# -*- coding: utf-8 -*-
import datetime
import forecastio
from geopy.geocoders import GoogleV3
from kino.template import MsgTemplate
from slack.slackbot import SlackerAdapter
from utils.config import Config
class Weather(object):
def __init__(self):
self.config = Config()
self.slackbot = SlackerAdapter()
self.template = MsgTemplate()
geolocator = GoogleV3()
self.location = geolocator.geocode(self.config.weather["HOME"])
api_key = self.config.weather["DARK_SKY_SECRET_KEY"]
lat = self.location.latitude
lon = self.location.longitude
self.forecastio = forecastio.load_forecast(api_key, lat, lon)
def read(self, when='current'):
if when == 'current':
self.__current_forecast()
elif when == 'daily':
self.__daily_forecast()
def __daily_forecast(self):
daily = self.forecastio.daily()
address = self.location.address
icon = daily.icon
summary = daily.summary
attachments = self.template.make_weather_template(address, icon, summary)
self.slackbot.send_message(attachments=attachments)
def __current_forecast(self):
current = self.forecastio.currently()
address = self.location.address
icon = current.icon
summary = current.summary
temperature = current.temperature
attachments = self.template.make_weather_template(address, icon, summary, temperature=temperature)
self.slackbot.send_message(attachments=attachments)
|
Add Weather class (use forecastio, geopy) - forecast (current/daily)
|
Add Weather class (use forecastio, geopy) - forecast (current/daily)
|
Python
|
mit
|
DongjunLee/kino-bot
|
Add Weather class (use forecastio, geopy) - forecast (current/daily)
|
# -*- coding: utf-8 -*-
import datetime
import forecastio
from geopy.geocoders import GoogleV3
from kino.template import MsgTemplate
from slack.slackbot import SlackerAdapter
from utils.config import Config
class Weather(object):
def __init__(self):
self.config = Config()
self.slackbot = SlackerAdapter()
self.template = MsgTemplate()
geolocator = GoogleV3()
self.location = geolocator.geocode(self.config.weather["HOME"])
api_key = self.config.weather["DARK_SKY_SECRET_KEY"]
lat = self.location.latitude
lon = self.location.longitude
self.forecastio = forecastio.load_forecast(api_key, lat, lon)
def read(self, when='current'):
if when == 'current':
self.__current_forecast()
elif when == 'daily':
self.__daily_forecast()
def __daily_forecast(self):
daily = self.forecastio.daily()
address = self.location.address
icon = daily.icon
summary = daily.summary
attachments = self.template.make_weather_template(address, icon, summary)
self.slackbot.send_message(attachments=attachments)
def __current_forecast(self):
current = self.forecastio.currently()
address = self.location.address
icon = current.icon
summary = current.summary
temperature = current.temperature
attachments = self.template.make_weather_template(address, icon, summary, temperature=temperature)
self.slackbot.send_message(attachments=attachments)
|
<commit_before><commit_msg>Add Weather class (use forecastio, geopy) - forecast (current/daily)<commit_after>
|
# -*- coding: utf-8 -*-
import datetime
import forecastio
from geopy.geocoders import GoogleV3
from kino.template import MsgTemplate
from slack.slackbot import SlackerAdapter
from utils.config import Config
class Weather(object):
def __init__(self):
self.config = Config()
self.slackbot = SlackerAdapter()
self.template = MsgTemplate()
geolocator = GoogleV3()
self.location = geolocator.geocode(self.config.weather["HOME"])
api_key = self.config.weather["DARK_SKY_SECRET_KEY"]
lat = self.location.latitude
lon = self.location.longitude
self.forecastio = forecastio.load_forecast(api_key, lat, lon)
def read(self, when='current'):
if when == 'current':
self.__current_forecast()
elif when == 'daily':
self.__daily_forecast()
def __daily_forecast(self):
daily = self.forecastio.daily()
address = self.location.address
icon = daily.icon
summary = daily.summary
attachments = self.template.make_weather_template(address, icon, summary)
self.slackbot.send_message(attachments=attachments)
def __current_forecast(self):
current = self.forecastio.currently()
address = self.location.address
icon = current.icon
summary = current.summary
temperature = current.temperature
attachments = self.template.make_weather_template(address, icon, summary, temperature=temperature)
self.slackbot.send_message(attachments=attachments)
|
Add Weather class (use forecastio, geopy) - forecast (current/daily)# -*- coding: utf-8 -*-
import datetime
import forecastio
from geopy.geocoders import GoogleV3
from kino.template import MsgTemplate
from slack.slackbot import SlackerAdapter
from utils.config import Config
class Weather(object):
def __init__(self):
self.config = Config()
self.slackbot = SlackerAdapter()
self.template = MsgTemplate()
geolocator = GoogleV3()
self.location = geolocator.geocode(self.config.weather["HOME"])
api_key = self.config.weather["DARK_SKY_SECRET_KEY"]
lat = self.location.latitude
lon = self.location.longitude
self.forecastio = forecastio.load_forecast(api_key, lat, lon)
def read(self, when='current'):
if when == 'current':
self.__current_forecast()
elif when == 'daily':
self.__daily_forecast()
def __daily_forecast(self):
daily = self.forecastio.daily()
address = self.location.address
icon = daily.icon
summary = daily.summary
attachments = self.template.make_weather_template(address, icon, summary)
self.slackbot.send_message(attachments=attachments)
def __current_forecast(self):
current = self.forecastio.currently()
address = self.location.address
icon = current.icon
summary = current.summary
temperature = current.temperature
attachments = self.template.make_weather_template(address, icon, summary, temperature=temperature)
self.slackbot.send_message(attachments=attachments)
|
<commit_before><commit_msg>Add Weather class (use forecastio, geopy) - forecast (current/daily)<commit_after># -*- coding: utf-8 -*-
import datetime
import forecastio
from geopy.geocoders import GoogleV3
from kino.template import MsgTemplate
from slack.slackbot import SlackerAdapter
from utils.config import Config
class Weather(object):
def __init__(self):
self.config = Config()
self.slackbot = SlackerAdapter()
self.template = MsgTemplate()
geolocator = GoogleV3()
self.location = geolocator.geocode(self.config.weather["HOME"])
api_key = self.config.weather["DARK_SKY_SECRET_KEY"]
lat = self.location.latitude
lon = self.location.longitude
self.forecastio = forecastio.load_forecast(api_key, lat, lon)
def read(self, when='current'):
if when == 'current':
self.__current_forecast()
elif when == 'daily':
self.__daily_forecast()
def __daily_forecast(self):
daily = self.forecastio.daily()
address = self.location.address
icon = daily.icon
summary = daily.summary
attachments = self.template.make_weather_template(address, icon, summary)
self.slackbot.send_message(attachments=attachments)
def __current_forecast(self):
current = self.forecastio.currently()
address = self.location.address
icon = current.icon
summary = current.summary
temperature = current.temperature
attachments = self.template.make_weather_template(address, icon, summary, temperature=temperature)
self.slackbot.send_message(attachments=attachments)
|
|
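A usage sketch for the Weather class above; the import path is inferred from the file name, and a config exposing HOME plus DARK_SKY_SECRET_KEY (via utils.config.Config) as well as network access to the geocoding and Dark Sky APIs are assumed.

from kino.functions.weather import Weather  # module path assumed from kino/functions/weather.py

weather = Weather()
weather.read(when='current')  # sends the current-conditions card to Slack
weather.read(when='daily')    # sends the daily summary card instead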
6789f2ea1862f4c30e8d60bd0b47640b7e5835c1
|
count_labels.py
|
count_labels.py
|
"""Count HEEM labels in data set.
Usage: python count_labels.py <dir with train and test files>
"""
import codecs
from glob import glob
import numpy as np
import argparse
from collections import Counter
def load_data(data_file):
data = [ln.rsplit(None, 1) for ln in open(data_file)]
X_data, Y_data = zip(*data)
return X_data, Y_data
def count_labels(file_name, counter):
# load data set
X_data, Y_data = load_data(file_name)
Y = [s.split('_') for s in Y_data]
for labelset in Y:
counter.update(labelset)
del counter['None']
return counter
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the directory where the input text '
'files can be found.')
args = parser.parse_args()
train_file = '{}/train_1.txt'.format(args.input_dir)
test_file = '{}/test_1.txt'.format(args.input_dir)
labels = Counter()
labels = count_labels(train_file, labels)
labels = count_labels(test_file, labels)
for l, freq in labels.most_common():
print '{}\t{}'.format(l, freq)
|
Add script to count labels in a data set
|
Add script to count labels in a data set
(Moved from embodied emotions ml code.)
|
Python
|
apache-2.0
|
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
|
Add script to count labels in a data set
(Moved from embodied emotions ml code.)
|
"""Count HEEM labels in data set.
Usage: python count_labels.py <dir with train and test files>
"""
import codecs
from glob import glob
import numpy as np
import argparse
from collections import Counter
def load_data(data_file):
data = [ln.rsplit(None, 1) for ln in open(data_file)]
X_data, Y_data = zip(*data)
return X_data, Y_data
def count_labels(file_name, counter):
# load data set
X_data, Y_data = load_data(file_name)
Y = [s.split('_') for s in Y_data]
for labelset in Y:
counter.update(labelset)
del counter['None']
return counter
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the directory where the input text '
'files can be found.')
args = parser.parse_args()
train_file = '{}/train_1.txt'.format(args.input_dir)
test_file = '{}/test_1.txt'.format(args.input_dir)
labels = Counter()
labels = count_labels(train_file, labels)
labels = count_labels(test_file, labels)
for l, freq in labels.most_common():
print '{}\t{}'.format(l, freq)
|
<commit_before><commit_msg>Add script to count labels in a data set
(Moved from embodied emotions ml code.)<commit_after>
|
"""Count HEEM labels in data set.
Usage: python count_labels.py <dir with train and test files>
"""
import codecs
from glob import glob
import numpy as np
import argparse
from collections import Counter
def load_data(data_file):
data = [ln.rsplit(None, 1) for ln in open(data_file)]
X_data, Y_data = zip(*data)
return X_data, Y_data
def count_labels(file_name, counter):
# load data set
X_data, Y_data = load_data(file_name)
Y = [s.split('_') for s in Y_data]
for labelset in Y:
counter.update(labelset)
del counter['None']
return counter
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the directory where the input text '
'files can be found.')
args = parser.parse_args()
train_file = '{}/train_1.txt'.format(args.input_dir)
test_file = '{}/test_1.txt'.format(args.input_dir)
labels = Counter()
labels = count_labels(train_file, labels)
labels = count_labels(test_file, labels)
for l, freq in labels.most_common():
print '{}\t{}'.format(l, freq)
|
Add script to count labels in a data set
(Moved from embodied emotions ml code.)"""Count HEEM labels in data set.
Usage: python count_labels.py <dir with train and test files>
"""
import codecs
from glob import glob
import numpy as np
import argparse
from collections import Counter
def load_data(data_file):
data = [ln.rsplit(None, 1) for ln in open(data_file)]
X_data, Y_data = zip(*data)
return X_data, Y_data
def count_labels(file_name, counter):
# load data set
X_data, Y_data = load_data(file_name)
Y = [s.split('_') for s in Y_data]
for labelset in Y:
counter.update(labelset)
del counter['None']
return counter
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the directory where the input text '
'files can be found.')
args = parser.parse_args()
train_file = '{}/train_1.txt'.format(args.input_dir)
test_file = '{}/test_1.txt'.format(args.input_dir)
labels = Counter()
labels = count_labels(train_file, labels)
labels = count_labels(test_file, labels)
for l, freq in labels.most_common():
print '{}\t{}'.format(l, freq)
|
<commit_before><commit_msg>Add script to count labels in a data set
(Moved from embodied emotions ml code.)<commit_after>"""Count HEEM labels in data set.
Usage: python count_labels.py <dir with train and test files>
"""
import codecs
from glob import glob
import numpy as np
import argparse
from collections import Counter
def load_data(data_file):
data = [ln.rsplit(None, 1) for ln in open(data_file)]
X_data, Y_data = zip(*data)
return X_data, Y_data
def count_labels(file_name, counter):
# load data set
X_data, Y_data = load_data(file_name)
Y = [s.split('_') for s in Y_data]
for labelset in Y:
counter.update(labelset)
del counter['None']
return counter
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the directory where the input text '
'files can be found.')
args = parser.parse_args()
train_file = '{}/train_1.txt'.format(args.input_dir)
test_file = '{}/test_1.txt'.format(args.input_dir)
labels = Counter()
labels = count_labels(train_file, labels)
labels = count_labels(test_file, labels)
for l, freq in labels.most_common():
print '{}\t{}'.format(l, freq)
|
|
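How the label parsing in count_labels.py behaves on a single line, with made-up tokens and labels; the real train_1.txt/test_1.txt lines end in one underscore-joined label field, which is what rsplit(None, 1) peels off. Invocation would be along the lines of: python count_labels.py /path/to/fold_dir

# A made-up data line: feature tokens followed by one trailing label field.
line = "some feature tokens here LabelA_LabelB"
features, label_field = line.rsplit(None, 1)
print(label_field.split('_'))   # ['LabelA', 'LabelB']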
a9609a500a65cc0efb787f5d90e164bd6fa48c1a
|
leftViewofBST.py
|
leftViewofBST.py
|
class BST:
def __init__(self,val):
self.left = None
self.right = None
self.data = val
def insertToBst(root,value):
if root is None:
root = value
else:
if value.data < root.data:
if root.left is None:
root.left = value
else:
insertToBst(root.left, value)
else:
if root.right is None:
root.right = value
else:
insertToBst(root.right, value)
def leftView(root,level,currentLevel):
if not root:
return
else:
if (currentLevel[0] < level):
print root.data
currentLevel[0] = level
leftView(root.left, level+1, currentLevel)
leftView(root.right, level+1, currentLevel)
tree = BST(5)
insertToBst(tree, BST(4))
insertToBst(tree, BST(6))
insertToBst(tree, BST(2))
insertToBst(tree, BST(1))
insertToBst(tree, BST(7))
insertToBst(tree, BST(8))
insertToBst(tree, BST(9))
insertToBst(tree, BST(10))
leftView(tree, 1, [0]) # => 5,4,2,1,9,10 ,O(n)
|
Print the left view of a BST
|
Print the left view of a BST
|
Python
|
mit
|
arunkumarpalaniappan/algorithm_tryouts
|
Print the left view of a BST
|
class BST:
def __init__(self,val):
self.left = None
self.right = None
self.data = val
def insertToBst(root,value):
if root is None:
root = value
else:
if value.data < root.data:
if root.left is None:
root.left = value
else:
insertToBst(root.left, value)
else:
if root.right is None:
root.right = value
else:
insertToBst(root.right, value)
def leftView(root,level,currentLevel):
if not root:
return
else:
if (currentLevel[0] < level):
print root.data
currentLevel[0] = level
leftView(root.left, level+1, currentLevel)
leftView(root.right, level+1, currentLevel)
tree = BST(5)
insertToBst(tree, BST(4))
insertToBst(tree, BST(6))
insertToBst(tree, BST(2))
insertToBst(tree, BST(1))
insertToBst(tree, BST(7))
insertToBst(tree, BST(8))
insertToBst(tree, BST(9))
insertToBst(tree, BST(10))
leftView(tree, 1, [0]) # => 5,4,2,1,9,10 ,O(n)
|
<commit_before><commit_msg>Print the left view of a BST<commit_after>
|
class BST:
def __init__(self,val):
self.left = None
self.right = None
self.data = val
def insertToBst(root,value):
if root is None:
root = value
else:
if value.data < root.data:
if root.left is None:
root.left = value
else:
insertToBst(root.left, value)
else:
if root.right is None:
root.right = value
else:
insertToBst(root.right, value)
def leftView(root,level,currentLevel):
if not root:
return
else:
if (currentLevel[0] < level):
print root.data
currentLevel[0] = level
leftView(root.left, level+1, currentLevel)
leftView(root.right, level+1, currentLevel)
tree = BST(5)
insertToBst(tree, BST(4))
insertToBst(tree, BST(6))
insertToBst(tree, BST(2))
insertToBst(tree, BST(1))
insertToBst(tree, BST(7))
insertToBst(tree, BST(8))
insertToBst(tree, BST(9))
insertToBst(tree, BST(10))
leftView(tree, 1, [0]) # => 5,4,2,1,9,10 ,O(n)
|
Print the left view of a BSTclass BST:
def __init__(self,val):
self.left = None
self.right = None
self.data = val
def insertToBst(root,value):
if root is None:
root = value
else:
if value.data < root.data:
if root.left is None:
root.left = value
else:
insertToBst(root.left, value)
else:
if root.right is None:
root.right = value
else:
insertToBst(root.right, value)
def leftView(root,level,currentLevel):
if not root:
return
else:
if (currentLevel[0] < level):
print root.data
currentLevel[0] = level
leftView(root.left, level+1, currentLevel)
leftView(root.right, level+1, currentLevel)
tree = BST(5)
insertToBst(tree, BST(4))
insertToBst(tree, BST(6))
insertToBst(tree, BST(2))
insertToBst(tree, BST(1))
insertToBst(tree, BST(7))
insertToBst(tree, BST(8))
insertToBst(tree, BST(9))
insertToBst(tree, BST(10))
leftView(tree, 1, [0]) # => 5,4,2,1,9,10 ,O(n)
|
<commit_before><commit_msg>Print the left view of a BST<commit_after>class BST:
def __init__(self,val):
self.left = None
self.right = None
self.data = val
def insertToBst(root,value):
if root is None:
root = value
else:
if value.data < root.data:
if root.left is None:
root.left = value
else:
insertToBst(root.left, value)
else:
if root.right is None:
root.right = value
else:
insertToBst(root.right, value)
def leftView(root,level,currentLevel):
if not root:
return
else:
if (currentLevel[0] < level):
print root.data
currentLevel[0] = level
leftView(root.left, level+1, currentLevel)
leftView(root.right, level+1, currentLevel)
tree = BST(5)
insertToBst(tree, BST(4))
insertToBst(tree, BST(6))
insertToBst(tree, BST(2))
insertToBst(tree, BST(1))
insertToBst(tree, BST(7))
insertToBst(tree, BST(8))
insertToBst(tree, BST(9))
insertToBst(tree, BST(10))
leftView(tree, 1, [0]) # => 5,4,2,1,9,10 ,O(n)
|
|
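The single-element list passed as currentLevel above is what lets the recursion remember, across calls, the deepest level already printed; an equivalent iterative rendering, shown only as an illustration, walks the tree level by level and keeps the first node of each level.

from collections import deque

def left_view_iterative(root):
    # Breadth-first traversal; the first node taken from each level forms the left view.
    view = []
    queue = deque([root]) if root else deque()
    while queue:
        for i in range(len(queue)):
            node = queue.popleft()
            if i == 0:
                view.append(node.data)
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
    return view

# With the tree built above this returns [5, 4, 2, 1, 9, 10], matching the recursive version.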
e19097216c090c0e3f4b68c743d6427f012ab69e
|
txlege84/legislators/migrations/0004_auto_20141201_1604.py
|
txlege84/legislators/migrations/0004_auto_20141201_1604.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('legislators', '0003_auto_20141120_1731'),
]
operations = [
migrations.AlterField(
model_name='legislator',
name='party',
field=models.ForeignKey(related_name='legislators', blank=True, to='legislators.Party', null=True),
preserve_default=True,
),
]
|
Add migration for legislator change
|
Add migration for legislator change
|
Python
|
mit
|
texastribune/txlege84,texastribune/txlege84,texastribune/txlege84,texastribune/txlege84
|
Add migration for legislator change
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('legislators', '0003_auto_20141120_1731'),
]
operations = [
migrations.AlterField(
model_name='legislator',
name='party',
field=models.ForeignKey(related_name='legislators', blank=True, to='legislators.Party', null=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration for legislator change<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('legislators', '0003_auto_20141120_1731'),
]
operations = [
migrations.AlterField(
model_name='legislator',
name='party',
field=models.ForeignKey(related_name='legislators', blank=True, to='legislators.Party', null=True),
preserve_default=True,
),
]
|
Add migration for legislator change# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('legislators', '0003_auto_20141120_1731'),
]
operations = [
migrations.AlterField(
model_name='legislator',
name='party',
field=models.ForeignKey(related_name='legislators', blank=True, to='legislators.Party', null=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration for legislator change<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('legislators', '0003_auto_20141120_1731'),
]
operations = [
migrations.AlterField(
model_name='legislator',
name='party',
field=models.ForeignKey(related_name='legislators', blank=True, to='legislators.Party', null=True),
preserve_default=True,
),
]
|
|
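For context, the Legislator field declaration that would produce the AlterField above looks roughly like the sketch below, on the Django 1.7-era API this migration targets (no on_delete argument yet); the model's other fields are omitted.

from django.db import models

class Legislator(models.Model):
    # blank/null make the party optional, which is exactly what the migration changes.
    party = models.ForeignKey('legislators.Party', related_name='legislators',
                              blank=True, null=True)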
897843932937faa841220cde90bdc89603d95615
|
hackerrank/linked-list/dedup.py
|
hackerrank/linked-list/dedup.py
|
# https://www.hackerrank.com/challenges/delete-duplicate-value-nodes-from-a-sorted-linked-list/problem
def RemoveDuplicates(head):
if head is None:
return None
curr = head
while curr.next is not None:
currentData = curr.data
next = curr.next;
nextData = next.data
if currentData == nextData:
curr.next = curr.next.next
else:
curr = curr.next
return head
|
Solve hackerrank linked list problem
|
[algorithm] Solve hackerrank linked list problem
|
Python
|
mit
|
honux77/practice,honux77/practice,honux77/practice,honux77/practice,honux77/practice,honux77/practice,honux77/practice,honux77/practice,honux77/practice,honux77/practice,honux77/practice,honux77/practice,honux77/practice
|
[algorithm] Solve hackerrank linked list problem
|
# https://www.hackerrank.com/challenges/delete-duplicate-value-nodes-from-a-sorted-linked-list/problem
def RemoveDuplicates(head):
if head is None:
return None
curr = head
while curr.next is not None:
currentData = curr.data
next = curr.next;
nextData = next.data
if currentData == nextData:
curr.next = curr.next.next
else:
curr = curr.next
return head
|
<commit_before><commit_msg>[algorithm] Solve hackerrank linked list problem<commit_after>
|
# https://www.hackerrank.com/challenges/delete-duplicate-value-nodes-from-a-sorted-linked-list/problem
def RemoveDuplicates(head):
if head is None:
return None
curr = head
while curr.next is not None:
currentData = curr.data
next = curr.next;
nextData = next.data
if currentData == nextData:
curr.next = curr.next.next
else:
curr = curr.next
return head
|
[algorithm] Solve hackerrank linked list problem# https://www.hackerrank.com/challenges/delete-duplicate-value-nodes-from-a-sorted-linked-list/problem
def RemoveDuplicates(head):
if head is None:
return None
curr = head
while curr.next is not None:
currentData = curr.data
next = curr.next;
nextData = next.data
if currentData == nextData:
curr.next = curr.next.next
else:
curr = curr.next
return head
|
<commit_before><commit_msg>[algorithm] Solve hackerrank linked list problem<commit_after># https://www.hackerrank.com/challenges/delete-duplicate-value-nodes-from-a-sorted-linked-list/problem
def RemoveDuplicates(head):
if head is None:
return None
curr = head
while curr.next is not None:
currentData = curr.data
next = curr.next;
nextData = next.data
if currentData == nextData:
curr.next = curr.next.next
else:
curr = curr.next
return head
|
|
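A self-contained check of RemoveDuplicates outside the HackerRank harness; the Node class is a stand-in for whatever the judge normally supplies, and the function above is assumed to be in scope.

class Node(object):
    def __init__(self, data):
        self.data = data
        self.next = None

def build(values):
    # Build a sorted singly linked list from a Python list and return its head.
    head = tail = None
    for v in values:
        node = Node(v)
        if head is None:
            head = node
        else:
            tail.next = node
        tail = node
    return head

head = RemoveDuplicates(build([1, 1, 2, 2, 2, 3]))
out = []
while head:
    out.append(head.data)
    head = head.next
print(out)   # [1, 2, 3]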
a8274a5d5e4ec68f3ee594ffa741e90f11cf24db
|
tools/update_test_bmv2_jsons.py
|
tools/update_test_bmv2_jsons.py
|
#!/usr/bin/env python2
import argparse
import fnmatch
import os
import subprocess
import sys
def find_files(root):
files = []
for path_prefix, _, filenames in os.walk(root, followlinks=False):
for filename in fnmatch.filter(filenames, '*.p4'):
path = os.path.join(path_prefix, filename)
json_path = os.path.splitext(path)[0] + ".json"
if os.path.exists(json_path):
files.append([path, json_path])
return files
def check_compiler_exec(path):
try:
with open(os.devnull, 'w') as devnull:
subprocess.check_call([path, "--version"],
stdout=devnull, stderr=devnull)
return True
except subprocess.CalledProcessError:
return True
except OSError: # exec not found
return False
def main():
parser = argparse.ArgumentParser(
description="Search for P4 files recursively in provided directory "
"and if they have a JSON equivalent regenerates it using the bmv2 "
"compiler.")
parser.add_argument("--root", type=str, default=os.getcwd(),
help="Directory in which to recursively search for P4 "
"files. Default is current working directory.")
parser.add_argument("--compiler", type=str, default="p4c-bmv2",
help="bmv2 compiler to use. Default is p4c-bmv2.")
args = parser.parse_args()
if not check_compiler_exec(args.compiler):
print "Cannot use provided compiler"
sys.exit(1)
files = find_files(args.root)
for input_f, output_f in files:
print "Regenerating", input_f, "->", output_f
try:
cmd = [args.compiler, input_f, "--json", output_f, "--keep-pragmas"]
with open(os.devnull, 'w') as devnull:
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
print "ERROR"
print " ".join(cmd)
print out
except OSError:
print "FATAL ERROR"
sys.exit(2)
if __name__ == '__main__':
main()
|
Add tool to regenerate JSON files from P4 progs
|
Add tool to regenerate JSON files from P4 progs
tools/update_test_bmv2_jsons.py can be used to regenerate all the bmv2
JSON files from their P4 counterpart
|
Python
|
apache-2.0
|
p4lang/PI,p4lang/PI,p4lang/PI,p4lang/PI
|
Add tool to regenerate JSON files from P4 progs
tools/update_test_bmv2_jsons.py can be used to regenerate all the bmv2
JSON files from their P4 counterpart
|
#!/usr/bin/env python2
import argparse
import fnmatch
import os
import subprocess
import sys
def find_files(root):
files = []
for path_prefix, _, filenames in os.walk(root, followlinks=False):
for filename in fnmatch.filter(filenames, '*.p4'):
path = os.path.join(path_prefix, filename)
json_path = os.path.splitext(path)[0] + ".json"
if os.path.exists(json_path):
files.append([path, json_path])
return files
def check_compiler_exec(path):
try:
with open(os.devnull, 'w') as devnull:
subprocess.check_call([path, "--version"],
stdout=devnull, stderr=devnull)
return True
except subprocess.CalledProcessError:
return True
except OSError: # exec not found
return False
def main():
parser = argparse.ArgumentParser(
description="Search for P4 files recursively in provided directory "
"and if they have a JSON equivalent regenerates it using the bmv2 "
"compiler.")
parser.add_argument("--root", type=str, default=os.getcwd(),
help="Directory in which to recursively search for P4 "
"files. Default is current working directory.")
parser.add_argument("--compiler", type=str, default="p4c-bmv2",
help="bmv2 compiler to use. Default is p4c-bmv2.")
args = parser.parse_args()
if not check_compiler_exec(args.compiler):
print "Cannot use provided compiler"
sys.exit(1)
files = find_files(args.root)
for input_f, output_f in files:
print "Regenerating", input_f, "->", output_f
try:
cmd = [args.compiler, input_f, "--json", output_f, "--keep-pragmas"]
with open(os.devnull, 'w') as devnull:
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
print "ERROR"
print " ".join(cmd)
print out
except OSError:
print "FATAL ERROR"
sys.exit(2)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add tool to regenerate JSON files from P4 progs
tools/update_test_bmv2_jsons.py can be used to regenerate all the bmv2
JSON files from their P4 counterpart<commit_after>
|
#!/usr/bin/env python2
import argparse
import fnmatch
import os
import subprocess
import sys
def find_files(root):
files = []
for path_prefix, _, filenames in os.walk(root, followlinks=False):
for filename in fnmatch.filter(filenames, '*.p4'):
path = os.path.join(path_prefix, filename)
json_path = os.path.splitext(path)[0] + ".json"
if os.path.exists(json_path):
files.append([path, json_path])
return files
def check_compiler_exec(path):
try:
with open(os.devnull, 'w') as devnull:
subprocess.check_call([path, "--version"],
stdout=devnull, stderr=devnull)
return True
except subprocess.CalledProcessError:
return True
except OSError: # exec not found
return False
def main():
parser = argparse.ArgumentParser(
description="Search for P4 files recursively in provided directory "
"and if they have a JSON equivalent regenerates it using the bmv2 "
"compiler.")
parser.add_argument("--root", type=str, default=os.getcwd(),
help="Directory in which to recursively search for P4 "
"files. Default is current working directory.")
parser.add_argument("--compiler", type=str, default="p4c-bmv2",
help="bmv2 compiler to use. Default is p4c-bmv2.")
args = parser.parse_args()
if not check_compiler_exec(args.compiler):
print "Cannot use provided compiler"
sys.exit(1)
files = find_files(args.root)
for input_f, output_f in files:
print "Regenerating", input_f, "->", output_f
try:
cmd = [args.compiler, input_f, "--json", output_f, "--keep-pragmas"]
with open(os.devnull, 'w') as devnull:
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
print "ERROR"
print " ".join(cmd)
print out
except OSError:
print "FATAL ERROR"
sys.exit(2)
if __name__ == '__main__':
main()
|
Add tool to regenerate JSON files from P4 progs
tools/update_test_bmv2_jsons.py can be used to regenerate all the bmv2
JSON files from their P4 counterpart#!/usr/bin/env python2
import argparse
import fnmatch
import os
import subprocess
import sys
def find_files(root):
files = []
for path_prefix, _, filenames in os.walk(root, followlinks=False):
for filename in fnmatch.filter(filenames, '*.p4'):
path = os.path.join(path_prefix, filename)
json_path = os.path.splitext(path)[0] + ".json"
if os.path.exists(json_path):
files.append([path, json_path])
return files
def check_compiler_exec(path):
try:
with open(os.devnull, 'w') as devnull:
subprocess.check_call([path, "--version"],
stdout=devnull, stderr=devnull)
return True
except subprocess.CalledProcessError:
return True
except OSError: # exec not found
return False
def main():
parser = argparse.ArgumentParser(
description="Search for P4 files recursively in provided directory "
"and if they have a JSON equivalent regenerates it using the bmv2 "
"compiler.")
parser.add_argument("--root", type=str, default=os.getcwd(),
help="Directory in which to recursively search for P4 "
"files. Default is current working directory.")
parser.add_argument("--compiler", type=str, default="p4c-bmv2",
help="bmv2 compiler to use. Default is p4c-bmv2.")
args = parser.parse_args()
if not check_compiler_exec(args.compiler):
print "Cannot use provided compiler"
sys.exit(1)
files = find_files(args.root)
for input_f, output_f in files:
print "Regenerating", input_f, "->", output_f
try:
cmd = [args.compiler, input_f, "--json", output_f, "--keep-pragmas"]
with open(os.devnull, 'w') as devnull:
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
print "ERROR"
print " ".join(cmd)
print out
except OSError:
print "FATAL ERROR"
sys.exit(2)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add tool to regenerate JSON files from P4 progs
tools/update_test_bmv2_jsons.py can be used to regenerate all the bmv2
JSON files from their P4 counterpart<commit_after>#!/usr/bin/env python2
import argparse
import fnmatch
import os
import subprocess
import sys
def find_files(root):
files = []
for path_prefix, _, filenames in os.walk(root, followlinks=False):
for filename in fnmatch.filter(filenames, '*.p4'):
path = os.path.join(path_prefix, filename)
json_path = os.path.splitext(path)[0] + ".json"
if os.path.exists(json_path):
files.append([path, json_path])
return files
def check_compiler_exec(path):
try:
with open(os.devnull, 'w') as devnull:
subprocess.check_call([path, "--version"],
stdout=devnull, stderr=devnull)
return True
except subprocess.CalledProcessError:
return True
except OSError: # exec not found
return False
def main():
parser = argparse.ArgumentParser(
description="Search for P4 files recursively in provided directory "
"and if they have a JSON equivalent regenerates it using the bmv2 "
"compiler.")
parser.add_argument("--root", type=str, default=os.getcwd(),
help="Directory in which to recursively search for P4 "
"files. Default is current working directory.")
parser.add_argument("--compiler", type=str, default="p4c-bmv2",
help="bmv2 compiler to use. Default is p4c-bmv2.")
args = parser.parse_args()
if not check_compiler_exec(args.compiler):
print "Cannot use provided compiler"
sys.exit(1)
files = find_files(args.root)
for input_f, output_f in files:
print "Regenerating", input_f, "->", output_f
try:
cmd = [args.compiler, input_f, "--json", output_f, "--keep-pragmas"]
with open(os.devnull, 'w') as devnull:
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
print "ERROR"
print " ".join(cmd)
print out
except OSError:
print "FATAL ERROR"
sys.exit(2)
if __name__ == '__main__':
main()
|
|
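A throw-away check of the pairing logic in find_files above (assumed importable or pasted into the same session); only .p4 files that already have a sibling .json are returned, so the orphan below is skipped.

import os
import tempfile

root = tempfile.mkdtemp()
for name in ("switch.p4", "switch.json", "orphan.p4"):
    open(os.path.join(root, name), "w").close()

print(find_files(root))   # [['<root>/switch.p4', '<root>/switch.json']] -- orphan.p4 has no JSON sibling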
b9034ca499ae8c0366ac8cd5ee71641f39c0ffba
|
website/project/taxonomies/__init__.py
|
website/project/taxonomies/__init__.py
|
import json
import os
from website import settings
from modularodm import fields, Q
from modularodm.exceptions import NoResultsFound
from framework.mongo import (
ObjectId,
StoredObject,
utils as mongo_utils
)
@mongo_utils.unique_on(['id', '_id'])
class Subject(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
type = fields.StringField(required=True)
text = fields.StringField(required=True)
parent = fields.ForeignField('subject', index=True)
def ensure_taxonomies():
with open(
os.path.join(
settings.APP_PATH,
'website', 'static', 'plos_taxonomy.json'
)
) as fp:
taxonomy = json.load(fp)
# For now, only PLOS taxonomies are loaded, other types possibly considered in the future
type = 'plos'
for subject_path in taxonomy.get('data'):
subjects = subject_path.split('_')
text = subjects[-1]
parent = None
if len(subjects) > 1:
parent = subjects[-2]
try:
subject = Subject.find_one(
Q('text', 'eq', text) &
Q('type', 'eq', type)
)
except NoResultsFound:
subject = Subject(
type = type,
text = text,
parent = parent
)
else:
subject.type = type
subject.text = text
subject.parent = parent
subject.save()
|
Add taxonomy model and initiation
|
Add taxonomy model and initiation
|
Python
|
apache-2.0
|
Nesiehr/osf.io,binoculars/osf.io,pattisdr/osf.io,mattclark/osf.io,caseyrollins/osf.io,leb2dg/osf.io,baylee-d/osf.io,monikagrabowska/osf.io,monikagrabowska/osf.io,acshi/osf.io,icereval/osf.io,adlius/osf.io,leb2dg/osf.io,aaxelb/osf.io,chrisseto/osf.io,binoculars/osf.io,rdhyee/osf.io,alexschiller/osf.io,chennan47/osf.io,mattclark/osf.io,mluo613/osf.io,sloria/osf.io,caneruguz/osf.io,emetsger/osf.io,baylee-d/osf.io,mluo613/osf.io,saradbowman/osf.io,mfraezz/osf.io,laurenrevere/osf.io,caneruguz/osf.io,cslzchen/osf.io,rdhyee/osf.io,caneruguz/osf.io,cslzchen/osf.io,leb2dg/osf.io,cslzchen/osf.io,Nesiehr/osf.io,HalcyonChimera/osf.io,felliott/osf.io,samchrisinger/osf.io,TomBaxter/osf.io,mattclark/osf.io,adlius/osf.io,cwisecarver/osf.io,cslzchen/osf.io,pattisdr/osf.io,pattisdr/osf.io,rdhyee/osf.io,chrisseto/osf.io,acshi/osf.io,adlius/osf.io,hmoco/osf.io,Johnetordoff/osf.io,alexschiller/osf.io,monikagrabowska/osf.io,hmoco/osf.io,samchrisinger/osf.io,hmoco/osf.io,HalcyonChimera/osf.io,binoculars/osf.io,laurenrevere/osf.io,laurenrevere/osf.io,TomBaxter/osf.io,caneruguz/osf.io,erinspace/osf.io,mluo613/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,mfraezz/osf.io,samchrisinger/osf.io,cwisecarver/osf.io,crcresearch/osf.io,emetsger/osf.io,mluo613/osf.io,sloria/osf.io,Johnetordoff/osf.io,chennan47/osf.io,aaxelb/osf.io,baylee-d/osf.io,alexschiller/osf.io,CenterForOpenScience/osf.io,HalcyonChimera/osf.io,CenterForOpenScience/osf.io,rdhyee/osf.io,alexschiller/osf.io,felliott/osf.io,alexschiller/osf.io,monikagrabowska/osf.io,felliott/osf.io,acshi/osf.io,brianjgeiger/osf.io,acshi/osf.io,sloria/osf.io,icereval/osf.io,samchrisinger/osf.io,emetsger/osf.io,felliott/osf.io,chrisseto/osf.io,mluo613/osf.io,monikagrabowska/osf.io,crcresearch/osf.io,saradbowman/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,icereval/osf.io,caseyrollins/osf.io,caseyrollins/osf.io,Nesiehr/osf.io,hmoco/osf.io,mfraezz/osf.io,cwisecarver/osf.io,Nesiehr/osf.io,cwisecarver/osf.io,acshi/osf.io,brianjgeiger/osf.io,chennan47/osf.io,chrisseto/osf.io,crcresearch/osf.io,erinspace/osf.io,mfraezz/osf.io,leb2dg/osf.io,CenterForOpenScience/osf.io,erinspace/osf.io,aaxelb/osf.io,emetsger/osf.io,HalcyonChimera/osf.io,TomBaxter/osf.io,CenterForOpenScience/osf.io,adlius/osf.io,aaxelb/osf.io
|
Add taxonomy model and initiation
|
import json
import os
from website import settings
from modularodm import fields, Q
from modularodm.exceptions import NoResultsFound
from framework.mongo import (
ObjectId,
StoredObject,
utils as mongo_utils
)
@mongo_utils.unique_on(['id', '_id'])
class Subject(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
type = fields.StringField(required=True)
text = fields.StringField(required=True)
parent = fields.ForeignField('subject', index=True)
def ensure_taxonomies():
with open(
os.path.join(
settings.APP_PATH,
'website', 'static', 'plos_taxonomy.json'
)
) as fp:
taxonomy = json.load(fp)
# For now, only PLOS taxonomies are loaded, other types possibly considered in the future
type = 'plos'
for subject_path in taxonomy.get('data'):
subjects = subject_path.split('_')
text = subjects[-1]
parent = None
if len(subjects) > 1:
parent = subjects[-2]
try:
subject = Subject.find_one(
Q('text', 'eq', text) &
Q('type', 'eq', type)
)
except NoResultsFound:
subject = Subject(
type = type,
text = text,
parent = parent
)
else:
subject.type = type
subject.text = text
subject.parent = parent
subject.save()
|
<commit_before><commit_msg>Add taxonomy model and initiation<commit_after>
|
import json
import os
from website import settings
from modularodm import fields, Q
from modularodm.exceptions import NoResultsFound
from framework.mongo import (
ObjectId,
StoredObject,
utils as mongo_utils
)
@mongo_utils.unique_on(['id', '_id'])
class Subject(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
type = fields.StringField(required=True)
text = fields.StringField(required=True)
parent = fields.ForeignField('subject', index=True)
def ensure_taxonomies():
with open(
os.path.join(
settings.APP_PATH,
'website', 'static', 'plos_taxonomy.json'
)
) as fp:
taxonomy = json.load(fp)
# For now, only PLOS taxonomies are loaded, other types possibly considered in the future
type = 'plos'
for subject_path in taxonomy.get('data'):
subjects = subject_path.split('_')
text = subjects[-1]
parent = None
if len(subjects) > 1:
parent = subjects[-2]
try:
subject = Subject.find_one(
Q('text', 'eq', text) &
Q('type', 'eq', type)
)
except NoResultsFound:
subject = Subject(
type = type,
text = text,
parent = parent
)
else:
subject.type = type
subject.text = text
subject.parent = parent
subject.save()
|
Add taxonomy model and initiationimport json
import os
from website import settings
from modularodm import fields, Q
from modularodm.exceptions import NoResultsFound
from framework.mongo import (
ObjectId,
StoredObject,
utils as mongo_utils
)
@mongo_utils.unique_on(['id', '_id'])
class Subject(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
type = fields.StringField(required=True)
text = fields.StringField(required=True)
parent = fields.ForeignField('subject', index=True)
def ensure_taxonomies():
with open(
os.path.join(
settings.APP_PATH,
'website', 'static', 'plos_taxonomy.json'
)
) as fp:
taxonomy = json.load(fp)
# For now, only PLOS taxonomies are loaded, other types possibly considered in the future
type = 'plos'
for subject_path in taxonomy.get('data'):
subjects = subject_path.split('_')
text = subjects[-1]
parent = None
if len(subjects) > 1:
parent = subjects[-2]
try:
subject = Subject.find_one(
Q('text', 'eq', text) &
Q('type', 'eq', type)
)
except NoResultsFound:
subject = Subject(
type = type,
text = text,
parent = parent
)
else:
subject.type = type
subject.text = text
subject.parent = parent
subject.save()
|
<commit_before><commit_msg>Add taxonomy model and initiation<commit_after>import json
import os
from website import settings
from modularodm import fields, Q
from modularodm.exceptions import NoResultsFound
from framework.mongo import (
ObjectId,
StoredObject,
utils as mongo_utils
)
@mongo_utils.unique_on(['id', '_id'])
class Subject(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
type = fields.StringField(required=True)
text = fields.StringField(required=True)
parent = fields.ForeignField('subject', index=True)
def ensure_taxonomies():
with open(
os.path.join(
settings.APP_PATH,
'website', 'static', 'plos_taxonomy.json'
)
) as fp:
taxonomy = json.load(fp)
# For now, only PLOS taxonomies are loaded, other types possibly considered in the future
type = 'plos'
for subject_path in taxonomy.get('data'):
subjects = subject_path.split('_')
text = subjects[-1]
parent = None
if len(subjects) > 1:
parent = subjects[-2]
try:
subject = Subject.find_one(
Q('text', 'eq', text) &
Q('type', 'eq', type)
)
except NoResultsFound:
subject = Subject(
type = type,
text = text,
parent = parent
)
else:
subject.type = type
subject.text = text
subject.parent = parent
subject.save()
|
|
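The file shape ensure_taxonomies expects, reconstructed from the parsing above: plos_taxonomy.json holds a 'data' list of underscore-delimited subject paths, where the last segment becomes text and the segment before it the parent. The subject names here are illustrative only.

taxonomy = {
    "data": [
        "Biology and life sciences",
        "Biology and life sciences_Genetics",
        "Biology and life sciences_Genetics_Genomics",
    ]
}
# The last entry yields text='Genomics' with parent='Genetics'; the first entry has no parent.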
7491f500c75850c094158b4621fdef602bce3d27
|
benchmarks/benchmarks/benchmark_custom_generators.py
|
benchmarks/benchmarks/benchmark_custom_generators.py
|
from tohu.v6.primitive_generators import Integer, HashDigest, FakerGenerator
from tohu.v6.derived_generators import Apply, Lookup, SelectOne, SelectMultiple
from tohu.v6.custom_generator import CustomGenerator
from .common import NUM_PARAMS
mapping = {
'A': ['a', 'aa', 'aaa', 'aaaa', 'aaaaa'],
'B': ['b', 'bb', 'bbb', 'bbbb', 'bbbbb'],
'C': ['c', 'cc', 'ccc', 'cccc', 'ccccc'],
'D': ['d', 'dd', 'ddd', 'dddd', 'ddddd'],
'E': ['e', 'ee', 'eee', 'eeee', 'eeeee'],
'F': ['f', 'ff', 'fff', 'ffff', 'fffff'],
'G': ['g', 'gg', 'ggg', 'gggg', 'ggggg'],
}
class Quux1Generator(CustomGenerator):
aa = Integer(100, 200)
bb = HashDigest(length=8)
cc = FakerGenerator(method="name")
class Quux2Generator(CustomGenerator):
aa = SelectOne(['A', 'B', 'C', 'D', 'E', 'F', 'G'])
ll = Lookup(key=aa, mapping=mapping)
nn = Integer(1, 5)
bb = SelectMultiple(ll, num=nn)
class Quux3Generator(CustomGenerator):
bb = SelectMultiple(Lookup(SelectOne(['A', 'B', 'C', 'D', 'E', 'F', 'G']), mapping), num=Integer(1, 5))
class TimeBasicCustomGenerator:
params = NUM_PARAMS
def setup(self, num):
self.g1 = Quux1Generator()
def time_basic_custom_generator(self, num):
self.g1.generate(num=num)
class TimeComplexCustomGeneratorWithExplicitlyNamedFields:
params = NUM_PARAMS
def setup(self, num):
self.g2 = Quux2Generator()
def time_complex_custom_generator_with_explicitly_named_fields(self, num):
self.g2.generate(num=num)
class TimeComplexCustomGeneratorWithAnonymousFields:
params = NUM_PARAMS
def setup(self, num):
self.g3 = Quux3Generator()
def time_complex_custom_generator_with_anonymous_fields(self, num):
self.g3.generate(num=num)
|
Add benchmarks for custom generators
|
Add benchmarks for custom generators
|
Python
|
mit
|
maxalbert/tohu
|
Add benchmarks for custom generators
|
from tohu.v6.primitive_generators import Integer, HashDigest, FakerGenerator
from tohu.v6.derived_generators import Apply, Lookup, SelectOne, SelectMultiple
from tohu.v6.custom_generator import CustomGenerator
from .common import NUM_PARAMS
mapping = {
'A': ['a', 'aa', 'aaa', 'aaaa', 'aaaaa'],
'B': ['b', 'bb', 'bbb', 'bbbb', 'bbbbb'],
'C': ['c', 'cc', 'ccc', 'cccc', 'ccccc'],
'D': ['d', 'dd', 'ddd', 'dddd', 'ddddd'],
'E': ['e', 'ee', 'eee', 'eeee', 'eeeee'],
'F': ['f', 'ff', 'fff', 'ffff', 'fffff'],
'G': ['g', 'gg', 'ggg', 'gggg', 'ggggg'],
}
class Quux1Generator(CustomGenerator):
aa = Integer(100, 200)
bb = HashDigest(length=8)
cc = FakerGenerator(method="name")
class Quux2Generator(CustomGenerator):
aa = SelectOne(['A', 'B', 'C', 'D', 'E', 'F', 'G'])
ll = Lookup(key=aa, mapping=mapping)
nn = Integer(1, 5)
bb = SelectMultiple(ll, num=nn)
class Quux3Generator(CustomGenerator):
bb = SelectMultiple(Lookup(SelectOne(['A', 'B', 'C', 'D', 'E', 'F', 'G']), mapping), num=Integer(1, 5))
class TimeBasicCustomGenerator:
params = NUM_PARAMS
def setup(self, num):
self.g1 = Quux1Generator()
def time_basic_custom_generator(self, num):
self.g1.generate(num=num)
class TimeComplexCustomGeneratorWithExplicitlyNamedFields:
params = NUM_PARAMS
def setup(self, num):
self.g2 = Quux2Generator()
def time_complex_custom_generator_with_explicitly_named_fields(self, num):
self.g2.generate(num=num)
class TimeComplexCustomGeneratorWithAnonymousFields:
params = NUM_PARAMS
def setup(self, num):
self.g3 = Quux3Generator()
def time_complex_custom_generator_with_anonymous_fields(self, num):
self.g3.generate(num=num)
|
<commit_before><commit_msg>Add benchmarks for custom generators<commit_after>
|
from tohu.v6.primitive_generators import Integer, HashDigest, FakerGenerator
from tohu.v6.derived_generators import Apply, Lookup, SelectOne, SelectMultiple
from tohu.v6.custom_generator import CustomGenerator
from .common import NUM_PARAMS
mapping = {
'A': ['a', 'aa', 'aaa', 'aaaa', 'aaaaa'],
'B': ['b', 'bb', 'bbb', 'bbbb', 'bbbbb'],
'C': ['c', 'cc', 'ccc', 'cccc', 'ccccc'],
'D': ['d', 'dd', 'ddd', 'dddd', 'ddddd'],
'E': ['e', 'ee', 'eee', 'eeee', 'eeeee'],
'F': ['f', 'ff', 'fff', 'ffff', 'fffff'],
'G': ['g', 'gg', 'ggg', 'gggg', 'ggggg'],
}
class Quux1Generator(CustomGenerator):
aa = Integer(100, 200)
bb = HashDigest(length=8)
cc = FakerGenerator(method="name")
class Quux2Generator(CustomGenerator):
aa = SelectOne(['A', 'B', 'C', 'D', 'E', 'F', 'G'])
ll = Lookup(key=aa, mapping=mapping)
nn = Integer(1, 5)
bb = SelectMultiple(ll, num=nn)
class Quux3Generator(CustomGenerator):
bb = SelectMultiple(Lookup(SelectOne(['A', 'B', 'C', 'D', 'E', 'F', 'G']), mapping), num=Integer(1, 5))
class TimeBasicCustomGenerator:
params = NUM_PARAMS
def setup(self, num):
self.g1 = Quux1Generator()
def time_basic_custom_generator(self, num):
self.g1.generate(num=num)
class TimeComplexCustomGeneratorWithExplicitlyNamedFields:
params = NUM_PARAMS
def setup(self, num):
self.g2 = Quux2Generator()
def time_complex_custom_generator_with_explicitly_named_fields(self, num):
self.g2.generate(num=num)
class TimeComplexCustomGeneratorWithAnonymousFields:
params = NUM_PARAMS
def setup(self, num):
self.g3 = Quux3Generator()
def time_complex_custom_generator_with_anonymous_fields(self, num):
self.g3.generate(num=num)
|
Add benchmarks for custom generatorsfrom tohu.v6.primitive_generators import Integer, HashDigest, FakerGenerator
from tohu.v6.derived_generators import Apply, Lookup, SelectOne, SelectMultiple
from tohu.v6.custom_generator import CustomGenerator
from .common import NUM_PARAMS
mapping = {
'A': ['a', 'aa', 'aaa', 'aaaa', 'aaaaa'],
'B': ['b', 'bb', 'bbb', 'bbbb', 'bbbbb'],
'C': ['c', 'cc', 'ccc', 'cccc', 'ccccc'],
'D': ['d', 'dd', 'ddd', 'dddd', 'ddddd'],
'E': ['e', 'ee', 'eee', 'eeee', 'eeeee'],
'F': ['f', 'ff', 'fff', 'ffff', 'fffff'],
'G': ['g', 'gg', 'ggg', 'gggg', 'ggggg'],
}
class Quux1Generator(CustomGenerator):
aa = Integer(100, 200)
bb = HashDigest(length=8)
cc = FakerGenerator(method="name")
class Quux2Generator(CustomGenerator):
aa = SelectOne(['A', 'B', 'C', 'D', 'E', 'F', 'G'])
ll = Lookup(key=aa, mapping=mapping)
nn = Integer(1, 5)
bb = SelectMultiple(ll, num=nn)
class Quux3Generator(CustomGenerator):
bb = SelectMultiple(Lookup(SelectOne(['A', 'B', 'C', 'D', 'E', 'F', 'G']), mapping), num=Integer(1, 5))
class TimeBasicCustomGenerator:
params = NUM_PARAMS
def setup(self, num):
self.g1 = Quux1Generator()
def time_basic_custom_generator(self, num):
self.g1.generate(num=num)
class TimeComplexCustomGeneratorWithExplicitlyNamedFields:
params = NUM_PARAMS
def setup(self, num):
self.g2 = Quux2Generator()
def time_complex_custom_generator_with_explicitly_named_fields(self, num):
self.g2.generate(num=num)
class TimeComplexCustomGeneratorWithAnonymousFields:
params = NUM_PARAMS
def setup(self, num):
self.g3 = Quux3Generator()
def time_complex_custom_generator_with_anonymous_fields(self, num):
self.g3.generate(num=num)
|
<commit_before><commit_msg>Add benchmarks for custom generators<commit_after>from tohu.v6.primitive_generators import Integer, HashDigest, FakerGenerator
from tohu.v6.derived_generators import Apply, Lookup, SelectOne, SelectMultiple
from tohu.v6.custom_generator import CustomGenerator
from .common import NUM_PARAMS
mapping = {
'A': ['a', 'aa', 'aaa', 'aaaa', 'aaaaa'],
'B': ['b', 'bb', 'bbb', 'bbbb', 'bbbbb'],
'C': ['c', 'cc', 'ccc', 'cccc', 'ccccc'],
'D': ['d', 'dd', 'ddd', 'dddd', 'ddddd'],
'E': ['e', 'ee', 'eee', 'eeee', 'eeeee'],
'F': ['f', 'ff', 'fff', 'ffff', 'fffff'],
'G': ['g', 'gg', 'ggg', 'gggg', 'ggggg'],
}
class Quux1Generator(CustomGenerator):
aa = Integer(100, 200)
bb = HashDigest(length=8)
cc = FakerGenerator(method="name")
class Quux2Generator(CustomGenerator):
aa = SelectOne(['A', 'B', 'C', 'D', 'E', 'F', 'G'])
ll = Lookup(key=aa, mapping=mapping)
nn = Integer(1, 5)
bb = SelectMultiple(ll, num=nn)
class Quux3Generator(CustomGenerator):
bb = SelectMultiple(Lookup(SelectOne(['A', 'B', 'C', 'D', 'E', 'F', 'G']), mapping), num=Integer(1, 5))
class TimeBasicCustomGenerator:
params = NUM_PARAMS
def setup(self, num):
self.g1 = Quux1Generator()
def time_basic_custom_generator(self, num):
self.g1.generate(num=num)
class TimeComplexCustomGeneratorWithExplicitlyNamedFields:
params = NUM_PARAMS
def setup(self, num):
self.g2 = Quux2Generator()
def time_complex_custom_generator_with_explicitly_named_fields(self, num):
self.g2.generate(num=num)
class TimeComplexCustomGeneratorWithAnonymousFields:
params = NUM_PARAMS
def setup(self, num):
self.g3 = Quux3Generator()
def time_complex_custom_generator_with_anonymous_fields(self, num):
self.g3.generate(num=num)
|
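A minimal usage sketch for the generators benchmarked above; it is not part of the committed benchmark file and assumes the tohu.v6 API shown there, where a CustomGenerator subclass exposes generate(num=...) and returns an iterable of records.
from tohu.v6.primitive_generators import Integer, HashDigest
from tohu.v6.custom_generator import CustomGenerator
class DemoGenerator(CustomGenerator):
    # same field style as Quux1Generator above
    aa = Integer(100, 200)
    bb = HashDigest(length=8)
g = DemoGenerator()
items = g.generate(num=3)   # three records, each with fields aa and bb
for item in items:
    print(item)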
|
48eb4604673513b771b6def05a1652ae1b66d4d0
|
scripts/add_ssm_config.py
|
scripts/add_ssm_config.py
|
#!/usr/bin/env python
# -*- encoding: utf-8
"""
Store a config variable in SSM under the key structure
/{project_id}/config/{label}/{config_key}
This script can store a regular config key (unencrypted) or an encrypted key.
"""
import sys
import boto3
import click
ssm_client = boto3.client("ssm")
@click.command()
@click.option("--project_id", prompt="What is the project ID?", required=True)
@click.option("--label", default="prod", required=True)
@click.option("--config_key", prompt="What is the config key?", required=True)
@click.option("--config_value", prompt="What is the config value?", required=True)
def store_config_key(project_id, label, config_key, config_value):
ssm_name = f"/{project_id}/config/{label}/{config_key}"
resp = ssm_client.put_parameter(
Name=ssm_name,
Description=f"Config value populated by {__file__}",
Value=config_value,
Type="String",
Overwrite=True,
)
if resp["ResponseMetadata"]["HTTPStatusCode"] == 200:
print(f"{ssm_name} -> {config_value!r}")
else:
print(f"Unexpected error: {resp}")
sys.exit(1)
if __name__ == "__main__":
store_config_key()
|
Add a script for storing a config variable
|
Add a script for storing a config variable
|
Python
|
mit
|
wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api
|
Add a script for storing a config variable
|
#!/usr/bin/env python
# -*- encoding: utf-8
"""
Store a config variable in SSM under the key structure
/{project_id}/config/{label}/{config_key}
This script can store a regular config key (unencrypted) or an encrypted key.
"""
import sys
import boto3
import click
ssm_client = boto3.client("ssm")
@click.command()
@click.option("--project_id", prompt="What is the project ID?", required=True)
@click.option("--label", default="prod", required=True)
@click.option("--config_key", prompt="What is the config key?", required=True)
@click.option("--config_value", prompt="What is the config value?", required=True)
def store_config_key(project_id, label, config_key, config_value):
ssm_name = f"/{project_id}/config/{label}/{config_key}"
resp = ssm_client.put_parameter(
Name=ssm_name,
Description=f"Config value populated by {__file__}",
Value=config_value,
Type="String",
Overwrite=True,
)
if resp["ResponseMetadata"]["HTTPStatusCode"] == 200:
print(f"{ssm_name} -> {config_value!r}")
else:
print(f"Unexpected error: {resp}")
sys.exit(1)
if __name__ == "__main__":
store_config_key()
|
<commit_before><commit_msg>Add a script for storing a config variable<commit_after>
|
#!/usr/bin/env python
# -*- encoding: utf-8
"""
Store a config variable in SSM under the key structure
/{project_id}/config/{label}/{config_key}
This script can store a regular config key (unencrypted) or an encrypted key.
"""
import sys
import boto3
import click
ssm_client = boto3.client("ssm")
@click.command()
@click.option("--project_id", prompt="What is the project ID?", required=True)
@click.option("--label", default="prod", required=True)
@click.option("--config_key", prompt="What is the config key?", required=True)
@click.option("--config_value", prompt="What is the config value?", required=True)
def store_config_key(project_id, label, config_key, config_value):
ssm_name = f"/{project_id}/config/{label}/{config_key}"
resp = ssm_client.put_parameter(
Name=ssm_name,
Description=f"Config value populated by {__file__}",
Value=config_value,
Type="String",
Overwrite=True,
)
if resp["ResponseMetadata"]["HTTPStatusCode"] == 200:
print(f"{ssm_name} -> {config_value!r}")
else:
print(f"Unexpected error: {resp}")
sys.exit(1)
if __name__ == "__main__":
store_config_key()
|
Add a script for storing a config variable#!/usr/bin/env python
# -*- encoding: utf-8
"""
Store a config variable in SSM under the key structure
/{project_id}/config/{label}/{config_key}
This script can store a regular config key (unencrypted) or an encrypted key.
"""
import sys
import boto3
import click
ssm_client = boto3.client("ssm")
@click.command()
@click.option("--project_id", prompt="What is the project ID?", required=True)
@click.option("--label", default="prod", required=True)
@click.option("--config_key", prompt="What is the config key?", required=True)
@click.option("--config_value", prompt="What is the config value?", required=True)
def store_config_key(project_id, label, config_key, config_value):
ssm_name = f"/{project_id}/config/{label}/{config_key}"
resp = ssm_client.put_parameter(
Name=ssm_name,
Description=f"Config value populated by {__file__}",
Value=config_value,
Type="String",
Overwrite=True,
)
if resp["ResponseMetadata"]["HTTPStatusCode"] == 200:
print(f"{ssm_name} -> {config_value!r}")
else:
print(f"Unexpected error: {resp}")
sys.exit(1)
if __name__ == "__main__":
store_config_key()
|
<commit_before><commit_msg>Add a script for storing a config variable<commit_after>#!/usr/bin/env python
# -*- encoding: utf-8
"""
Store a config variable in SSM under the key structure
/{project_id}/config/{label}/{config_key}
This script can store a regular config key (unencrypted) or an encrypted key.
"""
import sys
import boto3
import click
ssm_client = boto3.client("ssm")
@click.command()
@click.option("--project_id", prompt="What is the project ID?", required=True)
@click.option("--label", default="prod", required=True)
@click.option("--config_key", prompt="What is the config key?", required=True)
@click.option("--config_value", prompt="What is the config value?", required=True)
def store_config_key(project_id, label, config_key, config_value):
ssm_name = f"/{project_id}/config/{label}/{config_key}"
resp = ssm_client.put_parameter(
Name=ssm_name,
Description=f"Config value populated by {__file__}",
Value=config_value,
Type="String",
Overwrite=True,
)
if resp["ResponseMetadata"]["HTTPStatusCode"] == 200:
print(f"{ssm_name} -> {config_value!r}")
else:
print(f"Unexpected error: {resp}")
sys.exit(1)
if __name__ == "__main__":
store_config_key()
|
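A small companion sketch (not part of the commit) showing how a value written by the script above could be read back; it assumes the same /{project_id}/config/{label}/{config_key} naming scheme and working AWS credentials.
import boto3
ssm_client = boto3.client("ssm")
def read_config_key(project_id, label, config_key):
    name = f"/{project_id}/config/{label}/{config_key}"
    # GetParameter returns {"Parameter": {"Value": ...}} on success
    resp = ssm_client.get_parameter(Name=name)
    return resp["Parameter"]["Value"]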
|
50dded21e316b6b8e6cb7800b17ed7bd92624946
|
xml_to_json.py
|
xml_to_json.py
|
#!/usr/bin/env python
import xml.etree.cElementTree as ET
from sys import argv
input_file = argv[1]
NAMESPACE = "{http://www.mediawiki.org/xml/export-0.10/}"
with open(input_file) as open_file:
in_page = False
for _, elem in ET.iterparse(open_file):
# Pull out each revision
if elem.tag == NAMESPACE + "revision":
# Look at each subtag, if it is the 'sha1' tag, print out the text content
for child in elem:
if child.tag == NAMESPACE + "sha1":
print child.text
# Clear the child to free up memory
child.clear()
# Now clear the parent once we've finished with it to further clean up
elem.clear()
|
Add toy example of reading a large XML file
|
Add toy example of reading a large XML file
|
Python
|
apache-2.0
|
tiffanyj41/hermes,tiffanyj41/hermes,tiffanyj41/hermes,tiffanyj41/hermes
|
Add toy example of reading a large XML file
|
#!/usr/bin/env python
import xml.etree.cElementTree as ET
from sys import argv
input_file = argv[1]
NAMESPACE = "{http://www.mediawiki.org/xml/export-0.10/}"
with open(input_file) as open_file:
in_page = False
for _, elem in ET.iterparse(open_file):
# Pull out each revision
if elem.tag == NAMESPACE + "revision":
# Look at each subtag, if it is the 'sha1' tag, print out the text content
for child in elem:
if child.tag == NAMESPACE + "sha1":
print child.text
# Clear the child to free up memory
child.clear()
# Now clear the parent once we've finished with it to further clean up
elem.clear()
|
<commit_before><commit_msg>Add toy example of reading a large XML file<commit_after>
|
#!/usr/bin/env python
import xml.etree.cElementTree as ET
from sys import argv
input_file = argv[1]
NAMESPACE = "{http://www.mediawiki.org/xml/export-0.10/}"
with open(input_file) as open_file:
in_page = False
for _, elem in ET.iterparse(open_file):
# Pull out each revision
if elem.tag == NAMESPACE + "revision":
# Look at each subtag, if it is the 'sha1' tag, print out the text content
for child in elem:
if child.tag == NAMESPACE + "sha1":
print child.text
# Clear the child to free up memory
child.clear()
# Now clear the parent once we've finished with it to further clean up
elem.clear()
|
Add toy example of reading a large XML file#!/usr/bin/env python
import xml.etree.cElementTree as ET
from sys import argv
input_file = argv[1]
NAMESPACE = "{http://www.mediawiki.org/xml/export-0.10/}"
with open(input_file) as open_file:
in_page = False
for _, elem in ET.iterparse(open_file):
# Pull out each revision
if elem.tag == NAMESPACE + "revision":
# Look at each subtag, if it is the 'sha1' tag, print out the text content
for child in elem:
if child.tag == NAMESPACE + "sha1":
print child.text
# Clear the child to free up memory
child.clear()
# Now clear the parent once we've finished with it to further clean up
elem.clear()
|
<commit_before><commit_msg>Add toy example of reading a large XML file<commit_after>#!/usr/bin/env python
import xml.etree.cElementTree as ET
from sys import argv
input_file = argv[1]
NAMESPACE = "{http://www.mediawiki.org/xml/export-0.10/}"
with open(input_file) as open_file:
in_page = False
for _, elem in ET.iterparse(open_file):
# Pull out each revision
if elem.tag == NAMESPACE + "revision":
# Look at each subtag, if it is the 'sha1' tag, print out the text content
for child in elem:
if child.tag == NAMESPACE + "sha1":
print child.text
# Clear the child to free up memory
child.clear()
# Now clear the parent once we've finished with it to further clean up
elem.clear()
|
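A self-contained sketch of the same iterparse pattern on a tiny in-memory document, adapted to Python 3 print syntax, so the memory-saving clear() idiom can be tried without a full MediaWiki dump.
import io
import xml.etree.ElementTree as ET
NAMESPACE = "{http://www.mediawiki.org/xml/export-0.10/}"
SAMPLE = b"""<mediawiki xmlns="http://www.mediawiki.org/xml/export-0.10/">
<page><revision><sha1>abc123</sha1></revision></page>
</mediawiki>"""
for _, elem in ET.iterparse(io.BytesIO(SAMPLE)):
    if elem.tag == NAMESPACE + "revision":
        for child in elem:
            if child.tag == NAMESPACE + "sha1":
                print(child.text)  # prints: abc123
            child.clear()          # free the child element
        elem.clear()               # free the parent once processed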
|
63f9f87a3f04cb03c1e286cc5b6d49306f90e352
|
python/004_largest_palindrome_product/palindrome_product.py
|
python/004_largest_palindrome_product/palindrome_product.py
|
from itertools import combinations_with_replacement
from operator import mul
three_digit_numbers = tuple(range(100, 1000))
combinations = combinations_with_replacement(three_digit_numbers, 2)
products = [mul(*x) for x in combinations]
max_palindrome = max([x for x in products if str(x)[::-1] == str(x)])
|
Add solution for problem 4
|
Add solution for problem 4
|
Python
|
bsd-3-clause
|
gidj/euler,gidj/euler
|
Add solution for problem 4
|
from itertools import combinations_with_replacement
from operator import mul
three_digit_numbers = tuple(range(100, 1000))
combinations = combinations_with_replacement(three_digit_numbers, 2)
products = [mul(*x) for x in combinations]
max_palindrome = max([x for x in products if str(x)[::-1] == str(x)])
|
<commit_before><commit_msg>Add solution for problem 4<commit_after>
|
from itertools import combinations_with_replacement
from operator import mul
three_digit_numbers = tuple(range(100, 1000))
combinations = combinations_with_replacement(three_digit_numbers, 2)
products = [mul(*x) for x in combinations]
max_palindrome = max([x for x in products if str(x)[::-1] == str(x)])
|
Add solution for problem 4from itertools import combinations_with_replacement
from operator import mul
three_digit_numbers = tuple(range(100, 1000))
combinations = combinations_with_replacement(three_digit_numbers, 2)
products = [mul(*x) for x in combinations]
max_palindrome = max([x for x in products if str(x)[::-1] == str(x)])
|
<commit_before><commit_msg>Add solution for problem 4<commit_after>from itertools import combinations_with_replacement
from operator import mul
three_digit_numbers = tuple(range(100, 1000))
combinations = combinations_with_replacement(three_digit_numbers, 2)
products = [mul(*x) for x in combinations]
max_palindrome = max([x for x in products if str(x)[::-1] == str(x)])
|
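An equivalent plain-loop sketch of the same search, handy for cross-checking the itertools version above; the expected result for Project Euler problem 4 is 906609 (913 x 993).
largest = 0
for a in range(100, 1000):
    for b in range(a, 1000):  # b >= a so each pair is checked only once
        product = a * b
        if product > largest and str(product) == str(product)[::-1]:
            largest = product
print(largest)  # 906609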
|
d410fb26d3fb8bbd843234e90891bee5a5fff7e7
|
halaqat/settings/local_settings.py
|
halaqat/settings/local_settings.py
|
from .base_settings import *
DEBUG = True
LANGUAGE_CODE = 'en'
TIME_FORMAT = [
'%I:%M %p',
'%H:%M %p',
]
TIME_INPUT_FORMATS = [
'%I:%M %p',
'%H:%M %p'
]
|
Add local dev settings module
|
Add local dev settings module
|
Python
|
mit
|
EmadMokhtar/halaqat,EmadMokhtar/halaqat,EmadMokhtar/halaqat
|
Add local dev settings module
|
from .base_settings import *
DEBUG = True
LANGUAGE_CODE = 'en'
TIME_FORMAT = [
'%I:%M %p',
'%H:%M %p',
]
TIME_INPUT_FORMATS = [
'%I:%M %p',
'%H:%M %p'
]
|
<commit_before><commit_msg>Add local dev settings module<commit_after>
|
from .base_settings import *
DEBUG = True
LANGUAGE_CODE = 'en'
TIME_FORMAT = [
'%I:%M %p',
'%H:%M %p',
]
TIME_INPUT_FORMATS = [
'%I:%M %p',
'%H:%M %p'
]
|
Add local dev settings modulefrom .base_settings import *
DEBUG = True
LANGUAGE_CODE = 'en'
TIME_FORMAT = [
'%I:%M %p',
'%H:%M %p',
]
TIME_INPUT_FORMATS = [
'%I:%M %p',
'%H:%M %p'
]
|
<commit_before><commit_msg>Add local dev settings module<commit_after>from .base_settings import *
DEBUG = True
LANGUAGE_CODE = 'en'
TIME_FORMAT = [
'%I:%M %p',
'%H:%M %p',
]
TIME_INPUT_FORMATS = [
'%I:%M %p',
'%H:%M %p'
]
|
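A hedged sketch of pointing Django at this settings module for local development; it assumes the halaqat package is importable and is not part of the committed file.
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "halaqat.settings.local_settings")
import django
django.setup()  # loads the local settings module above
from django.conf import settings
print(settings.DEBUG, settings.LANGUAGE_CODE)  # True en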
|
dc7cf288c5c5c9733a59184770fbaa26db036833
|
tests/unit_project/test_core/test_custom_urls.py
|
tests/unit_project/test_core/test_custom_urls.py
|
# -*- coding: utf-8 -*-
from djangosanetesting import UnitTestCase
from django.http import Http404
from ella.core.custom_urls import DetailDispatcher
# dummy functions to register as views
def view(request, bits, context):
return request, bits, context
def custom_view(request, context):
return request, context
class TestCustomUrlsDispatcher(UnitTestCase):
def setUp(self):
self.dispatcher = DetailDispatcher()
self.context = {'object': self}
self.request = object()
def test_no_extension(self):
self.assert_raises(Http404, self.dispatcher._get_view, 'start', self)
def test_register_global_extension(self):
self.dispatcher.register('start', view)
self.assert_equals(view, self.dispatcher._get_view('start', self))
def test_register_extension_for_model(self):
self.dispatcher.register('another_start', view, model=self.__class__)
self.assert_equals(view, self.dispatcher._get_view('another_start', self.__class__))
def test_register_extension_for_model_not_work_for_other_models(self):
self.dispatcher.register('start', view, model=self.__class__)
self.assert_raises(Http404, self.dispatcher._get_view, 'start', object())
def test_no_custom_view(self):
self.assert_raises(Http404, self.dispatcher._get_custom_detail_view, self.__class__)
def test_register_custom_view(self):
self.dispatcher.register_custom_detail(self.__class__, custom_view)
self.assert_equals(custom_view, self.dispatcher._get_custom_detail_view(self.__class__))
|
Add basic tests for custom_urls system
|
Add basic tests for custom_urls system
|
Python
|
bsd-3-clause
|
whalerock/ella,petrlosa/ella,WhiskeyMedia/ella,whalerock/ella,whalerock/ella,MichalMaM/ella,petrlosa/ella,MichalMaM/ella,ella/ella,WhiskeyMedia/ella
|
Add basic tests for custom_urls system
|
# -*- coding: utf-8 -*-
from djangosanetesting import UnitTestCase
from django.http import Http404
from ella.core.custom_urls import DetailDispatcher
# dummy functions to register as views
def view(request, bits, context):
return request, bits, context
def custom_view(request, context):
return request, context
class TestCustomUrlsDispatcher(UnitTestCase):
def setUp(self):
self.dispatcher = DetailDispatcher()
self.context = {'object': self}
self.request = object()
def test_no_extension(self):
self.assert_raises(Http404, self.dispatcher._get_view, 'start', self)
def test_register_global_extension(self):
self.dispatcher.register('start', view)
self.assert_equals(view, self.dispatcher._get_view('start', self))
def test_register_extension_for_model(self):
self.dispatcher.register('another_start', view, model=self.__class__)
self.assert_equals(view, self.dispatcher._get_view('another_start', self.__class__))
def test_register_extension_for_model_not_work_for_other_models(self):
self.dispatcher.register('start', view, model=self.__class__)
self.assert_raises(Http404, self.dispatcher._get_view, 'start', object())
def test_no_custom_view(self):
self.assert_raises(Http404, self.dispatcher._get_custom_detail_view, self.__class__)
def test_register_custom_view(self):
self.dispatcher.register_custom_detail(self.__class__, custom_view)
self.assert_equals(custom_view, self.dispatcher._get_custom_detail_view(self.__class__))
|
<commit_before><commit_msg>Add basic tests for custom_urls system<commit_after>
|
# -*- coding: utf-8 -*-
from djangosanetesting import UnitTestCase
from django.http import Http404
from ella.core.custom_urls import DetailDispatcher
# dummy functions to register as views
def view(request, bits, context):
return request, bits, context
def custom_view(request, context):
return request, context
class TestCustomUrlsDispatcher(UnitTestCase):
def setUp(self):
self.dispatcher = DetailDispatcher()
self.context = {'object': self}
self.request = object()
def test_no_extension(self):
self.assert_raises(Http404, self.dispatcher._get_view, 'start', self)
def test_register_global_extension(self):
self.dispatcher.register('start', view)
self.assert_equals(view, self.dispatcher._get_view('start', self))
def test_register_extension_for_model(self):
self.dispatcher.register('another_start', view, model=self.__class__)
self.assert_equals(view, self.dispatcher._get_view('another_start', self.__class__))
def test_register_extension_for_model_not_work_for_other_models(self):
self.dispatcher.register('start', view, model=self.__class__)
self.assert_raises(Http404, self.dispatcher._get_view, 'start', object())
def test_no_custom_view(self):
self.assert_raises(Http404, self.dispatcher._get_custom_detail_view, self.__class__)
def test_register_custom_view(self):
self.dispatcher.register_custom_detail(self.__class__, custom_view)
self.assert_equals(custom_view, self.dispatcher._get_custom_detail_view(self.__class__))
|
Add basic tests for custom_urls system# -*- coding: utf-8 -*-
from djangosanetesting import UnitTestCase
from django.http import Http404
from ella.core.custom_urls import DetailDispatcher
# dummy functions to register as views
def view(request, bits, context):
return request, bits, context
def custom_view(request, context):
return request, context
class TestCustomUrlsDispatcher(UnitTestCase):
def setUp(self):
self.dispatcher = DetailDispatcher()
self.context = {'object': self}
self.request = object()
def test_no_extension(self):
self.assert_raises(Http404, self.dispatcher._get_view, 'start', self)
def test_register_global_extension(self):
self.dispatcher.register('start', view)
self.assert_equals(view, self.dispatcher._get_view('start', self))
def test_register_extension_for_model(self):
self.dispatcher.register('another_start', view, model=self.__class__)
self.assert_equals(view, self.dispatcher._get_view('another_start', self.__class__))
def test_register_extension_for_model_not_work_for_other_models(self):
self.dispatcher.register('start', view, model=self.__class__)
self.assert_raises(Http404, self.dispatcher._get_view, 'start', object())
def test_no_custom_view(self):
self.assert_raises(Http404, self.dispatcher._get_custom_detail_view, self.__class__)
def test_register_custom_view(self):
self.dispatcher.register_custom_detail(self.__class__, custom_view)
self.assert_equals(custom_view, self.dispatcher._get_custom_detail_view(self.__class__))
|
<commit_before><commit_msg>Add basic tests for custom_urls system<commit_after># -*- coding: utf-8 -*-
from djangosanetesting import UnitTestCase
from django.http import Http404
from ella.core.custom_urls import DetailDispatcher
# dummy functions to register as views
def view(request, bits, context):
return request, bits, context
def custom_view(request, context):
return request, context
class TestCustomUrlsDispatcher(UnitTestCase):
def setUp(self):
self.dispatcher = DetailDispatcher()
self.context = {'object': self}
self.request = object()
def test_no_extension(self):
self.assert_raises(Http404, self.dispatcher._get_view, 'start', self)
def test_register_global_extension(self):
self.dispatcher.register('start', view)
self.assert_equals(view, self.dispatcher._get_view('start', self))
def test_register_extension_for_model(self):
self.dispatcher.register('another_start', view, model=self.__class__)
self.assert_equals(view, self.dispatcher._get_view('another_start', self.__class__))
def test_register_extension_for_model_not_work_for_other_models(self):
self.dispatcher.register('start', view, model=self.__class__)
self.assert_raises(Http404, self.dispatcher._get_view, 'start', object())
def test_no_custom_view(self):
self.assert_raises(Http404, self.dispatcher._get_custom_detail_view, self.__class__)
def test_register_custom_view(self):
self.dispatcher.register_custom_detail(self.__class__, custom_view)
self.assert_equals(custom_view, self.dispatcher._get_custom_detail_view(self.__class__))
|
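A brief sketch of the registration flow these tests exercise, written against the DetailDispatcher API as imported in the test module; the extension name and view are placeholders.
from ella.core.custom_urls import DetailDispatcher
def rating_view(request, bits, context):
    # placeholder extension view; a real one would build an HttpResponse
    return request, bits, context
dispatcher = DetailDispatcher()
dispatcher.register('rating', rating_view)  # global; pass model=SomeModel to scope it to one model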
|
c153bc9422308599d1354abf782273ca7bd78952
|
nova/tests/virt_unittest.py
|
nova/tests/virt_unittest.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
def test_get_uri_and_template(self):
class MockDataModel(object):
def __init__(self):
self.datamodel = { 'name' : 'i-cafebabe',
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2 }
type_uri_map = { 'qemu' : ('qemu:///system',
[lambda s: '<domain type=\'qemu\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/kvm' not in s]),
'kvm' : ('qemu:///system',
[lambda s: '<domain type=\'kvm\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/qemu<' not in s]),
'uml' : ('uml:///system',
[lambda s: '<domain type=\'uml\'>' in s,
lambda s: 'type>uml</type' in s]),
}
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
for i, check in enumerate(checks):
xml = conn.toXml(MockDataModel())
self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)
|
Add a few unit tests for libvirt_conn.
|
Add a few unit tests for libvirt_conn.
|
Python
|
apache-2.0
|
n0ano/ganttclient
|
Add a few unit tests for libvirt_conn.
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
def test_get_uri_and_template(self):
class MockDataModel(object):
def __init__(self):
self.datamodel = { 'name' : 'i-cafebabe',
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2 }
type_uri_map = { 'qemu' : ('qemu:///system',
[lambda s: '<domain type=\'qemu\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/kvm' not in s]),
'kvm' : ('qemu:///system',
[lambda s: '<domain type=\'kvm\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/qemu<' not in s]),
'uml' : ('uml:///system',
[lambda s: '<domain type=\'uml\'>' in s,
lambda s: 'type>uml</type' in s]),
}
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
for i, check in enumerate(checks):
xml = conn.toXml(MockDataModel())
self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)
|
<commit_before><commit_msg>Add a few unit tests for libvirt_conn.<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
def test_get_uri_and_template(self):
class MockDataModel(object):
def __init__(self):
self.datamodel = { 'name' : 'i-cafebabe',
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2 }
type_uri_map = { 'qemu' : ('qemu:///system',
[lambda s: '<domain type=\'qemu\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/kvm' not in s]),
'kvm' : ('qemu:///system',
[lambda s: '<domain type=\'kvm\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/qemu<' not in s]),
'uml' : ('uml:///system',
[lambda s: '<domain type=\'uml\'>' in s,
lambda s: 'type>uml</type' in s]),
}
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
for i, check in enumerate(checks):
xml = conn.toXml(MockDataModel())
self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)
|
Add a few unit tests for libvirt_conn.# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
def test_get_uri_and_template(self):
class MockDataModel(object):
def __init__(self):
self.datamodel = { 'name' : 'i-cafebabe',
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2 }
type_uri_map = { 'qemu' : ('qemu:///system',
[lambda s: '<domain type=\'qemu\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/kvm' not in s]),
'kvm' : ('qemu:///system',
[lambda s: '<domain type=\'kvm\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/qemu<' not in s]),
'uml' : ('uml:///system',
[lambda s: '<domain type=\'uml\'>' in s,
lambda s: 'type>uml</type' in s]),
}
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
for i, check in enumerate(checks):
xml = conn.toXml(MockDataModel())
self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)
|
<commit_before><commit_msg>Add a few unit tests for libvirt_conn.<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
def test_get_uri_and_template(self):
class MockDataModel(object):
def __init__(self):
self.datamodel = { 'name' : 'i-cafebabe',
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2 }
type_uri_map = { 'qemu' : ('qemu:///system',
[lambda s: '<domain type=\'qemu\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/kvm' not in s]),
'kvm' : ('qemu:///system',
[lambda s: '<domain type=\'kvm\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/qemu<' not in s]),
'uml' : ('uml:///system',
[lambda s: '<domain type=\'uml\'>' in s,
lambda s: 'type>uml</type' in s]),
}
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
for i, check in enumerate(checks):
xml = conn.toXml(MockDataModel())
self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)
|
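A standalone illustration of the list-of-lambda-checks pattern used in the test above, detached from nova so it runs on its own.
xml = "<domain type='kvm'><os><type>hvm</type></os></domain>"
checks = [
    lambda s: "<domain type='kvm'>" in s,
    lambda s: "type>hvm</type" in s,
]
for i, check in enumerate(checks):
    assert check(xml), '%s failed check %d' % (xml, i)
print('all checks passed')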
|
ea11ae8919139eae8eaa6b9b1dfe256726d3c584
|
test/test_SBSolarcell.py
|
test/test_SBSolarcell.py
|
# -*- coding: utf-8 -*-
import numpy as np
import ibei
from astropy import units
import unittest
temp_sun = 5762.
temp_earth = 288.
bandgap = 1.15
input_params = {"temp_sun": temp_sun,
"temp_planet": temp_earth,
"bandgap": bandgap,
"voltage": 0.5,}
class CalculatorsReturnUnits(unittest.TestCase):
"""
Tests units of the calculator methods returned values.
"""
def setUp(self):
"""
Initialize SBSolarcell object from input_params
"""
self.solarcell = ibei.SQSolarcell(input_params)
def test_calc_blackbody_radiant_power_density(self):
"""
calc_blackbody_radiant_power_density should return value with unit of W m^-2.
"""
tested_unit = self.solarcell.calc_blackbody_radiant_power_density().unit
target_unit = units.Unit("W/m2")
self.assertEqual(tested_unit, target_unit)
def test_calc_power_density(self):
"""
calc_power_density should return value with unit of W m^-2.
"""
tested_unit = self.solarcell.calc_power_density().unit
target_unit = units.Unit("W/m2")
self.assertEqual(tested_unit, target_unit)
def test_calc_power_density_zero_bandgap(self):
"""
calc_power_density should return value with unit of W m^-2.
"""
self.solarcell.bandgap = 0
tested_unit = self.solarcell.calc_power_density().unit
target_unit = units.Unit("W/m2")
self.assertEqual(tested_unit, target_unit)
class CalculatorsReturnType(unittest.TestCase):
"""
Tests type of the calculator methods returned values.
"""
def setUp(self):
"""
Initialize SBSolarcell object from input_params
"""
self.solarcell = ibei.SQSolarcell(input_params)
def test_calc_efficiency(self):
"""
        calc_efficiency should return a value of type float.
"""
self.assertIsInstance(self.solarcell.calc_efficiency(), float)
class CalculatorsReturnValue(unittest.TestCase):
"""
Tests special values of the calculator methods.
"""
def setUp(self):
"""
Initialize SBSolarcell object from input_params
"""
self.solarcell = ibei.SQSolarcell(input_params)
def test_calc_power_density(self):
"""
calc_power_density should return 0 when bandgap = 0.
"""
self.solarcell.bandgap = 0
self.assertEqual(0, self.solarcell.calc_power_density())
if __name__ == "__main__":
pass
|
Copy SBSolarcell tests into individual file
|
Copy SBSolarcell tests into individual file
|
Python
|
mit
|
jrsmith3/ibei,jrsmith3/tec,jrsmith3/tec
|
Copy SBSolarcell tests into individual file
|
# -*- coding: utf-8 -*-
import numpy as np
import ibei
from astropy import units
import unittest
temp_sun = 5762.
temp_earth = 288.
bandgap = 1.15
input_params = {"temp_sun": temp_sun,
"temp_planet": temp_earth,
"bandgap": bandgap,
"voltage": 0.5,}
class CalculatorsReturnUnits(unittest.TestCase):
"""
Tests units of the calculator methods returned values.
"""
def setUp(self):
"""
Initialize SBSolarcell object from input_params
"""
self.solarcell = ibei.SQSolarcell(input_params)
def test_calc_blackbody_radiant_power_density(self):
"""
calc_blackbody_radiant_power_density should return value with unit of W m^-2.
"""
tested_unit = self.solarcell.calc_blackbody_radiant_power_density().unit
target_unit = units.Unit("W/m2")
self.assertEqual(tested_unit, target_unit)
def test_calc_power_density(self):
"""
calc_power_density should return value with unit of W m^-2.
"""
tested_unit = self.solarcell.calc_power_density().unit
target_unit = units.Unit("W/m2")
self.assertEqual(tested_unit, target_unit)
def test_calc_power_density_zero_bandgap(self):
"""
calc_power_density should return value with unit of W m^-2.
"""
self.solarcell.bandgap = 0
tested_unit = self.solarcell.calc_power_density().unit
target_unit = units.Unit("W/m2")
self.assertEqual(tested_unit, target_unit)
class CalculatorsReturnType(unittest.TestCase):
"""
Tests type of the calculator methods returned values.
"""
def setUp(self):
"""
Initialize SBSolarcell object from input_params
"""
self.solarcell = ibei.SQSolarcell(input_params)
def test_calc_efficiency(self):
"""
        calc_efficiency should return a value of type float.
"""
self.assertIsInstance(self.solarcell.calc_efficiency(), float)
class CalculatorsReturnValue(unittest.TestCase):
"""
Tests special values of the calculator methods.
"""
def setUp(self):
"""
Initialize SBSolarcell object from input_params
"""
self.solarcell = ibei.SQSolarcell(input_params)
def test_calc_power_density(self):
"""
calc_power_density should return 0 when bandgap = 0.
"""
self.solarcell.bandgap = 0
self.assertEqual(0, self.solarcell.calc_power_density())
if __name__ == "__main__":
pass
|
<commit_before><commit_msg>Copy SBSolarcell tests into individual file<commit_after>
|
# -*- coding: utf-8 -*-
import numpy as np
import ibei
from astropy import units
import unittest
temp_sun = 5762.
temp_earth = 288.
bandgap = 1.15
input_params = {"temp_sun": temp_sun,
"temp_planet": temp_earth,
"bandgap": bandgap,
"voltage": 0.5,}
class CalculatorsReturnUnits(unittest.TestCase):
"""
Tests units of the calculator methods returned values.
"""
def setUp(self):
"""
Initialize SBSolarcell object from input_params
"""
self.solarcell = ibei.SQSolarcell(input_params)
def test_calc_blackbody_radiant_power_density(self):
"""
calc_blackbody_radiant_power_density should return value with unit of W m^-2.
"""
tested_unit = self.solarcell.calc_blackbody_radiant_power_density().unit
target_unit = units.Unit("W/m2")
self.assertEqual(tested_unit, target_unit)
def test_calc_power_density(self):
"""
calc_power_density should return value with unit of W m^-2.
"""
tested_unit = self.solarcell.calc_power_density().unit
target_unit = units.Unit("W/m2")
self.assertEqual(tested_unit, target_unit)
def test_calc_power_density_zero_bandgap(self):
"""
calc_power_density should return value with unit of W m^-2.
"""
self.solarcell.bandgap = 0
tested_unit = self.solarcell.calc_power_density().unit
target_unit = units.Unit("W/m2")
self.assertEqual(tested_unit, target_unit)
class CalculatorsReturnType(unittest.TestCase):
"""
Tests type of the calculator methods returned values.
"""
def setUp(self):
"""
Initialize SBSolarcell object from input_params
"""
self.solarcell = ibei.SQSolarcell(input_params)
def test_calc_efficiency(self):
"""
        calc_efficiency should return a value of type float.
"""
self.assertIsInstance(self.solarcell.calc_efficiency(), float)
class CalculatorsReturnValue(unittest.TestCase):
"""
Tests special values of the calculator methods.
"""
def setUp(self):
"""
Initialize SBSolarcell object from input_params
"""
self.solarcell = ibei.SQSolarcell(input_params)
def test_calc_power_density(self):
"""
calc_power_density should return 0 when bandgap = 0.
"""
self.solarcell.bandgap = 0
self.assertEqual(0, self.solarcell.calc_power_density())
if __name__ == "__main__":
pass
|
Copy SBSolarcell tests into individual file# -*- coding: utf-8 -*-
import numpy as np
import ibei
from astropy import units
import unittest
temp_sun = 5762.
temp_earth = 288.
bandgap = 1.15
input_params = {"temp_sun": temp_sun,
"temp_planet": temp_earth,
"bandgap": bandgap,
"voltage": 0.5,}
class CalculatorsReturnUnits(unittest.TestCase):
"""
Tests units of the calculator methods returned values.
"""
def setUp(self):
"""
Initialize SBSolarcell object from input_params
"""
self.solarcell = ibei.SQSolarcell(input_params)
def test_calc_blackbody_radiant_power_density(self):
"""
calc_blackbody_radiant_power_density should return value with unit of W m^-2.
"""
tested_unit = self.solarcell.calc_blackbody_radiant_power_density().unit
target_unit = units.Unit("W/m2")
self.assertEqual(tested_unit, target_unit)
def test_calc_power_density(self):
"""
calc_power_density should return value with unit of W m^-2.
"""
tested_unit = self.solarcell.calc_power_density().unit
target_unit = units.Unit("W/m2")
self.assertEqual(tested_unit, target_unit)
def test_calc_power_density_zero_bandgap(self):
"""
calc_power_density should return value with unit of W m^-2.
"""
self.solarcell.bandgap = 0
tested_unit = self.solarcell.calc_power_density().unit
target_unit = units.Unit("W/m2")
self.assertEqual(tested_unit, target_unit)
class CalculatorsReturnType(unittest.TestCase):
"""
Tests type of the calculator methods returned values.
"""
def setUp(self):
"""
Initialize SBSolarcell object from input_params
"""
self.solarcell = ibei.SQSolarcell(input_params)
def test_calc_efficiency(self):
"""
        calc_efficiency should return a value of type float.
"""
self.assertIsInstance(self.solarcell.calc_efficiency(), float)
class CalculatorsReturnValue(unittest.TestCase):
"""
Tests special values of the calculator methods.
"""
def setUp(self):
"""
Initialize SBSolarcell object from input_params
"""
self.solarcell = ibei.SQSolarcell(input_params)
def test_calc_power_density(self):
"""
calc_power_density should return 0 when bandgap = 0.
"""
self.solarcell.bandgap = 0
self.assertEqual(0, self.solarcell.calc_power_density())
if __name__ == "__main__":
pass
|
<commit_before><commit_msg>Copy SBSolarcell tests into individual file<commit_after># -*- coding: utf-8 -*-
import numpy as np
import ibei
from astropy import units
import unittest
temp_sun = 5762.
temp_earth = 288.
bandgap = 1.15
input_params = {"temp_sun": temp_sun,
"temp_planet": temp_earth,
"bandgap": bandgap,
"voltage": 0.5,}
class CalculatorsReturnUnits(unittest.TestCase):
"""
Tests units of the calculator methods returned values.
"""
def setUp(self):
"""
Initialize SBSolarcell object from input_params
"""
self.solarcell = ibei.SQSolarcell(input_params)
def test_calc_blackbody_radiant_power_density(self):
"""
calc_blackbody_radiant_power_density should return value with unit of W m^-2.
"""
tested_unit = self.solarcell.calc_blackbody_radiant_power_density().unit
target_unit = units.Unit("W/m2")
self.assertEqual(tested_unit, target_unit)
def test_calc_power_density(self):
"""
calc_power_density should return value with unit of W m^-2.
"""
tested_unit = self.solarcell.calc_power_density().unit
target_unit = units.Unit("W/m2")
self.assertEqual(tested_unit, target_unit)
def test_calc_power_density_zero_bandgap(self):
"""
calc_power_density should return value with unit of W m^-2.
"""
self.solarcell.bandgap = 0
tested_unit = self.solarcell.calc_power_density().unit
target_unit = units.Unit("W/m2")
self.assertEqual(tested_unit, target_unit)
class CalculatorsReturnType(unittest.TestCase):
"""
Tests type of the calculator methods returned values.
"""
def setUp(self):
"""
Initialize SBSolarcell object from input_params
"""
self.solarcell = ibei.SQSolarcell(input_params)
def test_calc_efficiency(self):
"""
        calc_efficiency should return a value of type float.
"""
self.assertIsInstance(self.solarcell.calc_efficiency(), float)
class CalculatorsReturnValue(unittest.TestCase):
"""
Tests special values of the calculator methods.
"""
def setUp(self):
"""
Initialize SBSolarcell object from input_params
"""
self.solarcell = ibei.SQSolarcell(input_params)
def test_calc_power_density(self):
"""
calc_power_density should return 0 when bandgap = 0.
"""
self.solarcell.bandgap = 0
self.assertEqual(0, self.solarcell.calc_power_density())
if __name__ == "__main__":
pass
|
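A minimal usage sketch for the object under test, assuming the ibei.SQSolarcell API exercised above (an astropy quantity in W/m2 from calc_power_density, a plain float from calc_efficiency).
import ibei
cell = ibei.SQSolarcell({"temp_sun": 5762.,
                         "temp_planet": 288.,
                         "bandgap": 1.15,
                         "voltage": 0.5})
print(cell.calc_power_density())  # astropy Quantity with unit W / m2
print(cell.calc_efficiency())     # plain float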
|
8fd466ecd16db736177104902eb84f661b2b62cc
|
opps/sitemaps/googlenews.py
|
opps/sitemaps/googlenews.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib.sitemaps import GenericSitemap
from django.contrib.sites.models import Site
class GoogleNewsSitemap(GenericSitemap):
# That's Google News limit. Do not increase it!
limit = 1000
sitemap_template = 'sitemap_googlenews.xml'
def get_urls(self, page=1, site=None):
if site is None:
site = Site.objects.get_current()
sup = super(GoogleNewsSitemap, self)
old_urls = sup.get_urls(page, site)
urls = []
for item in self.paginator.page(page).object_list:
for url in old_urls:
loc = "http://%s%s" % (site.domain, self.location(item))
if url.get('location') == loc:
old_urls.remove(url)
url['item'] = item
urls.append(url)
return urls
|
Create sitemap for google news
|
Create sitemap for google news
|
Python
|
mit
|
jeanmask/opps,opps/opps,YACOWS/opps,williamroot/opps,opps/opps,YACOWS/opps,williamroot/opps,williamroot/opps,opps/opps,YACOWS/opps,jeanmask/opps,YACOWS/opps,opps/opps,jeanmask/opps,williamroot/opps,jeanmask/opps
|
Create sitemap for google news
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib.sitemaps import GenericSitemap
from django.contrib.sites.models import Site
class GoogleNewsSitemap(GenericSitemap):
# That's Google News limit. Do not increase it!
limit = 1000
sitemap_template = 'sitemap_googlenews.xml'
def get_urls(self, page=1, site=None):
if site is None:
site = Site.objects.get_current()
sup = super(GoogleNewsSitemap, self)
old_urls = sup.get_urls(page, site)
urls = []
for item in self.paginator.page(page).object_list:
for url in old_urls:
loc = "http://%s%s" % (site.domain, self.location(item))
if url.get('location') == loc:
old_urls.remove(url)
url['item'] = item
urls.append(url)
return urls
|
<commit_before><commit_msg>Create sitemap for google news<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib.sitemaps import GenericSitemap
from django.contrib.sites.models import Site
class GoogleNewsSitemap(GenericSitemap):
# That's Google News limit. Do not increase it!
limit = 1000
sitemap_template = 'sitemap_googlenews.xml'
def get_urls(self, page=1, site=None):
if site is None:
site = Site.objects.get_current()
sup = super(GoogleNewsSitemap, self)
old_urls = sup.get_urls(page, site)
urls = []
for item in self.paginator.page(page).object_list:
for url in old_urls:
loc = "http://%s%s" % (site.domain, self.location(item))
if url.get('location') == loc:
old_urls.remove(url)
url['item'] = item
urls.append(url)
return urls
|
Create sitemap for google news#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib.sitemaps import GenericSitemap
from django.contrib.sites.models import Site
class GoogleNewsSitemap(GenericSitemap):
# That's Google News limit. Do not increase it!
limit = 1000
sitemap_template = 'sitemap_googlenews.xml'
def get_urls(self, page=1, site=None):
if site is None:
site = Site.objects.get_current()
sup = super(GoogleNewsSitemap, self)
old_urls = sup.get_urls(page, site)
urls = []
for item in self.paginator.page(page).object_list:
for url in old_urls:
loc = "http://%s%s" % (site.domain, self.location(item))
if url.get('location') == loc:
old_urls.remove(url)
url['item'] = item
urls.append(url)
return urls
|
<commit_before><commit_msg>Create sitemap for google news<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib.sitemaps import GenericSitemap
from django.contrib.sites.models import Site
class GoogleNewsSitemap(GenericSitemap):
# That's Google News limit. Do not increase it!
limit = 1000
sitemap_template = 'sitemap_googlenews.xml'
def get_urls(self, page=1, site=None):
if site is None:
site = Site.objects.get_current()
sup = super(GoogleNewsSitemap, self)
old_urls = sup.get_urls(page, site)
urls = []
for item in self.paginator.page(page).object_list:
for url in old_urls:
loc = "http://%s%s" % (site.domain, self.location(item))
if url.get('location') == loc:
old_urls.remove(url)
url['item'] = item
urls.append(url)
return urls
|
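A hedged sketch of exposing the sitemap above through Django's sitemap view, using the old patterns()-style URLconf seen elsewhere in this document; "Post" is a hypothetical model standing in for whatever queryset a site actually publishes.
from django.conf.urls.defaults import patterns, url
from django.contrib.sitemaps import views as sitemap_views
from opps.sitemaps.googlenews import GoogleNewsSitemap
from myapp.models import Post  # hypothetical model, not part of opps
sitemaps = {
    'googlenews': GoogleNewsSitemap({'queryset': Post.objects.all(),
                                     'date_field': 'date_insert'}),
}
urlpatterns = patterns('',
    url(r'^sitemap-googlenews\.xml$', sitemap_views.sitemap,
        {'sitemaps': sitemaps}, name='sitemap_googlenews'),
)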
|
2a106a12db2a59ccb0517a13db67b35f475b3ef5
|
apps/survey/urls.py
|
apps/survey/urls.py
|
from django.conf.urls.defaults import *
from . import views
urlpatterns = patterns('',
url(r'^profile/$', views.profile_index, name='survey_profile'),
url(r'^profile/electric/$', views.profile_electric, name='survey_profile_electric'),
url(r'^main/$', views.main_index),
url(r'^group_management/$', views.group_management, name='group_management'),
url(r'^survey_management/$', views.survey_management, name='survey_management'),
url(r'^survey_data/$', views.survey_data, name='survey_management'),
url(r'^thanks_profile/$', views.thanks_profile, name='profile_thanks'),
url(r'^$', views.index, name='survey_index'),
)
|
from django.conf.urls.defaults import *
from . import views
urlpatterns = patterns('',
url(r'^profile/$', views.profile_index, name='survey_profile'),
url(r'^profile/electric/$', views.profile_electric, name='survey_profile_electric'),
url(r'^main/$', views.main_index),
url(r'^group_management/$', views.group_management, name='group_management'),
url(r'^survey_management/$', views.survey_management, name='survey_management'),
url(r'^survey_data/(?P<survey_shortname>.+)/(?P<id>\d+)/$', views.survey_data, name='survey_data'),
#url(r'^survey_data/(?P<survey_shortname>.+)/$', views.survey_data, name='survey_data'),
url(r'^thanks_profile/$', views.thanks_profile, name='profile_thanks'),
url(r'^$', views.index, name='survey_index'),
)
|
Add args to survey_data url
|
Add args to survey_data url
|
Python
|
agpl-3.0
|
chispita/epiwork,chispita/epiwork,chispita/epiwork,chispita/epiwork,chispita/epiwork,chispita/epiwork,chispita/epiwork
|
from django.conf.urls.defaults import *
from . import views
urlpatterns = patterns('',
url(r'^profile/$', views.profile_index, name='survey_profile'),
url(r'^profile/electric/$', views.profile_electric, name='survey_profile_electric'),
url(r'^main/$', views.main_index),
url(r'^group_management/$', views.group_management, name='group_management'),
url(r'^survey_management/$', views.survey_management, name='survey_management'),
url(r'^survey_data/$', views.survey_data, name='survey_management'),
url(r'^thanks_profile/$', views.thanks_profile, name='profile_thanks'),
url(r'^$', views.index, name='survey_index'),
)
Add args to survey_data url
|
from django.conf.urls.defaults import *
from . import views
urlpatterns = patterns('',
url(r'^profile/$', views.profile_index, name='survey_profile'),
url(r'^profile/electric/$', views.profile_electric, name='survey_profile_electric'),
url(r'^main/$', views.main_index),
url(r'^group_management/$', views.group_management, name='group_management'),
url(r'^survey_management/$', views.survey_management, name='survey_management'),
url(r'^survey_data/(?P<survey_shortname>.+)/(?P<id>\d+)/$', views.survey_data, name='survey_data'),
#url(r'^survey_data/(?P<survey_shortname>.+)/$', views.survey_data, name='survey_data'),
url(r'^thanks_profile/$', views.thanks_profile, name='profile_thanks'),
url(r'^$', views.index, name='survey_index'),
)
|
<commit_before>from django.conf.urls.defaults import *
from . import views
urlpatterns = patterns('',
url(r'^profile/$', views.profile_index, name='survey_profile'),
url(r'^profile/electric/$', views.profile_electric, name='survey_profile_electric'),
url(r'^main/$', views.main_index),
url(r'^group_management/$', views.group_management, name='group_management'),
url(r'^survey_management/$', views.survey_management, name='survey_management'),
url(r'^survey_data/$', views.survey_data, name='survey_management'),
url(r'^thanks_profile/$', views.thanks_profile, name='profile_thanks'),
url(r'^$', views.index, name='survey_index'),
)
<commit_msg>Add args to survey_data url<commit_after>
|
from django.conf.urls.defaults import *
from . import views
urlpatterns = patterns('',
url(r'^profile/$', views.profile_index, name='survey_profile'),
url(r'^profile/electric/$', views.profile_electric, name='survey_profile_electric'),
url(r'^main/$', views.main_index),
url(r'^group_management/$', views.group_management, name='group_management'),
url(r'^survey_management/$', views.survey_management, name='survey_management'),
url(r'^survey_data/(?P<survey_shortname>.+)/(?P<id>\d+)/$', views.survey_data, name='survey_data'),
#url(r'^survey_data/(?P<survey_shortname>.+)/$', views.survey_data, name='survey_data'),
url(r'^thanks_profile/$', views.thanks_profile, name='profile_thanks'),
url(r'^$', views.index, name='survey_index'),
)
|
from django.conf.urls.defaults import *
from . import views
urlpatterns = patterns('',
url(r'^profile/$', views.profile_index, name='survey_profile'),
url(r'^profile/electric/$', views.profile_electric, name='survey_profile_electric'),
url(r'^main/$', views.main_index),
url(r'^group_management/$', views.group_management, name='group_management'),
url(r'^survey_management/$', views.survey_management, name='survey_management'),
url(r'^survey_data/$', views.survey_data, name='survey_management'),
url(r'^thanks_profile/$', views.thanks_profile, name='profile_thanks'),
url(r'^$', views.index, name='survey_index'),
)
Add args to survey_data urlfrom django.conf.urls.defaults import *
from . import views
urlpatterns = patterns('',
url(r'^profile/$', views.profile_index, name='survey_profile'),
url(r'^profile/electric/$', views.profile_electric, name='survey_profile_electric'),
url(r'^main/$', views.main_index),
url(r'^group_management/$', views.group_management, name='group_management'),
url(r'^survey_management/$', views.survey_management, name='survey_management'),
url(r'^survey_data/(?P<survey_shortname>.+)/(?P<id>\d+)/$', views.survey_data, name='survey_data'),
#url(r'^survey_data/(?P<survey_shortname>.+)/$', views.survey_data, name='survey_data'),
url(r'^thanks_profile/$', views.thanks_profile, name='profile_thanks'),
url(r'^$', views.index, name='survey_index'),
)
|
<commit_before>from django.conf.urls.defaults import *
from . import views
urlpatterns = patterns('',
url(r'^profile/$', views.profile_index, name='survey_profile'),
url(r'^profile/electric/$', views.profile_electric, name='survey_profile_electric'),
url(r'^main/$', views.main_index),
url(r'^group_management/$', views.group_management, name='group_management'),
url(r'^survey_management/$', views.survey_management, name='survey_management'),
url(r'^survey_data/$', views.survey_data, name='survey_management'),
url(r'^thanks_profile/$', views.thanks_profile, name='profile_thanks'),
url(r'^$', views.index, name='survey_index'),
)
<commit_msg>Add args to survey_data url<commit_after>from django.conf.urls.defaults import *
from . import views
urlpatterns = patterns('',
url(r'^profile/$', views.profile_index, name='survey_profile'),
url(r'^profile/electric/$', views.profile_electric, name='survey_profile_electric'),
url(r'^main/$', views.main_index),
url(r'^group_management/$', views.group_management, name='group_management'),
url(r'^survey_management/$', views.survey_management, name='survey_management'),
url(r'^survey_data/(?P<survey_shortname>.+)/(?P<id>\d+)/$', views.survey_data, name='survey_data'),
#url(r'^survey_data/(?P<survey_shortname>.+)/$', views.survey_data, name='survey_data'),
url(r'^thanks_profile/$', views.thanks_profile, name='profile_thanks'),
url(r'^$', views.index, name='survey_index'),
)
|
1b00a597d8145b2df05054fef8d072d452209463
|
src/data/surface.py
|
src/data/surface.py
|
from glob import glob
# Third-party modules
import pandas as pd
# Hand-made modules
from base import LocationHandlerBase
SFC_REGEX_DIRNAME = "sfc[1-5]"
KWARGS_READ_CSV_SFC_MASTER = {
"index_col": 0,
}
KWARGS_READ_CSV_SFC_LOG = {
"index_col": 0,
"na_values": ['', ' ']
}
class SurfaceHandler(LocationHandlerBase):
def __init__(self,
sfc_master_filepath,
sfc_file_prefix="sfc_",
sfc_file_suffix=".tsv"):
super().__init__(sfc_master_filepath, **KWARGS_READ_CSV_SFC_MASTER)
self.sfc_file_prefix = sfc_file_prefix
self.sfc_file_suffix = sfc_file_suffix
self.SFC_REGEX_DIRNAME = SFC_REGEX_DIRNAME
def read_tsv(self, path_or_buf):
df_ret = pd.read_csv(path_or_buf, **self.gen_read_csv_kwargs(KWARGS_READ_CSV_SFC_LOG))
df_ret.index = self.parse_datetime(pd.Series(df_ret.index).apply(str))
return df_ret
def to_tsv(self, df, path_or_buf, **kwargs):
df.to_csv(path_or_buf, **self.gen_to_csv_kwargs(kwargs))
def gen_filepath_list(self, aid_list):
sfc_regex_filepath_list = [
self.path.join(
self.INTERIM_DATA_BASEPATH,
self.SFC_REGEX_DIRNAME,
self.sfc_file_prefix + str(aid) + self.sfc_file_suffix
) for aid in aid_list
]
return [
sfc_file \
for sfc_regex_filepath in sfc_regex_filepath_list \
for sfc_file in glob(sfc_regex_filepath)
]
def retrive_data(self, filepath_list, name_list):
if len(filepath_list) < 1:
raise ValueError("Empty list ?")
df_ret = self.read_tsv(filepath_list[0])
df_ret.columns = [str(col_name) + '_' + name_list[0] for col_name in df_ret.columns]
if len(filepath_list) > 1:
for filepath, name in zip(filepath_list[1:], name_list[1:]):
df_ret = df_ret.merge(
self.read_tsv(filepath),
how="outer",
left_index=True,
right_index=True,
suffixes=(".", "_{}".format(name))
)
return df_ret
if __name__ == '__main__':
print("Surface!")
|
Make SurfaceHandler (for sfc data)
|
Make SurfaceHandler (for sfc data)
|
Python
|
mit
|
gciteam6/xgboost,gciteam6/xgboost
|
Make SurfaceHandler (for sfc data)
|
from glob import glob
# Third-party modules
import pandas as pd
# Hand-made modules
from base import LocationHandlerBase
SFC_REGEX_DIRNAME = "sfc[1-5]"
KWARGS_READ_CSV_SFC_MASTER = {
"index_col": 0,
}
KWARGS_READ_CSV_SFC_LOG = {
"index_col": 0,
"na_values": ['', ' ']
}
class SurfaceHandler(LocationHandlerBase):
def __init__(self,
sfc_master_filepath,
sfc_file_prefix="sfc_",
sfc_file_suffix=".tsv"):
super().__init__(sfc_master_filepath, **KWARGS_READ_CSV_SFC_MASTER)
self.sfc_file_prefix = sfc_file_prefix
self.sfc_file_suffix = sfc_file_suffix
self.SFC_REGEX_DIRNAME = SFC_REGEX_DIRNAME
def read_tsv(self, path_or_buf):
df_ret = pd.read_csv(path_or_buf, **self.gen_read_csv_kwargs(KWARGS_READ_CSV_SFC_LOG))
df_ret.index = self.parse_datetime(pd.Series(df_ret.index).apply(str))
return df_ret
def to_tsv(self, df, path_or_buf, **kwargs):
df.to_csv(path_or_buf, **self.gen_to_csv_kwargs(kwargs))
def gen_filepath_list(self, aid_list):
sfc_regex_filepath_list = [
self.path.join(
self.INTERIM_DATA_BASEPATH,
self.SFC_REGEX_DIRNAME,
self.sfc_file_prefix + str(aid) + self.sfc_file_suffix
) for aid in aid_list
]
return [
sfc_file \
for sfc_regex_filepath in sfc_regex_filepath_list \
for sfc_file in glob(sfc_regex_filepath)
]
def retrive_data(self, filepath_list, name_list):
if len(filepath_list) < 1:
raise ValueError("Empty list ?")
df_ret = self.read_tsv(filepath_list[0])
df_ret.columns = [str(col_name) + '_' + name_list[0] for col_name in df_ret.columns]
if len(filepath_list) > 1:
for filepath, name in zip(filepath_list[1:], name_list[1:]):
df_ret = df_ret.merge(
self.read_tsv(filepath),
how="outer",
left_index=True,
right_index=True,
suffixes=(".", "_{}".format(name))
)
return df_ret
if __name__ == '__main__':
print("Surface!")
|
<commit_before><commit_msg>Make SurfaceHandler (for sfc data)<commit_after>
|
from glob import glob
# Third-party modules
import pandas as pd
# Hand-made modules
from base import LocationHandlerBase
SFC_REGEX_DIRNAME = "sfc[1-5]"
KWARGS_READ_CSV_SFC_MASTER = {
"index_col": 0,
}
KWARGS_READ_CSV_SFC_LOG = {
"index_col": 0,
"na_values": ['', ' ']
}
class SurfaceHandler(LocationHandlerBase):
def __init__(self,
sfc_master_filepath,
sfc_file_prefix="sfc_",
sfc_file_suffix=".tsv"):
super().__init__(sfc_master_filepath, **KWARGS_READ_CSV_SFC_MASTER)
self.sfc_file_prefix = sfc_file_prefix
self.sfc_file_suffix = sfc_file_suffix
self.SFC_REGEX_DIRNAME = SFC_REGEX_DIRNAME
def read_tsv(self, path_or_buf):
df_ret = pd.read_csv(path_or_buf, **self.gen_read_csv_kwargs(KWARGS_READ_CSV_SFC_LOG))
df_ret.index = self.parse_datetime(pd.Series(df_ret.index).apply(str))
return df_ret
def to_tsv(self, df, path_or_buf, **kwargs):
df.to_csv(path_or_buf, **self.gen_to_csv_kwargs(kwargs))
def gen_filepath_list(self, aid_list):
sfc_regex_filepath_list = [
self.path.join(
self.INTERIM_DATA_BASEPATH,
self.SFC_REGEX_DIRNAME,
self.sfc_file_prefix + str(aid) + self.sfc_file_suffix
) for aid in aid_list
]
return [
sfc_file \
for sfc_regex_filepath in sfc_regex_filepath_list \
for sfc_file in glob(sfc_regex_filepath)
]
def retrive_data(self, filepath_list, name_list):
if len(filepath_list) < 1:
raise ValueError("Empty list ?")
df_ret = self.read_tsv(filepath_list[0])
df_ret.columns = [str(col_name) + '_' + name_list[0] for col_name in df_ret.columns]
if len(filepath_list) > 1:
for filepath, name in zip(filepath_list[1:], name_list[1:]):
df_ret = df_ret.merge(
self.read_tsv(filepath),
how="outer",
left_index=True,
right_index=True,
suffixes=(".", "_{}".format(name))
)
return df_ret
if __name__ == '__main__':
print("Surface!")
|
Make SurfaceHandler (for sfc data)from glob import glob
# Third-party modules
import pandas as pd
# Hand-made modules
from base import LocationHandlerBase
SFC_REGEX_DIRNAME = "sfc[1-5]"
KWARGS_READ_CSV_SFC_MASTER = {
"index_col": 0,
}
KWARGS_READ_CSV_SFC_LOG = {
"index_col": 0,
"na_values": ['', ' ']
}
class SurfaceHandler(LocationHandlerBase):
def __init__(self,
sfc_master_filepath,
sfc_file_prefix="sfc_",
sfc_file_suffix=".tsv"):
super().__init__(sfc_master_filepath, **KWARGS_READ_CSV_SFC_MASTER)
self.sfc_file_prefix = sfc_file_prefix
self.sfc_file_suffix = sfc_file_suffix
self.SFC_REGEX_DIRNAME = SFC_REGEX_DIRNAME
def read_tsv(self, path_or_buf):
df_ret = pd.read_csv(path_or_buf, **self.gen_read_csv_kwargs(KWARGS_READ_CSV_SFC_LOG))
df_ret.index = self.parse_datetime(pd.Series(df_ret.index).apply(str))
return df_ret
def to_tsv(self, df, path_or_buf, **kwargs):
df.to_csv(path_or_buf, **self.gen_to_csv_kwargs(kwargs))
def gen_filepath_list(self, aid_list):
sfc_regex_filepath_list = [
self.path.join(
self.INTERIM_DATA_BASEPATH,
self.SFC_REGEX_DIRNAME,
self.sfc_file_prefix + str(aid) + self.sfc_file_suffix
) for aid in aid_list
]
return [
sfc_file \
for sfc_regex_filepath in sfc_regex_filepath_list \
for sfc_file in glob(sfc_regex_filepath)
]
def retrive_data(self, filepath_list, name_list):
if len(filepath_list) < 1:
raise ValueError("Empty list ?")
df_ret = self.read_tsv(filepath_list[0])
df_ret.columns = [str(col_name) + '_' + name_list[0] for col_name in df_ret.columns]
if len(filepath_list) > 1:
for filepath, name in zip(filepath_list[1:], name_list[1:]):
df_ret = df_ret.merge(
self.read_tsv(filepath),
how="outer",
left_index=True,
right_index=True,
suffixes=(".", "_{}".format(name))
)
return df_ret
if __name__ == '__main__':
print("Surface!")
|
<commit_before><commit_msg>Make SurfaceHandler (for sfc data)<commit_after>from glob import glob
# Third-party modules
import pandas as pd
# Hand-made modules
from base import LocationHandlerBase
SFC_REGEX_DIRNAME = "sfc[1-5]"
KWARGS_READ_CSV_SFC_MASTER = {
"index_col": 0,
}
KWARGS_READ_CSV_SFC_LOG = {
"index_col": 0,
"na_values": ['', ' ']
}
class SurfaceHandler(LocationHandlerBase):
def __init__(self,
sfc_master_filepath,
sfc_file_prefix="sfc_",
sfc_file_suffix=".tsv"):
super().__init__(sfc_master_filepath, **KWARGS_READ_CSV_SFC_MASTER)
self.sfc_file_prefix = sfc_file_prefix
self.sfc_file_suffix = sfc_file_suffix
self.SFC_REGEX_DIRNAME = SFC_REGEX_DIRNAME
def read_tsv(self, path_or_buf):
df_ret = pd.read_csv(path_or_buf, **self.gen_read_csv_kwargs(KWARGS_READ_CSV_SFC_LOG))
df_ret.index = self.parse_datetime(pd.Series(df_ret.index).apply(str))
return df_ret
def to_tsv(self, df, path_or_buf, **kwargs):
df.to_csv(path_or_buf, **self.gen_to_csv_kwargs(kwargs))
def gen_filepath_list(self, aid_list):
sfc_regex_filepath_list = [
self.path.join(
self.INTERIM_DATA_BASEPATH,
self.SFC_REGEX_DIRNAME,
self.sfc_file_prefix + str(aid) + self.sfc_file_suffix
) for aid in aid_list
]
return [
sfc_file \
for sfc_regex_filepath in sfc_regex_filepath_list \
for sfc_file in glob(sfc_regex_filepath)
]
def retrive_data(self, filepath_list, name_list):
if len(filepath_list) < 1:
raise ValueError("Empty list ?")
df_ret = self.read_tsv(filepath_list[0])
df_ret.columns = [str(col_name) + '_' + name_list[0] for col_name in df_ret.columns]
if len(filepath_list) > 1:
for filepath, name in zip(filepath_list[1:], name_list[1:]):
df_ret = df_ret.merge(
self.read_tsv(filepath),
how="outer",
left_index=True,
right_index=True,
suffixes=(".", "_{}".format(name))
)
return df_ret
if __name__ == '__main__':
print("Surface!")
|
|
3091555ca7fc421f886a1df1ac28f677feb70a53
|
app/migrations/0006_auto_20150825_1513.py
|
app/migrations/0006_auto_20150825_1513.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0005_auto_20150819_1054'),
]
operations = [
migrations.AlterField(
model_name='socialnetworkapp',
name='field_real_time_updates',
field=models.CharField(default=b'feed', max_length=50, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='socialnetworkapp',
name='object_real_time_updates',
field=models.CharField(default=b'page', max_length=100, null=True, blank=True),
preserve_default=True,
),
]
|
Add default value for the fields object and field of the social network app model
|
Add default value for the fields object and field of the social network app model
|
Python
|
mit
|
rebearteta/social-ideation,rebearteta/social-ideation,rebearteta/social-ideation,joausaga/social-ideation,rebearteta/social-ideation,joausaga/social-ideation,joausaga/social-ideation,joausaga/social-ideation
|
Add default value for the fields object and field of the social network app model
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0005_auto_20150819_1054'),
]
operations = [
migrations.AlterField(
model_name='socialnetworkapp',
name='field_real_time_updates',
field=models.CharField(default=b'feed', max_length=50, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='socialnetworkapp',
name='object_real_time_updates',
field=models.CharField(default=b'page', max_length=100, null=True, blank=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add default value for the fields object and field of the social network app model<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0005_auto_20150819_1054'),
]
operations = [
migrations.AlterField(
model_name='socialnetworkapp',
name='field_real_time_updates',
field=models.CharField(default=b'feed', max_length=50, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='socialnetworkapp',
name='object_real_time_updates',
field=models.CharField(default=b'page', max_length=100, null=True, blank=True),
preserve_default=True,
),
]
|
Add default value for the fields object and field of the social network app model# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0005_auto_20150819_1054'),
]
operations = [
migrations.AlterField(
model_name='socialnetworkapp',
name='field_real_time_updates',
field=models.CharField(default=b'feed', max_length=50, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='socialnetworkapp',
name='object_real_time_updates',
field=models.CharField(default=b'page', max_length=100, null=True, blank=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add default value for the fields object and field of the social network app model<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0005_auto_20150819_1054'),
]
operations = [
migrations.AlterField(
model_name='socialnetworkapp',
name='field_real_time_updates',
field=models.CharField(default=b'feed', max_length=50, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='socialnetworkapp',
name='object_real_time_updates',
field=models.CharField(default=b'page', max_length=100, null=True, blank=True),
preserve_default=True,
),
]
|
|
d9be3f189fc34117bdec6e0c7856f7a7dc5f902a
|
cdap-docs/tools/versionscallback-gen.py
|
cdap-docs/tools/versionscallback-gen.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2014 Cask Data, Inc.
#
# Used to generate JSONP from a CDAP documentation directory on a webserver.
#
# sudo echo "versionscallback({\"development\": \"2.6.0-SNAPSHOT\", \"current\": \"2.5.2\", \"versions\": [\"2.5.1\", \"2.5.0\"]});" > json-versions.js; ls -l
import sys
from os import getcwd, listdir, readlink
from os.path import isdir, islink, join
def add_value(call, name, value):
if value:
if call:
call += ', '
call += '\\\"%s\\\": \\\"%s\\\"' % (name, value)
return call
def add_object(call, name, value):
if value:
if call:
call += ', '
call += ('\\\"%s\\\": %s' % (name, value)).replace("\'", '\\\"')
return call
def walk_directory(path=''):
global current, development, versions
if not path:
path = getcwd()
onlydirs = [ d for d in listdir(path) if isdir(join(path,d)) ]
onlydirs.reverse()
for d in onlydirs:
if d == 'current':
d_path = join(path,d)
if islink(d_path):
current = readlink(d_path)
elif d.endswith('SNAPSHOT'):
development = d
elif d and d != current:
versions.append(d)
def build(path=''):
global current, development, versions
call = ''
walk_directory(path)
call = add_value(call, 'development', development)
call = add_value(call, 'current', current)
call = add_object(call, 'versions', versions)
target = join(path, 'json-versions.js')
print 'sudo echo "versionscallback({%s});" > %s; ls -l' % (call, target)
def usage():
print 'Generates a command that creates the "versionscallback" JSONP from a CDAP documentation directory on a webserver.'
print 'Run this with the path to the directory containing the documentation directories.'
print 'python %s <path>' % sys.argv[0]
# Main
if __name__ == '__main__':
current = ''
development = ''
versions = []
path = ''
if len(sys.argv) > 1:
path = sys.argv[1]
build(path)
else:
usage()
|
Add tool for generating the JSONP required by the documentation versions.
|
Add tool for generating the JSONP required by the documentation versions.
|
Python
|
apache-2.0
|
chtyim/cdap,mpouttuclarke/cdap,anthcp/cdap,chtyim/cdap,hsaputra/cdap,chtyim/cdap,hsaputra/cdap,caskdata/cdap,chtyim/cdap,caskdata/cdap,mpouttuclarke/cdap,mpouttuclarke/cdap,hsaputra/cdap,caskdata/cdap,hsaputra/cdap,mpouttuclarke/cdap,caskdata/cdap,anthcp/cdap,anthcp/cdap,chtyim/cdap,caskdata/cdap,anthcp/cdap,caskdata/cdap,mpouttuclarke/cdap,anthcp/cdap,hsaputra/cdap,chtyim/cdap
|
Add tool for generating the JSONP required by the documentation versions.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2014 Cask Data, Inc.
#
# Used to generate JSONP from a CDAP documentation directory on a webserver.
#
# sudo echo "versionscallback({\"development\": \"2.6.0-SNAPSHOT\", \"current\": \"2.5.2\", \"versions\": [\"2.5.1\", \"2.5.0\"]});" > json-versions.js; ls -l
import sys
from os import getcwd, listdir, readlink
from os.path import isdir, islink, join
def add_value(call, name, value):
if value:
if call:
call += ', '
call += '\\\"%s\\\": \\\"%s\\\"' % (name, value)
return call
def add_object(call, name, value):
if value:
if call:
call += ', '
call += ('\\\"%s\\\": %s' % (name, value)).replace("\'", '\\\"')
return call
def walk_directory(path=''):
global current, development, versions
if not path:
path = getcwd()
onlydirs = [ d for d in listdir(path) if isdir(join(path,d)) ]
onlydirs.reverse()
for d in onlydirs:
if d == 'current':
d_path = join(path,d)
if islink(d_path):
current = readlink(d_path)
elif d.endswith('SNAPSHOT'):
development = d
elif d and d != current:
versions.append(d)
def build(path=''):
global current, development, versions
call = ''
walk_directory(path)
call = add_value(call, 'development', development)
call = add_value(call, 'current', current)
call = add_object(call, 'versions', versions)
target = join(path, 'json-versions.js')
print 'sudo echo "versionscallback({%s});" > %s; ls -l' % (call, target)
def usage():
print 'Generates a command that creates the "versionscallback" JSONP from a CDAP documentation directory on a webserver.'
print 'Run this with the path to the directory containing the documentation directories.'
print 'python %s <path>' % sys.argv[0]
# Main
if __name__ == '__main__':
current = ''
development = ''
versions = []
path = ''
if len(sys.argv) > 1:
path = sys.argv[1]
build(path)
else:
usage()
|
<commit_before><commit_msg>Add tool for generating the JSONP required by the documentation versions.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2014 Cask Data, Inc.
#
# Used to generate JSONP from a CDAP documentation directory on a webserver.
#
# sudo echo "versionscallback({\"development\": \"2.6.0-SNAPSHOT\", \"current\": \"2.5.2\", \"versions\": [\"2.5.1\", \"2.5.0\"]});" > json-versions.js; ls -l
import sys
from os import getcwd, listdir, readlink
from os.path import isdir, islink, join
def add_value(call, name, value):
if value:
if call:
call += ', '
call += '\\\"%s\\\": \\\"%s\\\"' % (name, value)
return call
def add_object(call, name, value):
if value:
if call:
call += ', '
call += ('\\\"%s\\\": %s' % (name, value)).replace("\'", '\\\"')
return call
def walk_directory(path=''):
global current, development, versions
if not path:
path = getcwd()
onlydirs = [ d for d in listdir(path) if isdir(join(path,d)) ]
onlydirs.reverse()
for d in onlydirs:
if d == 'current':
d_path = join(path,d)
if islink(d_path):
current = readlink(d_path)
elif d.endswith('SNAPSHOT'):
development = d
elif d and d != current:
versions.append(d)
def build(path=''):
global current, development, versions
call = ''
walk_directory(path)
call = add_value(call, 'development', development)
call = add_value(call, 'current', current)
call = add_object(call, 'versions', versions)
target = join(path, 'json-versions.js')
print 'sudo echo "versionscallback({%s});" > %s; ls -l' % (call, target)
def usage():
print 'Generates a command that creates the "versionscallback" JSONP from a CDAP documentation directory on a webserver.'
print 'Run this with the path to the directory containing the documentation directories.'
print 'python %s <path>' % sys.argv[0]
# Main
if __name__ == '__main__':
current = ''
development = ''
versions = []
path = ''
if len(sys.argv) > 1:
path = sys.argv[1]
build(path)
else:
usage()
|
Add tool for generating the JSONP required by the documentation versions.#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2014 Cask Data, Inc.
#
# Used to generate JSONP from a CDAP documentation directory on a webserver.
#
# sudo echo "versionscallback({\"development\": \"2.6.0-SNAPSHOT\", \"current\": \"2.5.2\", \"versions\": [\"2.5.1\", \"2.5.0\"]});" > json-versions.js; ls -l
import sys
from os import getcwd, listdir, readlink
from os.path import isdir, islink, join
def add_value(call, name, value):
if value:
if call:
call += ', '
call += '\\\"%s\\\": \\\"%s\\\"' % (name, value)
return call
def add_object(call, name, value):
if value:
if call:
call += ', '
call += ('\\\"%s\\\": %s' % (name, value)).replace("\'", '\\\"')
return call
def walk_directory(path=''):
global current, development, versions
if not path:
path = getcwd()
onlydirs = [ d for d in listdir(path) if isdir(join(path,d)) ]
onlydirs.reverse()
for d in onlydirs:
if d == 'current':
d_path = join(path,d)
if islink(d_path):
current = readlink(d_path)
elif d.endswith('SNAPSHOT'):
development = d
elif d and d != current:
versions.append(d)
def build(path=''):
global current, development, versions
call = ''
walk_directory(path)
call = add_value(call, 'development', development)
call = add_value(call, 'current', current)
call = add_object(call, 'versions', versions)
target = join(path, 'json-versions.js')
print 'sudo echo "versionscallback({%s});" > %s; ls -l' % (call, target)
def usage():
print 'Generates a command that creates the "versionscallback" JSONP from a CDAP documentation directory on a webserver.'
print 'Run this with the path to the directory containing the documentation directories.'
print 'python %s <path>' % sys.argv[0]
# Main
if __name__ == '__main__':
current = ''
development = ''
versions = []
path = ''
if len(sys.argv) > 1:
path = sys.argv[1]
build(path)
else:
usage()
|
<commit_before><commit_msg>Add tool for generating the JSONP required by the documentation versions.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2014 Cask Data, Inc.
#
# Used to generate JSONP from a CDAP documentation directory on a webserver.
#
# sudo echo "versionscallback({\"development\": \"2.6.0-SNAPSHOT\", \"current\": \"2.5.2\", \"versions\": [\"2.5.1\", \"2.5.0\"]});" > json-versions.js; ls -l
import sys
from os import getcwd, listdir, readlink
from os.path import isdir, islink, join
def add_value(call, name, value):
if value:
if call:
call += ', '
call += '\\\"%s\\\": \\\"%s\\\"' % (name, value)
return call
def add_object(call, name, value):
if value:
if call:
call += ', '
call += ('\\\"%s\\\": %s' % (name, value)).replace("\'", '\\\"')
return call
def walk_directory(path=''):
global current, development, versions
if not path:
path = getcwd()
onlydirs = [ d for d in listdir(path) if isdir(join(path,d)) ]
onlydirs.reverse()
for d in onlydirs:
if d == 'current':
d_path = join(path,d)
if islink(d_path):
current = readlink(d_path)
elif d.endswith('SNAPSHOT'):
development = d
elif d and d != current:
versions.append(d)
def build(path=''):
global current, development, versions
call = ''
walk_directory(path)
call = add_value(call, 'development', development)
call = add_value(call, 'current', current)
call = add_object(call, 'versions', versions)
target = join(path, 'json-versions.js')
print 'sudo echo "versionscallback({%s});" > %s; ls -l' % (call, target)
def usage():
print 'Generates a command that creates the "versionscallback" JSONP from a CDAP documentation directory on a webserver.'
print 'Run this with the path to the directory containing the documentation directories.'
print 'python %s <path>' % sys.argv[0]
# Main
if __name__ == '__main__':
current = ''
development = ''
versions = []
path = ''
if len(sys.argv) > 1:
path = sys.argv[1]
build(path)
else:
usage()
|
|
bf3f14692b6e2a348f5a0171ad57e494801ed4f4
|
scripts/writelibsvmdataformat.py
|
scripts/writelibsvmdataformat.py
|
"""
A script to write out lib svm expected data format from my collected data
"""
import os
import sys
import csv
import getopt
cmd_usage = """
usage: writelibsvmdataformat.py --inputs="/inputs/csv_files" --output="/output/lib_svm_data"
"""
feature_space = 10
def write_libsvm_data(input_files, output_file):
"""
:param input_files: input files, each of which contains a single label at first row, and a bunch of data following
    :param output_file: output file, which meets lib svm expected data format
"""
with open(output_file, 'wb') as output_csv_file:
output_writer = csv.writer(output_csv_file, delimiter=' ')
for input_file in input_files:
with open(input_file, 'rb') as input_csv_file:
input_reader = csv.reader(input_csv_file, delimiter=' ')
# assume there is only one item in each row
label = input_reader.next()
i = 1 # start from index 1
line = [label[0]]
for row in input_reader:
if int(row[0]) != 0:
line.append(':'.join([str(i), row[0]]))
i += 1
if i > feature_space:
output_writer.writerow(line)
i = 1
line = [label[0]]
def main(argv):
"""
:param argv: command line arguments
:rtype : error status, success 0 and fail 1
"""
try:
optlist, args = getopt.getopt(argv[1:], "hi:o:", ["help", "inputs=", "output="])
except getopt.GetoptError:
print("Command line arguments error, please try --help for help")
return 1
for opt, opt_arg in optlist:
if opt in ("-h", "--help"):
print cmd_usage
return 0
if opt in ("-i", "--inputs"):
inputs = opt_arg
if not os.path.exists(inputs):
print("Input files folder not exist")
return 1
elif opt in ("-o", "--output"):
output_file = opt_arg
# print the messages
print("Inputs folder: " + inputs)
print("Output file: " + output_file)
assert isinstance(output_file, basestring)
assert isinstance(inputs, basestring)
input_files = []
for root, dirs, files in os.walk(inputs):
for name in files:
if name.endswith('.csv'):
input_files.append(os.path.abspath(os.path.join(root, name)))
if len(input_files) == 0:
print("No input files.")
return 1
write_libsvm_data(input_files, output_file)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
Add python script to write lib svm expected data format from my collected data
|
Add python script to write lib svm expected data format from my collected data
|
Python
|
bsd-3-clause
|
Wayne82/libsvm-practice,Wayne82/libsvm-practice,Wayne82/libsvm-practice
|
Add python script to write lib svm expected data format from my collected data
|
"""
A script to write out lib svm expected data format from my collected data
"""
import os
import sys
import csv
import getopt
cmd_usage = """
usage: writelibsvmdataformat.py --inputs="/inputs/csv_files" --output="/output/lib_svm_data"
"""
feature_space = 10
def write_libsvm_data(input_files, output_file):
"""
:param input_files: input files, each of which contains a single label at first row, and a bunch of data following
    :param output_file: output file, which meets lib svm expected data format
"""
with open(output_file, 'wb') as output_csv_file:
output_writer = csv.writer(output_csv_file, delimiter=' ')
for input_file in input_files:
with open(input_file, 'rb') as input_csv_file:
input_reader = csv.reader(input_csv_file, delimiter=' ')
# assume there is only one item in each row
label = input_reader.next()
i = 1 # start from index 1
line = [label[0]]
for row in input_reader:
if int(row[0]) != 0:
line.append(':'.join([str(i), row[0]]))
i += 1
if i > feature_space:
output_writer.writerow(line)
i = 1
line = [label[0]]
def main(argv):
"""
:param argv: command line arguments
:rtype : error status, success 0 and fail 1
"""
try:
optlist, args = getopt.getopt(argv[1:], "hi:o:", ["help", "inputs=", "output="])
except getopt.GetoptError:
print("Command line arguments error, please try --help for help")
return 1
for opt, opt_arg in optlist:
if opt in ("-h", "--help"):
print cmd_usage
return 0
if opt in ("-i", "--inputs"):
inputs = opt_arg
if not os.path.exists(inputs):
print("Input files folder not exist")
return 1
elif opt in ("-o", "--output"):
output_file = opt_arg
# print the messages
print("Inputs folder: " + inputs)
print("Output file: " + output_file)
assert isinstance(output_file, basestring)
assert isinstance(inputs, basestring)
input_files = []
for root, dirs, files in os.walk(inputs):
for name in files:
if name.endswith('.csv'):
input_files.append(os.path.abspath(os.path.join(root, name)))
if len(input_files) == 0:
print("No input files.")
return 1
write_libsvm_data(input_files, output_file)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add python script to write lib svm expected data format from my collected data<commit_after>
|
"""
A script to write out lib svm expected data format from my collected data
"""
import os
import sys
import csv
import getopt
cmd_usage = """
usage: writelibsvmdataformat.py --inputs="/inputs/csv_files" --output="/output/lib_svm_data"
"""
feature_space = 10
def write_libsvm_data(input_files, output_file):
"""
:param input_files: input files, each of which contains a single label at first row, and a bunch of data following
    :param output_file: output file, which meets lib svm expected data format
"""
with open(output_file, 'wb') as output_csv_file:
output_writer = csv.writer(output_csv_file, delimiter=' ')
for input_file in input_files:
with open(input_file, 'rb') as input_csv_file:
input_reader = csv.reader(input_csv_file, delimiter=' ')
# assume there is only one item in each row
label = input_reader.next()
i = 1 # start from index 1
line = [label[0]]
for row in input_reader:
if int(row[0]) != 0:
line.append(':'.join([str(i), row[0]]))
i += 1
if i > feature_space:
output_writer.writerow(line)
i = 1
line = [label[0]]
def main(argv):
"""
:param argv: command line arguments
:rtype : error status, success 0 and fail 1
"""
try:
optlist, args = getopt.getopt(argv[1:], "hi:o:", ["help", "inputs=", "output="])
except getopt.GetoptError:
print("Command line arguments error, please try --help for help")
return 1
for opt, opt_arg in optlist:
if opt in ("-h", "--help"):
print cmd_usage
return 0
if opt in ("-i", "--inputs"):
inputs = opt_arg
if not os.path.exists(inputs):
print("Input files folder not exist")
return 1
elif opt in ("-o", "--output"):
output_file = opt_arg
# print the messages
print("Inputs folder: " + inputs)
print("Output file: " + output_file)
assert isinstance(output_file, basestring)
assert isinstance(inputs, basestring)
input_files = []
for root, dirs, files in os.walk(inputs):
for name in files:
if name.endswith('.csv'):
input_files.append(os.path.abspath(os.path.join(root, name)))
if len(input_files) == 0:
print("No input files.")
return 1
write_libsvm_data(input_files, output_file)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
Add python script to write lib svm expected data format from my collected data"""
A script to write out lib svm expected data format from my collected data
"""
import os
import sys
import csv
import getopt
cmd_usage = """
usage: writelibsvmdataformat.py --inputs="/inputs/csv_files" --output="/output/lib_svm_data"
"""
feature_space = 10
def write_libsvm_data(input_files, output_file):
"""
:param input_files: input files, each of which contains a single label at first row, and a bunch of data following
    :param output_file: output file, which meets lib svm expected data format
"""
with open(output_file, 'wb') as output_csv_file:
output_writer = csv.writer(output_csv_file, delimiter=' ')
for input_file in input_files:
with open(input_file, 'rb') as input_csv_file:
input_reader = csv.reader(input_csv_file, delimiter=' ')
# assume there is only one item in each row
label = input_reader.next()
i = 1 # start from index 1
line = [label[0]]
for row in input_reader:
if int(row[0]) != 0:
line.append(':'.join([str(i), row[0]]))
i += 1
if i > feature_space:
output_writer.writerow(line)
i = 1
line = [label[0]]
def main(argv):
"""
:param argv: command line arguments
:rtype : error status, success 0 and fail 1
"""
try:
optlist, args = getopt.getopt(argv[1:], "hi:o:", ["help", "inputs=", "output="])
except getopt.GetoptError:
print("Command line arguments error, please try --help for help")
return 1
for opt, opt_arg in optlist:
if opt in ("-h", "--help"):
print cmd_usage
return 0
if opt in ("-i", "--inputs"):
inputs = opt_arg
if not os.path.exists(inputs):
print("Input files folder not exist")
return 1
elif opt in ("-o", "--output"):
output_file = opt_arg
# print the messages
print("Inputs folder: " + inputs)
print("Output file: " + output_file)
assert isinstance(output_file, basestring)
assert isinstance(inputs, basestring)
input_files = []
for root, dirs, files in os.walk(inputs):
for name in files:
if name.endswith('.csv'):
input_files.append(os.path.abspath(os.path.join(root, name)))
if len(input_files) == 0:
print("No input files.")
return 1
write_libsvm_data(input_files, output_file)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add python script to write lib svm expected data format from my collected data<commit_after>"""
A script to write out lib svm expected data format from my collecting data
"""
import os
import sys
import csv
import getopt
cmd_usage = """
usage: writelibsvmdataformat.py --inputs="/inputs/csv_files" --output="/output/lib_svm_data"
"""
feature_space = 10
def write_libsvm_data(input_files, output_file):
"""
:param input_files: input files, each of which contains a single label at first row, and a bunch of data following
:param output_file: output file, which meet lib svm expected data format
"""
with open(output_file, 'wb') as output_csv_file:
output_writer = csv.writer(output_csv_file, delimiter=' ')
for input_file in input_files:
with open(input_file, 'rb') as input_csv_file:
input_reader = csv.reader(input_csv_file, delimiter=' ')
# assume there is only one item in each row
label = input_reader.next()
i = 1 # start from index 1
line = [label[0]]
for row in input_reader:
if int(row[0]) != 0:
line.append(':'.join([str(i), row[0]]))
i += 1
if i > feature_space:
output_writer.writerow(line)
i = 1
line = [label[0]]
def main(argv):
"""
:param argv: command line arguments
:rtype : error status, success 0 and fail 1
"""
try:
optlist, args = getopt.getopt(argv[1:], "hi:o:", ["help", "inputs=", "output="])
except getopt.GetoptError:
print("Command line arguments error, please try --help for help")
return 1
for opt, opt_arg in optlist:
if opt in ("-h", "--help"):
print cmd_usage
return 0
if opt in ("-i", "--inputs"):
inputs = opt_arg
if not os.path.exists(inputs):
print("Input files folder not exist")
return 1
elif opt in ("-o", "--output"):
output_file = opt_arg
# print the messages
print("Inputs folder: " + inputs)
print("Output file: " + output_file)
assert isinstance(output_file, basestring)
assert isinstance(inputs, basestring)
input_files = []
for root, dirs, files in os.walk(inputs):
for name in files:
if name.endswith('.csv'):
input_files.append(os.path.abspath(os.path.join(root, name)))
if len(input_files) == 0:
print("No input files.")
return 1
write_libsvm_data(input_files, output_file)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
|
1c2c7d5134780e58bd69f24ee06050b2f405d946
|
src/program/lwaftr/tests/subcommands/run_nohw_test.py
|
src/program/lwaftr/tests/subcommands/run_nohw_test.py
|
"""
Test the "snabb lwaftr run_nohw" subcommand.
"""
import unittest
from random import randint
from subprocess import call, check_call
from test_env import DATA_DIR, SNABB_CMD, BaseTestCase
class TestRun(BaseTestCase):
program = [
str(SNABB_CMD), 'lwaftr', 'run_nohw',
]
cmd_args = {
'--duration': '1',
'--bench-file': '/dev/null',
'--conf': str(DATA_DIR / 'icmp_on_fail.conf'),
'--inet-if': '',
'--b4-if': '',
}
veths = []
@classmethod
def setUpClass(cls):
cls.create_veth_pair()
@classmethod
def create_veth_pair(cls):
veth0 = cls.random_veth_name()
veth1 = cls.random_veth_name()
# Create veth pair.
check_call(('ip', 'link', 'add', veth0, 'type', 'veth', 'peer', \
'name', veth1))
# Set interfaces up.
check_call(('ip', 'link', 'set', veth0, 'up'))
check_call(('ip', 'link', 'set', veth1, 'up'))
# Add interface names to class.
cls.veths.append(veth0)
cls.veths.append(veth1)
@classmethod
def random_veth_name(cls):
return 'veth%s' % randint(10000, 999999)
def test_run_nohw(self):
self.execute_run_test(self.cmd_args)
def execute_run_test(self, cmd_args):
self.cmd_args['--inet-if'] = self.veths[0]
self.cmd_args['--b4-if'] = self.veths[1]
output = self.run_cmd(self.build_cmd())
self.assertIn(b'link report', output,
b'\n'.join((b'OUTPUT', output)))
def build_cmd(self):
result = self.program
for item in self.cmd_args.items():
for each in item:
result.append(each)
return result
@classmethod
def tearDownClass(cls):
cls.remove_veths()
@classmethod
def remove_veths(cls):
for i in range(0, len(cls.veths), 2):
check_call(('ip', 'link', 'delete', cls.veths[i]))
if __name__ == '__main__':
unittest.main()
|
Add unit test for run_nohw
|
Add unit test for run_nohw
|
Python
|
apache-2.0
|
Igalia/snabb,eugeneia/snabbswitch,eugeneia/snabbswitch,eugeneia/snabbswitch,alexandergall/snabbswitch,Igalia/snabb,alexandergall/snabbswitch,snabbco/snabb,dpino/snabb,eugeneia/snabb,heryii/snabb,alexandergall/snabbswitch,snabbco/snabb,snabbco/snabb,alexandergall/snabbswitch,Igalia/snabbswitch,Igalia/snabb,snabbco/snabb,SnabbCo/snabbswitch,Igalia/snabb,alexandergall/snabbswitch,Igalia/snabbswitch,eugeneia/snabbswitch,heryii/snabb,Igalia/snabbswitch,snabbco/snabb,dpino/snabbswitch,alexandergall/snabbswitch,dpino/snabbswitch,Igalia/snabb,dpino/snabbswitch,SnabbCo/snabbswitch,eugeneia/snabb,dpino/snabb,snabbco/snabb,Igalia/snabbswitch,dpino/snabb,heryii/snabb,dpino/snabb,eugeneia/snabb,alexandergall/snabbswitch,snabbco/snabb,Igalia/snabb,eugeneia/snabb,Igalia/snabbswitch,heryii/snabb,eugeneia/snabb,dpino/snabb,eugeneia/snabb,alexandergall/snabbswitch,SnabbCo/snabbswitch,eugeneia/snabb,dpino/snabb,dpino/snabbswitch,Igalia/snabb,dpino/snabb,eugeneia/snabb,Igalia/snabb,SnabbCo/snabbswitch,snabbco/snabb,heryii/snabb,heryii/snabb
|
Add unit test for run_nohw
|
"""
Test the "snabb lwaftr run_nohw" subcommand.
"""
import unittest
from random import randint
from subprocess import call, check_call
from test_env import DATA_DIR, SNABB_CMD, BaseTestCase
class TestRun(BaseTestCase):
program = [
str(SNABB_CMD), 'lwaftr', 'run_nohw',
]
cmd_args = {
'--duration': '1',
'--bench-file': '/dev/null',
'--conf': str(DATA_DIR / 'icmp_on_fail.conf'),
'--inet-if': '',
'--b4-if': '',
}
veths = []
@classmethod
def setUpClass(cls):
cls.create_veth_pair()
@classmethod
def create_veth_pair(cls):
veth0 = cls.random_veth_name()
veth1 = cls.random_veth_name()
# Create veth pair.
check_call(('ip', 'link', 'add', veth0, 'type', 'veth', 'peer', \
'name', veth1))
# Set interfaces up.
check_call(('ip', 'link', 'set', veth0, 'up'))
check_call(('ip', 'link', 'set', veth1, 'up'))
# Add interface names to class.
cls.veths.append(veth0)
cls.veths.append(veth1)
@classmethod
def random_veth_name(cls):
return 'veth%s' % randint(10000, 999999)
def test_run_nohw(self):
self.execute_run_test(self.cmd_args)
def execute_run_test(self, cmd_args):
self.cmd_args['--inet-if'] = self.veths[0]
self.cmd_args['--b4-if'] = self.veths[1]
output = self.run_cmd(self.build_cmd())
self.assertIn(b'link report', output,
b'\n'.join((b'OUTPUT', output)))
def build_cmd(self):
result = self.program
for item in self.cmd_args.items():
for each in item:
result.append(each)
return result
@classmethod
def tearDownClass(cls):
cls.remove_veths()
@classmethod
def remove_veths(cls):
for i in range(0, len(cls.veths), 2):
check_call(('ip', 'link', 'delete', cls.veths[i]))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for run_nohw<commit_after>
|
"""
Test the "snabb lwaftr run_nohw" subcommand.
"""
import unittest
from random import randint
from subprocess import call, check_call
from test_env import DATA_DIR, SNABB_CMD, BaseTestCase
class TestRun(BaseTestCase):
program = [
str(SNABB_CMD), 'lwaftr', 'run_nohw',
]
cmd_args = {
'--duration': '1',
'--bench-file': '/dev/null',
'--conf': str(DATA_DIR / 'icmp_on_fail.conf'),
'--inet-if': '',
'--b4-if': '',
}
veths = []
@classmethod
def setUpClass(cls):
cls.create_veth_pair()
@classmethod
def create_veth_pair(cls):
veth0 = cls.random_veth_name()
veth1 = cls.random_veth_name()
# Create veth pair.
check_call(('ip', 'link', 'add', veth0, 'type', 'veth', 'peer', \
'name', veth1))
# Set interfaces up.
check_call(('ip', 'link', 'set', veth0, 'up'))
check_call(('ip', 'link', 'set', veth1, 'up'))
# Add interface names to class.
cls.veths.append(veth0)
cls.veths.append(veth1)
@classmethod
def random_veth_name(cls):
return 'veth%s' % randint(10000, 999999)
def test_run_nohw(self):
self.execute_run_test(self.cmd_args)
def execute_run_test(self, cmd_args):
self.cmd_args['--inet-if'] = self.veths[0]
self.cmd_args['--b4-if'] = self.veths[1]
output = self.run_cmd(self.build_cmd())
self.assertIn(b'link report', output,
b'\n'.join((b'OUTPUT', output)))
def build_cmd(self):
result = self.program
for item in self.cmd_args.items():
for each in item:
result.append(each)
return result
@classmethod
def tearDownClass(cls):
cls.remove_veths()
@classmethod
def remove_veths(cls):
for i in range(0, len(cls.veths), 2):
check_call(('ip', 'link', 'delete', cls.veths[i]))
if __name__ == '__main__':
unittest.main()
|
Add unit test for run_nohw"""
Test the "snabb lwaftr run_nohw" subcommand.
"""
import unittest
from random import randint
from subprocess import call, check_call
from test_env import DATA_DIR, SNABB_CMD, BaseTestCase
class TestRun(BaseTestCase):
program = [
str(SNABB_CMD), 'lwaftr', 'run_nohw',
]
cmd_args = {
'--duration': '1',
'--bench-file': '/dev/null',
'--conf': str(DATA_DIR / 'icmp_on_fail.conf'),
'--inet-if': '',
'--b4-if': '',
}
veths = []
@classmethod
def setUpClass(cls):
cls.create_veth_pair()
@classmethod
def create_veth_pair(cls):
veth0 = cls.random_veth_name()
veth1 = cls.random_veth_name()
# Create veth pair.
check_call(('ip', 'link', 'add', veth0, 'type', 'veth', 'peer', \
'name', veth1))
# Set interfaces up.
check_call(('ip', 'link', 'set', veth0, 'up'))
check_call(('ip', 'link', 'set', veth1, 'up'))
# Add interface names to class.
cls.veths.append(veth0)
cls.veths.append(veth1)
@classmethod
def random_veth_name(cls):
return 'veth%s' % randint(10000, 999999)
def test_run_nohw(self):
self.execute_run_test(self.cmd_args)
def execute_run_test(self, cmd_args):
self.cmd_args['--inet-if'] = self.veths[0]
self.cmd_args['--b4-if'] = self.veths[1]
output = self.run_cmd(self.build_cmd())
self.assertIn(b'link report', output,
b'\n'.join((b'OUTPUT', output)))
def build_cmd(self):
result = self.program
for item in self.cmd_args.items():
for each in item:
result.append(each)
return result
@classmethod
def tearDownClass(cls):
cls.remove_veths()
@classmethod
def remove_veths(cls):
for i in range(0, len(cls.veths), 2):
check_call(('ip', 'link', 'delete', cls.veths[i]))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for run_nohw<commit_after>"""
Test the "snabb lwaftr run_nohw" subcommand.
"""
import unittest
from random import randint
from subprocess import call, check_call
from test_env import DATA_DIR, SNABB_CMD, BaseTestCase
class TestRun(BaseTestCase):
program = [
str(SNABB_CMD), 'lwaftr', 'run_nohw',
]
cmd_args = {
'--duration': '1',
'--bench-file': '/dev/null',
'--conf': str(DATA_DIR / 'icmp_on_fail.conf'),
'--inet-if': '',
'--b4-if': '',
}
veths = []
@classmethod
def setUpClass(cls):
cls.create_veth_pair()
@classmethod
def create_veth_pair(cls):
veth0 = cls.random_veth_name()
veth1 = cls.random_veth_name()
# Create veth pair.
check_call(('ip', 'link', 'add', veth0, 'type', 'veth', 'peer', \
'name', veth1))
# Set interfaces up.
check_call(('ip', 'link', 'set', veth0, 'up'))
check_call(('ip', 'link', 'set', veth1, 'up'))
# Add interface names to class.
cls.veths.append(veth0)
cls.veths.append(veth1)
@classmethod
def random_veth_name(cls):
return 'veth%s' % randint(10000, 999999)
def test_run_nohw(self):
self.execute_run_test(self.cmd_args)
def execute_run_test(self, cmd_args):
self.cmd_args['--inet-if'] = self.veths[0]
self.cmd_args['--b4-if'] = self.veths[1]
output = self.run_cmd(self.build_cmd())
self.assertIn(b'link report', output,
b'\n'.join((b'OUTPUT', output)))
def build_cmd(self):
result = self.program
for item in self.cmd_args.items():
for each in item:
result.append(each)
return result
@classmethod
def tearDownClass(cls):
cls.remove_veths()
@classmethod
def remove_veths(cls):
for i in range(0, len(cls.veths), 2):
check_call(('ip', 'link', 'delete', cls.veths[i]))
if __name__ == '__main__':
unittest.main()
|
|
87565c1e6032bff2cc3e20f5c4f46b7a17977f7c
|
migrations/versions/0098_tfl_dar.py
|
migrations/versions/0098_tfl_dar.py
|
"""empty message
Revision ID: 0098_tfl_dar
Revises: 0097_notnull_inbound_provider
Create Date: 2017-06-05 16:15:17.744908
"""
# revision identifiers, used by Alembic.
revision = '0098_tfl_dar'
down_revision = '0097_notnull_inbound_provider'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
TFL_DAR_ID = '1d70f564-919b-4c68-8bdf-b8520d92516e'
def upgrade():
op.execute("""INSERT INTO organisation VALUES (
'{}',
'',
'tfl_dar_x2.png',
''
)""".format(TFL_DAR_ID))
def downgrade():
op.execute("""
DELETE FROM organisation WHERE "id" = '{}'
""".format(TFL_DAR_ID))
|
Add organisation for TFL Dial a Ride
|
Add organisation for TFL Dial a Ride
References image added in:
- [ ] https://github.com/alphagov/notifications-admin/pull/1321
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add organisation for TFL Dial a Ride
References image added in:
- [ ] https://github.com/alphagov/notifications-admin/pull/1321
|
"""empty message
Revision ID: 0098_tfl_dar
Revises: 0097_notnull_inbound_provider
Create Date: 2017-06-05 16:15:17.744908
"""
# revision identifiers, used by Alembic.
revision = '0098_tfl_dar'
down_revision = '0097_notnull_inbound_provider'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
TFL_DAR_ID = '1d70f564-919b-4c68-8bdf-b8520d92516e'
def upgrade():
op.execute("""INSERT INTO organisation VALUES (
'{}',
'',
'tfl_dar_x2.png',
''
)""".format(TFL_DAR_ID))
def downgrade():
op.execute("""
DELETE FROM organisation WHERE "id" = '{}'
""".format(TFL_DAR_ID))
|
<commit_before><commit_msg>Add organisation for TFL Dial a Ride
References image added in:
- [ ] https://github.com/alphagov/notifications-admin/pull/1321<commit_after>
|
"""empty message
Revision ID: 0098_tfl_dar
Revises: 0097_notnull_inbound_provider
Create Date: 2017-06-05 16:15:17.744908
"""
# revision identifiers, used by Alembic.
revision = '0098_tfl_dar'
down_revision = '0097_notnull_inbound_provider'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
TFL_DAR_ID = '1d70f564-919b-4c68-8bdf-b8520d92516e'
def upgrade():
op.execute("""INSERT INTO organisation VALUES (
'{}',
'',
'tfl_dar_x2.png',
''
)""".format(TFL_DAR_ID))
def downgrade():
op.execute("""
DELETE FROM organisation WHERE "id" = '{}'
""".format(TFL_DAR_ID))
|
Add organisation for TFL Dial a Ride
References image added in:
- [ ] https://github.com/alphagov/notifications-admin/pull/1321"""empty message
Revision ID: 0098_tfl_dar
Revises: 0097_notnull_inbound_provider
Create Date: 2017-06-05 16:15:17.744908
"""
# revision identifiers, used by Alembic.
revision = '0098_tfl_dar'
down_revision = '0097_notnull_inbound_provider'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
TFL_DAR_ID = '1d70f564-919b-4c68-8bdf-b8520d92516e'
def upgrade():
op.execute("""INSERT INTO organisation VALUES (
'{}',
'',
'tfl_dar_x2.png',
''
)""".format(TFL_DAR_ID))
def downgrade():
op.execute("""
DELETE FROM organisation WHERE "id" = '{}'
""".format(TFL_DAR_ID))
|
<commit_before><commit_msg>Add organisation for TFL Dial a Ride
References image added in:
- [ ] https://github.com/alphagov/notifications-admin/pull/1321<commit_after>"""empty message
Revision ID: 0098_tfl_dar
Revises: 0097_notnull_inbound_provider
Create Date: 2017-06-05 16:15:17.744908
"""
# revision identifiers, used by Alembic.
revision = '0098_tfl_dar'
down_revision = '0097_notnull_inbound_provider'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
TFL_DAR_ID = '1d70f564-919b-4c68-8bdf-b8520d92516e'
def upgrade():
op.execute("""INSERT INTO organisation VALUES (
'{}',
'',
'tfl_dar_x2.png',
''
)""".format(TFL_DAR_ID))
def downgrade():
op.execute("""
DELETE FROM organisation WHERE "id" = '{}'
""".format(TFL_DAR_ID))
|
|
22585d29220709dc3a3de16b03c626ca27c715ca
|
migrations/versions/3025c44bdb2_.py
|
migrations/versions/3025c44bdb2_.py
|
"""empty message
Revision ID: 3025c44bdb2
Revises: None
Create Date: 2014-12-16 12:13:55.759378
"""
# revision identifiers, used by Alembic.
revision = '3025c44bdb2'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
pass
def downgrade():
pass
|
Add migration version? Not sure if this is right
|
Add migration version? Not sure if this is right
|
Python
|
bsd-2-clause
|
brianwolfe/robotics-tutorial,brianwolfe/robotics-tutorial,brianwolfe/robotics-tutorial
|
Add migration version? Not sure if this is right
|
"""empty message
Revision ID: 3025c44bdb2
Revises: None
Create Date: 2014-12-16 12:13:55.759378
"""
# revision identifiers, used by Alembic.
revision = '3025c44bdb2'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
pass
def downgrade():
pass
|
<commit_before><commit_msg>Add migration version? Not sure if this is right<commit_after>
|
"""empty message
Revision ID: 3025c44bdb2
Revises: None
Create Date: 2014-12-16 12:13:55.759378
"""
# revision identifiers, used by Alembic.
revision = '3025c44bdb2'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
pass
def downgrade():
pass
|
Add migration version? Not sure if this is right"""empty message
Revision ID: 3025c44bdb2
Revises: None
Create Date: 2014-12-16 12:13:55.759378
"""
# revision identifiers, used by Alembic.
revision = '3025c44bdb2'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
pass
def downgrade():
pass
|
<commit_before><commit_msg>Add migration version? Not sure if this is right<commit_after>"""empty message
Revision ID: 3025c44bdb2
Revises: None
Create Date: 2014-12-16 12:13:55.759378
"""
# revision identifiers, used by Alembic.
revision = '3025c44bdb2'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
pass
def downgrade():
pass
|
|
64c70f3f73d14d5bdd18cf5c4ad8b15ec745f517
|
config/check_ascii.py
|
config/check_ascii.py
|
import json
files = ["go-ussd_public.ibo_NG.json"]
def is_ascii(s):
return all(ord(c) < 128 for c in s)
current_message_id = 0
for file_name in files:
json_file = open(file_name, "rU").read()
json_data = json.loads(json_file)
print "Proccessing %s\n-------" % file_name
for key, value in json_data.items():
# Ignore non-content keys and empty keys
if len(value) == 2:
if not is_ascii(value[1]):
print ("Non-ascii translation found of <%s>: %s" % (key, value[1]))
print "Done Proccessing %s\n-------" % file_name
|
Add helpful script for ascii checking - fyi @bruskiza
|
Add helpful script for ascii checking - fyi @bruskiza
|
Python
|
bsd-3-clause
|
praekelt/mama-ng-jsbox,praekelt/mama-ng-jsbox
|
Add helpful script for ascii checking - fyi @bruskiza
|
import json
files = ["go-ussd_public.ibo_NG.json"]
def is_ascii(s):
return all(ord(c) < 128 for c in s)
current_message_id = 0
for file_name in files:
json_file = open(file_name, "rU").read()
json_data = json.loads(json_file)
print "Proccessing %s\n-------" % file_name
for key, value in json_data.items():
# Ignore non-content keys and empty keys
if len(value) == 2:
if not is_ascii(value[1]):
print ("Non-ascii translation found of <%s>: %s" % (key, value[1]))
print "Done Proccessing %s\n-------" % file_name
|
<commit_before><commit_msg>Add helpful script for ascii checking - fyi @bruskiza<commit_after>
|
import json
files = ["go-ussd_public.ibo_NG.json"]
def is_ascii(s):
return all(ord(c) < 128 for c in s)
current_message_id = 0
for file_name in files:
json_file = open(file_name, "rU").read()
json_data = json.loads(json_file)
print "Proccessing %s\n-------" % file_name
for key, value in json_data.items():
# Ignore non-content keys and empty keys
if len(value) == 2:
if not is_ascii(value[1]):
print ("Non-ascii translation found of <%s>: %s" % (key, value[1]))
print "Done Proccessing %s\n-------" % file_name
|
Add helpful script for ascii checking - fyi @bruskizaimport json
files = ["go-ussd_public.ibo_NG.json"]
def is_ascii(s):
return all(ord(c) < 128 for c in s)
current_message_id = 0
for file_name in files:
json_file = open(file_name, "rU").read()
json_data = json.loads(json_file)
print "Proccessing %s\n-------" % file_name
for key, value in json_data.items():
# Ignore non-content keys and empty keys
if len(value) == 2:
if not is_ascii(value[1]):
print ("Non-ascii translation found of <%s>: %s" % (key, value[1]))
print "Done Proccessing %s\n-------" % file_name
|
<commit_before><commit_msg>Add helpful script for ascii checking - fyi @bruskiza<commit_after>import json
files = ["go-ussd_public.ibo_NG.json"]
def is_ascii(s):
return all(ord(c) < 128 for c in s)
current_message_id = 0
for file_name in files:
json_file = open(file_name, "rU").read()
json_data = json.loads(json_file)
print "Proccessing %s\n-------" % file_name
for key, value in json_data.items():
# Ignore non-content keys and empty keys
if len(value) == 2:
if not is_ascii(value[1]):
print ("Non-ascii translation found of <%s>: %s" % (key, value[1]))
print "Done Proccessing %s\n-------" % file_name
|
|
289ce4a720c5863f6a80e1b86083fd2919b52f14
|
tests/startsymbol_tests/NotNonterminalTest.py
|
tests/startsymbol_tests/NotNonterminalTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 10.08.2017 23:12
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class NotNonterminalTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Add file for tests of start symbol as not nonterminal
|
Add file for tests of start symbol as not nonterminal
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add file for tests of start symbol as not nonterminal
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 10.08.2017 23:12
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class NotNonterminalTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file for tests of start symbol as not nonterminal<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 10.08.2017 23:12
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class NotNonterminalTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Add file for tests of start symbol as not nonterminal#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 10.08.2017 23:12
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class NotNonterminalTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file for tests of start symbol as not nonterminal<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 10.08.2017 23:12
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class NotNonterminalTest(TestCase):
pass
if __name__ == '__main__':
main()
|
|
e075b0b1c8d581107209e869eda7f6ff07a7321c
|
reverse_dict.py
|
reverse_dict.py
|
"""Reverse modern->historic spelling variants dictonary to historic->modern
mappings
"""
import argparse
import codecs
import json
from collections import Counter
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dict', help='the name of the json file '
'containing the modern->spelling variants dictionary')
args = parser.parse_args()
dict_file = args.input_dict
modern_dict = {}
historic_dict = {}
with codecs.open(dict_file, 'rb', 'utf8') as f:
modern_dict = json.load(f, encoding='utf-8')
for modern_word, variants in modern_dict.iteritems():
for var in variants:
if var not in historic_dict.keys():
historic_dict[var] = Counter()
historic_dict[var][modern_word] += 1
print '#words in modern dict: {}'.format(len(modern_dict))
print '#words in historic dict: {}'.format(len(historic_dict))
    # find historic words that map to multiple terms
mappings_counter = Counter()
print '\nhistoric word\tmodern variant\tfrequency'
for w, mappings in historic_dict.iteritems():
mappings_counter[str(len(mappings)).zfill(3)] += 1
if len(mappings) > 1:
for variant, freq in mappings.iteritems():
print '{}\t{}\t{}'.format(w, variant, freq)
mp = mappings_counter.keys()
mp.sort()
print '\n#mappings\t#historic words'
for m in mp:
print '{}\t{}'.format(m, mappings_counter[m])
|
Add script to create a historic->modern dictionary
|
Add script to create a historic->modern dictionary
The script takes as input a modern->historic variants dictionary in a
json file (created by the generate_historic_liwc script) and
reverses it to a dictionary that can be used to replace historic words
with their modern variant(s). Currently, the script only prints
statistics about the reversal (e.g., the number of modern and historic
words, frequencies of historic words that map to multiple modern words).
|
Python
|
apache-2.0
|
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
|
Add script to create a historic->modern dictionary
The script takes as input a modern->historic variants dictionary in a
json file (created by the generate_historic_liwc script) and
reverses it to a dictionary that can be used to replace historic words
with their modern variant(s). Currently, the script only prints
statistics about the reversal (e.g., the number of modern and historic
words, frequencies of historic words that map to multiple modern words).
|
"""Reverse modern->historic spelling variants dictonary to historic->modern
mappings
"""
import argparse
import codecs
import json
from collections import Counter
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dict', help='the name of the json file '
'containing the modern->spelling variants dictionary')
args = parser.parse_args()
dict_file = args.input_dict
modern_dict = {}
historic_dict = {}
with codecs.open(dict_file, 'rb', 'utf8') as f:
modern_dict = json.load(f, encoding='utf-8')
for modern_word, variants in modern_dict.iteritems():
for var in variants:
if var not in historic_dict.keys():
historic_dict[var] = Counter()
historic_dict[var][modern_word] += 1
print '#words in modern dict: {}'.format(len(modern_dict))
print '#words in historic dict: {}'.format(len(historic_dict))
    # find historic words that map to multiple terms
mappings_counter = Counter()
print '\nhistoric word\tmodern variant\tfrequency'
for w, mappings in historic_dict.iteritems():
mappings_counter[str(len(mappings)).zfill(3)] += 1
if len(mappings) > 1:
for variant, freq in mappings.iteritems():
print '{}\t{}\t{}'.format(w, variant, freq)
mp = mappings_counter.keys()
mp.sort()
print '\n#mappings\t#historic words'
for m in mp:
print '{}\t{}'.format(m, mappings_counter[m])
|
<commit_before><commit_msg>Add script to create a historic->modern dictionary
The script takes as input a modern->historic variants dictionary in a
json file (created by the generate_historic_liwc script) and
reverses it to a dictionary that can be used to replace historic words
with their modern variant(s). Currently, the script only prints
statistics about the reversal (e.g., the number of modern and historic
words, frequencies of historic words that map to multiple modern words).<commit_after>
|
"""Reverse modern->historic spelling variants dictonary to historic->modern
mappings
"""
import argparse
import codecs
import json
from collections import Counter
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dict', help='the name of the json file '
'containing the modern->spelling variants dictionary')
args = parser.parse_args()
dict_file = args.input_dict
modern_dict = {}
historic_dict = {}
with codecs.open(dict_file, 'rb', 'utf8') as f:
modern_dict = json.load(f, encoding='utf-8')
for modern_word, variants in modern_dict.iteritems():
for var in variants:
if var not in historic_dict.keys():
historic_dict[var] = Counter()
historic_dict[var][modern_word] += 1
print '#words in modern dict: {}'.format(len(modern_dict))
print '#words in historic dict: {}'.format(len(historic_dict))
    # find historic words that map to multiple terms
mappings_counter = Counter()
print '\nhistoric word\tmodern variant\tfrequency'
for w, mappings in historic_dict.iteritems():
mappings_counter[str(len(mappings)).zfill(3)] += 1
if len(mappings) > 1:
for variant, freq in mappings.iteritems():
print '{}\t{}\t{}'.format(w, variant, freq)
mp = mappings_counter.keys()
mp.sort()
print '\n#mappings\t#historic words'
for m in mp:
print '{}\t{}'.format(m, mappings_counter[m])
|
Add script to create a historic->modern dictionary
The script takes as input a modern->historic variants dictionary in a
json file (created by the generate_historic_liwc script) and
reverses it to a dictionary that can be used to replace historic words
with their modern variant(s). Currently, the script only prints
statistics about the reversal (e.g., the number of modern and historic
words, frequencies of historic words that map to multiple modern words)."""Reverse modern->historic spelling variants dictionary to historic->modern
mappings
"""
import argparse
import codecs
import json
from collections import Counter
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dict', help='the name of the json file '
'containing the modern->spelling variants dictionary')
args = parser.parse_args()
dict_file = args.input_dict
modern_dict = {}
historic_dict = {}
with codecs.open(dict_file, 'rb', 'utf8') as f:
modern_dict = json.load(f, encoding='utf-8')
for modern_word, variants in modern_dict.iteritems():
for var in variants:
if var not in historic_dict.keys():
historic_dict[var] = Counter()
historic_dict[var][modern_word] += 1
print '#words in modern dict: {}'.format(len(modern_dict))
print '#words in historic dict: {}'.format(len(historic_dict))
    # find historic words that map to multiple terms
mappings_counter = Counter()
print '\nhistoric word\tmodern variant\tfrequency'
for w, mappings in historic_dict.iteritems():
mappings_counter[str(len(mappings)).zfill(3)] += 1
if len(mappings) > 1:
for variant, freq in mappings.iteritems():
print '{}\t{}\t{}'.format(w, variant, freq)
mp = mappings_counter.keys()
mp.sort()
print '\n#mappings\t#historic words'
for m in mp:
print '{}\t{}'.format(m, mappings_counter[m])
|
<commit_before><commit_msg>Add script to create a historic->modern dictionary
The script takes as input a modern->historic variants dictionary in a
json file (created by the generate_historic_liwc script) and
reverses it to a dictionary that can be used to replace historic words
with their modern variant(s). Currently, the script only prints
statistics about the reversal (e.g., the number of modern and historic
words, frequencies of historic words that map to multiple modern words).<commit_after>"""Reverse modern->historic spelling variants dictionary to historic->modern
mappings
"""
import argparse
import codecs
import json
from collections import Counter
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dict', help='the name of the json file '
'containing the modern->spelling variants dictionary')
args = parser.parse_args()
dict_file = args.input_dict
modern_dict = {}
historic_dict = {}
with codecs.open(dict_file, 'rb', 'utf8') as f:
modern_dict = json.load(f, encoding='utf-8')
for modern_word, variants in modern_dict.iteritems():
for var in variants:
if var not in historic_dict.keys():
historic_dict[var] = Counter()
historic_dict[var][modern_word] += 1
print '#words in modern dict: {}'.format(len(modern_dict))
print '#words in historic dict: {}'.format(len(historic_dict))
    # find historic words that map to multiple terms
mappings_counter = Counter()
print '\nhistoric word\tmodern variant\tfrequency'
for w, mappings in historic_dict.iteritems():
mappings_counter[str(len(mappings)).zfill(3)] += 1
if len(mappings) > 1:
for variant, freq in mappings.iteritems():
print '{}\t{}\t{}'.format(w, variant, freq)
mp = mappings_counter.keys()
mp.sort()
print '\n#mappings\t#historic words'
for m in mp:
print '{}\t{}'.format(m, mappings_counter[m])
|
|
fb6eee18b2bf48dd0063623515ced00e980bdf10
|
nipype/utils/tests/test_docparse.py
|
nipype/utils/tests/test_docparse.py
|
from nipype.testing import *
from nipype.utils.docparse import reverse_opt_map, build_doc
class Foo(object):
opt_map = {'outline': '-o', 'fun': '-f %.2f', 'flags': '%s'}
foo_doc = """Usage: foo infile outfile [opts]
Bunch of options:
-o something about an outline
-f <f> intensity of fun factor
Other stuff:
-v verbose
"""
fmtd_doc = """Parameters
----------
outline :
something about an outline
fun :
<f> intensity of fun factor
Others Parameters
-----------------
-v verbose"""
def test_rev_opt_map():
map = {'-f': 'fun', '-o': 'outline'}
rev_map = reverse_opt_map(Foo.opt_map)
assert_equal(rev_map, map)
def test_build_doc():
opts = reverse_opt_map(Foo.opt_map)
doc = build_doc(foo_doc, opts)
assert_equal(doc, fmtd_doc)
|
Add a few tests for docparse.
|
Add a few tests for docparse.
git-svn-id: 24f545668198cdd163a527378499f2123e59bf9f@338 ead46cd0-7350-4e37-8683-fc4c6f79bf00
|
Python
|
bsd-3-clause
|
carolFrohlich/nipype,carolFrohlich/nipype,sgiavasis/nipype,gerddie/nipype,christianbrodbeck/nipype,glatard/nipype,grlee77/nipype,satra/NiPypeold,arokem/nipype,wanderine/nipype,mick-d/nipype,carlohamalainen/nipype,rameshvs/nipype,dgellis90/nipype,blakedewey/nipype,glatard/nipype,gerddie/nipype,grlee77/nipype,Leoniela/nipype,dgellis90/nipype,mick-d/nipype_source,iglpdc/nipype,iglpdc/nipype,arokem/nipype,fprados/nipype,satra/NiPypeold,dmordom/nipype,mick-d/nipype_source,Leoniela/nipype,Leoniela/nipype,sgiavasis/nipype,christianbrodbeck/nipype,mick-d/nipype_source,gerddie/nipype,rameshvs/nipype,carolFrohlich/nipype,wanderine/nipype,pearsonlab/nipype,JohnGriffiths/nipype,wanderine/nipype,arokem/nipype,fprados/nipype,wanderine/nipype,blakedewey/nipype,JohnGriffiths/nipype,iglpdc/nipype,blakedewey/nipype,pearsonlab/nipype,dgellis90/nipype,glatard/nipype,dgellis90/nipype,gerddie/nipype,FredLoney/nipype,FCP-INDI/nipype,rameshvs/nipype,FredLoney/nipype,arokem/nipype,dmordom/nipype,dmordom/nipype,sgiavasis/nipype,carlohamalainen/nipype,grlee77/nipype,sgiavasis/nipype,JohnGriffiths/nipype,carolFrohlich/nipype,mick-d/nipype,pearsonlab/nipype,FCP-INDI/nipype,FCP-INDI/nipype,iglpdc/nipype,grlee77/nipype,fprados/nipype,pearsonlab/nipype,mick-d/nipype,FredLoney/nipype,mick-d/nipype,carlohamalainen/nipype,FCP-INDI/nipype,glatard/nipype,JohnGriffiths/nipype,rameshvs/nipype,blakedewey/nipype
|
Add a few tests for docparse.
git-svn-id: 24f545668198cdd163a527378499f2123e59bf9f@338 ead46cd0-7350-4e37-8683-fc4c6f79bf00
|
from nipype.testing import *
from nipype.utils.docparse import reverse_opt_map, build_doc
class Foo(object):
opt_map = {'outline': '-o', 'fun': '-f %.2f', 'flags': '%s'}
foo_doc = """Usage: foo infile outfile [opts]
Bunch of options:
-o something about an outline
-f <f> intensity of fun factor
Other stuff:
-v verbose
"""
fmtd_doc = """Parameters
----------
outline :
something about an outline
fun :
<f> intensity of fun factor
Others Parameters
-----------------
-v verbose"""
def test_rev_opt_map():
map = {'-f': 'fun', '-o': 'outline'}
rev_map = reverse_opt_map(Foo.opt_map)
assert_equal(rev_map, map)
def test_build_doc():
opts = reverse_opt_map(Foo.opt_map)
doc = build_doc(foo_doc, opts)
assert_equal(doc, fmtd_doc)
|
<commit_before><commit_msg>Add a few tests for docparse.
git-svn-id: 24f545668198cdd163a527378499f2123e59bf9f@338 ead46cd0-7350-4e37-8683-fc4c6f79bf00<commit_after>
|
from nipype.testing import *
from nipype.utils.docparse import reverse_opt_map, build_doc
class Foo(object):
opt_map = {'outline': '-o', 'fun': '-f %.2f', 'flags': '%s'}
foo_doc = """Usage: foo infile outfile [opts]
Bunch of options:
-o something about an outline
-f <f> intensity of fun factor
Other stuff:
-v verbose
"""
fmtd_doc = """Parameters
----------
outline :
something about an outline
fun :
<f> intensity of fun factor
Others Parameters
-----------------
-v verbose"""
def test_rev_opt_map():
map = {'-f': 'fun', '-o': 'outline'}
rev_map = reverse_opt_map(Foo.opt_map)
assert_equal(rev_map, map)
def test_build_doc():
opts = reverse_opt_map(Foo.opt_map)
doc = build_doc(foo_doc, opts)
assert_equal(doc, fmtd_doc)
|
Add a few tests for docparse.
git-svn-id: 24f545668198cdd163a527378499f2123e59bf9f@338 ead46cd0-7350-4e37-8683-fc4c6f79bf00from nipype.testing import *
from nipype.utils.docparse import reverse_opt_map, build_doc
class Foo(object):
opt_map = {'outline': '-o', 'fun': '-f %.2f', 'flags': '%s'}
foo_doc = """Usage: foo infile outfile [opts]
Bunch of options:
-o something about an outline
-f <f> intensity of fun factor
Other stuff:
-v verbose
"""
fmtd_doc = """Parameters
----------
outline :
something about an outline
fun :
<f> intensity of fun factor
Others Parameters
-----------------
-v verbose"""
def test_rev_opt_map():
map = {'-f': 'fun', '-o': 'outline'}
rev_map = reverse_opt_map(Foo.opt_map)
assert_equal(rev_map, map)
def test_build_doc():
opts = reverse_opt_map(Foo.opt_map)
doc = build_doc(foo_doc, opts)
assert_equal(doc, fmtd_doc)
|
<commit_before><commit_msg>Add a few tests for docparse.
git-svn-id: 24f545668198cdd163a527378499f2123e59bf9f@338 ead46cd0-7350-4e37-8683-fc4c6f79bf00<commit_after>from nipype.testing import *
from nipype.utils.docparse import reverse_opt_map, build_doc
class Foo(object):
opt_map = {'outline': '-o', 'fun': '-f %.2f', 'flags': '%s'}
foo_doc = """Usage: foo infile outfile [opts]
Bunch of options:
-o something about an outline
-f <f> intensity of fun factor
Other stuff:
-v verbose
"""
fmtd_doc = """Parameters
----------
outline :
something about an outline
fun :
<f> intensity of fun factor
Others Parameters
-----------------
-v verbose"""
def test_rev_opt_map():
map = {'-f': 'fun', '-o': 'outline'}
rev_map = reverse_opt_map(Foo.opt_map)
assert_equal(rev_map, map)
def test_build_doc():
opts = reverse_opt_map(Foo.opt_map)
doc = build_doc(foo_doc, opts)
assert_equal(doc, fmtd_doc)
|
|
555dac76a8810cfeaae96f8de04e9eb3362a3314
|
migrations/versions/0109_rem_old_noti_status.py
|
migrations/versions/0109_rem_old_noti_status.py
|
"""
Revision ID: 0109_rem_old_noti_status
Revises: 0108_change_logo_not_nullable
Create Date: 2017-07-10 14:25:15.712055
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0109_rem_old_noti_status'
down_revision = '0108_change_logo_not_nullable'
def upgrade():
op.drop_column('notification_history', 'status')
op.drop_column('notifications', 'status')
def downgrade():
op.add_column(
'notifications',
sa.Column(
'status',
postgresql.ENUM(
'created', 'sending', 'delivered', 'pending', 'failed', 'technical-failure',
'temporary-failure', 'permanent-failure', 'sent', name='notify_status_type'
),
autoincrement=False,
nullable=True
)
)
op.add_column(
'notification_history',
sa.Column(
'status',
postgresql.ENUM(
'created', 'sending', 'delivered', 'pending', 'failed', 'technical-failure',
'temporary-failure', 'permanent-failure', 'sent', name='notify_status_type'
),
autoincrement=False,
nullable=True
)
)
|
Remove old notification status column
|
Remove old notification status column
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Remove old notification status column
|
"""
Revision ID: 0109_rem_old_noti_status
Revises: 0108_change_logo_not_nullable
Create Date: 2017-07-10 14:25:15.712055
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0109_rem_old_noti_status'
down_revision = '0108_change_logo_not_nullable'
def upgrade():
op.drop_column('notification_history', 'status')
op.drop_column('notifications', 'status')
def downgrade():
op.add_column(
'notifications',
sa.Column(
'status',
postgresql.ENUM(
'created', 'sending', 'delivered', 'pending', 'failed', 'technical-failure',
'temporary-failure', 'permanent-failure', 'sent', name='notify_status_type'
),
autoincrement=False,
nullable=True
)
)
op.add_column(
'notification_history',
sa.Column(
'status',
postgresql.ENUM(
'created', 'sending', 'delivered', 'pending', 'failed', 'technical-failure',
'temporary-failure', 'permanent-failure', 'sent', name='notify_status_type'
),
autoincrement=False,
nullable=True
)
)
|
<commit_before><commit_msg>Remove old notification status column<commit_after>
|
"""
Revision ID: 0109_rem_old_noti_status
Revises: 0108_change_logo_not_nullable
Create Date: 2017-07-10 14:25:15.712055
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0109_rem_old_noti_status'
down_revision = '0108_change_logo_not_nullable'
def upgrade():
op.drop_column('notification_history', 'status')
op.drop_column('notifications', 'status')
def downgrade():
op.add_column(
'notifications',
sa.Column(
'status',
postgresql.ENUM(
'created', 'sending', 'delivered', 'pending', 'failed', 'technical-failure',
'temporary-failure', 'permanent-failure', 'sent', name='notify_status_type'
),
autoincrement=False,
nullable=True
)
)
op.add_column(
'notification_history',
sa.Column(
'status',
postgresql.ENUM(
'created', 'sending', 'delivered', 'pending', 'failed', 'technical-failure',
'temporary-failure', 'permanent-failure', 'sent', name='notify_status_type'
),
autoincrement=False,
nullable=True
)
)
|
Remove old notification status column"""
Revision ID: 0109_rem_old_noti_status
Revises: 0108_change_logo_not_nullable
Create Date: 2017-07-10 14:25:15.712055
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0109_rem_old_noti_status'
down_revision = '0108_change_logo_not_nullable'
def upgrade():
op.drop_column('notification_history', 'status')
op.drop_column('notifications', 'status')
def downgrade():
op.add_column(
'notifications',
sa.Column(
'status',
postgresql.ENUM(
'created', 'sending', 'delivered', 'pending', 'failed', 'technical-failure',
'temporary-failure', 'permanent-failure', 'sent', name='notify_status_type'
),
autoincrement=False,
nullable=True
)
)
op.add_column(
'notification_history',
sa.Column(
'status',
postgresql.ENUM(
'created', 'sending', 'delivered', 'pending', 'failed', 'technical-failure',
'temporary-failure', 'permanent-failure', 'sent', name='notify_status_type'
),
autoincrement=False,
nullable=True
)
)
|
<commit_before><commit_msg>Remove old notification status column<commit_after>"""
Revision ID: 0109_rem_old_noti_status
Revises: 0108_change_logo_not_nullable
Create Date: 2017-07-10 14:25:15.712055
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0109_rem_old_noti_status'
down_revision = '0108_change_logo_not_nullable'
def upgrade():
op.drop_column('notification_history', 'status')
op.drop_column('notifications', 'status')
def downgrade():
op.add_column(
'notifications',
sa.Column(
'status',
postgresql.ENUM(
'created', 'sending', 'delivered', 'pending', 'failed', 'technical-failure',
'temporary-failure', 'permanent-failure', 'sent', name='notify_status_type'
),
autoincrement=False,
nullable=True
)
)
op.add_column(
'notification_history',
sa.Column(
'status',
postgresql.ENUM(
'created', 'sending', 'delivered', 'pending', 'failed', 'technical-failure',
'temporary-failure', 'permanent-failure', 'sent', name='notify_status_type'
),
autoincrement=False,
nullable=True
)
)
|
|
21a67556b83b7905134439d55afe33c35e4b3422
|
migrations/versions/0246_notifications_index.py
|
migrations/versions/0246_notifications_index.py
|
"""
Revision ID: 0246_notifications_index
Revises: 0245_archived_flag_jobs
Create Date: 2018-12-12 12:00:09.770775
"""
from alembic import op
revision = '0246_notifications_index'
down_revision = '0245_archived_flag_jobs'
def upgrade():
conn = op.get_bind()
conn.execute(
"CREATE INDEX IF NOT EXISTS ix_notifications_service_created_at ON notifications (service_id, created_at)"
)
def downgrade():
conn = op.get_bind()
conn.execute(
"DROP INDEX IF EXISTS ix_notifications_service_created_at"
)
|
Add an index on notifications for (service_id, created_at) to improve the performance of the notification queries. We've already performed this update on production since you need to create the index concurrently, which is not allowed from the alembic script. For that reason we are checking if the index exists.
|
Add an index on notifications for (service_id, created_at) to improve the performance of the notification queries.
We've already performed this update on production since you need to create the index concurrently, which is not allowed from the alembic script. For that reason we are checking if the index exists.
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add an index on notifications for (service_id, created_at) to improve the performance of the notification queries.
We've already performed this update on production since you need to create the index concurrently, which is not allowed from the alembic script. For that reason we are checking if the index exists.
|
"""
Revision ID: 0246_notifications_index
Revises: 0245_archived_flag_jobs
Create Date: 2018-12-12 12:00:09.770775
"""
from alembic import op
revision = '0246_notifications_index'
down_revision = '0245_archived_flag_jobs'
def upgrade():
conn = op.get_bind()
conn.execute(
"CREATE INDEX IF NOT EXISTS ix_notifications_service_created_at ON notifications (service_id, created_at)"
)
def downgrade():
conn = op.get_bind()
conn.execute(
"DROP INDEX IF EXISTS ix_notifications_service_created_at"
)
|
<commit_before><commit_msg>Add an index on notifications for (service_id, created_at) to improve the performance of the notification queries.
We've already performed this update on production since you need to create the index concurrently, which is not allowed from the alembic script. For that reason we are checking if the index exists.<commit_after>
|
"""
Revision ID: 0246_notifications_index
Revises: 0245_archived_flag_jobs
Create Date: 2018-12-12 12:00:09.770775
"""
from alembic import op
revision = '0246_notifications_index'
down_revision = '0245_archived_flag_jobs'
def upgrade():
conn = op.get_bind()
conn.execute(
"CREATE INDEX IF NOT EXISTS ix_notifications_service_created_at ON notifications (service_id, created_at)"
)
def downgrade():
conn = op.get_bind()
conn.execute(
"DROP INDEX IF EXISTS ix_notifications_service_created_at"
)
|
Add an index on notifications for (service_id, created_at) to improve the performance of the notification queries.
We've already performed this update on production since you need to create the index concurrently, which is not allowed from the alembic script. For that reason we are checking if the index exists."""
Revision ID: 0246_notifications_index
Revises: 0245_archived_flag_jobs
Create Date: 2018-12-12 12:00:09.770775
"""
from alembic import op
revision = '0246_notifications_index'
down_revision = '0245_archived_flag_jobs'
def upgrade():
conn = op.get_bind()
conn.execute(
"CREATE INDEX IF NOT EXISTS ix_notifications_service_created_at ON notifications (service_id, created_at)"
)
def downgrade():
conn = op.get_bind()
conn.execute(
"DROP INDEX IF EXISTS ix_notifications_service_created_at"
)
|
<commit_before><commit_msg>Add an index on notifications for (service_id, created_at) to improve the performance of the notification queries.
We've already performed this update on production since you need to create the index concurrently, which is not allowed from the alembic script. For that reason we are checking if the index exists.<commit_after>"""
Revision ID: 0246_notifications_index
Revises: 0245_archived_flag_jobs
Create Date: 2018-12-12 12:00:09.770775
"""
from alembic import op
revision = '0246_notifications_index'
down_revision = '0245_archived_flag_jobs'
def upgrade():
conn = op.get_bind()
conn.execute(
"CREATE INDEX IF NOT EXISTS ix_notifications_service_created_at ON notifications (service_id, created_at)"
)
def downgrade():
conn = op.get_bind()
conn.execute(
"DROP INDEX IF EXISTS ix_notifications_service_created_at"
)
|
|
5acc7d50cbe199af49aece28b95ea97484ae31c7
|
snake/solutions/ghiaEtAl1982.py
|
snake/solutions/ghiaEtAl1982.py
|
"""
Implementation of the class `GhiaEtAl1982` that reads the centerline velocities
reported in Ghia et al. (1982).
_References:_
* Ghia, U. K. N. G., Ghia, K. N., & Shin, C. T. (1982).
High-Re solutions for incompressible flow using the Navier-Stokes equations
and a multigrid method.
Journal of computational physics, 48(3), 387-411.
"""
import os
import numpy
class GhiaEtAl1982(object):
"""
Container to store results from Ghia et al. (1982).
"""
def __init__(self, Re=None, file_path=None):
"""
Initialization.
Parameters
----------
Re: float, optional
Desired Reynolds number;
default: None.
file_path: string, optional
Path of the file containing the validation data;
default: None.
"""
self.y, self.u = None, None
self.x, self.v = None, None
if Re:
self.read_centerline_velocities(Re, file_path=file_path)
def read_centerline_velocities(self, Re, file_path=None):
"""
Reads the centerline velocities from file and for a given Reynolds number.
Parameters
----------
Re: float
Desired Reynolds number.
file_path: string, optional
Path of the file containing the validation data;
      default: None (will read the file located in the `resources` directory of
the `snake` package).
"""
if not file_path:
file_path = os.path.join(os.environ['SNAKE'],
'resources',
'validationData',
'ghia_et_al_1982_lid_driven_cavity.dat')
Re = str(int(round(Re)))
# column indices in file with experimental results
cols = {'100': {'u': 1, 'v': 7},
'1000': {'u': 2, 'v': 8},
'3200': {'u': 3, 'v': 9},
'5000': {'u': 4, 'v': 10},
'10000': {'u': 5, 'v': 11}}
with open(file_path, 'r') as infile:
y, u, x, v = numpy.loadtxt(infile,
dtype=float,
usecols=(0, cols[Re]['u'], 6, cols[Re]['v']),
unpack=True)
self.y, self.u = y, u
self.x, self.v = x, v
|
Add solution class for Ghia et al. (1982)
|
Add solution class for Ghia et al. (1982)
|
Python
|
mit
|
mesnardo/snake
|
Add solution class for Ghia et al. (1982)
|
"""
Implementation of the class `GhiaEtAl1982` that reads the centerline velocities
reported in Ghia et al. (1982).
_References:_
* Ghia, U. K. N. G., Ghia, K. N., & Shin, C. T. (1982).
High-Re solutions for incompressible flow using the Navier-Stokes equations
and a multigrid method.
Journal of computational physics, 48(3), 387-411.
"""
import os
import numpy
class GhiaEtAl1982(object):
"""
Container to store results from Ghia et al. (1982).
"""
def __init__(self, Re=None, file_path=None):
"""
Initialization.
Parameters
----------
Re: float, optional
Desired Reynolds number;
default: None.
file_path: string, optional
Path of the file containing the validation data;
default: None.
"""
self.y, self.u = None, None
self.x, self.v = None, None
if Re:
self.read_centerline_velocities(Re, file_path=file_path)
def read_centerline_velocities(self, Re, file_path=None):
"""
Reads the centerline velocities from file and for a given Reynolds number.
Parameters
----------
Re: float
Desired Reynolds number.
file_path: string, optional
Path of the file containing the validation data;
      default: None (will read the file located in the `resources` directory of
the `snake` package).
"""
if not file_path:
file_path = os.path.join(os.environ['SNAKE'],
'resources',
'validationData',
'ghia_et_al_1982_lid_driven_cavity.dat')
Re = str(int(round(Re)))
# column indices in file with experimental results
cols = {'100': {'u': 1, 'v': 7},
'1000': {'u': 2, 'v': 8},
'3200': {'u': 3, 'v': 9},
'5000': {'u': 4, 'v': 10},
'10000': {'u': 5, 'v': 11}}
with open(file_path, 'r') as infile:
y, u, x, v = numpy.loadtxt(infile,
dtype=float,
usecols=(0, cols[Re]['u'], 6, cols[Re]['v']),
unpack=True)
self.y, self.u = y, u
self.x, self.v = x, v
|
<commit_before><commit_msg>Add solution class for Ghia et al. (1982)<commit_after>
|
"""
Implementation of the class `GhiaEtAl1982` that reads the centerline velocities
reported in Ghia et al. (1982).
_References:_
* Ghia, U. K. N. G., Ghia, K. N., & Shin, C. T. (1982).
High-Re solutions for incompressible flow using the Navier-Stokes equations
and a multigrid method.
Journal of computational physics, 48(3), 387-411.
"""
import os
import numpy
class GhiaEtAl1982(object):
"""
Container to store results from Ghia et al. (1982).
"""
def __init__(self, Re=None, file_path=None):
"""
Initialization.
Parameters
----------
Re: float, optional
Desired Reynolds number;
default: None.
file_path: string, optional
Path of the file containing the validation data;
default: None.
"""
self.y, self.u = None, None
self.x, self.v = None, None
if Re:
self.read_centerline_velocities(Re, file_path=file_path)
def read_centerline_velocities(self, Re, file_path=None):
"""
Reads the centerline velocities from file and for a given Reynolds number.
Parameters
----------
Re: float
Desired Reynolds number.
file_path: string, optional
Path of the file containing the validation data;
      default: None (will read the file located in the `resources` directory of
the `snake` package).
"""
if not file_path:
file_path = os.path.join(os.environ['SNAKE'],
'resources',
'validationData',
'ghia_et_al_1982_lid_driven_cavity.dat')
Re = str(int(round(Re)))
# column indices in file with experimental results
cols = {'100': {'u': 1, 'v': 7},
'1000': {'u': 2, 'v': 8},
'3200': {'u': 3, 'v': 9},
'5000': {'u': 4, 'v': 10},
'10000': {'u': 5, 'v': 11}}
with open(file_path, 'r') as infile:
y, u, x, v = numpy.loadtxt(infile,
dtype=float,
usecols=(0, cols[Re]['u'], 6, cols[Re]['v']),
unpack=True)
self.y, self.u = y, u
self.x, self.v = x, v
|
Add solution class for Ghia et al. (1982)"""
Implementation of the class `GhiaEtAl1982` that reads the centerline velocities
reported in Ghia et al. (1982).
_References:_
* Ghia, U. K. N. G., Ghia, K. N., & Shin, C. T. (1982).
High-Re solutions for incompressible flow using the Navier-Stokes equations
and a multigrid method.
Journal of computational physics, 48(3), 387-411.
"""
import os
import numpy
class GhiaEtAl1982(object):
"""
Container to store results from Ghia et al. (1982).
"""
def __init__(self, Re=None, file_path=None):
"""
Initialization.
Parameters
----------
Re: float, optional
Desired Reynolds number;
default: None.
file_path: string, optional
Path of the file containing the validation data;
default: None.
"""
self.y, self.u = None, None
self.x, self.v = None, None
if Re:
self.read_centerline_velocities(Re, file_path=file_path)
def read_centerline_velocities(self, Re, file_path=None):
"""
Reads the centerline velocities from file and for a given Reynolds number.
Parameters
----------
Re: float
Desired Reynolds number.
file_path: string, optional
Path of the file containing the validation data;
      default: None (will read the file located in the `resources` directory of
the `snake` package).
"""
if not file_path:
file_path = os.path.join(os.environ['SNAKE'],
'resources',
'validationData',
'ghia_et_al_1982_lid_driven_cavity.dat')
Re = str(int(round(Re)))
# column indices in file with experimental results
cols = {'100': {'u': 1, 'v': 7},
'1000': {'u': 2, 'v': 8},
'3200': {'u': 3, 'v': 9},
'5000': {'u': 4, 'v': 10},
'10000': {'u': 5, 'v': 11}}
with open(file_path, 'r') as infile:
y, u, x, v = numpy.loadtxt(infile,
dtype=float,
usecols=(0, cols[Re]['u'], 6, cols[Re]['v']),
unpack=True)
self.y, self.u = y, u
self.x, self.v = x, v
|
<commit_before><commit_msg>Add solution class for Ghia et al. (1982)<commit_after>"""
Implementation of the class `GhiaEtAl1982` that reads the centerline velocities
reported in Ghia et al. (1982).
_References:_
* Ghia, U. K. N. G., Ghia, K. N., & Shin, C. T. (1982).
High-Re solutions for incompressible flow using the Navier-Stokes equations
and a multigrid method.
Journal of computational physics, 48(3), 387-411.
"""
import os
import numpy
class GhiaEtAl1982(object):
"""
Container to store results from Ghia et al. (1982).
"""
def __init__(self, Re=None, file_path=None):
"""
Initialization.
Parameters
----------
Re: float, optional
Desired Reynolds number;
default: None.
file_path: string, optional
Path of the file containing the validation data;
default: None.
"""
self.y, self.u = None, None
self.x, self.v = None, None
if Re:
self.read_centerline_velocities(Re, file_path=file_path)
def read_centerline_velocities(self, Re, file_path=None):
"""
Reads the centerline velocities from file and for a given Reynolds number.
Parameters
----------
Re: float
Desired Reynolds number.
file_path: string, optional
Path of the file containing the validation data;
      default: None (will read the file located in the `resources` directory of
the `snake` package).
"""
if not file_path:
file_path = os.path.join(os.environ['SNAKE'],
'resources',
'validationData',
'ghia_et_al_1982_lid_driven_cavity.dat')
Re = str(int(round(Re)))
# column indices in file with experimental results
cols = {'100': {'u': 1, 'v': 7},
'1000': {'u': 2, 'v': 8},
'3200': {'u': 3, 'v': 9},
'5000': {'u': 4, 'v': 10},
'10000': {'u': 5, 'v': 11}}
with open(file_path, 'r') as infile:
y, u, x, v = numpy.loadtxt(infile,
dtype=float,
usecols=(0, cols[Re]['u'], 6, cols[Re]['v']),
unpack=True)
self.y, self.u = y, u
self.x, self.v = x, v
|
|
14068a2e3ca445c02895aed38420baf846338aae
|
scripts/examples/25-Machine-Learning/nn_haar_smile_detection.py
|
scripts/examples/25-Machine-Learning/nn_haar_smile_detection.py
|
# Smile detection using Haar Cascade + CNN.
import sensor, time, image, os, nn
sensor.reset() # Reset and initialize the sensor.
sensor.set_contrast(2)
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to grayscale
sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120)
sensor.skip_frames(time=2000)
sensor.set_auto_gain(False)
# Load smile detection network
net = nn.load('/smile.network')
# Load Face Haar Cascade
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)
# FPS clock
clock = time.clock()
while (True):
clock.tick()
# Capture snapshot
img = sensor.snapshot()
# Find faces.
objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)
# Detect smiles
for r in objects:
img.draw_rectangle(r)
out = net.forward(img, roi=r, softmax=True)
img.draw_string(r[0], r[1], ':)' if (out[0]/127 > 0.8) else ':(', color=(255), scale=2)
print(clock.fps())
|
Add smile detection example script.
|
NN: Add smile detection example script.
|
Python
|
mit
|
iabdalkader/openmv,iabdalkader/openmv,iabdalkader/openmv,iabdalkader/openmv,openmv/openmv,kwagyeman/openmv,openmv/openmv,kwagyeman/openmv,kwagyeman/openmv,openmv/openmv,kwagyeman/openmv,openmv/openmv
|
NN: Add smile detection example script.
|
# Smile detection using Haar Cascade + CNN.
import sensor, time, image, os, nn
sensor.reset() # Reset and initialize the sensor.
sensor.set_contrast(2)
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to grayscale
sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120)
sensor.skip_frames(time=2000)
sensor.set_auto_gain(False)
# Load smile detection network
net = nn.load('/smile.network')
# Load Face Haar Cascade
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)
# FPS clock
clock = time.clock()
while (True):
clock.tick()
# Capture snapshot
img = sensor.snapshot()
# Find faces.
objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)
# Detect smiles
for r in objects:
img.draw_rectangle(r)
out = net.forward(img, roi=r, softmax=True)
img.draw_string(r[0], r[1], ':)' if (out[0]/127 > 0.8) else ':(', color=(255), scale=2)
print(clock.fps())
|
<commit_before><commit_msg>NN: Add smile detection example script.<commit_after>
|
# Smile detection using Haar Cascade + CNN.
import sensor, time, image, os, nn
sensor.reset() # Reset and initialize the sensor.
sensor.set_contrast(2)
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to grayscale
sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120)
sensor.skip_frames(time=2000)
sensor.set_auto_gain(False)
# Load smile detection network
net = nn.load('/smile.network')
# Load Face Haar Cascade
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)
# FPS clock
clock = time.clock()
while (True):
clock.tick()
# Capture snapshot
img = sensor.snapshot()
# Find faces.
objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)
# Detect smiles
for r in objects:
img.draw_rectangle(r)
out = net.forward(img, roi=r, softmax=True)
img.draw_string(r[0], r[1], ':)' if (out[0]/127 > 0.8) else ':(', color=(255), scale=2)
print(clock.fps())
|
NN: Add smile detection example script.# Smile detection using Haar Cascade + CNN.
import sensor, time, image, os, nn
sensor.reset() # Reset and initialize the sensor.
sensor.set_contrast(2)
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to grayscale
sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120)
sensor.skip_frames(time=2000)
sensor.set_auto_gain(False)
# Load smile detection network
net = nn.load('/smile.network')
# Load Face Haar Cascade
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)
# FPS clock
clock = time.clock()
while (True):
clock.tick()
# Capture snapshot
img = sensor.snapshot()
# Find faces.
objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)
# Detect smiles
for r in objects:
img.draw_rectangle(r)
out = net.forward(img, roi=r, softmax=True)
img.draw_string(r[0], r[1], ':)' if (out[0]/127 > 0.8) else ':(', color=(255), scale=2)
print(clock.fps())
|
<commit_before><commit_msg>NN: Add smile detection example script.<commit_after># Smile detection using Haar Cascade + CNN.
import sensor, time, image, os, nn
sensor.reset() # Reset and initialize the sensor.
sensor.set_contrast(2)
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to grayscale
sensor.set_framesize(sensor.QQVGA) # Set frame size to QQVGA (160x120)
sensor.skip_frames(time=2000)
sensor.set_auto_gain(False)
# Load smile detection network
net = nn.load('/smile.network')
# Load Face Haar Cascade
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)
# FPS clock
clock = time.clock()
while (True):
clock.tick()
# Capture snapshot
img = sensor.snapshot()
# Find faces.
objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)
# Detect smiles
for r in objects:
img.draw_rectangle(r)
out = net.forward(img, roi=r, softmax=True)
img.draw_string(r[0], r[1], ':)' if (out[0]/127 > 0.8) else ':(', color=(255), scale=2)
print(clock.fps())
|
|
139123ddb81eec12d0f932ff6ff73aadb4b418cc
|
ocradmin/lib/nodetree/decorators.py
|
ocradmin/lib/nodetree/decorators.py
|
"""
Nodetree decorators.
"""
import inspect
import textwrap
import node
def underscore_to_camelcase(value):
def camelcase():
yield str.lower
while True:
yield str.capitalize
c = camelcase()
return "".join(c.next()(x) if x else '_' for x in value.split("_"))
def upper_camelcase(value):
value = underscore_to_camelcase(value)
return value[0].capitalize() + value[1:]
class makenode(object):
"""Decorate for constructing a node out
of a single function."""
def __init__(self, intypes, outtype, **kwargs):
self.intypes = intypes
self.outtype = outtype
self.kwargs = kwargs
def __call__(self, fun):
argspec = inspect.getargspec(fun)
def _eval(self):
args = [self.eval_input(i) for i in range(len(argspec.args))]
return fun(*args)
doc = fun.__doc__ if not fun.__doc__ is None \
else "No description provided"
clsname = upper_camelcase(fun.__name__)
ns = upper_camelcase(fun.__module__.split(".")[-1])
clsdict = dict(
__module__ = fun.__module__,
__doc__ = doc,
_eval = _eval,
arity = len(self.intypes),
intypes = self.intypes,
outtype = self.outtype,
            description = textwrap.dedent(doc),
name = "%s::%s" % (ns, clsname),
)
clsdict.update(self.kwargs)
return type(clsname + "Node", (node.Node,), clsdict)()
|
Add decorator to make a Node class from a regular function
|
Add decorator to make a Node class from a regular function
|
Python
|
apache-2.0
|
vitorio/ocropodium,vitorio/ocropodium,vitorio/ocropodium,vitorio/ocropodium
|
Add decorator to make a Node class from a regular function
|
"""
Nodetree decorators.
"""
import inspect
import textwrap
import node
def underscore_to_camelcase(value):
def camelcase():
yield str.lower
while True:
yield str.capitalize
c = camelcase()
return "".join(c.next()(x) if x else '_' for x in value.split("_"))
def upper_camelcase(value):
value = underscore_to_camelcase(value)
return value[0].capitalize() + value[1:]
class makenode(object):
"""Decorate for constructing a node out
of a single function."""
def __init__(self, intypes, outtype, **kwargs):
self.intypes = intypes
self.outtype = outtype
self.kwargs = kwargs
def __call__(self, fun):
argspec = inspect.getargspec(fun)
def _eval(self):
args = [self.eval_input(i) for i in range(len(argspec.args))]
return fun(*args)
doc = fun.__doc__ if not fun.__doc__ is None \
else "No description provided"
clsname = upper_camelcase(fun.__name__)
ns = upper_camelcase(fun.__module__.split(".")[-1])
clsdict = dict(
__module__ = fun.__module__,
__doc__ = doc,
_eval = _eval,
arity = len(self.intypes),
intypes = self.intypes,
outtype = self.outtype,
            description = textwrap.dedent(doc),
name = "%s::%s" % (ns, clsname),
)
clsdict.update(self.kwargs)
return type(clsname + "Node", (node.Node,), clsdict)()
|
<commit_before><commit_msg>Add decorator to make a Node class from a regular function<commit_after>
|
"""
Nodetree decorators.
"""
import inspect
import textwrap
import node
def underscore_to_camelcase(value):
def camelcase():
yield str.lower
while True:
yield str.capitalize
c = camelcase()
return "".join(c.next()(x) if x else '_' for x in value.split("_"))
def upper_camelcase(value):
value = underscore_to_camelcase(value)
return value[0].capitalize() + value[1:]
class makenode(object):
"""Decorate for constructing a node out
of a single function."""
def __init__(self, intypes, outtype, **kwargs):
self.intypes = intypes
self.outtype = outtype
self.kwargs = kwargs
def __call__(self, fun):
argspec = inspect.getargspec(fun)
def _eval(self):
args = [self.eval_input(i) for i in range(len(argspec.args))]
return fun(*args)
doc = fun.__doc__ if not fun.__doc__ is None \
else "No description provided"
clsname = upper_camelcase(fun.__name__)
ns = upper_camelcase(fun.__module__.split(".")[-1])
clsdict = dict(
__module__ = fun.__module__,
__doc__ = doc,
_eval = _eval,
arity = len(self.intypes),
intypes = self.intypes,
outtype = self.outtype,
            description = textwrap.dedent(doc),
name = "%s::%s" % (ns, clsname),
)
clsdict.update(self.kwargs)
return type(clsname + "Node", (node.Node,), clsdict)()
|
Add decorator to make a Node class from a regular function"""
Nodetree decorators.
"""
import inspect
import textwrap
import node
def underscore_to_camelcase(value):
def camelcase():
yield str.lower
while True:
yield str.capitalize
c = camelcase()
return "".join(c.next()(x) if x else '_' for x in value.split("_"))
def upper_camelcase(value):
value = underscore_to_camelcase(value)
return value[0].capitalize() + value[1:]
class makenode(object):
"""Decorate for constructing a node out
of a single function."""
def __init__(self, intypes, outtype, **kwargs):
self.intypes = intypes
self.outtype = outtype
self.kwargs = kwargs
def __call__(self, fun):
argspec = inspect.getargspec(fun)
def _eval(self):
args = [self.eval_input(i) for i in range(len(argspec.args))]
return fun(*args)
doc = fun.__doc__ if not fun.__doc__ is None \
else "No description provided"
clsname = upper_camelcase(fun.__name__)
ns = upper_camelcase(fun.__module__.split(".")[-1])
clsdict = dict(
__module__ = fun.__module__,
__doc__ = doc,
_eval = _eval,
arity = len(self.intypes),
intypes = self.intypes,
outtype = self.outtype,
            description = textwrap.dedent(doc),
name = "%s::%s" % (ns, clsname),
)
clsdict.update(self.kwargs)
return type(clsname + "Node", (node.Node,), clsdict)()
|
<commit_before><commit_msg>Add decorator to make a Node class from a regular function<commit_after>"""
Nodetree decorators.
"""
import inspect
import textwrap
import node
def underscore_to_camelcase(value):
def camelcase():
yield str.lower
while True:
yield str.capitalize
c = camelcase()
return "".join(c.next()(x) if x else '_' for x in value.split("_"))
def upper_camelcase(value):
value = underscore_to_camelcase(value)
return value[0].capitalize() + value[1:]
class makenode(object):
"""Decorate for constructing a node out
of a single function."""
def __init__(self, intypes, outtype, **kwargs):
self.intypes = intypes
self.outtype = outtype
self.kwargs = kwargs
def __call__(self, fun):
argspec = inspect.getargspec(fun)
def _eval(self):
args = [self.eval_input(i) for i in range(len(argspec.args))]
return fun(*args)
doc = fun.__doc__ if not fun.__doc__ is None \
else "No description provided"
clsname = upper_camelcase(fun.__name__)
ns = upper_camelcase(fun.__module__.split(".")[-1])
clsdict = dict(
__module__ = fun.__module__,
__doc__ = doc,
_eval = _eval,
arity = len(self.intypes),
intypes = self.intypes,
outtype = self.outtype,
            description = textwrap.dedent(doc),
name = "%s::%s" % (ns, clsname),
)
clsdict.update(self.kwargs)
return type(clsname + "Node", (node.Node,), clsdict)()
|
|
fbc780c7beb94d73b2a4ea110e733f8c87763741
|
geoip/lookups.py
|
geoip/lookups.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
## Author: Orcun Avsar <orc.avs@gmail.com>
##
## Copyright (C) 2011 S2S Network Consultoria e Tecnologia da Informacao LTDA
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as
## published by the Free Software Foundation, either version 3 of the
## License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
"""Module for ajax autocomplete lookups for locations.
"""
from ajax_select import LookupChannel
from geoip.models import Location
from geoip.models import LocationNamesAggregation
class LocationLookup(LookupChannel):
model = Location
def get_query(self,q,request):
words = q.replace(',',' ').replace('-', ' ').split()
query = Location.objects.all()
queries = []
for word in words:
query = Location.objects.filter(name__icontains=word)[:20]
queries.append(query)
entities = []
for query in queries:
for entity in query:
entities.append(entity)
return entities
def format_match(self,obj):
        return obj.name
|
Add location name lookup for ajax_select.
|
Add location name lookup for ajax_select.
|
Python
|
agpl-3.0
|
umitproject/openmonitor-aggregator,umitproject/openmonitor-aggregator,umitproject/openmonitor-aggregator,umitproject/openmonitor-aggregator,umitproject/openmonitor-aggregator
|
Add location name lookup for ajax_select.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
## Author: Orcun Avsar <orc.avs@gmail.com>
##
## Copyright (C) 2011 S2S Network Consultoria e Tecnologia da Informacao LTDA
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as
## published by the Free Software Foundation, either version 3 of the
## License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
"""Module for ajax autocomplete lookups for locations.
"""
from ajax_select import LookupChannel
from geoip.models import Location
from geoip.models import LocationNamesAggregation
class LocationLookup(LookupChannel):
model = Location
def get_query(self,q,request):
words = q.replace(',',' ').replace('-', ' ').split()
query = Location.objects.all()
queries = []
for word in words:
query = Location.objects.filter(name__icontains=word)[:20]
queries.append(query)
entities = []
for query in queries:
for entity in query:
entities.append(entity)
return entities
def format_match(self,obj):
        return obj.name
|
<commit_before><commit_msg>Add location name lookup for ajax_select.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
## Author: Orcun Avsar <orc.avs@gmail.com>
##
## Copyright (C) 2011 S2S Network Consultoria e Tecnologia da Informacao LTDA
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as
## published by the Free Software Foundation, either version 3 of the
## License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
"""Module for ajax autocomplete lookups for locations.
"""
from ajax_select import LookupChannel
from geoip.models import Location
from geoip.models import LocationNamesAggregation
class LocationLookup(LookupChannel):
model = Location
def get_query(self,q,request):
words = q.replace(',',' ').replace('-', ' ').split()
query = Location.objects.all()
queries = []
for word in words:
query = Location.objects.filter(name__icontains=word)[:20]
queries.append(query)
entities = []
for query in queries:
for entity in query:
entities.append(entity)
return entities
def format_match(self,obj):
        return obj.name
|
Add location name lookup for ajax_select.#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
## Author: Orcun Avsar <orc.avs@gmail.com>
##
## Copyright (C) 2011 S2S Network Consultoria e Tecnologia da Informacao LTDA
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as
## published by the Free Software Foundation, either version 3 of the
## License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
"""Module for ajax autocomplete lookups for locations.
"""
from ajax_select import LookupChannel
from geoip.models import Location
from geoip.models import LocationNamesAggregation
class LocationLookup(LookupChannel):
model = Location
def get_query(self,q,request):
words = q.replace(',',' ').replace('-', ' ').split()
query = Location.objects.all()
queries = []
for word in words:
query = Location.objects.filter(name__icontains=word)[:20]
queries.append(query)
entities = []
for query in queries:
for entity in query:
entities.append(entity)
return entities
def format_match(self,obj):
        return obj.name
|
<commit_before><commit_msg>Add location name lookup for ajax_select.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
## Author: Orcun Avsar <orc.avs@gmail.com>
##
## Copyright (C) 2011 S2S Network Consultoria e Tecnologia da Informacao LTDA
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as
## published by the Free Software Foundation, either version 3 of the
## License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
"""Module for ajax autocomplete lookups for locations.
"""
from ajax_select import LookupChannel
from geoip.models import Location
from geoip.models import LocationNamesAggregation
class LocationLookup(LookupChannel):
model = Location
def get_query(self,q,request):
words = q.replace(',',' ').replace('-', ' ').split()
query = Location.objects.all()
queries = []
for word in words:
query = Location.objects.filter(name__icontains=word)[:20]
queries.append(query)
entities = []
for query in queries:
for entity in query:
entities.append(entity)
return entities
def format_match(self,obj):
        return obj.name
|
|
af3ba846a8074132c64568c420ecb9b6ade9c6ea
|
geomRegexTest.py
|
geomRegexTest.py
|
__author__ = 'Thomas Heavey'
import re
filename = "testg.out"
def findgeoms(filename):
"""A function that takes a file name and returns a list of
geometries."""
relevantelem = [1,3,4,5]
xyzformat = '{:>2} {: f} {: f} {: f}'
geomregex = re.compile(
r'(?:Standard orientation)' # non-capturing (nc) start of geometry
r'(?:.+?)' # nc geometry header
r'((?:(?:\s+\d+\s+)' # nc atom number
r'(\d+\s+)' # (capturing) atomic number
r'(?:\d+\s+)' # nc atomic type
r'(-?\d+\.\d+\s*){3,3}' # 3 cartesian coordinates (x,y,z)
r')+)' # repeat for at least one atom
r'(?:-)' # nc end at line of dashes
, re.DOTALL)
with open(filename, 'r') as file:
geoms = geomregex.search(file.read())
print(geoms.group(1))
mlgeoms = geoms.group(1)
for line in mlgeoms.split('\n'):
# Ignore blank lines:
if len(line) < 2:
continue
xyzelemstring = [line.split()[i] for i in relevantelem]
xyzelemnum = [float(i) for i in xyzelemstring]
xyzelemnum[0] = int(xyzelemstring[0])
print(xyzformat.format(*xyzelemnum))
findgeoms(filename)
|
Work on defining RegEx to find and format molecular geometries in Gaussian output files.
|
Work on defining RegEx to find and format molecular geometries in Gaussian output files.
|
Python
|
apache-2.0
|
thompcinnamon/QM-calc-scripts
|
Work on defining RegEx to find and format molecular geometries in Gaussian output files.
|
__author__ = 'Thomas Heavey'
import re
filename = "testg.out"
def findgeoms(filename):
"""A function that takes a file name and returns a list of
geometries."""
relevantelem = [1,3,4,5]
xyzformat = '{:>2} {: f} {: f} {: f}'
geomregex = re.compile(
r'(?:Standard orientation)' # non-capturing (nc) start of geometry
r'(?:.+?)' # nc geometry header
r'((?:(?:\s+\d+\s+)' # nc atom number
r'(\d+\s+)' # (capturing) atomic number
r'(?:\d+\s+)' # nc atomic type
r'(-?\d+\.\d+\s*){3,3}' # 3 cartesian coordinates (x,y,z)
r')+)' # repeat for at least one atom
r'(?:-)' # nc end at line of dashes
, re.DOTALL)
with open(filename, 'r') as file:
geoms = geomregex.search(file.read())
print(geoms.group(1))
mlgeoms = geoms.group(1)
for line in mlgeoms.split('\n'):
# Ignore blank lines:
if len(line) < 2:
continue
xyzelemstring = [line.split()[i] for i in relevantelem]
xyzelemnum = [float(i) for i in xyzelemstring]
xyzelemnum[0] = int(xyzelemstring[0])
print(xyzformat.format(*xyzelemnum))
findgeoms(filename)
|
<commit_before><commit_msg>Work on defining RegEx to find and format molecular geometries in Gaussian output files.<commit_after>
|
__author__ = 'Thomas Heavey'
import re
filename = "testg.out"
def findgeoms(filename):
"""A function that takes a file name and returns a list of
geometries."""
relevantelem = [1,3,4,5]
xyzformat = '{:>2} {: f} {: f} {: f}'
geomregex = re.compile(
r'(?:Standard orientation)' # non-capturing (nc) start of geometry
r'(?:.+?)' # nc geometry header
r'((?:(?:\s+\d+\s+)' # nc atom number
r'(\d+\s+)' # (capturing) atomic number
r'(?:\d+\s+)' # nc atomic type
r'(-?\d+\.\d+\s*){3,3}' # 3 cartesian coordinates (x,y,z)
r')+)' # repeat for at least one atom
r'(?:-)' # nc end at line of dashes
, re.DOTALL)
with open(filename, 'r') as file:
geoms = geomregex.search(file.read())
print(geoms.group(1))
mlgeoms = geoms.group(1)
for line in mlgeoms.split('\n'):
# Ignore blank lines:
if len(line) < 2:
continue
xyzelemstring = [line.split()[i] for i in relevantelem]
xyzelemnum = [float(i) for i in xyzelemstring]
xyzelemnum[0] = int(xyzelemstring[0])
print(xyzformat.format(*xyzelemnum))
findgeoms(filename)
|
Work on defining RegEx to find and format molecular geometries in Gaussian output files.__author__ = 'Thomas Heavey'
import re
filename = "testg.out"
def findgeoms(filename):
"""A function that takes a file name and returns a list of
geometries."""
relevantelem = [1,3,4,5]
xyzformat = '{:>2} {: f} {: f} {: f}'
geomregex = re.compile(
r'(?:Standard orientation)' # non-capturing (nc) start of geometry
r'(?:.+?)' # nc geometry header
r'((?:(?:\s+\d+\s+)' # nc atom number
r'(\d+\s+)' # (capturing) atomic number
r'(?:\d+\s+)' # nc atomic type
r'(-?\d+\.\d+\s*){3,3}' # 3 cartesian coordinates (x,y,z)
r')+)' # repeat for at least one atom
r'(?:-)' # nc end at line of dashes
, re.DOTALL)
with open(filename, 'r') as file:
geoms = geomregex.search(file.read())
print(geoms.group(1))
mlgeoms = geoms.group(1)
for line in mlgeoms.split('\n'):
# Ignore blank lines:
if len(line) < 2:
continue
xyzelemstring = [line.split()[i] for i in relevantelem]
xyzelemnum = [float(i) for i in xyzelemstring]
xyzelemnum[0] = int(xyzelemstring[0])
print(xyzformat.format(*xyzelemnum))
findgeoms(filename)
|
<commit_before><commit_msg>Work on defining RegEx to find and format molecular geometries in Gaussian output files.<commit_after>__author__ = 'Thomas Heavey'
import re
filename = "testg.out"
def findgeoms(filename):
"""A function that takes a file name and returns a list of
geometries."""
relevantelem = [1,3,4,5]
xyzformat = '{:>2} {: f} {: f} {: f}'
geomregex = re.compile(
r'(?:Standard orientation)' # non-capturing (nc) start of geometry
r'(?:.+?)' # nc geometry header
r'((?:(?:\s+\d+\s+)' # nc atom number
r'(\d+\s+)' # (capturing) atomic number
r'(?:\d+\s+)' # nc atomic type
r'(-?\d+\.\d+\s*){3,3}' # 3 cartesian coordinates (x,y,z)
r')+)' # repeat for at least one atom
r'(?:-)' # nc end at line of dashes
, re.DOTALL)
with open(filename, 'r') as file:
geoms = geomregex.search(file.read())
print(geoms.group(1))
mlgeoms = geoms.group(1)
for line in mlgeoms.split('\n'):
# Ignore blank lines:
if len(line) < 2:
continue
xyzelemstring = [line.split()[i] for i in relevantelem]
xyzelemnum = [float(i) for i in xyzelemstring]
xyzelemnum[0] = int(xyzelemstring[0])
print(xyzformat.format(*xyzelemnum))
findgeoms(filename)
|
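A minimal usage sketch of the same regex-based extraction shown above (an illustration, not part of the original record): the "Standard orientation" block, atom coordinates, and layout are invented for demonstration purposes.
import re
# Invented fragment mimicking the layout of a Gaussian "Standard orientation" table.
sample = (
    " Standard orientation:\n"
    " ---------------------------------------------------------------------\n"
    " Center     Atomic      Atomic             Coordinates (Angstroms)\n"
    " Number     Number       Type             X           Y           Z\n"
    " ---------------------------------------------------------------------\n"
    "      1          8           0        0.000000    0.000000    0.119262\n"
    "      2          1           0        0.000000    0.763239   -0.477047\n"
    "      3          1           0        0.000000   -0.763239   -0.477047\n"
    " ---------------------------------------------------------------------\n"
)
# Same pattern as in findgeoms above, condensed onto fewer lines.
geomregex = re.compile(
    r'(?:Standard orientation)(?:.+?)'
    r'((?:(?:\s+\d+\s+)(\d+\s+)(?:\d+\s+)(-?\d+\.\d+\s*){3,3})+)(?:-)',
    re.DOTALL)
match = geomregex.search(sample)
for line in match.group(1).split('\n'):
    if len(line) < 2:  # skip blank leftovers, as in findgeoms
        continue
    fields = line.split()
    atom = [int(fields[1])] + [float(fields[i]) for i in (3, 4, 5)]
    # Prints one line per atom: atomic number followed by x, y, z.
    print('{:>2} {: f} {: f} {: f}'.format(*atom))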
|
c2b69a51faac56689edc88e747a00b60cf08cc04
|
dthm4kaiako/poet/migrations/0003_auto_20190731_1912.py
|
dthm4kaiako/poet/migrations/0003_auto_20190731_1912.py
|
# Generated by Django 2.1.5 on 2019-07-31 07:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('poet', '0002_progressoutcomegroup'),
]
operations = [
migrations.AlterModelOptions(
name='progressoutcomegroup',
options={'ordering': ['name']},
),
]
|
Add default ordering of progress outcome groups
|
Add default ordering of progress outcome groups
|
Python
|
mit
|
uccser/cs4teachers,uccser/cs4teachers,uccser/cs4teachers,uccser/cs4teachers
|
Add default ordering of progress outcome groups
|
# Generated by Django 2.1.5 on 2019-07-31 07:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('poet', '0002_progressoutcomegroup'),
]
operations = [
migrations.AlterModelOptions(
name='progressoutcomegroup',
options={'ordering': ['name']},
),
]
|
<commit_before><commit_msg>Add default ordering of progress outcome groups<commit_after>
|
# Generated by Django 2.1.5 on 2019-07-31 07:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('poet', '0002_progressoutcomegroup'),
]
operations = [
migrations.AlterModelOptions(
name='progressoutcomegroup',
options={'ordering': ['name']},
),
]
|
Add default ordering of progress outcome groups# Generated by Django 2.1.5 on 2019-07-31 07:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('poet', '0002_progressoutcomegroup'),
]
operations = [
migrations.AlterModelOptions(
name='progressoutcomegroup',
options={'ordering': ['name']},
),
]
|
<commit_before><commit_msg>Add default ordering of progress outcome groups<commit_after># Generated by Django 2.1.5 on 2019-07-31 07:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('poet', '0002_progressoutcomegroup'),
]
operations = [
migrations.AlterModelOptions(
name='progressoutcomegroup',
options={'ordering': ['name']},
),
]
|
|
32a79573b38c6d2ea7f5b81363610a5d9332ed4e
|
src/main/resources/jsonformat.py
|
src/main/resources/jsonformat.py
|
#!/usr/bin/python2.7
import json
import socket
import sys
def readOutput(host, port):
data = None
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, int(port)))
except socket.error as msg:
s = None
print msg
if s is None:
return None
try:
data = s.recv(1024)
except socket.error as msg:
print msg
if s is not None:
        s.close()
return data
def parseData(jsonData, metric, key):
data = json.loads(jsonData)
for x in data:
if not 'name' in x:
continue
if x['name'] == metric:
if not 'datapoint' in x:
continue
monitorData = x['datapoint']
for k in monitorData:
if k == key:
return monitorData[k]
return 'Metric [%s:%s] not found'%(metric,key)
if __name__ == '__main__':
if len(sys.argv) < 4:
print 'Usage python jsonformat.py host port metric:key ...'
print 'The output like:'
print '[value1,value2,...]'
else:
jsonData = readOutput(sys.argv[1], sys.argv[2])
if jsonData is None:
print 'Read JSON data error'
else:
l = []
for x in sys.argv[3:]:
args = x.split(':')
if len(args) != 2:
continue
value = parseData(jsonData, args[0], args[1])
l.append(value)
print l
|
Add python script to parse JSON output
|
Add python script to parse JSON output
|
Python
|
apache-2.0
|
leo27lijiang/app-monitor,leo27lijiang/app-monitor
|
Add python script to parse JSON output
|
#!/usr/bin/python2.7
import json
import socket
import sys
def readOutput(host, port):
data = None
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, int(port)))
except socket.error as msg:
s = None
print msg
if s is None:
return None
try:
data = s.recv(1024)
except socket.error as msg:
print msg
if s is not None:
        s.close()
return data
def parseData(jsonData, metric, key):
data = json.loads(jsonData)
for x in data:
if not 'name' in x:
continue
if x['name'] == metric:
if not 'datapoint' in x:
continue
monitorData = x['datapoint']
for k in monitorData:
if k == key:
return monitorData[k]
return 'Metric [%s:%s] not found'%(metric,key)
if __name__ == '__main__':
if len(sys.argv) < 4:
print 'Usage python jsonformat.py host port metric:key ...'
print 'The output like:'
print '[value1,value2,...]'
else:
jsonData = readOutput(sys.argv[1], sys.argv[2])
if jsonData is None:
print 'Read JSON data error'
else:
l = []
for x in sys.argv[3:]:
args = x.split(':')
if len(args) != 2:
continue
value = parseData(jsonData, args[0], args[1])
l.append(value)
print l
|
<commit_before><commit_msg>Add python script to parse JSON output<commit_after>
|
#!/usr/bin/python2.7
import json
import socket
import sys
def readOutput(host, port):
data = None
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, int(port)))
except socket.error as msg:
s = None
print msg
if s is None:
return None
try:
data = s.recv(1024)
except socket.error as msg:
print msg
if s is not None:
        s.close()
return data
def parseData(jsonData, metric, key):
data = json.loads(jsonData)
for x in data:
if not 'name' in x:
continue
if x['name'] == metric:
if not 'datapoint' in x:
continue
monitorData = x['datapoint']
for k in monitorData:
if k == key:
return monitorData[k]
return 'Metric [%s:%s] not found'%(metric,key)
if __name__ == '__main__':
if len(sys.argv) < 4:
print 'Usage python jsonformat.py host port metric:key ...'
print 'The output like:'
print '[value1,value2,...]'
else:
jsonData = readOutput(sys.argv[1], sys.argv[2])
if jsonData is None:
print 'Read JSON data error'
else:
l = []
for x in sys.argv[3:]:
args = x.split(':')
if len(args) != 2:
continue
value = parseData(jsonData, args[0], args[1])
l.append(value)
print l
|
Add python script to parse JSON output#!/usr/bin/python2.7
import json
import socket
import sys
def readOutput(host, port):
data = None
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, int(port)))
except socket.error as msg:
s = None
print msg
if s is None:
return None
try:
data = s.recv(1024)
except socket.error as msg:
print msg
if s is not None:
        s.close()
return data
def parseData(jsonData, metric, key):
data = json.loads(jsonData)
for x in data:
if not 'name' in x:
continue
if x['name'] == metric:
if not 'datapoint' in x:
continue
monitorData = x['datapoint']
for k in monitorData:
if k == key:
return monitorData[k]
return 'Metric [%s:%s] not found'%(metric,key)
if __name__ == '__main__':
if len(sys.argv) < 4:
print 'Usage python jsonformat.py host port metric:key ...'
print 'The output like:'
print '[value1,value2,...]'
else:
jsonData = readOutput(sys.argv[1], sys.argv[2])
if jsonData is None:
print 'Read JSON data error'
else:
l = []
for x in sys.argv[3:]:
args = x.split(':')
if len(args) != 2:
continue
value = parseData(jsonData, args[0], args[1])
l.append(value)
print l
|
<commit_before><commit_msg>Add python script to parse JSON output<commit_after>#!/usr/bin/python2.7
import json
import socket
import sys
def readOutput(host, port):
data = None
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, int(port)))
except socket.error as msg:
s = None
print msg
if s is None:
return None
try:
data = s.recv(1024)
except socket.error as msg:
print msg
if s is not None:
        s.close()
return data
def parseData(jsonData, metric, key):
data = json.loads(jsonData)
for x in data:
if not 'name' in x:
continue
if x['name'] == metric:
if not 'datapoint' in x:
continue
monitorData = x['datapoint']
for k in monitorData:
if k == key:
return monitorData[k]
return 'Metric [%s:%s] not found'%(metric,key)
if __name__ == '__main__':
if len(sys.argv) < 4:
print 'Usage python jsonformat.py host port metric:key ...'
print 'The output like:'
print '[value1,value2,...]'
else:
jsonData = readOutput(sys.argv[1], sys.argv[2])
if jsonData is None:
print 'Read JSON data error'
else:
l = []
for x in sys.argv[3:]:
args = x.split(':')
if len(args) != 2:
continue
value = parseData(jsonData, args[0], args[1])
l.append(value)
print l
|
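A sketch of the JSON payload shape that parseData above appears to expect; the metric names, values, host, and port below are invented placeholders, not taken from the original record.
import json
# Invented example of the monitoring output: a JSON array of objects carrying
# a "name" and a "datapoint" mapping, which is the structure parseData walks through.
jsonData = '''
[
  {"name": "heap",    "datapoint": {"used": 512.0, "max": 1024.0}},
  {"name": "threads", "datapoint": {"count": 42}}
]
'''
payload = json.loads(jsonData)
assert payload[0]['datapoint']['used'] == 512.0
# With this payload, parseData(jsonData, 'heap', 'used') returns 512.0 and
# parseData(jsonData, 'threads', 'count') returns 42, so an invocation such as
#   python jsonformat.py 127.0.0.1 8125 heap:used threads:count
# would print: [512.0, 42]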
|
699469342179fdc4319b5f39ea201015860ef09d
|
infrastructure/migrations/0020_auto_20210922_0929.py
|
infrastructure/migrations/0020_auto_20210922_0929.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-09-22 07:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('infrastructure', '0019_project_latest_implementation_year'),
]
operations = [
migrations.AlterField(
model_name='project',
name='latest_implementation_year',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='infrastructure.FinancialYear'),
),
]
|
Add migration for CI fix
|
Add migration for CI fix
|
Python
|
mit
|
Code4SA/municipal-data,Code4SA/municipal-data,Code4SA/municipal-data,Code4SA/municipal-data
|
Add migration for CI fix
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-09-22 07:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('infrastructure', '0019_project_latest_implementation_year'),
]
operations = [
migrations.AlterField(
model_name='project',
name='latest_implementation_year',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='infrastructure.FinancialYear'),
),
]
|
<commit_before><commit_msg>Add migration for CI fix<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-09-22 07:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('infrastructure', '0019_project_latest_implementation_year'),
]
operations = [
migrations.AlterField(
model_name='project',
name='latest_implementation_year',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='infrastructure.FinancialYear'),
),
]
|
Add migration for CI fix# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-09-22 07:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('infrastructure', '0019_project_latest_implementation_year'),
]
operations = [
migrations.AlterField(
model_name='project',
name='latest_implementation_year',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='infrastructure.FinancialYear'),
),
]
|
<commit_before><commit_msg>Add migration for CI fix<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-09-22 07:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('infrastructure', '0019_project_latest_implementation_year'),
]
operations = [
migrations.AlterField(
model_name='project',
name='latest_implementation_year',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='infrastructure.FinancialYear'),
),
]
|
|
c63144242d9cf2ecf02d58eb9a93cfe426acc6dc
|
scripts/send_preprint_unreg_contributor_emails.py
|
scripts/send_preprint_unreg_contributor_emails.py
|
# -*- coding: utf-8 -*-
"""Sends an unregistered user claim email for preprints created after 2017-03-14. A hotfix was made on that
date which caused unregistered user claim emails to not be sent. The regression was fixed on 2017-05-05. This
sends the emails that should have been sent during that time period.
NOTE: This script should only be run ONCE.
"""
import sys
import logging
import datetime as dt
import pytz
from framework.auth import Auth
from website.app import init_app
init_app(routes=False)
from website.project import signals as project_signals
from scripts import utils as script_utils
from website.project.views import contributor # flake8: noqa (set up listeners)
from osf.models import PreprintService
logger = logging.getLogger(__name__)
logging.getLogger('website.mails.mails').setLevel(logging.CRITICAL)
# datetime at which https://github.com/CenterForOpenScience/osf.io/commit/568413a77cc51511a0f7afe081a218676a36ebb6 was committed
START_DATETIME = dt.datetime(2017, 3, 14, 19, 10, tzinfo=pytz.utc)
# datetime at which https://github.com/CenterForOpenScience/osf.io/commit/38513916bb9584eb723c46e35553dc6d2c267e1a was deployed
END_DATETIME = dt.datetime(2017, 5, 5, 5, 48, tzinfo=pytz.utc)
def main():
dry_run = '--dry' in sys.argv
    if not dry_run:
# If we're not running in dry mode log everything to a file
script_utils.add_file_logger(logger, __file__)
count = 0
preprints = PreprintService.objects.filter(
is_published=True,
date_published__gte=START_DATETIME,
date_published__lte=END_DATETIME
).order_by('date_published').select_related('node', 'node__creator')
for preprint in preprints:
auth = Auth(preprint.node.creator)
for author in preprint.node.contributors.filter(is_active=False):
assert not author.is_registered
logger.info('Sending email to unregistered User {} on PreprintService {}'.format(author._id, preprint._id))
if not dry_run:
project_signals.contributor_added.send(
preprint.node,
contributor=author,
auth=auth,
email_template='preprint'
)
count += 1
logger.info('Sent an email to {} unregistered users'.format(count))
if __name__ == '__main__':
main()
|
Add script to send unregistered user emails
|
Add script to send unregistered user emails
...to unregistered contributors who didn't receive the email
after 568413a77cc51511a0f7afe081a218676a36ebb6 was deployed
Relates to [OSF-7935]
|
Python
|
apache-2.0
|
laurenrevere/osf.io,Nesiehr/osf.io,cwisecarver/osf.io,leb2dg/osf.io,crcresearch/osf.io,leb2dg/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,chennan47/osf.io,chrisseto/osf.io,Johnetordoff/osf.io,cslzchen/osf.io,Nesiehr/osf.io,caseyrollins/osf.io,sloria/osf.io,baylee-d/osf.io,cslzchen/osf.io,hmoco/osf.io,TomBaxter/osf.io,HalcyonChimera/osf.io,icereval/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,chrisseto/osf.io,hmoco/osf.io,erinspace/osf.io,icereval/osf.io,cwisecarver/osf.io,icereval/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,binoculars/osf.io,CenterForOpenScience/osf.io,felliott/osf.io,erinspace/osf.io,chrisseto/osf.io,sloria/osf.io,brianjgeiger/osf.io,chennan47/osf.io,adlius/osf.io,crcresearch/osf.io,mattclark/osf.io,leb2dg/osf.io,pattisdr/osf.io,caneruguz/osf.io,CenterForOpenScience/osf.io,leb2dg/osf.io,felliott/osf.io,caneruguz/osf.io,hmoco/osf.io,cslzchen/osf.io,felliott/osf.io,baylee-d/osf.io,adlius/osf.io,saradbowman/osf.io,brianjgeiger/osf.io,laurenrevere/osf.io,aaxelb/osf.io,mattclark/osf.io,caneruguz/osf.io,HalcyonChimera/osf.io,aaxelb/osf.io,mfraezz/osf.io,CenterForOpenScience/osf.io,hmoco/osf.io,cslzchen/osf.io,aaxelb/osf.io,adlius/osf.io,caseyrollins/osf.io,chrisseto/osf.io,pattisdr/osf.io,Nesiehr/osf.io,binoculars/osf.io,erinspace/osf.io,caseyrollins/osf.io,laurenrevere/osf.io,mfraezz/osf.io,chennan47/osf.io,TomBaxter/osf.io,aaxelb/osf.io,caneruguz/osf.io,baylee-d/osf.io,saradbowman/osf.io,Nesiehr/osf.io,cwisecarver/osf.io,adlius/osf.io,cwisecarver/osf.io,Johnetordoff/osf.io,mfraezz/osf.io,HalcyonChimera/osf.io,mfraezz/osf.io,sloria/osf.io,pattisdr/osf.io,TomBaxter/osf.io,felliott/osf.io,binoculars/osf.io,crcresearch/osf.io,mattclark/osf.io
|
Add script to send unregistered user emails
...to unregistered contributors who didn't receive the email
after 568413a77cc51511a0f7afe081a218676a36ebb6 was deployed
Relates to [OSF-7935]
|
# -*- coding: utf-8 -*-
"""Sends an unregistered user claim email for preprints created after 2017-03-14. A hotfix was made on that
date which caused unregistered user claim emails to not be sent. The regression was fixed on 2017-05-05. This
sends the emails that should have been sent during that time period.
NOTE: This script should only be run ONCE.
"""
import sys
import logging
import datetime as dt
import pytz
from framework.auth import Auth
from website.app import init_app
init_app(routes=False)
from website.project import signals as project_signals
from scripts import utils as script_utils
from website.project.views import contributor # flake8: noqa (set up listeners)
from osf.models import PreprintService
logger = logging.getLogger(__name__)
logging.getLogger('website.mails.mails').setLevel(logging.CRITICAL)
# datetime at which https://github.com/CenterForOpenScience/osf.io/commit/568413a77cc51511a0f7afe081a218676a36ebb6 was committed
START_DATETIME = dt.datetime(2017, 3, 14, 19, 10, tzinfo=pytz.utc)
# datetime at which https://github.com/CenterForOpenScience/osf.io/commit/38513916bb9584eb723c46e35553dc6d2c267e1a was deployed
END_DATETIME = dt.datetime(2017, 5, 5, 5, 48, tzinfo=pytz.utc)
def main():
dry_run = '--dry' in sys.argv
    if not dry_run:
# If we're not running in dry mode log everything to a file
script_utils.add_file_logger(logger, __file__)
count = 0
preprints = PreprintService.objects.filter(
is_published=True,
date_published__gte=START_DATETIME,
date_published__lte=END_DATETIME
).order_by('date_published').select_related('node', 'node__creator')
for preprint in preprints:
auth = Auth(preprint.node.creator)
for author in preprint.node.contributors.filter(is_active=False):
assert not author.is_registered
logger.info('Sending email to unregistered User {} on PreprintService {}'.format(author._id, preprint._id))
if not dry_run:
project_signals.contributor_added.send(
preprint.node,
contributor=author,
auth=auth,
email_template='preprint'
)
count += 1
logger.info('Sent an email to {} unregistered users'.format(count))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to send unregistered user emails
...to unregistered contributors who didn't receive the email
after 568413a77cc51511a0f7afe081a218676a36ebb6 was deployed
Relates to [OSF-7935]<commit_after>
|
# -*- coding: utf-8 -*-
"""Sends an unregistered user claim email for preprints created after 2017-03-14. A hotfix was made on that
date which caused unregistered user claim emails to not be sent. The regression was fixed on 2017-05-05. This
sends the emails that should have been sent during that time period.
NOTE: This script should only be run ONCE.
"""
import sys
import logging
import datetime as dt
import pytz
from framework.auth import Auth
from website.app import init_app
init_app(routes=False)
from website.project import signals as project_signals
from scripts import utils as script_utils
from website.project.views import contributor # flake8: noqa (set up listeners)
from osf.models import PreprintService
logger = logging.getLogger(__name__)
logging.getLogger('website.mails.mails').setLevel(logging.CRITICAL)
# datetime at which https://github.com/CenterForOpenScience/osf.io/commit/568413a77cc51511a0f7afe081a218676a36ebb6 was committed
START_DATETIME = dt.datetime(2017, 3, 14, 19, 10, tzinfo=pytz.utc)
# datetime at which https://github.com/CenterForOpenScience/osf.io/commit/38513916bb9584eb723c46e35553dc6d2c267e1a was deployed
END_DATETIME = dt.datetime(2017, 5, 5, 5, 48, tzinfo=pytz.utc)
def main():
dry_run = '--dry' in sys.argv
    if not dry_run:
# If we're not running in dry mode log everything to a file
script_utils.add_file_logger(logger, __file__)
count = 0
preprints = PreprintService.objects.filter(
is_published=True,
date_published__gte=START_DATETIME,
date_published__lte=END_DATETIME
).order_by('date_published').select_related('node', 'node__creator')
for preprint in preprints:
auth = Auth(preprint.node.creator)
for author in preprint.node.contributors.filter(is_active=False):
assert not author.is_registered
logger.info('Sending email to unregistered User {} on PreprintService {}'.format(author._id, preprint._id))
if not dry_run:
project_signals.contributor_added.send(
preprint.node,
contributor=author,
auth=auth,
email_template='preprint'
)
count += 1
logger.info('Sent an email to {} unregistered users'.format(count))
if __name__ == '__main__':
main()
|
Add script to send unregistered user emails
...to unregistered contributors who didn't receive the email
after 568413a77cc51511a0f7afe081a218676a36ebb6 was deployed
Relates to [OSF-7935]# -*- coding: utf-8 -*-
"""Sends an unregistered user claim email for preprints created after 2017-03-14. A hotfix was made on that
date which caused unregistered user claim emails to not be sent. The regression was fixed on 2017-05-05. This
sends the emails that should have been sent during that time period.
NOTE: This script should only be run ONCE.
"""
import sys
import logging
import datetime as dt
import pytz
from framework.auth import Auth
from website.app import init_app
init_app(routes=False)
from website.project import signals as project_signals
from scripts import utils as script_utils
from website.project.views import contributor # flake8: noqa (set up listeners)
from osf.models import PreprintService
logger = logging.getLogger(__name__)
logging.getLogger('website.mails.mails').setLevel(logging.CRITICAL)
# datetime at which https://github.com/CenterForOpenScience/osf.io/commit/568413a77cc51511a0f7afe081a218676a36ebb6 was committed
START_DATETIME = dt.datetime(2017, 3, 14, 19, 10, tzinfo=pytz.utc)
# datetime at which https://github.com/CenterForOpenScience/osf.io/commit/38513916bb9584eb723c46e35553dc6d2c267e1a was deployed
END_DATETIME = dt.datetime(2017, 5, 5, 5, 48, tzinfo=pytz.utc)
def main():
dry_run = '--dry' in sys.argv
    if not dry_run:
# If we're not running in dry mode log everything to a file
script_utils.add_file_logger(logger, __file__)
count = 0
preprints = PreprintService.objects.filter(
is_published=True,
date_published__gte=START_DATETIME,
date_published__lte=END_DATETIME
).order_by('date_published').select_related('node', 'node__creator')
for preprint in preprints:
auth = Auth(preprint.node.creator)
for author in preprint.node.contributors.filter(is_active=False):
assert not author.is_registered
logger.info('Sending email to unregistered User {} on PreprintService {}'.format(author._id, preprint._id))
if not dry_run:
project_signals.contributor_added.send(
preprint.node,
contributor=author,
auth=auth,
email_template='preprint'
)
count += 1
logger.info('Sent an email to {} unregistered users'.format(count))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to send unregistered user emails
...to unregistered contributors who didn't receive the email
after 568413a77cc51511a0f7afe081a218676a36ebb6 was deployed
Relates to [OSF-7935]<commit_after># -*- coding: utf-8 -*-
"""Sends an unregistered user claim email for preprints created after 2017-03-14. A hotfix was made on that
date which caused unregistered user claim emails to not be sent. The regression was fixed on 2017-05-05. This
sends the emails that should have been sent during that time period.
NOTE: This script should only be run ONCE.
"""
import sys
import logging
import datetime as dt
import pytz
from framework.auth import Auth
from website.app import init_app
init_app(routes=False)
from website.project import signals as project_signals
from scripts import utils as script_utils
from website.project.views import contributor # flake8: noqa (set up listeners)
from osf.models import PreprintService
logger = logging.getLogger(__name__)
logging.getLogger('website.mails.mails').setLevel(logging.CRITICAL)
# datetime at which https://github.com/CenterForOpenScience/osf.io/commit/568413a77cc51511a0f7afe081a218676a36ebb6 was committed
START_DATETIME = dt.datetime(2017, 3, 14, 19, 10, tzinfo=pytz.utc)
# datetime at which https://github.com/CenterForOpenScience/osf.io/commit/38513916bb9584eb723c46e35553dc6d2c267e1a was deployed
END_DATETIME = dt.datetime(2017, 5, 5, 5, 48, tzinfo=pytz.utc)
def main():
dry_run = '--dry' in sys.argv
    if not dry_run:
# If we're not running in dry mode log everything to a file
script_utils.add_file_logger(logger, __file__)
count = 0
preprints = PreprintService.objects.filter(
is_published=True,
date_published__gte=START_DATETIME,
date_published__lte=END_DATETIME
).order_by('date_published').select_related('node', 'node__creator')
for preprint in preprints:
auth = Auth(preprint.node.creator)
for author in preprint.node.contributors.filter(is_active=False):
assert not author.is_registered
logger.info('Sending email to unregistered User {} on PreprintService {}'.format(author._id, preprint._id))
if not dry_run:
project_signals.contributor_added.send(
preprint.node,
contributor=author,
auth=auth,
email_template='preprint'
)
count += 1
logger.info('Sent an email to {} unregistered users'.format(count))
if __name__ == '__main__':
main()
|
|
4065a08ea401e0d95e8d40d9d735edf92edda861
|
oslo_policy/tests/test_cache_handler.py
|
oslo_policy/tests/test_cache_handler.py
|
# Copyright (c) 2020 OpenStack Foundation.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test the cache handler module"""
import os
import fixtures
from oslotest import base as test_base
from oslo_policy import _cache_handler as _ch
class CacheHandlerTest(test_base.BaseTestCase):
def setUp(self):
super().setUp()
self.tmpdir = self.useFixture(fixtures.TempDir())
def test_read_cached_file(self):
file_cache = {}
path = os.path.join(self.tmpdir.path, 'tmpfile')
with open(path, 'w+') as fp:
fp.write('test')
reloaded, data = _ch.read_cached_file(file_cache, path)
self.assertEqual('test', data)
self.assertTrue(reloaded)
reloaded, data = _ch.read_cached_file(file_cache, path)
self.assertEqual('test', data)
self.assertFalse(reloaded)
reloaded, data = _ch.read_cached_file(
file_cache, path, force_reload=True)
self.assertEqual('test', data)
self.assertTrue(reloaded)
def test_read_cached_file_with_updates(self):
file_cache = {}
path = os.path.join(self.tmpdir.path, 'tmpfile')
with open(path, 'w+') as fp:
fp.write('test')
reloaded, data = _ch.read_cached_file(file_cache, path)
# update the timestamps
times = (os.stat(path).st_atime + 1, os.stat(path).st_mtime + 1)
os.utime(path, times)
reloaded, data = _ch.read_cached_file(file_cache, path)
self.assertTrue(reloaded)
|
Add unit tests on cache handler
|
Add unit tests on cache handler
Change-Id: Ife6600da240f830aa0080e3f214cedf1389ea512
|
Python
|
apache-2.0
|
openstack/oslo.policy
|
Add unit tests on cache handler
Change-Id: Ife6600da240f830aa0080e3f214cedf1389ea512
|
# Copyright (c) 2020 OpenStack Foundation.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test the cache handler module"""
import os
import fixtures
from oslotest import base as test_base
from oslo_policy import _cache_handler as _ch
class CacheHandlerTest(test_base.BaseTestCase):
def setUp(self):
super().setUp()
self.tmpdir = self.useFixture(fixtures.TempDir())
def test_read_cached_file(self):
file_cache = {}
path = os.path.join(self.tmpdir.path, 'tmpfile')
with open(path, 'w+') as fp:
fp.write('test')
reloaded, data = _ch.read_cached_file(file_cache, path)
self.assertEqual('test', data)
self.assertTrue(reloaded)
reloaded, data = _ch.read_cached_file(file_cache, path)
self.assertEqual('test', data)
self.assertFalse(reloaded)
reloaded, data = _ch.read_cached_file(
file_cache, path, force_reload=True)
self.assertEqual('test', data)
self.assertTrue(reloaded)
def test_read_cached_file_with_updates(self):
file_cache = {}
path = os.path.join(self.tmpdir.path, 'tmpfile')
with open(path, 'w+') as fp:
fp.write('test')
reloaded, data = _ch.read_cached_file(file_cache, path)
# update the timestamps
times = (os.stat(path).st_atime + 1, os.stat(path).st_mtime + 1)
os.utime(path, times)
reloaded, data = _ch.read_cached_file(file_cache, path)
self.assertTrue(reloaded)
|
<commit_before><commit_msg>Add unit tests on cache handler
Change-Id: Ife6600da240f830aa0080e3f214cedf1389ea512<commit_after>
|
# Copyright (c) 2020 OpenStack Foundation.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test the cache handler module"""
import os
import fixtures
from oslotest import base as test_base
from oslo_policy import _cache_handler as _ch
class CacheHandlerTest(test_base.BaseTestCase):
def setUp(self):
super().setUp()
self.tmpdir = self.useFixture(fixtures.TempDir())
def test_read_cached_file(self):
file_cache = {}
path = os.path.join(self.tmpdir.path, 'tmpfile')
with open(path, 'w+') as fp:
fp.write('test')
reloaded, data = _ch.read_cached_file(file_cache, path)
self.assertEqual('test', data)
self.assertTrue(reloaded)
reloaded, data = _ch.read_cached_file(file_cache, path)
self.assertEqual('test', data)
self.assertFalse(reloaded)
reloaded, data = _ch.read_cached_file(
file_cache, path, force_reload=True)
self.assertEqual('test', data)
self.assertTrue(reloaded)
def test_read_cached_file_with_updates(self):
file_cache = {}
path = os.path.join(self.tmpdir.path, 'tmpfile')
with open(path, 'w+') as fp:
fp.write('test')
reloaded, data = _ch.read_cached_file(file_cache, path)
# update the timestamps
times = (os.stat(path).st_atime + 1, os.stat(path).st_mtime + 1)
os.utime(path, times)
reloaded, data = _ch.read_cached_file(file_cache, path)
self.assertTrue(reloaded)
|
Add unit tests on cache handler
Change-Id: Ife6600da240f830aa0080e3f214cedf1389ea512# Copyright (c) 2020 OpenStack Foundation.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test the cache handler module"""
import os
import fixtures
from oslotest import base as test_base
from oslo_policy import _cache_handler as _ch
class CacheHandlerTest(test_base.BaseTestCase):
def setUp(self):
super().setUp()
self.tmpdir = self.useFixture(fixtures.TempDir())
def test_read_cached_file(self):
file_cache = {}
path = os.path.join(self.tmpdir.path, 'tmpfile')
with open(path, 'w+') as fp:
fp.write('test')
reloaded, data = _ch.read_cached_file(file_cache, path)
self.assertEqual('test', data)
self.assertTrue(reloaded)
reloaded, data = _ch.read_cached_file(file_cache, path)
self.assertEqual('test', data)
self.assertFalse(reloaded)
reloaded, data = _ch.read_cached_file(
file_cache, path, force_reload=True)
self.assertEqual('test', data)
self.assertTrue(reloaded)
def test_read_cached_file_with_updates(self):
file_cache = {}
path = os.path.join(self.tmpdir.path, 'tmpfile')
with open(path, 'w+') as fp:
fp.write('test')
reloaded, data = _ch.read_cached_file(file_cache, path)
# update the timestamps
times = (os.stat(path).st_atime + 1, os.stat(path).st_mtime + 1)
os.utime(path, times)
reloaded, data = _ch.read_cached_file(file_cache, path)
self.assertTrue(reloaded)
|
<commit_before><commit_msg>Add unit tests on cache handler
Change-Id: Ife6600da240f830aa0080e3f214cedf1389ea512<commit_after># Copyright (c) 2020 OpenStack Foundation.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test the cache handler module"""
import os
import fixtures
from oslotest import base as test_base
from oslo_policy import _cache_handler as _ch
class CacheHandlerTest(test_base.BaseTestCase):
def setUp(self):
super().setUp()
self.tmpdir = self.useFixture(fixtures.TempDir())
def test_read_cached_file(self):
file_cache = {}
path = os.path.join(self.tmpdir.path, 'tmpfile')
with open(path, 'w+') as fp:
fp.write('test')
reloaded, data = _ch.read_cached_file(file_cache, path)
self.assertEqual('test', data)
self.assertTrue(reloaded)
reloaded, data = _ch.read_cached_file(file_cache, path)
self.assertEqual('test', data)
self.assertFalse(reloaded)
reloaded, data = _ch.read_cached_file(
file_cache, path, force_reload=True)
self.assertEqual('test', data)
self.assertTrue(reloaded)
def test_read_cached_file_with_updates(self):
file_cache = {}
path = os.path.join(self.tmpdir.path, 'tmpfile')
with open(path, 'w+') as fp:
fp.write('test')
reloaded, data = _ch.read_cached_file(file_cache, path)
# update the timestamps
times = (os.stat(path).st_atime + 1, os.stat(path).st_mtime + 1)
os.utime(path, times)
reloaded, data = _ch.read_cached_file(file_cache, path)
self.assertTrue(reloaded)
|
|
3661ca3947763656165f8fc68ea42358ad37285a
|
test/unit/helpers/test_qiprofile.py
|
test/unit/helpers/test_qiprofile.py
|
import os
import glob
import shutil
from nose.tools import (assert_equal, assert_is_not_none)
import qixnat
from ... import (project, ROOT)
from ...helpers.logging import logger
from qipipe.helpers import qiprofile
COLLECTION = 'Sarcoma'
"""The test collection."""
SUBJECT = 'Sarcoma001'
"""The test subjects."""
SESSION = 'Session01'
"""The test session."""
class TestQIProfile(object):
"""qiprofile update tests."""
def setUp(self):
self._clean()
self._seed()
def tearDown(self):
self._clean()
def test_sync_session(self):
logger(__name__).debug("Testing qiprofile sync on %s %s..." %
(SUBJECT, SESSION))
def _clean(self):
"""Deletes the test XNAT session."""
with qixnat.connect() as xnat:
# Delete the test subject, if it exists.
            xnat.delete_subjects(project, SUBJECT)
def _seed(self):
"""Populates the test XNAT session."""
with qixnat.connect() as xnat:
# Delete the test subject, if it exists.
            xnat.delete_subjects(project(), SUBJECT)
if __name__ == "__main__":
import nose
nose.main(defaultTest=__name__)
|
Add stub for qiprofile update test.
|
Add stub for qiprofile update test.
|
Python
|
bsd-2-clause
|
ohsu-qin/qipipe
|
Add stub for qiprofile update test.
|
import os
import glob
import shutil
from nose.tools import (assert_equal, assert_is_not_none)
import qixnat
from ... import (project, ROOT)
from ...helpers.logging import logger
from qipipe.helpers import qiprofile
COLLECTION = 'Sarcoma'
"""The test collection."""
SUBJECT = 'Sarcoma001'
"""The test subjects."""
SESSION = 'Session01'
"""The test session."""
class TestQIProfile(object):
"""qiprofile update tests."""
def setUp(self):
self._clean()
self._seed()
def tearDown(self):
self._clean()
def test_sync_session(self):
logger(__name__).debug("Testing qiprofile sync on %s %s..." %
(SUBJECT, SESSION))
def _clean(self):
"""Deletes the test XNAT session."""
with qixnat.connect() as xnat:
# Delete the test subject, if it exists.
            xnat.delete_subjects(project, SUBJECT)
def _seed(self):
"""Populates the test XNAT session."""
with qixnat.connect() as xnat:
# Delete the test subject, if it exists.
            xnat.delete_subjects(project(), SUBJECT)
if __name__ == "__main__":
import nose
nose.main(defaultTest=__name__)
|
<commit_before><commit_msg>Add stub for qiprofile update test.<commit_after>
|
import os
import glob
import shutil
from nose.tools import (assert_equal, assert_is_not_none)
import qixnat
from ... import (project, ROOT)
from ...helpers.logging import logger
from qipipe.helpers import qiprofile
COLLECTION = 'Sarcoma'
"""The test collection."""
SUBJECT = 'Sarcoma001'
"""The test subjects."""
SESSION = 'Session01'
"""The test session."""
class TestQIProfile(object):
"""qiprofile update tests."""
def setUp(self):
self._clean()
self._seed()
def tearDown(self):
self._clean()
def test_sync_session(self):
logger(__name__).debug("Testing qiprofile sync on %s %s..." %
(SUBJECT, SESSION))
def _clean(self):
"""Deletes the test XNAT session."""
with qixnat.connect() as xnat:
# Delete the test subject, if it exists.
            xnat.delete_subjects(project, SUBJECT)
def _seed(self):
"""Populates the test XNAT session."""
with qixnat.connect() as xnat:
# Delete the test subject, if it exists.
            xnat.delete_subjects(project(), SUBJECT)
if __name__ == "__main__":
import nose
nose.main(defaultTest=__name__)
|
Add stub for qiprofile update test.import os
import glob
import shutil
from nose.tools import (assert_equal, assert_is_not_none)
import qixnat
from ... import (project, ROOT)
from ...helpers.logging import logger
from qipipe.helpers import qiprofile
COLLECTION = 'Sarcoma'
"""The test collection."""
SUBJECT = 'Sarcoma001'
"""The test subjects."""
SESSION = 'Session01'
"""The test session."""
class TestQIProfile(object):
"""qiprofile update tests."""
def setUp(self):
self._clean()
self._seed()
def tearDown(self):
self._clean()
def test_sync_session(self):
logger(__name__).debug("Testing qiprofile sync on %s %s..." %
(SUBJECT, SESSION))
def _clean(self):
"""Deletes the test XNAT session."""
with qixnat.connect() as xnat:
# Delete the test subject, if it exists.
            xnat.delete_subjects(project, SUBJECT)
def _seed(self):
"""Populates the test XNAT session."""
with qixnat.connect() as xnat:
# Delete the test subject, if it exists.
            xnat.delete_subjects(project(), SUBJECT)
if __name__ == "__main__":
import nose
nose.main(defaultTest=__name__)
|
<commit_before><commit_msg>Add stub for qiprofile update test.<commit_after>import os
import glob
import shutil
from nose.tools import (assert_equal, assert_is_not_none)
import qixnat
from ... import (project, ROOT)
from ...helpers.logging import logger
from qipipe.helpers import qiprofile
COLLECTION = 'Sarcoma'
"""The test collection."""
SUBJECT = 'Sarcoma001'
"""The test subjects."""
SESSION = 'Session01'
"""The test session."""
class TestQIProfile(object):
"""qiprofile update tests."""
def setUp(self):
self._clean()
self._seed()
def tearDown(self):
self._clean()
def test_sync_session(self):
logger(__name__).debug("Testing qiprofile sync on %s %s..." %
(SUBJECT, SESSION))
def _clean(self):
"""Deletes the test XNAT session."""
with qixnat.connect() as xnat:
# Delete the test subject, if it exists.
            xnat.delete_subjects(project, SUBJECT)
def _seed(self):
"""Populates the test XNAT session."""
with qixnat.connect() as xnat:
# Delete the test subject, if it exists.
            xnat.delete_subjects(project(), SUBJECT)
if __name__ == "__main__":
import nose
nose.main(defaultTest=__name__)
|
|
36af45d88f01723204d9b65d4081e74a80f0776b
|
test/layers_test.py
|
test/layers_test.py
|
import theanets
import numpy as np
class TestLayer:
def test_build(self):
layer = theanets.layers.build('feedforward', nin=2, nout=4)
assert isinstance(layer, theanets.layers.Layer)
class TestFeedforward:
def test_create(self):
l = theanets.layers.Feedforward(nin=2, nout=4)
assert l.reset() == 12
class TestTied:
def test_create(self):
l0 = theanets.layers.Feedforward(nin=2, nout=4)
l = theanets.layers.Tied(partner=l0)
assert l.reset() == 2
class TestClassifier:
def test_create(self):
l = theanets.layers.Classifier(nin=2, nout=4)
assert l.reset() == 12
class TestRecurrent:
def test_create(self):
l = theanets.layers.Recurrent(nin=2, nout=4)
assert l.reset() == 28
class TestMRNN:
def test_create(self):
l = theanets.layers.MRNN(nin=2, nout=4, factors=3)
assert l.reset() == 42
class TestLSTM:
def test_create(self):
l = theanets.layers.LSTM(nin=2, nout=4)
assert l.reset() == 124
|
Add test for layers module.
|
Add test for layers module.
|
Python
|
mit
|
chrinide/theanets,devdoer/theanets,lmjohns3/theanets
|
Add test for layers module.
|
import theanets
import numpy as np
class TestLayer:
def test_build(self):
layer = theanets.layers.build('feedforward', nin=2, nout=4)
assert isinstance(layer, theanets.layers.Layer)
class TestFeedforward:
def test_create(self):
l = theanets.layers.Feedforward(nin=2, nout=4)
assert l.reset() == 12
class TestTied:
def test_create(self):
l0 = theanets.layers.Feedforward(nin=2, nout=4)
l = theanets.layers.Tied(partner=l0)
assert l.reset() == 2
class TestClassifier:
def test_create(self):
l = theanets.layers.Classifier(nin=2, nout=4)
assert l.reset() == 12
class TestRecurrent:
def test_create(self):
l = theanets.layers.Recurrent(nin=2, nout=4)
assert l.reset() == 28
class TestMRNN:
def test_create(self):
l = theanets.layers.MRNN(nin=2, nout=4, factors=3)
assert l.reset() == 42
class TestLSTM:
def test_create(self):
l = theanets.layers.LSTM(nin=2, nout=4)
assert l.reset() == 124
|
<commit_before><commit_msg>Add test for layers module.<commit_after>
|
import theanets
import numpy as np
class TestLayer:
def test_build(self):
layer = theanets.layers.build('feedforward', nin=2, nout=4)
assert isinstance(layer, theanets.layers.Layer)
class TestFeedforward:
def test_create(self):
l = theanets.layers.Feedforward(nin=2, nout=4)
assert l.reset() == 12
class TestTied:
def test_create(self):
l0 = theanets.layers.Feedforward(nin=2, nout=4)
l = theanets.layers.Tied(partner=l0)
assert l.reset() == 2
class TestClassifier:
def test_create(self):
l = theanets.layers.Classifier(nin=2, nout=4)
assert l.reset() == 12
class TestRecurrent:
def test_create(self):
l = theanets.layers.Recurrent(nin=2, nout=4)
assert l.reset() == 28
class TestMRNN:
def test_create(self):
l = theanets.layers.MRNN(nin=2, nout=4, factors=3)
assert l.reset() == 42
class TestLSTM:
def test_create(self):
l = theanets.layers.LSTM(nin=2, nout=4)
assert l.reset() == 124
|
Add test for layers module.import theanets
import numpy as np
class TestLayer:
def test_build(self):
layer = theanets.layers.build('feedforward', nin=2, nout=4)
assert isinstance(layer, theanets.layers.Layer)
class TestFeedforward:
def test_create(self):
l = theanets.layers.Feedforward(nin=2, nout=4)
assert l.reset() == 12
class TestTied:
def test_create(self):
l0 = theanets.layers.Feedforward(nin=2, nout=4)
l = theanets.layers.Tied(partner=l0)
assert l.reset() == 2
class TestClassifier:
def test_create(self):
l = theanets.layers.Classifier(nin=2, nout=4)
assert l.reset() == 12
class TestRecurrent:
def test_create(self):
l = theanets.layers.Recurrent(nin=2, nout=4)
assert l.reset() == 28
class TestMRNN:
def test_create(self):
l = theanets.layers.MRNN(nin=2, nout=4, factors=3)
assert l.reset() == 42
class TestLSTM:
def test_create(self):
l = theanets.layers.LSTM(nin=2, nout=4)
assert l.reset() == 124
|
<commit_before><commit_msg>Add test for layers module.<commit_after>import theanets
import numpy as np
class TestLayer:
def test_build(self):
layer = theanets.layers.build('feedforward', nin=2, nout=4)
assert isinstance(layer, theanets.layers.Layer)
class TestFeedforward:
def test_create(self):
l = theanets.layers.Feedforward(nin=2, nout=4)
assert l.reset() == 12
class TestTied:
def test_create(self):
l0 = theanets.layers.Feedforward(nin=2, nout=4)
l = theanets.layers.Tied(partner=l0)
assert l.reset() == 2
class TestClassifier:
def test_create(self):
l = theanets.layers.Classifier(nin=2, nout=4)
assert l.reset() == 12
class TestRecurrent:
def test_create(self):
l = theanets.layers.Recurrent(nin=2, nout=4)
assert l.reset() == 28
class TestMRNN:
def test_create(self):
l = theanets.layers.MRNN(nin=2, nout=4, factors=3)
assert l.reset() == 42
class TestLSTM:
def test_create(self):
l = theanets.layers.LSTM(nin=2, nout=4)
assert l.reset() == 124
|
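A possible reading of the hard-coded expectations above (an inference for illustration, not stated in the record): reset() appears to report the number of learnable parameters created for the layer, which lines up with the nin=2, nout=4 shapes used in these tests.
# Inferred parameter counts for nin=2, nout=4 (assumption, not taken from the record):
assert 2 * 4 + 4 == 12                           # Feedforward / Classifier: weights + bias
assert 2 * 4 + 4 * 4 + 4 == 28                   # Recurrent: input weights + recurrent weights + bias
assert 4 * (2 * 4 + 4 * 4 + 4) + 3 * 4 == 124    # LSTM: four gated blocks plus peephole weights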
|
3dcf251276060b43ac888e0239f26a0cf2531832
|
tests/test_proxy_drop_executable.py
|
tests/test_proxy_drop_executable.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
from positive_alert_test_case import PositiveAlertTestCase
from negative_alert_test_case import NegativeAlertTestCase
from alert_test_suite import AlertTestSuite
class TestAlertSSHPasswordAuthViolation(AlertTestSuite):
alert_filename = "proxy_drop_non_standard_port"
# This event is the default positive event that will cause the
# alert to trigger
default_event = {
"_type": "event",
"_source": {
"category": "squid",
"tags": ["squid"],
"details": {
"details.sourceipaddress": "1.2.3.4",
"details.destination": "http://evil.com/evil.exe",
"details.proxyaction": "TCP_DENIED/-",
}
}
}
# This alert is the expected result from running this task
default_alert = {
"category": "squid",
"tags": ['squid', 'proxy'],
"severity": "WARNING",
"summary": 'Multiple Proxy DROP events detected from 1.2.3.4 to the following executable file destinations: http://evil.com/evil.exe',
}
test_cases = []
test_cases.append(
PositiveAlertTestCase(
description="Positive test with default events and default alert expected",
events=AlertTestSuite.create_events(default_event, 1),
expected_alert=default_alert
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['category'] = 'bad'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect category",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['tags'] = 'bad tag example'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect tags",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({
'minutes': 241})
event['_source']['receivedtimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({
'minutes': 241})
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with old timestamp",
events=events,
)
)
|
Add tests for proxy drop executable
|
Add tests for proxy drop executable
|
Python
|
mpl-2.0
|
gdestuynder/MozDef,mozilla/MozDef,jeffbryner/MozDef,gdestuynder/MozDef,jeffbryner/MozDef,mozilla/MozDef,jeffbryner/MozDef,Phrozyn/MozDef,Phrozyn/MozDef,gdestuynder/MozDef,mpurzynski/MozDef,Phrozyn/MozDef,gdestuynder/MozDef,jeffbryner/MozDef,mpurzynski/MozDef,mozilla/MozDef,mpurzynski/MozDef,Phrozyn/MozDef,mpurzynski/MozDef,mozilla/MozDef
|
Add tests for proxy drop executable
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
from positive_alert_test_case import PositiveAlertTestCase
from negative_alert_test_case import NegativeAlertTestCase
from alert_test_suite import AlertTestSuite
class TestAlertSSHPasswordAuthViolation(AlertTestSuite):
alert_filename = "proxy_drop_non_standard_port"
# This event is the default positive event that will cause the
# alert to trigger
default_event = {
"_type": "event",
"_source": {
"category": "squid",
"tags": ["squid"],
"details": {
"details.sourceipaddress": "1.2.3.4",
"details.destination": "http://evil.com/evil.exe",
"details.proxyaction": "TCP_DENIED/-",
}
}
}
# This alert is the expected result from running this task
default_alert = {
"category": "squid",
"tags": ['squid', 'proxy'],
"severity": "WARNING",
"summary": 'Multiple Proxy DROP events detected from 1.2.3.4 to the following executable file destinations: http://evil.com/evil.exe',
}
test_cases = []
test_cases.append(
PositiveAlertTestCase(
description="Positive test with default events and default alert expected",
events=AlertTestSuite.create_events(default_event, 1),
expected_alert=default_alert
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['category'] = 'bad'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect category",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['tags'] = 'bad tag example'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect tags",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({
'minutes': 241})
event['_source']['receivedtimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({
'minutes': 241})
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with old timestamp",
events=events,
)
)
|
<commit_before><commit_msg>Add tests for proxy drop executable<commit_after>
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
from positive_alert_test_case import PositiveAlertTestCase
from negative_alert_test_case import NegativeAlertTestCase
from alert_test_suite import AlertTestSuite
class TestAlertSSHPasswordAuthViolation(AlertTestSuite):
alert_filename = "proxy_drop_non_standard_port"
# This event is the default positive event that will cause the
# alert to trigger
default_event = {
"_type": "event",
"_source": {
"category": "squid",
"tags": ["squid"],
"details": {
"details.sourceipaddress": "1.2.3.4",
"details.destination": "http://evil.com/evil.exe",
"details.proxyaction": "TCP_DENIED/-",
}
}
}
# This alert is the expected result from running this task
default_alert = {
"category": "squid",
"tags": ['squid', 'proxy'],
"severity": "WARNING",
"summary": 'Multiple Proxy DROP events detected from 1.2.3.4 to the following executable file destinations: http://evil.com/evil.exe',
}
test_cases = []
test_cases.append(
PositiveAlertTestCase(
description="Positive test with default events and default alert expected",
events=AlertTestSuite.create_events(default_event, 1),
expected_alert=default_alert
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['category'] = 'bad'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect category",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['tags'] = 'bad tag example'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect tags",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({
'minutes': 241})
event['_source']['receivedtimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({
'minutes': 241})
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with old timestamp",
events=events,
)
)
|
Add tests for proxy drop executable# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
from positive_alert_test_case import PositiveAlertTestCase
from negative_alert_test_case import NegativeAlertTestCase
from alert_test_suite import AlertTestSuite
class TestAlertSSHPasswordAuthViolation(AlertTestSuite):
alert_filename = "proxy_drop_non_standard_port"
# This event is the default positive event that will cause the
# alert to trigger
default_event = {
"_type": "event",
"_source": {
"category": "squid",
"tags": ["squid"],
"details": {
"details.sourceipaddress": "1.2.3.4",
"details.destination": "http://evil.com/evil.exe",
"details.proxyaction": "TCP_DENIED/-",
}
}
}
# This alert is the expected result from running this task
default_alert = {
"category": "squid",
"tags": ['squid', 'proxy'],
"severity": "WARNING",
"summary": 'Multiple Proxy DROP events detected from 1.2.3.4 to the following executable file destinations: http://evil.com/evil.exe',
}
test_cases = []
test_cases.append(
PositiveAlertTestCase(
description="Positive test with default events and default alert expected",
events=AlertTestSuite.create_events(default_event, 1),
expected_alert=default_alert
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['category'] = 'bad'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect category",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['tags'] = 'bad tag example'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect tags",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({
'minutes': 241})
event['_source']['receivedtimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({
'minutes': 241})
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with old timestamp",
events=events,
)
)
|
<commit_before><commit_msg>Add tests for proxy drop executable<commit_after># This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
from positive_alert_test_case import PositiveAlertTestCase
from negative_alert_test_case import NegativeAlertTestCase
from alert_test_suite import AlertTestSuite
class TestAlertSSHPasswordAuthViolation(AlertTestSuite):
alert_filename = "proxy_drop_non_standard_port"
# This event is the default positive event that will cause the
# alert to trigger
default_event = {
"_type": "event",
"_source": {
"category": "squid",
"tags": ["squid"],
"details": {
"details.sourceipaddress": "1.2.3.4",
"details.destination": "http://evil.com/evil.exe",
"details.proxyaction": "TCP_DENIED/-",
}
}
}
# This alert is the expected result from running this task
default_alert = {
"category": "squid",
"tags": ['squid', 'proxy'],
"severity": "WARNING",
"summary": 'Multiple Proxy DROP events detected from 1.2.3.4 to the following executable file destinations: http://evil.com/evil.exe',
}
test_cases = []
test_cases.append(
PositiveAlertTestCase(
description="Positive test with default events and default alert expected",
events=AlertTestSuite.create_events(default_event, 1),
expected_alert=default_alert
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['category'] = 'bad'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect category",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['tags'] = 'bad tag example'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect tags",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({
'minutes': 241})
event['_source']['receivedtimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({
'minutes': 241})
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with old timestamp",
events=events,
)
)
|
|
379aef7e3aebc05352cacd274b43b156e32de18b
|
runtests.py
|
runtests.py
|
#!/usr/bin/env python
import argparse
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
def runtests(test_labels):
settings.configure(INSTALLED_APPS=['tests'])
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(test_labels)
sys.exit(failures)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('test_labels', nargs='*', default=['tests'])
args = parser.parse_args()
runtests(test_labels=args.test_labels)
|
Add script to run tests
|
Add script to run tests
|
Python
|
mit
|
lamarmeigs/django-clean-fields
|
Add script to run tests
|
#!/usr/bin/env python
import argparse
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
def runtests(test_labels):
settings.configure(INSTALLED_APPS=['tests'])
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(test_labels)
sys.exit(failures)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('test_labels', nargs='*', default=['tests'])
args = parser.parse_args()
runtests(test_labels=args.test_labels)
|
<commit_before><commit_msg>Add script to run tests<commit_after>
|
#!/usr/bin/env python
import argparse
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
def runtests(test_labels):
settings.configure(INSTALLED_APPS=['tests'])
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(test_labels)
sys.exit(failures)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('test_labels', nargs='*', default=['tests'])
args = parser.parse_args()
runtests(test_labels=args.test_labels)
|
Add script to run tests#!/usr/bin/env python
import argparse
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
def runtests(test_labels):
settings.configure(INSTALLED_APPS=['tests'])
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(test_labels)
sys.exit(failures)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('test_labels', nargs='*', default=['tests'])
args = parser.parse_args()
runtests(test_labels=args.test_labels)
|
<commit_before><commit_msg>Add script to run tests<commit_after>#!/usr/bin/env python
import argparse
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
def runtests(test_labels):
settings.configure(INSTALLED_APPS=['tests'])
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(test_labels)
sys.exit(failures)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('test_labels', nargs='*', default=['tests'])
args = parser.parse_args()
runtests(test_labels=args.test_labels)
|
|
abf39931331f54aff5f10345939420041bd2039d
|
tests/test_APS2Pattern.py
|
tests/test_APS2Pattern.py
|
import h5py
import unittest
import numpy as np
from copy import copy
from QGL import *
from instruments.drivers import APS2Pattern
class APSPatternUtils(unittest.TestCase):
def setUp(self):
self.q1gate = Channels.LogicalMarkerChannel(label='q1-gate')
self.q1 = Qubit(label='q1', gateChan=self.q1gate)
self.q1 = Qubit(label='q1')
self.q1.pulseParams['length'] = 30e-9
Compiler.channelLib = {'q1': self.q1, 'q1-gate': self.q1gate}
def test_synchronize_control_flow(self):
q1 = self.q1
pulse = Compiler.Waveform()
pulse.length = 24
pulse.key = 12345
delay = Compiler.Waveform()
delay.length = 100
delay.isTimeAmp = True
blank = Compiler.Waveform( BLANK(q1, pulse.length) )
seq_1 = [qwait(), delay, copy(pulse), qwait(), copy(pulse)]
seq_2 = [qwait(), copy(blank), qwait(), copy(blank)]
offsets = { APS2Pattern.wf_sig(pulse) : 0 }
instructions = APS2Pattern.create_seq_instructions([seq_1, seq_2, [], [], []], offsets)
instr_types = [
APS2Pattern.SYNC,
APS2Pattern.WAIT,
APS2Pattern.WFM,
APS2Pattern.MARKER,
APS2Pattern.WFM,
APS2Pattern.WAIT,
APS2Pattern.WFM,
APS2Pattern.MARKER
]
for actual, expected in zip(instructions, instr_types):
instrOpCode = (actual.header >> 4) & 0xf
assert(instrOpCode == expected)
if __name__ == "__main__":
unittest.main()
|
Add test for APS2 instruction merging.
|
Add test for APS2 instruction merging.
A reduced example of the failing situation from issue #90.
|
Python
|
apache-2.0
|
Plourde-Research-Lab/PyQLab,BBN-Q/PyQLab,rmcgurrin/PyQLab,calebjordan/PyQLab
|
Add test for APS2 instruction merging.
A reduced example of the failing situation from issue #90.
|
import h5py
import unittest
import numpy as np
from copy import copy
from QGL import *
from instruments.drivers import APS2Pattern
class APSPatternUtils(unittest.TestCase):
def setUp(self):
self.q1gate = Channels.LogicalMarkerChannel(label='q1-gate')
self.q1 = Qubit(label='q1', gateChan=self.q1gate)
self.q1 = Qubit(label='q1')
self.q1.pulseParams['length'] = 30e-9
Compiler.channelLib = {'q1': self.q1, 'q1-gate': self.q1gate}
def test_synchronize_control_flow(self):
q1 = self.q1
pulse = Compiler.Waveform()
pulse.length = 24
pulse.key = 12345
delay = Compiler.Waveform()
delay.length = 100
delay.isTimeAmp = True
blank = Compiler.Waveform( BLANK(q1, pulse.length) )
seq_1 = [qwait(), delay, copy(pulse), qwait(), copy(pulse)]
seq_2 = [qwait(), copy(blank), qwait(), copy(blank)]
offsets = { APS2Pattern.wf_sig(pulse) : 0 }
instructions = APS2Pattern.create_seq_instructions([seq_1, seq_2, [], [], []], offsets)
instr_types = [
APS2Pattern.SYNC,
APS2Pattern.WAIT,
APS2Pattern.WFM,
APS2Pattern.MARKER,
APS2Pattern.WFM,
APS2Pattern.WAIT,
APS2Pattern.WFM,
APS2Pattern.MARKER
]
for actual, expected in zip(instructions, instr_types):
instrOpCode = (actual.header >> 4) & 0xf
assert(instrOpCode == expected)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add test for APS2 instruction merging.
A reduced example of the failing situation from issue #90.<commit_after>
|
import h5py
import unittest
import numpy as np
from copy import copy
from QGL import *
from instruments.drivers import APS2Pattern
class APSPatternUtils(unittest.TestCase):
def setUp(self):
self.q1gate = Channels.LogicalMarkerChannel(label='q1-gate')
self.q1 = Qubit(label='q1', gateChan=self.q1gate)
self.q1 = Qubit(label='q1')
self.q1.pulseParams['length'] = 30e-9
Compiler.channelLib = {'q1': self.q1, 'q1-gate': self.q1gate}
def test_synchronize_control_flow(self):
q1 = self.q1
pulse = Compiler.Waveform()
pulse.length = 24
pulse.key = 12345
delay = Compiler.Waveform()
delay.length = 100
delay.isTimeAmp = True
blank = Compiler.Waveform( BLANK(q1, pulse.length) )
seq_1 = [qwait(), delay, copy(pulse), qwait(), copy(pulse)]
seq_2 = [qwait(), copy(blank), qwait(), copy(blank)]
offsets = { APS2Pattern.wf_sig(pulse) : 0 }
instructions = APS2Pattern.create_seq_instructions([seq_1, seq_2, [], [], []], offsets)
instr_types = [
APS2Pattern.SYNC,
APS2Pattern.WAIT,
APS2Pattern.WFM,
APS2Pattern.MARKER,
APS2Pattern.WFM,
APS2Pattern.WAIT,
APS2Pattern.WFM,
APS2Pattern.MARKER
]
for actual, expected in zip(instructions, instr_types):
instrOpCode = (actual.header >> 4) & 0xf
assert(instrOpCode == expected)
if __name__ == "__main__":
unittest.main()
|
Add test for APS2 instruction merging.
A reduced example of the failing situation from issue #90.import h5py
import unittest
import numpy as np
from copy import copy
from QGL import *
from instruments.drivers import APS2Pattern
class APSPatternUtils(unittest.TestCase):
def setUp(self):
self.q1gate = Channels.LogicalMarkerChannel(label='q1-gate')
self.q1 = Qubit(label='q1', gateChan=self.q1gate)
self.q1 = Qubit(label='q1')
self.q1.pulseParams['length'] = 30e-9
Compiler.channelLib = {'q1': self.q1, 'q1-gate': self.q1gate}
def test_synchronize_control_flow(self):
q1 = self.q1
pulse = Compiler.Waveform()
pulse.length = 24
pulse.key = 12345
delay = Compiler.Waveform()
delay.length = 100
delay.isTimeAmp = True
blank = Compiler.Waveform( BLANK(q1, pulse.length) )
seq_1 = [qwait(), delay, copy(pulse), qwait(), copy(pulse)]
seq_2 = [qwait(), copy(blank), qwait(), copy(blank)]
offsets = { APS2Pattern.wf_sig(pulse) : 0 }
instructions = APS2Pattern.create_seq_instructions([seq_1, seq_2, [], [], []], offsets)
instr_types = [
APS2Pattern.SYNC,
APS2Pattern.WAIT,
APS2Pattern.WFM,
APS2Pattern.MARKER,
APS2Pattern.WFM,
APS2Pattern.WAIT,
APS2Pattern.WFM,
APS2Pattern.MARKER
]
for actual, expected in zip(instructions, instr_types):
instrOpCode = (actual.header >> 4) & 0xf
assert(instrOpCode == expected)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add test for APS2 instruction merging.
A reduced example of the failing situation from issue #90.<commit_after>import h5py
import unittest
import numpy as np
from copy import copy
from QGL import *
from instruments.drivers import APS2Pattern
class APSPatternUtils(unittest.TestCase):
def setUp(self):
self.q1gate = Channels.LogicalMarkerChannel(label='q1-gate')
self.q1 = Qubit(label='q1', gateChan=self.q1gate)
self.q1 = Qubit(label='q1')
self.q1.pulseParams['length'] = 30e-9
Compiler.channelLib = {'q1': self.q1, 'q1-gate': self.q1gate}
def test_synchronize_control_flow(self):
q1 = self.q1
pulse = Compiler.Waveform()
pulse.length = 24
pulse.key = 12345
delay = Compiler.Waveform()
delay.length = 100
delay.isTimeAmp = True
blank = Compiler.Waveform( BLANK(q1, pulse.length) )
seq_1 = [qwait(), delay, copy(pulse), qwait(), copy(pulse)]
seq_2 = [qwait(), copy(blank), qwait(), copy(blank)]
offsets = { APS2Pattern.wf_sig(pulse) : 0 }
instructions = APS2Pattern.create_seq_instructions([seq_1, seq_2, [], [], []], offsets)
instr_types = [
APS2Pattern.SYNC,
APS2Pattern.WAIT,
APS2Pattern.WFM,
APS2Pattern.MARKER,
APS2Pattern.WFM,
APS2Pattern.WAIT,
APS2Pattern.WFM,
APS2Pattern.MARKER
]
for actual, expected in zip(instructions, instr_types):
instrOpCode = (actual.header >> 4) & 0xf
assert(instrOpCode == expected)
if __name__ == "__main__":
unittest.main()
|
|
aeaf2e1a1207f2094ea4298b1ecff015f5996b5a
|
skimage/filter/tests/test_gabor.py
|
skimage/filter/tests/test_gabor.py
|
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from skimage.filter import gabor_kernel, gabor_filter
def test_gabor_kernel_sum():
for sigmax in range(1, 10, 2):
for sigmay in range(1, 10, 2):
for frequency in range(0, 10, 2):
kernel = gabor_kernel(sigmax, sigmay, frequency+0.1, 0)
# make sure gaussian distribution is covered nearly 100%
assert_almost_equal(np.abs(kernel).sum(), 1, 2)
def test_gabor_kernel_theta():
for sigmax in range(1, 10, 2):
for sigmay in range(1, 10, 2):
for frequency in range(0, 10, 2):
for theta in range(0, 10, 2):
kernel0 = gabor_kernel(sigmax, sigmay, frequency+0.1, theta)
kernel180 = gabor_kernel(sigmax, sigmay, frequency,
theta+np.pi)
assert_array_almost_equal(np.abs(kernel0),
np.abs(kernel180))
def test_gabor_filter():
real, imag = gabor_filter(np.random.random((100, 100)), 1, 1, 1, 1)
if __name__ == "__main__":
from numpy import testing
testing.run_module_suite()
|
Add test cases for gabor filter
|
Add test cases for gabor filter
|
Python
|
bsd-3-clause
|
blink1073/scikit-image,dpshelio/scikit-image,emon10005/scikit-image,ofgulban/scikit-image,Britefury/scikit-image,chriscrosscutler/scikit-image,keflavich/scikit-image,Hiyorimi/scikit-image,keflavich/scikit-image,warmspringwinds/scikit-image,chintak/scikit-image,vighneshbirodkar/scikit-image,ClinicalGraphics/scikit-image,newville/scikit-image,WarrenWeckesser/scikits-image,youprofit/scikit-image,pratapvardhan/scikit-image,ofgulban/scikit-image,WarrenWeckesser/scikits-image,ClinicalGraphics/scikit-image,bsipocz/scikit-image,blink1073/scikit-image,michaelpacer/scikit-image,SamHames/scikit-image,Britefury/scikit-image,GaZ3ll3/scikit-image,SamHames/scikit-image,jwiggins/scikit-image,rjeli/scikit-image,chintak/scikit-image,paalge/scikit-image,vighneshbirodkar/scikit-image,bennlich/scikit-image,jwiggins/scikit-image,dpshelio/scikit-image,SamHames/scikit-image,SamHames/scikit-image,chintak/scikit-image,rjeli/scikit-image,bennlich/scikit-image,paalge/scikit-image,warmspringwinds/scikit-image,juliusbierk/scikit-image,ajaybhat/scikit-image,michaelaye/scikit-image,paalge/scikit-image,bsipocz/scikit-image,juliusbierk/scikit-image,Midafi/scikit-image,pratapvardhan/scikit-image,youprofit/scikit-image,rjeli/scikit-image,robintw/scikit-image,vighneshbirodkar/scikit-image,almarklein/scikit-image,michaelpacer/scikit-image,michaelaye/scikit-image,chriscrosscutler/scikit-image,ajaybhat/scikit-image,almarklein/scikit-image,almarklein/scikit-image,oew1v07/scikit-image,chintak/scikit-image,Midafi/scikit-image,almarklein/scikit-image,emon10005/scikit-image,oew1v07/scikit-image,robintw/scikit-image,ofgulban/scikit-image,newville/scikit-image,GaZ3ll3/scikit-image,Hiyorimi/scikit-image
|
Add test cases for gabor filter
|
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from skimage.filter import gabor_kernel, gabor_filter
def test_gabor_kernel_sum():
for sigmax in range(1, 10, 2):
for sigmay in range(1, 10, 2):
for frequency in range(0, 10, 2):
kernel = gabor_kernel(sigmax, sigmay, frequency+0.1, 0)
# make sure gaussian distribution is covered nearly 100%
assert_almost_equal(np.abs(kernel).sum(), 1, 2)
def test_gabor_kernel_theta():
for sigmax in range(1, 10, 2):
for sigmay in range(1, 10, 2):
for frequency in range(0, 10, 2):
for theta in range(0, 10, 2):
kernel0 = gabor_kernel(sigmax, sigmay, frequency+0.1, theta)
kernel180 = gabor_kernel(sigmax, sigmay, frequency,
theta+np.pi)
assert_array_almost_equal(np.abs(kernel0),
np.abs(kernel180))
def test_gabor_filter():
real, imag = gabor_filter(np.random.random((100, 100)), 1, 1, 1, 1)
if __name__ == "__main__":
from numpy import testing
testing.run_module_suite()
|
<commit_before><commit_msg>Add test cases for gabor filter<commit_after>
|
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from skimage.filter import gabor_kernel, gabor_filter
def test_gabor_kernel_sum():
for sigmax in range(1, 10, 2):
for sigmay in range(1, 10, 2):
for frequency in range(0, 10, 2):
kernel = gabor_kernel(sigmax, sigmay, frequency+0.1, 0)
# make sure gaussian distribution is covered nearly 100%
assert_almost_equal(np.abs(kernel).sum(), 1, 2)
def test_gabor_kernel_theta():
for sigmax in range(1, 10, 2):
for sigmay in range(1, 10, 2):
for frequency in range(0, 10, 2):
for theta in range(0, 10, 2):
kernel0 = gabor_kernel(sigmax, sigmay, frequency+0.1, theta)
kernel180 = gabor_kernel(sigmax, sigmay, frequency,
theta+np.pi)
assert_array_almost_equal(np.abs(kernel0),
np.abs(kernel180))
def test_gabor_filter():
real, imag = gabor_filter(np.random.random((100, 100)), 1, 1, 1, 1)
if __name__ == "__main__":
from numpy import testing
testing.run_module_suite()
|
Add test cases for gabor filterimport numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from skimage.filter import gabor_kernel, gabor_filter
def test_gabor_kernel_sum():
for sigmax in range(1, 10, 2):
for sigmay in range(1, 10, 2):
for frequency in range(0, 10, 2):
kernel = gabor_kernel(sigmax, sigmay, frequency+0.1, 0)
# make sure gaussian distribution is covered nearly 100%
assert_almost_equal(np.abs(kernel).sum(), 1, 2)
def test_gabor_kernel_theta():
for sigmax in range(1, 10, 2):
for sigmay in range(1, 10, 2):
for frequency in range(0, 10, 2):
for theta in range(0, 10, 2):
kernel0 = gabor_kernel(sigmax, sigmay, frequency+0.1, theta)
kernel180 = gabor_kernel(sigmax, sigmay, frequency,
theta+np.pi)
assert_array_almost_equal(np.abs(kernel0),
np.abs(kernel180))
def test_gabor_filter():
real, imag = gabor_filter(np.random.random((100, 100)), 1, 1, 1, 1)
if __name__ == "__main__":
from numpy import testing
testing.run_module_suite()
|
<commit_before><commit_msg>Add test cases for gabor filter<commit_after>import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from skimage.filter import gabor_kernel, gabor_filter
def test_gabor_kernel_sum():
for sigmax in range(1, 10, 2):
for sigmay in range(1, 10, 2):
for frequency in range(0, 10, 2):
kernel = gabor_kernel(sigmax, sigmay, frequency+0.1, 0)
# make sure gaussian distribution is covered nearly 100%
assert_almost_equal(np.abs(kernel).sum(), 1, 2)
def test_gabor_kernel_theta():
for sigmax in range(1, 10, 2):
for sigmay in range(1, 10, 2):
for frequency in range(0, 10, 2):
for theta in range(0, 10, 2):
kernel0 = gabor_kernel(sigmax, sigmay, frequency+0.1, theta)
kernel180 = gabor_kernel(sigmax, sigmay, frequency,
theta+np.pi)
assert_array_almost_equal(np.abs(kernel0),
np.abs(kernel180))
def test_gabor_filter():
real, imag = gabor_filter(np.random.random((100, 100)), 1, 1, 1, 1)
if __name__ == "__main__":
from numpy import testing
testing.run_module_suite()
|
|
a70f46aac52be5b38b869cfbe18c0421a0032aee
|
count_params.py
|
count_params.py
|
import sys
import numpy as np
import torch
model = torch.load(sys.argv[1])
params = 0
for key in model:
params += np.multiply.reduce(model[key].shape)
print('Total number of parameters: ' + str(params))
|
Add script to count parameters of PyTorch model
|
Add script to count parameters of PyTorch model
Signed-off-by: Tushar Pankaj <514568343cdd6c4d7c6bb94cf636e24b01176284@gmail.com>
|
Python
|
mit
|
sauhaardac/training,sauhaardac/training
|
Add script to count parameters of PyTorch model
Signed-off-by: Tushar Pankaj <514568343cdd6c4d7c6bb94cf636e24b01176284@gmail.com>
|
import sys
import numpy as np
import torch
model = torch.load(sys.argv[1])
params = 0
for key in model:
params += np.multiply.reduce(model[key].shape)
print('Total number of parameters: ' + str(params))
|
<commit_before><commit_msg>Add script to count parameters of PyTorch model
Signed-off-by: Tushar Pankaj <514568343cdd6c4d7c6bb94cf636e24b01176284@gmail.com><commit_after>
|
import sys
import numpy as np
import torch
model = torch.load(sys.argv[1])
params = 0
for key in model:
params += np.multiply.reduce(model[key].shape)
print('Total number of parameters: ' + str(params))
|
Add script to count parameters of PyTorch model
Signed-off-by: Tushar Pankaj <514568343cdd6c4d7c6bb94cf636e24b01176284@gmail.com>import sys
import numpy as np
import torch
model = torch.load(sys.argv[1])
params = 0
for key in model:
params += np.multiply.reduce(model[key].shape)
print('Total number of parameters: ' + str(params))
|
<commit_before><commit_msg>Add script to count parameters of PyTorch model
Signed-off-by: Tushar Pankaj <514568343cdd6c4d7c6bb94cf636e24b01176284@gmail.com><commit_after>import sys
import numpy as np
import torch
model = torch.load(sys.argv[1])
params = 0
for key in model:
params += np.multiply.reduce(model[key].shape)
print('Total number of parameters: ' + str(params))
|
|
35e76ec99a3710a20b17a5afddaa14389af65098
|
tools/import_mediawiki.py
|
tools/import_mediawiki.py
|
import os
import os.path
import argparse
from sqlalchemy import create_engine
def main():
parser = argparse.ArgumentParser()
parser.add_argument('url')
parser.add_argument('-o', '--out', default='wikked_import')
parser.add_argument('--prefix', default='wiki')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('--ext', default='.md')
args = parser.parse_args()
prefix = args.prefix
out_dir = args.out
ext = '.' + args.ext.lstrip('.')
if not out_dir:
parser.print_help()
return 1
if os.path.isdir(out_dir):
print("The output directory already exists!")
return 1
engine = create_engine(args.url, echo=args.verbose)
conn = engine.connect()
query = (
'SELECT '
'p.page_id,p.page_title,p.page_latest,'
'r.rev_id,r.rev_text_id,t.old_id,t.old_text '
'from %(prefix)s_page p '
'INNER JOIN %(prefix)s_revision r ON p.page_latest = r.rev_id '
'INNER JOIN %(prefix)s_text t ON r.rev_text_id = t.old_id;' %
{'prefix': prefix})
q = conn.execute(query)
for p in q:
title = p['page_title'].decode('utf8')
text = p['old_text'].decode('utf8')
path_noext = os.path.join(out_dir, title)
path = path_noext + ext
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
if os.path.exists(path):
suffnum = 2
while True:
new_path = '%s_%d%s' % (path_noext, suffnum, ext)
if not os.path.exists(new_path):
break
suffnum += 1
if suffnum > 100:
                    raise Exception("Can't find available path for: %s" %
path)
print("WARNING: %s exists" % path)
print("WARNING: creating %s instead" % new_path)
path = new_path
print(p['page_id'], title)
with open(path, 'w', encoding='utf8') as fp:
fp.write(text)
conn.close()
if __name__ == '__main__':
main()
|
Add some simple MediaWiki importer.
|
tools: Add some simple MediaWiki importer.
|
Python
|
apache-2.0
|
ludovicchabant/Wikked,ludovicchabant/Wikked,ludovicchabant/Wikked
|
tools: Add some simple MediaWiki importer.
|
import os
import os.path
import argparse
from sqlalchemy import create_engine
def main():
parser = argparse.ArgumentParser()
parser.add_argument('url')
parser.add_argument('-o', '--out', default='wikked_import')
parser.add_argument('--prefix', default='wiki')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('--ext', default='.md')
args = parser.parse_args()
prefix = args.prefix
out_dir = args.out
ext = '.' + args.ext.lstrip('.')
if not out_dir:
parser.print_help()
return 1
if os.path.isdir(out_dir):
print("The output directory already exists!")
return 1
engine = create_engine(args.url, echo=args.verbose)
conn = engine.connect()
query = (
'SELECT '
'p.page_id,p.page_title,p.page_latest,'
'r.rev_id,r.rev_text_id,t.old_id,t.old_text '
'from %(prefix)s_page p '
'INNER JOIN %(prefix)s_revision r ON p.page_latest = r.rev_id '
'INNER JOIN %(prefix)s_text t ON r.rev_text_id = t.old_id;' %
{'prefix': prefix})
q = conn.execute(query)
for p in q:
title = p['page_title'].decode('utf8')
text = p['old_text'].decode('utf8')
path_noext = os.path.join(out_dir, title)
path = path_noext + ext
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
if os.path.exists(path):
suffnum = 2
while True:
new_path = '%s_%d%s' % (path_noext, suffnum, ext)
if not os.path.exists(new_path):
break
suffnum += 1
if suffnum > 100:
                    raise Exception("Can't find available path for: %s" %
path)
print("WARNING: %s exists" % path)
print("WARNING: creating %s instead" % new_path)
path = new_path
print(p['page_id'], title)
with open(path, 'w', encoding='utf8') as fp:
fp.write(text)
conn.close()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>tools: Add some simple MediaWiki importer.<commit_after>
|
import os
import os.path
import argparse
from sqlalchemy import create_engine
def main():
parser = argparse.ArgumentParser()
parser.add_argument('url')
parser.add_argument('-o', '--out', default='wikked_import')
parser.add_argument('--prefix', default='wiki')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('--ext', default='.md')
args = parser.parse_args()
prefix = args.prefix
out_dir = args.out
ext = '.' + args.ext.lstrip('.')
if not out_dir:
parser.print_help()
return 1
if os.path.isdir(out_dir):
print("The output directory already exists!")
return 1
engine = create_engine(args.url, echo=args.verbose)
conn = engine.connect()
query = (
'SELECT '
'p.page_id,p.page_title,p.page_latest,'
'r.rev_id,r.rev_text_id,t.old_id,t.old_text '
'from %(prefix)s_page p '
'INNER JOIN %(prefix)s_revision r ON p.page_latest = r.rev_id '
'INNER JOIN %(prefix)s_text t ON r.rev_text_id = t.old_id;' %
{'prefix': prefix})
q = conn.execute(query)
for p in q:
title = p['page_title'].decode('utf8')
text = p['old_text'].decode('utf8')
path_noext = os.path.join(out_dir, title)
path = path_noext + ext
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
if os.path.exists(path):
suffnum = 2
while True:
new_path = '%s_%d%s' % (path_noext, suffnum, ext)
if not os.path.exists(new_path):
break
suffnum += 1
if suffnum > 100:
                    raise Exception("Can't find available path for: %s" %
path)
print("WARNING: %s exists" % path)
print("WARNING: creating %s instead" % new_path)
path = new_path
print(p['page_id'], title)
with open(path, 'w', encoding='utf8') as fp:
fp.write(text)
conn.close()
if __name__ == '__main__':
main()
|
tools: Add some simple MediaWiki importer.import os
import os.path
import argparse
from sqlalchemy import create_engine
def main():
parser = argparse.ArgumentParser()
parser.add_argument('url')
parser.add_argument('-o', '--out', default='wikked_import')
parser.add_argument('--prefix', default='wiki')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('--ext', default='.md')
args = parser.parse_args()
prefix = args.prefix
out_dir = args.out
ext = '.' + args.ext.lstrip('.')
if not out_dir:
parser.print_help()
return 1
if os.path.isdir(out_dir):
print("The output directory already exists!")
return 1
engine = create_engine(args.url, echo=args.verbose)
conn = engine.connect()
query = (
'SELECT '
'p.page_id,p.page_title,p.page_latest,'
'r.rev_id,r.rev_text_id,t.old_id,t.old_text '
'from %(prefix)s_page p '
'INNER JOIN %(prefix)s_revision r ON p.page_latest = r.rev_id '
'INNER JOIN %(prefix)s_text t ON r.rev_text_id = t.old_id;' %
{'prefix': prefix})
q = conn.execute(query)
for p in q:
title = p['page_title'].decode('utf8')
text = p['old_text'].decode('utf8')
path_noext = os.path.join(out_dir, title)
path = path_noext + ext
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
if os.path.exists(path):
suffnum = 2
while True:
new_path = '%s_%d%s' % (path_noext, suffnum, ext)
if not os.path.exists(new_path):
break
suffnum += 1
if suffnum > 100:
                    raise Exception("Can't find available path for: %s" %
path)
print("WARNING: %s exists" % path)
print("WARNING: creating %s instead" % new_path)
path = new_path
print(p['page_id'], title)
with open(path, 'w', encoding='utf8') as fp:
fp.write(text)
conn.close()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>tools: Add some simple MediaWiki importer.<commit_after>import os
import os.path
import argparse
from sqlalchemy import create_engine
def main():
parser = argparse.ArgumentParser()
parser.add_argument('url')
parser.add_argument('-o', '--out', default='wikked_import')
parser.add_argument('--prefix', default='wiki')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('--ext', default='.md')
args = parser.parse_args()
prefix = args.prefix
out_dir = args.out
ext = '.' + args.ext.lstrip('.')
if not out_dir:
parser.print_help()
return 1
if os.path.isdir(out_dir):
print("The output directory already exists!")
return 1
engine = create_engine(args.url, echo=args.verbose)
conn = engine.connect()
query = (
'SELECT '
'p.page_id,p.page_title,p.page_latest,'
'r.rev_id,r.rev_text_id,t.old_id,t.old_text '
'from %(prefix)s_page p '
'INNER JOIN %(prefix)s_revision r ON p.page_latest = r.rev_id '
'INNER JOIN %(prefix)s_text t ON r.rev_text_id = t.old_id;' %
{'prefix': prefix})
q = conn.execute(query)
for p in q:
title = p['page_title'].decode('utf8')
text = p['old_text'].decode('utf8')
path_noext = os.path.join(out_dir, title)
path = path_noext + ext
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
if os.path.exists(path):
suffnum = 2
while True:
new_path = '%s_%d%s' % (path_noext, suffnum, ext)
if not os.path.exists(new_path):
break
suffnum += 1
if suffnum > 100:
                    raise Exception("Can't find available path for: %s" %
path)
print("WARNING: %s exists" % path)
print("WARNING: creating %s instead" % new_path)
path = new_path
print(p['page_id'], title)
with open(path, 'w', encoding='utf8') as fp:
fp.write(text)
conn.close()
if __name__ == '__main__':
main()
|
|
fe6ece236e684d76441280ba700565f7fbce40cc
|
14B-088/HI/analysis/pbcov_masking.py
|
14B-088/HI/analysis/pbcov_masking.py
|
'''
Cut out noisy regions by imposing a mask of the primary beam coverage.
'''
from astropy.io import fits
from spectral_cube import SpectralCube
from spectral_cube.cube_utils import beams_to_bintable
from astropy.utils.console import ProgressBar
import os
from analysis.paths import fourteenB_HI_data_path
# execfile(os.path.expanduser("~/Dropbox/code_development/ewky_scripts/write_huge_fits.py"))
pbcov = fits.open(fourteenB_HI_data_path("M33_14B-088_pbcov.fits"))[0]
cube = SpectralCube.read(fourteenB_HI_data_path("M33_14B-088_HI.clean.image.fits"))
# Apply the mask, using a cut-off of 0.3. This retains all of the regions with
# emission.
pblim = 0.3
masked_cube = cube.with_mask(pbcov.data > pblim)
masked_cube = masked_cube.minimal_subcube()
new_fitsname = fourteenB_HI_data_path("M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.fits",
no_check=True)
masked_cube.write(new_fitsname)
# create_huge_fits(new_fitsname, cube.header)
# save_hdu = fits.open(new_fitsname, mode='update')
# Save per channel
# for chan in ProgressBar(cube.shape[0]):
# save_hdu[0].data[chan] = cube[chan].value
# if chan % 50 == 0:
# save_hdu.flush()
# Save the beam table!
# save_hdu.append(beams_to_bintable(cube.beams))
# save_hdu.flush()
# save_hdu.close()
|
Create masked version based on pbcov cutoff
|
Create masked version based on pbcov cutoff
|
Python
|
mit
|
e-koch/VLA_Lband,e-koch/VLA_Lband
|
Create masked version based on pbcov cutoff
|
'''
Cut out noisy regions by imposing a mask of the primary beam coverage.
'''
from astropy.io import fits
from spectral_cube import SpectralCube
from spectral_cube.cube_utils import beams_to_bintable
from astropy.utils.console import ProgressBar
import os
from analysis.paths import fourteenB_HI_data_path
# execfile(os.path.expanduser("~/Dropbox/code_development/ewky_scripts/write_huge_fits.py"))
pbcov = fits.open(fourteenB_HI_data_path("M33_14B-088_pbcov.fits"))[0]
cube = SpectralCube.read(fourteenB_HI_data_path("M33_14B-088_HI.clean.image.fits"))
# Apply the mask, using a cut-off of 0.3. This retains all of the regions with
# emission.
pblim = 0.3
masked_cube = cube.with_mask(pbcov.data > pblim)
masked_cube = masked_cube.minimal_subcube()
new_fitsname = fourteenB_HI_data_path("M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.fits",
no_check=True)
masked_cube.write(new_fitsname)
# create_huge_fits(new_fitsname, cube.header)
# save_hdu = fits.open(new_fitsname, mode='update')
# Save per channel
# for chan in ProgressBar(cube.shape[0]):
# save_hdu[0].data[chan] = cube[chan].value
# if chan % 50 == 0:
# save_hdu.flush()
# Save the beam table!
# save_hdu.append(beams_to_bintable(cube.beams))
# save_hdu.flush()
# save_hdu.close()
|
<commit_before><commit_msg>Create masked version based on pbcov cutoff<commit_after>
|
'''
Cut out noisy regions by imposing a mask of the primary beam coverage.
'''
from astropy.io import fits
from spectral_cube import SpectralCube
from spectral_cube.cube_utils import beams_to_bintable
from astropy.utils.console import ProgressBar
import os
from analysis.paths import fourteenB_HI_data_path
# execfile(os.path.expanduser("~/Dropbox/code_development/ewky_scripts/write_huge_fits.py"))
pbcov = fits.open(fourteenB_HI_data_path("M33_14B-088_pbcov.fits"))[0]
cube = SpectralCube.read(fourteenB_HI_data_path("M33_14B-088_HI.clean.image.fits"))
# Apply the mask, using a cut-off of 0.3. This retains all of the regions with
# emission.
pblim = 0.3
masked_cube = cube.with_mask(pbcov.data > pblim)
masked_cube = masked_cube.minimal_subcube()
new_fitsname = fourteenB_HI_data_path("M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.fits",
no_check=True)
masked_cube.write(new_fitsname)
# create_huge_fits(new_fitsname, cube.header)
# save_hdu = fits.open(new_fitsname, mode='update')
# Save per channel
# for chan in ProgressBar(cube.shape[0]):
# save_hdu[0].data[chan] = cube[chan].value
# if chan % 50 == 0:
# save_hdu.flush()
# Save the beam table!
# save_hdu.append(beams_to_bintable(cube.beams))
# save_hdu.flush()
# save_hdu.close()
|
Create masked version based on pbcov cutoff
'''
Cut out noisy regions by imposing a mask of the primary beam coverage.
'''
from astropy.io import fits
from spectral_cube import SpectralCube
from spectral_cube.cube_utils import beams_to_bintable
from astropy.utils.console import ProgressBar
import os
from analysis.paths import fourteenB_HI_data_path
# execfile(os.path.expanduser("~/Dropbox/code_development/ewky_scripts/write_huge_fits.py"))
pbcov = fits.open(fourteenB_HI_data_path("M33_14B-088_pbcov.fits"))[0]
cube = SpectralCube.read(fourteenB_HI_data_path("M33_14B-088_HI.clean.image.fits"))
# Apply the mask, using a cut-off of 0.3. This retains all of the regions with
# emission.
pblim = 0.3
masked_cube = cube.with_mask(pbcov.data > pblim)
masked_cube = masked_cube.minimal_subcube()
new_fitsname = fourteenB_HI_data_path("M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.fits",
no_check=True)
masked_cube.write(new_fitsname)
# create_huge_fits(new_fitsname, cube.header)
# save_hdu = fits.open(new_fitsname, mode='update')
# Save per channel
# for chan in ProgressBar(cube.shape[0]):
# save_hdu[0].data[chan] = cube[chan].value
# if chan % 50 == 0:
# save_hdu.flush()
# Save the beam table!
# save_hdu.append(beams_to_bintable(cube.beams))
# save_hdu.flush()
# save_hdu.close()
|
<commit_before><commit_msg>Create masked version based on pbcov cutoff<commit_after>
'''
Cut out noisy regions by imposing a mask of the primary beam coverage.
'''
from astropy.io import fits
from spectral_cube import SpectralCube
from spectral_cube.cube_utils import beams_to_bintable
from astropy.utils.console import ProgressBar
import os
from analysis.paths import fourteenB_HI_data_path
# execfile(os.path.expanduser("~/Dropbox/code_development/ewky_scripts/write_huge_fits.py"))
pbcov = fits.open(fourteenB_HI_data_path("M33_14B-088_pbcov.fits"))[0]
cube = SpectralCube.read(fourteenB_HI_data_path("M33_14B-088_HI.clean.image.fits"))
# Apply the mask, using a cut-off of 0.3. This retains all of the regions with
# emission.
pblim = 0.3
masked_cube = cube.with_mask(pbcov.data > pblim)
masked_cube = masked_cube.minimal_subcube()
new_fitsname = fourteenB_HI_data_path("M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.fits",
no_check=True)
masked_cube.write(new_fitsname)
# create_huge_fits(new_fitsname, cube.header)
# save_hdu = fits.open(new_fitsname, mode='update')
# Save per channel
# for chan in ProgressBar(cube.shape[0]):
# save_hdu[0].data[chan] = cube[chan].value
# if chan % 50 == 0:
# save_hdu.flush()
# Save the beam table!
# save_hdu.append(beams_to_bintable(cube.beams))
# save_hdu.flush()
# save_hdu.close()
|
|
b3e6855489eba5d59507ef6fb4c92f8284526ec1
|
Arrays/check_consecutive_elements.py
|
Arrays/check_consecutive_elements.py
|
import unittest
"""
Given an unsorted array of numbers, return true if the array only contains consecutive elements.
Input: 5 2 3 1 4
Output: True (consecutive elements from 1 through 5)
Input: 83 78 80 81 79 82
Output: True (consecutive elements from 78 through 83)
Input: 34 23 52 12 3
Output: False
"""
"""
Approach:
1. First check that there are (max - min + 1) elements in the array.
2. Second, check that all elements are unique.
3. If all elements are consecutive, we can use arr[i]-min as an index into the array.
4. If element is positive, make it negative, else if its negative, there is repetition.
NOTE: This only works if all numbers are positive, otherwise use a hashmap to check for dupes.
O(n) time complexity and O(1) space complexity.
"""
def check_consecutive_only(list_of_numbers):
min_val = min(list_of_numbers)
max_val = max(list_of_numbers)
if len(list_of_numbers) != (max_val - min_val + 1):
return False
for num in list_of_numbers:
index = abs(num) - min_val
if list_of_numbers[index] < 0:
return False
list_of_numbers[index] = -list_of_numbers[index]
return True
class TestConsecutiveElements(unittest.TestCase):
def test_consecutive_true(self):
list_of_numbers = [83, 78, 80, 81, 79, 82]
self.assertTrue(check_consecutive_only(list_of_numbers))
def test_consecutive_false(self):
list_of_numbers = [7, 6, 5, 5, 3, 4]
self.assertFalse(check_consecutive_only(list_of_numbers))
list_of_numbers = [34, 23, 52, 12, 3]
self.assertFalse(check_consecutive_only(list_of_numbers))
|
Check consecutive elements in an array
|
Check consecutive elements in an array
|
Python
|
mit
|
prathamtandon/g4gproblems
|
Check consecutive elements in an array
|
import unittest
"""
Given an unsorted array of numbers, return true if the array only contains consecutive elements.
Input: 5 2 3 1 4
Output: True (consecutive elements from 1 through 5)
Input: 83 78 80 81 79 82
Output: True (consecutive elements from 78 through 83)
Input: 34 23 52 12 3
Output: False
"""
"""
Approach:
1. First check that there are (max - min + 1) elements in the array.
2. Second, check that all elements are unique.
3. If all elements are consecutive, we can use arr[i]-min as an index into the array.
4. If element is positive, make it negative, else if its negative, there is repetition.
NOTE: This only works if all numbers are positive, otherwise use a hashmap to check for dupes.
O(n) time complexity and O(1) space complexity.
"""
def check_consecutive_only(list_of_numbers):
min_val = min(list_of_numbers)
max_val = max(list_of_numbers)
if len(list_of_numbers) != (max_val - min_val + 1):
return False
for num in list_of_numbers:
index = abs(num) - min_val
if list_of_numbers[index] < 0:
return False
list_of_numbers[index] = -list_of_numbers[index]
return True
class TestConsecutiveElements(unittest.TestCase):
def test_consecutive_true(self):
list_of_numbers = [83, 78, 80, 81, 79, 82]
self.assertTrue(check_consecutive_only(list_of_numbers))
def test_consecutive_false(self):
list_of_numbers = [7, 6, 5, 5, 3, 4]
self.assertFalse(check_consecutive_only(list_of_numbers))
list_of_numbers = [34, 23, 52, 12, 3]
self.assertFalse(check_consecutive_only(list_of_numbers))
|
<commit_before><commit_msg>Check consecutive elements in an array<commit_after>
|
import unittest
"""
Given an unsorted array of numbers, return true if the array only contains consecutive elements.
Input: 5 2 3 1 4
Output: True (consecutive elements from 1 through 5)
Input: 83 78 80 81 79 82
Output: True (consecutive elements from 78 through 83)
Input: 34 23 52 12 3
Output: False
"""
"""
Approach:
1. First check that there are (max - min + 1) elements in the array.
2. Second, check that all elements are unique.
3. If all elements are consecutive, we can use arr[i]-min as an index into the array.
4. If element is positive, make it negative, else if its negative, there is repetition.
NOTE: This only works if all numbers are positive, otherwise use a hashmap to check for dupes.
O(n) time complexity and O(1) space complexity.
"""
def check_consecutive_only(list_of_numbers):
min_val = min(list_of_numbers)
max_val = max(list_of_numbers)
if len(list_of_numbers) != (max_val - min_val + 1):
return False
for num in list_of_numbers:
index = abs(num) - min_val
if list_of_numbers[index] < 0:
return False
list_of_numbers[index] = -list_of_numbers[index]
return True
class TestConsecutiveElements(unittest.TestCase):
def test_consecutive_true(self):
list_of_numbers = [83, 78, 80, 81, 79, 82]
self.assertTrue(check_consecutive_only(list_of_numbers))
def test_consecutive_false(self):
list_of_numbers = [7, 6, 5, 5, 3, 4]
self.assertFalse(check_consecutive_only(list_of_numbers))
list_of_numbers = [34, 23, 52, 12, 3]
self.assertFalse(check_consecutive_only(list_of_numbers))
|
Check consecutive elements in an arrayimport unittest
"""
Given an unsorted array of numbers, return true if the array only contains consecutive elements.
Input: 5 2 3 1 4
Output: True (consecutive elements from 1 through 5)
Input: 83 78 80 81 79 82
Output: True (consecutive elements from 78 through 83)
Input: 34 23 52 12 3
Output: False
"""
"""
Approach:
1. First check that there are (max - min + 1) elements in the array.
2. Second, check that all elements are unique.
3. If all elements are consecutive, we can use arr[i]-min as an index into the array.
4. If element is positive, make it negative, else if its negative, there is repetition.
NOTE: This only works if all numbers are positive, otherwise use a hashmap to check for dupes.
O(n) time complexity and O(1) space complexity.
"""
def check_consecutive_only(list_of_numbers):
min_val = min(list_of_numbers)
max_val = max(list_of_numbers)
if len(list_of_numbers) != (max_val - min_val + 1):
return False
for num in list_of_numbers:
index = abs(num) - min_val
if list_of_numbers[index] < 0:
return False
list_of_numbers[index] = -list_of_numbers[index]
return True
class TestConsecutiveElements(unittest.TestCase):
def test_consecutive_true(self):
list_of_numbers = [83, 78, 80, 81, 79, 82]
self.assertTrue(check_consecutive_only(list_of_numbers))
def test_consecutive_false(self):
list_of_numbers = [7, 6, 5, 5, 3, 4]
self.assertFalse(check_consecutive_only(list_of_numbers))
list_of_numbers = [34, 23, 52, 12, 3]
self.assertFalse(check_consecutive_only(list_of_numbers))
|
<commit_before><commit_msg>Check consecutive elements in an array<commit_after>import unittest
"""
Given an unsorted array of numbers, return true if the array only contains consecutive elements.
Input: 5 2 3 1 4
Output: True (consecutive elements from 1 through 5)
Input: 83 78 80 81 79 82
Output: True (consecutive elements from 78 through 83)
Input: 34 23 52 12 3
Output: False
"""
"""
Approach:
1. First check that there are (max - min + 1) elements in the array.
2. Second, check that all elements are unique.
3. If all elements are consecutive, we can use arr[i]-min as an index into the array.
4. If element is positive, make it negative, else if its negative, there is repetition.
NOTE: This only works if all numbers are positive, otherwise use a hashmap to check for dupes.
O(n) time complexity and O(1) space complexity.
"""
def check_consecutive_only(list_of_numbers):
min_val = min(list_of_numbers)
max_val = max(list_of_numbers)
if len(list_of_numbers) != (max_val - min_val + 1):
return False
for num in list_of_numbers:
index = abs(num) - min_val
if list_of_numbers[index] < 0:
return False
list_of_numbers[index] = -list_of_numbers[index]
return True
class TestConsecutiveElements(unittest.TestCase):
def test_consecutive_true(self):
list_of_numbers = [83, 78, 80, 81, 79, 82]
self.assertTrue(check_consecutive_only(list_of_numbers))
def test_consecutive_false(self):
list_of_numbers = [7, 6, 5, 5, 3, 4]
self.assertFalse(check_consecutive_only(list_of_numbers))
list_of_numbers = [34, 23, 52, 12, 3]
self.assertFalse(check_consecutive_only(list_of_numbers))
|
|
55b33bff9856cc91943f0a5ae492db1fdc7d8d5a
|
numba/tests/jitclass_usecases.py
|
numba/tests/jitclass_usecases.py
|
"""
Usecases with Python 3 syntax in the signatures. This is a separate module
in order to avoid syntax errors with Python 2.
"""
class TestClass1(object):
def __init__(self, x, y, z=1, *, a=5):
self.x = x
self.y = y
self.z = z
self.a = a
class TestClass2(object):
def __init__(self, x, y, z=1, *args, a=5):
self.x = x
self.y = y
self.z = z
self.args = args
self.a = a
|
Add missing python 3 only file.
|
Add missing python 3 only file.
As title.
|
Python
|
bsd-2-clause
|
cpcloud/numba,stonebig/numba,gmarkall/numba,numba/numba,seibert/numba,seibert/numba,stonebig/numba,cpcloud/numba,stuartarchibald/numba,stuartarchibald/numba,IntelLabs/numba,sklam/numba,IntelLabs/numba,sklam/numba,numba/numba,numba/numba,seibert/numba,jriehl/numba,cpcloud/numba,jriehl/numba,IntelLabs/numba,seibert/numba,stuartarchibald/numba,stonebig/numba,seibert/numba,stuartarchibald/numba,sklam/numba,sklam/numba,stonebig/numba,gmarkall/numba,jriehl/numba,sklam/numba,IntelLabs/numba,numba/numba,stuartarchibald/numba,jriehl/numba,cpcloud/numba,cpcloud/numba,gmarkall/numba,IntelLabs/numba,gmarkall/numba,gmarkall/numba,numba/numba,jriehl/numba,stonebig/numba
|
Add missing python 3 only file.
As title.
|
"""
Usecases with Python 3 syntax in the signatures. This is a separate module
in order to avoid syntax errors with Python 2.
"""
class TestClass1(object):
def __init__(self, x, y, z=1, *, a=5):
self.x = x
self.y = y
self.z = z
self.a = a
class TestClass2(object):
def __init__(self, x, y, z=1, *args, a=5):
self.x = x
self.y = y
self.z = z
self.args = args
self.a = a
|
<commit_before><commit_msg>Add missing python 3 only file.
As title.<commit_after>
|
"""
Usecases with Python 3 syntax in the signatures. This is a separate module
in order to avoid syntax errors with Python 2.
"""
class TestClass1(object):
def __init__(self, x, y, z=1, *, a=5):
self.x = x
self.y = y
self.z = z
self.a = a
class TestClass2(object):
def __init__(self, x, y, z=1, *args, a=5):
self.x = x
self.y = y
self.z = z
self.args = args
self.a = a
|
Add missing python 3 only file.
As title."""
Usecases with Python 3 syntax in the signatures. This is a separate module
in order to avoid syntax errors with Python 2.
"""
class TestClass1(object):
def __init__(self, x, y, z=1, *, a=5):
self.x = x
self.y = y
self.z = z
self.a = a
class TestClass2(object):
def __init__(self, x, y, z=1, *args, a=5):
self.x = x
self.y = y
self.z = z
self.args = args
self.a = a
|
<commit_before><commit_msg>Add missing python 3 only file.
As title.<commit_after>"""
Usecases with Python 3 syntax in the signatures. This is a separate module
in order to avoid syntax errors with Python 2.
"""
class TestClass1(object):
def __init__(self, x, y, z=1, *, a=5):
self.x = x
self.y = y
self.z = z
self.a = a
class TestClass2(object):
def __init__(self, x, y, z=1, *args, a=5):
self.x = x
self.y = y
self.z = z
self.args = args
self.a = a
|
|
e251aff9a232a66b2d24324f394da2ad9345ce79
|
scripts/migration/migrate_none_as_email_verification.py
|
scripts/migration/migrate_none_as_email_verification.py
|
""" Ensure that users with User.email_verifications == None now have {} instead
"""
import logging
import sys
from tests.base import OsfTestCase
from tests.factories import UserFactory
from modularodm import Q
from nose.tools import *
from website import models
from website.app import init_app
from scripts import utils as scripts_utils
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def main():
init_app(routes=False)
dry_run = 'dry' in sys.argv
count = 0
if not dry_run:
scripts_utils.add_file_logger(logger, __file__)
logger.info("Iterating users with None as their email_verification")
for user in get_users_with_none_in_email_verifications():
user.email_verifications = {}
count += 1
logger.info(repr(user))
if not dry_run:
user.save()
print('{} users migrated'.format(count))
def get_users_with_none_in_email_verifications():
return models.User.find( Q('email_verifications', 'eq', None))
class TestMigrateDates(OsfTestCase):
def setUp(self):
super(TestMigrateDates, self).setUp()
        self.user1 = UserFactory(email_verifications=None)
        self.user2 = UserFactory(email_verifications={})
def test_migrate_none_as_email(self):
main()
assert_equal(self.user1.email_verifications, {})
assert_not_equal(self.user2.email_verifications, None)
if __name__ == '__main__':
main()
|
Add migration script for changing users with None as email_verifications to {}
|
Add migration script for changing users with None as email_verifications to {}
|
Python
|
apache-2.0
|
rdhyee/osf.io,samanehsan/osf.io,barbour-em/osf.io,saradbowman/osf.io,haoyuchen1992/osf.io,brandonPurvis/osf.io,jolene-esposito/osf.io,emetsger/osf.io,laurenrevere/osf.io,HarryRybacki/osf.io,haoyuchen1992/osf.io,CenterForOpenScience/osf.io,samchrisinger/osf.io,jnayak1/osf.io,caneruguz/osf.io,reinaH/osf.io,binoculars/osf.io,GageGaskins/osf.io,adlius/osf.io,barbour-em/osf.io,brianjgeiger/osf.io,sloria/osf.io,adlius/osf.io,acshi/osf.io,rdhyee/osf.io,MerlinZhang/osf.io,Nesiehr/osf.io,wearpants/osf.io,GageGaskins/osf.io,Johnetordoff/osf.io,TomHeatwole/osf.io,wearpants/osf.io,RomanZWang/osf.io,bdyetton/prettychart,zachjanicki/osf.io,jolene-esposito/osf.io,Ghalko/osf.io,asanfilippo7/osf.io,ckc6cz/osf.io,CenterForOpenScience/osf.io,jinluyuan/osf.io,leb2dg/osf.io,zachjanicki/osf.io,fabianvf/osf.io,brandonPurvis/osf.io,caneruguz/osf.io,rdhyee/osf.io,RomanZWang/osf.io,mluke93/osf.io,njantrania/osf.io,Johnetordoff/osf.io,icereval/osf.io,fabianvf/osf.io,petermalcolm/osf.io,ticklemepierce/osf.io,cslzchen/osf.io,cosenal/osf.io,zamattiac/osf.io,sbt9uc/osf.io,Nesiehr/osf.io,zachjanicki/osf.io,cslzchen/osf.io,reinaH/osf.io,mluo613/osf.io,danielneis/osf.io,jeffreyliu3230/osf.io,RomanZWang/osf.io,laurenrevere/osf.io,aaxelb/osf.io,kch8qx/osf.io,SSJohns/osf.io,kwierman/osf.io,dplorimer/osf,leb2dg/osf.io,sbt9uc/osf.io,kwierman/osf.io,felliott/osf.io,caseyrygt/osf.io,zamattiac/osf.io,brandonPurvis/osf.io,mluke93/osf.io,mluo613/osf.io,zamattiac/osf.io,barbour-em/osf.io,hmoco/osf.io,haoyuchen1992/osf.io,Ghalko/osf.io,reinaH/osf.io,crcresearch/osf.io,chrisseto/osf.io,KAsante95/osf.io,chrisseto/osf.io,ZobairAlijan/osf.io,caseyrollins/osf.io,dplorimer/osf,jnayak1/osf.io,doublebits/osf.io,jeffreyliu3230/osf.io,dplorimer/osf,Ghalko/osf.io,cldershem/osf.io,samchrisinger/osf.io,Johnetordoff/osf.io,pattisdr/osf.io,amyshi188/osf.io,cosenal/osf.io,mfraezz/osf.io,RomanZWang/osf.io,GageGaskins/osf.io,abought/osf.io,sbt9uc/osf.io,GageGaskins/osf.io,jinluyuan/osf.io,jmcarp/osf.io,jeffreyliu3230/osf.io,aaxelb/osf.io,Nesiehr/osf.io,monikagrabowska/osf.io,lyndsysimon/osf.io,HarryRybacki/osf.io,kwierman/osf.io,KAsante95/osf.io,monikagrabowska/osf.io,doublebits/osf.io,mattclark/osf.io,caseyrygt/osf.io,emetsger/osf.io,amyshi188/osf.io,leb2dg/osf.io,arpitar/osf.io,mluo613/osf.io,samanehsan/osf.io,felliott/osf.io,danielneis/osf.io,kch8qx/osf.io,cldershem/osf.io,KAsante95/osf.io,samchrisinger/osf.io,mluo613/osf.io,fabianvf/osf.io,DanielSBrown/osf.io,jinluyuan/osf.io,acshi/osf.io,sbt9uc/osf.io,crcresearch/osf.io,adlius/osf.io,mfraezz/osf.io,binoculars/osf.io,felliott/osf.io,cwisecarver/osf.io,CenterForOpenScience/osf.io,hmoco/osf.io,jnayak1/osf.io,jolene-esposito/osf.io,samanehsan/osf.io,caseyrygt/osf.io,petermalcolm/osf.io,asanfilippo7/osf.io,arpitar/osf.io,kch8qx/osf.io,cslzchen/osf.io,felliott/osf.io,Johnetordoff/osf.io,ticklemepierce/osf.io,ZobairAlijan/osf.io,petermalcolm/osf.io,cwisecarver/osf.io,danielneis/osf.io,mluo613/osf.io,hmoco/osf.io,caseyrollins/osf.io,HalcyonChimera/osf.io,ckc6cz/osf.io,abought/osf.io,amyshi188/osf.io,ZobairAlijan/osf.io,chrisseto/osf.io,arpitar/osf.io,adlius/osf.io,haoyuchen1992/osf.io,reinaH/osf.io,sloria/osf.io,brianjgeiger/osf.io,wearpants/osf.io,erinspace/osf.io,ticklemepierce/osf.io,erinspace/osf.io,njantrania/osf.io,lyndsysimon/osf.io,chennan47/osf.io,TomBaxter/osf.io,acshi/osf.io,billyhunt/osf.io,brianjgeiger/osf.io,caseyrygt/osf.io,jmcarp/osf.io,abought/osf.io,doublebits/osf.io,njantrania/osf.io,ckc6cz/osf.io,chrisseto/osf.io,monikagrabowska/osf.io,TomBaxter/osf.io,acshi/osf.io,wearpants/osf.io,mfra
ezz/osf.io,cosenal/osf.io,asanfilippo7/osf.io,HalcyonChimera/osf.io,chennan47/osf.io,HarryRybacki/osf.io,amyshi188/osf.io,leb2dg/osf.io,TomHeatwole/osf.io,DanielSBrown/osf.io,petermalcolm/osf.io,rdhyee/osf.io,monikagrabowska/osf.io,cldershem/osf.io,jmcarp/osf.io,jinluyuan/osf.io,aaxelb/osf.io,bdyetton/prettychart,danielneis/osf.io,jmcarp/osf.io,caseyrollins/osf.io,zamattiac/osf.io,fabianvf/osf.io,njantrania/osf.io,mattclark/osf.io,KAsante95/osf.io,alexschiller/osf.io,laurenrevere/osf.io,brianjgeiger/osf.io,mluke93/osf.io,alexschiller/osf.io,emetsger/osf.io,ticklemepierce/osf.io,Nesiehr/osf.io,MerlinZhang/osf.io,billyhunt/osf.io,alexschiller/osf.io,acshi/osf.io,cldershem/osf.io,zachjanicki/osf.io,aaxelb/osf.io,pattisdr/osf.io,SSJohns/osf.io,jnayak1/osf.io,jeffreyliu3230/osf.io,dplorimer/osf,alexschiller/osf.io,ckc6cz/osf.io,DanielSBrown/osf.io,billyhunt/osf.io,pattisdr/osf.io,erinspace/osf.io,hmoco/osf.io,cslzchen/osf.io,GageGaskins/osf.io,HalcyonChimera/osf.io,lyndsysimon/osf.io,ZobairAlijan/osf.io,baylee-d/osf.io,caneruguz/osf.io,brandonPurvis/osf.io,SSJohns/osf.io,bdyetton/prettychart,emetsger/osf.io,RomanZWang/osf.io,caneruguz/osf.io,kch8qx/osf.io,mfraezz/osf.io,cosenal/osf.io,CenterForOpenScience/osf.io,binoculars/osf.io,chennan47/osf.io,crcresearch/osf.io,barbour-em/osf.io,kwierman/osf.io,asanfilippo7/osf.io,SSJohns/osf.io,jolene-esposito/osf.io,HalcyonChimera/osf.io,brandonPurvis/osf.io,TomBaxter/osf.io,cwisecarver/osf.io,TomHeatwole/osf.io,lyndsysimon/osf.io,doublebits/osf.io,mluke93/osf.io,bdyetton/prettychart,KAsante95/osf.io,abought/osf.io,baylee-d/osf.io,Ghalko/osf.io,doublebits/osf.io,baylee-d/osf.io,samchrisinger/osf.io,sloria/osf.io,arpitar/osf.io,billyhunt/osf.io,kch8qx/osf.io,HarryRybacki/osf.io,alexschiller/osf.io,monikagrabowska/osf.io,mattclark/osf.io,DanielSBrown/osf.io,MerlinZhang/osf.io,icereval/osf.io,billyhunt/osf.io,icereval/osf.io,cwisecarver/osf.io,MerlinZhang/osf.io,TomHeatwole/osf.io,saradbowman/osf.io,samanehsan/osf.io
|
Add migration script for changing users with None as email_verifications to {}
|
""" Ensure that users with User.email_verifications == None now have {} instead
"""
import logging
import sys
from tests.base import OsfTestCase
from tests.factories import UserFactory
from modularodm import Q
from nose.tools import *
from website import models
from website.app import init_app
from scripts import utils as scripts_utils
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def main():
init_app(routes=False)
dry_run = 'dry' in sys.argv
count = 0
if not dry_run:
scripts_utils.add_file_logger(logger, __file__)
logger.info("Iterating users with None as their email_verification")
for user in get_users_with_none_in_email_verifications():
user.email_verifications = {}
count += 1
logger.info(repr(user))
if not dry_run:
user.save()
print('{} users migrated'.format(count))
def get_users_with_none_in_email_verifications():
return models.User.find( Q('email_verifications', 'eq', None))
class TestMigrateDates(OsfTestCase):
def setUp(self):
super(TestMigrateDates, self).setUp()
        self.user1 = UserFactory(email_verifications=None)
        self.user2 = UserFactory(email_verifications={})
def test_migrate_none_as_email(self):
main()
assert_equal(self.user1.email_verifications, {})
assert_not_equal(self.user2.email_verifications, None)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add migration script for changing users with None as email_verifications to {}<commit_after>
|
""" Ensure that users with User.email_verifications == None now have {} instead
"""
import logging
import sys
from tests.base import OsfTestCase
from tests.factories import UserFactory
from modularodm import Q
from nose.tools import *
from website import models
from website.app import init_app
from scripts import utils as scripts_utils
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def main():
init_app(routes=False)
dry_run = 'dry' in sys.argv
count = 0
if not dry_run:
scripts_utils.add_file_logger(logger, __file__)
logger.info("Iterating users with None as their email_verification")
for user in get_users_with_none_in_email_verifications():
user.email_verifications = {}
count += 1
logger.info(repr(user))
if not dry_run:
user.save()
print('{} users migrated'.format(count))
def get_users_with_none_in_email_verifications():
return models.User.find( Q('email_verifications', 'eq', None))
class TestMigrateDates(OsfTestCase):
def setUp(self):
super(TestMigrateDates, self).setUp()
        self.user1 = UserFactory(email_verifications=None)
        self.user2 = UserFactory(email_verifications={})
def test_migrate_none_as_email(self):
main()
assert_equal(self.user1.email_verifications, {})
assert_not_equal(self.user2.email_verifications, None)
if __name__ == '__main__':
main()
|
Add migration script for changing users with None as email_verifications to {}""" Ensure that users with User.email_verifications == None now have {} instead
"""
import logging
import sys
from tests.base import OsfTestCase
from tests.factories import UserFactory
from modularodm import Q
from nose.tools import *
from website import models
from website.app import init_app
from scripts import utils as scripts_utils
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def main():
init_app(routes=False)
dry_run = 'dry' in sys.argv
count = 0
if not dry_run:
scripts_utils.add_file_logger(logger, __file__)
logger.info("Iterating users with None as their email_verification")
for user in get_users_with_none_in_email_verifications():
user.email_verifications = {}
count += 1
logger.info(repr(user))
if not dry_run:
user.save()
print('{} users migrated'.format(count))
def get_users_with_none_in_email_verifications():
return models.User.find( Q('email_verifications', 'eq', None))
class TestMigrateDates(OsfTestCase):
def setUp(self):
super(TestMigrateDates, self).setUp()
        self.user1 = UserFactory(email_verifications=None)
        self.user2 = UserFactory(email_verifications={})
def test_migrate_none_as_email(self):
main()
assert_equal(self.user1.email_verifications, {})
assert_not_equal(self.user2.email_verifications, None)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add migration script for changing users with None as email_verifications to {}<commit_after>""" Ensure that users with User.email_verifications == None now have {} instead
"""
import logging
import sys
from tests.base import OsfTestCase
from tests.factories import UserFactory
from modularodm import Q
from nose.tools import *
from website import models
from website.app import init_app
from scripts import utils as scripts_utils
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def main():
init_app(routes=False)
dry_run = 'dry' in sys.argv
count = 0
if not dry_run:
scripts_utils.add_file_logger(logger, __file__)
logger.info("Iterating users with None as their email_verification")
for user in get_users_with_none_in_email_verifications():
user.email_verifications = {}
count += 1
logger.info(repr(user))
if not dry_run:
user.save()
print('{} users migrated'.format(count))
def get_users_with_none_in_email_verifications():
return models.User.find( Q('email_verifications', 'eq', None))
class TestMigrateDates(OsfTestCase):
def setUp(self):
super(TestMigrateDates, self).setUp()
        self.user1 = UserFactory(email_verifications=None)
        self.user2 = UserFactory(email_verifications={})
def test_migrate_none_as_email(self):
main()
assert_equal(self.user1.email_verifications, {})
assert_not_equal(self.user2.email_verifications, None)
if __name__ == '__main__':
main()
|
|
4c8ea40eeec6df07cf8721c256ad8cc3d35fb23e
|
src/test_main.py
|
src/test_main.py
|
import pytest
from main import *
test_files = [ "examples/C/filenames/script", "examples/Clojure/index.cljs.hl",
"examples/Chapel/lulesh.chpl", "examples/Forth/core.fth",
"examples/GAP/Magic.gd", "examples/JavaScript/steelseries-min.js",
"examples/Matlab/FTLE_reg.m", "examples/Perl6/for.t",
"examples/VimL/solarized.vim", "examples/C/cpu.c",
"examples/CSS/bootstrap.css", "examples/D/mpq.d",
"examples/Go/api.pb.go", "examples/HTML+ERB/index.html.erb"]
number_of_comments = [
423,# examples/C/filenames/script
13, # examples/Clojure/index.cljs.hl
609,# examples/Chapel/lulesh.chpl
0, # examples/Forth/core.fth
3, # examples/GAP/Magic.gd
2, # examples/JavaScript/steelseries-min.js
6, # examples/Matlab/FTLE_reg.m
586,# examples/Perl6/for.t
20, # examples/VimL/solarized.vim
39, # examples/C/cpu.c
680,# examples/CSS/bootstrap.css
167,# examples/D/mpq.d
0, # examples/Go/api.pb.go
10 # examples/HTML+ERB/index.html.erb
]
def test_get_comment_tokens():
from pygments.lexers.c_cpp import CLexer
file_text_test = "int main(int argc, char[] argv){\n//This is a comment\n}\n"
c_lexer = CLexer()
results = []
for comment in get_comment_tokens(file_text_test, c_lexer):
results.append(comment)
assert len(results) == 1
assert results[0] == "//This is a comment\n"
def test_get_tokens_from_file():
for index,file in enumerate(test_files, 0):
result = get_tokens_from_file("../" + file)
#print(index)
print(file)
assert number_of_comments[index] == len(result.keys())
|
Add initial unit test file
|
Add initial unit test file
Currently just tests the two primary functions, the results are a bit
brittle but that will allow us to detect regressions for future work.
|
Python
|
mit
|
masterkoppa/whatodo
|
Add initial unit test file
Currently just tests the two primary functions, the results are a bit
brittle but that will allow us to detect regressions for future work.
|
import pytest
from main import *
test_files = [ "examples/C/filenames/script", "examples/Clojure/index.cljs.hl",
"examples/Chapel/lulesh.chpl", "examples/Forth/core.fth",
"examples/GAP/Magic.gd", "examples/JavaScript/steelseries-min.js",
"examples/Matlab/FTLE_reg.m", "examples/Perl6/for.t",
"examples/VimL/solarized.vim", "examples/C/cpu.c",
"examples/CSS/bootstrap.css", "examples/D/mpq.d",
"examples/Go/api.pb.go", "examples/HTML+ERB/index.html.erb"]
number_of_comments = [
423,# examples/C/filenames/script
13, # examples/Clojure/index.cljs.hl
609,# examples/Chapel/lulesh.chpl
0, # examples/Forth/core.fth
3, # examples/GAP/Magic.gd
2, # examples/JavaScript/steelseries-min.js
6, # examples/Matlab/FTLE_reg.m
586,# examples/Perl6/for.t
20, # examples/VimL/solarized.vim
39, # examples/C/cpu.c
680,# examples/CSS/bootstrap.css
167,# examples/D/mpq.d
0, # examples/Go/api.pb.go
10 # examples/HTML+ERB/index.html.erb
]
def test_get_comment_tokens():
from pygments.lexers.c_cpp import CLexer
file_text_test = "int main(int argc, char[] argv){\n//This is a comment\n}\n"
c_lexer = CLexer()
results = []
for comment in get_comment_tokens(file_text_test, c_lexer):
results.append(comment)
assert len(results) == 1
assert results[0] == "//This is a comment\n"
def test_get_tokens_from_file():
for index,file in enumerate(test_files, 0):
result = get_tokens_from_file("../" + file)
#print(index)
print(file)
assert number_of_comments[index] == len(result.keys())
|
<commit_before><commit_msg>Add initial unit test file
Currently just tests the two primary functions, the results are a bit
brittle but that will allow us to detect regressions for future work.<commit_after>
|
import pytest
from main import *
test_files = [ "examples/C/filenames/script", "examples/Clojure/index.cljs.hl",
"examples/Chapel/lulesh.chpl", "examples/Forth/core.fth",
"examples/GAP/Magic.gd", "examples/JavaScript/steelseries-min.js",
"examples/Matlab/FTLE_reg.m", "examples/Perl6/for.t",
"examples/VimL/solarized.vim", "examples/C/cpu.c",
"examples/CSS/bootstrap.css", "examples/D/mpq.d",
"examples/Go/api.pb.go", "examples/HTML+ERB/index.html.erb"]
number_of_comments = [
423,# examples/C/filenames/script
13, # examples/Clojure/index.cljs.hl
609,# examples/Chapel/lulesh.chpl
0, # examples/Forth/core.fth
3, # examples/GAP/Magic.gd
2, # examples/JavaScript/steelseries-min.js
6, # examples/Matlab/FTLE_reg.m
586,# examples/Perl6/for.t
20, # examples/VimL/solarized.vim
39, # examples/C/cpu.c
680,# examples/CSS/bootstrap.css
167,# examples/D/mpq.d
0, # examples/Go/api.pb.go
10 # examples/HTML+ERB/index.html.erb
]
def test_get_comment_tokens():
from pygments.lexers.c_cpp import CLexer
file_text_test = "int main(int argc, char[] argv){\n//This is a comment\n}\n"
c_lexer = CLexer()
results = []
for comment in get_comment_tokens(file_text_test, c_lexer):
results.append(comment)
assert len(results) == 1
assert results[0] == "//This is a comment\n"
def test_get_tokens_from_file():
for index,file in enumerate(test_files, 0):
result = get_tokens_from_file("../" + file)
#print(index)
print(file)
assert number_of_comments[index] == len(result.keys())
|
Add initial unit test file
Currently just tests the two primary functions, the results are a bit
brittle but that will allow us to detect regressions for future work.import pytest
from main import *
test_files = [ "examples/C/filenames/script", "examples/Clojure/index.cljs.hl",
"examples/Chapel/lulesh.chpl", "examples/Forth/core.fth",
"examples/GAP/Magic.gd", "examples/JavaScript/steelseries-min.js",
"examples/Matlab/FTLE_reg.m", "examples/Perl6/for.t",
"examples/VimL/solarized.vim", "examples/C/cpu.c",
"examples/CSS/bootstrap.css", "examples/D/mpq.d",
"examples/Go/api.pb.go", "examples/HTML+ERB/index.html.erb"]
number_of_comments = [
423,# examples/C/filenames/script
13, # examples/Clojure/index.cljs.hl
609,# examples/Chapel/lulesh.chpl
0, # examples/Forth/core.fth
3, # examples/GAP/Magic.gd
2, # examples/JavaScript/steelseries-min.js
6, # examples/Matlab/FTLE_reg.m
586,# examples/Perl6/for.t
20, # examples/VimL/solarized.vim
39, # examples/C/cpu.c
680,# examples/CSS/bootstrap.css
167,# examples/D/mpq.d
0, # examples/Go/api.pb.go
10 # examples/HTML+ERB/index.html.erb
]
def test_get_comment_tokens():
from pygments.lexers.c_cpp import CLexer
file_text_test = "int main(int argc, char[] argv){\n//This is a comment\n}\n"
c_lexer = CLexer()
results = []
for comment in get_comment_tokens(file_text_test, c_lexer):
results.append(comment)
assert len(results) == 1
assert results[0] == "//This is a comment\n"
def test_get_tokens_from_file():
for index,file in enumerate(test_files, 0):
result = get_tokens_from_file("../" + file)
#print(index)
print(file)
assert number_of_comments[index] == len(result.keys())
|
<commit_before><commit_msg>Add initial unit test file
Currently just tests the two primary functions, the results are a bit
brittle but that will allow us to detect regressions for future work.<commit_after>import pytest
from main import *
test_files = [ "examples/C/filenames/script", "examples/Clojure/index.cljs.hl",
"examples/Chapel/lulesh.chpl", "examples/Forth/core.fth",
"examples/GAP/Magic.gd", "examples/JavaScript/steelseries-min.js",
"examples/Matlab/FTLE_reg.m", "examples/Perl6/for.t",
"examples/VimL/solarized.vim", "examples/C/cpu.c",
"examples/CSS/bootstrap.css", "examples/D/mpq.d",
"examples/Go/api.pb.go", "examples/HTML+ERB/index.html.erb"]
number_of_comments = [
423,# examples/C/filenames/script
13, # examples/Clojure/index.cljs.hl
609,# examples/Chapel/lulesh.chpl
0, # examples/Forth/core.fth
3, # examples/GAP/Magic.gd
2, # examples/JavaScript/steelseries-min.js
6, # examples/Matlab/FTLE_reg.m
586,# examples/Perl6/for.t
20, # examples/VimL/solarized.vim
39, # examples/C/cpu.c
680,# examples/CSS/bootstrap.css
167,# examples/D/mpq.d
0, # examples/Go/api.pb.go
10 # examples/HTML+ERB/index.html.erb
]
def test_get_comment_tokens():
from pygments.lexers.c_cpp import CLexer
file_text_test = "int main(int argc, char[] argv){\n//This is a comment\n}\n"
c_lexer = CLexer()
results = []
for comment in get_comment_tokens(file_text_test, c_lexer):
results.append(comment)
assert len(results) == 1
assert results[0] == "//This is a comment\n"
def test_get_tokens_from_file():
for index,file in enumerate(test_files, 0):
result = get_tokens_from_file("../" + file)
#print(index)
print(file)
assert number_of_comments[index] == len(result.keys())
|
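The test above exercises get_comment_tokens with a hand-built CLexer. For a quick ad-hoc check outside the test suite, pygments can also pick the lexer from the file name; the sketch below is illustrative only, is not part of the repository, and reuses one of the paths from the test_files list:

from pygments.lexers import get_lexer_for_filename
from main import get_comment_tokens

# Read one of the sample files listed above (path relative to the repository root).
with open("examples/C/cpu.c") as fh:
    source = fh.read()

# Let pygments choose the lexer from the file name instead of hard-coding CLexer.
lexer = get_lexer_for_filename("cpu.c")
comments = list(get_comment_tokens(source, lexer))
print("%d comment tokens found" % len(comments))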
|
4a179825234b711a729fce5bc9ffc8de029c0999
|
utest/controller/test_loading.py
|
utest/controller/test_loading.py
|
import unittest
from robot.utils.asserts import assert_true, assert_raises
from robotide.application.chiefcontroller import ChiefController
from robotide.namespace import Namespace
from resources import MINIMAL_SUITE_PATH, RESOURCE_PATH
from robot.errors import DataError
class _FakeObserver(object):
def notify(self):
pass
def finished(self):
self.finished = True
class TestDataLoading(unittest.TestCase):
def setUp(self):
self.ctrl = ChiefController(Namespace())
self.load_observer = _FakeObserver()
def test_loading_suite(self):
self._load(MINIMAL_SUITE_PATH)
assert_true(self.ctrl._controller is not None)
def test_loading_resource(self):
self._load(RESOURCE_PATH)
assert_true(self.ctrl.resources != [])
def test_loading_invalid_data(self):
assert_raises(DataError, self._load, 'invalid')
def _load(self, path):
self.ctrl.load_data(self.load_observer, path)
assert_true(self.load_observer.finished)
if __name__ == "__main__":
unittest.main()
|
import unittest
from robot.utils.asserts import assert_true, assert_raises, assert_raises_with_msg
from robotide.controller import ChiefController
from robotide.namespace import Namespace
from resources import MINIMAL_SUITE_PATH, RESOURCE_PATH, FakeLoadObserver
from robot.errors import DataError
class TestDataLoading(unittest.TestCase):
def setUp(self):
self.ctrl = ChiefController(Namespace())
self.load_observer = FakeLoadObserver()
def test_loading_suite(self):
self._load(MINIMAL_SUITE_PATH)
assert_true(self.ctrl._controller is not None)
def test_loading_resource(self):
self._load(RESOURCE_PATH)
assert_true(self.ctrl.resources != [])
def test_loading_invalid_data(self):
assert_raises(DataError, self._load, 'invalid')
def _load(self, path):
self.ctrl.load_data(self.load_observer, path)
assert_true(self.load_observer.finished)
def test_loading_invalid_datafile(self):
assert_raises_with_msg(DataError, 'Invalid data file: invalid.',
self.ctrl.load_datafile, FakeLoadObserver(),
'invalid')
def test_loading_invalid_resource(self):
assert_raises_with_msg(DataError, 'Invalid resource file: invalid.',
self.ctrl.load_resource, 'invalid')
if __name__ == "__main__":
unittest.main()
|
Test for invalid data when loading
|
Test for invalid data when loading
|
Python
|
apache-2.0
|
robotframework/RIDE,robotframework/RIDE,HelioGuilherme66/RIDE,HelioGuilherme66/RIDE,caio2k/RIDE,robotframework/RIDE,HelioGuilherme66/RIDE,robotframework/RIDE,HelioGuilherme66/RIDE,fingeronthebutton/RIDE,fingeronthebutton/RIDE,fingeronthebutton/RIDE,caio2k/RIDE,caio2k/RIDE
|
import unittest
from robot.utils.asserts import assert_true, assert_raises
from robotide.application.chiefcontroller import ChiefController
from robotide.namespace import Namespace
from resources import MINIMAL_SUITE_PATH, RESOURCE_PATH
from robot.errors import DataError
class _FakeObserver(object):
def notify(self):
pass
def finished(self):
self.finished = True
class TestDataLoading(unittest.TestCase):
def setUp(self):
self.ctrl = ChiefController(Namespace())
self.load_observer = _FakeObserver()
def test_loading_suite(self):
self._load(MINIMAL_SUITE_PATH)
assert_true(self.ctrl._controller is not None)
def test_loading_resource(self):
self._load(RESOURCE_PATH)
assert_true(self.ctrl.resources != [])
def test_loading_invalid_data(self):
assert_raises(DataError, self._load, 'invalid')
def _load(self, path):
self.ctrl.load_data(self.load_observer, path)
assert_true(self.load_observer.finished)
if __name__ == "__main__":
unittest.main()
Test for invalid data when loading
|
import unittest
from robot.utils.asserts import assert_true, assert_raises, assert_raises_with_msg
from robotide.controller import ChiefController
from robotide.namespace import Namespace
from resources import MINIMAL_SUITE_PATH, RESOURCE_PATH, FakeLoadObserver
from robot.errors import DataError
class TestDataLoading(unittest.TestCase):
def setUp(self):
self.ctrl = ChiefController(Namespace())
self.load_observer = FakeLoadObserver()
def test_loading_suite(self):
self._load(MINIMAL_SUITE_PATH)
assert_true(self.ctrl._controller is not None)
def test_loading_resource(self):
self._load(RESOURCE_PATH)
assert_true(self.ctrl.resources != [])
def test_loading_invalid_data(self):
assert_raises(DataError, self._load, 'invalid')
def _load(self, path):
self.ctrl.load_data(self.load_observer, path)
assert_true(self.load_observer.finished)
def test_loading_invalid_datafile(self):
assert_raises_with_msg(DataError, 'Invalid data file: invalid.',
self.ctrl.load_datafile, FakeLoadObserver(),
'invalid')
def test_loading_invalid_resource(self):
assert_raises_with_msg(DataError, 'Invalid resource file: invalid.',
self.ctrl.load_resource, 'invalid')
if __name__ == "__main__":
unittest.main()
|
<commit_before>import unittest
from robot.utils.asserts import assert_true, assert_raises
from robotide.application.chiefcontroller import ChiefController
from robotide.namespace import Namespace
from resources import MINIMAL_SUITE_PATH, RESOURCE_PATH
from robot.errors import DataError
class _FakeObserver(object):
def notify(self):
pass
def finished(self):
self.finished = True
class TestDataLoading(unittest.TestCase):
def setUp(self):
self.ctrl = ChiefController(Namespace())
self.load_observer = _FakeObserver()
def test_loading_suite(self):
self._load(MINIMAL_SUITE_PATH)
assert_true(self.ctrl._controller is not None)
def test_loading_resource(self):
self._load(RESOURCE_PATH)
assert_true(self.ctrl.resources != [])
def test_loading_invalid_data(self):
assert_raises(DataError, self._load, 'invalid')
def _load(self, path):
self.ctrl.load_data(self.load_observer, path)
assert_true(self.load_observer.finished)
if __name__ == "__main__":
unittest.main()
<commit_msg>Test for invalid data when loading<commit_after>
|
import unittest
from robot.utils.asserts import assert_true, assert_raises, assert_raises_with_msg
from robotide.controller import ChiefController
from robotide.namespace import Namespace
from resources import MINIMAL_SUITE_PATH, RESOURCE_PATH, FakeLoadObserver
from robot.errors import DataError
class TestDataLoading(unittest.TestCase):
def setUp(self):
self.ctrl = ChiefController(Namespace())
self.load_observer = FakeLoadObserver()
def test_loading_suite(self):
self._load(MINIMAL_SUITE_PATH)
assert_true(self.ctrl._controller is not None)
def test_loading_resource(self):
self._load(RESOURCE_PATH)
assert_true(self.ctrl.resources != [])
def test_loading_invalid_data(self):
assert_raises(DataError, self._load, 'invalid')
def _load(self, path):
self.ctrl.load_data(self.load_observer, path)
assert_true(self.load_observer.finished)
def test_loading_invalid_datafile(self):
assert_raises_with_msg(DataError, 'Invalid data file: invalid.',
self.ctrl.load_datafile, FakeLoadObserver(),
'invalid')
def test_loading_invalid_resource(self):
assert_raises_with_msg(DataError, 'Invalid resource file: invalid.',
self.ctrl.load_resource, 'invalid')
if __name__ == "__main__":
unittest.main()
|
import unittest
from robot.utils.asserts import assert_true, assert_raises
from robotide.application.chiefcontroller import ChiefController
from robotide.namespace import Namespace
from resources import MINIMAL_SUITE_PATH, RESOURCE_PATH
from robot.errors import DataError
class _FakeObserver(object):
def notify(self):
pass
def finished(self):
self.finished = True
class TestDataLoading(unittest.TestCase):
def setUp(self):
self.ctrl = ChiefController(Namespace())
self.load_observer = _FakeObserver()
def test_loading_suite(self):
self._load(MINIMAL_SUITE_PATH)
assert_true(self.ctrl._controller is not None)
def test_loading_resource(self):
self._load(RESOURCE_PATH)
assert_true(self.ctrl.resources != [])
def test_loading_invalid_data(self):
assert_raises(DataError, self._load, 'invalid')
def _load(self, path):
self.ctrl.load_data(self.load_observer, path)
assert_true(self.load_observer.finished)
if __name__ == "__main__":
unittest.main()
Test for invalid data when loadingimport unittest
from robot.utils.asserts import assert_true, assert_raises, assert_raises_with_msg
from robotide.controller import ChiefController
from robotide.namespace import Namespace
from resources import MINIMAL_SUITE_PATH, RESOURCE_PATH, FakeLoadObserver
from robot.errors import DataError
class TestDataLoading(unittest.TestCase):
def setUp(self):
self.ctrl = ChiefController(Namespace())
self.load_observer = FakeLoadObserver()
def test_loading_suite(self):
self._load(MINIMAL_SUITE_PATH)
assert_true(self.ctrl._controller is not None)
def test_loading_resource(self):
self._load(RESOURCE_PATH)
assert_true(self.ctrl.resources != [])
def test_loading_invalid_data(self):
assert_raises(DataError, self._load, 'invalid')
def _load(self, path):
self.ctrl.load_data(self.load_observer, path)
assert_true(self.load_observer.finished)
def test_loading_invalid_datafile(self):
assert_raises_with_msg(DataError, 'Invalid data file: invalid.',
self.ctrl.load_datafile, FakeLoadObserver(),
'invalid')
def test_loading_invalid_resource(self):
assert_raises_with_msg(DataError, 'Invalid resource file: invalid.',
self.ctrl.load_resource, 'invalid')
if __name__ == "__main__":
unittest.main()
|
<commit_before>import unittest
from robot.utils.asserts import assert_true, assert_raises
from robotide.application.chiefcontroller import ChiefController
from robotide.namespace import Namespace
from resources import MINIMAL_SUITE_PATH, RESOURCE_PATH
from robot.errors import DataError
class _FakeObserver(object):
def notify(self):
pass
def finished(self):
self.finished = True
class TestDataLoading(unittest.TestCase):
def setUp(self):
self.ctrl = ChiefController(Namespace())
self.load_observer = _FakeObserver()
def test_loading_suite(self):
self._load(MINIMAL_SUITE_PATH)
assert_true(self.ctrl._controller is not None)
def test_loading_resource(self):
self._load(RESOURCE_PATH)
assert_true(self.ctrl.resources != [])
def test_loading_invalid_data(self):
assert_raises(DataError, self._load, 'invalid')
def _load(self, path):
self.ctrl.load_data(self.load_observer, path)
assert_true(self.load_observer.finished)
if __name__ == "__main__":
unittest.main()
<commit_msg>Test for invalid data when loading<commit_after>import unittest
from robot.utils.asserts import assert_true, assert_raises, assert_raises_with_msg
from robotide.controller import ChiefController
from robotide.namespace import Namespace
from resources import MINIMAL_SUITE_PATH, RESOURCE_PATH, FakeLoadObserver
from robot.errors import DataError
class TestDataLoading(unittest.TestCase):
def setUp(self):
self.ctrl = ChiefController(Namespace())
self.load_observer = FakeLoadObserver()
def test_loading_suite(self):
self._load(MINIMAL_SUITE_PATH)
assert_true(self.ctrl._controller is not None)
def test_loading_resource(self):
self._load(RESOURCE_PATH)
assert_true(self.ctrl.resources != [])
def test_loading_invalid_data(self):
assert_raises(DataError, self._load, 'invalid')
def _load(self, path):
self.ctrl.load_data(self.load_observer, path)
assert_true(self.load_observer.finished)
def test_loading_invalid_datafile(self):
assert_raises_with_msg(DataError, 'Invalid data file: invalid.',
self.ctrl.load_datafile, FakeLoadObserver(),
'invalid')
def test_loading_invalid_resource(self):
assert_raises_with_msg(DataError, 'Invalid resource file: invalid.',
self.ctrl.load_resource, 'invalid')
if __name__ == "__main__":
unittest.main()
|
0e6a7a805ff08f191c88bda67992cb874f538c2f
|
services/migrations/0097_alter_unitconnection_section_type.py
|
services/migrations/0097_alter_unitconnection_section_type.py
|
# Generated by Django 4.0.5 on 2022-06-22 05:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("services", "0096_create_syllables_fi_columns"),
]
operations = [
migrations.AlterField(
model_name="unitconnection",
name="section_type",
field=models.PositiveSmallIntegerField(
choices=[
(1, "PHONE_OR_EMAIL"),
(2, "LINK"),
(3, "TOPICAL"),
(4, "OTHER_INFO"),
(5, "OPENING_HOURS"),
(6, "SOCIAL_MEDIA_LINK"),
(7, "OTHER_ADDRESS"),
(8, "HIGHLIGHT"),
(9, "ESERVICE_LINK"),
(10, "PRICE"),
(11, "SUBGROUP"),
],
null=True,
),
),
]
|
Add migration for unitconnection section types
|
Add migration for unitconnection section types
|
Python
|
agpl-3.0
|
City-of-Helsinki/smbackend,City-of-Helsinki/smbackend
|
Add migration for unitconnection section types
|
# Generated by Django 4.0.5 on 2022-06-22 05:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("services", "0096_create_syllables_fi_columns"),
]
operations = [
migrations.AlterField(
model_name="unitconnection",
name="section_type",
field=models.PositiveSmallIntegerField(
choices=[
(1, "PHONE_OR_EMAIL"),
(2, "LINK"),
(3, "TOPICAL"),
(4, "OTHER_INFO"),
(5, "OPENING_HOURS"),
(6, "SOCIAL_MEDIA_LINK"),
(7, "OTHER_ADDRESS"),
(8, "HIGHLIGHT"),
(9, "ESERVICE_LINK"),
(10, "PRICE"),
(11, "SUBGROUP"),
],
null=True,
),
),
]
|
<commit_before><commit_msg>Add migration for unitconnection section types<commit_after>
|
# Generated by Django 4.0.5 on 2022-06-22 05:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("services", "0096_create_syllables_fi_columns"),
]
operations = [
migrations.AlterField(
model_name="unitconnection",
name="section_type",
field=models.PositiveSmallIntegerField(
choices=[
(1, "PHONE_OR_EMAIL"),
(2, "LINK"),
(3, "TOPICAL"),
(4, "OTHER_INFO"),
(5, "OPENING_HOURS"),
(6, "SOCIAL_MEDIA_LINK"),
(7, "OTHER_ADDRESS"),
(8, "HIGHLIGHT"),
(9, "ESERVICE_LINK"),
(10, "PRICE"),
(11, "SUBGROUP"),
],
null=True,
),
),
]
|
Add migration for unitconnection section types# Generated by Django 4.0.5 on 2022-06-22 05:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("services", "0096_create_syllables_fi_columns"),
]
operations = [
migrations.AlterField(
model_name="unitconnection",
name="section_type",
field=models.PositiveSmallIntegerField(
choices=[
(1, "PHONE_OR_EMAIL"),
(2, "LINK"),
(3, "TOPICAL"),
(4, "OTHER_INFO"),
(5, "OPENING_HOURS"),
(6, "SOCIAL_MEDIA_LINK"),
(7, "OTHER_ADDRESS"),
(8, "HIGHLIGHT"),
(9, "ESERVICE_LINK"),
(10, "PRICE"),
(11, "SUBGROUP"),
],
null=True,
),
),
]
|
<commit_before><commit_msg>Add migration for unitconnection section types<commit_after># Generated by Django 4.0.5 on 2022-06-22 05:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("services", "0096_create_syllables_fi_columns"),
]
operations = [
migrations.AlterField(
model_name="unitconnection",
name="section_type",
field=models.PositiveSmallIntegerField(
choices=[
(1, "PHONE_OR_EMAIL"),
(2, "LINK"),
(3, "TOPICAL"),
(4, "OTHER_INFO"),
(5, "OPENING_HOURS"),
(6, "SOCIAL_MEDIA_LINK"),
(7, "OTHER_ADDRESS"),
(8, "HIGHLIGHT"),
(9, "ESERVICE_LINK"),
(10, "PRICE"),
(11, "SUBGROUP"),
],
null=True,
),
),
]
|
|
5e49eb4fb6bce9cdeae515590530b78e4dde89d9
|
doc/examples/plot_match_face_template.py
|
doc/examples/plot_match_face_template.py
|
"""
=================
Template Matching
=================
In this example, we use template matching to identify the occurrence of an
image patch (in this case, a sub-image centered on the camera man's head).
Since there's only a single match, the maximum value in the `match_template`
result corresponds to the head location. If you expect multiple matches, you
should use a proper peak-finding function.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import match_template
image = data.camera()
head = image[70:170, 180:280]
result = match_template(image, head)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4))
ax1.imshow(head)
ax1.set_axis_off()
ax1.set_title('template')
ax2.imshow(image)
ax2.set_axis_off()
ax2.set_title('image')
# highlight matched region
xy = np.unravel_index(np.argmax(result), image.shape)[::-1] # -1 flips ij to xy
wface, hface = head.shape
rect = plt.Rectangle(xy, wface, hface, edgecolor='r', facecolor='none')
ax2.add_patch(rect)
plt.show()
|
Add alternate example for `match_template`.
|
Add alternate example for `match_template`.
|
Python
|
bsd-3-clause
|
emmanuelle/scikits.image,warmspringwinds/scikit-image,bennlich/scikit-image,rjeli/scikit-image,ofgulban/scikit-image,GaZ3ll3/scikit-image,rjeli/scikit-image,ClinicalGraphics/scikit-image,dpshelio/scikit-image,pratapvardhan/scikit-image,michaelpacer/scikit-image,chintak/scikit-image,michaelaye/scikit-image,chintak/scikit-image,chriscrosscutler/scikit-image,pratapvardhan/scikit-image,michaelaye/scikit-image,oew1v07/scikit-image,paalge/scikit-image,almarklein/scikit-image,ajaybhat/scikit-image,paalge/scikit-image,jwiggins/scikit-image,newville/scikit-image,SamHames/scikit-image,jwiggins/scikit-image,paalge/scikit-image,bsipocz/scikit-image,ajaybhat/scikit-image,blink1073/scikit-image,SamHames/scikit-image,vighneshbirodkar/scikit-image,chintak/scikit-image,almarklein/scikit-image,chintak/scikit-image,newville/scikit-image,ofgulban/scikit-image,SamHames/scikit-image,blink1073/scikit-image,warmspringwinds/scikit-image,WarrenWeckesser/scikits-image,keflavich/scikit-image,vighneshbirodkar/scikit-image,emmanuelle/scikits.image,dpshelio/scikit-image,chriscrosscutler/scikit-image,GaZ3ll3/scikit-image,rjeli/scikit-image,WarrenWeckesser/scikits-image,almarklein/scikit-image,SamHames/scikit-image,vighneshbirodkar/scikit-image,ofgulban/scikit-image,Hiyorimi/scikit-image,michaelpacer/scikit-image,Midafi/scikit-image,ClinicalGraphics/scikit-image,youprofit/scikit-image,emmanuelle/scikits.image,emon10005/scikit-image,Midafi/scikit-image,Hiyorimi/scikit-image,Britefury/scikit-image,robintw/scikit-image,oew1v07/scikit-image,keflavich/scikit-image,Britefury/scikit-image,juliusbierk/scikit-image,almarklein/scikit-image,emmanuelle/scikits.image,emon10005/scikit-image,bsipocz/scikit-image,robintw/scikit-image,youprofit/scikit-image,bennlich/scikit-image,juliusbierk/scikit-image
|
Add alternate example for `match_template`.
|
"""
=================
Template Matching
=================
In this example, we use template matching to identify the occurrence of an
image patch (in this case, a sub-image centered on the camera man's head).
Since there's only a single match, the maximum value in the `match_template`
result corresponds to the head location. If you expect multiple matches, you
should use a proper peak-finding function.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import match_template
image = data.camera()
head = image[70:170, 180:280]
result = match_template(image, head)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4))
ax1.imshow(head)
ax1.set_axis_off()
ax1.set_title('template')
ax2.imshow(image)
ax2.set_axis_off()
ax2.set_title('image')
# highlight matched region
xy = np.unravel_index(np.argmax(result), image.shape)[::-1] # -1 flips ij to xy
wface, hface = head.shape
rect = plt.Rectangle(xy, wface, hface, edgecolor='r', facecolor='none')
ax2.add_patch(rect)
plt.show()
|
<commit_before><commit_msg>Add alternate example for `match_template`.<commit_after>
|
"""
=================
Template Matching
=================
In this example, we use template matching to identify the occurrence of an
image patch (in this case, a sub-image centered on the camera man's head).
Since there's only a single match, the maximum value in the `match_template`
result corresponds to the head location. If you expect multiple matches, you
should use a proper peak-finding function.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import match_template
image = data.camera()
head = image[70:170, 180:280]
result = match_template(image, head)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4))
ax1.imshow(head)
ax1.set_axis_off()
ax1.set_title('template')
ax2.imshow(image)
ax2.set_axis_off()
ax2.set_title('image')
# highlight matched region
xy = np.unravel_index(np.argmax(result), image.shape)[::-1] # -1 flips ij to xy
wface, hface = head.shape
rect = plt.Rectangle(xy, wface, hface, edgecolor='r', facecolor='none')
ax2.add_patch(rect)
plt.show()
|
Add alternate example for `match_template`."""
=================
Template Matching
=================
In this example, we use template matching to identify the occurrence of an
image patch (in this case, a sub-image centered on the camera man's head).
Since there's only a single match, the maximum value in the `match_template`
result corresponds to the head location. If you expect multiple matches, you
should use a proper peak-finding function.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import match_template
image = data.camera()
head = image[70:170, 180:280]
result = match_template(image, head)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4))
ax1.imshow(head)
ax1.set_axis_off()
ax1.set_title('template')
ax2.imshow(image)
ax2.set_axis_off()
ax2.set_title('image')
# highlight matched region
xy = np.unravel_index(np.argmax(result), image.shape)[::-1] # -1 flips ij to xy
wface, hface = head.shape
rect = plt.Rectangle(xy, wface, hface, edgecolor='r', facecolor='none')
ax2.add_patch(rect)
plt.show()
|
<commit_before><commit_msg>Add alternate example for `match_template`.<commit_after>"""
=================
Template Matching
=================
In this example, we use template matching to identify the occurrence of an
image patch (in this case, a sub-image centered on the camera man's head).
Since there's only a single match, the maximum value in the `match_template`
result corresponds to the head location. If you expect multiple matches, you
should use a proper peak-finding function.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import match_template
image = data.camera()
head = image[70:170, 180:280]
result = match_template(image, head)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4))
ax1.imshow(head)
ax1.set_axis_off()
ax1.set_title('template')
ax2.imshow(image)
ax2.set_axis_off()
ax2.set_title('image')
# highlight matched region
xy = np.unravel_index(np.argmax(result), image.shape)[::-1] # -1 flips ij to xy
wface, hface = head.shape
rect = plt.Rectangle(xy, wface, hface, edgecolor='r', facecolor='none')
ax2.add_patch(rect)
plt.show()
|
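The docstring above notes that a single argmax only works when there is exactly one match, and that multiple matches call for a proper peak-finding function. A minimal sketch of that idea, assuming skimage.feature.peak_local_max is an acceptable peak finder and with made-up distance and threshold values:

from skimage import data
from skimage.feature import match_template, peak_local_max

image = data.camera()
template = image[70:170, 180:280]
result = match_template(image, template)

# Each peak in the correlation surface marks the top-left corner of a candidate match;
# min_distance suppresses near-duplicate peaks and threshold_abs drops weak responses.
peaks = peak_local_max(result, min_distance=20, threshold_abs=0.8)
for row, col in peaks:
    print("candidate match at x=%d, y=%d" % (col, row))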
|
edc5116472c49370e5bf3ff7f9f7872732b0285e
|
phone_numbers.py
|
phone_numbers.py
|
#!/usr/bin/env python
import unittest
words = set(["dog", "clog", "cat", "mouse", "rat", "can",
"fig", "dig", "mud", "a", "an", "duh", "sin",
"get", "shit", "done", "all", "glory", "comes",
"from", "daring", "to", "begin", ])
dialmap = {
'a':2, 'b':2, 'c':2,
'd':3, 'e':3, 'f':3,
'g':4, 'h':4, 'i':4,
'j':5, 'k':5, 'l':5,
'm':6, 'n':6, 'o':6,
'p':7, 'q':7, 'r':7, 's':7,
't':8, 'u':8, 'v':8,
'w':9, 'x':9, 'y':9, 'z':9,
}
def tonumbers(word):
"""Convert the string 'word' into the equivalent string of phone-dailing numbers"""
numstr = ''
for c in word.lower():
numstr += str(dialmap[c])
return numstr
wordsnum = set()
for w in words:
wordsnum.add(tonumbers(w))
def isword(number):
"""Return True if the string of decimal digits 'number' can be represented
as the concatenation of words in the 'words' set, otherwise False."""
if number in wordsnum:
return True
if number in isword.memoized:
return isword.memoized[number]
for i in range(1, len(number)):
a = number[i:]
b = number[:i]
#print locals()
if isword(a) and isword(b):
isword.memoized[number] = True
return True
isword.memoized[number] = False
return False
isword.memoized = {}
class TestIsWord(unittest.TestCase):
def testGetShitDone(self):
self.assertTrue(isword(tonumbers('getshitdone')))
def testHas1(self):
self.assertFalse(isword('1092340345'))
def testDogDog(self):
self.assertTrue(isword(tonumbers('dogdog')))
def testMyNumber1(self):
self.assertFalse(isword('7342393309'))
def testMyNumber2(self):
self.assertFalse(isword('4082434090'))
if __name__ == "__main__":
unittest.main()
|
Add a solution to the phone number problem: can a phone number be represented as words in a dictionary?
|
Add a solution to the phone number problem: can a phone number be represented as words in a dictionary?
|
Python
|
apache-2.0
|
aww/cs_practice
|
Add a solution to the phone number problem: can a phone number be represented as words in a dictionary?
|
#!/usr/bin/env python
import unittest
words = set(["dog", "clog", "cat", "mouse", "rat", "can",
"fig", "dig", "mud", "a", "an", "duh", "sin",
"get", "shit", "done", "all", "glory", "comes",
"from", "daring", "to", "begin", ])
dialmap = {
'a':2, 'b':2, 'c':2,
'd':3, 'e':3, 'f':3,
'g':4, 'h':4, 'i':4,
'j':5, 'k':5, 'l':5,
'm':6, 'n':6, 'o':6,
'p':7, 'q':7, 'r':7, 's':7,
't':8, 'u':8, 'v':8,
'w':9, 'x':9, 'y':9, 'z':9,
}
def tonumbers(word):
"""Convert the string 'word' into the equivalent string of phone-dailing numbers"""
numstr = ''
for c in word.lower():
numstr += str(dialmap[c])
return numstr
wordsnum = set()
for w in words:
wordsnum.add(tonumbers(w))
def isword(number):
"""Return True if the string of decimal digits 'number' can be represented
as the concatenation of words in the 'words' set, otherwise False."""
if number in wordsnum:
return True
if number in isword.memoized:
return isword.memoized[number]
for i in range(1, len(number)):
a = number[i:]
b = number[:i]
#print locals()
if isword(a) and isword(b):
isword.memoized[number] = True
return True
isword.memoized[number] = False
return False
isword.memoized = {}
class TestIsWord(unittest.TestCase):
def testGetShitDone(self):
self.assertTrue(isword(tonumbers('getshitdone')))
def testHas1(self):
self.assertFalse(isword('1092340345'))
def testDogDog(self):
self.assertTrue(isword(tonumbers('dogdog')))
def testMyNumber1(self):
self.assertFalse(isword('7342393309'))
def testMyNumber2(self):
self.assertFalse(isword('4082434090'))
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add a solution to the phone number problem: can a phone number be represented as words in a dictionary?<commit_after>
|
#!/usr/bin/env python
import unittest
words = set(["dog", "clog", "cat", "mouse", "rat", "can",
"fig", "dig", "mud", "a", "an", "duh", "sin",
"get", "shit", "done", "all", "glory", "comes",
"from", "daring", "to", "begin", ])
dialmap = {
'a':2, 'b':2, 'c':2,
'd':3, 'e':3, 'f':3,
'g':4, 'h':4, 'i':4,
'j':5, 'k':5, 'l':5,
'm':6, 'n':6, 'o':6,
'p':7, 'q':7, 'r':7, 's':7,
't':8, 'u':8, 'v':8,
'w':9, 'x':9, 'y':9, 'z':9,
}
def tonumbers(word):
"""Convert the string 'word' into the equivalent string of phone-dailing numbers"""
numstr = ''
for c in word.lower():
numstr += str(dialmap[c])
return numstr
wordsnum = set()
for w in words:
wordsnum.add(tonumbers(w))
def isword(number):
"""Return True if the string of decimal digits 'number' can be represented
as the concatenation of words in the 'words' set, otherwise False."""
if number in wordsnum:
return True
if number in isword.memoized:
return isword.memoized[number]
for i in range(1, len(number)):
a = number[i:]
b = number[:i]
#print locals()
if isword(a) and isword(b):
isword.memoized[number] = True
return True
isword.memoized[number] = False
return False
isword.memoized = {}
class TestIsWord(unittest.TestCase):
def testGetShitDone(self):
self.assertTrue(isword(tonumbers('getshitdone')))
def testHas1(self):
self.assertFalse(isword('1092340345'))
def testDogDog(self):
self.assertTrue(isword(tonumbers('dogdog')))
def testMyNumber1(self):
self.assertFalse(isword('7342393309'))
def testMyNumber2(self):
self.assertFalse(isword('4082434090'))
if __name__ == "__main__":
unittest.main()
|
Add a solution to the phone number problem: can a phone number be represented as words in a dictionary?#!/usr/bin/env python
import unittest
words = set(["dog", "clog", "cat", "mouse", "rat", "can",
"fig", "dig", "mud", "a", "an", "duh", "sin",
"get", "shit", "done", "all", "glory", "comes",
"from", "daring", "to", "begin", ])
dialmap = {
'a':2, 'b':2, 'c':2,
'd':3, 'e':3, 'f':3,
'g':4, 'h':4, 'i':4,
'j':5, 'k':5, 'l':5,
'm':6, 'n':6, 'o':6,
'p':7, 'q':7, 'r':7, 's':7,
't':8, 'u':8, 'v':8,
'w':9, 'x':9, 'y':9, 'z':9,
}
def tonumbers(word):
"""Convert the string 'word' into the equivalent string of phone-dailing numbers"""
numstr = ''
for c in word.lower():
numstr += str(dialmap[c])
return numstr
wordsnum = set()
for w in words:
wordsnum.add(tonumbers(w))
def isword(number):
"""Return True if the string of decimal digits 'number' can be represented
as the concatenation of words in the 'words' set, otherwise False."""
if number in wordsnum:
return True
if number in isword.memoized:
return isword.memoized[number]
for i in range(1, len(number)):
a = number[i:]
b = number[:i]
#print locals()
if isword(a) and isword(b):
isword.memoized[number] = True
return True
isword.memoized[number] = False
return False
isword.memoized = {}
class TestIsWord(unittest.TestCase):
def testGetShitDone(self):
self.assertTrue(isword(tonumbers('getshitdone')))
def testHas1(self):
self.assertFalse(isword('1092340345'))
def testDogDog(self):
self.assertTrue(isword(tonumbers('dogdog')))
def testMyNumber1(self):
self.assertFalse(isword('7342393309'))
def testMyNumber2(self):
self.assertFalse(isword('4082434090'))
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add a solution to the phone number problem: can a phone number be represented as words in a dictionary?<commit_after>#!/usr/bin/env python
import unittest
words = set(["dog", "clog", "cat", "mouse", "rat", "can",
"fig", "dig", "mud", "a", "an", "duh", "sin",
"get", "shit", "done", "all", "glory", "comes",
"from", "daring", "to", "begin", ])
dialmap = {
'a':2, 'b':2, 'c':2,
'd':3, 'e':3, 'f':3,
'g':4, 'h':4, 'i':4,
'j':5, 'k':5, 'l':5,
'm':6, 'n':6, 'o':6,
'p':7, 'q':7, 'r':7, 's':7,
't':8, 'u':8, 'v':8,
'w':9, 'x':9, 'y':9, 'z':9,
}
def tonumbers(word):
"""Convert the string 'word' into the equivalent string of phone-dailing numbers"""
numstr = ''
for c in word.lower():
numstr += str(dialmap[c])
return numstr
wordsnum = set()
for w in words:
wordsnum.add(tonumbers(w))
def isword(number):
"""Return True if the string of decimal digits 'number' can be represented
as the concatenation of words in the 'words' set, otherwise False."""
if number in wordsnum:
return True
if number in isword.memoized:
return isword.memoized[number]
for i in range(1, len(number)):
a = number[i:]
b = number[:i]
#print locals()
if isword(a) and isword(b):
isword.memoized[number] = True
return True
isword.memoized[number] = False
return False
isword.memoized = {}
class TestIsWord(unittest.TestCase):
def testGetShitDone(self):
self.assertTrue(isword(tonumbers('getshitdone')))
def testHas1(self):
self.assertFalse(isword('1092340345'))
def testDogDog(self):
self.assertTrue(isword(tonumbers('dogdog')))
def testMyNumber1(self):
self.assertFalse(isword('7342393309'))
def testMyNumber2(self):
self.assertFalse(isword('4082434090'))
if __name__ == "__main__":
unittest.main()
|
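Outside the unit tests the two helpers compose directly; a small interactive check (the last number is made up and contains a 1, which no letter maps to):

from phone_numbers import tonumbers, isword

digits = tonumbers("getshitdone")
print(digits)                 # 43874483663
print(isword(digits))         # True: splits into 'get' + 'shit' + 'done'
print(isword("5551234567"))   # False: the digit 1 can never come from a word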
|
86baa4f437cf3892c15a56e8331c19b6d2e63b1d
|
lib/gen-names.py
|
lib/gen-names.py
|
#!/usr/bin/python3
# Input: https://www.unicode.org/Public/UNIDATA/UnicodeData.txt
import io
import re
class Builder(object):
def __init__(self):
pass
def read(self, infile):
names = []
for line in infile:
if line.startswith('#'):
continue
line = line.strip()
if len(line) == 0:
continue
(codepoint, name, _other) = line.split(';', 2)
# Names starting with < are signifying controls and special blocks,
# they aren't useful for us
if name[0] == '<':
continue
names.append((codepoint, name))
return names
def write(self, data):
print('''\
struct CharacterName
{
gunichar uc;
const char *name;
};''')
print('static const struct CharacterName character_names[] =\n {')
s = ''
offset = 0
for codepoint, name in data:
print(' {{ 0x{0}, "{1}" }},'.format(codepoint, name))
print(' };')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='build')
parser.add_argument('infile', type=argparse.FileType('r'),
help='input file')
args = parser.parse_args()
builder = Builder()
# FIXME: argparse.FileType(encoding=...) is available since Python 3.4
data = builder.read(io.open(args.infile.name, encoding='utf_8_sig'))
builder.write(data)
|
Add a script for generating unicode name table
|
lib: Add a script for generating unicode name table
GLib doesn't have a way to get unicode char names, so we'll have to
reimplement this.
|
Python
|
bsd-3-clause
|
GNOME/gnome-characters,GNOME/gnome-characters,GNOME/gnome-characters,GNOME/gnome-characters,GNOME/gnome-characters
|
lib: Add a script for generating unicode name table
GLib doesn't have a way to get unicode char names, so we'll have to
reimplement this.
|
#!/usr/bin/python3
# Input: https://www.unicode.org/Public/UNIDATA/UnicodeData.txt
import io
import re
class Builder(object):
def __init__(self):
pass
def read(self, infile):
names = []
for line in infile:
if line.startswith('#'):
continue
line = line.strip()
if len(line) == 0:
continue
(codepoint, name, _other) = line.split(';', 2)
# Names starting with < are signifying controls and special blocks,
# they aren't useful for us
if name[0] == '<':
continue
names.append((codepoint, name))
return names
def write(self, data):
print('''\
struct CharacterName
{
gunichar uc;
const char *name;
};''')
print('static const struct CharacterName character_names[] =\n {')
s = ''
offset = 0
for codepoint, name in data:
print(' {{ 0x{0}, "{1}" }},'.format(codepoint, name))
print(' };')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='build')
parser.add_argument('infile', type=argparse.FileType('r'),
help='input file')
args = parser.parse_args()
builder = Builder()
# FIXME: argparse.FileType(encoding=...) is available since Python 3.4
data = builder.read(io.open(args.infile.name, encoding='utf_8_sig'))
builder.write(data)
|
<commit_before><commit_msg>lib: Add a script for generating unicode name table
GLib doesn't have a way to get unicode char names, so we'll have to
reimplement this.<commit_after>
|
#!/usr/bin/python3
# Input: https://www.unicode.org/Public/UNIDATA/UnicodeData.txt
import io
import re
class Builder(object):
def __init__(self):
pass
def read(self, infile):
names = []
for line in infile:
if line.startswith('#'):
continue
line = line.strip()
if len(line) == 0:
continue
(codepoint, name, _other) = line.split(';', 2)
# Names starting with < are signifying controls and special blocks,
# they aren't useful for us
if name[0] == '<':
continue
names.append((codepoint, name))
return names
def write(self, data):
print('''\
struct CharacterName
{
gunichar uc;
const char *name;
};''')
print('static const struct CharacterName character_names[] =\n {')
s = ''
offset = 0
for codepoint, name in data:
print(' {{ 0x{0}, "{1}" }},'.format(codepoint, name))
print(' };')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='build')
parser.add_argument('infile', type=argparse.FileType('r'),
help='input file')
args = parser.parse_args()
builder = Builder()
# FIXME: argparse.FileType(encoding=...) is available since Python 3.4
data = builder.read(io.open(args.infile.name, encoding='utf_8_sig'))
builder.write(data)
|
lib: Add a script for generating unicode name table
GLib doesn't have a way to get unicode char names, so we'll have to
reimplement this.#!/usr/bin/python3
# Input: https://www.unicode.org/Public/UNIDATA/UnicodeData.txt
import io
import re
class Builder(object):
def __init__(self):
pass
def read(self, infile):
names = []
for line in infile:
if line.startswith('#'):
continue
line = line.strip()
if len(line) == 0:
continue
(codepoint, name, _other) = line.split(';', 2)
# Names starting with < are signifying controls and special blocks,
# they aren't useful for us
if name[0] == '<':
continue
names.append((codepoint, name))
return names
def write(self, data):
print('''\
struct CharacterName
{
gunichar uc;
const char *name;
};''')
print('static const struct CharacterName character_names[] =\n {')
s = ''
offset = 0
for codepoint, name in data:
print(' {{ 0x{0}, "{1}" }},'.format(codepoint, name))
print(' };')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='build')
parser.add_argument('infile', type=argparse.FileType('r'),
help='input file')
args = parser.parse_args()
builder = Builder()
# FIXME: argparse.FileType(encoding=...) is available since Python 3.4
data = builder.read(io.open(args.infile.name, encoding='utf_8_sig'))
builder.write(data)
|
<commit_before><commit_msg>lib: Add a script for generating unicode name table
GLib doesn't have a way to get unicode char names, so we'll have to
reimplement this.<commit_after>#!/usr/bin/python3
# Input: https://www.unicode.org/Public/UNIDATA/UnicodeData.txt
import io
import re
class Builder(object):
def __init__(self):
pass
def read(self, infile):
names = []
for line in infile:
if line.startswith('#'):
continue
line = line.strip()
if len(line) == 0:
continue
(codepoint, name, _other) = line.split(';', 2)
# Names starting with < are signifying controls and special blocks,
# they aren't useful for us
if name[0] == '<':
continue
names.append((codepoint, name))
return names
def write(self, data):
print('''\
struct CharacterName
{
gunichar uc;
const char *name;
};''')
print('static const struct CharacterName character_names[] =\n {')
s = ''
offset = 0
for codepoint, name in data:
print(' {{ 0x{0}, "{1}" }},'.format(codepoint, name))
print(' };')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='build')
parser.add_argument('infile', type=argparse.FileType('r'),
help='input file')
args = parser.parse_args()
builder = Builder()
# FIXME: argparse.FileType(encoding=...) is available since Python 3.4
data = builder.read(io.open(args.infile.name, encoding='utf_8_sig'))
builder.write(data)
|
|
3ba109622c24bd52f32e605c523249e1c26b0207
|
spacy/tests/regression/test_issue834.py
|
spacy/tests/regression/test_issue834.py
|
# coding: utf-8
from io import StringIO
word2vec_str = """, -0.046107 -0.035951 -0.560418
de -0.648927 -0.400976 -0.527124
. 0.113685 0.439990 -0.634510
-1.499184 -0.184280 -0.598371"""
def test_issue834(en_vocab):
f = StringIO(word2vec_str)
vector_length = en_vocab.load_vectors(f)
assert vector_length == 3
|
Add regression test with non ' ' space character as token
|
Add regression test with non ' ' space character as token
|
Python
|
mit
|
banglakit/spaCy,banglakit/spaCy,explosion/spaCy,Gregory-Howard/spaCy,Gregory-Howard/spaCy,raphael0202/spaCy,banglakit/spaCy,banglakit/spaCy,oroszgy/spaCy.hu,explosion/spaCy,recognai/spaCy,explosion/spaCy,oroszgy/spaCy.hu,explosion/spaCy,Gregory-Howard/spaCy,oroszgy/spaCy.hu,explosion/spaCy,banglakit/spaCy,recognai/spaCy,raphael0202/spaCy,recognai/spaCy,Gregory-Howard/spaCy,spacy-io/spaCy,raphael0202/spaCy,honnibal/spaCy,oroszgy/spaCy.hu,recognai/spaCy,aikramer2/spaCy,spacy-io/spaCy,raphael0202/spaCy,aikramer2/spaCy,Gregory-Howard/spaCy,aikramer2/spaCy,honnibal/spaCy,aikramer2/spaCy,aikramer2/spaCy,oroszgy/spaCy.hu,spacy-io/spaCy,recognai/spaCy,raphael0202/spaCy,aikramer2/spaCy,Gregory-Howard/spaCy,spacy-io/spaCy,banglakit/spaCy,spacy-io/spaCy,oroszgy/spaCy.hu,explosion/spaCy,raphael0202/spaCy,honnibal/spaCy,spacy-io/spaCy,honnibal/spaCy,recognai/spaCy
|
Add regression test with non ' ' space character as token
|
# coding: utf-8
from io import StringIO
word2vec_str = """, -0.046107 -0.035951 -0.560418
de -0.648927 -0.400976 -0.527124
. 0.113685 0.439990 -0.634510
-1.499184 -0.184280 -0.598371"""
def test_issue834(en_vocab):
f = StringIO(word2vec_str)
vector_length = en_vocab.load_vectors(f)
assert vector_length == 3
|
<commit_before><commit_msg>Add regression test with non ' ' space character as token<commit_after>
|
# coding: utf-8
from io import StringIO
word2vec_str = """, -0.046107 -0.035951 -0.560418
de -0.648927 -0.400976 -0.527124
. 0.113685 0.439990 -0.634510
-1.499184 -0.184280 -0.598371"""
def test_issue834(en_vocab):
f = StringIO(word2vec_str)
vector_length = en_vocab.load_vectors(f)
assert vector_length == 3
|
Add regression test with non ' ' space character as token# coding: utf-8
from io import StringIO
word2vec_str = """, -0.046107 -0.035951 -0.560418
de -0.648927 -0.400976 -0.527124
. 0.113685 0.439990 -0.634510
-1.499184 -0.184280 -0.598371"""
def test_issue834(en_vocab):
f = StringIO(word2vec_str)
vector_length = en_vocab.load_vectors(f)
assert vector_length == 3
|
<commit_before><commit_msg>Add regression test with non ' ' space character as token<commit_after># coding: utf-8
from io import StringIO
word2vec_str = """, -0.046107 -0.035951 -0.560418
de -0.648927 -0.400976 -0.527124
. 0.113685 0.439990 -0.634510
-1.499184 -0.184280 -0.598371"""
def test_issue834(en_vocab):
f = StringIO(word2vec_str)
vector_length = en_vocab.load_vectors(f)
assert vector_length == 3
|