Dataset schema (16 string columns; length ranges as reported by the dataset viewer):

| column | type | min length | max length |
|---|---|---|---|
| commit | string | 40 | 40 |
| old_file | string | 4 | 118 |
| new_file | string | 4 | 118 |
| old_contents | string | 0 | 2.94k |
| new_contents | string | 1 | 4.43k |
| subject | string | 15 | 444 |
| message | string | 16 | 3.45k |
| lang | string (1 class) | — | — |
| license | string (13 classes) | — | — |
| repos | string | 5 | 43.2k |
| prompt | string | 17 | 4.58k |
| response | string | 1 | 4.43k |
| prompt_tagged | string | 58 | 4.62k |
| response_tagged | string | 1 | 4.43k |
| text | string | 132 | 7.29k |
| text_tagged | string | 173 | 7.33k |
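The snippet below is a minimal sketch of how a dataset with this schema could be inspected using the Hugging Face `datasets` library; the repository path `user/commit-dataset` is a hypothetical placeholder, since the card does not name one.

```python
# Minimal sketch, assuming the data is published as a Hugging Face dataset.
# "user/commit-dataset" is a hypothetical placeholder; substitute the real path.
from datasets import load_dataset

ds = load_dataset("user/commit-dataset", split="train")

# Every column in the schema above is a string field.
print(ds.column_names)

# Inspect the first record: commit hash, file path, and the code payload.
row = ds[0]
print(row["commit"], row["new_file"])
print(row["new_contents"][:200])
```

The sample rows below are shown flattened, one `|`-delimited cell at a time, in the column order given above.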
060f3d01458af237952a9081fadd523350862f2d
|
accounts/management/commands/clean_spammers.py
|
accounts/management/commands/clean_spammers.py
|
from django.db.models import Q, Count
from django.core.management.base import BaseCommand
from accounts.models import User
class Command(BaseCommand):
def handle(self, *args, **kwargs):
users = (
User.objects
.annotate(game_count=Count('gamelibrary__games'))
.filter(
Q(website__icontains='.ru') | Q(website__icontains='wp-content'),
email_confirmed=False,
game_count=0
)
)
cleared_users = len(users)
for user in users:
print user, user.website
user.delete()
print "Cleared %d users" % cleared_users
|
Add task to delete spam accounts
|
Add task to delete spam accounts
|
Python
|
agpl-3.0
|
Turupawn/website,lutris/website,Turupawn/website,lutris/website,Turupawn/website,Turupawn/website,lutris/website,lutris/website
|
Add task to delete spam accounts
|
from django.db.models import Q, Count
from django.core.management.base import BaseCommand
from accounts.models import User
class Command(BaseCommand):
def handle(self, *args, **kwargs):
users = (
User.objects
.annotate(game_count=Count('gamelibrary__games'))
.filter(
Q(website__icontains='.ru') | Q(website__icontains='wp-content'),
email_confirmed=False,
game_count=0
)
)
cleared_users = len(users)
for user in users:
print user, user.website
user.delete()
print "Cleared %d users" % cleared_users
|
<commit_before><commit_msg>Add task to delete spam accounts<commit_after>
|
from django.db.models import Q, Count
from django.core.management.base import BaseCommand
from accounts.models import User
class Command(BaseCommand):
def handle(self, *args, **kwargs):
users = (
User.objects
.annotate(game_count=Count('gamelibrary__games'))
.filter(
Q(website__icontains='.ru') | Q(website__icontains='wp-content'),
email_confirmed=False,
game_count=0
)
)
cleared_users = len(users)
for user in users:
print user, user.website
user.delete()
print "Cleared %d users" % cleared_users
|
Add task to delete spam accountsfrom django.db.models import Q, Count
from django.core.management.base import BaseCommand
from accounts.models import User
class Command(BaseCommand):
def handle(self, *args, **kwargs):
users = (
User.objects
.annotate(game_count=Count('gamelibrary__games'))
.filter(
Q(website__icontains='.ru') | Q(website__icontains='wp-content'),
email_confirmed=False,
game_count=0
)
)
cleared_users = len(users)
for user in users:
print user, user.website
user.delete()
print "Cleared %d users" % cleared_users
|
<commit_before><commit_msg>Add task to delete spam accounts<commit_after>from django.db.models import Q, Count
from django.core.management.base import BaseCommand
from accounts.models import User
class Command(BaseCommand):
def handle(self, *args, **kwargs):
users = (
User.objects
.annotate(game_count=Count('gamelibrary__games'))
.filter(
Q(website__icontains='.ru') | Q(website__icontains='wp-content'),
email_confirmed=False,
game_count=0
)
)
cleared_users = len(users)
for user in users:
print user, user.website
user.delete()
print "Cleared %d users" % cleared_users
|
|
6f148fb1bb047b4977c8fcd1d898c231bed3fc9d
|
indra/tests/test_dart_client.py
|
indra/tests/test_dart_client.py
|
import json
from indra.literature.dart_client import jsonify_query_data
def test_timestamp():
# Should ignore "after"
assert jsonify_query_data(timestamp={'on': '2020-01-01',
'after': '2020-01-02'}) == \
json.dumps({"timestamp": {"on": "2020-01-01"}})
assert jsonify_query_data(timestamp={'after': '2020-01-01',
'before': '2020-01-05'}) == \
json.dumps(
{'timestamp': {'after': '2020-01-01', 'before': '2020-01-05'}})
def test_lists():
# Check lists, ignore the lists that have non-str objects
assert jsonify_query_data(readers=['hume', 123456],
versions=['123', '456']) ==\
json.dumps({'versions': ['123', '456']})
|
Add two tests for dart client
|
Add two tests for dart client
|
Python
|
bsd-2-clause
|
sorgerlab/belpy,sorgerlab/indra,sorgerlab/belpy,bgyori/indra,johnbachman/belpy,sorgerlab/indra,bgyori/indra,sorgerlab/indra,bgyori/indra,sorgerlab/belpy,johnbachman/belpy,johnbachman/indra,johnbachman/indra,johnbachman/belpy,johnbachman/indra
|
Add two tests for dart client
|
import json
from indra.literature.dart_client import jsonify_query_data
def test_timestamp():
# Should ignore "after"
assert jsonify_query_data(timestamp={'on': '2020-01-01',
'after': '2020-01-02'}) == \
json.dumps({"timestamp": {"on": "2020-01-01"}})
assert jsonify_query_data(timestamp={'after': '2020-01-01',
'before': '2020-01-05'}) == \
json.dumps(
{'timestamp': {'after': '2020-01-01', 'before': '2020-01-05'}})
def test_lists():
# Check lists, ignore the lists that have non-str objects
assert jsonify_query_data(readers=['hume', 123456],
versions=['123', '456']) ==\
json.dumps({'versions': ['123', '456']})
|
<commit_before><commit_msg>Add two tests for dart client<commit_after>
|
import json
from indra.literature.dart_client import jsonify_query_data
def test_timestamp():
# Should ignore "after"
assert jsonify_query_data(timestamp={'on': '2020-01-01',
'after': '2020-01-02'}) == \
json.dumps({"timestamp": {"on": "2020-01-01"}})
assert jsonify_query_data(timestamp={'after': '2020-01-01',
'before': '2020-01-05'}) == \
json.dumps(
{'timestamp': {'after': '2020-01-01', 'before': '2020-01-05'}})
def test_lists():
# Check lists, ignore the lists that have non-str objects
assert jsonify_query_data(readers=['hume', 123456],
versions=['123', '456']) ==\
json.dumps({'versions': ['123', '456']})
|
Add two tests for dart clientimport json
from indra.literature.dart_client import jsonify_query_data
def test_timestamp():
# Should ignore "after"
assert jsonify_query_data(timestamp={'on': '2020-01-01',
'after': '2020-01-02'}) == \
json.dumps({"timestamp": {"on": "2020-01-01"}})
assert jsonify_query_data(timestamp={'after': '2020-01-01',
'before': '2020-01-05'}) == \
json.dumps(
{'timestamp': {'after': '2020-01-01', 'before': '2020-01-05'}})
def test_lists():
# Check lists, ignore the lists that have non-str objects
assert jsonify_query_data(readers=['hume', 123456],
versions=['123', '456']) ==\
json.dumps({'versions': ['123', '456']})
|
<commit_before><commit_msg>Add two tests for dart client<commit_after>import json
from indra.literature.dart_client import jsonify_query_data
def test_timestamp():
# Should ignore "after"
assert jsonify_query_data(timestamp={'on': '2020-01-01',
'after': '2020-01-02'}) == \
json.dumps({"timestamp": {"on": "2020-01-01"}})
assert jsonify_query_data(timestamp={'after': '2020-01-01',
'before': '2020-01-05'}) == \
json.dumps(
{'timestamp': {'after': '2020-01-01', 'before': '2020-01-05'}})
def test_lists():
# Check lists, ignore the lists that have non-str objects
assert jsonify_query_data(readers=['hume', 123456],
versions=['123', '456']) ==\
json.dumps({'versions': ['123', '456']})
|
|
de79ece940d244d2346b45cb27840f4bfbb32b20
|
iscc_bench/title_length.py
|
iscc_bench/title_length.py
|
# -*- coding: utf-8 -*-
"""Script to measure title length statisics"""
from itertools import cycle
import numpy as np
from iscc_bench.readers import ALL_READERS
def iter_titles():
"""Iterate over titles"""
readers = [r() for r in ALL_READERS]
for reader in cycle(readers):
meta = next(reader)
yield meta.title
def reject_outliers(data, m=2.):
"""Remove outliers from data."""
d = np.abs(data - np.median(data))
mdev = np.median(d)
s = d/mdev if mdev else 0.
return data[s < m]
if __name__ == '__main__':
SAMPLE_SIZE = 1000000
title_sizes = []
for n, title in enumerate(iter_titles()):
title_sizes.append(len(title))
if n > SAMPLE_SIZE:
break
data = np.array(title_sizes, dtype=np.uint16)
abs_max = max(data)
print('Longest title in {} samples had {} chars.'.format(SAMPLE_SIZE, abs_max))
print('The mean title length of all titles is {} chars '.format(data.mean()))
cleaned = reject_outliers(data)
max_real = max(cleaned)
print('The longest title without outliers is {} chars.'.format(max_real))
print('The mean title length without outliers is {} chars.'.format(cleaned.mean()))
|
Add script to measure title length statistics
|
Add script to measure title length statistics
|
Python
|
bsd-2-clause
|
coblo/isccbench
|
Add script to measure title length statistics
|
# -*- coding: utf-8 -*-
"""Script to measure title length statisics"""
from itertools import cycle
import numpy as np
from iscc_bench.readers import ALL_READERS
def iter_titles():
"""Iterate over titles"""
readers = [r() for r in ALL_READERS]
for reader in cycle(readers):
meta = next(reader)
yield meta.title
def reject_outliers(data, m=2.):
"""Remove outliers from data."""
d = np.abs(data - np.median(data))
mdev = np.median(d)
s = d/mdev if mdev else 0.
return data[s < m]
if __name__ == '__main__':
SAMPLE_SIZE = 1000000
title_sizes = []
for n, title in enumerate(iter_titles()):
title_sizes.append(len(title))
if n > SAMPLE_SIZE:
break
data = np.array(title_sizes, dtype=np.uint16)
abs_max = max(data)
print('Longest title in {} samples had {} chars.'.format(SAMPLE_SIZE, abs_max))
print('The mean title length of all titles is {} chars '.format(data.mean()))
cleaned = reject_outliers(data)
max_real = max(cleaned)
print('The longest title without outliers is {} chars.'.format(max_real))
print('The mean title length without outliers is {} chars.'.format(cleaned.mean()))
|
<commit_before><commit_msg>Add script to measure title length statistics<commit_after>
|
# -*- coding: utf-8 -*-
"""Script to measure title length statisics"""
from itertools import cycle
import numpy as np
from iscc_bench.readers import ALL_READERS
def iter_titles():
"""Iterate over titles"""
readers = [r() for r in ALL_READERS]
for reader in cycle(readers):
meta = next(reader)
yield meta.title
def reject_outliers(data, m=2.):
"""Remove outliers from data."""
d = np.abs(data - np.median(data))
mdev = np.median(d)
s = d/mdev if mdev else 0.
return data[s < m]
if __name__ == '__main__':
SAMPLE_SIZE = 1000000
title_sizes = []
for n, title in enumerate(iter_titles()):
title_sizes.append(len(title))
if n > SAMPLE_SIZE:
break
data = np.array(title_sizes, dtype=np.uint16)
abs_max = max(data)
print('Longest title in {} samples had {} chars.'.format(SAMPLE_SIZE, abs_max))
print('The mean title length of all titles is {} chars '.format(data.mean()))
cleaned = reject_outliers(data)
max_real = max(cleaned)
print('The longest title without outliers is {} chars.'.format(max_real))
print('The mean title length without outliers is {} chars.'.format(cleaned.mean()))
|
Add script to measure title length statistics# -*- coding: utf-8 -*-
"""Script to measure title length statisics"""
from itertools import cycle
import numpy as np
from iscc_bench.readers import ALL_READERS
def iter_titles():
"""Iterate over titles"""
readers = [r() for r in ALL_READERS]
for reader in cycle(readers):
meta = next(reader)
yield meta.title
def reject_outliers(data, m=2.):
"""Remove outliers from data."""
d = np.abs(data - np.median(data))
mdev = np.median(d)
s = d/mdev if mdev else 0.
return data[s < m]
if __name__ == '__main__':
SAMPLE_SIZE = 1000000
title_sizes = []
for n, title in enumerate(iter_titles()):
title_sizes.append(len(title))
if n > SAMPLE_SIZE:
break
data = np.array(title_sizes, dtype=np.uint16)
abs_max = max(data)
print('Longest title in {} samples had {} chars.'.format(SAMPLE_SIZE, abs_max))
print('The mean title length of all titles is {} chars '.format(data.mean()))
cleaned = reject_outliers(data)
max_real = max(cleaned)
print('The longest title without outliers is {} chars.'.format(max_real))
print('The mean title length without outliers is {} chars.'.format(cleaned.mean()))
|
<commit_before><commit_msg>Add script to measure title length statistics<commit_after># -*- coding: utf-8 -*-
"""Script to measure title length statisics"""
from itertools import cycle
import numpy as np
from iscc_bench.readers import ALL_READERS
def iter_titles():
"""Iterate over titles"""
readers = [r() for r in ALL_READERS]
for reader in cycle(readers):
meta = next(reader)
yield meta.title
def reject_outliers(data, m=2.):
"""Remove outliers from data."""
d = np.abs(data - np.median(data))
mdev = np.median(d)
s = d/mdev if mdev else 0.
return data[s < m]
if __name__ == '__main__':
SAMPLE_SIZE = 1000000
title_sizes = []
for n, title in enumerate(iter_titles()):
title_sizes.append(len(title))
if n > SAMPLE_SIZE:
break
data = np.array(title_sizes, dtype=np.uint16)
abs_max = max(data)
print('Longest title in {} samples had {} chars.'.format(SAMPLE_SIZE, abs_max))
print('The mean title length of all titles is {} chars '.format(data.mean()))
cleaned = reject_outliers(data)
max_real = max(cleaned)
print('The longest title without outliers is {} chars.'.format(max_real))
print('The mean title length without outliers is {} chars.'.format(cleaned.mean()))
|
|
625ab38d0509d43620292f471bccee38d66d1fe6
|
leetcode/remove_duplicates_from_sorted_array.py
|
leetcode/remove_duplicates_from_sorted_array.py
|
"""
Please read this as markdown:
# Algorithm description
Problem statement: https://leetcode.com/explore/interview/card/top-interview-questions-easy/92/array/727/
The algorithm implemented is as follows:
1. Check for two base cases:
1. When `nums` is empty
2. When `nums` length is 1
In both cases return the length of the list because there cannot be
repeated elements.
2. Third case: the length of `nums` is greater than 1:
Traverse the `nums` list comparing the current element vs the previous
element. If those elements are different, move the current element to the
next in-order position `target_index`.
`target_index` is the variable we use to keep track of the latest
index that holds ordered, unique elements.
# Complexity
## Time
The time complexity is O(n). Why?
* Traverse: we traverse the given `nums` list only once -> O(n)
* Swap: the `switch` method complexity is O(1).
* Simple operations: the other operations are just boolean comparisons,
counter increases and variable assignments -> O(1).
So we have: O(n) (traverse) * O(1) (swap) * O(1) (simple operations) = O(n)
## Space
The space complexity is O(n). Why?
The only data structure we used to solve the problem was the list to hold
the given input `nums`. -> O(n)
## To improve
The problem statement required to:
> Do not allocate extra space for another array, you must do this by modifying
the input array in-place with O(1) extra memory.
If this constraint was not in place, I would have used a dictionary to get the
unique elements present in `nums` and then would have returned the keys of the
dictionary in ascending order.
The time complexity would have been the same, the space complexity would have
been O(n) (given input) + O(n) (dictionary) ~= O(n). But the maintainability
of the code would have improved because the algorithm would be easier to
understand and modify by others.
"""
class Solution:
def switch(self, source_index: int, target_index: int, nums: [int]) -> None:
nums[source_index], nums[target_index] = nums[target_index], nums[source_index]
def removeDuplicates(self, nums: [int]) -> int:
if len(nums) <= 1:
return len(nums)
target_index = 0
previous_number = nums[0]
for current_index, current_number in enumerate(nums):
if previous_number != current_number:
target_index += 1
self.switch(current_index, target_index, nums)
previous_number = current_number
unique_length = target_index + 1
return unique_length
if __name__ == "__main__":
s = Solution()
r = s.removeDuplicates([0,0,1,1,1,2,2,3,3,4])
print(f'result: {r}')
|
Add solution for: Remove Duplicates from Sorted Array
|
Add solution for: Remove Duplicates from Sorted Array
|
Python
|
mit
|
julianespinel/trainning,julianespinel/training,julianespinel/training,julianespinel/training,julianespinel/trainning,julianespinel/training
|
Add solution for: Remove Duplicates from Sorted Array
|
"""
Please read this as markdown:
# Algorithm description
Problem statement: https://leetcode.com/explore/interview/card/top-interview-questions-easy/92/array/727/
The algorithm implemented is as follows:
1. Check for two base cases:
1. When `nums` is empty
2. When `nums` length is 1
In both cases return the length of the list because there cannot be
repeated elements.
2. Third case: the length of `nums` is greater than 1:
Traverse the `nums` list comparing the current element vs the previous
element. If those elements are different, move the current element to the
next in-order position `target_index`.
`target_index` is the variable we use to keep track of the latest
index that holds ordered, unique elements.
# Complexity
## Time
The time complexity is O(n). Why?
* Traverse: we traverse the given `nums` list only once -> O(n)
* Swap: the `switch` method complexity is O(1).
* Simple operations: the other operations are just boolean comparisons,
counter increases and variable assignments -> O(1).
So we have: O(n) (traverse) * O(1) (swap) * O(1) (simple operations) = O(n)
## Space
The space complexity is O(n). Why?
The only data structure we used to solve the problem was the list to hold
the given input `nums`. -> O(n)
## To improve
The problem statement required to:
> Do not allocate extra space for another array, you must do this by modifying
the input array in-place with O(1) extra memory.
If this constraint was not in place, I would have used a dictionary to get the
unique elements present in `nums` and then would have returned the keys of the
dictionary in ascending order.
The time complexity would have been the same, the space complexity would have
been O(n) (given input) + O(n) (dictionary) ~= O(n). But the maintainability
of the code would have improved because the algorithm would be easier to
understand and modify by others.
"""
class Solution:
def switch(self, source_index: int, target_index: int, nums: [int]) -> None:
nums[source_index], nums[target_index] = nums[target_index], nums[source_index]
def removeDuplicates(self, nums: [int]) -> int:
if len(nums) <= 1:
return len(nums)
target_index = 0
previous_number = nums[0]
for current_index, current_number in enumerate(nums):
if previous_number != current_number:
target_index += 1
self.switch(current_index, target_index, nums)
previous_number = current_number
unique_length = target_index + 1
return unique_length
if __name__ == "__main__":
s = Solution()
r = s.removeDuplicates([0,0,1,1,1,2,2,3,3,4])
print(f'result: {r}')
|
<commit_before><commit_msg>Add solution for: Remove Duplicates from Sorted Array<commit_after>
|
"""
Please read this as markdown:
# Algorithm description
Problem statement: https://leetcode.com/explore/interview/card/top-interview-questions-easy/92/array/727/
The algorithm implemented is as follows:
1. Check for two base cases:
1. When `nums` is empty
2. When `nums` length is 1
In both cases return the length of the list because there cannot be
repeated elements.
2. Third case: the length of `nums` is greater than 1:
Traverse the `nums` list comparing the current element vs the previous
element. If those elements are different, move the current element to the
next in-order position `target_index`.
`target_index` is the variable we use to keep track of the latest
index that holds ordered, unique elements.
# Complexity
## Time
The time complexity is O(n). Why?
* Traverse: we traverse the given `nums` list only once -> O(n)
* Swap: the `switch` method complexity is O(1).
* Simple operations: the other operations are just boolean comparisons,
counter increases and variable assignments -> O(1).
So we have: O(n) (traverse) * O(1) (swap) * O(1) (simple operations) = O(n)
## Space
The space complexity is O(n). Why?
The only data structure we used to solve the problem was the list to hold
the given input `nums`. -> O(n)
## To improve
The problem statement required to:
> Do not allocate extra space for another array, you must do this by modifying
the input array in-place with O(1) extra memory.
If this constraint was not in place, I would have used a dictionary to get the
unique elements present in `nums` and then would have returned the keys of the
dictionary in ascending order.
The time complexity would have been the same, the space complexity would have
been O(n) (given input) + O(n) (dictionary) ~= O(n). But the maintainability
of the code would have improved because the algorithm would be easier to
understand and modify by others.
"""
class Solution:
def switch(self, source_index: int, target_index: int, nums: [int]) -> None:
nums[source_index], nums[target_index] = nums[target_index], nums[source_index]
def removeDuplicates(self, nums: [int]) -> int:
if len(nums) <= 1:
return len(nums)
target_index = 0
previous_number = nums[0]
for current_index, current_number in enumerate(nums):
if previous_number != current_number:
target_index += 1
self.switch(current_index, target_index, nums)
previous_number = current_number
unique_length = target_index + 1
return unique_length
if __name__ == "__main__":
s = Solution()
r = s.removeDuplicates([0,0,1,1,1,2,2,3,3,4])
print(f'result: {r}')
|
Add solution for: Remove Duplicates from Sorted Array"""
Please read this as markdown:
# Algorithm description
Problem statement: https://leetcode.com/explore/interview/card/top-interview-questions-easy/92/array/727/
The algorithm implemented is as follows:
1. Check for two base cases:
1. When `nums` is empty
2. When `nums` length is 1
In both cases return the length of the list because there cannot be
repeated elements.
2. Third case: the length of `nums` is greater than 1:
Traverse the `nums` list comparing the current element vs the previous
element. If those elements are different, move the current element to the
next in-order position `target_index`.
`target_index` is the variable we use to keep track of the latest
index that holds ordered, unique elements.
# Complexity
## Time
The time complexity is O(n). Why?
* Traverse: we traverse the given `nums` list only once -> O(n)
* Swap: the `switch` method complexity is O(1).
* Simple operations: the other operations are just boolean comparisons,
counter increases and variable assignments -> O(1).
So we have: O(n) (traverse) * O(1) (swap) * O(1) (simple operations) = O(n)
## Space
The space complexity is O(n). Why?
The only data structure we used to solve the problem was the list to hold
the given input `nums`. -> O(n)
## To improve
The problem statement required to:
> Do not allocate extra space for another array, you must do this by modifying
the input array in-place with O(1) extra memory.
If this constraint was not in place, I would have used a dictionary to get the
unique elements present in `nums` and then would have returned the keys of the
dictionary in ascending order.
The time complexity would have been the same, the space complexity would have
been O(n) (given input) + O(n) (dictionary) ~= O(n). But the maintainability
of the code would have improved because the algorithm would be easier to
understand and modify by others.
"""
class Solution:
def switch(self, source_index: int, target_index: int, nums: [int]) -> None:
nums[source_index], nums[target_index] = nums[target_index], nums[source_index]
def removeDuplicates(self, nums: [int]) -> int:
if len(nums) <= 1:
return len(nums)
target_index = 0
previous_number = nums[0]
for current_index, current_number in enumerate(nums):
if previous_number != current_number:
target_index += 1
self.switch(current_index, target_index, nums)
previous_number = current_number
unique_length = target_index + 1
return unique_length
if __name__ == "__main__":
s = Solution()
r = s.removeDuplicates([0,0,1,1,1,2,2,3,3,4])
print(f'result: {r}')
|
<commit_before><commit_msg>Add solution for: Remove Duplicates from Sorted Array<commit_after>"""
Please read this as markdown:
# Algorithm description
Problem statement: https://leetcode.com/explore/interview/card/top-interview-questions-easy/92/array/727/
The algorithm implemented is as follows:
1. Check for two base cases:
1. When `nums` is empty
2. When `nums` length is 1
In both cases return the length of the list because there cannot be
repeated elements.
2. Third case: the length of `nums` is greater than 1:
Traverse the `nums` list comparing the current element vs the previous
element. If those elements are different, move the current element to the
next in-order position `target_index`.
`target_index` is the variable we use to keep track of the latest
index that holds ordered, unique elements.
# Complexity
## Time
The time complexity is O(n). Why?
* Traverse: we traverse the given `nums` list only once -> O(n)
* Swap: the `switch` method complexity is O(1).
* Simple operations: the other operations are just boolean comparisons,
counter increases and variable assignments -> O(1).
So we have: O(n) (traverse) * O(1) (swap) * O(1) (simple operations) = O(n)
## Space
The space complexity is O(n). Why?
The only data structure we used to solve the problem was the list to hold
the given input `nums`. -> O(n)
## To improve
The problem statement required to:
> Do not allocate extra space for another array, you must do this by modifying
the input array in-place with O(1) extra memory.
If this constraint was not in place, I would have used a dictionary to get the
unique elements present in `nums` and then would have returned the keys of the
dictionary in ascending order.
The time complexity would have been the same, the space complexity would have
been O(n) (given input) + O(n) (dictionary) ~= O(n). But the maintainability
of the code would have improved because the algorithm would be easier to
understand and modify by others.
"""
class Solution:
def switch(self, source_index: int, target_index: int, nums: [int]) -> None:
nums[source_index], nums[target_index] = nums[target_index], nums[source_index]
def removeDuplicates(self, nums: [int]) -> int:
if len(nums) <= 1:
return len(nums)
target_index = 0
previous_number = nums[0]
for current_index, current_number in enumerate(nums):
if previous_number != current_number:
target_index += 1
self.switch(current_index, target_index, nums)
previous_number = current_number
unique_length = target_index + 1
return unique_length
if __name__ == "__main__":
s = Solution()
r = s.removeDuplicates([0,0,1,1,1,2,2,3,3,4])
print(f'result: {r}')
|
|
5ac31a4baedc9ee6e704392e8c42dd474199fb90
|
examples/basics/visuals/bezier.py
|
examples/basics/visuals/bezier.py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
This example demonstrates how to draw curved lines (bezier).
"""
import sys
from vispy import app, gloo, visuals
from vispy.geometry import curves
from vispy.visuals.transforms import STTransform, NullTransform
class Canvas(app.Canvas):
def __init__(self):
app.Canvas.__init__(self, title='Bezier lines example',
keys='interactive', size=(400, 750))
self.lines = [
visuals.LineVisual(curves.curve4_bezier(
(10, 0),
(50, -190),
(350, 190),
(390, 0)
), color='w', width=2, method='agg'),
visuals.LineVisual(curves.curve4_bezier(
(10, 0),
(190, -190),
(210, 190),
(390, 0)
), color='w', width=2, method='agg'),
visuals.LineVisual(curves.curve3_bezier(
(10, 0),
(30, 200),
(390, 0)
), color='w', width=2, method='agg')
]
# Translate each line visual downwards
for i, line in enumerate(self.lines):
x = 0
y = 200 * (i + 1)
line.transform = STTransform(translate=[x, y])
self.texts = [
visuals.TextVisual('4 point Bezier curve', bold=True, color='w',
font_size=24, pos=(200, 75)),
visuals.TextVisual('3 point Bezier curve', bold=True, color='w',
font_size=24, pos=(200, 525)),
]
for text in self.texts:
text.transform = NullTransform()
# Initialize transform systems for each visual
self.visuals = self.lines + self.texts
for visual in self.visuals:
visual.tr_sys = visuals.transforms.TransformSystem(self)
visual.tr_sys.visual_to_document = visual.transform
self.show()
def on_draw(self, event):
gloo.clear('black')
gloo.set_viewport(0, 0, *self.physical_size)
for visual in self.visuals:
visual.draw(visual.tr_sys)
if __name__ == '__main__':
win = Canvas()
if sys.flags.interactive != 1:
app.run()
|
Add an example how to draw Bezier curves.
|
Add an example how to draw Bezier curves.
The vispy.geometry.curves module provides several helper
functions to generate the right vertices for a nice curved
line.
|
Python
|
bsd-3-clause
|
bollu/vispy,sbtlaarzc/vispy,QuLogic/vispy,srinathv/vispy,RebeccaWPerry/vispy,drufat/vispy,bollu/vispy,michaelaye/vispy,ghisvail/vispy,drufat/vispy,ghisvail/vispy,RebeccaWPerry/vispy,QuLogic/vispy,kkuunnddaannkk/vispy,jay3sh/vispy,inclement/vispy,julienr/vispy,dchilds7/Deysha-Star-Formation,michaelaye/vispy,srinathv/vispy,ghisvail/vispy,Eric89GXL/vispy,inclement/vispy,dchilds7/Deysha-Star-Formation,bollu/vispy,Eric89GXL/vispy,drufat/vispy,sbtlaarzc/vispy,Eric89GXL/vispy,jay3sh/vispy,julienr/vispy,RebeccaWPerry/vispy,julienr/vispy,jdreaver/vispy,inclement/vispy,sbtlaarzc/vispy,jdreaver/vispy,srinathv/vispy,kkuunnddaannkk/vispy,jdreaver/vispy,michaelaye/vispy,kkuunnddaannkk/vispy,QuLogic/vispy,jay3sh/vispy,dchilds7/Deysha-Star-Formation
|
Add an example how to draw Bezier curves.
The vispy.geometry.curves module provides several helper
functions to generate the right vertices for a nice curved
line.
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
This example demonstrates how to draw curved lines (bezier).
"""
import sys
from vispy import app, gloo, visuals
from vispy.geometry import curves
from vispy.visuals.transforms import STTransform, NullTransform
class Canvas(app.Canvas):
def __init__(self):
app.Canvas.__init__(self, title='Bezier lines example',
keys='interactive', size=(400, 750))
self.lines = [
visuals.LineVisual(curves.curve4_bezier(
(10, 0),
(50, -190),
(350, 190),
(390, 0)
), color='w', width=2, method='agg'),
visuals.LineVisual(curves.curve4_bezier(
(10, 0),
(190, -190),
(210, 190),
(390, 0)
), color='w', width=2, method='agg'),
visuals.LineVisual(curves.curve3_bezier(
(10, 0),
(30, 200),
(390, 0)
), color='w', width=2, method='agg')
]
# Translate each line visual downwards
for i, line in enumerate(self.lines):
x = 0
y = 200 * (i + 1)
line.transform = STTransform(translate=[x, y])
self.texts = [
visuals.TextVisual('4 point Bezier curve', bold=True, color='w',
font_size=24, pos=(200, 75)),
visuals.TextVisual('3 point Bezier curve', bold=True, color='w',
font_size=24, pos=(200, 525)),
]
for text in self.texts:
text.transform = NullTransform()
# Initialize transform systems for each visual
self.visuals = self.lines + self.texts
for visual in self.visuals:
visual.tr_sys = visuals.transforms.TransformSystem(self)
visual.tr_sys.visual_to_document = visual.transform
self.show()
def on_draw(self, event):
gloo.clear('black')
gloo.set_viewport(0, 0, *self.physical_size)
for visual in self.visuals:
visual.draw(visual.tr_sys)
if __name__ == '__main__':
win = Canvas()
if sys.flags.interactive != 1:
app.run()
|
<commit_before><commit_msg>Add an example how to draw Bezier curves.
The vispy.geometry.curves module provides several helper
functions to generate the right vertices for a nice curved
line.<commit_after>
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
This example demonstrates how to draw curved lines (bezier).
"""
import sys
from vispy import app, gloo, visuals
from vispy.geometry import curves
from vispy.visuals.transforms import STTransform, NullTransform
class Canvas(app.Canvas):
def __init__(self):
app.Canvas.__init__(self, title='Bezier lines example',
keys='interactive', size=(400, 750))
self.lines = [
visuals.LineVisual(curves.curve4_bezier(
(10, 0),
(50, -190),
(350, 190),
(390, 0)
), color='w', width=2, method='agg'),
visuals.LineVisual(curves.curve4_bezier(
(10, 0),
(190, -190),
(210, 190),
(390, 0)
), color='w', width=2, method='agg'),
visuals.LineVisual(curves.curve3_bezier(
(10, 0),
(30, 200),
(390, 0)
), color='w', width=2, method='agg')
]
# Translate each line visual downwards
for i, line in enumerate(self.lines):
x = 0
y = 200 * (i + 1)
line.transform = STTransform(translate=[x, y])
self.texts = [
visuals.TextVisual('4 point Bezier curve', bold=True, color='w',
font_size=24, pos=(200, 75)),
visuals.TextVisual('3 point Bezier curve', bold=True, color='w',
font_size=24, pos=(200, 525)),
]
for text in self.texts:
text.transform = NullTransform()
# Initialize transform systems for each visual
self.visuals = self.lines + self.texts
for visual in self.visuals:
visual.tr_sys = visuals.transforms.TransformSystem(self)
visual.tr_sys.visual_to_document = visual.transform
self.show()
def on_draw(self, event):
gloo.clear('black')
gloo.set_viewport(0, 0, *self.physical_size)
for visual in self.visuals:
visual.draw(visual.tr_sys)
if __name__ == '__main__':
win = Canvas()
if sys.flags.interactive != 1:
app.run()
|
Add an example how to draw Bezier curves.
The vispy.geometry.curves module provides several helper
functions to generate the right vertices for a nice curved
line.# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
This example demonstrates how to draw curved lines (bezier).
"""
import sys
from vispy import app, gloo, visuals
from vispy.geometry import curves
from vispy.visuals.transforms import STTransform, NullTransform
class Canvas(app.Canvas):
def __init__(self):
app.Canvas.__init__(self, title='Bezier lines example',
keys='interactive', size=(400, 750))
self.lines = [
visuals.LineVisual(curves.curve4_bezier(
(10, 0),
(50, -190),
(350, 190),
(390, 0)
), color='w', width=2, method='agg'),
visuals.LineVisual(curves.curve4_bezier(
(10, 0),
(190, -190),
(210, 190),
(390, 0)
), color='w', width=2, method='agg'),
visuals.LineVisual(curves.curve3_bezier(
(10, 0),
(30, 200),
(390, 0)
), color='w', width=2, method='agg')
]
# Translate each line visual downwards
for i, line in enumerate(self.lines):
x = 0
y = 200 * (i + 1)
line.transform = STTransform(translate=[x, y])
self.texts = [
visuals.TextVisual('4 point Bezier curve', bold=True, color='w',
font_size=24, pos=(200, 75)),
visuals.TextVisual('3 point Bezier curve', bold=True, color='w',
font_size=24, pos=(200, 525)),
]
for text in self.texts:
text.transform = NullTransform()
# Initialize transform systems for each visual
self.visuals = self.lines + self.texts
for visual in self.visuals:
visual.tr_sys = visuals.transforms.TransformSystem(self)
visual.tr_sys.visual_to_document = visual.transform
self.show()
def on_draw(self, event):
gloo.clear('black')
gloo.set_viewport(0, 0, *self.physical_size)
for visual in self.visuals:
visual.draw(visual.tr_sys)
if __name__ == '__main__':
win = Canvas()
if sys.flags.interactive != 1:
app.run()
|
<commit_before><commit_msg>Add an example how to draw Bezier curves.
The vispy.geometry.curves module provides several helper
functions to generate the right vertices for a nice curved
line.<commit_after># -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
This example demonstrates how to draw curved lines (bezier).
"""
import sys
from vispy import app, gloo, visuals
from vispy.geometry import curves
from vispy.visuals.transforms import STTransform, NullTransform
class Canvas(app.Canvas):
def __init__(self):
app.Canvas.__init__(self, title='Bezier lines example',
keys='interactive', size=(400, 750))
self.lines = [
visuals.LineVisual(curves.curve4_bezier(
(10, 0),
(50, -190),
(350, 190),
(390, 0)
), color='w', width=2, method='agg'),
visuals.LineVisual(curves.curve4_bezier(
(10, 0),
(190, -190),
(210, 190),
(390, 0)
), color='w', width=2, method='agg'),
visuals.LineVisual(curves.curve3_bezier(
(10, 0),
(30, 200),
(390, 0)
), color='w', width=2, method='agg')
]
# Translate each line visual downwards
for i, line in enumerate(self.lines):
x = 0
y = 200 * (i + 1)
line.transform = STTransform(translate=[x, y])
self.texts = [
visuals.TextVisual('4 point Bezier curve', bold=True, color='w',
font_size=24, pos=(200, 75)),
visuals.TextVisual('3 point Bezier curve', bold=True, color='w',
font_size=24, pos=(200, 525)),
]
for text in self.texts:
text.transform = NullTransform()
# Initialize transform systems for each visual
self.visuals = self.lines + self.texts
for visual in self.visuals:
visual.tr_sys = visuals.transforms.TransformSystem(self)
visual.tr_sys.visual_to_document = visual.transform
self.show()
def on_draw(self, event):
gloo.clear('black')
gloo.set_viewport(0, 0, *self.physical_size)
for visual in self.visuals:
visual.draw(visual.tr_sys)
if __name__ == '__main__':
win = Canvas()
if sys.flags.interactive != 1:
app.run()
|
|
880c69f0de40c4ad4ec0eddf65e10b3bbd955c6f
|
indico/util/caching_test.py
|
indico/util/caching_test.py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
import pytest
from indico.util.caching import memoize_request
@pytest.yield_fixture
def not_testing(app_context):
app_context.config['TESTING'] = False
try:
yield
finally:
app_context.config['TESTING'] = True
@pytest.mark.usefixtures('request_context', 'not_testing')
def test_memoize_request_args():
calls = [0]
@memoize_request
def fn(a, b, c='default', **kw):
calls[0] += 1
assert calls[0] == 0
fn(1, 2)
assert calls[0] == 1
fn(1, 2) # normal memoized call
assert calls[0] == 1
# default value explicitly provided (both as arg and kwarg)
fn(1, 2, 'default')
fn(1, 2, c='default')
fn(1, b=2)
fn(a=1, b=2, c='default')
assert calls[0] == 1
fn(2, 2, c='default')
assert calls[0] == 2
fn(2, 2)
assert calls[0] == 2
fn(2, 2, foo='bar')
assert calls[0] == 3
fn(a=2, b=2, foo='bar')
assert calls[0] == 3
|
Add unit test for memoize_request
|
Add unit test for memoize_request
|
Python
|
mit
|
ThiefMaster/indico,indico/indico,ThiefMaster/indico,OmeGak/indico,pferreir/indico,mvidalgarcia/indico,DirkHoffmann/indico,mvidalgarcia/indico,mic4ael/indico,DirkHoffmann/indico,indico/indico,OmeGak/indico,ThiefMaster/indico,pferreir/indico,mic4ael/indico,indico/indico,DirkHoffmann/indico,mvidalgarcia/indico,indico/indico,mvidalgarcia/indico,pferreir/indico,mic4ael/indico,pferreir/indico,DirkHoffmann/indico,OmeGak/indico,OmeGak/indico,mic4ael/indico,ThiefMaster/indico
|
Add unit test for memoize_request
|
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
import pytest
from indico.util.caching import memoize_request
@pytest.yield_fixture
def not_testing(app_context):
app_context.config['TESTING'] = False
try:
yield
finally:
app_context.config['TESTING'] = True
@pytest.mark.usefixtures('request_context', 'not_testing')
def test_memoize_request_args():
calls = [0]
@memoize_request
def fn(a, b, c='default', **kw):
calls[0] += 1
assert calls[0] == 0
fn(1, 2)
assert calls[0] == 1
fn(1, 2) # normal memoized call
assert calls[0] == 1
# default value explicitly provided (both as arg and kwarg)
fn(1, 2, 'default')
fn(1, 2, c='default')
fn(1, b=2)
fn(a=1, b=2, c='default')
assert calls[0] == 1
fn(2, 2, c='default')
assert calls[0] == 2
fn(2, 2)
assert calls[0] == 2
fn(2, 2, foo='bar')
assert calls[0] == 3
fn(a=2, b=2, foo='bar')
assert calls[0] == 3
|
<commit_before><commit_msg>Add unit test for memoize_request<commit_after>
|
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
import pytest
from indico.util.caching import memoize_request
@pytest.yield_fixture
def not_testing(app_context):
app_context.config['TESTING'] = False
try:
yield
finally:
app_context.config['TESTING'] = True
@pytest.mark.usefixtures('request_context', 'not_testing')
def test_memoize_request_args():
calls = [0]
@memoize_request
def fn(a, b, c='default', **kw):
calls[0] += 1
assert calls[0] == 0
fn(1, 2)
assert calls[0] == 1
fn(1, 2) # normal memoized call
assert calls[0] == 1
# default value explicitly provided (both as arg and kwarg)
fn(1, 2, 'default')
fn(1, 2, c='default')
fn(1, b=2)
fn(a=1, b=2, c='default')
assert calls[0] == 1
fn(2, 2, c='default')
assert calls[0] == 2
fn(2, 2)
assert calls[0] == 2
fn(2, 2, foo='bar')
assert calls[0] == 3
fn(a=2, b=2, foo='bar')
assert calls[0] == 3
|
Add unit test for memoize_request# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
import pytest
from indico.util.caching import memoize_request
@pytest.yield_fixture
def not_testing(app_context):
app_context.config['TESTING'] = False
try:
yield
finally:
app_context.config['TESTING'] = True
@pytest.mark.usefixtures('request_context', 'not_testing')
def test_memoize_request_args():
calls = [0]
@memoize_request
def fn(a, b, c='default', **kw):
calls[0] += 1
assert calls[0] == 0
fn(1, 2)
assert calls[0] == 1
fn(1, 2) # normal memoized call
assert calls[0] == 1
# default value explicitly provided (both as arg and kwarg)
fn(1, 2, 'default')
fn(1, 2, c='default')
fn(1, b=2)
fn(a=1, b=2, c='default')
assert calls[0] == 1
fn(2, 2, c='default')
assert calls[0] == 2
fn(2, 2)
assert calls[0] == 2
fn(2, 2, foo='bar')
assert calls[0] == 3
fn(a=2, b=2, foo='bar')
assert calls[0] == 3
|
<commit_before><commit_msg>Add unit test for memoize_request<commit_after># This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
import pytest
from indico.util.caching import memoize_request
@pytest.yield_fixture
def not_testing(app_context):
app_context.config['TESTING'] = False
try:
yield
finally:
app_context.config['TESTING'] = True
@pytest.mark.usefixtures('request_context', 'not_testing')
def test_memoize_request_args():
calls = [0]
@memoize_request
def fn(a, b, c='default', **kw):
calls[0] += 1
assert calls[0] == 0
fn(1, 2)
assert calls[0] == 1
fn(1, 2) # normal memoized call
assert calls[0] == 1
# default value explicitly provided (both as arg and kwarg)
fn(1, 2, 'default')
fn(1, 2, c='default')
fn(1, b=2)
fn(a=1, b=2, c='default')
assert calls[0] == 1
fn(2, 2, c='default')
assert calls[0] == 2
fn(2, 2)
assert calls[0] == 2
fn(2, 2, foo='bar')
assert calls[0] == 3
fn(a=2, b=2, foo='bar')
assert calls[0] == 3
|
|
68de70260e4ad9649ada6ef283e2ec93ac732762
|
website/jdpages/migrations/0002_auto_orderfield_verbose_name.py
|
website/jdpages/migrations/0002_auto_orderfield_verbose_name.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import mezzanine.core.fields
class Migration(migrations.Migration):
dependencies = [
('jdpages', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='columnelementwidget',
name='_order',
field=mezzanine.core.fields.OrderField(null=True, verbose_name='Order'),
),
migrations.AlterField(
model_name='document',
name='_order',
field=mezzanine.core.fields.OrderField(null=True, verbose_name='Order'),
),
migrations.AlterField(
model_name='socialmediabutton',
name='_order',
field=mezzanine.core.fields.OrderField(null=True, verbose_name='Order'),
),
]
|
Create migration after upgrading to mezzanine 4
|
Create migration after upgrading to mezzanine 4
|
Python
|
mit
|
jonge-democraten/website,jonge-democraten/website,jonge-democraten/website,jonge-democraten/website
|
Create migration after upgrading to mezzanine 4
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import mezzanine.core.fields
class Migration(migrations.Migration):
dependencies = [
('jdpages', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='columnelementwidget',
name='_order',
field=mezzanine.core.fields.OrderField(null=True, verbose_name='Order'),
),
migrations.AlterField(
model_name='document',
name='_order',
field=mezzanine.core.fields.OrderField(null=True, verbose_name='Order'),
),
migrations.AlterField(
model_name='socialmediabutton',
name='_order',
field=mezzanine.core.fields.OrderField(null=True, verbose_name='Order'),
),
]
|
<commit_before><commit_msg>Create migration after upgrading to mezzanine 4<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import mezzanine.core.fields
class Migration(migrations.Migration):
dependencies = [
('jdpages', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='columnelementwidget',
name='_order',
field=mezzanine.core.fields.OrderField(null=True, verbose_name='Order'),
),
migrations.AlterField(
model_name='document',
name='_order',
field=mezzanine.core.fields.OrderField(null=True, verbose_name='Order'),
),
migrations.AlterField(
model_name='socialmediabutton',
name='_order',
field=mezzanine.core.fields.OrderField(null=True, verbose_name='Order'),
),
]
|
Create migration after upgrading to mezzanine 4# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import mezzanine.core.fields
class Migration(migrations.Migration):
dependencies = [
('jdpages', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='columnelementwidget',
name='_order',
field=mezzanine.core.fields.OrderField(null=True, verbose_name='Order'),
),
migrations.AlterField(
model_name='document',
name='_order',
field=mezzanine.core.fields.OrderField(null=True, verbose_name='Order'),
),
migrations.AlterField(
model_name='socialmediabutton',
name='_order',
field=mezzanine.core.fields.OrderField(null=True, verbose_name='Order'),
),
]
|
<commit_before><commit_msg>Create migration after upgrading to mezzanine 4<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import mezzanine.core.fields
class Migration(migrations.Migration):
dependencies = [
('jdpages', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='columnelementwidget',
name='_order',
field=mezzanine.core.fields.OrderField(null=True, verbose_name='Order'),
),
migrations.AlterField(
model_name='document',
name='_order',
field=mezzanine.core.fields.OrderField(null=True, verbose_name='Order'),
),
migrations.AlterField(
model_name='socialmediabutton',
name='_order',
field=mezzanine.core.fields.OrderField(null=True, verbose_name='Order'),
),
]
|
|
1a81ef87d7d763957533f0f9e62b12834bfb38bb
|
tests/test_tasks.py
|
tests/test_tasks.py
|
# Copyright 2017 Codethink Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Test cases for Night Train task descriptions.'''
import nighttrain
def test_simple():
'''Basic test of task list parser.'''
TASKS = '''
- name: print-hello
commands: echo "hello"
'''
tasklist = nighttrain.tasks.TaskList(TASKS)
assert tasklist.names == ['print-hello']
|
Add initial test suite for nighttrain.tasks module
|
tests: Add initial test suite for nighttrain.tasks module
This overlaps with the main test suite so far, but will be useful once
the task list format becomes more complex.
|
Python
|
apache-2.0
|
ssssam/nightbus,ssssam/nightbus
|
tests: Add initial test suite for nighttrain.tasks module
This overlaps with the main test suite so far, but will be useful once
the task list format becomes more complex.
|
# Copyright 2017 Codethink Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Test cases for Night Train task descriptions.'''
import nighttrain
def test_simple():
'''Basic test of task list parser.'''
TASKS = '''
- name: print-hello
commands: echo "hello"
'''
tasklist = nighttrain.tasks.TaskList(TASKS)
assert tasklist.names == ['print-hello']
|
<commit_before><commit_msg>tests: Add initial test suite for nighttrain.tasks module
This overlaps with the main test suite so far, but will be useful once
the task list format becomes more complex.<commit_after>
|
# Copyright 2017 Codethink Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Test cases for Night Train task descriptions.'''
import nighttrain
def test_simple():
'''Basic test of task list parser.'''
TASKS = '''
- name: print-hello
commands: echo "hello"
'''
tasklist = nighttrain.tasks.TaskList(TASKS)
assert tasklist.names == ['print-hello']
|
tests: Add initial test suite for nighttrain.tasks module
This overlaps with the main test suite so far, but will be useful once
the task list format becomes more complex.# Copyright 2017 Codethink Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Test cases for Night Train task descriptions.'''
import nighttrain
def test_simple():
'''Basic test of task list parser.'''
TASKS = '''
- name: print-hello
commands: echo "hello"
'''
tasklist = nighttrain.tasks.TaskList(TASKS)
assert tasklist.names == ['print-hello']
|
<commit_before><commit_msg>tests: Add initial test suite for nighttrain.tasks module
This overlaps with the main test suite so far, but will be useful once
the task list format becomes more complex.<commit_after># Copyright 2017 Codethink Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Test cases for Night Train task descriptions.'''
import nighttrain
def test_simple():
'''Basic test of task list parser.'''
TASKS = '''
- name: print-hello
commands: echo "hello"
'''
tasklist = nighttrain.tasks.TaskList(TASKS)
assert tasklist.names == ['print-hello']
|
|
3095e39499df1db93c7fd8771f44504a38986fde
|
src/sentry/migrations/0063_remove_bad_groupedmessage_index.py
|
src/sentry/migrations/0063_remove_bad_groupedmessage_index.py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'GroupedMessage', fields ['logger', 'view', 'checksum']
db.delete_unique('sentry_groupedmessage', ['logger', 'view', 'checksum'])
def backwards(self, orm):
# Adding unique constraint on 'GroupedMessage', fields ['logger', 'view', 'checksum']
db.create_unique('sentry_groupedmessage', ['logger', 'view', 'checksum'])
|
Clean up an old index in sentry_groupedmessage
|
Clean up an old index in sentry_groupedmessage
The original migration version 0015 was supposed to remove this, but the
ordering of the index fields in the migration script didn't match the
actual index.
0015 has:
db.delete_unique('sentry_groupedmessage', ['checksum', 'logger', 'view'])
but the order should be:
db.delete_unique('sentry_groupedmessage', ['logger', 'view', 'checksum'])
This old index prevents a message group from existing in multiple
projects. There is a new index already added by 0015 that includes the
project id and should replace this index.
|
Python
|
bsd-3-clause
|
looker/sentry,SilentCircle/sentry,vperron/sentry,jokey2k/sentry,rdio/sentry,gencer/sentry,daevaorn/sentry,pauloschilling/sentry,SilentCircle/sentry,alexm92/sentry,songyi199111/sentry,ifduyue/sentry,mvaled/sentry,ewdurbin/sentry,alexm92/sentry,kevinlondon/sentry,Kryz/sentry,looker/sentry,BayanGroup/sentry,nicholasserra/sentry,gencer/sentry,ifduyue/sentry,JTCunning/sentry,jean/sentry,argonemyth/sentry,llonchj/sentry,imankulov/sentry,JTCunning/sentry,nicholasserra/sentry,JamesMura/sentry,camilonova/sentry,mitsuhiko/sentry,gg7/sentry,ifduyue/sentry,pauloschilling/sentry,fotinakis/sentry,rdio/sentry,beeftornado/sentry,JamesMura/sentry,TedaLIEz/sentry,gencer/sentry,daevaorn/sentry,JackDanger/sentry,ewdurbin/sentry,felixbuenemann/sentry,looker/sentry,zenefits/sentry,jean/sentry,JamesMura/sentry,BuildingLink/sentry,vperron/sentry,hongliang5623/sentry,JackDanger/sentry,daevaorn/sentry,beni55/sentry,ngonzalvez/sentry,ewdurbin/sentry,daevaorn/sentry,songyi199111/sentry,korealerts1/sentry,drcapulet/sentry,vperron/sentry,fuziontech/sentry,drcapulet/sentry,zenefits/sentry,beni55/sentry,NickPresta/sentry,wong2/sentry,hongliang5623/sentry,songyi199111/sentry,BayanGroup/sentry,nicholasserra/sentry,Kryz/sentry,felixbuenemann/sentry,1tush/sentry,llonchj/sentry,gencer/sentry,korealerts1/sentry,fotinakis/sentry,fotinakis/sentry,looker/sentry,boneyao/sentry,beeftornado/sentry,rdio/sentry,jean/sentry,wujuguang/sentry,NickPresta/sentry,wujuguang/sentry,jokey2k/sentry,Natim/sentry,camilonova/sentry,BuildingLink/sentry,jean/sentry,mvaled/sentry,NickPresta/sentry,beni55/sentry,llonchj/sentry,gencer/sentry,TedaLIEz/sentry,fotinakis/sentry,imankulov/sentry,JamesMura/sentry,pauloschilling/sentry,camilonova/sentry,Natim/sentry,BuildingLink/sentry,wujuguang/sentry,JamesMura/sentry,wong2/sentry,argonemyth/sentry,drcapulet/sentry,boneyao/sentry,NickPresta/sentry,korealerts1/sentry,TedaLIEz/sentry,zenefits/sentry,1tush/sentry,SilentCircle/sentry,JTCunning/sentry,kevinlondon/sentry,ngonzalvez/sentry,kevinastone/sentry,fuziontech/sentry,wong2/sentry,Kryz/sentry,kevinastone/sentry,argonemyth/sentry,hongliang5623/sentry,zenefits/sentry,BayanGroup/sentry,looker/sentry,felixbuenemann/sentry,fuziontech/sentry,BuildingLink/sentry,kevinlondon/sentry,zenefits/sentry,Natim/sentry,mvaled/sentry,1tush/sentry,mvaled/sentry,SilentCircle/sentry,gg7/sentry,ngonzalvez/sentry,mvaled/sentry,rdio/sentry,ifduyue/sentry,jokey2k/sentry,mitsuhiko/sentry,beeftornado/sentry,mvaled/sentry,boneyao/sentry,imankulov/sentry,alexm92/sentry,BuildingLink/sentry,JackDanger/sentry,gg7/sentry,kevinastone/sentry,ifduyue/sentry,jean/sentry
|
Clean up an old index in sentry_groupedmessage
The original migration version 0015 was supposed to remove this, but the
ordering of the index fields in the migration script didn't match the
actual index.
0015 has:
db.delete_unique('sentry_groupedmessage', ['checksum', 'logger', 'view'])
but the order should be:
db.delete_unique('sentry_groupedmessage', ['logger', 'view', 'checksum'])
This old index prevents a message group from existing in multiple
projects. There is a new index already added by 0015 that includes the
project id and should replace this index.
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'GroupedMessage', fields ['logger', 'view', 'checksum']
db.delete_unique('sentry_groupedmessage', ['logger', 'view', 'checksum'])
def backwards(self, orm):
# Adding unique constraint on 'GroupedMessage', fields ['logger', 'view', 'checksum']
db.create_unique('sentry_groupedmessage', ['logger', 'view', 'checksum'])
|
<commit_before><commit_msg>Clean up an old index in sentry_groupedmessage
The original migration version 0015 was supposed to remove this, but the
ordering of the index fields in the migration script didn't match the
actual index.
0015 has:
db.delete_unique('sentry_groupedmessage', ['checksum', 'logger', 'view'])
but the order should be:
db.delete_unique('sentry_groupedmessage', ['logger', 'view', 'checksum'])
This old index prevents a message group from existing in multiple
projects. There is a new index already added by 0015 that includes the
project id and should replace this index.<commit_after>
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'GroupedMessage', fields ['logger', 'view', 'checksum']
db.delete_unique('sentry_groupedmessage', ['logger', 'view', 'checksum'])
def backwards(self, orm):
# Adding unique constraint on 'GroupedMessage', fields ['logger', 'view', 'checksum']
db.create_unique('sentry_groupedmessage', ['logger', 'view', 'checksum'])
|
Clean up an old index in sentry_groupedmessage
The original migration version 0015 was supposed to remove this, but the
ordering of the index fields in the migration script didn't match the
actual index.
0015 has:
db.delete_unique('sentry_groupedmessage', ['checksum', 'logger', 'view'])
but the order should be:
db.delete_unique('sentry_groupedmessage', ['logger', 'view', 'checksum'])
This old index prevents a message group from existing in multiple
projects. There is a new index already added by 0015 that includes the
project id and should replace this index.# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'GroupedMessage', fields ['logger', 'view', 'checksum']
db.delete_unique('sentry_groupedmessage', ['logger', 'view', 'checksum'])
def backwards(self, orm):
# Adding unique constraint on 'GroupedMessage', fields ['logger', 'view', 'checksum']
db.create_unique('sentry_groupedmessage', ['logger', 'view', 'checksum'])
|
<commit_before><commit_msg>Clean up an old index in sentry_groupedmessage
The original migration version 0015 was supposed to remove this, but the
ordering of the index fields in the migration script didn't match the
actual index.
0015 has:
db.delete_unique('sentry_groupedmessage', ['checksum', 'logger', 'view'])
but the order should be:
db.delete_unique('sentry_groupedmessage', ['logger', 'view', 'checksum'])
This old index prevents a message group from existing in multiple
projects. There is a new index already added by 0015 that includes the
project id and should replace this index.<commit_after># encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'GroupedMessage', fields ['logger', 'view', 'checksum']
db.delete_unique('sentry_groupedmessage', ['logger', 'view', 'checksum'])
def backwards(self, orm):
# Adding unique constraint on 'GroupedMessage', fields ['logger', 'view', 'checksum']
db.create_unique('sentry_groupedmessage', ['logger', 'view', 'checksum'])
|
|
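Worth noting about the migration above: South matches a unique constraint by the exact column order it is given, which is why the 0015 call with ['checksum', 'logger', 'view'] silently missed the real index. A defensive variant that probes the known orderings could look like this (a sketch against South's db API as used above; the helper and its fallback behaviour are not part of the original commit):
from south.db import db

def drop_stale_unique(table, orderings):
    # South derives the constraint name from the column order, so try each
    # known ordering until one matches an index that actually exists.
    for columns in orderings:
        try:
            db.delete_unique(table, columns)
            return True
        except Exception:
            continue
    return False

drop_stale_unique('sentry_groupedmessage',
                  [['logger', 'view', 'checksum'],
                   ['checksum', 'logger', 'view']])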
f3f7db3064afea812c47c1edd3115ce5cec2b558
|
modules/module_wolfram_alpha.py
|
modules/module_wolfram_alpha.py
|
import requests
import urllib
import logging
try:
from lxml import etree
print("running with lxml.etree")
except ImportError:
print("module_wolfram_alpha requires lxml.etree for xpath support")
appid = None
query = "http://api.wolframalpha.com/v2/query?input=%s&appid=%s"
log = logging.getLogger('wolfram_alpha')
def init(bot):
global appid
config = bot.config.get("module_wolfram_alpha", {})
appid = config.get("appid", "")
if appid:
log.info("Using Wolfram Alpha appid %s" % appid)
else:
log.warning("Appid not found from config!")
def command_wa(bot, user, channel, args):
"""Query Wolfram Alpha"""
if not appid:
        log.warning("Appid not specified in configuration!")
return
r = requests.get(query % (urllib.quote(args), appid))
if r.status_code != 200: return
root = etree.fromstring(r.content)
# find all pods
pods = root.findall("pod")
# no answer pods found, check if there are didyoumeans-elements
if not pods:
didyoumeans = root.find("didyoumeans")
# no support for future stuff yet, TODO?
if not didyoumeans: return
options = []
for didyoumean in didyoumeans:
options.append("'%s'" % didyoumean.text)
line = " or ".join(options)
line = "Did you mean %s?" % line
return bot.say(channel, line.encode("UTF-8"))
# first pod has the question as WA sees it
question = pods[0].xpath("subpod/plaintext")[0].text
# second one has the best answer
answer = pods[1].xpath("subpod/plaintext")[0].text
line = "%s: %s" % (question, answer)
return bot.say(channel, line.encode("UTF-8"))
|
Add module for wolfram alpha queries via their API
|
Add module for wolfram alpha queries via their API
git-svn-id: 056f9092885898c4775d98c479d2d33d00273e45@330 dda364a1-ef19-0410-af65-756c83048fb2
|
Python
|
bsd-3-clause
|
aapa/pyfibot,aapa/pyfibot,rnyberg/pyfibot,lepinkainen/pyfibot,rnyberg/pyfibot,EArmour/pyfibot,huqa/pyfibot,huqa/pyfibot,lepinkainen/pyfibot,EArmour/pyfibot
|
Add module for wolfram alpha queries via their API
git-svn-id: 056f9092885898c4775d98c479d2d33d00273e45@330 dda364a1-ef19-0410-af65-756c83048fb2
|
import requests
import urllib
import logging
try:
from lxml import etree
print("running with lxml.etree")
except ImportError:
print("module_wolfram_alpha requires lxml.etree for xpath support")
appid = None
query = "http://api.wolframalpha.com/v2/query?input=%s&appid=%s"
log = logging.getLogger('wolfram_alpha')
def init(bot):
global appid
config = bot.config.get("module_wolfram_alpha", {})
appid = config.get("appid", "")
if appid:
log.info("Using Wolfram Alpha appid %s" % appid)
else:
log.warning("Appid not found from config!")
def command_wa(bot, user, channel, args):
"""Query Wolfram Alpha"""
if not appid:
        log.warning("Appid not specified in configuration!")
return
r = requests.get(query % (urllib.quote(args), appid))
if r.status_code != 200: return
root = etree.fromstring(r.content)
# find all pods
pods = root.findall("pod")
# no answer pods found, check if there are didyoumeans-elements
if not pods:
didyoumeans = root.find("didyoumeans")
# no support for future stuff yet, TODO?
if not didyoumeans: return
options = []
for didyoumean in didyoumeans:
options.append("'%s'" % didyoumean.text)
line = " or ".join(options)
line = "Did you mean %s?" % line
return bot.say(channel, line.encode("UTF-8"))
# first pod has the question as WA sees it
question = pods[0].xpath("subpod/plaintext")[0].text
# second one has the best answer
answer = pods[1].xpath("subpod/plaintext")[0].text
line = "%s: %s" % (question, answer)
return bot.say(channel, line.encode("UTF-8"))
|
<commit_before><commit_msg>Add module for wolfram alpha queries via their API
git-svn-id: 056f9092885898c4775d98c479d2d33d00273e45@330 dda364a1-ef19-0410-af65-756c83048fb2<commit_after>
|
import requests
import urllib
import logging
try:
from lxml import etree
print("running with lxml.etree")
except ImportError:
print("module_wolfram_alpha requires lxml.etree for xpath support")
appid = None
query = "http://api.wolframalpha.com/v2/query?input=%s&appid=%s"
log = logging.getLogger('wolfram_alpha')
def init(bot):
global appid
config = bot.config.get("module_wolfram_alpha", {})
appid = config.get("appid", "")
if appid:
log.info("Using Wolfram Alpha appid %s" % appid)
else:
log.warning("Appid not found from config!")
def command_wa(bot, user, channel, args):
"""Query Wolfram Alpha"""
if not appid:
        log.warning("Appid not specified in configuration!")
return
r = requests.get(query % (urllib.quote(args), appid))
if r.status_code != 200: return
root = etree.fromstring(r.content)
# find all pods
pods = root.findall("pod")
# no answer pods found, check if there are didyoumeans-elements
if not pods:
didyoumeans = root.find("didyoumeans")
# no support for future stuff yet, TODO?
if not didyoumeans: return
options = []
for didyoumean in didyoumeans:
options.append("'%s'" % didyoumean.text)
line = " or ".join(options)
line = "Did you mean %s?" % line
return bot.say(channel, line.encode("UTF-8"))
# first pod has the question as WA sees it
question = pods[0].xpath("subpod/plaintext")[0].text
# second one has the best answer
answer = pods[1].xpath("subpod/plaintext")[0].text
line = "%s: %s" % (question, answer)
return bot.say(channel, line.encode("UTF-8"))
|
Add module for wolfram alpha queries via their API
git-svn-id: 056f9092885898c4775d98c479d2d33d00273e45@330 dda364a1-ef19-0410-af65-756c83048fb2import requests
import urllib
import logging
try:
from lxml import etree
print("running with lxml.etree")
except ImportError:
print("module_wolfram_alpha requires lxml.etree for xpath support")
appid = None
query = "http://api.wolframalpha.com/v2/query?input=%s&appid=%s"
log = logging.getLogger('wolfram_alpha')
def init(bot):
global appid
config = bot.config.get("module_wolfram_alpha", {})
appid = config.get("appid", "")
if appid:
log.info("Using Wolfram Alpha appid %s" % appid)
else:
log.warning("Appid not found from config!")
def command_wa(bot, user, channel, args):
"""Query Wolfram Alpha"""
if not appid:
        log.warning("Appid not specified in configuration!")
return
r = requests.get(query % (urllib.quote(args), appid))
if r.status_code != 200: return
root = etree.fromstring(r.content)
# find all pods
pods = root.findall("pod")
# no answer pods found, check if there are didyoumeans-elements
if not pods:
didyoumeans = root.find("didyoumeans")
# no support for future stuff yet, TODO?
if not didyoumeans: return
options = []
for didyoumean in didyoumeans:
options.append("'%s'" % didyoumean.text)
line = " or ".join(options)
line = "Did you mean %s?" % line
return bot.say(channel, line.encode("UTF-8"))
# first pod has the question as WA sees it
question = pods[0].xpath("subpod/plaintext")[0].text
# second one has the best answer
answer = pods[1].xpath("subpod/plaintext")[0].text
line = "%s: %s" % (question, answer)
return bot.say(channel, line.encode("UTF-8"))
|
<commit_before><commit_msg>Add module for wolfram alpha queries via their API
git-svn-id: 056f9092885898c4775d98c479d2d33d00273e45@330 dda364a1-ef19-0410-af65-756c83048fb2<commit_after>import requests
import urllib
import logging
try:
from lxml import etree
print("running with lxml.etree")
except ImportError:
print("module_wolfram_alpha requires lxml.etree for xpath support")
appid = None
query = "http://api.wolframalpha.com/v2/query?input=%s&appid=%s"
log = logging.getLogger('wolfram_alpha')
def init(bot):
global appid
config = bot.config.get("module_wolfram_alpha", {})
appid = config.get("appid", "")
if appid:
log.info("Using Wolfram Alpha appid %s" % appid)
else:
log.warning("Appid not found from config!")
def command_wa(bot, user, channel, args):
"""Query Wolfram Alpha"""
if not appid:
        log.warning("Appid not specified in configuration!")
return
r = requests.get(query % (urllib.quote(args), appid))
if r.status_code != 200: return
root = etree.fromstring(r.content)
# find all pods
pods = root.findall("pod")
# no answer pods found, check if there are didyoumeans-elements
if not pods:
didyoumeans = root.find("didyoumeans")
# no support for future stuff yet, TODO?
if not didyoumeans: return
options = []
for didyoumean in didyoumeans:
options.append("'%s'" % didyoumean.text)
line = " or ".join(options)
line = "Did you mean %s?" % line
return bot.say(channel, line.encode("UTF-8"))
# first pod has the question as WA sees it
question = pods[0].xpath("subpod/plaintext")[0].text
# second one has the best answer
answer = pods[1].xpath("subpod/plaintext")[0].text
line = "%s: %s" % (question, answer)
return bot.say(channel, line.encode("UTF-8"))
|
|
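One caveat in the module above: lxml elements evaluate falsy when they have no children, so `if not didyoumeans` conflates a missing <didyoumeans> element with an empty one. A sketch of the more explicit check (same lxml API, behaviour otherwise unchanged):
didyoumeans = root.find("didyoumeans")
# find() returns None when the element is absent; test that explicitly
# instead of relying on element truthiness.
if didyoumeans is None or len(didyoumeans) == 0:
    return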
2de823ae11e1337f114457bf4e49275d8d2eda99
|
recursive_binary_search.py
|
recursive_binary_search.py
|
def binary_search(array, low, high, item):
    if low > high:
        return -1
    mid = (low + high) // 2
    if item == array[mid]:
return mid
elif item < array[mid]:
return binary_search(array, low, mid-1, item)
elif item > array[mid]:
return binary_search(array, mid+1, high, item)
print(binary_search([1, 4, 5, 7], 0, 3, 5))   # 2
print(binary_search([1, 4, 5, 7], 0, 3, 10))  # -1
|
Add recursive binary search implementation
|
Add recursive binary search implementation
|
Python
|
mit
|
arafat-al-mahmud/algorithms-python
|
Add recursive binary search implementation
|
def binary_search(array, low, high, item):
    if low > high:
        return -1
    mid = (low + high) // 2
    if item == array[mid]:
return mid
elif item < array[mid]:
return binary_search(array, low, mid-1, item)
elif item > array[mid]:
return binary_search(array, mid+1, high, item)
print(binary_search([1, 4, 5, 7], 0, 3, 5))   # 2
print(binary_search([1, 4, 5, 7], 0, 3, 10))  # -1
|
<commit_before><commit_msg>Add recursive binary search implementation<commit_after>
|
def binary_search(array, low, high, item):
    if low > high:
        return -1
    mid = (low + high) // 2
    if item == array[mid]:
return mid
elif item < array[mid]:
return binary_search(array, low, mid-1, item)
elif item > array[mid]:
return binary_search(array, mid+1, high, item)
print(binary_search([1, 4, 5, 7], 0, 3, 5))   # 2
print(binary_search([1, 4, 5, 7], 0, 3, 10))  # -1
|
Add recursive binary search implementation
def binary_search(array, low, high, item):
    if low > high:
        return -1
    mid = (low + high) // 2
    if item == array[mid]:
return mid
elif item < array[mid]:
return binary_search(array, low, mid-1, item)
elif item > array[mid]:
return binary_search(array, mid+1, high, item)
print(binary_search([1, 4, 5, 7], 0, 3, 5))   # 2
print(binary_search([1, 4, 5, 7], 0, 3, 10))  # -1
|
<commit_before><commit_msg>Add recursive binary search implementation<commit_after>
def binary_search(array, low, high, item):
    if low > high:
        return -1
    mid = (low + high) // 2
    if item == array[mid]:
return mid
elif item < array[mid]:
return binary_search(array, low, mid-1, item)
elif item > array[mid]:
return binary_search(array, mid+1, high, item)
print(binary_search([1, 4, 5, 7], 0, 3, 5))   # 2
print(binary_search([1, 4, 5, 7], 0, 3, 10))  # -1
|
|
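For comparison, the standard library reaches the same result without recursion; a minimal equivalent built on bisect (added here for illustration, not part of the commit):
from bisect import bisect_left

def binary_search_iterative(array, item):
    # bisect_left finds the insertion point; the item is present only if
    # that position holds an equal value.
    i = bisect_left(array, item)
    return i if i < len(array) and array[i] == item else -1

print(binary_search_iterative([1, 4, 5, 7], 5))   # 2
print(binary_search_iterative([1, 4, 5, 7], 10))  # -1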
befadd8fc0482adb55f63ac51166f2330c897d7a
|
src/diamond/handler/httpHandler.py
|
src/diamond/handler/httpHandler.py
|
#!/usr/bin/env python
# coding=utf-8
from Handler import Handler
import urllib2
class HttpPostHandler(Handler):
    # Initialize Handler with url and batch size
def __init__(self, config=None):
Handler.__init__(self, config)
self.metrics = []
self.batch_size = int(self.config.get('batch', 100))
self.url = self.config.get('url')
# Join batched metrics and push to url mentioned in config
def process(self, metric):
self.metrics.append(str(metric))
if len(self.metrics) >= self.batch_size:
req = urllib2.Request(self.url, "\n".join(self.metrics))
urllib2.urlopen(req)
self.metrics = []
|
#!/usr/bin/env python
# coding=utf-8
"""
Send metrics to a http endpoint via POST
#### Dependencies
* urllib2
#### Configuration
Enable this handler
* handers = diamond.handler.httpHandler.HttpPostHandler
* url = http://www.example.com/endpoint
"""
from Handler import Handler
import urllib2
class HttpPostHandler(Handler):
    # Initialize Handler with url and batch size
def __init__(self, config=None):
Handler.__init__(self, config)
self.metrics = []
self.batch_size = int(self.config.get('batch', 100))
self.url = self.config.get('url')
# Join batched metrics and push to url mentioned in config
def process(self, metric):
self.metrics.append(str(metric))
if len(self.metrics) >= self.batch_size:
req = urllib2.Request(self.url, "\n".join(self.metrics))
urllib2.urlopen(req)
self.metrics = []
|
Add in basic HttpPostHandler docs
|
Add in basic HttpPostHandler docs
|
Python
|
mit
|
szibis/Diamond,sebbrandt87/Diamond,TinLe/Diamond,datafiniti/Diamond,Ssawa/Diamond,ramjothikumar/Diamond,signalfx/Diamond,codepython/Diamond,mfriedenhagen/Diamond,thardie/Diamond,works-mobile/Diamond,cannium/Diamond,eMerzh/Diamond-1,actmd/Diamond,Netuitive/Diamond,jriguera/Diamond,TinLe/Diamond,bmhatfield/Diamond,hvnsweeting/Diamond,rtoma/Diamond,dcsquared13/Diamond,jaingaurav/Diamond,python-diamond/Diamond,socialwareinc/Diamond,mzupan/Diamond,gg7/diamond,skbkontur/Diamond,saucelabs/Diamond,dcsquared13/Diamond,tusharmakkar08/Diamond,Ensighten/Diamond,sebbrandt87/Diamond,MediaMath/Diamond,russss/Diamond,krbaker/Diamond,anandbhoraskar/Diamond,szibis/Diamond,jumping/Diamond,python-diamond/Diamond,datafiniti/Diamond,Precis/Diamond,anandbhoraskar/Diamond,disqus/Diamond,Ssawa/Diamond,anandbhoraskar/Diamond,EzyInsights/Diamond,TinLe/Diamond,saucelabs/Diamond,bmhatfield/Diamond,acquia/Diamond,saucelabs/Diamond,datafiniti/Diamond,hamelg/Diamond,tuenti/Diamond,skbkontur/Diamond,python-diamond/Diamond,works-mobile/Diamond,thardie/Diamond,mfriedenhagen/Diamond,eMerzh/Diamond-1,h00dy/Diamond,actmd/Diamond,Netuitive/netuitive-diamond,stuartbfox/Diamond,ramjothikumar/Diamond,MediaMath/Diamond,TinLe/Diamond,tusharmakkar08/Diamond,Netuitive/netuitive-diamond,Precis/Diamond,Basis/Diamond,eMerzh/Diamond-1,ceph/Diamond,bmhatfield/Diamond,TAKEALOT/Diamond,signalfx/Diamond,socialwareinc/Diamond,metamx/Diamond,jaingaurav/Diamond,h00dy/Diamond,jaingaurav/Diamond,tellapart/Diamond,russss/Diamond,Nihn/Diamond-1,mzupan/Diamond,Netuitive/Diamond,EzyInsights/Diamond,ramjothikumar/Diamond,MichaelDoyle/Diamond,socialwareinc/Diamond,tellapart/Diamond,tuenti/Diamond,mfriedenhagen/Diamond,tusharmakkar08/Diamond,skbkontur/Diamond,Netuitive/netuitive-diamond,zoidbergwill/Diamond,hamelg/Diamond,jumping/Diamond,jriguera/Diamond,Precis/Diamond,hvnsweeting/Diamond,rtoma/Diamond,jaingaurav/Diamond,ceph/Diamond,jumping/Diamond,MichaelDoyle/Diamond,sebbrandt87/Diamond,TAKEALOT/Diamond,Ormod/Diamond,signalfx/Diamond,Slach/Diamond,tuenti/Diamond,zoidbergwill/Diamond,jriguera/Diamond,zoidbergwill/Diamond,hvnsweeting/Diamond,tellapart/Diamond,Ssawa/Diamond,timchenxiaoyu/Diamond,tuenti/Diamond,Precis/Diamond,Clever/Diamond,Clever/Diamond,Nihn/Diamond-1,timchenxiaoyu/Diamond,gg7/diamond,disqus/Diamond,CYBERBUGJR/Diamond,skbkontur/Diamond,Ormod/Diamond,Ensighten/Diamond,Ormod/Diamond,jriguera/Diamond,Slach/Diamond,Basis/Diamond,mzupan/Diamond,stuartbfox/Diamond,eMerzh/Diamond-1,CYBERBUGJR/Diamond,zoidbergwill/Diamond,krbaker/Diamond,h00dy/Diamond,thardie/Diamond,Clever/Diamond,szibis/Diamond,janisz/Diamond-1,anandbhoraskar/Diamond,mfriedenhagen/Diamond,Netuitive/netuitive-diamond,bmhatfield/Diamond,MichaelDoyle/Diamond,TAKEALOT/Diamond,MediaMath/Diamond,cannium/Diamond,Basis/Diamond,cannium/Diamond,Ensighten/Diamond,codepython/Diamond,metamx/Diamond,h00dy/Diamond,krbaker/Diamond,timchenxiaoyu/Diamond,Ormod/Diamond,gg7/diamond,TAKEALOT/Diamond,EzyInsights/Diamond,saucelabs/Diamond,cannium/Diamond,acquia/Diamond,actmd/Diamond,Ssawa/Diamond,timchenxiaoyu/Diamond,szibis/Diamond,actmd/Diamond,sebbrandt87/Diamond,metamx/Diamond,Basis/Diamond,gg7/diamond,joel-airspring/Diamond,Netuitive/Diamond,signalfx/Diamond,janisz/Diamond-1,MediaMath/Diamond,russss/Diamond,socialwareinc/Diamond,disqus/Diamond,jumping/Diamond,Nihn/Diamond-1,hamelg/Diamond,codepython/Diamond,acquia/Diamond,works-mobile/Diamond,Ensighten/Diamond,joel-airspring/Diamond,hamelg/Diamond,CYBERBUGJR/Diamond,works-mobile/Diamond,Nihn/Diamond-1,stuartbfox/Diamond,joel-airspring/Diamond,d
atafiniti/Diamond,dcsquared13/Diamond,EzyInsights/Diamond,hvnsweeting/Diamond,tellapart/Diamond,janisz/Diamond-1,CYBERBUGJR/Diamond,joel-airspring/Diamond,rtoma/Diamond,krbaker/Diamond,acquia/Diamond,Netuitive/Diamond,codepython/Diamond,russss/Diamond,tusharmakkar08/Diamond,Slach/Diamond,stuartbfox/Diamond,thardie/Diamond,ceph/Diamond,janisz/Diamond-1,ramjothikumar/Diamond,Slach/Diamond,ceph/Diamond,mzupan/Diamond,dcsquared13/Diamond,Clever/Diamond,MichaelDoyle/Diamond,rtoma/Diamond
|
#!/usr/bin/env python
# coding=utf-8
from Handler import Handler
import urllib2
class HttpPostHandler(Handler):
    # Initialize Handler with url and batch size
def __init__(self, config=None):
Handler.__init__(self, config)
self.metrics = []
self.batch_size = int(self.config.get('batch', 100))
self.url = self.config.get('url')
# Join batched metrics and push to url mentioned in config
def process(self, metric):
self.metrics.append(str(metric))
if len(self.metrics) >= self.batch_size:
req = urllib2.Request(self.url, "\n".join(self.metrics))
urllib2.urlopen(req)
self.metrics = []
Add in basic HttpPostHandler docs
|
#!/usr/bin/env python
# coding=utf-8
"""
Send metrics to an HTTP endpoint via POST
#### Dependencies
* urllib2
#### Configuration
Enable this handler
* handlers = diamond.handler.httpHandler.HttpPostHandler
* url = http://www.example.com/endpoint
"""
from Handler import Handler
import urllib2
class HttpPostHandler(Handler):
    # Initialize Handler with url and batch size
def __init__(self, config=None):
Handler.__init__(self, config)
self.metrics = []
self.batch_size = int(self.config.get('batch', 100))
self.url = self.config.get('url')
# Join batched metrics and push to url mentioned in config
def process(self, metric):
self.metrics.append(str(metric))
if len(self.metrics) >= self.batch_size:
req = urllib2.Request(self.url, "\n".join(self.metrics))
urllib2.urlopen(req)
self.metrics = []
|
<commit_before>#!/usr/bin/env python
# coding=utf-8
from Handler import Handler
import urllib2
class HttpPostHandler(Handler):
    # Initialize Handler with url and batch size
def __init__(self, config=None):
Handler.__init__(self, config)
self.metrics = []
self.batch_size = int(self.config.get('batch', 100))
self.url = self.config.get('url')
# Join batched metrics and push to url mentioned in config
def process(self, metric):
self.metrics.append(str(metric))
if len(self.metrics) >= self.batch_size:
req = urllib2.Request(self.url, "\n".join(self.metrics))
urllib2.urlopen(req)
self.metrics = []
<commit_msg>Add in basic HttpPostHandler docs<commit_after>
|
#!/usr/bin/env python
# coding=utf-8
"""
Send metrics to an HTTP endpoint via POST
#### Dependencies
* urllib2
#### Configuration
Enable this handler
* handlers = diamond.handler.httpHandler.HttpPostHandler
* url = http://www.example.com/endpoint
"""
from Handler import Handler
import urllib2
class HttpPostHandler(Handler):
    # Initialize Handler with url and batch size
def __init__(self, config=None):
Handler.__init__(self, config)
self.metrics = []
self.batch_size = int(self.config.get('batch', 100))
self.url = self.config.get('url')
# Join batched metrics and push to url mentioned in config
def process(self, metric):
self.metrics.append(str(metric))
if len(self.metrics) >= self.batch_size:
req = urllib2.Request(self.url, "\n".join(self.metrics))
urllib2.urlopen(req)
self.metrics = []
|
#!/usr/bin/env python
# coding=utf-8
from Handler import Handler
import urllib2
class HttpPostHandler(Handler):
    # Initialize Handler with url and batch size
def __init__(self, config=None):
Handler.__init__(self, config)
self.metrics = []
self.batch_size = int(self.config.get('batch', 100))
self.url = self.config.get('url')
# Join batched metrics and push to url mentioned in config
def process(self, metric):
self.metrics.append(str(metric))
if len(self.metrics) >= self.batch_size:
req = urllib2.Request(self.url, "\n".join(self.metrics))
urllib2.urlopen(req)
self.metrics = []
Add in basic HttpPostHandler docs#!/usr/bin/env python
# coding=utf-8
"""
Send metrics to an HTTP endpoint via POST
#### Dependencies
* urllib2
#### Configuration
Enable this handler
* handlers = diamond.handler.httpHandler.HttpPostHandler
* url = http://www.example.com/endpoint
"""
from Handler import Handler
import urllib2
class HttpPostHandler(Handler):
    # Initialize Handler with url and batch size
def __init__(self, config=None):
Handler.__init__(self, config)
self.metrics = []
self.batch_size = int(self.config.get('batch', 100))
self.url = self.config.get('url')
# Join batched metrics and push to url mentioned in config
def process(self, metric):
self.metrics.append(str(metric))
if len(self.metrics) >= self.batch_size:
req = urllib2.Request(self.url, "\n".join(self.metrics))
urllib2.urlopen(req)
self.metrics = []
|
<commit_before>#!/usr/bin/env python
# coding=utf-8
from Handler import Handler
import urllib2
class HttpPostHandler(Handler):
    # Initialize Handler with url and batch size
def __init__(self, config=None):
Handler.__init__(self, config)
self.metrics = []
self.batch_size = int(self.config.get('batch', 100))
self.url = self.config.get('url')
# Join batched metrics and push to url mentioned in config
def process(self, metric):
self.metrics.append(str(metric))
if len(self.metrics) >= self.batch_size:
req = urllib2.Request(self.url, "\n".join(self.metrics))
urllib2.urlopen(req)
self.metrics = []
<commit_msg>Add in basic HttpPostHandler docs<commit_after>#!/usr/bin/env python
# coding=utf-8
"""
Send metrics to an HTTP endpoint via POST
#### Dependencies
* urllib2
#### Configuration
Enable this handler
* handlers = diamond.handler.httpHandler.HttpPostHandler
* url = http://www.example.com/endpoint
"""
from Handler import Handler
import urllib2
class HttpPostHandler(Handler):
    # Initialize Handler with url and batch size
def __init__(self, config=None):
Handler.__init__(self, config)
self.metrics = []
self.batch_size = int(self.config.get('batch', 100))
self.url = self.config.get('url')
# Join batched metrics and push to url mentioned in config
def process(self, metric):
self.metrics.append(str(metric))
if len(self.metrics) >= self.batch_size:
req = urllib2.Request(self.url, "\n".join(self.metrics))
urllib2.urlopen(req)
self.metrics = []
|
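A practical note on the handler above: urllib2.urlopen blocks without limit by default, so a slow endpoint can stall the whole collector, and a failed POST silently drops the batch. A hedged sketch of a more defensive flush (the helper name and keep-for-retry policy are assumptions, not Diamond API):
import urllib2

def flush_batch(url, metrics, timeout=5):
    # Bound the request so a slow endpoint cannot stall metric collection;
    # on failure, hand the batch back so the caller can retry it later.
    req = urllib2.Request(url, "\n".join(metrics))
    try:
        urllib2.urlopen(req, timeout=timeout)
        return []
    except urllib2.URLError:
        return metrics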
be0d4b9e2e62490cab62a39499e570bdab1ac2f5
|
cmp_imgs.py
|
cmp_imgs.py
|
#!/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import imread
def rgb2gray(img):
return np.dot(img, [0.299, 0.587, 0.114])
if __name__ == "__main__":
img_name = "1920x1080.jpg"
img = imread(img_name)
gray_img = rgb2gray(img)
plt.imshow(gray_img, cmap=plt.cm.gray)
plt.show()
resized = np.ndarray((64, 64))
for (i,j),_ in np.ndenumerate(resized):
f1, f2 = gray_img.shape[0] // 64, gray_img.shape[1] // 64
slisse = gray_img[i*f1:(i+1)*f1,j*f2:(j+1)*f2]
resized[i,j] = np.max(slisse)
plt.imshow(resized, cmap=plt.cm.gray)
plt.show()
|
Convert an image to grayscale and resize it.
|
Convert an image to grayscale and resize it.
|
Python
|
mit
|
HKervadec/cmp_imgs
|
Convert an image to grayscale and resize it.
|
#!/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import imread
def rgb2gray(img):
return np.dot(img, [0.299, 0.587, 0.114])
if __name__ == "__main__":
img_name = "1920x1080.jpg"
img = imread(img_name)
gray_img = rgb2gray(img)
plt.imshow(gray_img, cmap=plt.cm.gray)
plt.show()
resized = np.ndarray((64, 64))
for (i,j),_ in np.ndenumerate(resized):
f1, f2 = gray_img.shape[0] // 64, gray_img.shape[1] // 64
slisse = gray_img[i*f1:(i+1)*f1,j*f2:(j+1)*f2]
resized[i,j] = np.max(slisse)
plt.imshow(resized, cmap=plt.cm.gray)
plt.show()
|
<commit_before><commit_msg>Convert an image to grayscale and resize it.<commit_after>
|
#!/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import imread
def rgb2gray(img):
return np.dot(img, [0.299, 0.587, 0.114])
if __name__ == "__main__":
img_name = "1920x1080.jpg"
img = imread(img_name)
gray_img = rgb2gray(img)
plt.imshow(gray_img, cmap=plt.cm.gray)
plt.show()
resized = np.ndarray((64, 64))
for (i,j),_ in np.ndenumerate(resized):
f1, f2 = gray_img.shape[0] // 64, gray_img.shape[1] // 64
slisse = gray_img[i*f1:(i+1)*f1,j*f2:(j+1)*f2]
resized[i,j] = np.max(slisse)
plt.imshow(resized, cmap=plt.cm.gray)
plt.show()
|
Convert an image to grayscale and resize it.#!/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import imread
def rgb2gray(img):
return np.dot(img, [0.299, 0.587, 0.114])
if __name__ == "__main__":
img_name = "1920x1080.jpg"
img = imread(img_name)
gray_img = rgb2gray(img)
plt.imshow(gray_img, cmap=plt.cm.gray)
plt.show()
resized = np.ndarray((64, 64))
for (i,j),_ in np.ndenumerate(resized):
f1, f2 = gray_img.shape[0] // 64, gray_img.shape[1] // 64
slisse = gray_img[i*f1:(i+1)*f1,j*f2:(j+1)*f2]
resized[i,j] = np.max(slisse)
plt.imshow(resized, cmap=plt.cm.gray)
plt.show()
|
<commit_before><commit_msg>Convert an image to grayscale and resize it.<commit_after>#!/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import imread
def rgb2gray(img):
return np.dot(img, [0.299, 0.587, 0.114])
if __name__ == "__main__":
img_name = "1920x1080.jpg"
img = imread(img_name)
gray_img = rgb2gray(img)
plt.imshow(gray_img, cmap=plt.cm.gray)
plt.show()
resized = np.ndarray((64, 64))
for (i,j),_ in np.ndenumerate(resized):
f1, f2 = gray_img.shape[0] // 64, gray_img.shape[1] // 64
slisse = gray_img[i*f1:(i+1)*f1,j*f2:(j+1)*f2]
resized[i,j] = np.max(slisse)
plt.imshow(resized, cmap=plt.cm.gray)
plt.show()
|
|
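The nested loop above is effectively 64x64 max-pooling; when the image is cropped to a whole number of blocks, the same downsample can be done in one vectorized step (a sketch over the same gray_img array; the reshape trick assumes the crop shown below):
h, w = gray_img.shape
f1, f2 = h // 64, w // 64
# Crop to a whole number of blocks, then take the max of each f1 x f2 block.
resized = (gray_img[:64 * f1, :64 * f2]
           .reshape(64, f1, 64, f2)
           .max(axis=(1, 3)))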
20d7a5cff131448f1960ac8cd03550739e19d698
|
utest/namespace/test_retrievercontextfactory.py
|
utest/namespace/test_retrievercontextfactory.py
|
import unittest
from robotide.namespace.namespace import _RetrieverContextFactory
from robot.parsing.model import ResourceFile
from robot.utils.asserts import assert_equals
def datafileWithVariables(vars):
data = ResourceFile()
for var in vars:
data.variable_table.add(var, vars[var])
return data
class RetrieverContextFactoryTest(unittest.TestCase):
def test_created_context_has_variable_table_variables(self):
factory = _RetrieverContextFactory()
ctx = factory.ctx_for_datafile(datafileWithVariables({'${foo}':'moi',
'${bar}': 'hoi',
'@{zoo}': 'koi'}))
result = ctx.vars.replace_variables('!${foo}!${bar}!@{zoo}!')
assert_equals(result, "!moi!hoi!['koi']!")
if __name__ == '__main__':
unittest.main()
|
Add test for retriever context factory
|
Add test for retriever context factory
|
Python
|
apache-2.0
|
robotframework/RIDE,caio2k/RIDE,robotframework/RIDE,fingeronthebutton/RIDE,robotframework/RIDE,HelioGuilherme66/RIDE,robotframework/RIDE,fingeronthebutton/RIDE,HelioGuilherme66/RIDE,HelioGuilherme66/RIDE,fingeronthebutton/RIDE,caio2k/RIDE,caio2k/RIDE,HelioGuilherme66/RIDE
|
Add test for retriever context factory
|
import unittest
from robotide.namespace.namespace import _RetrieverContextFactory
from robot.parsing.model import ResourceFile
from robot.utils.asserts import assert_equals
def datafileWithVariables(vars):
data = ResourceFile()
for var in vars:
data.variable_table.add(var, vars[var])
return data
class RetrieverContextFactoryTest(unittest.TestCase):
def test_created_context_has_variable_table_variables(self):
factory = _RetrieverContextFactory()
ctx = factory.ctx_for_datafile(datafileWithVariables({'${foo}':'moi',
'${bar}': 'hoi',
'@{zoo}': 'koi'}))
result = ctx.vars.replace_variables('!${foo}!${bar}!@{zoo}!')
assert_equals(result, "!moi!hoi!['koi']!")
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for retriever context factory<commit_after>
|
import unittest
from robotide.namespace.namespace import _RetrieverContextFactory
from robot.parsing.model import ResourceFile
from robot.utils.asserts import assert_equals
def datafileWithVariables(vars):
data = ResourceFile()
for var in vars:
data.variable_table.add(var, vars[var])
return data
class RetrieverContextFactoryTest(unittest.TestCase):
def test_created_context_has_variable_table_variables(self):
factory = _RetrieverContextFactory()
ctx = factory.ctx_for_datafile(datafileWithVariables({'${foo}':'moi',
'${bar}': 'hoi',
'@{zoo}': 'koi'}))
result = ctx.vars.replace_variables('!${foo}!${bar}!@{zoo}!')
assert_equals(result, "!moi!hoi!['koi']!")
if __name__ == '__main__':
unittest.main()
|
Add test for retriever context factoryimport unittest
from robotide.namespace.namespace import _RetrieverContextFactory
from robot.parsing.model import ResourceFile
from robot.utils.asserts import assert_equals
def datafileWithVariables(vars):
data = ResourceFile()
for var in vars:
data.variable_table.add(var, vars[var])
return data
class RetrieverContextFactoryTest(unittest.TestCase):
def test_created_context_has_variable_table_variables(self):
factory = _RetrieverContextFactory()
ctx = factory.ctx_for_datafile(datafileWithVariables({'${foo}':'moi',
'${bar}': 'hoi',
'@{zoo}': 'koi'}))
result = ctx.vars.replace_variables('!${foo}!${bar}!@{zoo}!')
assert_equals(result, "!moi!hoi!['koi']!")
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for retriever context factory<commit_after>import unittest
from robotide.namespace.namespace import _RetrieverContextFactory
from robot.parsing.model import ResourceFile
from robot.utils.asserts import assert_equals
def datafileWithVariables(vars):
data = ResourceFile()
for var in vars:
data.variable_table.add(var, vars[var])
return data
class RetrieverContextFactoryTest(unittest.TestCase):
def test_created_context_has_variable_table_variables(self):
factory = _RetrieverContextFactory()
ctx = factory.ctx_for_datafile(datafileWithVariables({'${foo}':'moi',
'${bar}': 'hoi',
'@{zoo}': 'koi'}))
result = ctx.vars.replace_variables('!${foo}!${bar}!@{zoo}!')
assert_equals(result, "!moi!hoi!['koi']!")
if __name__ == '__main__':
unittest.main()
|
|
a809ba1af45726f8aed7ba4b079063629406c52b
|
st2common/tests/unit/test_service_setup.py
|
st2common/tests/unit/test_service_setup.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
from st2common import service_setup
from st2tests.base import CleanFilesTestCase
from st2tests import config
__all__ = [
'ServiceSetupTestCase'
]
MOCK_LOGGING_CONFIG_INVALID_LOG_LEVEL = """
[loggers]
keys=root
[handlers]
keys=consoleHandler
[formatters]
keys=simpleConsoleFormatter
[logger_root]
level=invalid_log_level
handlers=consoleHandler
[handler_consoleHandler]
class=StreamHandler
level=DEBUG
formatter=simpleConsoleFormatter
args=(sys.stdout,)
[formatter_simpleConsoleFormatter]
class=st2common.logging.formatters.ConsoleLogFormatter
format=%(asctime)s %(levelname)s [-] %(message)s
datefmt=
""".strip()
class ServiceSetupTestCase(CleanFilesTestCase):
def test_invalid_log_level_friendly_error_message(self):
_, mock_logging_config_path = tempfile.mkstemp()
self.to_delete_files.append(mock_logging_config_path)
with open(mock_logging_config_path, 'w') as fp:
fp.write(MOCK_LOGGING_CONFIG_INVALID_LOG_LEVEL)
def mock_get_logging_config_path():
return mock_logging_config_path
config.get_logging_config_path = mock_get_logging_config_path
expected_msg = 'Invalid log level selected. Log level names need to be all uppercase'
self.assertRaisesRegexp(KeyError, expected_msg, service_setup.setup, service='api',
config=config,
setup_db=False, register_mq_exchanges=False,
register_signal_handlers=False,
register_internal_trigger_types=False,
run_migrations=False)
|
Add a test case for invalid log level friendly error message during service setup.
|
Add a test case for invalid log level friendly error message during
service setup.
|
Python
|
apache-2.0
|
StackStorm/st2,nzlosh/st2,Plexxi/st2,Plexxi/st2,nzlosh/st2,Plexxi/st2,StackStorm/st2,nzlosh/st2,StackStorm/st2,StackStorm/st2,nzlosh/st2,Plexxi/st2
|
Add a test case for invalid log level friendly error message during
service setup.
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
from st2common import service_setup
from st2tests.base import CleanFilesTestCase
from st2tests import config
__all__ = [
'ServiceSetupTestCase'
]
MOCK_LOGGING_CONFIG_INVALID_LOG_LEVEL = """
[loggers]
keys=root
[handlers]
keys=consoleHandler
[formatters]
keys=simpleConsoleFormatter
[logger_root]
level=invalid_log_level
handlers=consoleHandler
[handler_consoleHandler]
class=StreamHandler
level=DEBUG
formatter=simpleConsoleFormatter
args=(sys.stdout,)
[formatter_simpleConsoleFormatter]
class=st2common.logging.formatters.ConsoleLogFormatter
format=%(asctime)s %(levelname)s [-] %(message)s
datefmt=
""".strip()
class ServiceSetupTestCase(CleanFilesTestCase):
def test_invalid_log_level_friendly_error_message(self):
_, mock_logging_config_path = tempfile.mkstemp()
self.to_delete_files.append(mock_logging_config_path)
with open(mock_logging_config_path, 'w') as fp:
fp.write(MOCK_LOGGING_CONFIG_INVALID_LOG_LEVEL)
def mock_get_logging_config_path():
return mock_logging_config_path
config.get_logging_config_path = mock_get_logging_config_path
expected_msg = 'Invalid log level selected. Log level names need to be all uppercase'
self.assertRaisesRegexp(KeyError, expected_msg, service_setup.setup, service='api',
config=config,
setup_db=False, register_mq_exchanges=False,
register_signal_handlers=False,
register_internal_trigger_types=False,
run_migrations=False)
|
<commit_before><commit_msg>Add a test case for invalid log level friendly error message during
service setup.<commit_after>
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
from st2common import service_setup
from st2tests.base import CleanFilesTestCase
from st2tests import config
__all__ = [
'ServiceSetupTestCase'
]
MOCK_LOGGING_CONFIG_INVALID_LOG_LEVEL = """
[loggers]
keys=root
[handlers]
keys=consoleHandler
[formatters]
keys=simpleConsoleFormatter
[logger_root]
level=invalid_log_level
handlers=consoleHandler
[handler_consoleHandler]
class=StreamHandler
level=DEBUG
formatter=simpleConsoleFormatter
args=(sys.stdout,)
[formatter_simpleConsoleFormatter]
class=st2common.logging.formatters.ConsoleLogFormatter
format=%(asctime)s %(levelname)s [-] %(message)s
datefmt=
""".strip()
class ServiceSetupTestCase(CleanFilesTestCase):
def test_invalid_log_level_friendly_error_message(self):
_, mock_logging_config_path = tempfile.mkstemp()
self.to_delete_files.append(mock_logging_config_path)
with open(mock_logging_config_path, 'w') as fp:
fp.write(MOCK_LOGGING_CONFIG_INVALID_LOG_LEVEL)
def mock_get_logging_config_path():
return mock_logging_config_path
config.get_logging_config_path = mock_get_logging_config_path
expected_msg = 'Invalid log level selected. Log level names need to be all uppercase'
self.assertRaisesRegexp(KeyError, expected_msg, service_setup.setup, service='api',
config=config,
setup_db=False, register_mq_exchanges=False,
register_signal_handlers=False,
register_internal_trigger_types=False,
run_migrations=False)
|
Add a test case for invalid log level friendly error message during
service setup.# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
from st2common import service_setup
from st2tests.base import CleanFilesTestCase
from st2tests import config
__all__ = [
'ServiceSetupTestCase'
]
MOCK_LOGGING_CONFIG_INVALID_LOG_LEVEL = """
[loggers]
keys=root
[handlers]
keys=consoleHandler
[formatters]
keys=simpleConsoleFormatter
[logger_root]
level=invalid_log_level
handlers=consoleHandler
[handler_consoleHandler]
class=StreamHandler
level=DEBUG
formatter=simpleConsoleFormatter
args=(sys.stdout,)
[formatter_simpleConsoleFormatter]
class=st2common.logging.formatters.ConsoleLogFormatter
format=%(asctime)s %(levelname)s [-] %(message)s
datefmt=
""".strip()
class ServiceSetupTestCase(CleanFilesTestCase):
def test_invalid_log_level_friendly_error_message(self):
_, mock_logging_config_path = tempfile.mkstemp()
self.to_delete_files.append(mock_logging_config_path)
with open(mock_logging_config_path, 'w') as fp:
fp.write(MOCK_LOGGING_CONFIG_INVALID_LOG_LEVEL)
def mock_get_logging_config_path():
return mock_logging_config_path
config.get_logging_config_path = mock_get_logging_config_path
expected_msg = 'Invalid log level selected. Log level names need to be all uppercase'
self.assertRaisesRegexp(KeyError, expected_msg, service_setup.setup, service='api',
config=config,
setup_db=False, register_mq_exchanges=False,
register_signal_handlers=False,
register_internal_trigger_types=False,
run_migrations=False)
|
<commit_before><commit_msg>Add a test case for invalid log level friendly error message during
service setup.<commit_after># Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
from st2common import service_setup
from st2tests.base import CleanFilesTestCase
from st2tests import config
__all__ = [
'ServiceSetupTestCase'
]
MOCK_LOGGING_CONFIG_INVALID_LOG_LEVEL = """
[loggers]
keys=root
[handlers]
keys=consoleHandler
[formatters]
keys=simpleConsoleFormatter
[logger_root]
level=invalid_log_level
handlers=consoleHandler
[handler_consoleHandler]
class=StreamHandler
level=DEBUG
formatter=simpleConsoleFormatter
args=(sys.stdout,)
[formatter_simpleConsoleFormatter]
class=st2common.logging.formatters.ConsoleLogFormatter
format=%(asctime)s %(levelname)s [-] %(message)s
datefmt=
""".strip()
class ServiceSetupTestCase(CleanFilesTestCase):
def test_invalid_log_level_friendly_error_message(self):
_, mock_logging_config_path = tempfile.mkstemp()
self.to_delete_files.append(mock_logging_config_path)
with open(mock_logging_config_path, 'w') as fp:
fp.write(MOCK_LOGGING_CONFIG_INVALID_LOG_LEVEL)
def mock_get_logging_config_path():
return mock_logging_config_path
config.get_logging_config_path = mock_get_logging_config_path
expected_msg = 'Invalid log level selected. Log level names need to be all uppercase'
self.assertRaisesRegexp(KeyError, expected_msg, service_setup.setup, service='api',
config=config,
setup_db=False, register_mq_exchanges=False,
register_signal_handlers=False,
register_internal_trigger_types=False,
run_migrations=False)
|
|
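One fragility in the test above: reassigning config.get_logging_config_path directly leaks the stub into any later test that touches the same module. The same override with automatic cleanup, sketched with the mock library (its availability in this suite is an assumption):
import mock

with mock.patch.object(config, 'get_logging_config_path',
                       return_value=mock_logging_config_path):
    self.assertRaisesRegexp(KeyError, expected_msg, service_setup.setup,
                            service='api', config=config, setup_db=False,
                            register_mq_exchanges=False,
                            register_signal_handlers=False,
                            register_internal_trigger_types=False,
                            run_migrations=False)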
447206a785b9563e82dfbd28e1cd2c5ef10a57f2
|
src/ggrc_risks/migrations/versions/20151029154646_2837682ad516_rename_threat_actors_to_threat.py
|
src/ggrc_risks/migrations/versions/20151029154646_2837682ad516_rename_threat_actors_to_threat.py
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Rename threat actors to threat
Revision ID: 2837682ad516
Revises: 39518b8ea21d
Create Date: 2015-10-29 15:46:46.294919
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '2837682ad516'
down_revision = '39518b8ea21d'
def upgrade():
op.execute("RENAME TABLE threat_actors TO threats")
def downgrade():
op.execute("RENAME TABLE threats TO threat_actors")
|
Add a migration for threat actor -> threat
|
Add a migration for threat actor -> threat
|
Python
|
apache-2.0
|
plamut/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,jmakov/ggrc-core,prasannav7/ggrc-core,NejcZupec/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,jmakov/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,jmakov/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,edofic/ggrc-core,kr41/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,jmakov/ggrc-core,kr41/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,NejcZupec/ggrc-core,j0gurt/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,j0gurt/ggrc-core,prasannav7/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,jmakov/ggrc-core,prasannav7/ggrc-core,prasannav7/ggrc-core,andrei-karalionak/ggrc-core
|
Add a migration for threat actor -> threat
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Rename threat actors to threat
Revision ID: 2837682ad516
Revises: 39518b8ea21d
Create Date: 2015-10-29 15:46:46.294919
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '2837682ad516'
down_revision = '39518b8ea21d'
def upgrade():
op.execute("RENAME TABLE threat_actors TO threats")
def downgrade():
op.execute("RENAME TABLE threats TO threat_actors")
|
<commit_before><commit_msg>Add a migration for threat actor -> threat<commit_after>
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Rename threat actors to threat
Revision ID: 2837682ad516
Revises: 39518b8ea21d
Create Date: 2015-10-29 15:46:46.294919
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '2837682ad516'
down_revision = '39518b8ea21d'
def upgrade():
op.execute("RENAME TABLE threat_actors TO threats")
def downgrade():
op.execute("RENAME TABLE threats TO threat_actors")
|
Add a migration for threat actor -> threat# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Rename threat actors to threat
Revision ID: 2837682ad516
Revises: 39518b8ea21d
Create Date: 2015-10-29 15:46:46.294919
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '2837682ad516'
down_revision = '39518b8ea21d'
def upgrade():
op.execute("RENAME TABLE threat_actors TO threats")
def downgrade():
op.execute("RENAME TABLE threats TO threat_actors")
|
<commit_before><commit_msg>Add a migration for threat actor -> threat<commit_after># Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Rename threat actors to threat
Revision ID: 2837682ad516
Revises: 39518b8ea21d
Create Date: 2015-10-29 15:46:46.294919
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '2837682ad516'
down_revision = '39518b8ea21d'
def upgrade():
op.execute("RENAME TABLE threat_actors TO threats")
def downgrade():
op.execute("RENAME TABLE threats TO threat_actors")
|
|
6be193e6287a1823d6216205e5dbdbcb46895612
|
Lib/test/test_timing.py
|
Lib/test/test_timing.py
|
from test_support import verbose
import timing
r = range(100000)
if verbose:
print 'starting...'
timing.start()
for i in r:
pass
timing.finish()
if verbose:
print 'finished'
secs = timing.seconds()
milli = timing.milli()
micro = timing.micro()
if verbose:
print 'seconds:', secs
print 'milli :', milli
print 'micro :', micro
|
Test of the timing module
|
Test of the timing module
|
Python
|
mit
|
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
|
Test of the timing module
|
from test_support import verbose
import timing
r = range(100000)
if verbose:
print 'starting...'
timing.start()
for i in r:
pass
timing.finish()
if verbose:
print 'finished'
secs = timing.seconds()
milli = timing.milli()
micro = timing.micro()
if verbose:
print 'seconds:', secs
print 'milli :', milli
print 'micro :', micro
|
<commit_before><commit_msg>Test of the timing module<commit_after>
|
from test_support import verbose
import timing
r = range(100000)
if verbose:
print 'starting...'
timing.start()
for i in r:
pass
timing.finish()
if verbose:
print 'finished'
secs = timing.seconds()
milli = timing.milli()
micro = timing.micro()
if verbose:
print 'seconds:', secs
print 'milli :', milli
print 'micro :', micro
|
Test of the timing modulefrom test_support import verbose
import timing
r = range(100000)
if verbose:
print 'starting...'
timing.start()
for i in r:
pass
timing.finish()
if verbose:
print 'finished'
secs = timing.seconds()
milli = timing.milli()
micro = timing.micro()
if verbose:
print 'seconds:', secs
print 'milli :', milli
print 'micro :', micro
|
<commit_before><commit_msg>Test of the timing module<commit_after>from test_support import verbose
import timing
r = range(100000)
if verbose:
print 'starting...'
timing.start()
for i in r:
pass
timing.finish()
if verbose:
print 'finished'
secs = timing.seconds()
milli = timing.milli()
micro = timing.micro()
if verbose:
print 'seconds:', secs
print 'milli :', milli
print 'micro :', micro
|
|
e38211248504bb87b73775e0157a7e1d2dace6ed
|
lib/util/lamearecord.py
|
lib/util/lamearecord.py
|
# coding: utf-8
import os
import re
import shlex
from subprocess import Popen, PIPE, call
def available_devices():
    devices = []
os.environ['LANG'] = 'C'
command = 'arecord -l'
arecord_l = Popen(shlex.split(command), stdout=PIPE, stderr=PIPE)
arecord_l.wait()
if arecord_l.returncode != 0:
print(arecord_l.stderr.read())
return devices
for line in arecord_l.stdout.readlines():
m = re.match('^card ([0-9]): .*, device ([0-9]): .*$', line.decode('utf-8'))
if not m:
continue
card = m.group(1)
device = m.group(2)
if card and device:
devices.append('hw:%s,%s' % (card, device))
return devices
def record_wav(hw_id, duration, filename):
command = 'arecord -D %s -f S16_LE -d %d %s' % (hw_id, duration, filename)
call(shlex.split(command))
|
Add sound recording tool with arecord and lame
|
Add sound recording tool with arecord and lame
|
Python
|
apache-2.0
|
nknytk/home-recorder,nknytk/home-recorder
|
Add sound recording tool with arecord and lame
|
# coding: utf-8
import os
import re
import shlex
from subprocess import Popen, PIPE, call
def available_devices():
devices =[]
os.environ['LANG'] = 'C'
command = 'arecord -l'
arecord_l = Popen(shlex.split(command), stdout=PIPE, stderr=PIPE)
arecord_l.wait()
if arecord_l.returncode != 0:
print(arecord_l.stderr.read())
return devices
for line in arecord_l.stdout.readlines():
m = re.match('^card ([0-9]): .*, device ([0-9]): .*$', line.decode('utf-8'))
if not m:
continue
card = m.group(1)
device = m.group(2)
if card and device:
devices.append('hw:%s,%s' % (card, device))
return devices
def record_wav(hw_id, duration, filename):
command = 'arecord -D %s -f S16_LE -d %d %s' % (hw_id, duration, filename)
call(shlex.split(command))
|
<commit_before><commit_msg>Add sound recording tool with arecord and lame<commit_after>
|
# coding: utf-8
import os
import re
import shlex
from subprocess import Popen, PIPE, call
def available_devices():
devices =[]
os.environ['LANG'] = 'C'
command = 'arecord -l'
arecord_l = Popen(shlex.split(command), stdout=PIPE, stderr=PIPE)
arecord_l.wait()
if arecord_l.returncode != 0:
print(arecord_l.stderr.read())
return devices
for line in arecord_l.stdout.readlines():
m = re.match('^card ([0-9]): .*, device ([0-9]): .*$', line.decode('utf-8'))
if not m:
continue
card = m.group(1)
device = m.group(2)
if card and device:
devices.append('hw:%s,%s' % (card, device))
return devices
def record_wav(hw_id, duration, filename):
command = 'arecord -D %s -f S16_LE -d %d %s' % (hw_id, duration, filename)
call(shlex.split(command))
|
Add sound recording tool with arecord and lame# coding: utf-8
import os
import re
import shlex
from subprocess import Popen, PIPE, call
def available_devices():
devices =[]
os.environ['LANG'] = 'C'
command = 'arecord -l'
arecord_l = Popen(shlex.split(command), stdout=PIPE, stderr=PIPE)
arecord_l.wait()
if arecord_l.returncode != 0:
print(arecord_l.stderr.read())
return devices
for line in arecord_l.stdout.readlines():
m = re.match('^card ([0-9]): .*, device ([0-9]): .*$', line.decode('utf-8'))
if not m:
continue
card = m.group(1)
device = m.group(2)
if card and device:
devices.append('hw:%s,%s' % (card, device))
return devices
def record_wav(hw_id, duration, filename):
command = 'arecord -D %s -f S16_LE -d %d %s' % (hw_id, duration, filename)
call(shlex.split(command))
|
<commit_before><commit_msg>Add sound recording tool with arecord and lame<commit_after># coding: utf-8
import os
import re
import shlex
from subprocess import Popen, PIPE, call
def available_devices():
devices =[]
os.environ['LANG'] = 'C'
command = 'arecord -l'
arecord_l = Popen(shlex.split(command), stdout=PIPE, stderr=PIPE)
arecord_l.wait()
if arecord_l.returncode != 0:
print(arecord_l.stderr.read())
return devices
for line in arecord_l.stdout.readlines():
m = re.match('^card ([0-9]): .*, device ([0-9]): .*$', line.decode('utf-8'))
if not m:
continue
card = m.group(1)
device = m.group(2)
if card and device:
devices.append('hw:%s,%s' % (card, device))
return devices
def record_wav(hw_id, duration, filename):
command = 'arecord -D %s -f S16_LE -d %d %s' % (hw_id, duration, filename)
call(shlex.split(command))
|
|
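The recorder above covers the arecord half of the subject line; nothing in the file touches lame yet. A hedged sketch of the missing MP3 step, assuming the lame CLI is installed (encode_mp3 and its bitrate default are illustrative, not part of the commit):

import shlex
from subprocess import call

def encode_mp3(wav_path, mp3_path, bitrate=128):
    # Delegate WAV -> MP3 encoding to the lame command-line encoder.
    command = 'lame -b %d %s %s' % (bitrate, wav_path, mp3_path)
    return call(shlex.split(command))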
2adea388d387ff78778dc4e79045ff3d9a6780ae
|
tests/framework_tests/test_oauth_scopes.py
|
tests/framework_tests/test_oauth_scopes.py
|
# -*- coding: utf-8 -*-
from nose.tools import assert_in
from unittest import TestCase
from framework.auth import oauth_scopes
class TestOAuthScopes(TestCase):
def test_each_public_scope_includes_ALWAYS_PUBLIC(self):
for scope in oauth_scopes.public_scopes.itervalues():
assert_in(oauth_scopes.CoreScopes.ALWAYS_PUBLIC, scope.parts)
|
Add test for ALWAYS_PUBLIC injection behavior
|
Add test for ALWAYS_PUBLIC injection behavior
|
Python
|
apache-2.0
|
monikagrabowska/osf.io,HalcyonChimera/osf.io,adlius/osf.io,Johnetordoff/osf.io,leb2dg/osf.io,DanielSBrown/osf.io,cwisecarver/osf.io,DanielSBrown/osf.io,monikagrabowska/osf.io,Nesiehr/osf.io,adlius/osf.io,cslzchen/osf.io,wearpants/osf.io,caseyrollins/osf.io,CenterForOpenScience/osf.io,hmoco/osf.io,mattclark/osf.io,alexschiller/osf.io,brianjgeiger/osf.io,chennan47/osf.io,CenterForOpenScience/osf.io,laurenrevere/osf.io,binoculars/osf.io,erinspace/osf.io,chrisseto/osf.io,samchrisinger/osf.io,brianjgeiger/osf.io,Nesiehr/osf.io,DanielSBrown/osf.io,amyshi188/osf.io,felliott/osf.io,mfraezz/osf.io,adlius/osf.io,Nesiehr/osf.io,mfraezz/osf.io,monikagrabowska/osf.io,rdhyee/osf.io,mluo613/osf.io,brianjgeiger/osf.io,pattisdr/osf.io,icereval/osf.io,HalcyonChimera/osf.io,rdhyee/osf.io,baylee-d/osf.io,HalcyonChimera/osf.io,sloria/osf.io,mfraezz/osf.io,aaxelb/osf.io,SSJohns/osf.io,mluo613/osf.io,felliott/osf.io,leb2dg/osf.io,alexschiller/osf.io,mluke93/osf.io,aaxelb/osf.io,mluo613/osf.io,mluo613/osf.io,cwisecarver/osf.io,TomBaxter/osf.io,caseyrollins/osf.io,binoculars/osf.io,emetsger/osf.io,acshi/osf.io,cwisecarver/osf.io,TomBaxter/osf.io,emetsger/osf.io,baylee-d/osf.io,CenterForOpenScience/osf.io,crcresearch/osf.io,Nesiehr/osf.io,laurenrevere/osf.io,erinspace/osf.io,brianjgeiger/osf.io,baylee-d/osf.io,SSJohns/osf.io,mattclark/osf.io,alexschiller/osf.io,Johnetordoff/osf.io,hmoco/osf.io,leb2dg/osf.io,wearpants/osf.io,sloria/osf.io,leb2dg/osf.io,pattisdr/osf.io,cwisecarver/osf.io,saradbowman/osf.io,cslzchen/osf.io,caneruguz/osf.io,chennan47/osf.io,chrisseto/osf.io,amyshi188/osf.io,icereval/osf.io,chennan47/osf.io,amyshi188/osf.io,pattisdr/osf.io,alexschiller/osf.io,caneruguz/osf.io,chrisseto/osf.io,aaxelb/osf.io,hmoco/osf.io,acshi/osf.io,wearpants/osf.io,mluke93/osf.io,Johnetordoff/osf.io,acshi/osf.io,TomBaxter/osf.io,samchrisinger/osf.io,Johnetordoff/osf.io,mluke93/osf.io,samchrisinger/osf.io,aaxelb/osf.io,SSJohns/osf.io,cslzchen/osf.io,chrisseto/osf.io,erinspace/osf.io,hmoco/osf.io,monikagrabowska/osf.io,monikagrabowska/osf.io,mfraezz/osf.io,mattclark/osf.io,SSJohns/osf.io,binoculars/osf.io,samchrisinger/osf.io,CenterForOpenScience/osf.io,emetsger/osf.io,caneruguz/osf.io,cslzchen/osf.io,acshi/osf.io,rdhyee/osf.io,sloria/osf.io,wearpants/osf.io,felliott/osf.io,caseyrollins/osf.io,acshi/osf.io,alexschiller/osf.io,crcresearch/osf.io,adlius/osf.io,emetsger/osf.io,saradbowman/osf.io,mluke93/osf.io,crcresearch/osf.io,DanielSBrown/osf.io,HalcyonChimera/osf.io,mluo613/osf.io,amyshi188/osf.io,icereval/osf.io,caneruguz/osf.io,rdhyee/osf.io,laurenrevere/osf.io,felliott/osf.io
|
Add test for ALWAYS_PUBLIC injection behavior
|
# -*- coding: utf-8 -*-
from nose.tools import assert_in
from unittest import TestCase
from framework.auth import oauth_scopes
class TestOAuthScopes(TestCase):
def test_each_public_scope_includes_ALWAYS_PUBLIC(self):
for scope in oauth_scopes.public_scopes.itervalues():
assert_in(oauth_scopes.CoreScopes.ALWAYS_PUBLIC, scope.parts)
|
<commit_before><commit_msg>Add test for ALWAYS_PUBLIC injection behavior<commit_after>
|
# -*- coding: utf-8 -*-
from nose.tools import assert_in
from unittest import TestCase
from framework.auth import oauth_scopes
class TestOAuthScopes(TestCase):
def test_each_public_scope_includes_ALWAYS_PUBLIC(self):
for scope in oauth_scopes.public_scopes.itervalues():
assert_in(oauth_scopes.CoreScopes.ALWAYS_PUBLIC, scope.parts)
|
Add test for ALWAYS_PUBLIC injection behavior# -*- coding: utf-8 -*-
from nose.tools import assert_in
from unittest import TestCase
from framework.auth import oauth_scopes
class TestOAuthScopes(TestCase):
def test_each_public_scope_includes_ALWAYS_PUBLIC(self):
for scope in oauth_scopes.public_scopes.itervalues():
assert_in(oauth_scopes.CoreScopes.ALWAYS_PUBLIC, scope.parts)
|
<commit_before><commit_msg>Add test for ALWAYS_PUBLIC injection behavior<commit_after># -*- coding: utf-8 -*-
from nose.tools import assert_in
from unittest import TestCase
from framework.auth import oauth_scopes
class TestOAuthScopes(TestCase):
def test_each_public_scope_includes_ALWAYS_PUBLIC(self):
for scope in oauth_scopes.public_scopes.itervalues():
assert_in(oauth_scopes.CoreScopes.ALWAYS_PUBLIC, scope.parts)
|
|
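itervalues() pins this test to Python 2. A sketch of the same assertion ported to Python 3, assuming the same framework.auth.oauth_scopes module:

from unittest import TestCase
from framework.auth import oauth_scopes

class TestOAuthScopesPy3(TestCase):
    def test_each_public_scope_includes_ALWAYS_PUBLIC(self):
        # dict.values() replaces the Python-2-only itervalues().
        for scope in oauth_scopes.public_scopes.values():
            self.assertIn(oauth_scopes.CoreScopes.ALWAYS_PUBLIC, scope.parts)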
7d7699bd40b84aee3d210899999c666044943814
|
show_samples_lfw_conditional.py
|
show_samples_lfw_conditional.py
|
from pylearn2.utils import serial
import sys
_, model_path = sys.argv
model = serial.load(model_path)
space = model.generator.get_output_space()
from pylearn2.config import yaml_parse
from pylearn2.datasets import dense_design_matrix
from pylearn2.gui.patch_viewer import PatchViewer
import numpy as np
dataset = yaml_parse.load(model.dataset_yaml_src)
if dataset.view_converter is None:
dataset.view_converter = dense_design_matrix.DefaultViewConverter((32, 32, 3), dataset.axes)
rows = 4
sample_cols = 5
# First sample conditional data
# TODO: Also try retrieving real conditional data
conditional_data = model.generator.condition_distribution.sample(rows * sample_cols).eval()
# For some reason format_as from VectorSpace is not working right
topo_samples = model.generator.sample(conditional_data).eval()
samples = dataset.get_design_matrix(topo_samples)
print 'Original shape:', samples.shape
dataset.view_converter.axes = ['b', 0, 1, 'c']
topo_samples = dataset.get_topological_view(samples)
pv = PatchViewer(grid_shape=(rows, sample_cols + 1), patch_shape=(32,32),
is_color=True)
scale = np.abs(samples).max()
X = dataset.X
topo = dataset.get_topological_view()
for i in xrange(samples.shape[0]):
topo_sample = topo_samples[i, :, :, :]
print topo_samples.shape, topo_sample.shape
print topo_sample.min(), topo_sample.max(), topo_sample.shape
pv.add_patch(topo_sample / scale, rescale=False)
if (i +1) % sample_cols == 0:
sample = samples[i, :]
dists = np.square(X - sample).sum(axis=1)
j = np.argmin(dists)
match = topo[j]
print 'Nearest data point:', match.min(), match.max(), match.shape
pv.add_patch(match, rescale=True, activation=1)
pv.show()
|
Add sampler for conditional LFW(crop)
|
Add sampler for conditional LFW(crop)
|
Python
|
bsd-3-clause
|
hans/adversarial
|
Add sampler for conditional LFW(crop)
|
from pylearn2.utils import serial
import sys
_, model_path = sys.argv
model = serial.load(model_path)
space = model.generator.get_output_space()
from pylearn2.config import yaml_parse
from pylearn2.datasets import dense_design_matrix
from pylearn2.gui.patch_viewer import PatchViewer
import numpy as np
dataset = yaml_parse.load(model.dataset_yaml_src)
if dataset.view_converter is None:
dataset.view_converter = dense_design_matrix.DefaultViewConverter((32, 32, 3), dataset.axes)
rows = 4
sample_cols = 5
# First sample conditional data
# TODO: Also try retrieving real conditional data
conditional_data = model.generator.condition_distribution.sample(rows * sample_cols).eval()
# For some reason format_as from VectorSpace is not working right
topo_samples = model.generator.sample(conditional_data).eval()
samples = dataset.get_design_matrix(topo_samples)
print 'Original shape:', samples.shape
dataset.view_converter.axes = ['b', 0, 1, 'c']
topo_samples = dataset.get_topological_view(samples)
pv = PatchViewer(grid_shape=(rows, sample_cols + 1), patch_shape=(32,32),
is_color=True)
scale = np.abs(samples).max()
X = dataset.X
topo = dataset.get_topological_view()
for i in xrange(samples.shape[0]):
topo_sample = topo_samples[i, :, :, :]
print topo_samples.shape, topo_sample.shape
print topo_sample.min(), topo_sample.max(), topo_sample.shape
pv.add_patch(topo_sample / scale, rescale=False)
if (i +1) % sample_cols == 0:
sample = samples[i, :]
dists = np.square(X - sample).sum(axis=1)
j = np.argmin(dists)
match = topo[j]
print 'Nearest data point:', match.min(), match.max(), match.shape
pv.add_patch(match, rescale=True, activation=1)
pv.show()
|
<commit_before><commit_msg>Add sampler for conditional LFW(crop)<commit_after>
|
from pylearn2.utils import serial
import sys
_, model_path = sys.argv
model = serial.load(model_path)
space = model.generator.get_output_space()
from pylearn2.config import yaml_parse
from pylearn2.datasets import dense_design_matrix
from pylearn2.gui.patch_viewer import PatchViewer
import numpy as np
dataset = yaml_parse.load(model.dataset_yaml_src)
if dataset.view_converter is None:
dataset.view_converter = dense_design_matrix.DefaultViewConverter((32, 32, 3), dataset.axes)
rows = 4
sample_cols = 5
# First sample conditional data
# TODO: Also try retrieving real conditional data
conditional_data = model.generator.condition_distribution.sample(rows * sample_cols).eval()
# For some reason format_as from VectorSpace is not working right
topo_samples = model.generator.sample(conditional_data).eval()
samples = dataset.get_design_matrix(topo_samples)
print 'Original shape:', samples.shape
dataset.view_converter.axes = ['b', 0, 1, 'c']
topo_samples = dataset.get_topological_view(samples)
pv = PatchViewer(grid_shape=(rows, sample_cols + 1), patch_shape=(32,32),
is_color=True)
scale = np.abs(samples).max()
X = dataset.X
topo = dataset.get_topological_view()
for i in xrange(samples.shape[0]):
topo_sample = topo_samples[i, :, :, :]
print topo_samples.shape, topo_sample.shape
print topo_sample.min(), topo_sample.max(), topo_sample.shape
pv.add_patch(topo_sample / scale, rescale=False)
if (i +1) % sample_cols == 0:
sample = samples[i, :]
dists = np.square(X - sample).sum(axis=1)
j = np.argmin(dists)
match = topo[j]
print 'Nearest data point:', match.min(), match.max(), match.shape
pv.add_patch(match, rescale=True, activation=1)
pv.show()
|
Add sampler for conditional LFW(crop)from pylearn2.utils import serial
import sys
_, model_path = sys.argv
model = serial.load(model_path)
space = model.generator.get_output_space()
from pylearn2.config import yaml_parse
from pylearn2.datasets import dense_design_matrix
from pylearn2.gui.patch_viewer import PatchViewer
import numpy as np
dataset = yaml_parse.load(model.dataset_yaml_src)
if dataset.view_converter is None:
dataset.view_converter = dense_design_matrix.DefaultViewConverter((32, 32, 3), dataset.axes)
rows = 4
sample_cols = 5
# First sample conditional data
# TODO: Also try retrieving real conditional data
conditional_data = model.generator.condition_distribution.sample(rows * sample_cols).eval()
# For some reason format_as from VectorSpace is not working right
topo_samples = model.generator.sample(conditional_data).eval()
samples = dataset.get_design_matrix(topo_samples)
print 'Original shape:', samples.shape
dataset.view_converter.axes = ['b', 0, 1, 'c']
topo_samples = dataset.get_topological_view(samples)
pv = PatchViewer(grid_shape=(rows, sample_cols + 1), patch_shape=(32,32),
is_color=True)
scale = np.abs(samples).max()
X = dataset.X
topo = dataset.get_topological_view()
for i in xrange(samples.shape[0]):
topo_sample = topo_samples[i, :, :, :]
print topo_samples.shape, topo_sample.shape
print topo_sample.min(), topo_sample.max(), topo_sample.shape
pv.add_patch(topo_sample / scale, rescale=False)
if (i +1) % sample_cols == 0:
sample = samples[i, :]
dists = np.square(X - sample).sum(axis=1)
j = np.argmin(dists)
match = topo[j]
print 'Nearest data point:', match.min(), match.max(), match.shape
pv.add_patch(match, rescale=True, activation=1)
pv.show()
|
<commit_before><commit_msg>Add sampler for conditional LFW(crop)<commit_after>from pylearn2.utils import serial
import sys
_, model_path = sys.argv
model = serial.load(model_path)
space = model.generator.get_output_space()
from pylearn2.config import yaml_parse
from pylearn2.datasets import dense_design_matrix
from pylearn2.gui.patch_viewer import PatchViewer
import numpy as np
dataset = yaml_parse.load(model.dataset_yaml_src)
if dataset.view_converter is None:
dataset.view_converter = dense_design_matrix.DefaultViewConverter((32, 32, 3), dataset.axes)
rows = 4
sample_cols = 5
# First sample conditional data
# TODO: Also try retrieving real conditional data
conditional_data = model.generator.condition_distribution.sample(rows * sample_cols).eval()
# For some reason format_as from VectorSpace is not working right
topo_samples = model.generator.sample(conditional_data).eval()
samples = dataset.get_design_matrix(topo_samples)
print 'Original shape:', samples.shape
dataset.view_converter.axes = ['b', 0, 1, 'c']
topo_samples = dataset.get_topological_view(samples)
pv = PatchViewer(grid_shape=(rows, sample_cols + 1), patch_shape=(32,32),
is_color=True)
scale = np.abs(samples).max()
X = dataset.X
topo = dataset.get_topological_view()
for i in xrange(samples.shape[0]):
topo_sample = topo_samples[i, :, :, :]
print topo_samples.shape, topo_sample.shape
print topo_sample.min(), topo_sample.max(), topo_sample.shape
pv.add_patch(topo_sample / scale, rescale=False)
if (i +1) % sample_cols == 0:
sample = samples[i, :]
dists = np.square(X - sample).sum(axis=1)
j = np.argmin(dists)
match = topo[j]
print 'Nearest data point:', match.min(), match.max(), match.shape
pv.add_patch(match, rescale=True, activation=1)
pv.show()
|
|
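The sampler above finds the training image closest to each generated sample by brute-force squared distance; factored out of the loop, that check is a few lines of numpy. A self-contained sketch (the random X stands in for dataset.X):

import numpy as np

def nearest_row(X, sample):
    # Index of the row of X with the smallest squared L2 distance to sample.
    dists = np.square(X - sample).sum(axis=1)
    return np.argmin(dists)

X = np.random.rand(100, 3072)
assert nearest_row(X, X[7]) == 7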
e06590cf16cbf2e52c247c6e5a518103cf4278c2
|
migrations/versions/780_remove_unused_cols.py
|
migrations/versions/780_remove_unused_cols.py
|
"""Remove agreement_returned_at, countersigned_at and agreement_details
columns from supplier_framework table as they are no longer used
Revision ID: 780
Revises: 770
Create Date: 2016-11-07 10:14:00.000000
"""
# revision identifiers, used by Alembic.
revision = '780'
down_revision = '770'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.drop_column('supplier_frameworks', 'agreement_returned_at')
op.drop_column('supplier_frameworks', 'countersigned_at')
op.drop_column('supplier_frameworks', 'agreement_details')
def downgrade():
# Downgrade reinstates the columns but does not populate them with data.
# These fields could be populated with data from the "current framework agreement" after being reinstated.
# That would be better (or at least more easily) done by a script than by this migration if necessary.
op.add_column('supplier_frameworks', sa.Column('agreement_returned_at', sa.DateTime(), nullable=True))
op.add_column('supplier_frameworks', sa.Column('countersigned_at', sa.DateTime(), nullable=True))
op.add_column('supplier_frameworks', sa.Column('agreement_details', sa.dialects.postgresql.JSON(), nullable=True))
|
Add migration to drop now-unused supplier_framework columns
|
Add migration to drop now-unused supplier_framework columns
The recent addition of the framework_agreement table means that these columns
are no longer needed. (They are no longer referred to anywhere in the API code.)
|
Python
|
mit
|
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
|
Add migration to drop now-unused supplier_framework columns
The recent addition of the framework_agreement table means that these columns
are no longer needed. (They are no longer referred to anywhere in the API code.)
|
"""Remove agreement_returned_at, countersigned_at and agreement_details
columns from supplier_framework table as they are no longer used
Revision ID: 780
Revises: 770
Create Date: 2016-11-07 10:14:00.000000
"""
# revision identifiers, used by Alembic.
revision = '780'
down_revision = '770'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.drop_column('supplier_frameworks', 'agreement_returned_at')
op.drop_column('supplier_frameworks', 'countersigned_at')
op.drop_column('supplier_frameworks', 'agreement_details')
def downgrade():
# Downgrade reinstates the columns but does not populate them with data.
# These fields could be populated with data from the "current framework agreement" after being reinstated.
# That would be better (or at least more easily) done by a script than by this migration if necessary.
op.add_column('supplier_frameworks', sa.Column('agreement_returned_at', sa.DateTime(), nullable=True))
op.add_column('supplier_frameworks', sa.Column('countersigned_at', sa.DateTime(), nullable=True))
op.add_column('supplier_frameworks', sa.Column('agreement_details', sa.dialects.postgresql.JSON(), nullable=True))
|
<commit_before><commit_msg>Add migration to drop now-unused supplier_framework columns
The recent addition of the framework_agreement table means that these columns
are no longer needed. (They are no longer referred to anywhere in the API code.)<commit_after>
|
"""Remove agreement_returned_at, countersigned_at and agreement_details
columns from supplier_framework table as they are no longer used
Revision ID: 780
Revises: 770
Create Date: 2016-11-07 10:14:00.000000
"""
# revision identifiers, used by Alembic.
revision = '780'
down_revision = '770'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.drop_column('supplier_frameworks', 'agreement_returned_at')
op.drop_column('supplier_frameworks', 'countersigned_at')
op.drop_column('supplier_frameworks', 'agreement_details')
def downgrade():
# Downgrade reinstates the columns but does not populate them with data.
# These fields could be populated with data from the "current framework agreement" after being reinstated.
# That would be better (or at least more easily) done by a script than by this migration if necessary.
op.add_column('supplier_frameworks', sa.Column('agreement_returned_at', sa.DateTime(), nullable=True))
op.add_column('supplier_frameworks', sa.Column('countersigned_at', sa.DateTime(), nullable=True))
op.add_column('supplier_frameworks', sa.Column('agreement_details', sa.dialects.postgresql.JSON(), nullable=True))
|
Add migration to drop now-unused supplier_framework columns
The recent addition of the framework_agreement table means that these columns
are no longer needed. (They are no longer referred to anywhere in the API code.)"""Remove agreement_returned_at, countersigned_at and agreement_details
columns from supplier_framework table as they are no longer used
Revision ID: 780
Revises: 770
Create Date: 2016-11-07 10:14:00.000000
"""
# revision identifiers, used by Alembic.
revision = '780'
down_revision = '770'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.drop_column('supplier_frameworks', 'agreement_returned_at')
op.drop_column('supplier_frameworks', 'countersigned_at')
op.drop_column('supplier_frameworks', 'agreement_details')
def downgrade():
# Downgrade reinstates the columns but does not populate them with data.
# These fields could be populated with data from the "current framework agreement" after being reinstated.
# That would be better (or at least more easily) done by a script than by this migration if necessary.
op.add_column('supplier_frameworks', sa.Column('agreement_returned_at', sa.DateTime(), nullable=True))
op.add_column('supplier_frameworks', sa.Column('countersigned_at', sa.DateTime(), nullable=True))
op.add_column('supplier_frameworks', sa.Column('agreement_details', sa.dialects.postgresql.JSON(), nullable=True))
|
<commit_before><commit_msg>Add migration to drop now-unused supplier_framework columns
The recent addition of the framework_agreement table means that these columns
are no longer needed. (They are no longer referred to anywhere in the API code.)<commit_after>"""Remove agreement_returned_at, countersigned_at and agreement_details
columns from supplier_framework table as they are no longer used
Revision ID: 780
Revises: 770
Create Date: 2016-11-07 10:14:00.000000
"""
# revision identifiers, used by Alembic.
revision = '780'
down_revision = '770'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.drop_column('supplier_frameworks', 'agreement_returned_at')
op.drop_column('supplier_frameworks', 'countersigned_at')
op.drop_column('supplier_frameworks', 'agreement_details')
def downgrade():
# Downgrade reinstates the columns but does not populate them with data.
# These fields could be populated with data from the "current framework agreement" after being reinstated.
# That would be better (or at least more easily) done by a script than by this migration if necessary.
op.add_column('supplier_frameworks', sa.Column('agreement_returned_at', sa.DateTime(), nullable=True))
op.add_column('supplier_frameworks', sa.Column('countersigned_at', sa.DateTime(), nullable=True))
op.add_column('supplier_frameworks', sa.Column('agreement_details', sa.dialects.postgresql.JSON(), nullable=True))
|
|
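The downgrade above restores the three columns empty, and the comment defers repopulation to a script. A hedged sketch of what that backfill could look like, assuming a framework_agreements table keyed like supplier_frameworks and carrying the live copies of the fields (the fa.* column names are guesses, not taken from the schema):

from alembic import op

def backfill_supplier_framework_columns():
    # Copy the agreement fields back from the assumed framework_agreements table.
    op.execute("""
        UPDATE supplier_frameworks sf
        SET agreement_returned_at = fa.agreement_returned_at,
            countersigned_at = fa.countersigned_at,
            agreement_details = fa.agreement_details
        FROM framework_agreements fa
        WHERE fa.supplier_id = sf.supplier_id
          AND fa.framework_id = sf.framework_id
    """)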
09b35b09c5265ebff9dffbef876df1100a569339
|
zerver/migrations/0405_set_default_for_enable_read_receipts.py
|
zerver/migrations/0405_set_default_for_enable_read_receipts.py
|
# Generated by Django 4.0.6 on 2022-08-08 16:52
from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.db.models import Q
def set_default_for_enable_read_receipts(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
Realm = apps.get_model("zerver", "Realm")
# We enable read receipts by default in realms which require an invitation to
# join or which allow only users having emails with specific domains to join.
Realm.objects.filter(Q(invite_required=True) | Q(emails_restricted_to_domains=True)).update(
enable_read_receipts=True
)
class Migration(migrations.Migration):
dependencies = [
("zerver", "0404_realm_enable_read_receipts"),
]
operations = [
migrations.RunPython(set_default_for_enable_read_receipts, elidable=True),
]
|
Add migration to set default value of enable_read_receipts.
|
migrations: Add migration to set default value of enable_read_receipts.
This migration sets the default value of enable_read_receipts to True
for existing realms that require an invitation to join.
|
Python
|
apache-2.0
|
andersk/zulip,zulip/zulip,zulip/zulip,andersk/zulip,zulip/zulip,rht/zulip,rht/zulip,andersk/zulip,rht/zulip,zulip/zulip,rht/zulip,andersk/zulip,zulip/zulip,andersk/zulip,zulip/zulip,rht/zulip,andersk/zulip,rht/zulip,andersk/zulip,zulip/zulip,rht/zulip
|
migrations: Add migration to set default value of enable_read_receipts.
This migration sets the default value of enable_read_receipts to True
for existing realms that require an invitation to join.
|
# Generated by Django 4.0.6 on 2022-08-08 16:52
from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.db.models import Q
def set_default_for_enable_read_receipts(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
Realm = apps.get_model("zerver", "Realm")
# We enable read receipts by default in realms which require an invitation to
# join or which allow only users having emails with specific domains to join.
Realm.objects.filter(Q(invite_required=True) | Q(emails_restricted_to_domains=True)).update(
enable_read_receipts=True
)
class Migration(migrations.Migration):
dependencies = [
("zerver", "0404_realm_enable_read_receipts"),
]
operations = [
migrations.RunPython(set_default_for_enable_read_receipts, elidable=True),
]
|
<commit_before><commit_msg>migrations: Add migration to set default value of enable_read_receipts.
This migration sets the default value of enable_read_receipts to True
for existing realms that require an invitation to join.<commit_after>
|
# Generated by Django 4.0.6 on 2022-08-08 16:52
from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.db.models import Q
def set_default_for_enable_read_receipts(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
Realm = apps.get_model("zerver", "Realm")
# We enable read receipts by default in realms which require an invitation to
# join or which allow only users having emails with specific domains to join.
Realm.objects.filter(Q(invite_required=True) | Q(emails_restricted_to_domains=True)).update(
enable_read_receipts=True
)
class Migration(migrations.Migration):
dependencies = [
("zerver", "0404_realm_enable_read_receipts"),
]
operations = [
migrations.RunPython(set_default_for_enable_read_receipts, elidable=True),
]
|
migrations: Add migration to set default value of enable_read_receipts.
This migration sets the default value of enable_read_receipts to True
for existing realms that require an invitation to join.# Generated by Django 4.0.6 on 2022-08-08 16:52
from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.db.models import Q
def set_default_for_enable_read_receipts(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
Realm = apps.get_model("zerver", "Realm")
# We enable read receipts by default in realms which require an invitation to
# join or which allow only users having emails with specific domains to join.
Realm.objects.filter(Q(invite_required=True) | Q(emails_restricted_to_domains=True)).update(
enable_read_receipts=True
)
class Migration(migrations.Migration):
dependencies = [
("zerver", "0404_realm_enable_read_receipts"),
]
operations = [
migrations.RunPython(set_default_for_enable_read_receipts, elidable=True),
]
|
<commit_before><commit_msg>migrations: Add migration to set default value of enable_read_receipts.
This migration sets the default value of enable_read_receipts to True
for existing realms that require an invitation to join.<commit_after># Generated by Django 4.0.6 on 2022-08-08 16:52
from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.db.models import Q
def set_default_for_enable_read_receipts(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
Realm = apps.get_model("zerver", "Realm")
# We enable read receipts by default in realms which require an invitation to
# join or which allow only users having emails with specific domains to join.
Realm.objects.filter(Q(invite_required=True) | Q(emails_restricted_to_domains=True)).update(
enable_read_receipts=True
)
class Migration(migrations.Migration):
dependencies = [
("zerver", "0404_realm_enable_read_receipts"),
]
operations = [
migrations.RunPython(set_default_for_enable_read_receipts, elidable=True),
]
|
|
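The Q(invite_required=True) | Q(emails_restricted_to_domains=True) filter ORs the two realm flags into a single UPDATE. The predicate it applies, pulled out as plain Python for illustration:

from types import SimpleNamespace

def should_enable_read_receipts(realm):
    # Mirrors the Q(...) | Q(...) filter: closed or domain-restricted realms
    # get read receipts enabled by default.
    return realm.invite_required or realm.emails_restricted_to_domains

open_realm = SimpleNamespace(invite_required=False, emails_restricted_to_domains=False)
closed_realm = SimpleNamespace(invite_required=True, emails_restricted_to_domains=False)
assert not should_enable_read_receipts(open_realm)
assert should_enable_read_receipts(closed_realm)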
9e0acc72cf34659c3a95a3495cfa4c5536bb15b7
|
senlin/tests/tempest/api/clusters/test_cluster_show_negative.py
|
senlin/tests/tempest/api/clusters/test_cluster_show_negative.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
class TestClusterShowNegative(base.BaseSenlinTest):
@test.attr(type=['negative'])
@decorators.idempotent_id('bbc593ff-8556-416e-83c3-384e5c14d363')
def test_cluster_show_not_found(self):
self.assertRaises(exceptions.NotFound,
self.client.get_obj,
'clusters',
'bbc593ff-8556-416e-83c3-384e5c14d363')
|
Add negative test for cluster show
|
Add negative test for cluster show
The negative tests in tempest will check the exceptions raised
from an invalid request, so this patch will follow. It will request
an invalid cluster and check the NotFound error.
Change-Id: Ib602800b6e0d184b6a0b9146d79530d8d68289d4
|
Python
|
apache-2.0
|
openstack/senlin,openstack/senlin,openstack/senlin,stackforge/senlin,stackforge/senlin
|
Add negative test for cluster show
The negative tests in tempest will check the exceptions raised
from an invalid request, so this patch will follow. It will request
an invalid cluster and check the NotFound error.
Change-Id: Ib602800b6e0d184b6a0b9146d79530d8d68289d4
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
class TestClusterShowNegative(base.BaseSenlinTest):
@test.attr(type=['negative'])
@decorators.idempotent_id('bbc593ff-8556-416e-83c3-384e5c14d363')
def test_cluster_show_not_found(self):
self.assertRaises(exceptions.NotFound,
self.client.get_obj,
'clusters',
'bbc593ff-8556-416e-83c3-384e5c14d363')
|
<commit_before><commit_msg>Add negative test for cluster show
The negative tests in tempest will check the exceptions raised
from an invalid request, so this patch will follow. It will request
an invalid cluster and check the NotFound error.
Change-Id: Ib602800b6e0d184b6a0b9146d79530d8d68289d4<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
class TestClusterShowNegative(base.BaseSenlinTest):
@test.attr(type=['negative'])
@decorators.idempotent_id('bbc593ff-8556-416e-83c3-384e5c14d363')
def test_cluster_show_not_found(self):
self.assertRaises(exceptions.NotFound,
self.client.get_obj,
'clusters',
'bbc593ff-8556-416e-83c3-384e5c14d363')
|
Add negative test for cluster show
The negative tests in tempest will check the exceptions raised
from an invalid request, so this patch will follow. It will request
an invalid cluster and check the NotFound error.
Change-Id: Ib602800b6e0d184b6a0b9146d79530d8d68289d4# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
class TestClusterShowNegative(base.BaseSenlinTest):
@test.attr(type=['negative'])
@decorators.idempotent_id('bbc593ff-8556-416e-83c3-384e5c14d363')
def test_cluster_show_not_found(self):
self.assertRaises(exceptions.NotFound,
self.client.get_obj,
'clusters',
'bbc593ff-8556-416e-83c3-384e5c14d363')
|
<commit_before><commit_msg>Add negative test for cluster show
The negative tests in tempest will check the exceptions raised
from an invalid request, so this patch will follow. It will request
an invalid cluster and check the NotFound error.
Change-Id: Ib602800b6e0d184b6a0b9146d79530d8d68289d4<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
class TestClusterShowNegative(base.BaseSenlinTest):
@test.attr(type=['negative'])
@decorators.idempotent_id('bbc593ff-8556-416e-83c3-384e5c14d363')
def test_cluster_show_not_found(self):
self.assertRaises(exceptions.NotFound,
self.client.get_obj,
'clusters',
'bbc593ff-8556-416e-83c3-384e5c14d363')
|
|
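The test above pins one collection and one fake UUID; the same NotFound assertion generalizes across object types. A sketch reusing the record's base class and client (the extra 'nodes' and 'profiles' collections are assumptions about the API):

from tempest.lib import exceptions
from senlin.tests.tempest.api import base

class TestShowNegative(base.BaseSenlinTest):
    def test_objects_not_found(self):
        fake_id = 'bbc593ff-8556-416e-83c3-384e5c14d363'
        for collection in ('clusters', 'nodes', 'profiles'):
            # Requesting a nonexistent object must raise NotFound.
            self.assertRaises(exceptions.NotFound,
                              self.client.get_obj, collection, fake_id)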
cf8934f07b9d5a7b022d4030f8549f05e6391e35
|
sense/version.py
|
sense/version.py
|
VERSION = "0.0.8" # Add User-Agent and optional token encoding with sense.app_secret key.
#"0.0.7": Fix a bug in Feed instance_url (case of adressing a feed by type on a retrieved feed)
# 0.0.6: Allow addressing feed by node uid + feed type
# 0.0.5: Enable Node update
# 0.0.4: Add method to post event
# 0.0.3: Allow Node creation and update
# 0.0.2: Add install requires to setup.py
# 0.0.1: Allow Node creation and update
|
VERSION = "0.0.9" # Add parameters handling to the save method
#"0.0.8" # Add User-Agent and optional token encoding with sense.app_secret key.
#"0.0.7": Fix a bug in Feed instance_url (case of adressing a feed by type on a retrieved feed)
# 0.0.6: Allow addressing feed by node uid + feed type
# 0.0.5: Enable Node update
# 0.0.4: Add method to post event
# 0.0.3: Allow Node creation and update
# 0.0.2: Add install requires to setup.py
# 0.0.1: Allow Node creation and update
|
Add parameters handling to the save method
|
Add parameters handling to the save method
|
Python
|
mit
|
Sense-API/sense-python-client
|
VERSION = "0.0.8" # Add User-Agent and optional token encoding with sense.app_secret key.
#"0.0.7": Fix a bug in Feed instance_url (case of adressing a feed by type on a retrieved feed)
# 0.0.6: Allow addressing feed by node uid + feed type
# 0.0.5: Enable Node update
# 0.0.4: Add method to post event
# 0.0.3: Allow Node creation and update
# 0.0.2: Add install requires to setup.py
# 0.0.1: Allow Node creation and update
Add parameters handling to the save method
|
VERSION = "0.0.9" # Add parameters handling to the save method
#"0.0.8" # Add User-Agent and optional token encoding with sense.app_secret key.
#"0.0.7": Fix a bug in Feed instance_url (case of adressing a feed by type on a retrieved feed)
# 0.0.6: Allow addressing feed by node uid + feed type
# 0.0.5: Enable Node update
# 0.0.4: Add method to post event
# 0.0.3: Allow Node creation and update
# 0.0.2: Add install requires to setup.py
# 0.0.1: Allow Node creation and update
|
<commit_before>VERSION = "0.0.8" # Add User-Agent and optional token encoding with sense.app_secret key.
#"0.0.7": Fix a bug in Feed instance_url (case of adressing a feed by type on a retrieved feed)
# 0.0.6: Allow addressing feed by node uid + feed type
# 0.0.5: Enable Node update
# 0.0.4: Add method to post event
# 0.0.3: Allow Node creation and update
# 0.0.2: Add install requires to setup.py
# 0.0.1: Allow Node creation and update
<commit_msg>Add parameters handling to the save method<commit_after>
|
VERSION = "0.0.9" # Add parameters handling to the save method
#"0.0.8" # Add User-Agent and optional token encoding with sense.app_secret key.
#"0.0.7": Fix a bug in Feed instance_url (case of adressing a feed by type on a retrieved feed)
# 0.0.6: Allow addressing feed by node uid + feed type
# 0.0.5: Enable Node update
# 0.0.4: Add method to post event
# 0.0.3: Allow Node creation and update
# 0.0.2: Add install requires to setup.py
# 0.0.1: Allow Node creation and update
|
VERSION = "0.0.8" # Add User-Agent and optional token encoding with sense.app_secret key.
#"0.0.7": Fix a bug in Feed instance_url (case of adressing a feed by type on a retrieved feed)
# 0.0.6: Allow addressing feed by node uid + feed type
# 0.0.5: Enable Node update
# 0.0.4: Add method to post event
# 0.0.3: Allow Node creation and update
# 0.0.2: Add install requires to setup.py
# 0.0.1: Allow Node creation and update
Add parameters handling to the save methodVERSION = "0.0.9" # Add parameters handling to the save method
#"0.0.8" # Add User-Agent and optional token encoding with sense.app_secret key.
#"0.0.7": Fix a bug in Feed instance_url (case of adressing a feed by type on a retrieved feed)
# 0.0.6: Allow addressing feed by node uid + feed type
# 0.0.5: Enable Node update
# 0.0.4: Add method to post event
# 0.0.3: Allow Node creation and update
# 0.0.2: Add install requires to setup.py
# 0.0.1: Allow Node creation and update
|
<commit_before>VERSION = "0.0.8" # Add User-Agent and optional token encoding with sense.app_secret key.
#"0.0.7": Fix a bug in Feed instance_url (case of adressing a feed by type on a retrieved feed)
# 0.0.6: Allow addressing feed by node uid + feed type
# 0.0.5: Enable Node update
# 0.0.4: Add method to post event
# 0.0.3: Allow Node creation and update
# 0.0.2: Add install requires to setup.py
# 0.0.1: Allow Node creation and update
<commit_msg>Add parameters handling to the save method<commit_after>VERSION = "0.0.9" # Add parameters handling to the save method
#"0.0.8" # Add User-Agent and optional token encoding with sense.app_secret key.
#"0.0.7": Fix a bug in Feed instance_url (case of adressing a feed by type on a retrieved feed)
# 0.0.6: Allow addressing feed by node uid + feed type
# 0.0.5: Enable Node update
# 0.0.4: Add method to post event
# 0.0.3: Allow Node creation and update
# 0.0.2: Add install requires to setup.py
# 0.0.1: Allow Node creation and update
|
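With the changelog kept as trailing comments, only the VERSION string is machine-readable. The usual minimal ordering check over such strings is a tuple comparison; a sketch:

def version_tuple(version):
    # '0.0.9' -> (0, 0, 9), so plain tuple comparison orders releases.
    return tuple(int(part) for part in version.split('.'))

assert version_tuple('0.0.9') > version_tuple('0.0.8')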
59835b91ee82a8d5d12d27992b6494f9487f6848
|
testing/schur_nullspace.py
|
testing/schur_nullspace.py
|
from firedrake import *
from firedrake.slate.preconditioners import create_schur_nullspace
import numpy as np
mesh = UnitCubedSphereMesh(2)
mesh.init_cell_orientations(SpatialCoordinate(mesh))
n = FacetNormal(mesh)
V = FunctionSpace(mesh, "RTCF", 1)
Q = FunctionSpace(mesh, "DG", 0)
W = V*Q
sigma, u = TrialFunctions(W)
tau, v = TestFunctions(W)
a = (inner(sigma, tau) + div(sigma)*v + div(tau)*u)*dx
W_d = FunctionSpace(mesh,
MixedElement([BrokenElement(Vi.ufl_element())
for Vi in W]))
atilde = Tensor(replace(a, dict(zip(a.arguments(),
(TestFunction(W_d),
TrialFunction(W_d))))))
Vt = FunctionSpace(mesh, "HDiv Trace", 0)
gamma = TestFunction(Vt)
sigma, _ = TrialFunctions(W_d)
K = Tensor(gamma('+') * dot(sigma, n) * dS)
A = assemble(a, mat_type="aij")
nullspace = MixedVectorSpaceBasis(W, [W[0], VectorSpaceBasis(constant=True)])
nullspace._build_monolithic_basis()
A.petscmat.setNullSpace(nullspace._nullspace)
Snullsp = create_schur_nullspace(A.petscmat, K * atilde.inv,
W, W_d, Vt,
COMM_WORLD)
v = Snullsp.getVecs()[0]
print "Computed nullspace of S (min, max, norm)", v.array_r.min(), v.array_r.max(), v.norm()
S = K * atilde.inv * K.T
u, s, v = np.linalg.svd(assemble(S, mat_type="aij").M.values)
singular_vector = v[-1]
print "Actual nullspace of S (min, max, norm)", singular_vector.min(), singular_vector.max(), np.linalg.norm(singular_vector)
u, s, v = np.linalg.svd(A.M.handle[:, :])
offset = V.dof_dset.size
singular_vector = v[-1][offset:]
print "Nullspace of original operator (min, max, norm)", singular_vector.min(), singular_vector.max(), np.linalg.norm(singular_vector)
|
Add test for computing nullspace of the Schur operator
|
Add test for computing nullspace of the Schur operator
|
Python
|
mit
|
thomasgibson/firedrake-hybridization
|
Add test for computing nullspace of the Schur operator
|
from firedrake import *
from firedrake.slate.preconditioners import create_schur_nullspace
import numpy as np
mesh = UnitCubedSphereMesh(2)
mesh.init_cell_orientations(SpatialCoordinate(mesh))
n = FacetNormal(mesh)
V = FunctionSpace(mesh, "RTCF", 1)
Q = FunctionSpace(mesh, "DG", 0)
W = V*Q
sigma, u = TrialFunctions(W)
tau, v = TestFunctions(W)
a = (inner(sigma, tau) + div(sigma)*v + div(tau)*u)*dx
W_d = FunctionSpace(mesh,
MixedElement([BrokenElement(Vi.ufl_element())
for Vi in W]))
atilde = Tensor(replace(a, dict(zip(a.arguments(),
(TestFunction(W_d),
TrialFunction(W_d))))))
Vt = FunctionSpace(mesh, "HDiv Trace", 0)
gamma = TestFunction(Vt)
sigma, _ = TrialFunctions(W_d)
K = Tensor(gamma('+') * dot(sigma, n) * dS)
A = assemble(a, mat_type="aij")
nullspace = MixedVectorSpaceBasis(W, [W[0], VectorSpaceBasis(constant=True)])
nullspace._build_monolithic_basis()
A.petscmat.setNullSpace(nullspace._nullspace)
Snullsp = create_schur_nullspace(A.petscmat, K * atilde.inv,
W, W_d, Vt,
COMM_WORLD)
v = Snullsp.getVecs()[0]
print "Computed nullspace of S (min, max, norm)", v.array_r.min(), v.array_r.max(), v.norm()
S = K * atilde.inv * K.T
u, s, v = np.linalg.svd(assemble(S, mat_type="aij").M.values)
singular_vector = v[-1]
print "Actual nullspace of S (min, max, norm)", singular_vector.min(), singular_vector.max(), np.linalg.norm(singular_vector)
u, s, v = np.linalg.svd(A.M.handle[:, :])
offset = V.dof_dset.size
singular_vector = v[-1][offset:]
print "Nullspace of original operator (min, max, norm)", singular_vector.min(), singular_vector.max(), np.linalg.norm(singular_vector)
|
<commit_before><commit_msg>Add test for computing nullspace of the Schur operator<commit_after>
|
from firedrake import *
from firedrake.slate.preconditioners import create_schur_nullspace
import numpy as np
mesh = UnitCubedSphereMesh(2)
mesh.init_cell_orientations(SpatialCoordinate(mesh))
n = FacetNormal(mesh)
V = FunctionSpace(mesh, "RTCF", 1)
Q = FunctionSpace(mesh, "DG", 0)
W = V*Q
sigma, u = TrialFunctions(W)
tau, v = TestFunctions(W)
a = (inner(sigma, tau) + div(sigma)*v + div(tau)*u)*dx
W_d = FunctionSpace(mesh,
MixedElement([BrokenElement(Vi.ufl_element())
for Vi in W]))
atilde = Tensor(replace(a, dict(zip(a.arguments(),
(TestFunction(W_d),
TrialFunction(W_d))))))
Vt = FunctionSpace(mesh, "HDiv Trace", 0)
gamma = TestFunction(Vt)
sigma, _ = TrialFunctions(W_d)
K = Tensor(gamma('+') * dot(sigma, n) * dS)
A = assemble(a, mat_type="aij")
nullspace = MixedVectorSpaceBasis(W, [W[0], VectorSpaceBasis(constant=True)])
nullspace._build_monolithic_basis()
A.petscmat.setNullSpace(nullspace._nullspace)
Snullsp = create_schur_nullspace(A.petscmat, K * atilde.inv,
W, W_d, Vt,
COMM_WORLD)
v = Snullsp.getVecs()[0]
print "Computed nullspace of S (min, max, norm)", v.array_r.min(), v.array_r.max(), v.norm()
S = K * atilde.inv * K.T
u, s, v = np.linalg.svd(assemble(S, mat_type="aij").M.values)
singular_vector = v[-1]
print "Actual nullspace of S (min, max, norm)", singular_vector.min(), singular_vector.max(), np.linalg.norm(singular_vector)
u, s, v = np.linalg.svd(A.M.handle[:, :])
offset = V.dof_dset.size
singular_vector = v[-1][offset:]
print "Nullspace of original operator (min, max, norm)", singular_vector.min(), singular_vector.max(), np.linalg.norm(singular_vector)
|
Add test for computing nullspace of the Schur operatorfrom firedrake import *
from firedrake.slate.preconditioners import create_schur_nullspace
import numpy as np
mesh = UnitCubedSphereMesh(2)
mesh.init_cell_orientations(SpatialCoordinate(mesh))
n = FacetNormal(mesh)
V = FunctionSpace(mesh, "RTCF", 1)
Q = FunctionSpace(mesh, "DG", 0)
W = V*Q
sigma, u = TrialFunctions(W)
tau, v = TestFunctions(W)
a = (inner(sigma, tau) + div(sigma)*v + div(tau)*u)*dx
W_d = FunctionSpace(mesh,
MixedElement([BrokenElement(Vi.ufl_element())
for Vi in W]))
atilde = Tensor(replace(a, dict(zip(a.arguments(),
(TestFunction(W_d),
TrialFunction(W_d))))))
Vt = FunctionSpace(mesh, "HDiv Trace", 0)
gamma = TestFunction(Vt)
sigma, _ = TrialFunctions(W_d)
K = Tensor(gamma('+') * dot(sigma, n) * dS)
A = assemble(a, mat_type="aij")
nullspace = MixedVectorSpaceBasis(W, [W[0], VectorSpaceBasis(constant=True)])
nullspace._build_monolithic_basis()
A.petscmat.setNullSpace(nullspace._nullspace)
Snullsp = create_schur_nullspace(A.petscmat, K * atilde.inv,
W, W_d, Vt,
COMM_WORLD)
v = Snullsp.getVecs()[0]
print "Computed nullspace of S (min, max, norm)", v.array_r.min(), v.array_r.max(), v.norm()
S = K * atilde.inv * K.T
u, s, v = np.linalg.svd(assemble(S, mat_type="aij").M.values)
singular_vector = v[-1]
print "Actual nullspace of S (min, max, norm)", singular_vector.min(), singular_vector.max(), np.linalg.norm(singular_vector)
u, s, v = np.linalg.svd(A.M.handle[:, :])
offset = V.dof_dset.size
singular_vector = v[-1][offset:]
print "Nullspace of original operator (min, max, norm)", singular_vector.min(), singular_vector.max(), np.linalg.norm(singular_vector)
|
<commit_before><commit_msg>Add test for computing nullspace of the Schur operator<commit_after>from firedrake import *
from firedrake.slate.preconditioners import create_schur_nullspace
import numpy as np
mesh = UnitCubedSphereMesh(2)
mesh.init_cell_orientations(SpatialCoordinate(mesh))
n = FacetNormal(mesh)
V = FunctionSpace(mesh, "RTCF", 1)
Q = FunctionSpace(mesh, "DG", 0)
W = V*Q
sigma, u = TrialFunctions(W)
tau, v = TestFunctions(W)
a = (inner(sigma, tau) + div(sigma)*v + div(tau)*u)*dx
W_d = FunctionSpace(mesh,
MixedElement([BrokenElement(Vi.ufl_element())
for Vi in W]))
atilde = Tensor(replace(a, dict(zip(a.arguments(),
(TestFunction(W_d),
TrialFunction(W_d))))))
Vt = FunctionSpace(mesh, "HDiv Trace", 0)
gamma = TestFunction(Vt)
sigma, _ = TrialFunctions(W_d)
K = Tensor(gamma('+') * dot(sigma, n) * dS)
A = assemble(a, mat_type="aij")
nullspace = MixedVectorSpaceBasis(W, [W[0], VectorSpaceBasis(constant=True)])
nullspace._build_monolithic_basis()
A.petscmat.setNullSpace(nullspace._nullspace)
Snullsp = create_schur_nullspace(A.petscmat, K * atilde.inv,
W, W_d, Vt,
COMM_WORLD)
v = Snullsp.getVecs()[0]
print "Computed nullspace of S (min, max, norm)", v.array_r.min(), v.array_r.max(), v.norm()
S = K * atilde.inv * K.T
u, s, v = np.linalg.svd(assemble(S, mat_type="aij").M.values)
singular_vector = v[-1]
print "Actual nullspace of S (min, max, norm)", singular_vector.min(), singular_vector.max(), np.linalg.norm(singular_vector)
u, s, v = np.linalg.svd(A.M.handle[:, :])
offset = V.dof_dset.size
singular_vector = v[-1][offset:]
print "Nullspace of original operator (min, max, norm)", singular_vector.min(), singular_vector.max(), np.linalg.norm(singular_vector)
|
|
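Each SVD check above reads off v[-1] as the null vector, which silently assumes exactly one vanishing singular value. A small numpy helper that makes the tolerance and the count explicit (assumes a square operator, as in the record):

import numpy as np

def nullspace_from_svd(A, rtol=1e-10):
    # Rows of vt whose singular values are numerically zero span null(A).
    u, s, vt = np.linalg.svd(A)
    return vt[s < rtol * s.max()]

A = np.array([[1.0, 2.0], [2.0, 4.0]])  # rank 1, so one null direction
ns = nullspace_from_svd(A)
assert np.allclose(A @ ns.T, 0.0)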
cd003fa1d57b442d6889442d0b1815fc3312505c
|
toolbox/replicate_graph.py
|
toolbox/replicate_graph.py
|
import sys
import commentjson as json
import os
import argparse
import numpy as np
import copy
sys.path.append('../.')
sys.path.append('.')
from progressbar import ProgressBar
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Replicate nodes, links, divisions and exclusion sets N times, ' \
'so that the total number of timeframes does not change',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--model', required=True, type=str, dest='model_filename',
help='Filename of the json model description')
parser.add_argument('--output', required=True, type=str, dest='result_filename',
help='Filename of the json file that will hold the replicated model')
parser.add_argument('--num', type=int, dest='num', default=2,
help='how many instances of the original model shall be present in the result file')
args = parser.parse_args()
print("Loading model file: " + args.model_filename)
with open(args.model_filename, 'r') as f:
model = json.load(f)
segmentationHypotheses = model['segmentationHypotheses']
# use generator expression instead of list comprehension, we only need it once!
maxId = max((i['id'] for i in segmentationHypotheses))
newModel = copy.deepcopy(model)
for i in range(1, args.num):
offset = i * (maxId + 1000000) # create random gap in IDs
for seg in segmentationHypotheses:
newSeg = copy.deepcopy(seg)
newSeg['id'] = offset + newSeg['id']
newModel['segmentationHypotheses'].append(newSeg)
linkingHypotheses = model['linkingHypotheses']
for link in linkingHypotheses:
newLink = copy.deepcopy(link)
newLink['src'] = offset + newLink['src']
newLink['dest'] = offset + newLink['dest']
newModel['linkingHypotheses'].append(newLink)
if 'exclusions' in model:
for e in model['exclusions']:
newExclusion = [x + offset for x in e]
newModel['exclusions'].append(newExclusion)
if 'divisions' in model:
for d in model['divisions']:
newDiv = copy.deepcopy(d)
newDiv['parent'] = offset + d['parent']
newDiv['children'] = [offset + c for c in d['children']]
newModel['divisions'].append(newDiv)
with open(args.result_filename, 'w') as f:
json.dump(newModel, f, indent=4, separators=(',', ': '))
|
Add script to artificially increase the size of graphs by replicating all nodes and their links
|
Add script to artificially increase the size of graphs by replicating all nodes and their links
|
Python
|
mit
|
chaubold/hytra,chaubold/hytra,chaubold/hytra
|
Add script to artificially increase the size of graphs by replicating all nodes and their links
|
import sys
import commentjson as json
import os
import argparse
import numpy as np
import copy
sys.path.append('../.')
sys.path.append('.')
from progressbar import ProgressBar
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Replicate nodes, links, divisions and exclusion sets N times, ' \
'so that the total number of timeframes does not change',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--model', required=True, type=str, dest='model_filename',
help='Filename of the json model description')
parser.add_argument('--output', required=True, type=str, dest='result_filename',
help='Filename of the json file that will hold the replicated model')
parser.add_argument('--num', type=int, dest='num', default=2,
help='how many instances of the original model shall be present in the result file')
args = parser.parse_args()
print("Loading model file: " + args.model_filename)
with open(args.model_filename, 'r') as f:
model = json.load(f)
segmentationHypotheses = model['segmentationHypotheses']
# use generator expression instead of list comprehension, we only need it once!
maxId = max((i['id'] for i in segmentationHypotheses))
newModel = copy.deepcopy(model)
for i in range(1, args.num):
offset = i * (maxId + 1000000) # create random gap in IDs
for seg in segmentationHypotheses:
newSeg = copy.deepcopy(seg)
newSeg['id'] = offset + newSeg['id']
newModel['segmentationHypotheses'].append(newSeg)
linkingHypotheses = model['linkingHypotheses']
for link in linkingHypotheses:
newLink = copy.deepcopy(link)
newLink['src'] = offset + newLink['src']
newLink['dest'] = offset + newLink['dest']
newModel['linkingHypotheses'].append(newLink)
if 'exclusions' in model:
for e in model['exclusions']:
newExclusion = [x + offset for x in e]
newModel['exclusions'].append(newExclusion)
if 'divisions' in model:
for d in model['divisions']:
newDiv = copy.deepcopy(d)
newDiv['parent'] = offset + d['parent']
newDiv['children'] = [offset + c for c in d['children']]
newModel['divisions'].append(newDiv)
with open(args.result_filename, 'w') as f:
json.dump(newModel, f, indent=4, separators=(',', ': '))
|
<commit_before><commit_msg>Add script to artificially increase the size of graphs by replicating all nodes and their links<commit_after>
|
import sys
import commentjson as json
import os
import argparse
import numpy as np
import copy
sys.path.append('../.')
sys.path.append('.')
from progressbar import ProgressBar
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Replicate nodes, links, divisions and exclusion sets N times, ' \
'so that the total number of timeframes does not change',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--model', required=True, type=str, dest='model_filename',
help='Filename of the json model description')
parser.add_argument('--output', required=True, type=str, dest='result_filename',
help='Filename of the json file that will hold the replicated model')
parser.add_argument('--num', type=int, dest='num', default=2,
help='how many instances of the original model shall be present in the result file')
args = parser.parse_args()
print("Loading model file: " + args.model_filename)
with open(args.model_filename, 'r') as f:
model = json.load(f)
segmentationHypotheses = model['segmentationHypotheses']
# use generator expression instead of list comprehension, we only need it once!
maxId = max((i['id'] for i in segmentationHypotheses))
newModel = copy.deepcopy(model)
for i in range(1, args.num):
offset = i * (maxId + 1000000) # create random gap in IDs
for seg in segmentationHypotheses:
newSeg = copy.deepcopy(seg)
newSeg['id'] = offset + newSeg['id']
newModel['segmentationHypotheses'].append(newSeg)
linkingHypotheses = model['linkingHypotheses']
for link in linkingHypotheses:
newLink = copy.deepcopy(link)
newLink['src'] = offset + newLink['src']
newLink['dest'] = offset + newLink['dest']
newModel['linkingHypotheses'].append(newLink)
if 'exclusions' in model:
for e in model['exclusions']:
newExclusion = [x + offset for x in e]
newModel['exclusions'].append(newExclusion)
if 'divisions' in model:
for d in model['divisions']:
newDiv = copy.deepcopy(d)
newDiv['parent'] = offset + d['parent']
newDiv['children'] = [offset + c for c in d['children']]
newModel['divisions'].append(newDiv)
with open(args.result_filename, 'w') as f:
json.dump(newModel, f, indent=4, separators=(',', ': '))
|
Add script to artificially increase the size of graphs by replicating all nodes and their linksimport sys
import commentjson as json
import os
import argparse
import numpy as np
import copy
sys.path.append('../.')
sys.path.append('.')
from progressbar import ProgressBar
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Replicate nodes, links, divisions and exclusion sets N times, ' \
'so that the total number of timeframes does not change',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--model', required=True, type=str, dest='model_filename',
help='Filename of the json model description')
parser.add_argument('--output', required=True, type=str, dest='result_filename',
help='Filename of the json file that will hold the replicated model')
parser.add_argument('--num', type=int, dest='num', default=2,
help='how many instances of the original model shall be present in the result file')
args = parser.parse_args()
print("Loading model file: " + args.model_filename)
with open(args.model_filename, 'r') as f:
model = json.load(f)
segmentationHypotheses = model['segmentationHypotheses']
# use generator expression instead of list comprehension, we only need it once!
maxId = max((i['id'] for i in segmentationHypotheses))
newModel = copy.deepcopy(model)
for i in range(1, args.num):
        offset = i * (maxId + 1000000) # create a large gap in IDs so the copies never collide
for seg in segmentationHypotheses:
newSeg = copy.deepcopy(seg)
newSeg['id'] = offset + newSeg['id']
newModel['segmentationHypotheses'].append(newSeg)
linkingHypotheses = model['linkingHypotheses']
for link in linkingHypotheses:
newLink = copy.deepcopy(link)
newLink['src'] = offset + newLink['src']
newLink['dest'] = offset + newLink['dest']
newModel['linkingHypotheses'].append(newLink)
if 'exclusions' in model:
for e in model['exclusions']:
newExclusion = [x + offset for x in e]
newModel['exclusions'].append(newExclusion)
if 'divisions' in model:
for d in model['divisions']:
newDiv = copy.deepcopy(d)
newDiv['parent'] = offset + d['parent']
newDiv['children'] = [offset + c for c in d['children']]
newModel['divisions'].append(newDiv)
with open(args.result_filename, 'w') as f:
json.dump(newModel, f, indent=4, separators=(',', ': '))
|
<commit_before><commit_msg>Add script to artificially increase the size of graphs by replicating all nodes and their links<commit_after>import sys
import commentjson as json
import os
import argparse
import numpy as np
import copy
sys.path.append('../.')
sys.path.append('.')
from progressbar import ProgressBar
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Replicate nodes, links, divisions and exclusion sets N times, ' \
'so that the total number of timeframes does not change',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--model', required=True, type=str, dest='model_filename',
help='Filename of the json model description')
parser.add_argument('--output', required=True, type=str, dest='result_filename',
help='Filename of the json file that will hold the replicated model')
parser.add_argument('--num', type=int, dest='num', default=2,
help='how many instances of the original model shall be present in the result file')
args = parser.parse_args()
print("Loading model file: " + args.model_filename)
with open(args.model_filename, 'r') as f:
model = json.load(f)
segmentationHypotheses = model['segmentationHypotheses']
# use generator expression instead of list comprehension, we only need it once!
maxId = max((i['id'] for i in segmentationHypotheses))
newModel = copy.deepcopy(model)
for i in range(1, args.num):
        offset = i * (maxId + 1000000) # create a large gap in IDs so the copies never collide
for seg in segmentationHypotheses:
newSeg = copy.deepcopy(seg)
newSeg['id'] = offset + newSeg['id']
newModel['segmentationHypotheses'].append(newSeg)
linkingHypotheses = model['linkingHypotheses']
for link in linkingHypotheses:
newLink = copy.deepcopy(link)
newLink['src'] = offset + newLink['src']
newLink['dest'] = offset + newLink['dest']
newModel['linkingHypotheses'].append(newLink)
if 'exclusions' in model:
for e in model['exclusions']:
newExclusion = [x + offset for x in e]
newModel['exclusions'].append(newExclusion)
if 'divisions' in model:
for d in model['divisions']:
newDiv = copy.deepcopy(d)
newDiv['parent'] = offset + d['parent']
newDiv['children'] = [offset + c for c in d['children']]
newModel['divisions'].append(newDiv)
with open(args.result_filename, 'w') as f:
json.dump(newModel, f, indent=4, separators=(',', ': '))
|
|
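The script in the record above merges N copies of a tracking model by shifting every node ID in copy i by a per-copy offset larger than any existing ID, then rewriting the IDs embedded in links, exclusions, and divisions. A minimal, self-contained sketch of that offset-and-merge idea (the 'id'/'src'/'dest' field names and the helper name are chosen here for illustration, independent of the JSON model format above):

import copy

def replicate_graph(nodes, edges, copies=2, gap=1000000):
    """Return (nodes, edges) holding `copies` disjoint instances of the input graph.

    IDs in copy i are shifted by i * (max_id + gap), so the copies cannot
    collide as long as every original ID is <= max_id.
    """
    max_id = max(n['id'] for n in nodes)
    out_nodes, out_edges = list(nodes), list(edges)
    for i in range(1, copies):
        offset = i * (max_id + gap)
        for n in nodes:
            clone = copy.deepcopy(n)
            clone['id'] += offset
            out_nodes.append(clone)
        for e in edges:
            clone = copy.deepcopy(e)
            clone['src'] += offset
            clone['dest'] += offset
            out_edges.append(clone)
    return out_nodes, out_edges

# usage sketch: three copies of a two-node, one-edge graph
nodes, edges = replicate_graph([{'id': 0}, {'id': 1}], [{'src': 0, 'dest': 1}], copies=3)
assert len(nodes) == 6 and len(edges) == 3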
ec30e63bc7d82ab77b7951d4deec0d9c6778c243
|
emgapimetadata/management/commands/test-data.py
|
emgapimetadata/management/commands/test-data.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import csv
from django.core.management.base import BaseCommand
from emgapimetadata import models as m_models
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('importpath', type=str)
def handle(self, *args, **options):
self.populate(options)
def populate(self, options):
# check if path is valid
_path = options.get('importpath', None)
if os.path.exists(_path):
if os.path.isdir(_path):
for root, dirs, files in os.walk(_path, topdown=False):
for name in files:
accession = name.split("_")[0]
f = os.path.join(root, name)
if name.endswith("go"):
self.import_go(f, accession)
# TODO: is file get dir:
elif os.path.isfile(_path):
raise NotImplemented("Give path to directory.")
else:
raise NotImplemented("Path doesn't exist.")
def import_go(self, f, accession):
with open(f, newline='') as fcsv:
reader = csv.reader(fcsv)
run = m_models.Run()
run.accession = "ERR700147"
run.pipeline_version = "1.0"
for row in reader:
try:
ann = m_models.Annotation(
accession=row[0],
description=row[1],
lineage=row[2],
).save()
                except Exception:  # the annotation already exists; fetch it instead
ann = m_models.Annotation.objects.get(accession=row[0])
rann = m_models.RunAnnotation()
rann.count = row[3]
rann.annotation = ann
run.annotations.append(rann)
# ranns = m_models.RunAnnotation.objects.insert(ranns)
run.save()
|
Add command line tool to import metadata
|
Add command line tool to import metadata
|
Python
|
apache-2.0
|
EBI-Metagenomics/emgapi,EBI-Metagenomics/emgapi,EBI-Metagenomics/emgapi,EBI-Metagenomics/emgapi,EBI-Metagenomics/emgapi
|
Add command line tool to import metadata
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import csv
from django.core.management.base import BaseCommand
from emgapimetadata import models as m_models
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('importpath', type=str)
def handle(self, *args, **options):
self.populate(options)
def populate(self, options):
# check if path is valid
_path = options.get('importpath', None)
if os.path.exists(_path):
if os.path.isdir(_path):
for root, dirs, files in os.walk(_path, topdown=False):
for name in files:
accession = name.split("_")[0]
f = os.path.join(root, name)
if name.endswith("go"):
self.import_go(f, accession)
# TODO: is file get dir:
elif os.path.isfile(_path):
raise NotImplemented("Give path to directory.")
else:
raise NotImplemented("Path doesn't exist.")
def import_go(self, f, accession):
with open(f, newline='') as fcsv:
reader = csv.reader(fcsv)
run = m_models.Run()
run.accession = "ERR700147"
run.pipeline_version = "1.0"
for row in reader:
try:
ann = m_models.Annotation(
accession=row[0],
description=row[1],
lineage=row[2],
).save()
                except Exception:  # the annotation already exists; fetch it instead
ann = m_models.Annotation.objects.get(accession=row[0])
rann = m_models.RunAnnotation()
rann.count = row[3]
rann.annotation = ann
run.annotations.append(rann)
# ranns = m_models.RunAnnotation.objects.insert(ranns)
run.save()
|
<commit_before><commit_msg>Add command line tool to import metadata<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import csv
from django.core.management.base import BaseCommand
from emgapimetadata import models as m_models
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('importpath', type=str)
def handle(self, *args, **options):
self.populate(options)
def populate(self, options):
# check if path is valid
_path = options.get('importpath', None)
if os.path.exists(_path):
if os.path.isdir(_path):
for root, dirs, files in os.walk(_path, topdown=False):
for name in files:
accession = name.split("_")[0]
f = os.path.join(root, name)
if name.endswith("go"):
self.import_go(f, accession)
# TODO: is file get dir:
elif os.path.isfile(_path):
raise NotImplemented("Give path to directory.")
else:
raise NotImplemented("Path doesn't exist.")
def import_go(self, f, accession):
with open(f, newline='') as fcsv:
reader = csv.reader(fcsv)
run = m_models.Run()
run.accession = "ERR700147"
run.pipeline_version = "1.0"
for row in reader:
try:
ann = m_models.Annotation(
accession=row[0],
description=row[1],
lineage=row[2],
).save()
                except Exception:  # the annotation already exists; fetch it instead
ann = m_models.Annotation.objects.get(accession=row[0])
rann = m_models.RunAnnotation()
rann.count = row[3]
rann.annotation = ann
run.annotations.append(rann)
# ranns = m_models.RunAnnotation.objects.insert(ranns)
run.save()
|
Add command line tool to import metadata#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import csv
from django.core.management.base import BaseCommand
from emgapimetadata import models as m_models
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('importpath', type=str)
def handle(self, *args, **options):
self.populate(options)
def populate(self, options):
# check if path is valid
_path = options.get('importpath', None)
if os.path.exists(_path):
if os.path.isdir(_path):
for root, dirs, files in os.walk(_path, topdown=False):
for name in files:
accession = name.split("_")[0]
f = os.path.join(root, name)
if name.endswith("go"):
self.import_go(f, accession)
# TODO: is file get dir:
elif os.path.isfile(_path):
raise NotImplemented("Give path to directory.")
else:
raise NotImplemented("Path doesn't exist.")
def import_go(self, f, accession):
with open(f, newline='') as fcsv:
reader = csv.reader(fcsv)
run = m_models.Run()
run.accession = "ERR700147"
run.pipeline_version = "1.0"
for row in reader:
try:
ann = m_models.Annotation(
accession=row[0],
description=row[1],
lineage=row[2],
).save()
                except Exception:  # the annotation already exists; fetch it instead
ann = m_models.Annotation.objects.get(accession=row[0])
rann = m_models.RunAnnotation()
rann.count = row[3]
rann.annotation = ann
run.annotations.append(rann)
# ranns = m_models.RunAnnotation.objects.insert(ranns)
run.save()
|
<commit_before><commit_msg>Add command line tool to import metadata<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import csv
from django.core.management.base import BaseCommand
from emgapimetadata import models as m_models
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('importpath', type=str)
def handle(self, *args, **options):
self.populate(options)
def populate(self, options):
# check if path is valid
_path = options.get('importpath', None)
if os.path.exists(_path):
if os.path.isdir(_path):
for root, dirs, files in os.walk(_path, topdown=False):
for name in files:
accession = name.split("_")[0]
f = os.path.join(root, name)
if name.endswith("go"):
self.import_go(f, accession)
# TODO: is file get dir:
elif os.path.isfile(_path):
raise NotImplemented("Give path to directory.")
else:
raise NotImplemented("Path doesn't exist.")
def import_go(self, f, accession):
with open(f, newline='') as fcsv:
reader = csv.reader(fcsv)
run = m_models.Run()
run.accession = "ERR700147"
run.pipeline_version = "1.0"
for row in reader:
try:
ann = m_models.Annotation(
accession=row[0],
description=row[1],
lineage=row[2],
).save()
                except Exception:  # the annotation already exists; fetch it instead
ann = m_models.Annotation.objects.get(accession=row[0])
rann = m_models.RunAnnotation()
rann.count = row[3]
rann.annotation = ann
run.annotations.append(rann)
# ranns = m_models.RunAnnotation.objects.insert(ranns)
run.save()
|
|
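The try/except in import_go above implements get-or-create by exception: saving a duplicate Annotation raises, and the handler falls back to fetching the existing document by accession. A small stand-in sketch of that pattern using an in-memory dict instead of a real mongoengine collection (the GO accession is only an example value):

# tiny in-memory stand-in for the annotation collection
_store = {}

def get_or_create_annotation(accession, description, lineage):
    """Return the annotation for `accession`, creating it on first sight."""
    if accession in _store:
        return _store[accession]
    ann = {'accession': accession, 'description': description, 'lineage': lineage}
    _store[accession] = ann
    return ann

a1 = get_or_create_annotation('GO:0008150', 'biological_process', 'root')
a2 = get_or_create_annotation('GO:0008150', 'ignored on re-import', 'root')
assert a1 is a2  # the duplicate lookup reuses the first document

Checking for existence up front, as here, avoids catching unrelated save errors the way a bare except does.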
688fd5fb5a4dcc754ab13ccc7db7d43f56088c71
|
tests/test_optimizer.py
|
tests/test_optimizer.py
|
from unittest import TestCase
import numpy as np
from chainer import cuda, Optimizer
from chainer.optimizer import _sqnorm
cuda.init()
class TestOptimizerUtility(TestCase):
def setUp(self):
# x is an arithmetic progression of length 6
# whose common difference is 0.5
self.x = np.linspace(-1.0, 1.5, num=6).astype(np.float32).reshape(2, 3)
self.a = np.array(2.0)
def test_sqnorm_cpu(self):
# \Sum_{n=0}^{5} (-1.0+0.5n)**2 = 4.75
self.assertAlmostEqual(_sqnorm(self.x), 4.75)
def test_sqnorm_scalar_cpu(self):
self.assertAlmostEqual(_sqnorm(self.a), 4)
def test_sqnorm_gpu(self):
x = cuda.to_gpu(self.x)
self.assertAlmostEqual(_sqnorm(x), 4.75)
def test_sqnorm_scalar_gpu(self):
a = cuda.to_gpu(self.a)
self.assertAlmostEqual(_sqnorm(a), 4)
class TestOptimizer(TestCase):
def setUp(self):
pass
|
Add unittest for utility functions of optimizer
|
Add unittest for utility functions of optimizer
|
Python
|
mit
|
muupan/chainer,1986ks/chainer,kikusu/chainer,wkentaro/chainer,tscohen/chainer,ytoyama/yans_chainer_hackathon,rezoo/chainer,delta2323/chainer,AlpacaDB/chainer,hvy/chainer,jnishi/chainer,hvy/chainer,keisuke-umezawa/chainer,cupy/cupy,cupy/cupy,minhpqn/chainer,niboshi/chainer,hvy/chainer,wavelets/chainer,chainer/chainer,keisuke-umezawa/chainer,wkentaro/chainer,ysekky/chainer,tereka114/chainer,tkerola/chainer,laysakura/chainer,umitanuki/chainer,hidenori-t/chainer,bayerj/chainer,elviswf/chainer,okuta/chainer,niboshi/chainer,wkentaro/chainer,ikasumi/chainer,cupy/cupy,kikusu/chainer,keisuke-umezawa/chainer,niboshi/chainer,yanweifu/chainer,AlpacaDB/chainer,woodshop/complex-chainer,kashif/chainer,kuwa32/chainer,chainer/chainer,sou81821/chainer,tigerneil/chainer,benob/chainer,jnishi/chainer,cupy/cupy,sinhrks/chainer,truongdq/chainer,jnishi/chainer,ktnyt/chainer,aonotas/chainer,anaruse/chainer,sinhrks/chainer,kiyukuta/chainer,truongdq/chainer,wkentaro/chainer,niboshi/chainer,keisuke-umezawa/chainer,hvy/chainer,t-abe/chainer,jfsantos/chainer,cemoody/chainer,woodshop/chainer,jnishi/chainer,masia02/chainer,chainer/chainer,ronekko/chainer,muupan/chainer,ktnyt/chainer,benob/chainer,chainer/chainer,t-abe/chainer,okuta/chainer,pfnet/chainer,okuta/chainer,ktnyt/chainer,ktnyt/chainer,Kaisuke5/chainer,okuta/chainer
|
Add unittest for utility functions of optimizer
|
from unittest import TestCase
import numpy as np
from chainer import cuda, Optimizer
from chainer.optimizer import _sqnorm
cuda.init()
class TestOptimizerUtility(TestCase):
def setUp(self):
# x is an arithmetic progression of length 6
# whose common difference is 0.5
self.x = np.linspace(-1.0, 1.5, num=6).astype(np.float32).reshape(2, 3)
self.a = np.array(2.0)
def test_sqnorm_cpu(self):
# \Sum_{n=0}^{5} (-1.0+0.5n)**2 = 4.75
self.assertAlmostEqual(_sqnorm(self.x), 4.75)
def test_sqnorm_scalar_cpu(self):
self.assertAlmostEqual(_sqnorm(self.a), 4)
def test_sqnorm_gpu(self):
x = cuda.to_gpu(self.x)
self.assertAlmostEqual(_sqnorm(x), 4.75)
def test_sqnorm_scalar_gpu(self):
a = cuda.to_gpu(self.a)
self.assertAlmostEqual(_sqnorm(a), 4)
class TestOptimizer(TestCase):
def setUp(self):
pass
|
<commit_before><commit_msg>Add unittest for utility functions of optimizer<commit_after>
|
from unittest import TestCase
import numpy as np
from chainer import cuda, Optimizer
from chainer.optimizer import _sqnorm
cuda.init()
class TestOptimizerUtility(TestCase):
def setUp(self):
# x is an arithmetic progression of length 6
# whose common difference is 0.5
self.x = np.linspace(-1.0, 1.5, num=6).astype(np.float32).reshape(2, 3)
self.a = np.array(2.0)
def test_sqnorm_cpu(self):
# \Sum_{n=0}^{5} (-1.0+0.5n)**2 = 4.75
self.assertAlmostEqual(_sqnorm(self.x), 4.75)
def test_sqnorm_scalar_cpu(self):
self.assertAlmostEqual(_sqnorm(self.a), 4)
def test_sqnorm_gpu(self):
x = cuda.to_gpu(self.x)
self.assertAlmostEqual(_sqnorm(x), 4.75)
def test_sqnorm_scalar_gpu(self):
a = cuda.to_gpu(self.a)
self.assertAlmostEqual(_sqnorm(a), 4)
class TestOptimizer(TestCase):
def setUp(self):
pass
|
Add unittest for utility functions of optimizerfrom unittest import TestCase
import numpy as np
from chainer import cuda, Optimizer
from chainer.optimizer import _sqnorm
cuda.init()
class TestOptimizerUtility(TestCase):
def setUp(self):
# x is an arithmetic progression of length 6
# whose common difference is 0.5
self.x = np.linspace(-1.0, 1.5, num=6).astype(np.float32).reshape(2, 3)
self.a = np.array(2.0)
def test_sqnorm_cpu(self):
# \Sum_{n=0}^{5} (-1.0+0.5n)**2 = 4.75
self.assertAlmostEqual(_sqnorm(self.x), 4.75)
def test_sqnorm_scalar_cpu(self):
self.assertAlmostEqual(_sqnorm(self.a), 4)
def test_sqnorm_gpu(self):
x = cuda.to_gpu(self.x)
self.assertAlmostEqual(_sqnorm(x), 4.75)
def test_sqnorm_scalar_gpu(self):
a = cuda.to_gpu(self.a)
self.assertAlmostEqual(_sqnorm(a), 4)
class TestOptimizer(TestCase):
def setUp(self):
pass
|
<commit_before><commit_msg>Add unittest for utility functions of optimizer<commit_after>from unittest import TestCase
import numpy as np
from chainer import cuda, Optimizer
from chainer.optimizer import _sqnorm
cuda.init()
class TestOptimizerUtility(TestCase):
def setUp(self):
# x is an arithmetic progression of length 6
# whose common difference is 0.5
self.x = np.linspace(-1.0, 1.5, num=6).astype(np.float32).reshape(2, 3)
self.a = np.array(2.0)
def test_sqnorm_cpu(self):
# \Sum_{n=0}^{5} (-1.0+0.5n)**2 = 4.75
self.assertAlmostEqual(_sqnorm(self.x), 4.75)
def test_sqnorm_scalar_cpu(self):
self.assertAlmostEqual(_sqnorm(self.a), 4)
def test_sqnorm_gpu(self):
x = cuda.to_gpu(self.x)
self.assertAlmostEqual(_sqnorm(x), 4.75)
def test_sqnorm_scalar_gpu(self):
a = cuda.to_gpu(self.a)
self.assertAlmostEqual(_sqnorm(a), 4)
class TestOptimizer(TestCase):
def setUp(self):
pass
|
|
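The 4.75 in the test comment checks out by hand: linspace(-1.0, 1.5, num=6) yields [-1.0, -0.5, 0.0, 0.5, 1.0, 1.5], and the squared entries sum to 1 + 0.25 + 0 + 0.25 + 1 + 2.25 = 4.75. The quantity under test is a squared L2 norm; a plain NumPy sketch of the CPU-side idea (the real chainer _sqnorm also handles GPU arrays, which this deliberately skips):

import numpy as np

def sqnorm(x):
    # flatten so scalars, vectors and matrices are all treated uniformly
    flat = np.asarray(x, dtype=np.float64).ravel()
    return float(flat.dot(flat))

assert abs(sqnorm(np.linspace(-1.0, 1.5, num=6)) - 4.75) < 1e-12
assert sqnorm(np.array(2.0)) == 4.0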
bffbc23b730081b5cd071a50c2755ecee4adfe99
|
tests/test_create_simple.py
|
tests/test_create_simple.py
|
import npc
import pytest
import os
@pytest.fixture
def characters(campaign, prefs):
os.mkdir(prefs.get('paths.characters'))
return campaign
@pytest.fixture(params=['human', 'fetch', 'goblin'])
def commandline(request):
return ['g', 'testmann', request.param, '-g', 'fork', 'spoon']
def test_missing_template(parser, prefs, campaign):
args = parser.parse_args(['g', 'noname', 'notfound'])
result = npc.commands.create_simple(args, prefs)
assert not result.success
assert result.errcode == 7
def test_creates_character(parser, prefs, characters, commandline):
args = parser.parse_args(commandline)
result = npc.commands.create_simple(args, prefs)
assert result.success
character = characters.join(prefs.get('paths.characters'), 'testmann.nwod')
assert character.check()
def test_duplicate_character(parser, prefs, characters, commandline):
args = parser.parse_args(commandline)
npc.commands.create_simple(args, prefs)
result = npc.commands.create_simple(args, prefs)
assert not result.success
assert result.errcode == 1
def test_adds_group_tags(parser, prefs, characters, commandline):
args = parser.parse_args(commandline)
npc.commands.create_simple(args, prefs)
character = characters.join(prefs.get('paths.characters'), 'testmann.nwod')
data = next(c for c in npc.parser.get_characters([str(character)], []))
assert data['group'] == ['fork', 'spoon']
|
Add tests for simple char creation
|
Add tests for simple char creation
|
Python
|
mit
|
aurule/npc,aurule/npc
|
Add tests for simple char creation
|
import npc
import pytest
import os
@pytest.fixture
def characters(campaign, prefs):
os.mkdir(prefs.get('paths.characters'))
return campaign
@pytest.fixture(params=['human', 'fetch', 'goblin'])
def commandline(request):
return ['g', 'testmann', request.param, '-g', 'fork', 'spoon']
def test_missing_template(parser, prefs, campaign):
args = parser.parse_args(['g', 'noname', 'notfound'])
result = npc.commands.create_simple(args, prefs)
assert not result.success
assert result.errcode == 7
def test_creates_character(parser, prefs, characters, commandline):
args = parser.parse_args(commandline)
result = npc.commands.create_simple(args, prefs)
assert result.success
character = characters.join(prefs.get('paths.characters'), 'testmann.nwod')
assert character.check()
def test_duplicate_character(parser, prefs, characters, commandline):
args = parser.parse_args(commandline)
npc.commands.create_simple(args, prefs)
result = npc.commands.create_simple(args, prefs)
assert not result.success
assert result.errcode == 1
def test_adds_group_tags(parser, prefs, characters, commandline):
args = parser.parse_args(commandline)
npc.commands.create_simple(args, prefs)
character = characters.join(prefs.get('paths.characters'), 'testmann.nwod')
data = next(c for c in npc.parser.get_characters([str(character)], []))
assert data['group'] == ['fork', 'spoon']
|
<commit_before><commit_msg>Add tests for simple char creation<commit_after>
|
import npc
import pytest
import os
@pytest.fixture
def characters(campaign, prefs):
os.mkdir(prefs.get('paths.characters'))
return campaign
@pytest.fixture(params=['human', 'fetch', 'goblin'])
def commandline(request):
return ['g', 'testmann', request.param, '-g', 'fork', 'spoon']
def test_missing_template(parser, prefs, campaign):
args = parser.parse_args(['g', 'noname', 'notfound'])
result = npc.commands.create_simple(args, prefs)
assert not result.success
assert result.errcode == 7
def test_creates_character(parser, prefs, characters, commandline):
args = parser.parse_args(commandline)
result = npc.commands.create_simple(args, prefs)
assert result.success
character = characters.join(prefs.get('paths.characters'), 'testmann.nwod')
assert character.check()
def test_duplicate_character(parser, prefs, characters, commandline):
args = parser.parse_args(commandline)
npc.commands.create_simple(args, prefs)
result = npc.commands.create_simple(args, prefs)
assert not result.success
assert result.errcode == 1
def test_adds_group_tags(parser, prefs, characters, commandline):
args = parser.parse_args(commandline)
npc.commands.create_simple(args, prefs)
character = characters.join(prefs.get('paths.characters'), 'testmann.nwod')
data = next(c for c in npc.parser.get_characters([str(character)], []))
assert data['group'] == ['fork', 'spoon']
|
Add tests for simple char creationimport npc
import pytest
import os
@pytest.fixture
def characters(campaign, prefs):
os.mkdir(prefs.get('paths.characters'))
return campaign
@pytest.fixture(params=['human', 'fetch', 'goblin'])
def commandline(request):
return ['g', 'testmann', request.param, '-g', 'fork', 'spoon']
def test_missing_template(parser, prefs, campaign):
args = parser.parse_args(['g', 'noname', 'notfound'])
result = npc.commands.create_simple(args, prefs)
assert not result.success
assert result.errcode == 7
def test_creates_character(parser, prefs, characters, commandline):
args = parser.parse_args(commandline)
result = npc.commands.create_simple(args, prefs)
assert result.success
character = characters.join(prefs.get('paths.characters'), 'testmann.nwod')
assert character.check()
def test_duplicate_character(parser, prefs, characters, commandline):
args = parser.parse_args(commandline)
npc.commands.create_simple(args, prefs)
result = npc.commands.create_simple(args, prefs)
assert not result.success
assert result.errcode == 1
def test_adds_group_tags(parser, prefs, characters, commandline):
args = parser.parse_args(commandline)
npc.commands.create_simple(args, prefs)
character = characters.join(prefs.get('paths.characters'), 'testmann.nwod')
data = next(c for c in npc.parser.get_characters([str(character)], []))
assert data['group'] == ['fork', 'spoon']
|
<commit_before><commit_msg>Add tests for simple char creation<commit_after>import npc
import pytest
import os
@pytest.fixture
def characters(campaign, prefs):
os.mkdir(prefs.get('paths.characters'))
return campaign
@pytest.fixture(params=['human', 'fetch', 'goblin'])
def commandline(request):
return ['g', 'testmann', request.param, '-g', 'fork', 'spoon']
def test_missing_template(parser, prefs, campaign):
args = parser.parse_args(['g', 'noname', 'notfound'])
result = npc.commands.create_simple(args, prefs)
assert not result.success
assert result.errcode == 7
def test_creates_character(parser, prefs, characters, commandline):
args = parser.parse_args(commandline)
result = npc.commands.create_simple(args, prefs)
assert result.success
character = characters.join(prefs.get('paths.characters'), 'testmann.nwod')
assert character.check()
def test_duplicate_character(parser, prefs, characters, commandline):
args = parser.parse_args(commandline)
npc.commands.create_simple(args, prefs)
result = npc.commands.create_simple(args, prefs)
assert not result.success
assert result.errcode == 1
def test_adds_group_tags(parser, prefs, characters, commandline):
args = parser.parse_args(commandline)
npc.commands.create_simple(args, prefs)
character = characters.join(prefs.get('paths.characters'), 'testmann.nwod')
data = next(c for c in npc.parser.get_characters([str(character)], []))
assert data['group'] == ['fork', 'spoon']
|
|
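The commandline fixture above relies on @pytest.fixture(params=...): pytest instantiates the fixture once per parameter and re-runs every test that requests it, which is how each test in the record runs against all three character templates. A self-contained illustration of that mechanism, outside the npc suite:

import pytest

@pytest.fixture(params=['human', 'fetch', 'goblin'])
def template(request):
    # request.param takes each value in turn; tests using this fixture run 3 times
    return request.param

def test_template_is_known(template):
    assert template in {'human', 'fetch', 'goblin'}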
619dcc460dc54dc06555b6bf880ed1c50b3d5dda
|
scripts/two_way_temperature_conversion.py
|
scripts/two_way_temperature_conversion.py
|
# Temperature Conversion Program (Celcius-Fahrenheit / Fahrenheit-Celcius)
# Display program welcome
print('This program will convert temperatures (Fahrenheit/Celcius)')
print('Enter (F) to convert Fahrenheit to Celcius')
print('Enter (C) to convert Celcius to Fahrenheit')
# Get Temperature to convert
which = raw_input('Enter selection: ')
temp = int(raw_input('Enter temperature to convert: '))
# Determine temperature conversion needed and display results
if which == 'F' or which == 'f':
converted_temp = (temp - 32) * 5.0/9.0
print(str(temp) + ' degrees Fahrenheit equals ' + str(converted_temp) + ' degrees Celcius')
else:
converted_temp = (9.0/5.0 * temp) + 32
print(str(temp) + ' degrees Celcius equals ' + str(converted_temp) + ' degrees Fahrenheit')
|
Apply it 1 from lecture 4 added
|
Apply it 1 from lecture 4 added
|
Python
|
mit
|
NAU-CFL/Python_Learning_Source
|
Apply it 1 from lecture 4 added
|
# Temperature Conversion Program (Celcius-Fahrenheit / Fahrenheit-Celcius)
# Display program welcome
print('This program will convert temperatures (Fahrenheit/Celcius)')
print('Enter (F) to convert Fahrenheit to Celcius')
print('Enter (C) to convert Celcius to Fahrenheit')
# Get Temperature to convert
which = raw_input('Enter selection: ')
temp = int(raw_input('Enter temperature to convert: '))
# Determine temperature conversion needed and display results
if which == 'F' or which == 'f':
converted_temp = (temp - 32) * 5.0/9.0
print(str(temp) + ' degrees Fahrenheit equals ' + str(converted_temp) + ' degrees Celcius')
else:
converted_temp = (9.0/5.0 * temp) + 32
print(str(temp) + ' degrees Celcius equals ' + str(converted_temp) + ' degrees Fahrenheit')
|
<commit_before><commit_msg>Apply it 1 from lecture 4 added<commit_after>
|
# Temperature Conversion Program (Celcius-Fahrenheit / Fahrenheit-Celcius)
# Display program welcome
print('This program will convert temperatures (Fahrenheit/Celcius)')
print('Enter (F) to convert Fahrenheit to Celcius')
print('Enter (C) to convert Celcius to Fahrenheit')
# Get Temperature to convert
which = raw_input('Enter selection: ')
temp = int(raw_input('Enter temperature to convert: '))
# Determine temperature conversion needed and display results
if which == 'F' or which == 'f':
converted_temp = (temp - 32) * 5.0/9.0
print(str(temp) + ' degrees Fahrenheit equals ' + str(converted_temp) + ' degrees Celcius')
else:
converted_temp = (9.0/5.0 * temp) + 32
print(str(temp) + ' degrees Celcius equals ' + str(converted_temp) + ' degrees Fahrenheit')
|
Apply it 1 from lecture 4 added# Temperature Conversion Program (Celcius-Fahrenheit / Fahrenheit-Celcius)
# Display program welcome
print('This program will convert temperatures (Fahrenheit/Celcius)')
print('Enter (F) to convert Fahrenheit to Celcius')
print('Enter (C) to convert Celcius to Fahrenheit')
# Get Temperature to convert
which = raw_input('Enter selection: ')
temp = int(raw_input('Enter temperature to convert: '))
# Determine temperature conversion needed and display results
if which == 'F' or which == 'f':
converted_temp = (temp - 32) * 5.0/9.0
print(str(temp) + ' degrees Fahrenheit equals ' + str(converted_temp) + ' degrees Celcius')
else:
converted_temp = (9.0/5.0 * temp) + 32
print(str(temp) + ' degrees Celcius equals ' + str(converted_temp) + ' degrees Fahrenheit')
|
<commit_before><commit_msg>Apply it 1 from lecture 4 added<commit_after># Temperature Conversion Program (Celcius-Fahrenheit / Fahrenheit-Celcius)
# Display program welcome
print('This program will convert temperatures (Fahrenheit/Celcius)')
print('Enter (F) to convert Fahrenheit to Celcius')
print('Enter (C) to convert Celcius to Fahrenheit')
# Get Temperature to convert
which = raw_input('Enter selection: ')
temp = int(raw_input('Enter temperature to convert: '))
# Determine temperature conversion needed and display results
if which == 'F' or which == 'f':
converted_temp = (temp - 32) * 5.0/9.0
print(str(temp) + ' degrees Fahrenheit equals ' + str(converted_temp) + ' degrees Celcius')
else:
converted_temp = (9.0/5.0 * temp) + 32
print(str(temp) + ' degrees Celcius equals ' + str(converted_temp) + ' degrees Fahrenheit')
|
|
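One quirk in the script above: any selection other than 'F' or 'f' silently falls into the Celsius-to-Fahrenheit branch, and raw_input ties it to Python 2. A small Python 3 sketch of the same two-way conversion with explicit validation (the function name is chosen here for illustration):

def convert_temperature(value, scale):
    """Convert `value` from the given scale ('F' or 'C') to the other scale."""
    scale = scale.upper()
    if scale == 'F':
        return (value - 32) * 5.0 / 9.0
    if scale == 'C':
        return value * 9.0 / 5.0 + 32
    raise ValueError("scale must be 'F' or 'C', got %r" % scale)

assert convert_temperature(212, 'F') == 100.0
assert convert_temperature(100, 'C') == 212.0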
0ef78781d0c4048f2fbe26c05cf81b3ec4f59d26
|
virtool/tests/test_users.py
|
virtool/tests/test_users.py
|
import hashlib
from virtool.utils import random_alphanumeric
from virtool.users import hash_password, check_password, check_legacy_password
class TestHashPassword:
def test_basic(self):
assert check_password("hello_world", hash_password("hello_world"))
class TestLegacyHashPassword:
def test_basic(self):
salt = random_alphanumeric(24)
hashed = hashlib.sha512(salt.encode("utf-8") + "hello_world".encode("utf-8")).hexdigest()
assert check_legacy_password("hello_world", salt, hashed)
|
Add tests for password hashing and checking
|
Add tests for password hashing and checking
|
Python
|
mit
|
virtool/virtool,virtool/virtool,igboyes/virtool,igboyes/virtool
|
Add tests for password hashing and checking
|
import hashlib
from virtool.utils import random_alphanumeric
from virtool.users import hash_password, check_password, check_legacy_password
class TestHashPassword:
def test_basic(self):
assert check_password("hello_world", hash_password("hello_world"))
class TestLegacyHashPassword:
def test_basic(self):
salt = random_alphanumeric(24)
hashed = hashlib.sha512(salt.encode("utf-8") + "hello_world".encode("utf-8")).hexdigest()
assert check_legacy_password("hello_world", salt, hashed)
|
<commit_before><commit_msg>Add tests for password hashing and checking<commit_after>
|
import hashlib
from virtool.utils import random_alphanumeric
from virtool.users import hash_password, check_password, check_legacy_password
class TestHashPassword:
def test_basic(self):
assert check_password("hello_world", hash_password("hello_world"))
class TestLegacyHashPassword:
def test_basic(self):
salt = random_alphanumeric(24)
hashed = hashlib.sha512(salt.encode("utf-8") + "hello_world".encode("utf-8")).hexdigest()
assert check_legacy_password("hello_world", salt, hashed)
|
Add tests for password hashing and checkingimport hashlib
from virtool.utils import random_alphanumeric
from virtool.users import hash_password, check_password, check_legacy_password
class TestHashPassword:
def test_basic(self):
assert check_password("hello_world", hash_password("hello_world"))
class TestLegacyHashPassword:
def test_basic(self):
salt = random_alphanumeric(24)
hashed = hashlib.sha512(salt.encode("utf-8") + "hello_world".encode("utf-8")).hexdigest()
assert check_legacy_password("hello_world", salt, hashed)
|
<commit_before><commit_msg>Add tests for password hashing and checking<commit_after>import hashlib
from virtool.utils import random_alphanumeric
from virtool.users import hash_password, check_password, check_legacy_password
class TestHashPassword:
def test_basic(self):
assert check_password("hello_world", hash_password("hello_world"))
class TestLegacyHashPassword:
def test_basic(self):
salt = random_alphanumeric(24)
hashed = hashlib.sha512(salt.encode("utf-8") + "hello_world".encode("utf-8")).hexdigest()
assert check_legacy_password("hello_world", salt, hashed)
|
|
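The legacy scheme being tested is a plain salted digest: sha512 over salt + password, compared as hex. A minimal sketch of both sides of that check (not virtool's actual implementation, which lives in virtool.users):

import hashlib

def legacy_hash(password, salt):
    # legacy scheme: sha512 over the salt prepended to the password
    return hashlib.sha512(salt.encode('utf-8') + password.encode('utf-8')).hexdigest()

def check_legacy(password, salt, hashed):
    return legacy_hash(password, salt) == hashed

salt = 'abc123'
assert check_legacy('hello_world', salt, legacy_hash('hello_world', salt))

For production verification code, hmac.compare_digest would be the safer comparison; plain == is kept here only to mirror the scheme under test.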
edd8ac2d77b747cffbcf702e71f2633a148d64c6
|
wagtail/wagtailcore/hooks.py
|
wagtail/wagtailcore/hooks.py
|
from django.conf import settings
try:
from importlib import import_module
except ImportError:
# for Python 2.6, fall back on django.utils.importlib (deprecated as of Django 1.7)
from django.utils.importlib import import_module
_hooks = {}
def register(hook_name, fn=None):
"""
Register hook for ``hook_name``. Can be used as a decorator::
@register('hook_name')
def my_hook(...):
pass
or as a function call::
def my_hook(...):
pass
register('hook_name', my_hook)
"""
# Pretend to be a decorator if fn is not supplied
if fn is None:
return lambda fn: register(hook_name, fn)
if hook_name not in _hooks:
_hooks[hook_name] = []
_hooks[hook_name].append(fn)
_searched_for_hooks = False
def search_for_hooks():
global _searched_for_hooks
if not _searched_for_hooks:
for app_module in settings.INSTALLED_APPS:
try:
import_module('%s.wagtail_hooks' % app_module)
except ImportError:
continue
_searched_for_hooks = True
def get_hooks(hook_name):
search_for_hooks()
return _hooks.get(hook_name, [])
|
from django.conf import settings
try:
from importlib import import_module
except ImportError:
# for Python 2.6, fall back on django.utils.importlib (deprecated as of Django 1.7)
from django.utils.importlib import import_module
_hooks = {}
def register(hook_name, fn=None):
"""
Register hook for ``hook_name``. Can be used as a decorator::
@register('hook_name')
def my_hook(...):
pass
or as a function call::
def my_hook(...):
pass
register('hook_name', my_hook)
"""
# Pretend to be a decorator if fn is not supplied
if fn is None:
def decorator(fn):
register(hook_name, fn)
return fn
return decorator
if hook_name not in _hooks:
_hooks[hook_name] = []
_hooks[hook_name].append(fn)
_searched_for_hooks = False
def search_for_hooks():
global _searched_for_hooks
if not _searched_for_hooks:
for app_module in settings.INSTALLED_APPS:
try:
import_module('%s.wagtail_hooks' % app_module)
except ImportError:
continue
_searched_for_hooks = True
def get_hooks(hook_name):
search_for_hooks()
return _hooks.get(hook_name, [])
|
Return the function again from the hook decorator
|
Return the function again from the hook decorator
The decorator variant of hook registration did not return anything,
meaning that the decorated function would end up being `None`. This was
not noticed, as the functions are rarely called manually, as opposed to
being invoked via the hook.
|
Python
|
bsd-3-clause
|
kaedroho/wagtail,willcodefortea/wagtail,JoshBarr/wagtail,takeshineshiro/wagtail,torchbox/wagtail,dresiu/wagtail,m-sanders/wagtail,jnns/wagtail,bjesus/wagtail,jorge-marques/wagtail,nilnvoid/wagtail,timorieber/wagtail,rsalmaso/wagtail,Toshakins/wagtail,tangentlabs/wagtail,thenewguy/wagtail,benjaoming/wagtail,Toshakins/wagtail,inonit/wagtail,mixxorz/wagtail,stevenewey/wagtail,nealtodd/wagtail,kurtrwall/wagtail,taedori81/wagtail,JoshBarr/wagtail,kurtrwall/wagtail,FlipperPA/wagtail,m-sanders/wagtail,torchbox/wagtail,mixxorz/wagtail,100Shapes/wagtail,takeflight/wagtail,kaedroho/wagtail,rsalmaso/wagtail,taedori81/wagtail,wagtail/wagtail,Klaudit/wagtail,benemery/wagtail,jordij/wagtail,gogobook/wagtail,quru/wagtail,janusnic/wagtail,chrxr/wagtail,torchbox/wagtail,gasman/wagtail,gasman/wagtail,chimeno/wagtail,mephizzle/wagtail,janusnic/wagtail,kurtw/wagtail,marctc/wagtail,kaedroho/wagtail,takeflight/wagtail,gasman/wagtail,zerolab/wagtail,Klaudit/wagtail,kaedroho/wagtail,jorge-marques/wagtail,dresiu/wagtail,tangentlabs/wagtail,chrxr/wagtail,iansprice/wagtail,rjsproxy/wagtail,gogobook/wagtail,mikedingjan/wagtail,thenewguy/wagtail,takeshineshiro/wagtail,darith27/wagtail,rv816/wagtail,iho/wagtail,inonit/wagtail,mikedingjan/wagtail,nimasmi/wagtail,davecranwell/wagtail,jorge-marques/wagtail,hamsterbacke23/wagtail,stevenewey/wagtail,nealtodd/wagtail,mephizzle/wagtail,rsalmaso/wagtail,timorieber/wagtail,nutztherookie/wagtail,hanpama/wagtail,100Shapes/wagtail,rsalmaso/wagtail,timorieber/wagtail,marctc/wagtail,wagtail/wagtail,KimGlazebrook/wagtail-experiment,benemery/wagtail,zerolab/wagtail,nealtodd/wagtail,KimGlazebrook/wagtail-experiment,jordij/wagtail,hamsterbacke23/wagtail,nimasmi/wagtail,zerolab/wagtail,100Shapes/wagtail,mjec/wagtail,kurtrwall/wagtail,kurtw/wagtail,marctc/wagtail,iho/wagtail,Toshakins/wagtail,iansprice/wagtail,torchbox/wagtail,Pennebaker/wagtail,tangentlabs/wagtail,mayapurmedia/wagtail,benjaoming/wagtail,mephizzle/wagtail,m-sanders/wagtail,gogobook/wagtail,rv816/wagtail,dresiu/wagtail,quru/wagtail,willcodefortea/wagtail,KimGlazebrook/wagtail-experiment,nutztherookie/wagtail,mephizzle/wagtail,marctc/wagtail,Klaudit/wagtail,mjec/wagtail,jorge-marques/wagtail,zerolab/wagtail,dresiu/wagtail,chimeno/wagtail,jordij/wagtail,Pennebaker/wagtail,Toshakins/wagtail,jordij/wagtail,chrxr/wagtail,rjsproxy/wagtail,hanpama/wagtail,davecranwell/wagtail,gasman/wagtail,thenewguy/wagtail,jnns/wagtail,dresiu/wagtail,Tivix/wagtail,chimeno/wagtail,inonit/wagtail,rjsproxy/wagtail,JoshBarr/wagtail,hamsterbacke23/wagtail,bjesus/wagtail,nilnvoid/wagtail,iho/wagtail,WQuanfeng/wagtail,chimeno/wagtail,FlipperPA/wagtail,janusnic/wagtail,kurtw/wagtail,stevenewey/wagtail,mixxorz/wagtail,benjaoming/wagtail,FlipperPA/wagtail,willcodefortea/wagtail,taedori81/wagtail,mayapurmedia/wagtail,kaedroho/wagtail,bjesus/wagtail,FlipperPA/wagtail,willcodefortea/wagtail,hanpama/wagtail,jnns/wagtail,kurtrwall/wagtail,bjesus/wagtail,serzans/wagtail,serzans/wagtail,nilnvoid/wagtail,takeflight/wagtail,thenewguy/wagtail,quru/wagtail,Klaudit/wagtail,darith27/wagtail,quru/wagtail,nutztherookie/wagtail,zerolab/wagtail,nrsimha/wagtail,iho/wagtail,WQuanfeng/wagtail,wagtail/wagtail,janusnic/wagtail,rv816/wagtail,inonit/wagtail,WQuanfeng/wagtail,Tivix/wagtail,davecranwell/wagtail,mikedingjan/wagtail,benemery/wagtail,JoshBarr/wagtail,gasman/wagtail,davecranwell/wagtail,jorge-marques/wagtail,Tivix/wagtail,thenewguy/wagtail,darith27/wagtail,mayapurmedia/wagtail,iansprice/wagtail,kurtw/wagtail,m-sanders/wagtail,takeflight/wagtail,nrsimha/wagtail,nrsimha/wagtail,hanpama/wagtail,stevenewey/wagtail,chrxr/wagtail,jnns/wagtail,taedori81/wagtail,mjec/wagtail,mixxorz/wagtail,iansprice/wagtail,rsalmaso/wagtail,nealtodd/wagtail,mjec/wagtail,Pennebaker/wagtail,serzans/wagtail,Tivix/wagtail,nutztherookie/wagtail,rjsproxy/wagtail,wagtail/wagtail,benemery/wagtail,chimeno/wagtail,rv816/wagtail
|
from django.conf import settings
try:
from importlib import import_module
except ImportError:
# for Python 2.6, fall back on django.utils.importlib (deprecated as of Django 1.7)
from django.utils.importlib import import_module
_hooks = {}
def register(hook_name, fn=None):
"""
Register hook for ``hook_name``. Can be used as a decorator::
@register('hook_name')
def my_hook(...):
pass
or as a function call::
def my_hook(...):
pass
register('hook_name', my_hook)
"""
# Pretend to be a decorator if fn is not supplied
if fn is None:
return lambda fn: register(hook_name, fn)
if hook_name not in _hooks:
_hooks[hook_name] = []
_hooks[hook_name].append(fn)
_searched_for_hooks = False
def search_for_hooks():
global _searched_for_hooks
if not _searched_for_hooks:
for app_module in settings.INSTALLED_APPS:
try:
import_module('%s.wagtail_hooks' % app_module)
except ImportError:
continue
_searched_for_hooks = True
def get_hooks(hook_name):
search_for_hooks()
return _hooks.get(hook_name, [])
Return the function again from the hook decorator
The decorator variant of hook registration did not return anything,
meaning that the decorated function would end up being `None`. This was
not noticed, as the functions are rarely called manually, as opposed to
being invoked via the hook.
|
from django.conf import settings
try:
from importlib import import_module
except ImportError:
# for Python 2.6, fall back on django.utils.importlib (deprecated as of Django 1.7)
from django.utils.importlib import import_module
_hooks = {}
def register(hook_name, fn=None):
"""
Register hook for ``hook_name``. Can be used as a decorator::
@register('hook_name')
def my_hook(...):
pass
or as a function call::
def my_hook(...):
pass
register('hook_name', my_hook)
"""
# Pretend to be a decorator if fn is not supplied
if fn is None:
def decorator(fn):
register(hook_name, fn)
return fn
return decorator
if hook_name not in _hooks:
_hooks[hook_name] = []
_hooks[hook_name].append(fn)
_searched_for_hooks = False
def search_for_hooks():
global _searched_for_hooks
if not _searched_for_hooks:
for app_module in settings.INSTALLED_APPS:
try:
import_module('%s.wagtail_hooks' % app_module)
except ImportError:
continue
_searched_for_hooks = True
def get_hooks(hook_name):
search_for_hooks()
return _hooks.get(hook_name, [])
|
<commit_before>from django.conf import settings
try:
from importlib import import_module
except ImportError:
# for Python 2.6, fall back on django.utils.importlib (deprecated as of Django 1.7)
from django.utils.importlib import import_module
_hooks = {}
def register(hook_name, fn=None):
"""
Register hook for ``hook_name``. Can be used as a decorator::
@register('hook_name')
def my_hook(...):
pass
or as a function call::
def my_hook(...):
pass
register('hook_name', my_hook)
"""
# Pretend to be a decorator if fn is not supplied
if fn is None:
return lambda fn: register(hook_name, fn)
if hook_name not in _hooks:
_hooks[hook_name] = []
_hooks[hook_name].append(fn)
_searched_for_hooks = False
def search_for_hooks():
global _searched_for_hooks
if not _searched_for_hooks:
for app_module in settings.INSTALLED_APPS:
try:
import_module('%s.wagtail_hooks' % app_module)
except ImportError:
continue
_searched_for_hooks = True
def get_hooks(hook_name):
search_for_hooks()
return _hooks.get(hook_name, [])
<commit_msg>Return the function again from the hook decorator
The decorator variant of hook registration did not return anything,
meaning that the decorated function would end up being `None`. This was
not noticed, as the functions are rarely called manually, as opposed to
being invoked via the hook.<commit_after>
|
from django.conf import settings
try:
from importlib import import_module
except ImportError:
# for Python 2.6, fall back on django.utils.importlib (deprecated as of Django 1.7)
from django.utils.importlib import import_module
_hooks = {}
def register(hook_name, fn=None):
"""
Register hook for ``hook_name``. Can be used as a decorator::
@register('hook_name')
def my_hook(...):
pass
or as a function call::
def my_hook(...):
pass
register('hook_name', my_hook)
"""
# Pretend to be a decorator if fn is not supplied
if fn is None:
def decorator(fn):
register(hook_name, fn)
return fn
return decorator
if hook_name not in _hooks:
_hooks[hook_name] = []
_hooks[hook_name].append(fn)
_searched_for_hooks = False
def search_for_hooks():
global _searched_for_hooks
if not _searched_for_hooks:
for app_module in settings.INSTALLED_APPS:
try:
import_module('%s.wagtail_hooks' % app_module)
except ImportError:
continue
_searched_for_hooks = True
def get_hooks(hook_name):
search_for_hooks()
return _hooks.get(hook_name, [])
|
from django.conf import settings
try:
from importlib import import_module
except ImportError:
# for Python 2.6, fall back on django.utils.importlib (deprecated as of Django 1.7)
from django.utils.importlib import import_module
_hooks = {}
def register(hook_name, fn=None):
"""
Register hook for ``hook_name``. Can be used as a decorator::
@register('hook_name')
def my_hook(...):
pass
or as a function call::
def my_hook(...):
pass
register('hook_name', my_hook)
"""
# Pretend to be a decorator if fn is not supplied
if fn is None:
return lambda fn: register(hook_name, fn)
if hook_name not in _hooks:
_hooks[hook_name] = []
_hooks[hook_name].append(fn)
_searched_for_hooks = False
def search_for_hooks():
global _searched_for_hooks
if not _searched_for_hooks:
for app_module in settings.INSTALLED_APPS:
try:
import_module('%s.wagtail_hooks' % app_module)
except ImportError:
continue
_searched_for_hooks = True
def get_hooks(hook_name):
search_for_hooks()
return _hooks.get(hook_name, [])
Return the function again from the hook decorator
The decorator variant of hook registration did not return anything,
meaning that the decorated function would end up being `None`. This was
not noticed, as the functions are rarely called manually, as opposed to
being invoked via the hook.from django.conf import settings
try:
from importlib import import_module
except ImportError:
# for Python 2.6, fall back on django.utils.importlib (deprecated as of Django 1.7)
from django.utils.importlib import import_module
_hooks = {}
def register(hook_name, fn=None):
"""
Register hook for ``hook_name``. Can be used as a decorator::
@register('hook_name')
def my_hook(...):
pass
or as a function call::
def my_hook(...):
pass
register('hook_name', my_hook)
"""
# Pretend to be a decorator if fn is not supplied
if fn is None:
def decorator(fn):
register(hook_name, fn)
return fn
return decorator
if hook_name not in _hooks:
_hooks[hook_name] = []
_hooks[hook_name].append(fn)
_searched_for_hooks = False
def search_for_hooks():
global _searched_for_hooks
if not _searched_for_hooks:
for app_module in settings.INSTALLED_APPS:
try:
import_module('%s.wagtail_hooks' % app_module)
except ImportError:
continue
_searched_for_hooks = True
def get_hooks(hook_name):
search_for_hooks()
return _hooks.get(hook_name, [])
|
<commit_before>from django.conf import settings
try:
from importlib import import_module
except ImportError:
# for Python 2.6, fall back on django.utils.importlib (deprecated as of Django 1.7)
from django.utils.importlib import import_module
_hooks = {}
def register(hook_name, fn=None):
"""
Register hook for ``hook_name``. Can be used as a decorator::
@register('hook_name')
def my_hook(...):
pass
or as a function call::
def my_hook(...):
pass
register('hook_name', my_hook)
"""
# Pretend to be a decorator if fn is not supplied
if fn is None:
return lambda fn: register(hook_name, fn)
if hook_name not in _hooks:
_hooks[hook_name] = []
_hooks[hook_name].append(fn)
_searched_for_hooks = False
def search_for_hooks():
global _searched_for_hooks
if not _searched_for_hooks:
for app_module in settings.INSTALLED_APPS:
try:
import_module('%s.wagtail_hooks' % app_module)
except ImportError:
continue
_searched_for_hooks = True
def get_hooks(hook_name):
search_for_hooks()
return _hooks.get(hook_name, [])
<commit_msg>Return the function again from the hook decorator
The decorator variant of hook registration did not return anything,
meaning that the decorated function would end up being `None`. This was
not noticed, as the functions are rarely called manually, as opposed to
being invoked via the hook.<commit_after>from django.conf import settings
try:
from importlib import import_module
except ImportError:
# for Python 2.6, fall back on django.utils.importlib (deprecated as of Django 1.7)
from django.utils.importlib import import_module
_hooks = {}
def register(hook_name, fn=None):
"""
Register hook for ``hook_name``. Can be used as a decorator::
@register('hook_name')
def my_hook(...):
pass
or as a function call::
def my_hook(...):
pass
register('hook_name', my_hook)
"""
# Pretend to be a decorator if fn is not supplied
if fn is None:
def decorator(fn):
register(hook_name, fn)
return fn
return decorator
if hook_name not in _hooks:
_hooks[hook_name] = []
_hooks[hook_name].append(fn)
_searched_for_hooks = False
def search_for_hooks():
global _searched_for_hooks
if not _searched_for_hooks:
for app_module in settings.INSTALLED_APPS:
try:
import_module('%s.wagtail_hooks' % app_module)
except ImportError:
continue
_searched_for_hooks = True
def get_hooks(hook_name):
search_for_hooks()
return _hooks.get(hook_name, [])
|
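The bug described in the commit message is easy to reproduce in isolation: a decorator that registers its target but does not return it leaves the decorated name bound to None, while the registry keeps working, which is exactly why the problem went unnoticed. A standalone demonstration of the broken and fixed variants:

registry = []

def register_broken(fn):
    registry.append(fn)   # registers, but implicitly returns None

def register_fixed(fn):
    registry.append(fn)
    return fn             # hand the function back so the name stays usable

@register_broken
def hook_a():
    return 'a'

@register_fixed
def hook_b():
    return 'b'

assert hook_a is None      # the broken decorator destroyed the binding
assert hook_b() == 'b'     # the fixed one leaves it callable
assert len(registry) == 2  # both were registered either way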
00bd680b7711d48b22043308871d91d560a69944
|
rabbitmq-req-rep-server.py
|
rabbitmq-req-rep-server.py
|
#!/usr/bin/env python
import pika
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='rpc_queue')
def fib(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return fib(n-1) + fib(n-2)
def on_request(ch, method, props, body):
n = int(body)
print(" [.] fib(%s)" % n)
response = fib(n)
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id = \
props.correlation_id),
body=str(response))
ch.basic_ack(delivery_tag = method.delivery_tag)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(on_request, queue='rpc_queue')
print(" [x] Awaiting RPC requests")
channel.start_consuming()
|
Add rabbitmq request reply server
|
Add rabbitmq request reply server
|
Python
|
mit
|
voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts
|
Add rabbitmq request reply server
|
#!/usr/bin/env python
import pika
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='rpc_queue')
def fib(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return fib(n-1) + fib(n-2)
def on_request(ch, method, props, body):
n = int(body)
print(" [.] fib(%s)" % n)
response = fib(n)
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id = \
props.correlation_id),
body=str(response))
ch.basic_ack(delivery_tag = method.delivery_tag)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(on_request, queue='rpc_queue')
print(" [x] Awaiting RPC requests")
channel.start_consuming()
|
<commit_before><commit_msg>Add rabbitmq request reply server<commit_after>
|
#!/usr/bin/env python
import pika
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='rpc_queue')
def fib(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return fib(n-1) + fib(n-2)
def on_request(ch, method, props, body):
n = int(body)
print(" [.] fib(%s)" % n)
response = fib(n)
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id = \
props.correlation_id),
body=str(response))
ch.basic_ack(delivery_tag = method.delivery_tag)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(on_request, queue='rpc_queue')
print(" [x] Awaiting RPC requests")
channel.start_consuming()
|
Add rabbitmq request reply server#!/usr/bin/env python
import pika
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='rpc_queue')
def fib(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return fib(n-1) + fib(n-2)
def on_request(ch, method, props, body):
n = int(body)
print(" [.] fib(%s)" % n)
response = fib(n)
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id = \
props.correlation_id),
body=str(response))
ch.basic_ack(delivery_tag = method.delivery_tag)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(on_request, queue='rpc_queue')
print(" [x] Awaiting RPC requests")
channel.start_consuming()
|
<commit_before><commit_msg>Add rabbitmq request reply server<commit_after>#!/usr/bin/env python
import pika
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='rpc_queue')
def fib(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return fib(n-1) + fib(n-2)
def on_request(ch, method, props, body):
n = int(body)
print(" [.] fib(%s)" % n)
response = fib(n)
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id = \
props.correlation_id),
body=str(response))
ch.basic_ack(delivery_tag = method.delivery_tag)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(on_request, queue='rpc_queue')
print(" [x] Awaiting RPC requests")
channel.start_consuming()
|
|
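The server above is one half of the classic RabbitMQ RPC pattern: requests arrive on rpc_queue, and each reply is published to the queue named in the request's reply_to property, tagged with the caller's correlation_id. A matching client sketch written against the same pre-1.0 pika API as the server (the basic_consume and queue_declare signatures changed in pika >= 1.0):

#!/usr/bin/env python
import uuid
import pika

class FibonacciRpcClient(object):
    def __init__(self):
        self.connection = pika.BlockingConnection(
            pika.ConnectionParameters(host='localhost'))
        self.channel = self.connection.channel()
        # exclusive, server-named queue that receives replies for this client only
        result = self.channel.queue_declare(exclusive=True)
        self.callback_queue = result.method.queue
        self.channel.basic_consume(self.on_response, no_ack=True,
                                   queue=self.callback_queue)

    def on_response(self, ch, method, props, body):
        if self.corr_id == props.correlation_id:  # ignore stale replies
            self.response = body

    def call(self, n):
        self.response = None
        self.corr_id = str(uuid.uuid4())
        self.channel.basic_publish(exchange='',
                                   routing_key='rpc_queue',
                                   properties=pika.BasicProperties(
                                       reply_to=self.callback_queue,
                                       correlation_id=self.corr_id),
                                   body=str(n))
        while self.response is None:  # block until the matching reply arrives
            self.connection.process_data_events()
        return int(self.response)

fibonacci_rpc = FibonacciRpcClient()
print(" [x] Requesting fib(30)")
print(" [.] Got %r" % fibonacci_rpc.call(30))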
a60968a4f75067e56e32a55539b8fe2e90e67665
|
2013-nov-bronze/1-combo/py/main.py
|
2013-nov-bronze/1-combo/py/main.py
|
import itertools
def read_combo(file):
    return [int(x) - 1 for x in file.readline().split()]
def permute(combo, size):
return [[x % size for x in permutation] for permutation in itertools.product(*[range(v - 2, v + 3) for v in combo])]
fin = open('combo.in', 'r')
size = int(fin.readline())
fout = open('combo.out', 'w')
fout.write(str(len(set(map(tuple, permute(read_combo(fin), size) + permute(read_combo(fin), size))))) + '\n')
fout.close()
fin.close()
|
Add Python 2 and 3 solution for Nov. 2013 Bronze Problem 1
|
Add Python 2 and 3 solution for Nov. 2013 Bronze Problem 1
|
Python
|
mit
|
hsun324/usaco-solutions,hsun324/usaco-solutions
|
Add Python 2 and 3 solution for Nov. 2013 Bronze Problem 1
|
import itertools
def read_combo(file):
    return [int(x) - 1 for x in file.readline().split()]
def permute(combo, size):
return [[x % size for x in permutation] for permutation in itertools.product(*[range(v - 2, v + 3) for v in combo])]
fin = open('combo.in', 'r')
size = int(fin.readline())
fout = open('combo.out', 'w')
fout.write(str(len(set(map(tuple, permute(read_combo(fin), size) + permute(read_combo(fin), size))))) + '\n')
fout.close()
fin.close()
|
<commit_before><commit_msg>Add Python 2 and 3 solution for Nov. 2013 Bronze Problem 1<commit_after>
|
import itertools
def read_combo(file):
    return [int(x) - 1 for x in file.readline().split()]
def permute(combo, size):
return [[x % size for x in permutation] for permutation in itertools.product(*[range(v - 2, v + 3) for v in combo])]
fin = open('combo.in', 'r')
size = int(fin.readline())
fout = open('combo.out', 'w')
fout.write(str(len(set(map(tuple, permute(read_combo(fin), size) + permute(read_combo(fin), size))))) + '\n')
fout.close()
fin.close()
|
Add Python 2 and 3 solution for Nov. 2013 Bronze Problem 1import itertools
def read_combo(file):
    return [int(x) - 1 for x in file.readline().split()]
def permute(combo, size):
return [[x % size for x in permutation] for permutation in itertools.product(*[range(v - 2, v + 3) for v in combo])]
fin = open('combo.in', 'r')
size = int(fin.readline())
fout = open('combo.out', 'w')
fout.write(str(len(set(map(tuple, permute(read_combo(fin), size) + permute(read_combo(fin), size))))) + '\n')
fout.close()
fin.close()
|
<commit_before><commit_msg>Add Python 2 and 3 solution for Nov. 2013 Bronze Problem 1<commit_after>import itertools
def read_combo(file):
    return [int(x) - 1 for x in file.readline().split()]
def permute(combo, size):
return [[x % size for x in permutation] for permutation in itertools.product(*[range(v - 2, v + 3) for v in combo])]
fin = open('combo.in', 'r')
size = int(fin.readline())
fout = open('combo.out', 'w')
fout.write(str(len(set(map(tuple, permute(read_combo(fin), size) + permute(read_combo(fin), size))))) + '\n')
fout.close()
fin.close()
|
|
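The dense list comprehension in permute above does three things at once: for each dial it enumerates the five positions within +/-2 of the combination digit, takes the Cartesian product of those per-dial ranges with itertools.product, and wraps every position modulo the dial size; the answer is then the size of the union of both combinations' neighborhoods. The same computation unrolled for readability (0-based digits, as in the record):

import itertools

def neighborhood(combo, size):
    """All dial settings within +/-2 of `combo` on every dial, modulo `size`."""
    per_dial = [[(v + d) % size for d in range(-2, 3)] for v in combo]
    return {tuple(p) for p in itertools.product(*per_dial)}

# two 3-dial combinations on dials of size 50
opened = neighborhood((0, 1, 2), 50) | neighborhood((4, 5, 6), 50)
print(len(opened))  # distinct settings that open at least one of the locks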
6f19bb060f0cd906763cd5875227cdb6afb24c7b
|
tempest/tests/services/compute/test_availability_zone_client.py
|
tempest/tests/services/compute/test_availability_zone_client.py
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslo_serialization import jsonutils as json
from oslotest import mockpatch
from tempest.services.compute.json import availability_zone_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestAvailabilityZoneClient(base.TestCase):
FAKE_AVAILABIRITY_ZONE_INFO = [
{
"zoneState": {
"available": True
},
"hosts": None,
"zoneName": u'\xf4'
}
]
def setUp(self):
super(TestAvailabilityZoneClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = availability_zone_client.AvailabilityZoneClient(
fake_auth, 'compute', 'regionOne')
def _test_list_availability_zones(self, bytes_body=False):
serialized_body = json.dumps({
"availabilityZoneInfo": self.FAKE_AVAILABIRITY_ZONE_INFO})
if bytes_body:
serialized_body = serialized_body.encode('utf-8')
mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=mocked_resp))
resp = self.client.list_availability_zones()
self.assertEqual({
"availabilityZoneInfo": self.FAKE_AVAILABIRITY_ZONE_INFO}, resp)
def test_list_availability_zones_with_str_body(self):
self._test_list_availability_zones()
def test_list_availability_zones_with_bytes_body(self):
self._test_list_availability_zones(bytes_body=True)
|
Add unit test for availability_zone_client
|
Add unit test for availability_zone_client
This patch adds unit test for availability_zone_client.
Change-Id: I9f043f0cf864773cbd15c23f776487a729c09692
|
Python
|
apache-2.0
|
bigswitch/tempest,tonyli71/tempest,zsoltdudas/lis-tempest,Juniper/tempest,bigswitch/tempest,LIS/lis-tempest,vedujoshi/tempest,vedujoshi/tempest,flyingfish007/tempest,Tesora/tesora-tempest,izadorozhna/tempest,Tesora/tesora-tempest,rakeshmi/tempest,flyingfish007/tempest,pczerkas/tempest,pczerkas/tempest,tonyli71/tempest,cisco-openstack/tempest,sebrandon1/tempest,izadorozhna/tempest,openstack/tempest,Juniper/tempest,masayukig/tempest,LIS/lis-tempest,zsoltdudas/lis-tempest,openstack/tempest,xbezdick/tempest,sebrandon1/tempest,dkalashnik/tempest,rakeshmi/tempest,dkalashnik/tempest,xbezdick/tempest,masayukig/tempest,cisco-openstack/tempest
|
Add unit test for availability_zone_client
This patch adds unit test for availability_zone_client.
Change-Id: I9f043f0cf864773cbd15c23f776487a729c09692
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslo_serialization import jsonutils as json
from oslotest import mockpatch
from tempest.services.compute.json import availability_zone_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestAvailabilityZoneClient(base.TestCase):
FAKE_AVAILABIRITY_ZONE_INFO = [
{
"zoneState": {
"available": True
},
"hosts": None,
"zoneName": u'\xf4'
}
]
def setUp(self):
super(TestAvailabilityZoneClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = availability_zone_client.AvailabilityZoneClient(
fake_auth, 'compute', 'regionOne')
def _test_list_availability_zones(self, bytes_body=False):
serialized_body = json.dumps({
"availabilityZoneInfo": self.FAKE_AVAILABIRITY_ZONE_INFO})
if bytes_body:
serialized_body = serialized_body.encode('utf-8')
mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=mocked_resp))
resp = self.client.list_availability_zones()
self.assertEqual({
"availabilityZoneInfo": self.FAKE_AVAILABIRITY_ZONE_INFO}, resp)
def test_list_availability_zones_with_str_body(self):
self._test_list_availability_zones()
def test_list_availability_zones_with_bytes_body(self):
self._test_list_availability_zones(bytes_body=True)
|
<commit_before><commit_msg>Add unit test for availability_zone_client
This patch adds unit test for availability_zone_client.
Change-Id: I9f043f0cf864773cbd15c23f776487a729c09692<commit_after>
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslo_serialization import jsonutils as json
from oslotest import mockpatch
from tempest.services.compute.json import availability_zone_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestAvailabilityZoneClient(base.TestCase):
FAKE_AVAILABIRITY_ZONE_INFO = [
{
"zoneState": {
"available": True
},
"hosts": None,
"zoneName": u'\xf4'
}
]
def setUp(self):
super(TestAvailabilityZoneClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = availability_zone_client.AvailabilityZoneClient(
fake_auth, 'compute', 'regionOne')
def _test_list_availability_zones(self, bytes_body=False):
serialized_body = json.dumps({
"availabilityZoneInfo": self.FAKE_AVAILABIRITY_ZONE_INFO})
if bytes_body:
serialized_body = serialized_body.encode('utf-8')
mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=mocked_resp))
resp = self.client.list_availability_zones()
self.assertEqual({
"availabilityZoneInfo": self.FAKE_AVAILABIRITY_ZONE_INFO}, resp)
def test_list_availability_zones_with_str_body(self):
self._test_list_availability_zones()
def test_list_availability_zones_with_bytes_body(self):
self._test_list_availability_zones(bytes_body=True)
|
Add unit test for availability_zone_client
This patch adds unit test for availability_zone_client.
Change-Id: I9f043f0cf864773cbd15c23f776487a729c09692# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslo_serialization import jsonutils as json
from oslotest import mockpatch
from tempest.services.compute.json import availability_zone_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestAvailabilityZoneClient(base.TestCase):
FAKE_AVAILABIRITY_ZONE_INFO = [
{
"zoneState": {
"available": True
},
"hosts": None,
"zoneName": u'\xf4'
}
]
def setUp(self):
super(TestAvailabilityZoneClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = availability_zone_client.AvailabilityZoneClient(
fake_auth, 'compute', 'regionOne')
def _test_list_availability_zones(self, bytes_body=False):
serialized_body = json.dumps({
"availabilityZoneInfo": self.FAKE_AVAILABIRITY_ZONE_INFO})
if bytes_body:
serialized_body = serialized_body.encode('utf-8')
mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=mocked_resp))
resp = self.client.list_availability_zones()
self.assertEqual({
"availabilityZoneInfo": self.FAKE_AVAILABIRITY_ZONE_INFO}, resp)
def test_list_availability_zones_with_str_body(self):
self._test_list_availability_zones()
def test_list_availability_zones_with_bytes_body(self):
self._test_list_availability_zones(bytes_body=True)
|
<commit_before><commit_msg>Add unit test for availability_zone_client
This patch adds unit test for availability_zone_client.
Change-Id: I9f043f0cf864773cbd15c23f776487a729c09692<commit_after># Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslo_serialization import jsonutils as json
from oslotest import mockpatch
from tempest.services.compute.json import availability_zone_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestAvailabilityZoneClient(base.TestCase):
FAKE_AVAILABIRITY_ZONE_INFO = [
{
"zoneState": {
"available": True
},
"hosts": None,
"zoneName": u'\xf4'
}
]
def setUp(self):
super(TestAvailabilityZoneClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = availability_zone_client.AvailabilityZoneClient(
fake_auth, 'compute', 'regionOne')
def _test_list_availability_zones(self, bytes_body=False):
serialized_body = json.dumps({
"availabilityZoneInfo": self.FAKE_AVAILABIRITY_ZONE_INFO})
if bytes_body:
serialized_body = serialized_body.encode('utf-8')
mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=mocked_resp))
resp = self.client.list_availability_zones()
self.assertEqual({
"availabilityZoneInfo": self.FAKE_AVAILABIRITY_ZONE_INFO}, resp)
def test_list_availability_zones_with_str_body(self):
self._test_list_availability_zones()
def test_list_availability_zones_with_bytes_body(self):
self._test_list_availability_zones(bytes_body=True)
|
|
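The test above works by stubbing the transport-level get and asserting that the client method returns the decoded body, for both str and bytes payloads. A sketch of the same stub-the-transport pattern using only the standard library's unittest.mock; the FakeClient here is a hypothetical stand-in, not tempest's client.
import json
import unittest
from unittest import mock

FAKE_ZONES = {"availabilityZoneInfo": [{"zoneName": "nova",
                                        "zoneState": {"available": True}}]}

class FakeClient:
    def get(self, url):
        raise NotImplementedError  # replaced by the patch in the test

    def list_availability_zones(self):
        return json.loads(self.get("/os-availability-zone"))

class TestFakeClient(unittest.TestCase):
    def test_list_availability_zones(self):
        with mock.patch.object(FakeClient, "get",
                               return_value=json.dumps(FAKE_ZONES)):
            self.assertEqual(FAKE_ZONES,
                             FakeClient().list_availability_zones())

if __name__ == "__main__":
    unittest.main()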
467ba634991629affc3a53bef2e93ed02d7d1ae7
|
trypython/py38/fstring_debug.py
|
trypython/py38/fstring_debug.py
|
"""
Sample code for the {xxx=} notation in f-strings introduced in Python 3.8.
REFERENCES:: http://bit.ly/2NlJkSc
"""
from trypython.common.commoncls import SampleBase
class Sample(SampleBase):
def exec(self):
# ------------------------------------------------------------
# f-string debugging specifier
#
# When you put an equals sign inside an f-string, as in {xxx=},
# it now prints "xxx=value" for you.
#
# Writing {xxx = } displays "xxx = value"
#
# A format specifier can also be attached.
# {xxx = :>10}
# ------------------------------------------------------------
s1 = "hello"
s2 = "world"
s3 = "hoge"
print(f'{s1=}\t{s2 = }\t{s3=:>10}')
def go():
obj = Sample()
obj.exec()
|
Add Python 3.8 f-string debugging specifier.
|
Add Python 3.8 f-string debugging specifier.
|
Python
|
mit
|
devlights/try-python
|
Add Python 3.8 f-string debugging specifier.
|
"""
Sample code for the {xxx=} notation in f-strings introduced in Python 3.8.
REFERENCES:: http://bit.ly/2NlJkSc
"""
from trypython.common.commoncls import SampleBase
class Sample(SampleBase):
def exec(self):
# ------------------------------------------------------------
# f-string debugging specifier
#
# When you put an equals sign inside an f-string, as in {xxx=},
# it now prints "xxx=value" for you.
#
# Writing {xxx = } displays "xxx = value"
#
# A format specifier can also be attached.
# {xxx = :>10}
# ------------------------------------------------------------
s1 = "hello"
s2 = "world"
s3 = "hoge"
print(f'{s1=}\t{s2 = }\t{s3=:>10}')
def go():
obj = Sample()
obj.exec()
|
<commit_before><commit_msg>Add Python 3.8 f-string debugging specifier.<commit_after>
|
"""
Sample code for the {xxx=} notation in f-strings introduced in Python 3.8.
REFERENCES:: http://bit.ly/2NlJkSc
"""
from trypython.common.commoncls import SampleBase
class Sample(SampleBase):
def exec(self):
# ------------------------------------------------------------
# f-string debugging specifier
#
# When you put an equals sign inside an f-string, as in {xxx=},
# it now prints "xxx=value" for you.
#
# Writing {xxx = } displays "xxx = value"
#
# A format specifier can also be attached.
# {xxx = :>10}
# ------------------------------------------------------------
s1 = "hello"
s2 = "world"
s3 = "hoge"
print(f'{s1=}\t{s2 = }\t{s3=:>10}')
def go():
obj = Sample()
obj.exec()
|
Add Python 3.8 f-string debugging specifier."""
Sample code for the {xxx=} notation in f-strings introduced in Python 3.8.
REFERENCES:: http://bit.ly/2NlJkSc
"""
from trypython.common.commoncls import SampleBase
class Sample(SampleBase):
def exec(self):
# ------------------------------------------------------------
# f-string debugging specifier
#
# When you put an equals sign inside an f-string, as in {xxx=},
# it now prints "xxx=value" for you.
#
# Writing {xxx = } displays "xxx = value"
#
# A format specifier can also be attached.
# {xxx = :>10}
# ------------------------------------------------------------
s1 = "hello"
s2 = "world"
s3 = "hoge"
print(f'{s1=}\t{s2 = }\t{s3=:>10}')
def go():
obj = Sample()
obj.exec()
|
<commit_before><commit_msg>Add Python 3.8 f-string debugging specifier.<commit_after>"""
Sample code for the {xxx=} notation in f-strings introduced in Python 3.8.
REFERENCES:: http://bit.ly/2NlJkSc
"""
from trypython.common.commoncls import SampleBase
class Sample(SampleBase):
def exec(self):
# ------------------------------------------------------------
# f-string debugging specifier
#
# When you put an equals sign inside an f-string, as in {xxx=},
# it now prints "xxx=value" for you.
#
# Writing {xxx = } displays "xxx = value"
#
# A format specifier can also be attached.
# {xxx = :>10}
# ------------------------------------------------------------
s1 = "hello"
s2 = "world"
s3 = "hoge"
print(f'{s1=}\t{s2 = }\t{s3=:>10}')
def go():
obj = Sample()
obj.exec()
|
|
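For reference, on Python 3.8+ the text before the equals sign is echoed verbatim; without a format spec the value is rendered with repr(), and with one (as in {s3=:>10}) the spec is applied instead. A quick standalone check with the same hypothetical values:
s1, s2, s3 = "hello", "world", "hoge"
print(f"{s1=}")      # s1='hello'       (repr of the value)
print(f"{s2 = }")    # s2 = 'world'     (the spaces are echoed too)
print(f"{s3=:>10}")  # s3=      hoge    (format spec applied, no repr)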
dcd053e0249b14c938d94eb749a8b4095c80be29
|
wqflask/utility/pillow_utils.py
|
wqflask/utility/pillow_utils.py
|
from PIL import Image, ImageColor, ImageDraw, ImageFont
import utility.logger
logger = utility.logger.getLogger(__name__ )
BLACK = ImageColor.getrgb("black")
# def draw_rotated_text(canvas: Image, text: str, font: ImageFont, xy: tuple, fill: ImageColor=BLACK, angle: int=-90):
def draw_rotated_text(canvas, text, font, xy, fill=BLACK, angle=-90):
# type: (Image, str, ImageFont, tuple, ImageColor, int)
"""Utility function draw rotated text"""
tmp_img = Image.new("RGBA", font.getsize(text), color=(0,0,0,0))
draw_text = ImageDraw.Draw(tmp_img)
draw_text.text(text=text, xy=(0,0), font=font, fill=fill)
tmp_img2 = tmp_img.rotate(angle, expand=1)
tmp_img2.save("/tmp/{}.png".format(text), format="png")
canvas.paste(im=tmp_img2, box=tuple([int(i) for i in xy]))
|
Create new utility module for drawing
|
Create new utility module for drawing
* wqflask/utility/pillow_utils.py: Create a module to hold some utility
functions for drawing with Pillow. Initialise the module with a function to draw
rotated text.
|
Python
|
agpl-3.0
|
zsloan/genenetwork2,zsloan/genenetwork2,genenetwork/genenetwork2,zsloan/genenetwork2,genenetwork/genenetwork2,genenetwork/genenetwork2,zsloan/genenetwork2,pjotrp/genenetwork2,pjotrp/genenetwork2,genenetwork/genenetwork2,pjotrp/genenetwork2,pjotrp/genenetwork2,pjotrp/genenetwork2
|
Create new utility module for drawing
* wqflask/utility/pillow_utils.py: Create a module to hold some utility
functions for drawing with Pillow. Initialise the module with a function to draw
rotated text.
|
from PIL import Image, ImageColor, ImageDraw, ImageFont
import utility.logger
logger = utility.logger.getLogger(__name__ )
BLACK = ImageColor.getrgb("black")
# def draw_rotated_text(canvas: Image, text: str, font: ImageFont, xy: tuple, fill: ImageColor=BLACK, angle: int=-90):
def draw_rotated_text(canvas, text, font, xy, fill=BLACK, angle=-90):
# type: (Image, str, ImageFont, tuple, ImageColor, int)
"""Utility function draw rotated text"""
tmp_img = Image.new("RGBA", font.getsize(text), color=(0,0,0,0))
draw_text = ImageDraw.Draw(tmp_img)
draw_text.text(text=text, xy=(0,0), font=font, fill=fill)
tmp_img2 = tmp_img.rotate(angle, expand=1)
tmp_img2.save("/tmp/{}.png".format(text), format="png")
canvas.paste(im=tmp_img2, box=tuple([int(i) for i in xy]))
|
<commit_before><commit_msg>Create new utility module for drawing
* wqflask/utility/pillow_utils.py: Create a module to hold some utility
functions for drawing with Pillow. Initialise the module with a function to draw
rotated text.<commit_after>
|
from PIL import Image, ImageColor, ImageDraw, ImageFont
import utility.logger
logger = utility.logger.getLogger(__name__ )
BLACK = ImageColor.getrgb("black")
# def draw_rotated_text(canvas: Image, text: str, font: ImageFont, xy: tuple, fill: ImageColor=BLACK, angle: int=-90):
def draw_rotated_text(canvas, text, font, xy, fill=BLACK, angle=-90):
# type: (Image, str, ImageFont, tuple, ImageColor, int)
"""Utility function draw rotated text"""
tmp_img = Image.new("RGBA", font.getsize(text), color=(0,0,0,0))
draw_text = ImageDraw.Draw(tmp_img)
draw_text.text(text=text, xy=(0,0), font=font, fill=fill)
tmp_img2 = tmp_img.rotate(angle, expand=1)
tmp_img2.save("/tmp/{}.png".format(text), format="png")
canvas.paste(im=tmp_img2, box=tuple([int(i) for i in xy]))
|
Create new utility module for drawing
* wqflask/utility/pillow_utils.py: Create a module to hold some utility
functions for drawing with Pillow. Initialise the module with a function to draw
rotated text.from PIL import Image, ImageColor, ImageDraw, ImageFont
import utility.logger
logger = utility.logger.getLogger(__name__ )
BLACK = ImageColor.getrgb("black")
# def draw_rotated_text(canvas: Image, text: str, font: ImageFont, xy: tuple, fill: ImageColor=BLACK, angle: int=-90):
def draw_rotated_text(canvas, text, font, xy, fill=BLACK, angle=-90):
# type: (Image, str, ImageFont, tuple, ImageColor, int)
"""Utility function draw rotated text"""
tmp_img = Image.new("RGBA", font.getsize(text), color=(0,0,0,0))
draw_text = ImageDraw.Draw(tmp_img)
draw_text.text(text=text, xy=(0,0), font=font, fill=fill)
tmp_img2 = tmp_img.rotate(angle, expand=1)
tmp_img2.save("/tmp/{}.png".format(text), format="png")
canvas.paste(im=tmp_img2, box=tuple([int(i) for i in xy]))
|
<commit_before><commit_msg>Create new utility module for drawing
* wqflask/utility/pillow_utils.py: Create a module to hold some utility
functions for drawing with Pillow. Initialise the module with a function to draw
rotated text.<commit_after>from PIL import Image, ImageColor, ImageDraw, ImageFont
import utility.logger
logger = utility.logger.getLogger(__name__ )
BLACK = ImageColor.getrgb("black")
# def draw_rotated_text(canvas: Image, text: str, font: ImageFont, xy: tuple, fill: ImageColor=BLACK, angle: int=-90):
def draw_rotated_text(canvas, text, font, xy, fill=BLACK, angle=-90):
# type: (Image, str, ImageFont, tuple, ImageColor, int)
"""Utility function draw rotated text"""
tmp_img = Image.new("RGBA", font.getsize(text), color=(0,0,0,0))
draw_text = ImageDraw.Draw(tmp_img)
draw_text.text(text=text, xy=(0,0), font=font, fill=fill)
tmp_img2 = tmp_img.rotate(angle, expand=1)
tmp_img2.save("/tmp/{}.png".format(text), format="png")
canvas.paste(im=tmp_img2, box=tuple([int(i) for i in xy]))
|
|
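A hypothetical usage sketch for the draw_rotated_text helper defined above (not from the repo, and assuming a Pillow release that still provides ImageFont.getsize, which was removed in Pillow 10):
from PIL import Image, ImageFont

canvas = Image.new("RGBA", (300, 200), "white")
font = ImageFont.load_default()
# Renders "y-axis label" rotated -90 degrees (the default) onto the canvas;
# the helper above also writes a debug copy of the rotated text to /tmp.
draw_rotated_text(canvas, "y-axis label", font, xy=(10, 40))
canvas.save("/tmp/rotated_demo.png")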
bc8292286bc372a58c3dca70af179536bff7c67a
|
tests/views/test_provincial_legislatures_page.py
|
tests/views/test_provincial_legislatures_page.py
|
from tests import PMGLiveServerTestCase
from tests.fixtures import dbfixture, HouseData, CommitteeData
from pmg.models import House
from pmg.views import utils
class TestProvincialLegislaturesPages(PMGLiveServerTestCase):
def setUp(self):
super(TestProvincialLegislaturesPages, self).setUp()
self.fx = dbfixture.data(HouseData, CommitteeData)
self.fx.setup()
def tearDown(self):
self.fx.teardown()
super(TestProvincialLegislaturesPages, self).tearDown()
def test_provincial_legislatures_page(self):
"""
Test provincial legislatures page (/provincial-legislatures/)
"""
self.make_request(
"/provincial-legislatures/",
follow_redirects=True,
)
self.assertIn("Provincial Legislatures", self.html)
self.assertIn(self.fx.HouseData.western_cape.name, self.html)
def test_provincial_legislature_page_for_province(self):
"""
Test provincial legislatures page (/provincial-legislatures/<province>)
"""
slug = utils.slugify_province(self.fx.HouseData.western_cape.name)
self.make_request(
"/provincial-legislatures/%s" % slug,
follow_redirects=True,
)
self.assertIn("Provincial Legislatures", self.html)
self.assertIn(self.fx.HouseData.western_cape.name, self.html)
self.assertIn("Committees", self.html)
self.assertIn("Members", self.html)
|
Add test for provincial legislatures page
|
Add test for provincial legislatures page
|
Python
|
apache-2.0
|
Code4SA/pmg-cms-2,Code4SA/pmg-cms-2,Code4SA/pmg-cms-2
|
Add test for provincial legislatures page
|
from tests import PMGLiveServerTestCase
from tests.fixtures import dbfixture, HouseData, CommitteeData
from pmg.models import House
from pmg.views import utils
class TestProvincialLegislaturesPages(PMGLiveServerTestCase):
def setUp(self):
super(TestProvincialLegislaturesPages, self).setUp()
self.fx = dbfixture.data(HouseData, CommitteeData)
self.fx.setup()
def tearDown(self):
self.fx.teardown()
super(TestProvincialLegislaturesPages, self).tearDown()
def test_provincial_legislatures_page(self):
"""
Test provincial legislatures page (/provincial-legislatures/)
"""
self.make_request(
"/provincial-legislatures/",
follow_redirects=True,
)
self.assertIn("Provincial Legislatures", self.html)
self.assertIn(self.fx.HouseData.western_cape.name, self.html)
def test_provincial_legislature_page_for_province(self):
"""
Test provincial legislatures page (/provincial-legislatures/<province>)
"""
slug = utils.slugify_province(self.fx.HouseData.western_cape.name)
self.make_request(
"/provincial-legislatures/%s" % slug,
follow_redirects=True,
)
self.assertIn("Provincial Legislatures", self.html)
self.assertIn(self.fx.HouseData.western_cape.name, self.html)
self.assertIn("Committees", self.html)
self.assertIn("Members", self.html)
|
<commit_before><commit_msg>Add test for provincial legislatures page<commit_after>
|
from tests import PMGLiveServerTestCase
from tests.fixtures import dbfixture, HouseData, CommitteeData
from pmg.models import House
from pmg.views import utils
class TestProvincialLegislaturesPages(PMGLiveServerTestCase):
def setUp(self):
super(TestProvincialLegislaturesPages, self).setUp()
self.fx = dbfixture.data(HouseData, CommitteeData)
self.fx.setup()
def tearDown(self):
self.fx.teardown()
super(TestProvincialLegislaturesPages, self).tearDown()
def test_provincial_legislatures_page(self):
"""
Test provincial legislatures page (/provincial-legislatures/)
"""
self.make_request(
"/provincial-legislatures/",
follow_redirects=True,
)
self.assertIn("Provincial Legislatures", self.html)
self.assertIn(self.fx.HouseData.western_cape.name, self.html)
def test_provincial_legislature_page_for_province(self):
"""
Test provincial legislatures page (/provincial-legislatures/<province>)
"""
slug = utils.slugify_province(self.fx.HouseData.western_cape.name)
self.make_request(
"/provincial-legislatures/%s" % slug,
follow_redirects=True,
)
self.assertIn("Provincial Legislatures", self.html)
self.assertIn(self.fx.HouseData.western_cape.name, self.html)
self.assertIn("Committees", self.html)
self.assertIn("Members", self.html)
|
Add test for provincial legislatures pagefrom tests import PMGLiveServerTestCase
from tests.fixtures import dbfixture, HouseData, CommitteeData
from pmg.models import House
from pmg.views import utils
class TestProvincialLegislaturesPages(PMGLiveServerTestCase):
def setUp(self):
super(TestProvincialLegislaturesPages, self).setUp()
self.fx = dbfixture.data(HouseData, CommitteeData)
self.fx.setup()
def tearDown(self):
self.fx.teardown()
super(TestProvincialLegislaturesPages, self).tearDown()
def test_provincial_legislatures_page(self):
"""
Test provincial legislatures page (/provincial-legislatures/)
"""
self.make_request(
"/provincial-legislatures/",
follow_redirects=True,
)
self.assertIn("Provincial Legislatures", self.html)
self.assertIn(self.fx.HouseData.western_cape.name, self.html)
def test_provincial_legislature_page_for_province(self):
"""
Test provincial legislatures page (/provincial-legislatures/<province>)
"""
slug = utils.slugify_province(self.fx.HouseData.western_cape.name)
self.make_request(
"/provincial-legislatures/%s" % slug,
follow_redirects=True,
)
self.assertIn("Provincial Legislatures", self.html)
self.assertIn(self.fx.HouseData.western_cape.name, self.html)
self.assertIn("Committees", self.html)
self.assertIn("Members", self.html)
|
<commit_before><commit_msg>Add test for provincial legislatures page<commit_after>from tests import PMGLiveServerTestCase
from tests.fixtures import dbfixture, HouseData, CommitteeData
from pmg.models import House
from pmg.views import utils
class TestProvincialLegislaturesPages(PMGLiveServerTestCase):
def setUp(self):
super(TestProvincialLegislaturesPages, self).setUp()
self.fx = dbfixture.data(HouseData, CommitteeData)
self.fx.setup()
def tearDown(self):
self.fx.teardown()
super(TestProvincialLegislaturesPages, self).tearDown()
def test_provincial_legislatures_page(self):
"""
Test provincial legislatures page (/provincial-legislatures/)
"""
self.make_request(
"/provincial-legislatures/",
follow_redirects=True,
)
self.assertIn("Provincial Legislatures", self.html)
self.assertIn(self.fx.HouseData.western_cape.name, self.html)
def test_provincial_legislature_page_for_province(self):
"""
Test provincial legislatures page (/provincial-legislatures/<province>)
"""
slug = utils.slugify_province(self.fx.HouseData.western_cape.name)
self.make_request(
"/provincial-legislatures/%s" % slug,
follow_redirects=True,
)
self.assertIn("Provincial Legislatures", self.html)
self.assertIn(self.fx.HouseData.western_cape.name, self.html)
self.assertIn("Committees", self.html)
self.assertIn("Members", self.html)
|
|
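The same request-and-assert flow can be exercised without a live server. A sketch using Flask's built-in test client, with a hypothetical route and markup standing in for the PMG views and fixtures:
from flask import Flask

app = Flask(__name__)

@app.route("/provincial-legislatures/")
def provincial_legislatures():
    return "<h1>Provincial Legislatures</h1><a href='#'>Western Cape</a>"

def test_provincial_legislatures_page():
    resp = app.test_client().get("/provincial-legislatures/",
                                 follow_redirects=True)
    html = resp.get_data(as_text=True)
    assert "Provincial Legislatures" in html
    assert "Western Cape" in html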
ae900a60b714e76b9c3d4310b3ed9120afa780fe
|
conflict_minerals_data/api/migrations/0003_auto_20170704_1055.py
|
conflict_minerals_data/api/migrations/0003_auto_20170704_1055.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-04 17:55
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20170704_1028'),
]
operations = [
migrations.AlterModelOptions(
name='edgarcompanyinfo',
options={'verbose_name_plural': 'Companies'},
),
migrations.AlterModelOptions(
name='edgardocumentcontent',
options={'verbose_name_plural': 'Document Content'},
),
migrations.AlterModelOptions(
name='edgarsdfiling',
options={'verbose_name_plural': 'SD Filings'},
),
migrations.AlterModelOptions(
name='edgarsdfilingdocument',
options={'verbose_name_plural': 'SD Filing Documents'},
),
migrations.AlterModelOptions(
name='edgarsearch',
options={'verbose_name_plural': 'Edgar Requests'},
),
]
|
Add migration for meta names
|
Add migration for meta names
|
Python
|
mit
|
MiningTheDisclosures/conflict-minerals-data,MiningTheDisclosures/conflict-minerals-data,MiningTheDisclosures/conflict-minerals-data,MiningTheDisclosures/conflict-minerals-data
|
Add migration for meta names
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-04 17:55
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20170704_1028'),
]
operations = [
migrations.AlterModelOptions(
name='edgarcompanyinfo',
options={'verbose_name_plural': 'Companies'},
),
migrations.AlterModelOptions(
name='edgardocumentcontent',
options={'verbose_name_plural': 'Document Content'},
),
migrations.AlterModelOptions(
name='edgarsdfiling',
options={'verbose_name_plural': 'SD Filings'},
),
migrations.AlterModelOptions(
name='edgarsdfilingdocument',
options={'verbose_name_plural': 'SD Filing Documents'},
),
migrations.AlterModelOptions(
name='edgarsearch',
options={'verbose_name_plural': 'Edgar Requests'},
),
]
|
<commit_before><commit_msg>Add migration for meta names<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-04 17:55
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20170704_1028'),
]
operations = [
migrations.AlterModelOptions(
name='edgarcompanyinfo',
options={'verbose_name_plural': 'Companies'},
),
migrations.AlterModelOptions(
name='edgardocumentcontent',
options={'verbose_name_plural': 'Document Content'},
),
migrations.AlterModelOptions(
name='edgarsdfiling',
options={'verbose_name_plural': 'SD Filings'},
),
migrations.AlterModelOptions(
name='edgarsdfilingdocument',
options={'verbose_name_plural': 'SD Filing Documents'},
),
migrations.AlterModelOptions(
name='edgarsearch',
options={'verbose_name_plural': 'Edgar Requests'},
),
]
|
Add migration for meta names# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-04 17:55
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20170704_1028'),
]
operations = [
migrations.AlterModelOptions(
name='edgarcompanyinfo',
options={'verbose_name_plural': 'Companies'},
),
migrations.AlterModelOptions(
name='edgardocumentcontent',
options={'verbose_name_plural': 'Document Content'},
),
migrations.AlterModelOptions(
name='edgarsdfiling',
options={'verbose_name_plural': 'SD Filings'},
),
migrations.AlterModelOptions(
name='edgarsdfilingdocument',
options={'verbose_name_plural': 'SD Filing Documents'},
),
migrations.AlterModelOptions(
name='edgarsearch',
options={'verbose_name_plural': 'Edgar Requests'},
),
]
|
<commit_before><commit_msg>Add migration for meta names<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-04 17:55
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20170704_1028'),
]
operations = [
migrations.AlterModelOptions(
name='edgarcompanyinfo',
options={'verbose_name_plural': 'Companies'},
),
migrations.AlterModelOptions(
name='edgardocumentcontent',
options={'verbose_name_plural': 'Document Content'},
),
migrations.AlterModelOptions(
name='edgarsdfiling',
options={'verbose_name_plural': 'SD Filings'},
),
migrations.AlterModelOptions(
name='edgarsdfilingdocument',
options={'verbose_name_plural': 'SD Filing Documents'},
),
migrations.AlterModelOptions(
name='edgarsearch',
options={'verbose_name_plural': 'Edgar Requests'},
),
]
|
|
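AlterModelOptions is what makemigrations emits when only a model's Meta changes; it records the new options but runs no SQL. Roughly, the model-side edit this migration mirrors looks like the following sketch (field list hypothetical):
from django.db import models

class EdgarCompanyInfo(models.Model):  # lives in the app's models.py
    name = models.CharField(max_length=255)  # hypothetical field

    class Meta:
        # Only the admin display name changes; no database schema change.
        verbose_name_plural = "Companies"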
28449fe72be77c9515610e9542ab24ebf6d8b311
|
osf/management/commands/count_preregistrations.py
|
osf/management/commands/count_preregistrations.py
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from osf.models import Registration, MetaSchema
PREREG_SCHEMA_NAMES = [
'Prereg Challenge',
'AsPredicted Preregistration',
'OSF-Standard Pre-Data Collection Registration',
'Replication Recipe (Brandt et al., 2013): Pre-Registration',
"Pre-Registration in Social Psychology (van 't Veer & Giner-Sorolla, 2016): Pre-Registration",
'Election Research Preacceptance Competition',
]
class Command(BaseCommand):
"""Get a count of preregistrations, grouped by schema."""
def handle(self, *args, **options):
total = 0
for schema_name in PREREG_SCHEMA_NAMES:
metaschemas = MetaSchema.objects.filter(name=schema_name).only('id', 'schema_version')
for metaschema in metaschemas:
registrations = Registration.objects.filter(registered_schema=metaschema).get_roots()
count = registrations.count()
print('{} (Version {}): {}'.format(schema_name, metaschema.schema_version, count))
total += count
print('Total: {}'.format(total))
|
Add command to get number of preregistrations by schema
|
Add command to get number of preregistrations by schema
|
Python
|
apache-2.0
|
TomBaxter/osf.io,mattclark/osf.io,chrisseto/osf.io,caseyrollins/osf.io,leb2dg/osf.io,sloria/osf.io,brianjgeiger/osf.io,binoculars/osf.io,HalcyonChimera/osf.io,aaxelb/osf.io,laurenrevere/osf.io,Johnetordoff/osf.io,brianjgeiger/osf.io,leb2dg/osf.io,crcresearch/osf.io,mattclark/osf.io,adlius/osf.io,erinspace/osf.io,felliott/osf.io,aaxelb/osf.io,crcresearch/osf.io,adlius/osf.io,CenterForOpenScience/osf.io,felliott/osf.io,leb2dg/osf.io,adlius/osf.io,HalcyonChimera/osf.io,HalcyonChimera/osf.io,saradbowman/osf.io,laurenrevere/osf.io,saradbowman/osf.io,felliott/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,cslzchen/osf.io,CenterForOpenScience/osf.io,TomBaxter/osf.io,pattisdr/osf.io,sloria/osf.io,chrisseto/osf.io,Johnetordoff/osf.io,mfraezz/osf.io,crcresearch/osf.io,pattisdr/osf.io,caneruguz/osf.io,mfraezz/osf.io,cslzchen/osf.io,cslzchen/osf.io,caneruguz/osf.io,icereval/osf.io,leb2dg/osf.io,CenterForOpenScience/osf.io,caneruguz/osf.io,cslzchen/osf.io,icereval/osf.io,baylee-d/osf.io,adlius/osf.io,mfraezz/osf.io,felliott/osf.io,chrisseto/osf.io,binoculars/osf.io,TomBaxter/osf.io,binoculars/osf.io,brianjgeiger/osf.io,sloria/osf.io,mattclark/osf.io,chennan47/osf.io,aaxelb/osf.io,laurenrevere/osf.io,aaxelb/osf.io,pattisdr/osf.io,chennan47/osf.io,erinspace/osf.io,brianjgeiger/osf.io,erinspace/osf.io,baylee-d/osf.io,Johnetordoff/osf.io,chennan47/osf.io,caseyrollins/osf.io,icereval/osf.io,CenterForOpenScience/osf.io,caseyrollins/osf.io,baylee-d/osf.io,mfraezz/osf.io,chrisseto/osf.io,caneruguz/osf.io
|
Add command to get number of preregistrations by schema
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from osf.models import Registration, MetaSchema
PREREG_SCHEMA_NAMES = [
'Prereg Challenge',
'AsPredicted Preregistration',
'OSF-Standard Pre-Data Collection Registration',
'Replication Recipe (Brandt et al., 2013): Pre-Registration',
"Pre-Registration in Social Psychology (van 't Veer & Giner-Sorolla, 2016): Pre-Registration",
'Election Research Preacceptance Competition',
]
class Command(BaseCommand):
"""Get a count of preregistrations, grouped by schema."""
def handle(self, *args, **options):
total = 0
for schema_name in PREREG_SCHEMA_NAMES:
metaschemas = MetaSchema.objects.filter(name=schema_name).only('id', 'schema_version')
for metaschema in metaschemas:
registrations = Registration.objects.filter(registered_schema=metaschema).get_roots()
count = registrations.count()
print('{} (Version {}): {}'.format(schema_name, metaschema.schema_version, count))
total += count
print('Total: {}'.format(total))
|
<commit_before><commit_msg>Add command to get number of preregistrations by schema<commit_after>
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from osf.models import Registration, MetaSchema
PREREG_SCHEMA_NAMES = [
'Prereg Challenge',
'AsPredicted Preregistration',
'OSF-Standard Pre-Data Collection Registration',
'Replication Recipe (Brandt et al., 2013): Pre-Registration',
"Pre-Registration in Social Psychology (van 't Veer & Giner-Sorolla, 2016): Pre-Registration",
'Election Research Preacceptance Competition',
]
class Command(BaseCommand):
"""Get a count of preregistrations, grouped by schema."""
def handle(self, *args, **options):
total = 0
for schema_name in PREREG_SCHEMA_NAMES:
metaschemas = MetaSchema.objects.filter(name=schema_name).only('id', 'schema_version')
for metaschema in metaschemas:
registrations = Registration.objects.filter(registered_schema=metaschema).get_roots()
count = registrations.count()
print('{} (Version {}): {}'.format(schema_name, metaschema.schema_version, count))
total += count
print('Total: {}'.format(total))
|
Add command to get number of preregistrations by schema# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from osf.models import Registration, MetaSchema
PREREG_SCHEMA_NAMES = [
'Prereg Challenge',
'AsPredicted Preregistration',
'OSF-Standard Pre-Data Collection Registration',
'Replication Recipe (Brandt et al., 2013): Pre-Registration',
"Pre-Registration in Social Psychology (van 't Veer & Giner-Sorolla, 2016): Pre-Registration",
'Election Research Preacceptance Competition',
]
class Command(BaseCommand):
"""Get a count of preregistrations, grouped by schema."""
def handle(self, *args, **options):
total = 0
for schema_name in PREREG_SCHEMA_NAMES:
metaschemas = MetaSchema.objects.filter(name=schema_name).only('id', 'schema_version')
for metaschema in metaschemas:
registrations = Registration.objects.filter(registered_schema=metaschema).get_roots()
count = registrations.count()
print('{} (Version {}): {}'.format(schema_name, metaschema.schema_version, count))
total += count
print('Total: {}'.format(total))
|
<commit_before><commit_msg>Add command to get number of preregistrations by schema<commit_after># -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from osf.models import Registration, MetaSchema
PREREG_SCHEMA_NAMES = [
'Prereg Challenge',
'AsPredicted Preregistration',
'OSF-Standard Pre-Data Collection Registration',
'Replication Recipe (Brandt et al., 2013): Pre-Registration',
"Pre-Registration in Social Psychology (van 't Veer & Giner-Sorolla, 2016): Pre-Registration",
'Election Research Preacceptance Competition',
]
class Command(BaseCommand):
"""Get a count of preregistrations, grouped by schema."""
def handle(self, *args, **options):
total = 0
for schema_name in PREREG_SCHEMA_NAMES:
metaschemas = MetaSchema.objects.filter(name=schema_name).only('id', 'schema_version')
for metaschema in metaschemas:
registrations = Registration.objects.filter(registered_schema=metaschema).get_roots()
count = registrations.count()
print('{} (Version {}): {}'.format(schema_name, metaschema.schema_version, count))
total += count
print('Total: {}'.format(total))
|
|
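A command like this is normally run as python manage.py count_preregistrations; from code (for example in a test), the standard entry point is django.core.management.call_command. A minimal sketch, assuming a configured settings module:
import django
from django.core.management import call_command

# Assumes DJANGO_SETTINGS_MODULE is set for the project before this runs.
django.setup()
call_command("count_preregistrations")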
b93c75d710e75bf28bf3f007251725195ec7c945
|
notifications/migrations/0004_auto_20150826_1508.py
|
notifications/migrations/0004_auto_20150826_1508.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import notifications.models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0003_notification_data'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='timestamp',
field=models.DateTimeField(default=notifications.models.now),
),
]
|
Add missing migration for Notification model
|
Add missing migration for Notification model
|
Python
|
bsd-3-clause
|
alazaro/django-notifications,letolab/django-notifications,LegoStormtroopr/django-notifications,error0608/django-notifications,iberben/django-notifications,Evidlo/django-notifications,iberben/django-notifications,iberben/django-notifications,LegoStormtroopr/django-notifications,zhang-z/django-notifications,error0608/django-notifications,django-notifications/django-notifications,django-notifications/django-notifications,Evidlo/django-notifications,zhang-z/django-notifications,Evidlo/django-notifications,alazaro/django-notifications,django-notifications/django-notifications,jimlyndon/django-notifications,pebreo/django-notifications,letolab/django-notifications,alazaro/django-notifications,zhang-z/django-notifications,LegoStormtroopr/django-notifications,lukeburden/django-notifications,pebreo/django-notifications,lukeburden/django-notifications,letolab/django-notifications,jimlyndon/django-notifications,pebreo/django-notifications,error0608/django-notifications,lukeburden/django-notifications,jimlyndon/django-notifications
|
Add missing migration for Notification model
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import notifications.models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0003_notification_data'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='timestamp',
field=models.DateTimeField(default=notifications.models.now),
),
]
|
<commit_before><commit_msg>Add missing migration for Notification model<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import notifications.models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0003_notification_data'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='timestamp',
field=models.DateTimeField(default=notifications.models.now),
),
]
|
Add missing migration for Notification model# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import notifications.models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0003_notification_data'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='timestamp',
field=models.DateTimeField(default=notifications.models.now),
),
]
|
<commit_before><commit_msg>Add missing migration for Notification model<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import notifications.models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0003_notification_data'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='timestamp',
field=models.DateTimeField(default=notifications.models.now),
),
]
|
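The detail worth noting in the migration above is that the default is a callable reference (notifications.models.now, no parentheses), so it is evaluated per row at save time rather than frozen at import. A generic sketch of the same pattern with django.utils.timezone:
from django.db import models
from django.utils import timezone

class Notification(models.Model):  # sketch; the real model has more fields
    # Callable default: timezone.now is called for each new row.
    # Writing timezone.now() here would capture one import-time value.
    timestamp = models.DateTimeField(default=timezone.now)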
|
d7c07abbc50b536531a5f622a089690d5ff5faa3
|
examples/plot_digits_classification.py
|
examples/plot_digits_classification.py
|
"""
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: Simplified BSD
# Standard scientific Python imports
import pylab as pl
# The digits dataset
from scikits.learn import datasets
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits,
# let's have a look at the first 3 images. We know which digit they
# represent: it is given in the 'target' of the dataset.
for index, (image, label) in enumerate(zip(digits.images, digits.target)[:4]):
pl.subplot(2, 4, index+1)
pl.imshow(image, cmap=pl.cm.gray_r)
pl.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_features = len(digits.images)
data = digits.images.reshape((n_features, -1))
# Import a classifier:
from scikits.learn import svm
classifier = svm.SVC()
# We learn the digits on the first half of the digits
classifier.fit(data[:n_features/2], digits.target[:n_features/2])
# Now predict the value of the digit on the second half:
predicted = classifier.predict(data[n_features/2:])
for index, (image, prediction) in enumerate(zip(
digits.images[n_features/2:],
predicted
)[:4]):
pl.subplot(2, 4, index+5)
pl.imshow(image, cmap=pl.cm.gray_r)
pl.title('Prediction: %i' % prediction)
pl.show()
|
Add an example doing classification on digits.
|
ENH/DOC: Add an example doing classification on digits.
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@669 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8
|
Python
|
bsd-3-clause
|
AlexanderFabisch/scikit-learn,sarahgrogan/scikit-learn,gotomypc/scikit-learn,jpautom/scikit-learn,cainiaocome/scikit-learn,shyamalschandra/scikit-learn,gclenaghan/scikit-learn,chrsrds/scikit-learn,rahuldhote/scikit-learn,ldirer/scikit-learn,sinhrks/scikit-learn,hdmetor/scikit-learn,lucidfrontier45/scikit-learn,cainiaocome/scikit-learn,abimannans/scikit-learn,huzq/scikit-learn,mhue/scikit-learn,russel1237/scikit-learn,dsullivan7/scikit-learn,jlegendary/scikit-learn,aewhatley/scikit-learn,lucidfrontier45/scikit-learn,appapantula/scikit-learn,procoder317/scikit-learn,yonglehou/scikit-learn,carrillo/scikit-learn,eg-zhang/scikit-learn,mattgiguere/scikit-learn,scikit-learn/scikit-learn,anurag313/scikit-learn,xiaoxiamii/scikit-learn,Adai0808/scikit-learn,themrmax/scikit-learn,xzh86/scikit-learn,raghavrv/scikit-learn,frank-tancf/scikit-learn,sonnyhu/scikit-learn,LiaoPan/scikit-learn,UNR-AERIAL/scikit-learn,michigraber/scikit-learn,Akshay0724/scikit-learn,manhhomienbienthuy/scikit-learn,liberatorqjw/scikit-learn,ycaihua/scikit-learn,r-mart/scikit-learn,betatim/scikit-learn,petosegan/scikit-learn,mayblue9/scikit-learn,AlexanderFabisch/scikit-learn,Barmaley-exe/scikit-learn,manhhomienbienthuy/scikit-learn,jayflo/scikit-learn,andrewnc/scikit-learn,stylianos-kampakis/scikit-learn,UNR-AERIAL/scikit-learn,meduz/scikit-learn,ChanChiChoi/scikit-learn,costypetrisor/scikit-learn,rishikksh20/scikit-learn,rrohan/scikit-learn,scikit-learn/scikit-learn,nikitasingh981/scikit-learn,huobaowangxi/scikit-learn,macks22/scikit-learn,ZenDevelopmentSystems/scikit-learn,vermouthmjl/scikit-learn,potash/scikit-learn,akionakamura/scikit-learn,Fireblend/scikit-learn,aminert/scikit-learn,fyffyt/scikit-learn,xubenben/scikit-learn,yyjiang/scikit-learn,mehdidc/scikit-learn,Srisai85/scikit-learn,IssamLaradji/scikit-learn,mikebenfield/scikit-learn,xavierwu/scikit-learn,Obus/scikit-learn,ycaihua/scikit-learn,zihua/scikit-learn,ycaihua/scikit-learn,anurag313/scikit-learn,glouppe/scikit-learn,ndingwall/scikit-learn,0x0all/scikit-learn,deepesch/scikit-learn,ningchi/scikit-learn,espg/scikit-learn,ldirer/scikit-learn,fredhusser/scikit-learn,liberatorqjw/scikit-learn,meduz/scikit-learn,untom/scikit-learn,ElDeveloper/scikit-learn,Lawrence-Liu/scikit-learn,cl4rke/scikit-learn,lbishal/scikit-learn,ominux/scikit-learn,anntzer/scikit-learn,TomDLT/scikit-learn,abhishekkrthakur/scikit-learn,mehdidc/scikit-learn,mxjl620/scikit-learn,3manuek/scikit-learn,samuel1208/scikit-learn,JPFrancoia/scikit-learn,stylianos-kampakis/scikit-learn,sergeyf/scikit-learn,treycausey/scikit-learn,jlegendary/scikit-learn,devanshdalal/scikit-learn,Fireblend/scikit-learn,kaichogami/scikit-learn,ZENGXH/scikit-learn,jakobworldpeace/scikit-learn,Jimmy-Morzaria/scikit-learn,loli/sklearn-ensembletrees,0asa/scikit-learn,samzhang111/scikit-learn,ssaeger/scikit-learn,bhargav/scikit-learn,luo66/scikit-learn,eickenberg/scikit-learn,MartinDelzant/scikit-learn,AlexRobson/scikit-learn,MatthieuBizien/scikit-learn,mjgrav2001/scikit-learn,elkingtonmcb/scikit-learn,terkkila/scikit-learn,wlamond/scikit-learn,liangz0707/scikit-learn,thientu/scikit-learn,mrshu/scikit-learn,Nyker510/scikit-learn,ilo10/scikit-learn,hrjn/scikit-learn,lesteve/scikit-learn,robin-lai/scikit-learn,nvoron23/scikit-learn,h2educ/scikit-learn,jseabold/scikit-learn,abhishekkrthakur/scikit-learn,q1ang/scikit-learn,Djabbz/scikit-learn,jmetzen/scikit-learn,ssaeger/scikit-learn,xzh86/scikit-learn,tawsifkhan/scikit-learn,yanlend/scikit-learn,henridwyer/scikit-learn,xubenben/scikit-learn,shangwuhencc/scikit-learn,zuku1985/sc
ikit-learn,Windy-Ground/scikit-learn,spallavolu/scikit-learn,fabianp/scikit-learn,pianomania/scikit-learn,victorbergelin/scikit-learn,ycaihua/scikit-learn,jjx02230808/project0223,rexshihaoren/scikit-learn,kagayakidan/scikit-learn,sanketloke/scikit-learn,vibhorag/scikit-learn,rvraghav93/scikit-learn,Nyker510/scikit-learn,jjx02230808/project0223,moutai/scikit-learn,devanshdalal/scikit-learn,evgchz/scikit-learn,abhishekgahlot/scikit-learn,voxlol/scikit-learn,3manuek/scikit-learn,aflaxman/scikit-learn,eg-zhang/scikit-learn,alvarofierroclavero/scikit-learn,cwu2011/scikit-learn,mattgiguere/scikit-learn,gclenaghan/scikit-learn,anirudhjayaraman/scikit-learn,iismd17/scikit-learn,fengzhyuan/scikit-learn,samzhang111/scikit-learn,icdishb/scikit-learn,jorge2703/scikit-learn,bnaul/scikit-learn,djgagne/scikit-learn,Vimos/scikit-learn,sarahgrogan/scikit-learn,AlexanderFabisch/scikit-learn,nmayorov/scikit-learn,justincassidy/scikit-learn,ominux/scikit-learn,kylerbrown/scikit-learn,adamgreenhall/scikit-learn,pianomania/scikit-learn,vinayak-mehta/scikit-learn,olologin/scikit-learn,petosegan/scikit-learn,jayflo/scikit-learn,arabenjamin/scikit-learn,nesterione/scikit-learn,ogrisel/scikit-learn,Achuth17/scikit-learn,roxyboy/scikit-learn,sanketloke/scikit-learn,rajat1994/scikit-learn,cl4rke/scikit-learn,jmschrei/scikit-learn,amueller/scikit-learn,zaxtax/scikit-learn,evgchz/scikit-learn,arahuja/scikit-learn,alexeyum/scikit-learn,mxjl620/scikit-learn,dingocuster/scikit-learn,davidgbe/scikit-learn,vigilv/scikit-learn,LohithBlaze/scikit-learn,wzbozon/scikit-learn,Aasmi/scikit-learn,zhenv5/scikit-learn,h2educ/scikit-learn,YinongLong/scikit-learn,jaidevd/scikit-learn,hlin117/scikit-learn,hrjn/scikit-learn,zorroblue/scikit-learn,davidgbe/scikit-learn,mikebenfield/scikit-learn,thilbern/scikit-learn,RachitKansal/scikit-learn,rsivapr/scikit-learn,theoryno3/scikit-learn,robin-lai/scikit-learn,DonBeo/scikit-learn,roxyboy/scikit-learn,Akshay0724/scikit-learn,cwu2011/scikit-learn,bnaul/scikit-learn,Windy-Ground/scikit-learn,fengzhyuan/scikit-learn,lazywei/scikit-learn,huzq/scikit-learn,DSLituiev/scikit-learn,qifeigit/scikit-learn,pnedunuri/scikit-learn,Djabbz/scikit-learn,toastedcornflakes/scikit-learn,nelson-liu/scikit-learn,simon-pepin/scikit-learn,pompiduskus/scikit-learn,RomainBrault/scikit-learn,mblondel/scikit-learn,Srisai85/scikit-learn,fbagirov/scikit-learn,zorroblue/scikit-learn,PatrickChrist/scikit-learn,siutanwong/scikit-learn,jakirkham/scikit-learn,herilalaina/scikit-learn,aflaxman/scikit-learn,andrewnc/scikit-learn,mwv/scikit-learn,JosmanPS/scikit-learn,rahul-c1/scikit-learn,dsquareindia/scikit-learn,Vimos/scikit-learn,murali-munna/scikit-learn,alexeyum/scikit-learn,dsquareindia/scikit-learn,ZENGXH/scikit-learn,xuewei4d/scikit-learn,phdowling/scikit-learn,BiaDarkia/scikit-learn,xuewei4d/scikit-learn,potash/scikit-learn,pypot/scikit-learn,mehdidc/scikit-learn,jlegendary/scikit-learn,mblondel/scikit-learn,arjoly/scikit-learn,akionakamura/scikit-learn,lenovor/scikit-learn,mfjb/scikit-learn,Myasuka/scikit-learn,hainm/scikit-learn,NunoEdgarGub1/scikit-learn,ZenDevelopmentSystems/scikit-learn,MechCoder/scikit-learn,hsiaoyi0504/scikit-learn,ilyes14/scikit-learn,nmayorov/scikit-learn,anurag313/scikit-learn,kevin-intel/scikit-learn,pypot/scikit-learn,xyguo/scikit-learn,bigdataelephants/scikit-learn,vibhorag/scikit-learn,ZenDevelopmentSystems/scikit-learn,loli/semisupervisedforests,scikit-learn/scikit-learn,mojoboss/scikit-learn,jpautom/scikit-learn,mblondel/scikit-learn,LohithBlaze/scikit-learn,icdishb/scikit-learn,appapan
tula/scikit-learn,ominux/scikit-learn,andaag/scikit-learn,loli/sklearn-ensembletrees,JosmanPS/scikit-learn,joernhees/scikit-learn,jzt5132/scikit-learn,mhue/scikit-learn,henridwyer/scikit-learn,Sentient07/scikit-learn,gclenaghan/scikit-learn,shahankhatch/scikit-learn,mattilyra/scikit-learn,kylerbrown/scikit-learn,thientu/scikit-learn,procoder317/scikit-learn,jorik041/scikit-learn,rishikksh20/scikit-learn,rahuldhote/scikit-learn,jseabold/scikit-learn,ClimbsRocks/scikit-learn,wzbozon/scikit-learn,manhhomienbienthuy/scikit-learn,ashhher3/scikit-learn,HolgerPeters/scikit-learn,roxyboy/scikit-learn,OshynSong/scikit-learn,ogrisel/scikit-learn,yanlend/scikit-learn,f3r/scikit-learn,vigilv/scikit-learn,huobaowangxi/scikit-learn,akionakamura/scikit-learn,trankmichael/scikit-learn,Lawrence-Liu/scikit-learn,bikong2/scikit-learn,adamgreenhall/scikit-learn,PrashntS/scikit-learn,Clyde-fare/scikit-learn,loli/semisupervisedforests,ephes/scikit-learn,r-mart/scikit-learn,treycausey/scikit-learn,depet/scikit-learn,dingocuster/scikit-learn,huzq/scikit-learn,espg/scikit-learn,glemaitre/scikit-learn,olologin/scikit-learn,rohanp/scikit-learn,voxlol/scikit-learn,fabioticconi/scikit-learn,jakobworldpeace/scikit-learn,cdegroc/scikit-learn,jorge2703/scikit-learn,shikhardb/scikit-learn,smartscheduling/scikit-learn-categorical-tree,rvraghav93/scikit-learn,qifeigit/scikit-learn,fyffyt/scikit-learn,mayblue9/scikit-learn,vermouthmjl/scikit-learn,TomDLT/scikit-learn,altairpearl/scikit-learn,fredhusser/scikit-learn,vinayak-mehta/scikit-learn,gotomypc/scikit-learn,heli522/scikit-learn,ephes/scikit-learn,Titan-C/scikit-learn,vshtanko/scikit-learn,kjung/scikit-learn,f3r/scikit-learn,aabadie/scikit-learn,fredhusser/scikit-learn,hugobowne/scikit-learn,evgchz/scikit-learn,nesterione/scikit-learn,clemkoa/scikit-learn,AlexandreAbraham/scikit-learn,PrashntS/scikit-learn,trungnt13/scikit-learn,mlyundin/scikit-learn,lazywei/scikit-learn,PrashntS/scikit-learn,maheshakya/scikit-learn,manashmndl/scikit-learn,samuel1208/scikit-learn,466152112/scikit-learn,akionakamura/scikit-learn,LohithBlaze/scikit-learn,tosolveit/scikit-learn,deepesch/scikit-learn,Obus/scikit-learn,jmschrei/scikit-learn,cybernet14/scikit-learn,amueller/scikit-learn,harshaneelhg/scikit-learn,rsivapr/scikit-learn,ankurankan/scikit-learn,waterponey/scikit-learn,nvoron23/scikit-learn,q1ang/scikit-learn,victorbergelin/scikit-learn,pypot/scikit-learn,luo66/scikit-learn,beepee14/scikit-learn,sumspr/scikit-learn,zorroblue/scikit-learn,herilalaina/scikit-learn,shenzebang/scikit-learn,Sentient07/scikit-learn,0x0all/scikit-learn,NelisVerhoef/scikit-learn,jblackburne/scikit-learn,anurag313/scikit-learn,ky822/scikit-learn,quheng/scikit-learn,henridwyer/scikit-learn,chrsrds/scikit-learn,robbymeals/scikit-learn,khkaminska/scikit-learn,cdegroc/scikit-learn,henridwyer/scikit-learn,equialgo/scikit-learn,procoder317/scikit-learn,ldirer/scikit-learn,IshankGulati/scikit-learn,jaidevd/scikit-learn,vshtanko/scikit-learn,Vimos/scikit-learn,0asa/scikit-learn,yunfeilu/scikit-learn,yanlend/scikit-learn,fzalkow/scikit-learn,hainm/scikit-learn,kjung/scikit-learn,pnedunuri/scikit-learn,RPGOne/scikit-learn,nvoron23/scikit-learn,idlead/scikit-learn,Adai0808/scikit-learn,zuku1985/scikit-learn,LiaoPan/scikit-learn,ahoyosid/scikit-learn,alexsavio/scikit-learn,lesteve/scikit-learn,CVML/scikit-learn,djgagne/scikit-learn,3manuek/scikit-learn,xyguo/scikit-learn,devanshdalal/scikit-learn,tdhopper/scikit-learn,gotomypc/scikit-learn,nelson-liu/scikit-learn,fzalkow/scikit-learn,raghavrv/scikit-learn,yunfeilu/sciki
t-learn,sonnyhu/scikit-learn,voxlol/scikit-learn,YinongLong/scikit-learn,michigraber/scikit-learn,RachitKansal/scikit-learn,jakirkham/scikit-learn,PatrickChrist/scikit-learn,TomDLT/scikit-learn,xyguo/scikit-learn,betatim/scikit-learn,hitszxp/scikit-learn,kjung/scikit-learn,cybernet14/scikit-learn,NelisVerhoef/scikit-learn,fabianp/scikit-learn,mayblue9/scikit-learn,PatrickOReilly/scikit-learn,ahoyosid/scikit-learn,zhenv5/scikit-learn,glemaitre/scikit-learn,abimannans/scikit-learn,pratapvardhan/scikit-learn,jakirkham/scikit-learn,DSLituiev/scikit-learn,nesterione/scikit-learn,evgchz/scikit-learn,ngoix/OCRF,bikong2/scikit-learn,pompiduskus/scikit-learn,liberatorqjw/scikit-learn,YinongLong/scikit-learn,arahuja/scikit-learn,Windy-Ground/scikit-learn,DonBeo/scikit-learn,marcocaccin/scikit-learn,rrohan/scikit-learn,gotomypc/scikit-learn,billy-inn/scikit-learn,JosmanPS/scikit-learn,mugizico/scikit-learn,rahul-c1/scikit-learn,walterreade/scikit-learn,eickenberg/scikit-learn,RomainBrault/scikit-learn,vinayak-mehta/scikit-learn,jlegendary/scikit-learn,kaichogami/scikit-learn,ZenDevelopmentSystems/scikit-learn,zhenv5/scikit-learn,xavierwu/scikit-learn,JeanKossaifi/scikit-learn,xavierwu/scikit-learn,phdowling/scikit-learn,florian-f/sklearn,trungnt13/scikit-learn,ominux/scikit-learn,simon-pepin/scikit-learn,MohammedWasim/scikit-learn,clemkoa/scikit-learn,hlin117/scikit-learn,hrjn/scikit-learn,joshloyal/scikit-learn,Achuth17/scikit-learn,simon-pepin/scikit-learn,MohammedWasim/scikit-learn,zorojean/scikit-learn,heli522/scikit-learn,cainiaocome/scikit-learn,shusenl/scikit-learn,jblackburne/scikit-learn,walterreade/scikit-learn,raghavrv/scikit-learn,justincassidy/scikit-learn,kmike/scikit-learn,ngoix/OCRF,ishanic/scikit-learn,jpautom/scikit-learn,joshloyal/scikit-learn,madjelan/scikit-learn,gclenaghan/scikit-learn,pkruskal/scikit-learn,Myasuka/scikit-learn,vortex-ape/scikit-learn,mjudsp/Tsallis,harshaneelhg/scikit-learn,fengzhyuan/scikit-learn,OshynSong/scikit-learn,zorojean/scikit-learn,saiwing-yeung/scikit-learn,qifeigit/scikit-learn,rajat1994/scikit-learn,YinongLong/scikit-learn,RachitKansal/scikit-learn,mattilyra/scikit-learn,lesteve/scikit-learn,vybstat/scikit-learn,khkaminska/scikit-learn,nikitasingh981/scikit-learn,toastedcornflakes/scikit-learn,chrisburr/scikit-learn,0asa/scikit-learn,deepesch/scikit-learn,belltailjp/scikit-learn,PatrickOReilly/scikit-learn,kjung/scikit-learn,bigdataelephants/scikit-learn,cl4rke/scikit-learn,lin-credible/scikit-learn,fyffyt/scikit-learn,olologin/scikit-learn,kashif/scikit-learn,madjelan/scikit-learn,mjgrav2001/scikit-learn,heli522/scikit-learn,dsquareindia/scikit-learn,anirudhjayaraman/scikit-learn,AlexRobson/scikit-learn,kaichogami/scikit-learn,fabianp/scikit-learn,mattilyra/scikit-learn,RPGOne/scikit-learn,harshaneelhg/scikit-learn,0x0all/scikit-learn,andrewnc/scikit-learn,raghavrv/scikit-learn,etkirsch/scikit-learn,shahankhatch/scikit-learn,ChanderG/scikit-learn,michigraber/scikit-learn,jorik041/scikit-learn,B3AU/waveTree,CVML/scikit-learn,jmetzen/scikit-learn,larsmans/scikit-learn,djgagne/scikit-learn,NunoEdgarGub1/scikit-learn,andaag/scikit-learn,sergeyf/scikit-learn,carrillo/scikit-learn,PatrickChrist/scikit-learn,abhishekgahlot/scikit-learn,pnedunuri/scikit-learn,Akshay0724/scikit-learn,russel1237/scikit-learn,maheshakya/scikit-learn,vivekmishra1991/scikit-learn,ashhher3/scikit-learn,ilo10/scikit-learn,hdmetor/scikit-learn,Fireblend/scikit-learn,cauchycui/scikit-learn,untom/scikit-learn,betatim/scikit-learn,elkingtonmcb/scikit-learn,jm-begon/scikit-learn,Srisa
i85/scikit-learn,nhejazi/scikit-learn,ilyes14/scikit-learn,aflaxman/scikit-learn,ltiao/scikit-learn,aetilley/scikit-learn,mrshu/scikit-learn,rahuldhote/scikit-learn,wanggang3333/scikit-learn,heli522/scikit-learn,alvarofierroclavero/scikit-learn,mayblue9/scikit-learn,djgagne/scikit-learn,ogrisel/scikit-learn,thilbern/scikit-learn,mehdidc/scikit-learn,aewhatley/scikit-learn,sumspr/scikit-learn,icdishb/scikit-learn,r-mart/scikit-learn,aabadie/scikit-learn,ephes/scikit-learn,frank-tancf/scikit-learn,bhargav/scikit-learn,mxjl620/scikit-learn,clemkoa/scikit-learn,smartscheduling/scikit-learn-categorical-tree,Garrett-R/scikit-learn,jereze/scikit-learn,anntzer/scikit-learn,roxyboy/scikit-learn,sonnyhu/scikit-learn,joshloyal/scikit-learn,russel1237/scikit-learn,pkruskal/scikit-learn,thientu/scikit-learn,ndingwall/scikit-learn,B3AU/waveTree,yanlend/scikit-learn,ky822/scikit-learn,dhruv13J/scikit-learn,vshtanko/scikit-learn,mikebenfield/scikit-learn,voxlol/scikit-learn,mxjl620/scikit-learn,pratapvardhan/scikit-learn,ChanChiChoi/scikit-learn,spallavolu/scikit-learn,tmhm/scikit-learn,espg/scikit-learn,khkaminska/scikit-learn,liyu1990/sklearn,kylerbrown/scikit-learn,etkirsch/scikit-learn,pythonvietnam/scikit-learn,jkarnows/scikit-learn,simon-pepin/scikit-learn,aabadie/scikit-learn,ky822/scikit-learn,AIML/scikit-learn,cybernet14/scikit-learn,rohanp/scikit-learn,ngoix/OCRF,bnaul/scikit-learn,hainm/scikit-learn,altairpearl/scikit-learn,zihua/scikit-learn,ChanChiChoi/scikit-learn,IshankGulati/scikit-learn,AnasGhrab/scikit-learn,belltailjp/scikit-learn,IshankGulati/scikit-learn,eickenberg/scikit-learn,pianomania/scikit-learn,aetilley/scikit-learn,hitszxp/scikit-learn,zaxtax/scikit-learn,moutai/scikit-learn,idlead/scikit-learn,vybstat/scikit-learn,fabianp/scikit-learn,f3r/scikit-learn,Lawrence-Liu/scikit-learn,appapantula/scikit-learn,shikhardb/scikit-learn,liyu1990/sklearn,jmetzen/scikit-learn,shyamalschandra/scikit-learn,sarahgrogan/scikit-learn,rsivapr/scikit-learn,jorik041/scikit-learn,mrshu/scikit-learn,rohanp/scikit-learn,Barmaley-exe/scikit-learn,vermouthmjl/scikit-learn,nmayorov/scikit-learn,jjx02230808/project0223,kylerbrown/scikit-learn,bigdataelephants/scikit-learn,murali-munna/scikit-learn,iismd17/scikit-learn,abimannans/scikit-learn,Garrett-R/scikit-learn,AIML/scikit-learn,466152112/scikit-learn,shusenl/scikit-learn,untom/scikit-learn,nomadcube/scikit-learn,mattilyra/scikit-learn,clemkoa/scikit-learn,CforED/Machine-Learning,lazywei/scikit-learn,h2educ/scikit-learn,arjoly/scikit-learn,devanshdalal/scikit-learn,cybernet14/scikit-learn,DSLituiev/scikit-learn,ivannz/scikit-learn,jblackburne/scikit-learn,iismd17/scikit-learn,aflaxman/scikit-learn,jmschrei/scikit-learn,mhdella/scikit-learn,nvoron23/scikit-learn,siutanwong/scikit-learn,JsNoNo/scikit-learn,jseabold/scikit-learn,moutai/scikit-learn,pythonvietnam/scikit-learn,sgenoud/scikit-learn,themrmax/scikit-learn,mhdella/scikit-learn,RachitKansal/scikit-learn,qifeigit/scikit-learn,xiaoxiamii/scikit-learn,schets/scikit-learn,equialgo/scikit-learn,lazywei/scikit-learn,billy-inn/scikit-learn,btabibian/scikit-learn,jm-begon/scikit-learn,samzhang111/scikit-learn,liangz0707/scikit-learn,MatthieuBizien/scikit-learn,glouppe/scikit-learn,ngoix/OCRF,robbymeals/scikit-learn,vermouthmjl/scikit-learn,kagayakidan/scikit-learn,dsullivan7/scikit-learn,imaculate/scikit-learn,petosegan/scikit-learn,wanggang3333/scikit-learn,anirudhjayaraman/scikit-learn,tdhopper/scikit-learn,rvraghav93/scikit-learn,nikitasingh981/scikit-learn,sgenoud/scikit-learn,AlexRobson/scikit-learn
,alexsavio/scikit-learn,mugizico/scikit-learn,hsiaoyi0504/scikit-learn,vigilv/scikit-learn,RPGOne/scikit-learn,sgenoud/scikit-learn,Clyde-fare/scikit-learn,nrhine1/scikit-learn,spallavolu/scikit-learn,hitszxp/scikit-learn,ningchi/scikit-learn,stylianos-kampakis/scikit-learn,vinayak-mehta/scikit-learn,jayflo/scikit-learn,mjudsp/Tsallis,costypetrisor/scikit-learn,mrshu/scikit-learn,themrmax/scikit-learn,davidgbe/scikit-learn,ndingwall/scikit-learn,cainiaocome/scikit-learn,fbagirov/scikit-learn,pratapvardhan/scikit-learn,dingocuster/scikit-learn,yyjiang/scikit-learn,trankmichael/scikit-learn,B3AU/waveTree,loli/sklearn-ensembletrees,vivekmishra1991/scikit-learn,Windy-Ground/scikit-learn,jzt5132/scikit-learn,zhenv5/scikit-learn,vortex-ape/scikit-learn,aminert/scikit-learn,appapantula/scikit-learn,kmike/scikit-learn,kmike/scikit-learn,krez13/scikit-learn,tmhm/scikit-learn,Myasuka/scikit-learn,robbymeals/scikit-learn,zihua/scikit-learn,0x0all/scikit-learn,ishanic/scikit-learn,Barmaley-exe/scikit-learn,smartscheduling/scikit-learn-categorical-tree,alvarofierroclavero/scikit-learn,plissonf/scikit-learn,zorojean/scikit-learn,AlexandreAbraham/scikit-learn,trankmichael/scikit-learn,hsiaoyi0504/scikit-learn,rahul-c1/scikit-learn,shangwuhencc/scikit-learn,sinhrks/scikit-learn,Sentient07/scikit-learn,equialgo/scikit-learn,Garrett-R/scikit-learn,ElDeveloper/scikit-learn,bikong2/scikit-learn,tmhm/scikit-learn,glemaitre/scikit-learn,aetilley/scikit-learn,etkirsch/scikit-learn,mhdella/scikit-learn,rsivapr/scikit-learn,mattilyra/scikit-learn,JPFrancoia/scikit-learn,h2educ/scikit-learn,joernhees/scikit-learn,Nyker510/scikit-learn,imaculate/scikit-learn,DonBeo/scikit-learn,massmutual/scikit-learn,jm-begon/scikit-learn,kevin-intel/scikit-learn,rohanp/scikit-learn,Titan-C/scikit-learn,JeanKossaifi/scikit-learn,AlexanderFabisch/scikit-learn,robin-lai/scikit-learn,aminert/scikit-learn,mjgrav2001/scikit-learn,bigdataelephants/scikit-learn,adamgreenhall/scikit-learn,LohithBlaze/scikit-learn,manashmndl/scikit-learn,robin-lai/scikit-learn,eg-zhang/scikit-learn,fengzhyuan/scikit-learn,kashif/scikit-learn,yyjiang/scikit-learn,murali-munna/scikit-learn,rexshihaoren/scikit-learn,pompiduskus/scikit-learn,TomDLT/scikit-learn,florian-f/sklearn,chrisburr/scikit-learn,depet/scikit-learn,ltiao/scikit-learn,vivekmishra1991/scikit-learn,chrisburr/scikit-learn,toastedcornflakes/scikit-learn,plissonf/scikit-learn,iismd17/scikit-learn,tdhopper/scikit-learn,justincassidy/scikit-learn,macks22/scikit-learn,ashhher3/scikit-learn,kashif/scikit-learn,JsNoNo/scikit-learn,ankurankan/scikit-learn,fabioticconi/scikit-learn,joshloyal/scikit-learn,larsmans/scikit-learn,NelisVerhoef/scikit-learn,hugobowne/scikit-learn,mwv/scikit-learn,glennq/scikit-learn,lenovor/scikit-learn,jkarnows/scikit-learn,wanggang3333/scikit-learn,vigilv/scikit-learn,Jimmy-Morzaria/scikit-learn,Achuth17/scikit-learn,kaichogami/scikit-learn,dingocuster/scikit-learn,krez13/scikit-learn,rishikksh20/scikit-learn,cauchycui/scikit-learn,betatim/scikit-learn,Aasmi/scikit-learn,samuel1208/scikit-learn,xuewei4d/scikit-learn,evgchz/scikit-learn,harshaneelhg/scikit-learn,Myasuka/scikit-learn,mjudsp/Tsallis,AnasGhrab/scikit-learn,chrsrds/scikit-learn,jpautom/scikit-learn,PatrickOReilly/scikit-learn,Jimmy-Morzaria/scikit-learn,liangz0707/scikit-learn,bthirion/scikit-learn,Lawrence-Liu/scikit-learn,waterponey/scikit-learn,abhishekkrthakur/scikit-learn,BiaDarkia/scikit-learn,q1ang/scikit-learn,terkkila/scikit-learn,thilbern/scikit-learn,0x0all/scikit-learn,xavierwu/scikit-learn,glennq/sc
ikit-learn,poryfly/scikit-learn,elkingtonmcb/scikit-learn,CforED/Machine-Learning,ivannz/scikit-learn,ndingwall/scikit-learn,tomlof/scikit-learn,idlead/scikit-learn,beepee14/scikit-learn,mfjb/scikit-learn,jjx02230808/project0223,NelisVerhoef/scikit-learn,cwu2011/scikit-learn,LiaoPan/scikit-learn,ltiao/scikit-learn,terkkila/scikit-learn,nomadcube/scikit-learn,alvarofierroclavero/scikit-learn,cdegroc/scikit-learn,samuel1208/scikit-learn,Adai0808/scikit-learn,macks22/scikit-learn,3manuek/scikit-learn,madjelan/scikit-learn,RayMick/scikit-learn,giorgiop/scikit-learn,ElDeveloper/scikit-learn,schets/scikit-learn,fbagirov/scikit-learn,abhishekgahlot/scikit-learn,ChanChiChoi/scikit-learn,schets/scikit-learn,russel1237/scikit-learn,pv/scikit-learn,ashhher3/scikit-learn,poryfly/scikit-learn,ky822/scikit-learn,khkaminska/scikit-learn,icdishb/scikit-learn,herilalaina/scikit-learn,mhdella/scikit-learn,sarahgrogan/scikit-learn,nrhine1/scikit-learn,ilyes14/scikit-learn,massmutual/scikit-learn,shikhardb/scikit-learn,loli/sklearn-ensembletrees,jkarnows/scikit-learn,Djabbz/scikit-learn,IndraVikas/scikit-learn,xwolf12/scikit-learn,hsiaoyi0504/scikit-learn,arjoly/scikit-learn,glouppe/scikit-learn,zuku1985/scikit-learn,HolgerPeters/scikit-learn,ankurankan/scikit-learn,RomainBrault/scikit-learn,MartinDelzant/scikit-learn,anntzer/scikit-learn,aetilley/scikit-learn,shenzebang/scikit-learn,luo66/scikit-learn,Aasmi/scikit-learn,ivannz/scikit-learn,depet/scikit-learn,aabadie/scikit-learn,Barmaley-exe/scikit-learn,kevin-intel/scikit-learn,vibhorag/scikit-learn,PatrickChrist/scikit-learn,quheng/scikit-learn,ankurankan/scikit-learn,jaidevd/scikit-learn,yonglehou/scikit-learn,loli/sklearn-ensembletrees,ishanic/scikit-learn,DonBeo/scikit-learn,henrykironde/scikit-learn,CVML/scikit-learn,nhejazi/scikit-learn,ngoix/OCRF,466152112/scikit-learn,hugobowne/scikit-learn,yonglehou/scikit-learn,mwv/scikit-learn,wazeerzulfikar/scikit-learn,yyjiang/scikit-learn,xwolf12/scikit-learn,poryfly/scikit-learn,meduz/scikit-learn,btabibian/scikit-learn,eickenberg/scikit-learn,michigraber/scikit-learn,mjgrav2001/scikit-learn,bthirion/scikit-learn,olologin/scikit-learn,MechCoder/scikit-learn,elkingtonmcb/scikit-learn,RayMick/scikit-learn,jakirkham/scikit-learn,costypetrisor/scikit-learn,wanggang3333/scikit-learn,ahoyosid/scikit-learn,ssaeger/scikit-learn,jkarnows/scikit-learn,bikong2/scikit-learn,tawsifkhan/scikit-learn,huobaowangxi/scikit-learn,wlamond/scikit-learn,0asa/scikit-learn,etkirsch/scikit-learn,maheshakya/scikit-learn,jakobworldpeace/scikit-learn,mlyundin/scikit-learn,hitszxp/scikit-learn,fabioticconi/scikit-learn,CforED/Machine-Learning,JsNoNo/scikit-learn,trungnt13/scikit-learn,kagayakidan/scikit-learn,jereze/scikit-learn,hitszxp/scikit-learn,jmschrei/scikit-learn,IshankGulati/scikit-learn,rexshihaoren/scikit-learn,stylianos-kampakis/scikit-learn,MartinSavc/scikit-learn,henrykironde/scikit-learn,lenovor/scikit-learn,lbishal/scikit-learn,lbishal/scikit-learn,AnasGhrab/scikit-learn,mugizico/scikit-learn,nomadcube/scikit-learn,ldirer/scikit-learn,r-mart/scikit-learn,ilyes14/scikit-learn,eickenberg/scikit-learn,tomlof/scikit-learn,AIML/scikit-learn,themrmax/scikit-learn,RPGOne/scikit-learn,krez13/scikit-learn,shusenl/scikit-learn,frank-tancf/scikit-learn,deepesch/scikit-learn,tomlof/scikit-learn,marcocaccin/scikit-learn,abhishekgahlot/scikit-learn,aminert/scikit-learn,davidgbe/scikit-learn,fabioticconi/scikit-learn,rishikksh20/scikit-learn,lucidfrontier45/scikit-learn,OshynSong/scikit-learn,lin-credible/scikit-learn,cl4rke/scikit-learn
,0asa/scikit-learn,pratapvardhan/scikit-learn,manhhomienbienthuy/scikit-learn,ahoyosid/scikit-learn,joernhees/scikit-learn,UNR-AERIAL/scikit-learn,arahuja/scikit-learn,huobaowangxi/scikit-learn,hdmetor/scikit-learn,pypot/scikit-learn,pkruskal/scikit-learn,chrsrds/scikit-learn,JosmanPS/scikit-learn,bhargav/scikit-learn,bnaul/scikit-learn,rajat1994/scikit-learn,meduz/scikit-learn,jorge2703/scikit-learn,pv/scikit-learn,wzbozon/scikit-learn,giorgiop/scikit-learn,bhargav/scikit-learn,wlamond/scikit-learn,smartscheduling/scikit-learn-categorical-tree,samzhang111/scikit-learn,zaxtax/scikit-learn,hugobowne/scikit-learn,yonglehou/scikit-learn,Clyde-fare/scikit-learn,saiwing-yeung/scikit-learn,liberatorqjw/scikit-learn,NunoEdgarGub1/scikit-learn,ChanderG/scikit-learn,lucidfrontier45/scikit-learn,poryfly/scikit-learn,giorgiop/scikit-learn,shyamalschandra/scikit-learn,nelson-liu/scikit-learn,shangwuhencc/scikit-learn,xzh86/scikit-learn,AIML/scikit-learn,alexsavio/scikit-learn,Akshay0724/scikit-learn,liyu1990/sklearn,mfjb/scikit-learn,glennq/scikit-learn,Obus/scikit-learn,ycaihua/scikit-learn,mjudsp/Tsallis,MartinDelzant/scikit-learn,rrohan/scikit-learn,tomlof/scikit-learn,moutai/scikit-learn,BiaDarkia/scikit-learn,amueller/scikit-learn,rrohan/scikit-learn,vshtanko/scikit-learn,fbagirov/scikit-learn,carrillo/scikit-learn,billy-inn/scikit-learn,RomainBrault/scikit-learn,Garrett-R/scikit-learn,MatthieuBizien/scikit-learn,OshynSong/scikit-learn,hsuantien/scikit-learn,tdhopper/scikit-learn,wazeerzulfikar/scikit-learn,mrshu/scikit-learn,theoryno3/scikit-learn,JsNoNo/scikit-learn,LiaoPan/scikit-learn,alexeyum/scikit-learn,treycausey/scikit-learn,phdowling/scikit-learn,jorik041/scikit-learn,zihua/scikit-learn,pnedunuri/scikit-learn,victorbergelin/scikit-learn,AlexRobson/scikit-learn,amueller/scikit-learn,ivannz/scikit-learn,abhishekgahlot/scikit-learn,jm-begon/scikit-learn,ClimbsRocks/scikit-learn,xuewei4d/scikit-learn,marcocaccin/scikit-learn,Sentient07/scikit-learn,nhejazi/scikit-learn,CforED/Machine-Learning,ogrisel/scikit-learn,madjelan/scikit-learn,ClimbsRocks/scikit-learn,thientu/scikit-learn,waterponey/scikit-learn,ChanderG/scikit-learn,ningchi/scikit-learn,fzalkow/scikit-learn,xzh86/scikit-learn,walterreade/scikit-learn,larsmans/scikit-learn,JeanKossaifi/scikit-learn,hainm/scikit-learn,ngoix/OCRF,pianomania/scikit-learn,beepee14/scikit-learn,ssaeger/scikit-learn,jaidevd/scikit-learn,belltailjp/scikit-learn,MatthieuBizien/scikit-learn,shusenl/scikit-learn,pv/scikit-learn,rsivapr/scikit-learn,shahankhatch/scikit-learn,fyffyt/scikit-learn,maheshakya/scikit-learn,nhejazi/scikit-learn,quheng/scikit-learn,depet/scikit-learn,andaag/scikit-learn,MartinSavc/scikit-learn,maheshakya/scikit-learn,IndraVikas/scikit-learn,dsquareindia/scikit-learn,mhue/scikit-learn,pythonvietnam/scikit-learn,JPFrancoia/scikit-learn,alexsavio/scikit-learn,frank-tancf/scikit-learn,lbishal/scikit-learn,Nyker510/scikit-learn,cauchycui/scikit-learn,equialgo/scikit-learn,glouppe/scikit-learn,HolgerPeters/scikit-learn,florian-f/sklearn,ChanderG/scikit-learn,treycausey/scikit-learn,siutanwong/scikit-learn,tmhm/scikit-learn,jorge2703/scikit-learn,nelson-liu/scikit-learn,zuku1985/scikit-learn,nesterione/scikit-learn,sinhrks/scikit-learn,B3AU/waveTree,PrashntS/scikit-learn,wazeerzulfikar/scikit-learn,espg/scikit-learn,jseabold/scikit-learn,mfjb/scikit-learn,untom/scikit-learn,phdowling/scikit-learn,BiaDarkia/scikit-learn,jereze/scikit-learn,altairpearl/scikit-learn,saiwing-yeung/scikit-learn,Fireblend/scikit-learn,kmike/scikit-learn,chrisburr/
scikit-learn,xwolf12/scikit-learn,jzt5132/scikit-learn,shyamalschandra/scikit-learn,vibhorag/scikit-learn,walterreade/scikit-learn,sinhrks/scikit-learn,billy-inn/scikit-learn,UNR-AERIAL/scikit-learn,RayMick/scikit-learn,tosolveit/scikit-learn,sanketloke/scikit-learn,AlexandreAbraham/scikit-learn,ClimbsRocks/scikit-learn,sonnyhu/scikit-learn,AnasGhrab/scikit-learn,mattgiguere/scikit-learn,Aasmi/scikit-learn,manashmndl/scikit-learn,466152112/scikit-learn,hsuantien/scikit-learn,procoder317/scikit-learn,Jimmy-Morzaria/scikit-learn,shangwuhencc/scikit-learn,wlamond/scikit-learn,MechCoder/scikit-learn,rajat1994/scikit-learn,adamgreenhall/scikit-learn,sumspr/scikit-learn,mojoboss/scikit-learn,henrykironde/scikit-learn,sgenoud/scikit-learn,Garrett-R/scikit-learn,xiaoxiamii/scikit-learn,massmutual/scikit-learn,arabenjamin/scikit-learn,larsmans/scikit-learn,macks22/scikit-learn,toastedcornflakes/scikit-learn,loli/semisupervisedforests,glennq/scikit-learn,btabibian/scikit-learn,liangz0707/scikit-learn,rahul-c1/scikit-learn,mikebenfield/scikit-learn,mojoboss/scikit-learn,tawsifkhan/scikit-learn,dhruv13J/scikit-learn,vybstat/scikit-learn,anirudhjayaraman/scikit-learn,yask123/scikit-learn,shahankhatch/scikit-learn,IssamLaradji/scikit-learn,pompiduskus/scikit-learn,larsmans/scikit-learn,idlead/scikit-learn,JPFrancoia/scikit-learn,zorroblue/scikit-learn,mhue/scikit-learn,dhruv13J/scikit-learn,cauchycui/scikit-learn,bthirion/scikit-learn,CVML/scikit-learn,ankurankan/scikit-learn,shikhardb/scikit-learn,jereze/scikit-learn,hsuantien/scikit-learn,costypetrisor/scikit-learn,schets/scikit-learn,dhruv13J/scikit-learn,imaculate/scikit-learn,kashif/scikit-learn,theoryno3/scikit-learn,wzbozon/scikit-learn,eg-zhang/scikit-learn,kagayakidan/scikit-learn,ningchi/scikit-learn,xwolf12/scikit-learn,IndraVikas/scikit-learn,sergeyf/scikit-learn,Djabbz/scikit-learn,lenovor/scikit-learn,beepee14/scikit-learn,thilbern/scikit-learn,IssamLaradji/scikit-learn,giorgiop/scikit-learn,huzq/scikit-learn,potash/scikit-learn,dsullivan7/scikit-learn,vivekmishra1991/scikit-learn,glemaitre/scikit-learn,bthirion/scikit-learn,Achuth17/scikit-learn,btabibian/scikit-learn,scikit-learn/scikit-learn,cdegroc/scikit-learn,yask123/scikit-learn,ElDeveloper/scikit-learn,pythonvietnam/scikit-learn,plissonf/scikit-learn,jmetzen/scikit-learn,depet/scikit-learn,florian-f/sklearn,Clyde-fare/scikit-learn,xiaoxiamii/scikit-learn,aewhatley/scikit-learn,spallavolu/scikit-learn,jayflo/scikit-learn,MartinSavc/scikit-learn,belltailjp/scikit-learn,yask123/scikit-learn,sgenoud/scikit-learn,ZENGXH/scikit-learn,mwv/scikit-learn,HolgerPeters/scikit-learn,yask123/scikit-learn,Titan-C/scikit-learn,quheng/scikit-learn,mojoboss/scikit-learn,zorojean/scikit-learn,rvraghav93/scikit-learn,luo66/scikit-learn,manashmndl/scikit-learn,DSLituiev/scikit-learn,lin-credible/scikit-learn,mjudsp/Tsallis,siutanwong/scikit-learn,JeanKossaifi/scikit-learn,mlyundin/scikit-learn,liyu1990/sklearn,vortex-ape/scikit-learn,MartinSavc/scikit-learn,loli/semisupervisedforests,shenzebang/scikit-learn,tawsifkhan/scikit-learn,anntzer/scikit-learn,massmutual/scikit-learn,nrhine1/scikit-learn,ilo10/scikit-learn,potash/scikit-learn,plissonf/scikit-learn,IndraVikas/scikit-learn,arabenjamin/scikit-learn,ephes/scikit-learn,sumspr/scikit-learn,abimannans/scikit-learn,MohammedWasim/scikit-learn,joernhees/scikit-learn,nmayorov/scikit-learn,mlyundin/scikit-learn,dsullivan7/scikit-learn,q1ang/scikit-learn,andaag/scikit-learn,f3r/scikit-learn,PatrickOReilly/scikit-learn,Vimos/scikit-learn,pv/scikit-learn,petos
egan/scikit-learn,saiwing-yeung/scikit-learn,xyguo/scikit-learn,kevin-intel/scikit-learn,altairpearl/scikit-learn,AlexandreAbraham/scikit-learn,herilalaina/scikit-learn,tosolveit/scikit-learn,fredhusser/scikit-learn,IssamLaradji/scikit-learn,kmike/scikit-learn,hlin117/scikit-learn,lin-credible/scikit-learn,victorbergelin/scikit-learn,lesteve/scikit-learn,alexeyum/scikit-learn,robbymeals/scikit-learn,ZENGXH/scikit-learn,lucidfrontier45/scikit-learn,henrykironde/scikit-learn,terkkila/scikit-learn,hdmetor/scikit-learn,vybstat/scikit-learn,Adai0808/scikit-learn,hrjn/scikit-learn,MohammedWasim/scikit-learn,NunoEdgarGub1/scikit-learn,ilo10/scikit-learn,yunfeilu/scikit-learn,nrhine1/scikit-learn,theoryno3/scikit-learn,aewhatley/scikit-learn,hsuantien/scikit-learn,B3AU/waveTree,abhishekkrthakur/scikit-learn,arahuja/scikit-learn,zaxtax/scikit-learn,florian-f/sklearn,carrillo/scikit-learn,xubenben/scikit-learn,vortex-ape/scikit-learn,nikitasingh981/scikit-learn,jakobworldpeace/scikit-learn,andrewnc/scikit-learn,arabenjamin/scikit-learn,Titan-C/scikit-learn,yunfeilu/scikit-learn,Srisai85/scikit-learn,fzalkow/scikit-learn,mattgiguere/scikit-learn,mblondel/scikit-learn,rahuldhote/scikit-learn,arjoly/scikit-learn,mugizico/scikit-learn,pkruskal/scikit-learn,marcocaccin/scikit-learn,ishanic/scikit-learn,trankmichael/scikit-learn,imaculate/scikit-learn,justincassidy/scikit-learn,treycausey/scikit-learn,rexshihaoren/scikit-learn,RayMick/scikit-learn,cwu2011/scikit-learn,hlin117/scikit-learn,tosolveit/scikit-learn,murali-munna/scikit-learn,jzt5132/scikit-learn,MartinDelzant/scikit-learn,wazeerzulfikar/scikit-learn,sanketloke/scikit-learn,shenzebang/scikit-learn,trungnt13/scikit-learn,krez13/scikit-learn,waterponey/scikit-learn,ltiao/scikit-learn,sergeyf/scikit-learn,jblackburne/scikit-learn,xubenben/scikit-learn,MechCoder/scikit-learn,nomadcube/scikit-learn,Obus/scikit-learn
|
ENH/DOC: Add an example doing classification on digits.
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@669 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8
|
"""
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: Simplified BSD
# Standard scientific Python imports
import pylab as pl
# The digits dataset
from scikits.learn import datasets
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits,
# let's have a look at the first 4 images. We know which digit they
# represent: it is given in the 'target' of the dataset.
for index, (image, label) in enumerate(zip(digits.images, digits.target)[:4]):
pl.subplot(2, 4, index+1)
pl.imshow(image, cmap=pl.cm.gray_r)
pl.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Import a classifier:
from scikits.learn import svm
classifier = svm.SVC()
# We learn the digits on the first half of the dataset
classifier.fit(data[:n_samples/2], digits.target[:n_samples/2])
# Now predict the value of the digit on the second half:
predicted = classifier.predict(data[n_samples/2:])
for index, (image, prediction) in enumerate(zip(
        digits.images[n_samples/2:],
predicted
)[:4]):
pl.subplot(2, 4, index+5)
pl.imshow(image, cmap=pl.cm.gray_r)
pl.title('Prediction: %i' % prediction)
pl.show()
|
<commit_before><commit_msg>ENH/DOC: Add an example doing classification on digits.
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@669 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8<commit_after>
|
"""
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: Simplified BSD
# Standard scientific Python imports
import pylab as pl
# The digits dataset
from scikits.learn import datasets
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits,
# let's have a look at the first 4 images. We know which digit they
# represent: it is given in the 'target' of the dataset.
for index, (image, label) in enumerate(zip(digits.images, digits.target)[:4]):
pl.subplot(2, 4, index+1)
pl.imshow(image, cmap=pl.cm.gray_r)
pl.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Import a classifier:
from scikits.learn import svm
classifier = svm.SVC()
# We learn the digits on the first half of the dataset
classifier.fit(data[:n_samples/2], digits.target[:n_samples/2])
# Now predict the value of the digit on the second half:
predicted = classifier.predict(data[n_samples/2:])
for index, (image, prediction) in enumerate(zip(
        digits.images[n_samples/2:],
predicted
)[:4]):
pl.subplot(2, 4, index+5)
pl.imshow(image, cmap=pl.cm.gray_r)
pl.title('Prediction: %i' % prediction)
pl.show()
|
ENH/DOC: Add an example doing classification on digits.
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@669 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8"""
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: Simplified BSD
# Standard scientific Python imports
import pylab as pl
# The digits dataset
from scikits.learn import datasets
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits,
# let's have a look at the first 4 images. We know which digit they
# represent: it is given in the 'target' of the dataset.
for index, (image, label) in enumerate(zip(digits.images, digits.target)[:4]):
pl.subplot(2, 4, index+1)
pl.imshow(image, cmap=pl.cm.gray_r)
pl.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Import a classifier:
from scikits.learn import svm
classifier = svm.SVC()
# We learn the digits on the first half of the dataset
classifier.fit(data[:n_samples/2], digits.target[:n_samples/2])
# Now predict the value of the digit on the second half:
predicted = classifier.predict(data[n_samples/2:])
for index, (image, prediction) in enumerate(zip(
        digits.images[n_samples/2:],
predicted
)[:4]):
pl.subplot(2, 4, index+5)
pl.imshow(image, cmap=pl.cm.gray_r)
pl.title('Prediction: %i' % prediction)
pl.show()
|
<commit_before><commit_msg>ENH/DOC: Add an example doing classification on digits.
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@669 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8<commit_after>"""
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: Simplified BSD
# Standard scientific Python imports
import pylab as pl
# The digits dataset
from scikits.learn import datasets
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits,
# let's have a look at the first 4 images. We know which digit they
# represent: it is given in the 'target' of the dataset.
for index, (image, label) in enumerate(zip(digits.images, digits.target)[:4]):
pl.subplot(2, 4, index+1)
pl.imshow(image, cmap=pl.cm.gray_r)
pl.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Import a classifier:
from scikits.learn import svm
classifier = svm.SVC()
# We learn the digits on the first half of the dataset
classifier.fit(data[:n_samples/2], digits.target[:n_samples/2])
# Now predict the value of the digit on the second half:
predicted = classifier.predict(data[n_samples/2:])
for index, (image, prediction) in enumerate(zip(
        digits.images[n_samples/2:],
predicted
)[:4]):
pl.subplot(2, 4, index+5)
pl.imshow(image, cmap=pl.cm.gray_r)
pl.title('Prediction: %i' % prediction)
pl.show()
|
|
0c095cd6f1e7e04aed458f635acb3101b25d319a
|
umibukela/migrations/0014_auto_20170110_1019.py
|
umibukela/migrations/0014_auto_20170110_1019.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('umibukela', '0013_auto_20161215_1252'),
]
operations = [
migrations.RemoveField(
model_name='surveysource',
name='survey',
),
migrations.DeleteModel(
name='SurveySource',
),
]
|
Add forgotten table delete for table we don't need
|
Add forgotten table delete for table we don't need
|
Python
|
mit
|
Code4SA/umibukela,Code4SA/umibukela,Code4SA/umibukela,Code4SA/umibukela
|
Add forgotten table delete for table we don't need
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('umibukela', '0013_auto_20161215_1252'),
]
operations = [
migrations.RemoveField(
model_name='surveysource',
name='survey',
),
migrations.DeleteModel(
name='SurveySource',
),
]
|
<commit_before><commit_msg>Add forgotten table delete for table we don't need<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('umibukela', '0013_auto_20161215_1252'),
]
operations = [
migrations.RemoveField(
model_name='surveysource',
name='survey',
),
migrations.DeleteModel(
name='SurveySource',
),
]
|
Add forgotten table delete for table we don't need# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('umibukela', '0013_auto_20161215_1252'),
]
operations = [
migrations.RemoveField(
model_name='surveysource',
name='survey',
),
migrations.DeleteModel(
name='SurveySource',
),
]
|
<commit_before><commit_msg>Add forgotten table delete for table we don't need<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('umibukela', '0013_auto_20161215_1252'),
]
operations = [
migrations.RemoveField(
model_name='surveysource',
name='survey',
),
migrations.DeleteModel(
name='SurveySource',
),
]
|
|
02c0c3bd4b5ff7629af35bfb8a21dba38133033e
|
scripts/mvf_read_benchmark.py
|
scripts/mvf_read_benchmark.py
|
#!/usr/bin/env python
from __future__ import print_function, division, absolute_import
from builtins import range
import argparse
import logging
import time
import katdal
from katdal.lazy_indexer import DaskLazyIndexer
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('filename')
parser.add_argument('--time', type=int, default=10, help='Number of dumps to read per batch')
parser.add_argument('--channels', type=int, help='Number of channels to read')
parser.add_argument('--dumps', type=int, help='Total number of dumps to read')
parser.add_argument('--joint', action='store_true', help='Load vis, weights, flags together')
args = parser.parse_args()
logging.basicConfig(level='INFO', format='%(asctime)s [%(levelname)s] %(message)s')
logging.info('Starting')
f = katdal.open(args.filename)
logging.info('File loaded, shape %s', f.shape)
if args.channels:
f.select(channels=np.s_[:args.channels])
if args.dumps:
f.select(dumps=np.s_[:args.dumps])
start = time.time()
for st in range(0, f.shape[0], args.time):
et = st + args.time
if args.joint:
vis, weights, flags = DaskLazyIndexer.get([f.vis, f.weights, f.flags], np.s_[st:et])
else:
vis = f.vis[st:et]
weights = f.weights[st:et]
flags = f.flags[st:et]
logging.info('Loaded %d dumps', vis.shape[0])
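# Rough size estimate: the factor 10 approximates the combined bytes per element of vis, weights and flags.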
size = np.prod(f.shape) * 10
elapsed = time.time() - start
logging.info('Loaded %d bytes in %.3f s (%.3f MB/s)', size, elapsed, size / elapsed / 1e6)
|
Add a tool for benchmarking read performance
|
Add a tool for benchmarking read performance
It's not installed, on the assumption that it will be used by katdal
developers, not users.
|
Python
|
bsd-3-clause
|
ska-sa/katdal
|
Add a tool for benchmarking read performance
It's not installed, on the assumption that it will be used by katdal
developers, not users.
|
#!/usr/bin/env python
from __future__ import print_function, division, absolute_import
from builtins import range
import argparse
import logging
import time
import katdal
from katdal.lazy_indexer import DaskLazyIndexer
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('filename')
parser.add_argument('--time', type=int, default=10, help='Number of dumps to read per batch')
parser.add_argument('--channels', type=int, help='Number of channels to read')
parser.add_argument('--dumps', type=int, help='Total number of dumps to read')
parser.add_argument('--joint', action='store_true', help='Load vis, weights, flags together')
args = parser.parse_args()
logging.basicConfig(level='INFO', format='%(asctime)s [%(levelname)s] %(message)s')
logging.info('Starting')
f = katdal.open(args.filename)
logging.info('File loaded, shape %s', f.shape)
if args.channels:
f.select(channels=np.s_[:args.channels])
if args.dumps:
f.select(dumps=np.s_[:args.dumps])
start = time.time()
for st in range(0, f.shape[0], args.time):
et = st + args.time
if args.joint:
vis, weights, flags = DaskLazyIndexer.get([f.vis, f.weights, f.flags], np.s_[st:et])
else:
vis = f.vis[st:et]
weights = f.weights[st:et]
flags = f.flags[st:et]
logging.info('Loaded %d dumps', vis.shape[0])
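# Rough size estimate: the factor 10 approximates the combined bytes per element of vis, weights and flags.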
size = np.prod(f.shape) * 10
elapsed = time.time() - start
logging.info('Loaded %d bytes in %.3f s (%.3f MB/s)', size, elapsed, size / elapsed / 1e6)
|
<commit_before><commit_msg>Add a tool for benchmarking read performance
It's not installed, on the assumption that it will be used by katdal
developers, not users.<commit_after>
|
#!/usr/bin/env python
from __future__ import print_function, division, absolute_import
from builtins import range
import argparse
import logging
import time
import katdal
from katdal.lazy_indexer import DaskLazyIndexer
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('filename')
parser.add_argument('--time', type=int, default=10, help='Number of dumps to read per batch')
parser.add_argument('--channels', type=int, help='Number of channels to read')
parser.add_argument('--dumps', type=int, help='Total number of dumps to read')
parser.add_argument('--joint', action='store_true', help='Load vis, weights, flags together')
args = parser.parse_args()
logging.basicConfig(level='INFO', format='%(asctime)s [%(levelname)s] %(message)s')
logging.info('Starting')
f = katdal.open(args.filename)
logging.info('File loaded, shape %s', f.shape)
if args.channels:
f.select(channels=np.s_[:args.channels])
if args.dumps:
f.select(dumps=np.s_[:args.dumps])
start = time.time()
for st in range(0, f.shape[0], args.time):
et = st + args.time
if args.joint:
vis, weights, flags = DaskLazyIndexer.get([f.vis, f.weights, f.flags], np.s_[st:et])
else:
vis = f.vis[st:et]
weights = f.weights[st:et]
flags = f.flags[st:et]
logging.info('Loaded %d dumps', vis.shape[0])
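# Rough size estimate: the factor 10 approximates the combined bytes per element of vis, weights and flags.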
size = np.prod(f.shape) * 10
elapsed = time.time() - start
logging.info('Loaded %d bytes in %.3f s (%.3f MB/s)', size, elapsed, size / elapsed / 1e6)
|
Add a tool for benchmarking read performance
It's not installed, on the assumption that it will be used by katdal
developers, not users.#!/usr/bin/env python
from __future__ import print_function, division, absolute_import
from builtins import range
import argparse
import logging
import time
import katdal
from katdal.lazy_indexer import DaskLazyIndexer
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('filename')
parser.add_argument('--time', type=int, default=10, help='Number of dumps to read per batch')
parser.add_argument('--channels', type=int, help='Number of channels to read')
parser.add_argument('--dumps', type=int, help='Total number of dumps to read')
parser.add_argument('--joint', action='store_true', help='Load vis, weights, flags together')
args = parser.parse_args()
logging.basicConfig(level='INFO', format='%(asctime)s [%(levelname)s] %(message)s')
logging.info('Starting')
f = katdal.open(args.filename)
logging.info('File loaded, shape %s', f.shape)
if args.channels:
f.select(channels=np.s_[:args.channels])
if args.dumps:
f.select(dumps=np.s_[:args.dumps])
start = time.time()
for st in range(0, f.shape[0], args.time):
et = st + args.time
if args.joint:
vis, weights, flags = DaskLazyIndexer.get([f.vis, f.weights, f.flags], np.s_[st:et])
else:
vis = f.vis[st:et]
weights = f.weights[st:et]
flags = f.flags[st:et]
logging.info('Loaded %d dumps', vis.shape[0])
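# Rough size estimate: the factor 10 approximates the combined bytes per element of vis, weights and flags.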
size = np.prod(f.shape) * 10
elapsed = time.time() - start
logging.info('Loaded %d bytes in %.3f s (%.3f MB/s)', size, elapsed, size / elapsed / 1e6)
|
<commit_before><commit_msg>Add a tool for benchmarking read performance
It's not installed, on the assumption that it will be used by katdal
developers, not users.<commit_after>#!/usr/bin/env python
from __future__ import print_function, division, absolute_import
from builtins import range
import argparse
import logging
import time
import katdal
from katdal.lazy_indexer import DaskLazyIndexer
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('filename')
parser.add_argument('--time', type=int, default=10, help='Number of dumps to read per batch')
parser.add_argument('--channels', type=int, help='Number of channels to read')
parser.add_argument('--dumps', type=int, help='Total number of dumps to read')
parser.add_argument('--joint', action='store_true', help='Load vis, weights, flags together')
args = parser.parse_args()
logging.basicConfig(level='INFO', format='%(asctime)s [%(levelname)s] %(message)s')
logging.info('Starting')
f = katdal.open(args.filename)
logging.info('File loaded, shape %s', f.shape)
if args.channels:
f.select(channels=np.s_[:args.channels])
if args.dumps:
f.select(dumps=np.s_[:args.dumps])
start = time.time()
for st in range(0, f.shape[0], args.time):
et = st + args.time
if args.joint:
vis, weights, flags = DaskLazyIndexer.get([f.vis, f.weights, f.flags], np.s_[st:et])
else:
vis = f.vis[st:et]
weights = f.weights[st:et]
flags = f.flags[st:et]
logging.info('Loaded %d dumps', vis.shape[0])
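# Rough size estimate: the factor 10 approximates the combined bytes per element of vis, weights and flags.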
size = np.prod(f.shape) * 10
elapsed = time.time() - start
logging.info('Loaded %d bytes in %.3f s (%.3f MB/s)', size, elapsed, size / elapsed / 1e6)
|
|
2c3e29c78e2600b33380847352f914049f2b9f25
|
ynr/apps/people/migrations/0006_move_person_gfks.py
|
ynr/apps/people/migrations/0006_move_person_gfks.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-10-29 15:10
from __future__ import unicode_literals
from django.db import migrations
def move_popolo_person_gfks_to_people_person(apps, schema_editor):
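    # Repoint the generic relations (links, other names, contact details,
    # identifiers and sources) from the popolo Person content type to the
    # people Person content type.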
PeoplePerson = apps.get_model("people", "Person")
PopoloPerson = apps.get_model("popolo", "Person")
ContentType = apps.get_model("contenttypes", "ContentType")
models = [
apps.get_model("popolo", "Link"),
apps.get_model("popolo", "OtherName"),
apps.get_model("popolo", "ContactDetail"),
apps.get_model("popolo", "Identifier"),
apps.get_model("popolo", "Source"),
]
people_person_ct = ContentType.objects.get_for_model(PeoplePerson).pk
popolo_person_ct = ContentType.objects.get_for_model(PopoloPerson).pk
for model in models:
model.objects.filter(content_type=popolo_person_ct).update(
content_type=people_person_ct
)
class Migration(migrations.Migration):
dependencies = [("people", "0005_move_person_image_fk_to_person_app")]
operations = [
migrations.RunPython(
move_popolo_person_gfks_to_people_person, migrations.RunPython.noop
)
]
|
Move GenericForeignKeys from popolo.Person to people.Person
|
Move GenericForeignKeys from popolo.Person to people.Person
This was missing from the work to move People to the person app
|
Python
|
agpl-3.0
|
DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative
|
Move GenericForeignKeys from popolo.Person to people.Person
This was missing from the work to move People to the person app
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-10-29 15:10
from __future__ import unicode_literals
from django.db import migrations
def move_popolo_person_gfks_to_people_person(apps, schema_editor):
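    # Repoint the generic relations (links, other names, contact details,
    # identifiers and sources) from the popolo Person content type to the
    # people Person content type.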
PeoplePerson = apps.get_model("people", "Person")
PopoloPerson = apps.get_model("popolo", "Person")
ContentType = apps.get_model("contenttypes", "ContentType")
models = [
apps.get_model("popolo", "Link"),
apps.get_model("popolo", "OtherName"),
apps.get_model("popolo", "ContactDetail"),
apps.get_model("popolo", "Identifier"),
apps.get_model("popolo", "Source"),
]
people_person_ct = ContentType.objects.get_for_model(PeoplePerson).pk
popolo_person_ct = ContentType.objects.get_for_model(PopoloPerson).pk
for model in models:
model.objects.filter(content_type=popolo_person_ct).update(
content_type=people_person_ct
)
class Migration(migrations.Migration):
dependencies = [("people", "0005_move_person_image_fk_to_person_app")]
operations = [
migrations.RunPython(
move_popolo_person_gfks_to_people_person, migrations.RunPython.noop
)
]
|
<commit_before><commit_msg>Move GenericForeignKeys from popolo.Person to people.Person
This was missing from the work to move People to the person app<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-10-29 15:10
from __future__ import unicode_literals
from django.db import migrations
def move_popolo_person_gfks_to_people_person(apps, schema_editor):
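    # Repoint the generic relations (links, other names, contact details,
    # identifiers and sources) from the popolo Person content type to the
    # people Person content type.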
PeoplePerson = apps.get_model("people", "Person")
PopoloPerson = apps.get_model("popolo", "Person")
ContentType = apps.get_model("contenttypes", "ContentType")
models = [
apps.get_model("popolo", "Link"),
apps.get_model("popolo", "OtherName"),
apps.get_model("popolo", "ContactDetail"),
apps.get_model("popolo", "Identifier"),
apps.get_model("popolo", "Source"),
]
people_person_ct = ContentType.objects.get_for_model(PeoplePerson).pk
popolo_person_ct = ContentType.objects.get_for_model(PopoloPerson).pk
for model in models:
model.objects.filter(content_type=popolo_person_ct).update(
content_type=people_person_ct
)
class Migration(migrations.Migration):
dependencies = [("people", "0005_move_person_image_fk_to_person_app")]
operations = [
migrations.RunPython(
move_popolo_person_gfks_to_people_person, migrations.RunPython.noop
)
]
|
Move GenericForeignKeys from popolo.Person to people.Person
This was missing from the work to move People to the person app# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-10-29 15:10
from __future__ import unicode_literals
from django.db import migrations
def move_popolo_person_gfks_to_people_person(apps, schema_editor):
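    # Repoint the generic relations (links, other names, contact details,
    # identifiers and sources) from the popolo Person content type to the
    # people Person content type.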
PeoplePerson = apps.get_model("people", "Person")
PopoloPerson = apps.get_model("popolo", "Person")
ContentType = apps.get_model("contenttypes", "ContentType")
models = [
apps.get_model("popolo", "Link"),
apps.get_model("popolo", "OtherName"),
apps.get_model("popolo", "ContactDetail"),
apps.get_model("popolo", "Identifier"),
apps.get_model("popolo", "Source"),
]
people_person_ct = ContentType.objects.get_for_model(PeoplePerson).pk
popolo_person_ct = ContentType.objects.get_for_model(PopoloPerson).pk
for model in models:
model.objects.filter(content_type=popolo_person_ct).update(
content_type=people_person_ct
)
class Migration(migrations.Migration):
dependencies = [("people", "0005_move_person_image_fk_to_person_app")]
operations = [
migrations.RunPython(
move_popolo_person_gfks_to_people_person, migrations.RunPython.noop
)
]
|
<commit_before><commit_msg>Move GenericForeignKeys from popolo.Person to people.Person
This was missing from the work to move People to the person app<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-10-29 15:10
from __future__ import unicode_literals
from django.db import migrations
def move_popolo_person_gfks_to_people_person(apps, schema_editor):
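    # Repoint the generic relations (links, other names, contact details,
    # identifiers and sources) from the popolo Person content type to the
    # people Person content type.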
PeoplePerson = apps.get_model("people", "Person")
PopoloPerson = apps.get_model("popolo", "Person")
ContentType = apps.get_model("contenttypes", "ContentType")
models = [
apps.get_model("popolo", "Link"),
apps.get_model("popolo", "OtherName"),
apps.get_model("popolo", "ContactDetail"),
apps.get_model("popolo", "Identifier"),
apps.get_model("popolo", "Source"),
]
people_person_ct = ContentType.objects.get_for_model(PeoplePerson).pk
popolo_person_ct = ContentType.objects.get_for_model(PopoloPerson).pk
for model in models:
model.objects.filter(content_type=popolo_person_ct).update(
content_type=people_person_ct
)
class Migration(migrations.Migration):
dependencies = [("people", "0005_move_person_image_fk_to_person_app")]
operations = [
migrations.RunPython(
move_popolo_person_gfks_to_people_person, migrations.RunPython.noop
)
]
|
|
8fa5871adb9b872d1ac1117810b8511a8325ad5c
|
openprescribing/dmd/management/commands/summarise_ncso_concessions.py
|
openprescribing/dmd/management/commands/summarise_ncso_concessions.py
|
from datetime import date
from django.core.management import BaseCommand
from dmd.models import NCSOConcession
class Command(BaseCommand):
def handle(self, *args, **kwargs):
today = date.today()
first_of_month = date(today.year, today.month, 1)
num_concessions = NCSOConcession.objects.count()
num_concessions_in_this_month = NCSOConcession.objects.filter(
date=first_of_month
).count()
unmatched_concessions = NCSOConcession.objects.filter(vmpp_id__isnull=True)
num_unmatched_concessions = unmatched_concessions.count()
lines = []
lines.append("There are {} concessions".format(num_concessions))
lines.append(
"There are {} concessions for {}".format(
num_concessions_in_this_month, today.strftime("%B %Y")
)
)
if num_unmatched_concessions == 0:
lines.append("There are no unreconciled concessions")
elif num_unmatched_concessions == 1:
lines.append("There is 1 unreconciled concession")
else:
lines.append(
"There are {} unreconciled concessions".format(
num_unmatched_concessions
)
)
if num_unmatched_concessions > 0:
lines.append("")
lines.append("To reconcile, tell ebmbot:")
lines.append("`op reconcile concession [ID] against [VMPP ID]`")
for c in unmatched_concessions:
lines.append("-" * 80)
lines.append("ID: {}".format(c.id))
lines.append(u"Drug: {}".format(c.drug))
lines.append(u"Pack size: {}".format(c.pack_size))
print("\n".join(lines))
|
Add task to summarise concessions
|
Add task to summarise concessions
|
Python
|
mit
|
annapowellsmith/openpresc,ebmdatalab/openprescribing,annapowellsmith/openpresc,ebmdatalab/openprescribing,ebmdatalab/openprescribing,annapowellsmith/openpresc,ebmdatalab/openprescribing,annapowellsmith/openpresc
|
Add task to summarise concessions
|
from datetime import date
from django.core.management import BaseCommand
from dmd.models import NCSOConcession
class Command(BaseCommand):
def handle(self, *args, **kwargs):
today = date.today()
first_of_month = date(today.year, today.month, 1)
num_concessions = NCSOConcession.objects.count()
num_concessions_in_this_month = NCSOConcession.objects.filter(
date=first_of_month
).count()
unmatched_concessions = NCSOConcession.objects.filter(vmpp_id__isnull=True)
num_unmatched_concessions = unmatched_concessions.count()
lines = []
lines.append("There are {} concessions".format(num_concessions))
lines.append(
"There are {} concessions for {}".format(
num_concessions_in_this_month, today.strftime("%B %Y")
)
)
if num_unmatched_concessions == 0:
lines.append("There are no unreconciled concessions")
elif num_unmatched_concessions == 1:
lines.append("There is 1 unreconciled concession")
else:
lines.append(
"There are {} unreconciled concessions".format(
num_unmatched_concessions
)
)
if num_unmatched_concessions > 0:
lines.append("")
lines.append("To reconcile, tell ebmbot:")
lines.append("`op reconcile concession [ID] against [VMPP ID]`")
for c in unmatched_concessions:
lines.append("-" * 80)
lines.append("ID: {}".format(c.id))
lines.append(u"Drug: {}".format(c.drug))
lines.append(u"Pack size: {}".format(c.pack_size))
print("\n".join(lines))
|
<commit_before><commit_msg>Add task to summarise concessions<commit_after>
|
from datetime import date
from django.core.management import BaseCommand
from dmd.models import NCSOConcession
class Command(BaseCommand):
def handle(self, *args, **kwargs):
today = date.today()
first_of_month = date(today.year, today.month, 1)
num_concessions = NCSOConcession.objects.count()
num_concessions_in_this_month = NCSOConcession.objects.filter(
date=first_of_month
).count()
unmatched_concessions = NCSOConcession.objects.filter(vmpp_id__isnull=True)
num_unmatched_concessions = unmatched_concessions.count()
lines = []
lines.append("There are {} concessions".format(num_concessions))
lines.append(
"There are {} concessions for {}".format(
num_concessions_in_this_month, today.strftime("%B %Y")
)
)
if num_unmatched_concessions == 0:
lines.append("There are no unreconciled concessions")
elif num_unmatched_concessions == 1:
lines.append("There is 1 unreconciled concession")
else:
lines.append(
"There are {} unreconciled concessions".format(
num_unmatched_concessions
)
)
if num_unmatched_concessions > 0:
lines.append("")
lines.append("To reconcile, tell ebmbot:")
lines.append("`op reconcile concession [ID] against [VMPP ID]`")
for c in unmatched_concessions:
lines.append("-" * 80)
lines.append("ID: {}".format(c.id))
lines.append(u"Drug: {}".format(c.drug))
lines.append(u"Pack size: {}".format(c.pack_size))
print("\n".join(lines))
|
Add task to summarise concessionsfrom datetime import date
from django.core.management import BaseCommand
from dmd.models import NCSOConcession
class Command(BaseCommand):
def handle(self, *args, **kwargs):
today = date.today()
first_of_month = date(today.year, today.month, 1)
num_concessions = NCSOConcession.objects.count()
num_concessions_in_this_month = NCSOConcession.objects.filter(
date=first_of_month
).count()
unmatched_concessions = NCSOConcession.objects.filter(vmpp_id__isnull=True)
num_unmatched_concessions = unmatched_concessions.count()
lines = []
lines.append("There are {} concessions".format(num_concessions))
lines.append(
"There are {} concessions for {}".format(
num_concessions_in_this_month, today.strftime("%B %Y")
)
)
if num_unmatched_concessions == 0:
lines.append("There are no unreconciled concessions")
elif num_unmatched_concessions == 1:
lines.append("There is 1 unreconciled concession")
else:
lines.append(
"There are {} unreconciled concessions".format(
num_unmatched_concessions
)
)
if num_unmatched_concessions > 0:
lines.append("")
lines.append("To reconcile, tell ebmbot:")
lines.append("`op reconcile concession [ID] against [VMPP ID]`")
for c in unmatched_concessions:
lines.append("-" * 80)
lines.append("ID: {}".format(c.id))
lines.append(u"Drug: {}".format(c.drug))
lines.append(u"Pack size: {}".format(c.pack_size))
print("\n".join(lines))
|
<commit_before><commit_msg>Add task to summarise concessions<commit_after>from datetime import date
from django.core.management import BaseCommand
from dmd.models import NCSOConcession
class Command(BaseCommand):
def handle(self, *args, **kwargs):
today = date.today()
first_of_month = date(today.year, today.month, 1)
num_concessions = NCSOConcession.objects.count()
num_concessions_in_this_month = NCSOConcession.objects.filter(
date=first_of_month
).count()
unmatched_concessions = NCSOConcession.objects.filter(vmpp_id__isnull=True)
num_unmatched_concessions = unmatched_concessions.count()
lines = []
lines.append("There are {} concessions".format(num_concessions))
lines.append(
"There are {} concessions for {}".format(
num_concessions_in_this_month, today.strftime("%B %Y")
)
)
if num_unmatched_concessions == 0:
lines.append("There are no unreconciled concessions")
elif num_unmatched_concessions == 1:
lines.append("There is 1 unreconciled concession")
else:
lines.append(
"There are {} unreconciled concessions".format(
num_unmatched_concessions
)
)
if num_unmatched_concessions > 0:
lines.append("")
lines.append("To reconcile, tell ebmbot:")
lines.append("`op reconcile concession [ID] against [VMPP ID]`")
for c in unmatched_concessions:
lines.append("-" * 80)
lines.append("ID: {}".format(c.id))
lines.append(u"Drug: {}".format(c.drug))
lines.append(u"Pack size: {}".format(c.pack_size))
print("\n".join(lines))
|
|
149534662f865793bbff7e54027af6751039a682
|
code/test1/continued_fraction.py
|
code/test1/continued_fraction.py
|
#!/usr/bin/python3
import time
def c_frac(n):
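    # Iterating s -> 1 + 1/s converges to the golden ratio, (1 + sqrt(5)) / 2 ≈ 1.6180339887.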
s = 1
for i in range(0, n):
s = 1.0 + 1.0 / s
return s
start_time = time.time()
print(c_frac(1000000))
print("--- %f seconds ---" % (time.time() - start_time))
|
Add old code from test1.
|
Add old code from test1.
|
Python
|
mit
|
djpetti/csci2963-DanielPetti,djpetti/csci2963-DanielPetti,djpetti/csci2963-DanielPetti,djpetti/csci2963-DanielPetti
|
Add old code from test1.
|
#!/usr/bin/python3
import time
def c_frac(n):
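    # Iterating s -> 1 + 1/s converges to the golden ratio, (1 + sqrt(5)) / 2 ≈ 1.6180339887.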
s = 1
for i in range(0, n):
s = 1.0 + 1.0 / s
return s
start_time = time.time()
print(c_frac(1000000))
print("--- %f seconds ---" % (time.time() - start_time))
|
<commit_before><commit_msg>Add old code from test1.<commit_after>
|
#!/usr/bin/python3
import time
def c_frac(n):
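    # Iterating s -> 1 + 1/s converges to the golden ratio, (1 + sqrt(5)) / 2 ≈ 1.6180339887.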
s = 1
for i in range(0, n):
s = 1.0 + 1.0 / s
return s
start_time = time.time()
print(c_frac(1000000))
print("--- %f seconds ---" % (time.time() - start_time))
|
Add old code from test1.#!/usr/bin/python3
import time
def c_frac(n):
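    # Iterating s -> 1 + 1/s converges to the golden ratio, (1 + sqrt(5)) / 2 ≈ 1.6180339887.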
s = 1
for i in range(0, n):
s = 1.0 + 1.0 / s
return s
start_time = time.time()
print(c_frac(1000000))
print("--- %f seconds ---" % (time.time() - start_time))
|
<commit_before><commit_msg>Add old code from test1.<commit_after>#!/usr/bin/python3
import time
def c_frac(n):
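    # Iterating s -> 1 + 1/s converges to the golden ratio, (1 + sqrt(5)) / 2 ≈ 1.6180339887.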
s = 1
for i in range(0, n):
s = 1.0 + 1.0 / s
return s
start_time = time.time()
print(c_frac(1000000))
print("--- %f seconds ---" % (time.time() - start_time))
|
|
58ca779abe014e85509555c274ae6960e152b9ca
|
eche/eche_types.py
|
eche/eche_types.py
|
class Symbol(str):
pass
# lists
class List(list):
def __add__(self, rhs):
return List(list.__add__(self, rhs))
def __getitem__(self, i):
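        # Slices are wrapped back into List; out-of-range integer indexes return None instead of raising IndexError.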
if type(i) == slice:
return List(list.__getitem__(self, i))
elif i >= len(self):
return None
else:
return list.__getitem__(self, i)
def __getslice__(self, *a):
        return List(list.__getslice__(self, *a))
@classmethod
def is_list(cls, obj):
return isinstance(obj, cls)
class Boolean(object):
def __init__(self, val):
self.val = val
@classmethod
def is_true(cls, exp):
        return isinstance(exp, cls) and bool(exp.val)
@classmethod
def is_false(cls, exp):
        return isinstance(exp, cls) and not bool(exp.val)
class Nil(object):
def __init__(self, val):
self.val = val
@classmethod
def is_nil(cls, exp):
return isinstance(exp, cls)
class Atom(object):
def __init__(self, val):
self.val = val
@classmethod
def is_atom(cls, exp):
return isinstance(exp, Atom)
|
Create Symbol, List, Boolean, Nil and Atom types.
|
Create Symbol, List, Boolean, Nil and Atom types.
|
Python
|
mit
|
skk/eche
|
Create Symbol, List, Boolean, Nil and Atom types.
|
class Symbol(str):
pass
# lists
class List(list):
def __add__(self, rhs):
return List(list.__add__(self, rhs))
def __getitem__(self, i):
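        # Slices are wrapped back into List; out-of-range integer indexes return None instead of raising IndexError.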
if type(i) == slice:
return List(list.__getitem__(self, i))
elif i >= len(self):
return None
else:
return list.__getitem__(self, i)
def __getslice__(self, *a):
        return List(list.__getslice__(self, *a))
@classmethod
def is_list(cls, obj):
return isinstance(obj, cls)
class Boolean(object):
def __init__(self, val):
self.val = val
@classmethod
def is_true(cls, exp):
        return isinstance(exp, cls) and bool(exp.val)
@classmethod
def is_false(cls, exp):
        return isinstance(exp, cls) and not bool(exp.val)
class Nil(object):
def __init__(self, val):
self.val = val
@classmethod
def is_nil(cls, exp):
return isinstance(exp, cls)
class Atom(object):
def __init__(self, val):
self.val = val
@classmethod
def is_atom(cls, exp):
return isinstance(exp, Atom)
|
<commit_before><commit_msg>Create Symbol, List, Boolean, Nil and Atom types.<commit_after>
|
class Symbol(str):
pass
# lists
class List(list):
def __add__(self, rhs):
return List(list.__add__(self, rhs))
def __getitem__(self, i):
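        # Slices are wrapped back into List; out-of-range integer indexes return None instead of raising IndexError.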
if type(i) == slice:
return List(list.__getitem__(self, i))
elif i >= len(self):
return None
else:
return list.__getitem__(self, i)
def __getslice__(self, *a):
        return List(list.__getslice__(self, *a))
@classmethod
def is_list(cls, obj):
return isinstance(obj, cls)
class Boolean(object):
def __init__(self, val):
self.val = val
@classmethod
def is_true(cls, exp):
        return isinstance(exp, cls) and bool(exp.val)
@classmethod
def is_false(cls, exp):
        return isinstance(exp, cls) and not bool(exp.val)
class Nil(object):
def __init__(self, val):
self.val = val
@classmethod
def is_nil(cls, exp):
return isinstance(exp, cls)
class Atom(object):
def __init__(self, val):
self.val = val
@classmethod
def is_atom(cls, exp):
return isinstance(exp, Atom)
|
Create Symbol, List, Boolean, Nil and Atom types.class Symbol(str):
pass
# lists
class List(list):
def __add__(self, rhs):
return List(list.__add__(self, rhs))
def __getitem__(self, i):
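        # Slices are wrapped back into List; out-of-range integer indexes return None instead of raising IndexError.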
if type(i) == slice:
return List(list.__getitem__(self, i))
elif i >= len(self):
return None
else:
return list.__getitem__(self, i)
def __getslice__(self, *a):
        return List(list.__getslice__(self, *a))
@classmethod
def is_list(cls, obj):
return isinstance(obj, cls)
class Boolean(object):
def __init__(self, val):
self.val = val
@classmethod
def is_true(cls, exp):
        return isinstance(exp, cls) and bool(exp.val)
@classmethod
def is_false(cls, exp):
        return isinstance(exp, cls) and not bool(exp.val)
class Nil(object):
def __init__(self, val):
self.val = val
@classmethod
def is_nil(cls, exp):
return isinstance(exp, cls)
class Atom(object):
def __init__(self, val):
self.val = val
@classmethod
def is_atom(cls, exp):
return isinstance(exp, Atom)
|
<commit_before><commit_msg>Create Symbol, List, Boolean, Nil and Atom types.<commit_after>class Symbol(str):
pass
# lists
class List(list):
def __add__(self, rhs):
return List(list.__add__(self, rhs))
def __getitem__(self, i):
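        # Slices are wrapped back into List; out-of-range integer indexes return None instead of raising IndexError.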
if type(i) == slice:
return List(list.__getitem__(self, i))
elif i >= len(self):
return None
else:
return list.__getitem__(self, i)
def __getslice__(self, *a):
        return List(list.__getslice__(self, *a))
@classmethod
def is_list(cls, obj):
return isinstance(obj, cls)
class Boolean(object):
def __init__(self, val):
self.val = val
@classmethod
def is_true(cls, exp):
        return isinstance(exp, cls) and bool(exp.val)
@classmethod
def is_false(cls, exp):
        return isinstance(exp, cls) and not bool(exp.val)
class Nil(object):
def __init__(self, val):
self.val = val
@classmethod
def is_nil(cls, exp):
return isinstance(exp, cls)
class Atom(object):
def __init__(self, val):
self.val = val
@classmethod
def is_atom(cls, exp):
return isinstance(exp, Atom)
|
|
a6f291a3beb7ecb7d67b81fe92e7cca6db2139dc
|
example_scraper.py
|
example_scraper.py
|
#!/usr/bin/env python
import json
import requests
API = 'http://localhost:8000/api/1.0'
AUTH_PARAMS = {
'email': 'panda@pandaproject.net',
'api_key': 'edfe6c5ffd1be4d3bf22f69188ac6bc0fc04c84b'
}
# Create dataset
dataset = {
'name': 'Test Dataset from API',
'schema': [{
'column': 'A',
'type': 'unicode'
}, {
'column': 'B',
'type': 'unicode'
}, {
'column': 'C',
'type': 'unicode'
}]
}
response = requests.post(API + '/dataset/', json.dumps(dataset), params=AUTH_PARAMS, headers={ 'Content-Type': 'application/json' })
dataset = json.loads(response.content)
# Write data
data = { 'objects': [{
'data': ['The', 'PANDA', 'lives.']
}, {
'data': ['More', 'data', 'here.']
}]}
response = requests.put(API + '/dataset/%s/data/' % dataset['slug'], json.dumps(data), params=AUTH_PARAMS, headers={ 'Content-Type': 'application/json' })
print response.content
|
#!/usr/bin/env python
import json
import requests
API = 'http://localhost:8000/api/1.0'
AUTH_PARAMS = {
'email': 'panda@pandaproject.net',
'api_key': 'edfe6c5ffd1be4d3bf22f69188ac6bc0fc04c84b'
}
DATASET_SLUG = 'test-dataset'
# Check if dataset exists
response = requests.get(API + '/dataset/%s/' % DATASET_SLUG, params=AUTH_PARAMS)
# Create dataset if necessary
if response.status_code == 404:
dataset = {
'name': 'Test Dataset from API',
'schema': [{
'column': 'A',
'type': 'unicode'
}, {
'column': 'B',
'type': 'unicode'
}, {
'column': 'C',
'type': 'unicode'
}]
}
response = requests.put(API + '/dataset/%s/' % DATASET_SLUG, json.dumps(dataset), params=AUTH_PARAMS, headers={ 'Content-Type': 'application/json' })
# Write data
data = { 'objects': [{
'data': ['The', 'PANDA', 'lives.']
}, {
'data': ['More', 'data', 'here.']
}]}
response = requests.put(API + '/dataset/%s/data/' % DATASET_SLUG, json.dumps(data), params=AUTH_PARAMS, headers={ 'Content-Type': 'application/json' })
|
Update example scraper to use known slug.
|
Update example scraper to use known slug.
|
Python
|
mit
|
PalmBeachPost/panda,pandaproject/panda,PalmBeachPost/panda,newsapps/panda,ibrahimcesar/panda,pandaproject/panda,NUKnightLab/panda,ibrahimcesar/panda,NUKnightLab/panda,datadesk/panda,PalmBeachPost/panda,PalmBeachPost/panda,NUKnightLab/panda,ibrahimcesar/panda,datadesk/panda,ibrahimcesar/panda,pandaproject/panda,newsapps/panda,NUKnightLab/panda,datadesk/panda,pandaproject/panda,ibrahimcesar/panda,pandaproject/panda,datadesk/panda,newsapps/panda,PalmBeachPost/panda,newsapps/panda,datadesk/panda
|
#!/usr/bin/env python
import json
import requests
API = 'http://localhost:8000/api/1.0'
AUTH_PARAMS = {
'email': 'panda@pandaproject.net',
'api_key': 'edfe6c5ffd1be4d3bf22f69188ac6bc0fc04c84b'
}
# Create dataset
dataset = {
'name': 'Test Dataset from API',
'schema': [{
'column': 'A',
'type': 'unicode'
}, {
'column': 'B',
'type': 'unicode'
}, {
'column': 'C',
'type': 'unicode'
}]
}
response = requests.post(API + '/dataset/', json.dumps(dataset), params=AUTH_PARAMS, headers={ 'Content-Type': 'application/json' })
dataset = json.loads(response.content)
# Write data
data = { 'objects': [{
'data': ['The', 'PANDA', 'lives.']
}, {
'data': ['More', 'data', 'here.']
}]}
response = requests.put(API + '/dataset/%s/data/' % dataset['slug'], json.dumps(data), params=AUTH_PARAMS, headers={ 'Content-Type': 'application/json' })
print response.content
Update example scraper to use known slug.
|
#!/usr/bin/env python
import json
import requests
API = 'http://localhost:8000/api/1.0'
AUTH_PARAMS = {
'email': 'panda@pandaproject.net',
'api_key': 'edfe6c5ffd1be4d3bf22f69188ac6bc0fc04c84b'
}
DATASET_SLUG = 'test-dataset'
# Check if dataset exists
response = requests.get(API + '/dataset/%s/' % DATASET_SLUG, params=AUTH_PARAMS)
# Create dataset if necessary
if response.status_code == 404:
dataset = {
'name': 'Test Dataset from API',
'schema': [{
'column': 'A',
'type': 'unicode'
}, {
'column': 'B',
'type': 'unicode'
}, {
'column': 'C',
'type': 'unicode'
}]
}
response = requests.put(API + '/dataset/%s/' % DATASET_SLUG, json.dumps(dataset), params=AUTH_PARAMS, headers={ 'Content-Type': 'application/json' })
# Write data
data = { 'objects': [{
'data': ['The', 'PANDA', 'lives.']
}, {
'data': ['More', 'data', 'here.']
}]}
response = requests.put(API + '/dataset/%s/data/' % DATASET_SLUG, json.dumps(data), params=AUTH_PARAMS, headers={ 'Content-Type': 'application/json' })
|
<commit_before>#!/usr/bin/env python
import json
import requests
API = 'http://localhost:8000/api/1.0'
AUTH_PARAMS = {
'email': 'panda@pandaproject.net',
'api_key': 'edfe6c5ffd1be4d3bf22f69188ac6bc0fc04c84b'
}
# Create dataset
dataset = {
'name': 'Test Dataset from API',
'schema': [{
'column': 'A',
'type': 'unicode'
}, {
'column': 'B',
'type': 'unicode'
}, {
'column': 'C',
'type': 'unicode'
}]
}
response = requests.post(API + '/dataset/', json.dumps(dataset), params=AUTH_PARAMS, headers={ 'Content-Type': 'application/json' })
dataset = json.loads(response.content)
# Write data
data = { 'objects': [{
'data': ['The', 'PANDA', 'lives.']
}, {
'data': ['More', 'data', 'here.']
}]}
response = requests.put(API + '/dataset/%s/data/' % dataset['slug'], json.dumps(data), params=AUTH_PARAMS, headers={ 'Content-Type': 'application/json' })
print response.content
<commit_msg>Update example scraper to use known slug.<commit_after>
|
#!/usr/bin/env python
import json
import requests
API = 'http://localhost:8000/api/1.0'
AUTH_PARAMS = {
'email': 'panda@pandaproject.net',
'api_key': 'edfe6c5ffd1be4d3bf22f69188ac6bc0fc04c84b'
}
DATASET_SLUG = 'test-dataset'
# Check if dataset exists
response = requests.get(API + '/dataset/%s/' % DATASET_SLUG, params=AUTH_PARAMS)
# Create dataset if necessary
if response.status_code == 404:
dataset = {
'name': 'Test Dataset from API',
'schema': [{
'column': 'A',
'type': 'unicode'
}, {
'column': 'B',
'type': 'unicode'
}, {
'column': 'C',
'type': 'unicode'
}]
}
response = requests.put(API + '/dataset/%s/' % DATASET_SLUG, json.dumps(dataset), params=AUTH_PARAMS, headers={ 'Content-Type': 'application/json' })
# Write data
data = { 'objects': [{
'data': ['The', 'PANDA', 'lives.']
}, {
'data': ['More', 'data', 'here.']
}]}
response = requests.put(API + '/dataset/%s/data/' % DATASET_SLUG, json.dumps(data), params=AUTH_PARAMS, headers={ 'Content-Type': 'application/json' })
|
#!/usr/bin/env python
import json
import requests
API = 'http://localhost:8000/api/1.0'
AUTH_PARAMS = {
'email': 'panda@pandaproject.net',
'api_key': 'edfe6c5ffd1be4d3bf22f69188ac6bc0fc04c84b'
}
# Create dataset
dataset = {
'name': 'Test Dataset from API',
'schema': [{
'column': 'A',
'type': 'unicode'
}, {
'column': 'B',
'type': 'unicode'
}, {
'column': 'C',
'type': 'unicode'
}]
}
response = requests.post(API + '/dataset/', json.dumps(dataset), params=AUTH_PARAMS, headers={ 'Content-Type': 'application/json' })
dataset = json.loads(response.content)
# Write data
data = { 'objects': [{
'data': ['The', 'PANDA', 'lives.']
}, {
'data': ['More', 'data', 'here.']
}]}
response = requests.put(API + '/dataset/%s/data/' % dataset['slug'], json.dumps(data), params=AUTH_PARAMS, headers={ 'Content-Type': 'application/json' })
print response.content
Update example scraper to use known slug.#!/usr/bin/env python
import json
import requests
API = 'http://localhost:8000/api/1.0'
AUTH_PARAMS = {
'email': 'panda@pandaproject.net',
'api_key': 'edfe6c5ffd1be4d3bf22f69188ac6bc0fc04c84b'
}
DATASET_SLUG = 'test-dataset'
# Check if dataset exists
response = requests.get(API + '/dataset/%s/' % DATASET_SLUG, params=AUTH_PARAMS)
# Create dataset if necessary
if response.status_code == 404:
dataset = {
'name': 'Test Dataset from API',
'schema': [{
'column': 'A',
'type': 'unicode'
}, {
'column': 'B',
'type': 'unicode'
}, {
'column': 'C',
'type': 'unicode'
}]
}
response = requests.put(API + '/dataset/%s/' % DATASET_SLUG, json.dumps(dataset), params=AUTH_PARAMS, headers={ 'Content-Type': 'application/json' })
# Write data
data = { 'objects': [{
'data': ['The', 'PANDA', 'lives.']
}, {
'data': ['More', 'data', 'here.']
}]}
response = requests.put(API + '/dataset/%s/data/' % DATASET_SLUG, json.dumps(data), params=AUTH_PARAMS, headers={ 'Content-Type': 'application/json' })
|
<commit_before>#!/usr/bin/env python
import json
import requests
API = 'http://localhost:8000/api/1.0'
AUTH_PARAMS = {
'email': 'panda@pandaproject.net',
'api_key': 'edfe6c5ffd1be4d3bf22f69188ac6bc0fc04c84b'
}
# Create dataset
dataset = {
'name': 'Test Dataset from API',
'schema': [{
'column': 'A',
'type': 'unicode'
}, {
'column': 'B',
'type': 'unicode'
}, {
'column': 'C',
'type': 'unicode'
}]
}
response = requests.post(API + '/dataset/', json.dumps(dataset), params=AUTH_PARAMS, headers={ 'Content-Type': 'application/json' })
dataset = json.loads(response.content)
# Write data
data = { 'objects': [{
'data': ['The', 'PANDA', 'lives.']
}, {
'data': ['More', 'data', 'here.']
}]}
response = requests.put(API + '/dataset/%s/data/' % dataset['slug'], json.dumps(data), params=AUTH_PARAMS, headers={ 'Content-Type': 'application/json' })
print response.content
<commit_msg>Update example scraper to use known slug.<commit_after>#!/usr/bin/env python
import json
import requests
API = 'http://localhost:8000/api/1.0'
AUTH_PARAMS = {
'email': 'panda@pandaproject.net',
'api_key': 'edfe6c5ffd1be4d3bf22f69188ac6bc0fc04c84b'
}
DATASET_SLUG = 'test-dataset'
# Check if dataset exists
response = requests.get(API + '/dataset/%s/' % DATASET_SLUG, params=AUTH_PARAMS)
# Create dataset if necessary
if response.status_code == 404:
dataset = {
'name': 'Test Dataset from API',
'schema': [{
'column': 'A',
'type': 'unicode'
}, {
'column': 'B',
'type': 'unicode'
}, {
'column': 'C',
'type': 'unicode'
}]
}
response = requests.put(API + '/dataset/%s/' % DATASET_SLUG, json.dumps(dataset), params=AUTH_PARAMS, headers={ 'Content-Type': 'application/json' })
# Write data
data = { 'objects': [{
'data': ['The', 'PANDA', 'lives.']
}, {
'data': ['More', 'data', 'here.']
}]}
response = requests.put(API + '/dataset/%s/data/' % DATASET_SLUG, json.dumps(data), params=AUTH_PARAMS, headers={ 'Content-Type': 'application/json' })
|
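Editor's note: the revised scraper above makes the write idempotent by checking for the dataset before creating it. A hedged follow-up sketch, assuming the /data/ endpoint also answers GET (the record itself only shows PUT), to read the rows back and verify the write:

```
#!/usr/bin/env python
# Hypothetical verification step for the scraper above. Assumes the
# /data/ endpoint supports GET, which the record does not demonstrate.
import requests

API = 'http://localhost:8000/api/1.0'
AUTH_PARAMS = {
    'email': 'panda@pandaproject.net',
    'api_key': 'edfe6c5ffd1be4d3bf22f69188ac6bc0fc04c84b'
}
DATASET_SLUG = 'test-dataset'

response = requests.get(API + '/dataset/%s/data/' % DATASET_SLUG, params=AUTH_PARAMS)
response.raise_for_status()
print(response.json())
```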
bf57364ed872b25bbc4864cf9171b2345a5c0e09
|
api/rest/resources/plugin.py
|
api/rest/resources/plugin.py
|
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
import json
import logging
from amcat.models import Plugin
from api.rest.resources.amcatresource import AmCATResource
log = logging.getLogger(__name__)
class PluginResource(AmCATResource):
model = Plugin
extra_filters = ["plugin_type"]
|
Fix uploader bugs and make medium text field; add textarea for entering text directly
|
Fix uploader bugs and make medium text field; add textarea for entering text directly
|
Python
|
agpl-3.0
|
amcat/amcat,amcat/amcat,tschmorleiz/amcat,amcat/amcat,amcat/amcat,amcat/amcat,tschmorleiz/amcat,tschmorleiz/amcat,tschmorleiz/amcat,tschmorleiz/amcat,amcat/amcat
|
Fix uploader bugs and make medium text field; add textarea for entering text directly
|
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
import json
import logging
from amcat.models import Plugin
from api.rest.resources.amcatresource import AmCATResource
log = logging.getLogger(__name__)
class PluginResource(AmCATResource):
model = Plugin
extra_filters = ["plugin_type"]
|
<commit_before><commit_msg>Fix uploader bugs and make medium text field; add textarea for entering text directly<commit_after>
|
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
import json
import logging
from amcat.models import Plugin
from api.rest.resources.amcatresource import AmCATResource
log = logging.getLogger(__name__)
class PluginResource(AmCATResource):
model = Plugin
extra_filters = ["plugin_type"]
|
Fix uploader bugs and make medium text field; add textarea for entering text directly###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
import json
import logging
from amcat.models import Plugin
from api.rest.resources.amcatresource import AmCATResource
log = logging.getLogger(__name__)
class PluginResource(AmCATResource):
model = Plugin
extra_filters = ["plugin_type"]
|
<commit_before><commit_msg>Fix uploader bugs and make medium text field; add textarea for entering text directly<commit_after>###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
import json
import logging
from amcat.models import Plugin
from api.rest.resources.amcatresource import AmCATResource
log = logging.getLogger(__name__)
class PluginResource(AmCATResource):
model = Plugin
extra_filters = ["plugin_type"]
|
|
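Editor's note: PluginResource above whitelists plugin_type as an extra filter. A sketch of how a client might use that filter; the URL prefix and filter value are assumptions, since only the resource class appears in the record.

```
# Hypothetical client call against the PluginResource above; the API
# prefix is assumed, only the plugin_type filter comes from the record.
import requests

resp = requests.get('http://localhost:8000/api/v4/plugin/',
                    params={'plugin_type': 1})
resp.raise_for_status()
for plugin in resp.json().get('objects', []):
    print(plugin)
```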
4973b0dff43f60e91af70349847b8cfc256004c5
|
credentials/apps/catalog/migrations/0013_drop_old_start_end_fields.py
|
credentials/apps/catalog/migrations/0013_drop_old_start_end_fields.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.25 on 2019-10-31 18:08
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('catalog', '0012_courserun_copy_column_values'),
]
operations = [
migrations.RemoveField(
model_name='courserun',
name='end',
),
migrations.RemoveField(
model_name='courserun',
name='start',
),
]
|
Rename start and end fields (4.2/4)
|
Rename start and end fields (4.2/4)
This is stage 4.2 of renaming the start and end fields of
CourseRun to start_date and end_date.
This release ONLY removes the old columns via migration. Note that this
does not include removing the django model fields corresponding to the
old columns.
DE-1708
|
Python
|
agpl-3.0
|
edx/credentials,edx/credentials,edx/credentials,edx/credentials
|
Rename start and end fields (4.2/4)
This is stage 4.2 of renaming the start and end fields of
CourseRun to start_date and end_date.
This release ONLY removes the old columns via migration. Note that this
does not include removing the django model fields corresponding to the
old columns.
DE-1708
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.25 on 2019-10-31 18:08
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('catalog', '0012_courserun_copy_column_values'),
]
operations = [
migrations.RemoveField(
model_name='courserun',
name='end',
),
migrations.RemoveField(
model_name='courserun',
name='start',
),
]
|
<commit_before><commit_msg>Rename start and end fields (4.2/4)
This is stage 4.2 of renaming the start and end fields of
CourseRun to start_date and end_date.
This release ONLY removes the old columns via migration. Note that this
does not include removing the django model fields corresponding to the
old columns.
DE-1708<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.25 on 2019-10-31 18:08
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('catalog', '0012_courserun_copy_column_values'),
]
operations = [
migrations.RemoveField(
model_name='courserun',
name='end',
),
migrations.RemoveField(
model_name='courserun',
name='start',
),
]
|
Rename start and end fields (4.2/4)
This is stage 4.2 of renaming the start and end fields of
CourseRun to start_date and end_date.
This release ONLY removes the old columns via migration. Note that this
does not include removing the django model fields corresponding to the
old columns.
DE-1708# -*- coding: utf-8 -*-
# Generated by Django 1.11.25 on 2019-10-31 18:08
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('catalog', '0012_courserun_copy_column_values'),
]
operations = [
migrations.RemoveField(
model_name='courserun',
name='end',
),
migrations.RemoveField(
model_name='courserun',
name='start',
),
]
|
<commit_before><commit_msg>Rename start and end fields (4.2/4)
This is stage 4.2 of renaming the start and end fields of
CourseRun to start_date and end_date.
This release ONLY removes the old columns via migration. Note that this
does not include removing the django model fields corresponding to the
old columns.
DE-1708<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.25 on 2019-10-31 18:08
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('catalog', '0012_courserun_copy_column_values'),
]
operations = [
migrations.RemoveField(
model_name='courserun',
name='end',
),
migrations.RemoveField(
model_name='courserun',
name='start',
),
]
|
|
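Editor's note: the message describes stage 4.2 of a staged column rename, and the migration drops only the database columns. A hedged sketch of the model-side cleanup it defers; the real CourseRun definition is not part of this record.

```
# Hypothetical sketch of the deferred cleanup: after this migration,
# the Django model would keep only the renamed attributes.
from django.db import models


class CourseRun(models.Model):
    # ...other fields elided...
    start_date = models.DateTimeField(null=True, blank=True)
    end_date = models.DateTimeField(null=True, blank=True)
    # the legacy `start`/`end` attributes are removed in a later release
```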
5bd191920c0e0fa5bc869999473bc638030e3ba7
|
notifications/migrations/0003_create_default_user_created_template.py
|
notifications/migrations/0003_create_default_user_created_template.py
|
from django.db import migrations
NOTIFICATION_TYPES = ('user_created',)
LANGUAGES = ['fi']
DEFAULT_LANGUAGE = 'fi'
FOOTER_FI = 'Tämä on automaattinen viesti Helsingin kaupungin tapahtumarajapinnasta. Viestiin ei voi vastata.\n'
HTML_SEPARATOR = '\n<br/><br/>\n'
USER_CREATED_SUBJECT_FI = "Uusi käyttäjätunnus luotu - {{ user.date_joined|format_datetime('fi') }}"
USER_CREATED_HTML_BODY_FI = \
"""Tapahtumarajapintaan on luotu uusi käyttäjätunnus {{ user.date_joined|format_datetime('fi') }}:
<br/><br/>
<a href="mailto:{{ user.email }}">{{ user.email }}</a>
<br/><br/>
<a href="https://api.hel.fi/linkedevents/admin/django_orghierarchy/organization/">Siirry käyttäjien hallintaan »</a>"""
def _append_footer(text, language, separator):
var_name = 'FOOTER_{}'.format(language).upper()
footer = globals().get(var_name)
assert footer, '{} undefined'.format(var_name)
return separator.join([text, footer])
def _get_text(notification_type, language, field):
var_name = '{}_{}_{}'.format(notification_type, field, language).upper()
text = globals().get(var_name)
assert text, '{} undefined'.format(var_name)
return text
def create_existing_notifications(NotificationTemplate):
for notification_type in NOTIFICATION_TYPES:
subject = _get_text(notification_type, DEFAULT_LANGUAGE, 'subject')
html_body = _get_text(notification_type, DEFAULT_LANGUAGE, 'html_body')
html_body = _append_footer(html_body, DEFAULT_LANGUAGE, HTML_SEPARATOR)
try:
notification = NotificationTemplate.objects.get(type=notification_type)
continue
except NotificationTemplate.DoesNotExist:
pass
notification, created = NotificationTemplate.objects.get_or_create(
type=notification_type,
subject=subject,
html_body=html_body)
if created:
for language in LANGUAGES:
subject = _get_text(notification_type, language, 'subject')
html_body = _get_text(notification_type, language, 'html_body')
html_body = _append_footer(html_body, language, HTML_SEPARATOR)
setattr(notification, 'subject_{}'.format(language), subject)
setattr(notification, 'html_body_{}'.format(language), html_body)
notification.save()
def forwards(apps, schema_editor):
NotificationTemplate = apps.get_model('notifications', 'NotificationTemplate')
create_existing_notifications(NotificationTemplate)
class Migration(migrations.Migration):
dependencies = [
('notifications', '0002_create_default_templates'),
]
operations = [
migrations.RunPython(forwards, migrations.RunPython.noop)
]
|
Add migration to create "user created" template
|
Add migration to create "user created" template
|
Python
|
mit
|
City-of-Helsinki/linkedevents,City-of-Helsinki/linkedevents,City-of-Helsinki/linkedevents
|
Add migration to create "user created" template
|
from django.db import migrations
NOTIFICATION_TYPES = ('user_created',)
LANGUAGES = ['fi']
DEFAULT_LANGUAGE = 'fi'
FOOTER_FI = 'Tämä on automaattinen viesti Helsingin kaupungin tapahtumarajapinnasta. Viestiin ei voi vastata.\n'
HTML_SEPARATOR = '\n<br/><br/>\n'
USER_CREATED_SUBJECT_FI = "Uusi käyttäjätunnus luotu - {{ user.date_joined|format_datetime('fi') }}"
USER_CREATED_HTML_BODY_FI = \
"""Tapahtumarajapintaan on luotu uusi käyttäjätunnus {{ user.date_joined|format_datetime('fi') }}:
<br/><br/>
<a href="mailto:{{ user.email }}">{{ user.email }}</a>
<br/><br/>
<a href="https://api.hel.fi/linkedevents/admin/django_orghierarchy/organization/">Siirry käyttäjien hallintaan »</a>"""
def _append_footer(text, language, separator):
var_name = 'FOOTER_{}'.format(language).upper()
footer = globals().get(var_name)
assert footer, '{} undefined'.format(var_name)
return separator.join([text, footer])
def _get_text(notification_type, language, field):
var_name = '{}_{}_{}'.format(notification_type, field, language).upper()
text = globals().get(var_name)
assert text, '{} undefined'.format(var_name)
return text
def create_existing_notifications(NotificationTemplate):
for notification_type in NOTIFICATION_TYPES:
subject = _get_text(notification_type, DEFAULT_LANGUAGE, 'subject')
html_body = _get_text(notification_type, DEFAULT_LANGUAGE, 'html_body')
html_body = _append_footer(html_body, DEFAULT_LANGUAGE, HTML_SEPARATOR)
try:
notification = NotificationTemplate.objects.get(type=notification_type)
continue
except NotificationTemplate.DoesNotExist:
pass
notification, created = NotificationTemplate.objects.get_or_create(
type=notification_type,
subject=subject,
html_body=html_body)
if created:
for language in LANGUAGES:
subject = _get_text(notification_type, language, 'subject')
html_body = _get_text(notification_type, language, 'html_body')
html_body = _append_footer(html_body, language, HTML_SEPARATOR)
setattr(notification, 'subject_{}'.format(language), subject)
setattr(notification, 'html_body_{}'.format(language), html_body)
notification.save()
def forwards(apps, schema_editor):
NotificationTemplate = apps.get_model('notifications', 'NotificationTemplate')
create_existing_notifications(NotificationTemplate)
class Migration(migrations.Migration):
dependencies = [
('notifications', '0002_create_default_templates'),
]
operations = [
migrations.RunPython(forwards, migrations.RunPython.noop)
]
|
<commit_before><commit_msg>Add migration to create "user created" template<commit_after>
|
from django.db import migrations
NOTIFICATION_TYPES = ('user_created',)
LANGUAGES = ['fi']
DEFAULT_LANGUAGE = 'fi'
FOOTER_FI = 'Tämä on automaattinen viesti Helsingin kaupungin tapahtumarajapinnasta. Viestiin ei voi vastata.\n'
HTML_SEPARATOR = '\n<br/><br/>\n'
USER_CREATED_SUBJECT_FI = "Uusi käyttäjätunnus luotu - {{ user.date_joined|format_datetime('fi') }}"
USER_CREATED_HTML_BODY_FI = \
"""Tapahtumarajapintaan on luotu uusi käyttäjätunnus {{ user.date_joined|format_datetime('fi') }}:
<br/><br/>
<a href="mailto:{{ user.email }}">{{ user.email }}</a>
<br/><br/>
<a href="https://api.hel.fi/linkedevents/admin/django_orghierarchy/organization/">Siirry käyttäjien hallintaan »</a>"""
def _append_footer(text, language, separator):
var_name = 'FOOTER_{}'.format(language).upper()
footer = globals().get(var_name)
assert footer, '{} undefined'.format(var_name)
return separator.join([text, footer])
def _get_text(notification_type, language, field):
var_name = '{}_{}_{}'.format(notification_type, field, language).upper()
text = globals().get(var_name)
assert text, '{} undefined'.format(var_name)
return text
def create_existing_notifications(NotificationTemplate):
for notification_type in NOTIFICATION_TYPES:
subject = _get_text(notification_type, DEFAULT_LANGUAGE, 'subject')
html_body = _get_text(notification_type, DEFAULT_LANGUAGE, 'html_body')
html_body = _append_footer(html_body, DEFAULT_LANGUAGE, HTML_SEPARATOR)
try:
notification = NotificationTemplate.objects.get(type=notification_type)
continue
except NotificationTemplate.DoesNotExist:
pass
notification, created = NotificationTemplate.objects.get_or_create(
type=notification_type,
subject=subject,
html_body=html_body)
if created:
for language in LANGUAGES:
subject = _get_text(notification_type, language, 'subject')
html_body = _get_text(notification_type, language, 'html_body')
html_body = _append_footer(html_body, language, HTML_SEPARATOR)
setattr(notification, 'subject_{}'.format(language), subject)
setattr(notification, 'html_body_{}'.format(language), html_body)
notification.save()
def forwards(apps, schema_editor):
NotificationTemplate = apps.get_model('notifications', 'NotificationTemplate')
create_existing_notifications(NotificationTemplate)
class Migration(migrations.Migration):
dependencies = [
('notifications', '0002_create_default_templates'),
]
operations = [
migrations.RunPython(forwards, migrations.RunPython.noop)
]
|
Add migration to create "user created" templatefrom django.db import migrations
NOTIFICATION_TYPES = ('user_created',)
LANGUAGES = ['fi']
DEFAULT_LANGUAGE = 'fi'
FOOTER_FI = 'Tämä on automaattinen viesti Helsingin kaupungin tapahtumarajapinnasta. Viestiin ei voi vastata.\n'
HTML_SEPARATOR = '\n<br/><br/>\n'
USER_CREATED_SUBJECT_FI = "Uusi käyttäjätunnus luotu - {{ user.date_joined|format_datetime('fi') }}"
USER_CREATED_HTML_BODY_FI = \
"""Tapahtumarajapintaan on luotu uusi käyttäjätunnus {{ user.date_joined|format_datetime('fi') }}:
<br/><br/>
<a href="mailto:{{ user.email }}">{{ user.email }}</a>
<br/><br/>
<a href="https://api.hel.fi/linkedevents/admin/django_orghierarchy/organization/">Siirry käyttäjien hallintaan »</a>"""
def _append_footer(text, language, separator):
var_name = 'FOOTER_{}'.format(language).upper()
footer = globals().get(var_name)
assert footer, '{} undefined'.format(var_name)
return separator.join([text, footer])
def _get_text(notification_type, language, field):
var_name = '{}_{}_{}'.format(notification_type, field, language).upper()
text = globals().get(var_name)
assert text, '{} undefined'.format(var_name)
return text
def create_existing_notifications(NotificationTemplate):
for notification_type in NOTIFICATION_TYPES:
subject = _get_text(notification_type, DEFAULT_LANGUAGE, 'subject')
html_body = _get_text(notification_type, DEFAULT_LANGUAGE, 'html_body')
html_body = _append_footer(html_body, DEFAULT_LANGUAGE, HTML_SEPARATOR)
try:
notification = NotificationTemplate.objects.get(type=notification_type)
continue
except NotificationTemplate.DoesNotExist:
pass
notification, created = NotificationTemplate.objects.get_or_create(
type=notification_type,
subject=subject,
html_body=html_body)
if created:
for language in LANGUAGES:
subject = _get_text(notification_type, language, 'subject')
html_body = _get_text(notification_type, language, 'html_body')
html_body = _append_footer(html_body, language, HTML_SEPARATOR)
setattr(notification, 'subject_{}'.format(language), subject)
setattr(notification, 'html_body_{}'.format(language), html_body)
notification.save()
def forwards(apps, schema_editor):
NotificationTemplate = apps.get_model('notifications', 'NotificationTemplate')
create_existing_notifications(NotificationTemplate)
class Migration(migrations.Migration):
dependencies = [
('notifications', '0002_create_default_templates'),
]
operations = [
migrations.RunPython(forwards, migrations.RunPython.noop)
]
|
<commit_before><commit_msg>Add migration to create "user created" template<commit_after>from django.db import migrations
NOTIFICATION_TYPES = ('user_created',)
LANGUAGES = ['fi']
DEFAULT_LANGUAGE = 'fi'
FOOTER_FI = 'Tämä on automaattinen viesti Helsingin kaupungin tapahtumarajapinnasta. Viestiin ei voi vastata.\n'
HTML_SEPARATOR = '\n<br/><br/>\n'
USER_CREATED_SUBJECT_FI = "Uusi käyttäjätunnus luotu - {{ user.date_joined|format_datetime('fi') }}"
USER_CREATED_HTML_BODY_FI = \
"""Tapahtumarajapintaan on luotu uusi käyttäjätunnus {{ user.date_joined|format_datetime('fi') }}:
<br/><br/>
<a href="mailto:{{ user.email }}">{{ user.email }}</a>
<br/><br/>
<a href="https://api.hel.fi/linkedevents/admin/django_orghierarchy/organization/">Siirry käyttäjien hallintaan »</a>"""
def _append_footer(text, language, separator):
var_name = 'FOOTER_{}'.format(language).upper()
footer = globals().get(var_name)
assert footer, '{} undefined'.format(var_name)
return separator.join([text, footer])
def _get_text(notification_type, language, field):
var_name = '{}_{}_{}'.format(notification_type, field, language).upper()
text = globals().get(var_name)
assert text, '{} undefined'.format(var_name)
return text
def create_existing_notifications(NotificationTemplate):
for notification_type in NOTIFICATION_TYPES:
subject = _get_text(notification_type, DEFAULT_LANGUAGE, 'subject')
html_body = _get_text(notification_type, DEFAULT_LANGUAGE, 'html_body')
html_body = _append_footer(html_body, DEFAULT_LANGUAGE, HTML_SEPARATOR)
try:
notification = NotificationTemplate.objects.get(type=notification_type)
continue
except NotificationTemplate.DoesNotExist:
pass
notification, created = NotificationTemplate.objects.get_or_create(
type=notification_type,
subject=subject,
html_body=html_body)
if created:
for language in LANGUAGES:
subject = _get_text(notification_type, language, 'subject')
html_body = _get_text(notification_type, language, 'html_body')
html_body = _append_footer(html_body, language, HTML_SEPARATOR)
setattr(notification, 'subject_{}'.format(language), subject)
setattr(notification, 'html_body_{}'.format(language), html_body)
notification.save()
def forwards(apps, schema_editor):
NotificationTemplate = apps.get_model('notifications', 'NotificationTemplate')
create_existing_notifications(NotificationTemplate)
class Migration(migrations.Migration):
dependencies = [
('notifications', '0002_create_default_templates'),
]
operations = [
migrations.RunPython(forwards, migrations.RunPython.noop)
]
|
|
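Editor's note: _get_text() above resolves template strings by a module-level naming convention. A runnable miniature of that convention, isolated from the migration machinery:

```
# The lookup convention _get_text() relies on: module-level constants
# named <TYPE>_<FIELD>_<LANGUAGE>, upper-cased.
USER_CREATED_SUBJECT_FI = 'Uusi käyttäjätunnus luotu'

def _get_text(notification_type, language, field):
    var_name = '{}_{}_{}'.format(notification_type, field, language).upper()
    return globals()[var_name]

assert _get_text('user_created', 'fi', 'subject') == USER_CREATED_SUBJECT_FI
```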
1feb96590df3f10a7205f43d472ce27ec278360e
|
spraakbanken/s5/spr_local/reconstruct_corpus.py
|
spraakbanken/s5/spr_local/reconstruct_corpus.py
|
#!/usr/bin/env python3
import argparse
import collections
import random
import sys
def reconstruct(f_in, f_out):
sentence_starts = []
contexts = {}
for line in f_in:
parts = line.split()
words = parts[:-1]
count = int(parts[-1])
if words[0] == "<s>" and words[-1] == "</s>":
for _ in range(count):
print(" ".join(words), file=f_out)
continue
        if words[0] == "<s>":
            # Seed sentence starts from n-grams that open a sentence;
            # without this, sentence_starts stays empty and indexing fails.
            sentence_starts.extend([tuple(words)] * count)
            continue
        context = tuple(words[:-1])
if context not in contexts:
contexts[context] = collections.Counter()
contexts[context][words[-1]] += count
random.shuffle(sentence_starts)
c = len(sentence_starts[0]) - 1
for start in sentence_starts:
line = list(start)
while line[-1] != "</s>":
            context = tuple(line[-c:])
next_word = contexts[context].most_common(1)[0][0]
contexts[context][next_word] -= 1
line.append(next_word)
print(" ".join(line), file=f_out)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Construct corpus')
parser.add_argument('infile', nargs='?', type=argparse.FileType('r', encoding='utf-8'), default=sys.stdin)
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w', encoding='utf-8'), default=sys.stdout)
args = parser.parse_args()
reconstruct(args.infile, args.outfile)
|
Add reconstruct corpus as a test
|
Add reconstruct corpus as a test
|
Python
|
apache-2.0
|
psmit/kaldi-recipes,psmit/kaldi-recipes,phsmit/kaldi-recipes,phsmit/kaldi-recipes,psmit/kaldi-recipes
|
Add reconstruct corpus as a test
|
#!/usr/bin/env python3
import argparse
import collections
import random
import sys
def reconstruct(f_in, f_out):
sentence_starts = []
contexts = {}
for line in f_in:
parts = line.split()
words = parts[:-1]
count = int(parts[-1])
if words[0] == "<s>" and words[-1] == "</s>":
for _ in range(count):
print(" ".join(words), file=f_out)
continue
        if words[0] == "<s>":
            # Seed sentence starts from n-grams that open a sentence;
            # without this, sentence_starts stays empty and indexing fails.
            sentence_starts.extend([tuple(words)] * count)
            continue
        context = tuple(words[:-1])
if context not in contexts:
contexts[context] = collections.Counter()
contexts[context][words[-1]] += count
random.shuffle(sentence_starts)
c = len(sentence_starts[0]) - 1
for start in sentence_starts:
line = list(start)
while line[-1] != "</s>":
            context = tuple(line[-c:])
next_word = contexts[context].most_common(1)[0][0]
contexts[context][next_word] -= 1
line.append(next_word)
print(" ".join(line), file=f_out)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Construct corpus')
parser.add_argument('infile', nargs='?', type=argparse.FileType('r', encoding='utf-8'), default=sys.stdin)
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w', encoding='utf-8'), default=sys.stdout)
args = parser.parse_args()
reconstruct(args.infile, args.outfile)
|
<commit_before><commit_msg>Add reconstruct corpus as a test<commit_after>
|
#!/usr/bin/env python3
import argparse
import collections
import random
import sys
def reconstruct(f_in, f_out):
sentence_starts = []
contexts = {}
for line in f_in:
parts = line.split()
words = parts[:-1]
count = int(parts[-1])
if words[0] == "<s>" and words[-1] == "</s>":
for _ in range(count):
print(" ".join(words), file=f_out)
continue
        if words[0] == "<s>":
            # Seed sentence starts from n-grams that open a sentence;
            # without this, sentence_starts stays empty and indexing fails.
            sentence_starts.extend([tuple(words)] * count)
            continue
        context = tuple(words[:-1])
if context not in contexts:
contexts[context] = collections.Counter()
contexts[context][words[-1]] += count
random.shuffle(sentence_starts)
c = len(sentence_starts[0]) - 1
for start in sentence_starts:
line = list(start)
while line[-1] != "</s>":
            context = tuple(line[-c:])
next_word = contexts[context].most_common(1)[0][0]
contexts[context][next_word] -= 1
line.append(next_word)
print(" ".join(line), file=f_out)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Construct corpus')
parser.add_argument('infile', nargs='?', type=argparse.FileType('r', encoding='utf-8'), default=sys.stdin)
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w', encoding='utf-8'), default=sys.stdout)
args = parser.parse_args()
reconstruct(args.infile, args.outfile)
|
Add reconstruct corpus as a test#!/usr/bin/env python3
import argparse
import collections
import random
import sys
def reconstruct(f_in, f_out):
sentence_starts = []
contexts = {}
for line in f_in:
parts = line.split()
words = parts[:-1]
count = int(parts[-1])
if words[0] == "<s>" and words[-1] == "</s>":
for _ in range(count):
print(" ".join(words), file=f_out)
continue
        if words[0] == "<s>":
            # Seed sentence starts from n-grams that open a sentence;
            # without this, sentence_starts stays empty and indexing fails.
            sentence_starts.extend([tuple(words)] * count)
            continue
        context = tuple(words[:-1])
if context not in contexts:
contexts[context] = collections.Counter()
contexts[context][words[-1]] += count
random.shuffle(sentence_starts)
c = len(sentence_starts[0]) - 1
for start in sentence_starts:
line = list(start)
while line[-1] != "</s>":
            context = tuple(line[-c:])
next_word = contexts[context].most_common(1)[0][0]
contexts[context][next_word] -= 1
line.append(next_word)
print(" ".join(line), file=f_out)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Construct corpus')
parser.add_argument('infile', nargs='?', type=argparse.FileType('r', encoding='utf-8'), default=sys.stdin)
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w', encoding='utf-8'), default=sys.stdout)
args = parser.parse_args()
reconstruct(args.infile, args.outfile)
|
<commit_before><commit_msg>Add reconstruct corpus as a test<commit_after>#!/usr/bin/env python3
import argparse
import collections
import random
import sys
def reconstruct(f_in, f_out):
sentence_starts = []
contexts = {}
for line in f_in:
parts = line.split()
words = parts[:-1]
count = int(parts[-1])
if words[0] == "<s>" and words[-1] == "</s>":
for _ in range(count):
print(" ".join(words), file=f_out)
continue
        if words[0] == "<s>":
            # Seed sentence starts from n-grams that open a sentence;
            # without this, sentence_starts stays empty and indexing fails.
            sentence_starts.extend([tuple(words)] * count)
            continue
        context = tuple(words[:-1])
if context not in contexts:
contexts[context] = collections.Counter()
contexts[context][words[-1]] += count
random.shuffle(sentence_starts)
c = len(sentence_starts[0]) - 1
for start in sentence_starts:
line = list(start)
while line[-1] != "</s>":
            context = tuple(line[-c:])
next_word = contexts[context].most_common(1)[0][0]
contexts[context][next_word] -= 1
line.append(next_word)
print(" ".join(line), file=f_out)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Construct corpus')
parser.add_argument('infile', nargs='?', type=argparse.FileType('r', encoding='utf-8'), default=sys.stdin)
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w', encoding='utf-8'), default=sys.stdout)
args = parser.parse_args()
reconstruct(args.infile, args.outfile)
|
|
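Editor's note: a tiny end-to-end check of reconstruct() with the sentence-start seeding fixed above; each input line is an n-gram followed by its count. The module path in the import is hypothetical.

```
# In-memory round trip for reconstruct(); the trigram counts below
# encode the single sentence "<s> a b </s>".
import io
from reconstruct_corpus import reconstruct  # hypothetical import path

counts = io.StringIO("<s> a b 1\n"
                     "a b </s> 1\n")
out = io.StringIO()
reconstruct(counts, out)
print(out.getvalue())   # expected: <s> a b </s>
```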
f497d51a9736cfac5ececaf1a729a04ad74ea8bd
|
huffman.py
|
huffman.py
|
class Node:
def __init__(self):
self.name = ''
self.weight = 0
self.code = ''
def initSet(self, name, weight):
self.name = name
self.weight = weight
|
Initialize and declare class Node
|
Initialize and declare class Node
|
Python
|
mit
|
hane1818/Algorithm_HW3_huffman_code
|
Initialize and declare class Node
|
class Node:
def __init__(self):
self.name = ''
self.weight = 0
self.code = ''
def initSet(self, name, weight):
self.name = name
self.weight = weight
|
<commit_before><commit_msg>Initialize and declare class Node<commit_after>
|
class Node:
def __init__(self):
self.name = ''
self.weight = 0
self.code = ''
def initSet(self, name, weight):
self.name = name
self.weight = weight
|
Initialize and declare class Nodeclass Node:
def __init__(self):
self.name = ''
self.weight = 0
self.code = ''
def initSet(self, name, weight):
self.name = name
self.weight = weight
|
<commit_before><commit_msg>Initialize and declare class Node<commit_after>class Node:
def __init__(self):
self.name = ''
self.weight = 0
self.code = ''
def initSet(self, name, weight):
self.name = name
self.weight = weight
|
|
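Editor's note: Node above carries a weight but defines no ordering, so a heap-based Huffman build would compare tuples rather than Node instances. A brief hedged sketch of that next step:

```
# Hypothetical next step for the Node class above: a min-heap of
# (weight, tiebreak, node) tuples, since Node itself is not orderable.
import heapq

nodes = []
for i, (name, weight) in enumerate([('a', 5), ('b', 2), ('c', 1)]):
    node = Node()
    node.initSet(name, weight)
    nodes.append((weight, i, node))

heapq.heapify(nodes)
weight, _, lightest = heapq.heappop(nodes)
print(lightest.name, weight)   # -> c 1
```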
ea8c2f9007c9356bf24f66119153c4e844e5483f
|
watcher.py
|
watcher.py
|
import time
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
class ScriptModifiedHandler(PatternMatchingEventHandler):
patterns = ['*.py']
def __init__(self):
super(ScriptModifiedHandler, self).__init__()
# you can add some init code here
def process(self, event):
print(event.src_path, event.event_type)
def on_modified(self, event):
self.process(event)
def on_moved(self, event):
pass
def on_deleted(self, event):
pass
def on_created(self, event):
pass
if __name__ == '__main__':
observer = Observer()
path = '.'
event_handler = ScriptModifiedHandler()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
Add watchdog that monitors scripts editing
|
Add watchdog that monitors scripts editing
|
Python
|
mit
|
duboviy/misc
|
Add watchdog that monitors scripts editing
|
import time
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
class ScriptModifiedHandler(PatternMatchingEventHandler):
patterns = ['*.py']
def __init__(self):
super(ScriptModifiedHandler, self).__init__()
# you can add some init code here
def process(self, event):
print(event.src_path, event.event_type)
def on_modified(self, event):
self.process(event)
def on_moved(self, event):
pass
def on_deleted(self, event):
pass
def on_created(self, event):
pass
if __name__ == '__main__':
observer = Observer()
path = '.'
event_handler = ScriptModifiedHandler()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
<commit_before><commit_msg>Add watchdog that monitors scripts editing<commit_after>
|
import time
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
class ScriptModifiedHandler(PatternMatchingEventHandler):
patterns = ['*.py']
def __init__(self):
super(ScriptModifiedHandler, self).__init__()
# you can add some init code here
def process(self, event):
print(event.src_path, event.event_type)
def on_modified(self, event):
self.process(event)
def on_moved(self, event):
pass
def on_deleted(self, event):
pass
def on_created(self, event):
pass
if __name__ == '__main__':
observer = Observer()
path = '.'
event_handler = ScriptModifiedHandler()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
Add watchdog that monitors scripts editingimport time
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
class ScriptModifiedHandler(PatternMatchingEventHandler):
patterns = ['*.py']
def __init__(self):
super(ScriptModifiedHandler, self).__init__()
# you can add some init code here
def process(self, event):
print(event.src_path, event.event_type)
def on_modified(self, event):
self.process(event)
def on_moved(self, event):
pass
def on_deleted(self, event):
pass
def on_created(self, event):
pass
if __name__ == '__main__':
observer = Observer()
path = '.'
event_handler = ScriptModifiedHandler()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
<commit_before><commit_msg>Add watchdog that monitors scripts editing<commit_after>import time
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
class ScriptModifiedHandler(PatternMatchingEventHandler):
patterns = ['*.py']
def __init__(self):
super(ScriptModifiedHandler, self).__init__()
# you can add some init code here
def process(self, event):
print(event.src_path, event.event_type)
def on_modified(self, event):
self.process(event)
def on_moved(self, event):
pass
def on_deleted(self, event):
pass
def on_created(self, event):
pass
if __name__ == '__main__':
observer = Observer()
path = '.'
event_handler = ScriptModifiedHandler()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
|
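Editor's note: the handler above hard-codes patterns as a class attribute. PatternMatchingEventHandler also accepts ignore rules through its constructor, which helps skip editor temp files; a variation:

```
# Variation on ScriptModifiedHandler using constructor arguments that
# watchdog's PatternMatchingEventHandler documents: ignore_patterns
# and ignore_directories.
from watchdog.events import PatternMatchingEventHandler

class QuietScriptHandler(PatternMatchingEventHandler):
    def __init__(self):
        super(QuietScriptHandler, self).__init__(
            patterns=['*.py'],
            ignore_patterns=['*.pyc', '*~', '.#*'],
            ignore_directories=True)

    def on_modified(self, event):
        print(event.src_path, 'modified')
```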
6f28fc31a9734cc36f3e41759ce20852beb890f8
|
sara_flexbe_states/src/sara_flexbe_states/WonderlandPatchPerson.py
|
sara_flexbe_states/src/sara_flexbe_states/WonderlandPatchPerson.py
|
#!/usr/bin/env python
# encoding=utf8
import requests
from flexbe_core import EventState, Logger
"""
Created on 17/05/2018
@author: Lucas Maurice
"""
class WonderlandPatchPerson(EventState):
'''
Patch (update) a person.
># entity sara_msgs/Entity
    <= done         returned when the person was updated successfully
    <= dont_exist   returned when no matching person exists in wonderland
    <= bad_request  returned when the request data was rejected (4xx)
    <= error        returned on a server or connection error
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandPatchPerson, self).__init__(input_keys=['entity'],
outcomes=['done', 'dont_exist', 'bad_request', 'error'])
def execute(self, userdata):
# Generate URL to contact
url = "http://wonderland:8000/api/people/"
entity = userdata.entity
data = {}
if entity.wonderlandId is None and entity.face.id is None:
Logger.logwarn('Need wonderland ID or face ID !')
return 'bad_request'
if entity.wonderlandId is not None:
data.update({'peopleId': entity.wonderlandId})
if entity.face.id is not None:
data.update({'peopleRecognitionId': entity.face.id})
if entity.color is not None:
data.update({'peopleColor': entity.color})
if entity.pose is not None:
data.update({'peoplePose': entity.pose})
if entity.poseProbability is not None:
            data.update({'peoplePoseAccuracy': entity.poseProbability})
if entity.face.gender is not None:
data.update({'peopleGender': entity.face.gender})
if entity.face.genderProbability is not None:
data.update({'peopleGenderAccuracy': entity.face.genderProbability})
if entity.face.emotion is not None:
data.update({'peopleEmotion': entity.face.emotion})
if entity.face.emotionProbability is not None:
data.update({'peopleEmotionAccuracy': entity.face.emotionProbability})
if entity.isOperator is not None:
data.update({'peopleIsOperator': entity.isOperator})
if len(entity.aliases) > 0:
data.update({'peopleName': entity.aliases[0]})
# try the request
try:
response = requests.patch(url, data=data)
if response.status_code == 200:
return 'done'
elif response.status_code == 404:
return 'dont_exist'
elif 400 <= response.status_code < 500:
Logger.logwarn(response.status_code)
return 'bad_request'
else:
Logger.logerr(response.status_code)
return 'error'
except requests.exceptions.RequestException as e:
Logger.logerr(e)
return 'error'
|
Add a state for patch a person in wonderland.
|
Add a state for patch a person in wonderland.
|
Python
|
bsd-3-clause
|
WalkingMachine/sara_behaviors,WalkingMachine/sara_behaviors
|
Add a state for patch a person in wonderland.
|
#!/usr/bin/env python
# encoding=utf8
import requests
from flexbe_core import EventState, Logger
"""
Created on 17/05/2018
@author: Lucas Maurice
"""
class WonderlandPatchPerson(EventState):
'''
Patch (update) a person.
># entity sara_msgs/Entity
    <= done         returned when the person was updated successfully
    <= dont_exist   returned when no matching person exists in wonderland
    <= bad_request  returned when the request data was rejected (4xx)
    <= error        returned on a server or connection error
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandPatchPerson, self).__init__(input_keys=['entity'],
outcomes=['done', 'dont_exist', 'bad_request', 'error'])
def execute(self, userdata):
# Generate URL to contact
url = "http://wonderland:8000/api/people/"
entity = userdata.entity
data = {}
if entity.wonderlandId is None and entity.face.id is None:
Logger.logwarn('Need wonderland ID or face ID !')
return 'bad_request'
if entity.wonderlandId is not None:
data.update({'peopleId': entity.wonderlandId})
if entity.face.id is not None:
data.update({'peopleRecognitionId': entity.face.id})
if entity.color is not None:
data.update({'peopleColor': entity.color})
if entity.pose is not None:
data.update({'peoplePose': entity.pose})
if entity.poseProbability is not None:
            data.update({'peoplePoseAccuracy': entity.poseProbability})
if entity.face.gender is not None:
data.update({'peopleGender': entity.face.gender})
if entity.face.genderProbability is not None:
data.update({'peopleGenderAccuracy': entity.face.genderProbability})
if entity.face.emotion is not None:
data.update({'peopleEmotion': entity.face.emotion})
if entity.face.emotionProbability is not None:
data.update({'peopleEmotionAccuracy': entity.face.emotionProbability})
if entity.isOperator is not None:
data.update({'peopleIsOperator': entity.isOperator})
if len(entity.aliases) > 0:
data.update({'peopleName': entity.aliases[0]})
# try the request
try:
response = requests.patch(url, data=data)
if response.status_code == 200:
return 'done'
elif response.status_code == 404:
return 'dont_exist'
elif 400 <= response.status_code < 500:
Logger.logwarn(response.status_code)
return 'bad_request'
else:
Logger.logerr(response.status_code)
return 'error'
except requests.exceptions.RequestException as e:
Logger.logerr(e)
return 'error'
|
<commit_before><commit_msg>Add a state for patch a person in wonderland.<commit_after>
|
#!/usr/bin/env python
# encoding=utf8
import requests
from flexbe_core import EventState, Logger
"""
Created on 17/05/2018
@author: Lucas Maurice
"""
class WonderlandPatchPerson(EventState):
'''
Patch (update) a person.
># entity sara_msgs/Entity
    <= done         returned when the person was updated successfully
    <= dont_exist   returned when no matching person exists in wonderland
    <= bad_request  returned when the request data was rejected (4xx)
    <= error        returned on a server or connection error
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandPatchPerson, self).__init__(input_keys=['entity'],
outcomes=['done', 'dont_exist', 'bad_request', 'error'])
def execute(self, userdata):
# Generate URL to contact
url = "http://wonderland:8000/api/people/"
entity = userdata.entity
data = {}
if entity.wonderlandId is None and entity.face.id is None:
Logger.logwarn('Need wonderland ID or face ID !')
return 'bad_request'
if entity.wonderlandId is not None:
data.update({'peopleId': entity.wonderlandId})
if entity.face.id is not None:
data.update({'peopleRecognitionId': entity.face.id})
if entity.color is not None:
data.update({'peopleColor': entity.color})
if entity.pose is not None:
data.update({'peoplePose': entity.pose})
if entity.poseProbability is not None:
            data.update({'peoplePoseAccuracy': entity.poseProbability})
if entity.face.gender is not None:
data.update({'peopleGender': entity.face.gender})
if entity.face.genderProbability is not None:
data.update({'peopleGenderAccuracy': entity.face.genderProbability})
if entity.face.emotion is not None:
data.update({'peopleEmotion': entity.face.emotion})
if entity.face.emotionProbability is not None:
data.update({'peopleEmotionAccuracy': entity.face.emotionProbability})
if entity.isOperator is not None:
data.update({'peopleIsOperator': entity.isOperator})
if len(entity.aliases) > 0:
data.update({'peopleName': entity.aliases[0]})
# try the request
try:
response = requests.patch(url, data=data)
if response.status_code == 200:
return 'done'
elif response.status_code == 404:
return 'dont_exist'
elif 400 <= response.status_code < 500:
Logger.logwarn(response.status_code)
return 'bad_request'
else:
Logger.logerr(response.status_code)
return 'error'
except requests.exceptions.RequestException as e:
Logger.logerr(e)
return 'error'
|
Add a state for patch a person in wonderland.#!/usr/bin/env python
# encoding=utf8
import requests
from flexbe_core import EventState, Logger
"""
Created on 17/05/2018
@author: Lucas Maurice
"""
class WonderlandPatchPerson(EventState):
'''
Patch (update) a person.
># entity sara_msgs/Entity
    <= done         returned when the person was updated successfully
    <= dont_exist   returned when no matching person exists in wonderland
    <= bad_request  returned when the request data was rejected (4xx)
    <= error        returned on a server or connection error
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandPatchPerson, self).__init__(input_keys=['entity'],
outcomes=['done', 'dont_exist', 'bad_request', 'error'])
def execute(self, userdata):
# Generate URL to contact
url = "http://wonderland:8000/api/people/"
entity = userdata.entity
data = {}
if entity.wonderlandId is None and entity.face.id is None:
Logger.logwarn('Need wonderland ID or face ID !')
return 'bad_request'
if entity.wonderlandId is not None:
data.update({'peopleId': entity.wonderlandId})
if entity.face.id is not None:
data.update({'peopleRecognitionId': entity.face.id})
if entity.color is not None:
data.update({'peopleColor': entity.color})
if entity.pose is not None:
data.update({'peoplePose': entity.pose})
if entity.poseProbability is not None:
            data.update({'peoplePoseAccuracy': entity.poseProbability})
if entity.face.gender is not None:
data.update({'peopleGender': entity.face.gender})
if entity.face.genderProbability is not None:
data.update({'peopleGenderAccuracy': entity.face.genderProbability})
if entity.face.emotion is not None:
data.update({'peopleEmotion': entity.face.emotion})
if entity.face.emotionProbability is not None:
data.update({'peopleEmotionAccuracy': entity.face.emotionProbability})
if entity.isOperator is not None:
data.update({'peopleIsOperator': entity.isOperator})
if len(entity.aliases) > 0:
data.update({'peopleName': entity.aliases[0]})
# try the request
try:
response = requests.patch(url, data=data)
if response.status_code == 200:
return 'done'
elif response.status_code == 404:
return 'dont_exist'
elif 400 <= response.status_code < 500:
Logger.logwarn(response.status_code)
return 'bad_request'
else:
Logger.logerr(response.status_code)
return 'error'
except requests.exceptions.RequestException as e:
Logger.logerr(e)
return 'error'
|
<commit_before><commit_msg>Add a state for patch a person in wonderland.<commit_after>#!/usr/bin/env python
# encoding=utf8
import requests
from flexbe_core import EventState, Logger
"""
Created on 17/05/2018
@author: Lucas Maurice
"""
class WonderlandPatchPerson(EventState):
'''
Patch (update) a person.
># entity sara_msgs/Entity
    <= done         returned when the person was updated successfully
    <= dont_exist   returned when no matching person exists in wonderland
    <= bad_request  returned when the request data was rejected (4xx)
    <= error        returned on a server or connection error
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandPatchPerson, self).__init__(input_keys=['entity'],
outcomes=['done', 'dont_exist', 'bad_request', 'error'])
def execute(self, userdata):
# Generate URL to contact
url = "http://wonderland:8000/api/people/"
entity = userdata.entity
data = {}
if entity.wonderlandId is None and entity.face.id is None:
Logger.logwarn('Need wonderland ID or face ID !')
return 'bad_request'
if entity.wonderlandId is not None:
data.update({'peopleId': entity.wonderlandId})
if entity.face.id is not None:
data.update({'peopleRecognitionId': entity.face.id})
if entity.color is not None:
data.update({'peopleColor': entity.color})
if entity.pose is not None:
data.update({'peoplePose': entity.pose})
if entity.poseProbability is not None:
            data.update({'peoplePoseAccuracy': entity.poseProbability})
if entity.face.gender is not None:
data.update({'peopleGender': entity.face.gender})
if entity.face.genderProbability is not None:
data.update({'peopleGenderAccuracy': entity.face.genderProbability})
if entity.face.emotion is not None:
data.update({'peopleEmotion': entity.face.emotion})
if entity.face.emotionProbability is not None:
data.update({'peopleEmotionAccuracy': entity.face.emotionProbability})
if entity.isOperator is not None:
data.update({'peopleIsOperator': entity.isOperator})
if len(entity.aliases) > 0:
data.update({'peopleName': entity.aliases[0]})
# try the request
try:
response = requests.patch(url, data=data)
if response.status_code == 200:
return 'done'
elif response.status_code == 404:
return 'dont_exist'
elif 400 <= response.status_code < 500:
Logger.logwarn(response.status_code)
return 'bad_request'
else:
Logger.logerr(response.status_code)
return 'error'
except requests.exceptions.RequestException as e:
Logger.logerr(e)
return 'error'
|
|
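Editor's note: the long conditional block in execute() above builds a sparse PATCH payload, sending only the attributes that are actually known. The same idiom reduced to a generic helper, for illustration:

```
# The sparse-payload idiom used by WonderlandPatchPerson.execute(),
# isolated: drop None values so PATCH only touches supplied attributes.
def sparse_payload(**fields):
    """Keep only the attributes that are actually known."""
    return {key: value for key, value in fields.items() if value is not None}

payload = sparse_payload(peopleColor='red', peoplePose=None)
assert payload == {'peopleColor': 'red'}
```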
521888f703466375ac36fab8c53ddf0c242d73e8
|
deployment/ansible/callback_plugins/profile_tasks.py
|
deployment/ansible/callback_plugins/profile_tasks.py
|
"""
Author: Jharrod LaFon
See also: https://github.com/jlafon/ansible-profile
"""
import time
class CallbackModule(object):
"""
A plugin for timing tasks
"""
def __init__(self):
self.stats = {}
self.current = None
def playbook_on_task_start(self, name, is_conditional):
"""
Logs the start of each task
"""
if self.current is not None:
# Record the running time of the last executed task
self.stats[self.current] = time.time() - self.stats[self.current]
# Record the start time of the current task
self.current = name
self.stats[self.current] = time.time()
def playbook_on_stats(self, stats):
"""
Prints the timings
"""
# Record the timing of the very last task
if self.current is not None:
self.stats[self.current] = time.time() - self.stats[self.current]
# Sort the tasks by their running time
results = sorted(
self.stats.items(),
key=lambda value: value[1],
reverse=True,
)
# Just keep the top 10
results = results[:10]
        print("")
# Print the timings
for name, elapsed in results:
print(
"{0:-<70}{1:->9}".format(
'{0} '.format(name),
' {0:.02f}s'.format(elapsed),
)
)
        print("")
|
Add Ansible callback plugin to profile tasks
|
Add Ansible callback plugin to profile tasks
In order to try and speed up provisioning, this changeset adds a plugin
that gives you the per task duration breakdown of a playbook. From here,
we can identify the outliers and spend a little time making them faster.
Example tiler output:
```
PLAY RECAP ********************************************************************
azavea.mapnik | Install Mapnik ----------------------------------------- 35.60s
azavea.nginx | Configure the Nginx PPA --------------------------------- 22.61s
Update APT cache ------------------------------------------------------- 11.47s
azavea.beaver | Install Beaver ------------------------------------------ 9.48s
azavea.nodejs | Upgrade NPM --------------------------------------------- 8.94s
azavea.collectd | Install Collectd -------------------------------------- 8.70s
azavea.nodejs | Download Node.js ---------------------------------------- 5.91s
model-my-watershed.tiler | Install tiler javascript dependencies -------- 4.55s
azavea.nginx | Install Nginx -------------------------------------------- 3.97s
model-my-watershed.tiler | Install canvas rendering dependencies -------- 3.95s
tiler : ok=62 changed=56 unreachable=0 failed=0
```
|
Python
|
apache-2.0
|
lliss/model-my-watershed,mmcfarland/model-my-watershed,lewfish/model-my-watershed,mmcfarland/model-my-watershed,lliss/model-my-watershed,lliss/model-my-watershed,WikiWatershed/model-my-watershed,kdeloach/model-my-watershed,kdeloach/model-my-watershed,kdeloach/model-my-watershed,lliss/model-my-watershed,project-icp/bee-pollinator-app,lliss/model-my-watershed,lewfish/model-my-watershed,mmcfarland/model-my-watershed,WikiWatershed/model-my-watershed,kdeloach/model-my-watershed,WikiWatershed/model-my-watershed,project-icp/bee-pollinator-app,WikiWatershed/model-my-watershed,lewfish/model-my-watershed,mmcfarland/model-my-watershed,project-icp/bee-pollinator-app,kdeloach/model-my-watershed,lewfish/model-my-watershed,WikiWatershed/model-my-watershed,mmcfarland/model-my-watershed,project-icp/bee-pollinator-app,lewfish/model-my-watershed
|
Add Ansible callback plugin to profile tasks
In order to try and speed up provisioning, this changeset adds a plugin
that gives you the per-task duration breakdown of a playbook. From here,
we can identify the outliers and spend a little time making them faster.
Example tiler output:
```
PLAY RECAP ********************************************************************
azavea.mapnik | Install Mapnik ----------------------------------------- 35.60s
azavea.nginx | Configure the Nginx PPA --------------------------------- 22.61s
Update APT cache ------------------------------------------------------- 11.47s
azavea.beaver | Install Beaver ------------------------------------------ 9.48s
azavea.nodejs | Upgrade NPM --------------------------------------------- 8.94s
azavea.collectd | Install Collectd -------------------------------------- 8.70s
azavea.nodejs | Download Node.js ---------------------------------------- 5.91s
model-my-watershed.tiler | Install tiler javascript dependencies -------- 4.55s
azavea.nginx | Install Nginx -------------------------------------------- 3.97s
model-my-watershed.tiler | Install canvas rendering dependencies -------- 3.95s
tiler : ok=62 changed=56 unreachable=0 failed=0
```
|
"""
Author: Jharrod LaFon
See also: https://github.com/jlafon/ansible-profile
"""
import time
class CallbackModule(object):
"""
A plugin for timing tasks
"""
def __init__(self):
self.stats = {}
self.current = None
def playbook_on_task_start(self, name, is_conditional):
"""
Logs the start of each task
"""
if self.current is not None:
# Record the running time of the last executed task
self.stats[self.current] = time.time() - self.stats[self.current]
# Record the start time of the current task
self.current = name
self.stats[self.current] = time.time()
def playbook_on_stats(self, stats):
"""
Prints the timings
"""
# Record the timing of the very last task
if self.current is not None:
self.stats[self.current] = time.time() - self.stats[self.current]
# Sort the tasks by their running time
results = sorted(
self.stats.items(),
key=lambda value: value[1],
reverse=True,
)
# Just keep the top 10
results = results[:10]
print
# Print the timings
for name, elapsed in results:
print(
"{0:-<70}{1:->9}".format(
'{0} '.format(name),
' {0:.02f}s'.format(elapsed),
)
)
print
|
<commit_before><commit_msg>Add Ansible callback plugin to profile tasks
In order to try and speed up provisioning, this changeset adds a plugin
that gives you the per-task duration breakdown of a playbook. From here,
we can identify the outliers and spend a little time making them faster.
Example tiler output:
```
PLAY RECAP ********************************************************************
azavea.mapnik | Install Mapnik ----------------------------------------- 35.60s
azavea.nginx | Configure the Nginx PPA --------------------------------- 22.61s
Update APT cache ------------------------------------------------------- 11.47s
azavea.beaver | Install Beaver ------------------------------------------ 9.48s
azavea.nodejs | Upgrade NPM --------------------------------------------- 8.94s
azavea.collectd | Install Collectd -------------------------------------- 8.70s
azavea.nodejs | Download Node.js ---------------------------------------- 5.91s
model-my-watershed.tiler | Install tiler javascript dependencies -------- 4.55s
azavea.nginx | Install Nginx -------------------------------------------- 3.97s
model-my-watershed.tiler | Install canvas rendering dependencies -------- 3.95s
tiler : ok=62 changed=56 unreachable=0 failed=0
```<commit_after>
|
"""
Author: Jharrod LaFon
See also: https://github.com/jlafon/ansible-profile
"""
import time
class CallbackModule(object):
"""
A plugin for timing tasks
"""
def __init__(self):
self.stats = {}
self.current = None
def playbook_on_task_start(self, name, is_conditional):
"""
Logs the start of each task
"""
if self.current is not None:
# Record the running time of the last executed task
self.stats[self.current] = time.time() - self.stats[self.current]
# Record the start time of the current task
self.current = name
self.stats[self.current] = time.time()
def playbook_on_stats(self, stats):
"""
Prints the timings
"""
# Record the timing of the very last task
if self.current is not None:
self.stats[self.current] = time.time() - self.stats[self.current]
# Sort the tasks by their running time
results = sorted(
self.stats.items(),
key=lambda value: value[1],
reverse=True,
)
# Just keep the top 10
results = results[:10]
print
# Print the timings
for name, elapsed in results:
print(
"{0:-<70}{1:->9}".format(
'{0} '.format(name),
' {0:.02f}s'.format(elapsed),
)
)
print
|
Add Ansible callback plugin to profile tasks
In order to try and speed up provisioning, this changeset adds a plugin
that gives you the per-task duration breakdown of a playbook. From here,
we can identify the outliers and spend a little time making them faster.
Example tiler output:
```
PLAY RECAP ********************************************************************
azavea.mapnik | Install Mapnik ----------------------------------------- 35.60s
azavea.nginx | Configure the Nginx PPA --------------------------------- 22.61s
Update APT cache ------------------------------------------------------- 11.47s
azavea.beaver | Install Beaver ------------------------------------------ 9.48s
azavea.nodejs | Upgrade NPM --------------------------------------------- 8.94s
azavea.collectd | Install Collectd -------------------------------------- 8.70s
azavea.nodejs | Download Node.js ---------------------------------------- 5.91s
model-my-watershed.tiler | Install tiler javascript dependencies -------- 4.55s
azavea.nginx | Install Nginx -------------------------------------------- 3.97s
model-my-watershed.tiler | Install canvas rendering dependencies -------- 3.95s
tiler : ok=62 changed=56 unreachable=0 failed=0
```"""
Author: Jharrod LaFon
See also: https://github.com/jlafon/ansible-profile
"""
import time
class CallbackModule(object):
"""
A plugin for timing tasks
"""
def __init__(self):
self.stats = {}
self.current = None
def playbook_on_task_start(self, name, is_conditional):
"""
Logs the start of each task
"""
if self.current is not None:
# Record the running time of the last executed task
self.stats[self.current] = time.time() - self.stats[self.current]
# Record the start time of the current task
self.current = name
self.stats[self.current] = time.time()
def playbook_on_stats(self, stats):
"""
Prints the timings
"""
# Record the timing of the very last task
if self.current is not None:
self.stats[self.current] = time.time() - self.stats[self.current]
# Sort the tasks by their running time
results = sorted(
self.stats.items(),
key=lambda value: value[1],
reverse=True,
)
# Just keep the top 10
results = results[:10]
print
# Print the timings
for name, elapsed in results:
print(
"{0:-<70}{1:->9}".format(
'{0} '.format(name),
' {0:.02f}s'.format(elapsed),
)
)
print
|
<commit_before><commit_msg>Add Ansible callback plugin to profile tasks
In order to try and speed up provisioning, this changeset adds a plugin
that gives you the per-task duration breakdown of a playbook. From here,
we can identify the outliers and spend a little time making them faster.
Example tiler output:
```
PLAY RECAP ********************************************************************
azavea.mapnik | Install Mapnik ----------------------------------------- 35.60s
azavea.nginx | Configure the Nginx PPA --------------------------------- 22.61s
Update APT cache ------------------------------------------------------- 11.47s
azavea.beaver | Install Beaver ------------------------------------------ 9.48s
azavea.nodejs | Upgrade NPM --------------------------------------------- 8.94s
azavea.collectd | Install Collectd -------------------------------------- 8.70s
azavea.nodejs | Download Node.js ---------------------------------------- 5.91s
model-my-watershed.tiler | Install tiler javascript dependencies -------- 4.55s
azavea.nginx | Install Nginx -------------------------------------------- 3.97s
model-my-watershed.tiler | Install canvas rendering dependencies -------- 3.95s
tiler : ok=62 changed=56 unreachable=0 failed=0
```<commit_after>"""
Author: Jharrod LaFon
See also: https://github.com/jlafon/ansible-profile
"""
import time
class CallbackModule(object):
"""
A plugin for timing tasks
"""
def __init__(self):
self.stats = {}
self.current = None
def playbook_on_task_start(self, name, is_conditional):
"""
Logs the start of each task
"""
if self.current is not None:
# Record the running time of the last executed task
self.stats[self.current] = time.time() - self.stats[self.current]
# Record the start time of the current task
self.current = name
self.stats[self.current] = time.time()
def playbook_on_stats(self, stats):
"""
Prints the timings
"""
# Record the timing of the very last task
if self.current is not None:
self.stats[self.current] = time.time() - self.stats[self.current]
# Sort the tasks by their running time
results = sorted(
self.stats.items(),
key=lambda value: value[1],
reverse=True,
)
# Just keep the top 10
results = results[:10]
print
# Print the timings
for name, elapsed in results:
print(
"{0:-<70}{1:->9}".format(
'{0} '.format(name),
' {0:.02f}s'.format(elapsed),
)
)
print
|
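A minimal standalone sketch of the timing pattern this plugin uses, runnable outside Ansible; the task names and sleep durations are invented for illustration:
```
# Standalone sketch of the plugin's start/stop timing pattern (Python 3).
# Task names and sleep durations are invented for illustration.
import time

stats = {}

def run_task(name, fn):
    start = time.time()
    fn()
    stats[name] = time.time() - start  # elapsed seconds per task

run_task('slow task', lambda: time.sleep(0.2))
run_task('fast task', lambda: time.sleep(0.05))

# Same report as the plugin: sort descending by elapsed time, keep the top 10.
for name, elapsed in sorted(stats.items(), key=lambda kv: kv[1], reverse=True)[:10]:
    print("{0:-<70}{1:->9}".format('{0} '.format(name), ' {0:.02f}s'.format(elapsed)))
```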
|
96777c29a8b9d27da8bc6098ee7cfc2ce9bf368f
|
polling_stations/apps/data_collection/management/commands/import_bromsgrove.py
|
polling_stations/apps/data_collection/management/commands/import_bromsgrove.py
|
"""
Import Bromsgrove
"""
import sys
from data_collection.management.commands import BaseShpShpImporter
class Command(BaseShpShpImporter):
"""
Imports the Polling Station data from Bromsgrove
"""
council_id = 'E07000234'
districts_name = 'Electoral Boundaries 2'
stations_name = 'Bromsgrove DC and Redditch BC Polling Stations - May 2015 Elections.shp'
def district_record_to_dict(self, record):
return {
'internal_council_id': record[1],
'name': record[1],
}
def station_record_to_dict(self, record):
return {
'internal_council_id': record[1],
'postcode' : '(postcode not supplied)',
'address' : '(address not supplied)'
}
|
Add an importer for Bromsgrove.
|
Add an importer for Bromsgrove.
refs #24
|
Python
|
bsd-3-clause
|
andylolz/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,andylolz/UK-Polling-Stations,andylolz/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations
|
Add an importer for Bromsgrove.
refs #24
|
"""
Import Bromsgrove
"""
import sys
from data_collection.management.commands import BaseShpShpImporter
class Command(BaseShpShpImporter):
"""
Imports the Polling Station data from Bromsgrove
"""
council_id = 'E07000234'
districts_name = 'Electoral Boundaries 2'
stations_name = 'Bromsgrove DC and Redditch BC Polling Stations - May 2015 Elections.shp'
def district_record_to_dict(self, record):
return {
'internal_council_id': record[1],
'name': record[1],
}
def station_record_to_dict(self, record):
return {
'internal_council_id': record[1],
'postcode' : '(postcode not supplied)',
'address' : '(address not supplied)'
}
|
<commit_before><commit_msg>Add an importer for Bromsgrove.
refs #24<commit_after>
|
"""
Import Bromsgrove
"""
import sys
from data_collection.management.commands import BaseShpShpImporter
class Command(BaseShpShpImporter):
"""
Imports the Polling Station data from Bromsgrove
"""
council_id = 'E07000234'
districts_name = 'Electoral Boundaries 2'
stations_name = 'Bromsgrove DC and Redditch BC Polling Stations - May 2015 Elections.shp'
def district_record_to_dict(self, record):
return {
'internal_council_id': record[1],
'name': record[1],
}
def station_record_to_dict(self, record):
return {
'internal_council_id': record[1],
'postcode' : '(postcode not supplied)',
'address' : '(address not supplied)'
}
|
Add an importer for Bromsgrove.
refs #24"""
Import Bromsgrove
"""
import sys
from data_collection.management.commands import BaseShpShpImporter
class Command(BaseShpShpImporter):
"""
Imports the Polling Station data from Bromsgrove
"""
council_id = 'E07000234'
districts_name = 'Electoral Boundaries 2'
stations_name = 'Bromsgrove DC and Redditch BC Polling Stations - May 2015 Elections.shp'
def district_record_to_dict(self, record):
return {
'internal_council_id': record[1],
'name': record[1],
}
def station_record_to_dict(self, record):
return {
'internal_council_id': record[1],
'postcode' : '(postcode not supplied)',
'address' : '(address not supplied)'
}
|
<commit_before><commit_msg>Add an importer for Bromsgrove.
refs #24<commit_after>"""
Import Bromsgrove
"""
import sys
from data_collection.management.commands import BaseShpShpImporter
class Command(BaseShpShpImporter):
"""
Imports the Polling Station data from Bromsgrove
"""
council_id = 'E07000234'
districts_name = 'Electoral Boundaries 2'
stations_name = 'Bromsgrove DC and Redditch BC Polling Stations - May 2015 Elections.shp'
def district_record_to_dict(self, record):
return {
'internal_council_id': record[1],
'name': record[1],
}
def station_record_to_dict(self, record):
return {
'internal_council_id': record[1],
'postcode' : '(postcode not supplied)',
'address' : '(address not supplied)'
}
|
|
aa554e0a67d69518da5cec8d97799497e3d996c4
|
fetchwikidatadata.py
|
fetchwikidatadata.py
|
# coding=utf-8
import urllib, urllib2
import json
def fetch_wikidata_data():
WIKIDATA_API_URL = 'https://www.wikidata.org/w/api.php'
param = {}
param['action'] = 'query'
param['format'] = 'json'
param['generator'] = 'allpages'
param['gapnamespace'] = 120
param['gaplimit'] = 'max'
param['prop'] = 'pageterms'
param['wbptterms'] = 'label|alias'
while True:
params = urllib.urlencode(param)
url = WIKIDATA_API_URL + '?' + params
urlobj = urllib2.urlopen(url)
json_data = json.load(urlobj)
for pageid, resp in json_data['query']['pages'].items():
labels = list(resp['terms']['label'])
if resp['terms'].get('alias'):
labels.extend(resp['terms']['alias'])
filename = resp['title'].replace('Property:', '')
filestream = open(filename, 'w')
content = '\n'.join(labels)
filestream.write(content.encode('utf-8'))
filestream.close()
if json_data.get('continue'):
param.update(json_data['continue'])
else:
break
if __name__ == '__main__':
fetch_wikidata_data()
|
Add script to download property data from Wikidata.
|
Add script to download property data from Wikidata.
|
Python
|
apache-2.0
|
jankohoener/asknow-UI,jankohoener/asknow-UI,jankohoener/asknow-UI
|
Add script to download property data from Wikidata.
|
# coding=utf-8
import urllib, urllib2
import json
def fetch_wikidata_data():
WIKIDATA_API_URL = 'https://www.wikidata.org/w/api.php'
param = {}
param['action'] = 'query'
param['format'] = 'json'
param['generator'] = 'allpages'
param['gapnamespace'] = 120
param['gaplimit'] = 'max'
param['prop'] = 'pageterms'
param['wbptterms'] = 'label|alias'
while True:
params = urllib.urlencode(param)
url = WIKIDATA_API_URL + '?' + params
urlobj = urllib2.urlopen(url)
json_data = json.load(urlobj)
for pageid, resp in json_data['query']['pages'].items():
labels = list(resp['terms']['label'])
if resp['terms'].get('alias'):
labels.extend(resp['terms']['alias'])
filename = resp['title'].replace('Property:', '')
filestream = open(filename, 'w')
content = '\n'.join(labels)
filestream.write(content.encode('utf-8'))
filestream.close()
if json_data.get('continue'):
param.update(json_data['continue'])
else:
break
if __name__ == '__main__':
fetch_wikidata_data()
|
<commit_before><commit_msg>Add script to download property data from Wikidata.<commit_after>
|
# coding=utf-8
import urllib, urllib2
import json
def fetch_wikidata_data():
WIKIDATA_API_URL = 'https://www.wikidata.org/w/api.php'
param = {}
param['action'] = 'query'
param['format'] = 'json'
param['generator'] = 'allpages'
param['gapnamespace'] = 120
param['gaplimit'] = 'max'
param['prop'] = 'pageterms'
param['wbptterms'] = 'label|alias'
while True:
params = urllib.urlencode(param)
url = WIKIDATA_API_URL + '?' + params
urlobj = urllib2.urlopen(url)
json_data = json.load(urlobj)
for pageid, resp in json_data['query']['pages'].items():
labels = list(resp['terms']['label'])
if resp['terms'].get('alias'):
labels.extend(resp['terms']['alias'])
filename = resp['title'].replace('Property:', '')
filestream = open(filename, 'w')
content = '\n'.join(labels)
filestream.write(content.encode('utf-8'))
filestream.close()
if json_data.get('continue'):
param.update(json_data['continue'])
else:
break
if __name__ == '__main__':
fetch_wikidata_data()
|
Add script to download property data from Wikidata.# coding=utf-8
import urllib, urllib2
import json
def fetch_wikidata_data():
WIKIDATA_API_URL = 'https://www.wikidata.org/w/api.php'
param = {}
param['action'] = 'query'
param['format'] = 'json'
param['generator'] = 'allpages'
param['gapnamespace'] = 120
param['gaplimit'] = 'max'
param['prop'] = 'pageterms'
param['wbptterms'] = 'label|alias'
while True:
params = urllib.urlencode(param)
url = WIKIDATA_API_URL + '?' + params
urlobj = urllib2.urlopen(url)
json_data = json.load(urlobj)
for pageid, resp in json_data['query']['pages'].items():
labels = list(resp['terms']['label'])
if resp['terms'].get('alias'):
labels.extend(resp['terms']['alias'])
filename = resp['title'].replace('Property:', '')
filestream = open(filename, 'w')
content = '\n'.join(labels)
filestream.write(content.encode('utf-8'))
filestream.close()
if json_data.get('continue'):
param.update(json_data['continue'])
else:
break
if __name__ == '__main__':
fetch_wikidata_data()
|
<commit_before><commit_msg>Add script to download property data from Wikidata.<commit_after># coding=utf-8
import urllib, urllib2
import json
def fetch_wikidata_data():
WIKIDATA_API_URL = 'https://www.wikidata.org/w/api.php'
param = {}
param['action'] = 'query'
param['format'] = 'json'
param['generator'] = 'allpages'
param['gapnamespace'] = 120
param['gaplimit'] = 'max'
param['prop'] = 'pageterms'
param['wbptterms'] = 'label|alias'
while True:
params = urllib.urlencode(param)
url = WIKIDATA_API_URL + '?' + params
urlobj = urllib2.urlopen(url)
json_data = json.load(urlobj)
for pageid, resp in json_data['query']['pages'].items():
labels = list(resp['terms']['label'])
if resp['terms'].get('alias'):
labels.extend(resp['terms']['alias'])
filename = resp['title'].replace('Property:', '')
filestream = open(filename, 'w')
content = '\n'.join(labels)
filestream.write(content.encode('utf-8'))
filestream.close()
if json_data.get('continue'):
param.update(json_data['continue'])
else:
break
if __name__ == '__main__':
fetch_wikidata_data()
|
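For reference, a Python 3 sketch of the same MediaWiki `continue` pagination loop using `requests` (the original script is Python 2 urllib/urllib2); the query parameters are copied from the script above, the rest is illustrative:
```
# Python 3 sketch of the MediaWiki 'continue' pagination used above.
# Requires the requests library.
import requests

API_URL = 'https://www.wikidata.org/w/api.php'
params = {
    'action': 'query', 'format': 'json',
    'generator': 'allpages', 'gapnamespace': 120, 'gaplimit': 'max',
    'prop': 'pageterms', 'wbptterms': 'label|alias',
}
while True:
    data = requests.get(API_URL, params=params).json()
    for pageid, page in data['query']['pages'].items():
        pass  # process page['title'] and page['terms'] here
    if 'continue' in data:
        params.update(data['continue'])  # merge server continuation tokens
    else:
        break
```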
|
e6675dcda6721acb6ad70adc68f1e33ce8190fc4
|
doc/examples/plot_pyramid.py
|
doc/examples/plot_pyramid.py
|
"""
====================
Build image pyramids
====================
This example shows how to build image pyramids.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage import img_as_float
from skimage.transform import build_gaussian_pyramid
image = data.lena()
rows, cols, dim = image.shape
pyramid = tuple(build_gaussian_pyramid(image, downscale=2))
display = np.zeros((rows, cols + cols / 2, 3), dtype=np.double)
display[:rows, :cols, :] = pyramid[0]
i_row = 0
for p in pyramid[1:]:
n_rows, n_cols = p.shape[:2]
display[i_row:i_row + n_rows, cols:cols + n_cols] = p
i_row += n_rows
plt.imshow(display)
plt.show()
|
Add example script for image pyramids
|
Add example script for image pyramids
|
Python
|
bsd-3-clause
|
SamHames/scikit-image,oew1v07/scikit-image,vighneshbirodkar/scikit-image,robintw/scikit-image,paalge/scikit-image,pratapvardhan/scikit-image,emon10005/scikit-image,bsipocz/scikit-image,chintak/scikit-image,jwiggins/scikit-image,ofgulban/scikit-image,paalge/scikit-image,ClinicalGraphics/scikit-image,Midafi/scikit-image,Britefury/scikit-image,michaelaye/scikit-image,vighneshbirodkar/scikit-image,Hiyorimi/scikit-image,paalge/scikit-image,keflavich/scikit-image,youprofit/scikit-image,WarrenWeckesser/scikits-image,michaelaye/scikit-image,Midafi/scikit-image,ofgulban/scikit-image,almarklein/scikit-image,almarklein/scikit-image,SamHames/scikit-image,dpshelio/scikit-image,youprofit/scikit-image,SamHames/scikit-image,juliusbierk/scikit-image,chintak/scikit-image,ajaybhat/scikit-image,keflavich/scikit-image,WarrenWeckesser/scikits-image,bennlich/scikit-image,chriscrosscutler/scikit-image,blink1073/scikit-image,almarklein/scikit-image,rjeli/scikit-image,emon10005/scikit-image,vighneshbirodkar/scikit-image,michaelpacer/scikit-image,pratapvardhan/scikit-image,juliusbierk/scikit-image,rjeli/scikit-image,ClinicalGraphics/scikit-image,dpshelio/scikit-image,michaelpacer/scikit-image,SamHames/scikit-image,robintw/scikit-image,bennlich/scikit-image,almarklein/scikit-image,chintak/scikit-image,newville/scikit-image,jwiggins/scikit-image,ajaybhat/scikit-image,Britefury/scikit-image,Hiyorimi/scikit-image,ofgulban/scikit-image,warmspringwinds/scikit-image,chintak/scikit-image,rjeli/scikit-image,blink1073/scikit-image,GaZ3ll3/scikit-image,chriscrosscutler/scikit-image,GaZ3ll3/scikit-image,newville/scikit-image,warmspringwinds/scikit-image,bsipocz/scikit-image,oew1v07/scikit-image
|
Add example script for image pyramids
|
"""
====================
Build image pyramids
====================
This example shows how to build image pyramids.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage import img_as_float
from skimage.transform import build_gaussian_pyramid
image = data.lena()
rows, cols, dim = image.shape
pyramid = tuple(build_gaussian_pyramid(image, downscale=2))
display = np.zeros((rows, cols + cols / 2, 3), dtype=np.double)
display[:rows, :cols, :] = pyramid[0]
i_row = 0
for p in pyramid[1:]:
n_rows, n_cols = p.shape[:2]
display[i_row:i_row + n_rows, cols:cols + n_cols] = p
i_row += n_rows
plt.imshow(display)
plt.show()
|
<commit_before><commit_msg>Add example script for image pyramids<commit_after>
|
"""
====================
Build image pyramids
====================
This example shows how to build image pyramids.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage import img_as_float
from skimage.transform import build_gaussian_pyramid
image = data.lena()
rows, cols, dim = image.shape
pyramid = tuple(build_gaussian_pyramid(image, downscale=2))
display = np.zeros((rows, cols + cols / 2, 3), dtype=np.double)
display[:rows, :cols, :] = pyramid[0]
i_row = 0
for p in pyramid[1:]:
n_rows, n_cols = p.shape[:2]
display[i_row:i_row + n_rows, cols:cols + n_cols] = p
i_row += n_rows
plt.imshow(display)
plt.show()
|
Add example script for image pyramids"""
====================
Build image pyramids
====================
This example shows how to build image pyramids.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage import img_as_float
from skimage.transform import build_gaussian_pyramid
image = data.lena()
rows, cols, dim = image.shape
pyramid = tuple(build_gaussian_pyramid(image, downscale=2))
display = np.zeros((rows, cols + cols / 2, 3), dtype=np.double)
display[:rows, :cols, :] = pyramid[0]
i_row = 0
for p in pyramid[1:]:
n_rows, n_cols = p.shape[:2]
display[i_row:i_row + n_rows, cols:cols + n_cols] = p
i_row += n_rows
plt.imshow(display)
plt.show()
|
<commit_before><commit_msg>Add example script for image pyramids<commit_after>"""
====================
Build image pyramids
====================
This example shows how to build image pyramids.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage import img_as_float
from skimage.transform import build_gaussian_pyramid
image = data.lena()
rows, cols, dim = image.shape
pyramid = tuple(build_gaussian_pyramid(image, downscale=2))
display = np.zeros((rows, cols + cols / 2, 3), dtype=np.double)
display[:rows, :cols, :] = pyramid[0]
i_row = 0
for p in pyramid[1:]:
n_rows, n_cols = p.shape[:2]
display[i_row:i_row + n_rows, cols:cols + n_cols] = p
i_row += n_rows
plt.imshow(display)
plt.show()
|
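A note on the canvas size in this example: with `downscale=2` the widest reduced level is `cols / 2`, and the stacked heights form a geometric series `rows/2 + rows/4 + ... < rows`, so a canvas of shape `(rows, cols + cols / 2)` always fits every level. A quick sanity check (sketch):
```
# Sanity check for the display geometry: the side column of reduced levels
# is at most cols // 2 wide and strictly less than rows tall in total.
rows, cols = 512, 512
levels = range(1, 10)
assert sum(rows // 2 ** k for k in levels) < rows
assert max(cols // 2 ** k for k in levels) == cols // 2
```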
|
a979ae85c16fd9f6e9e3c1f4ad8fa0261d212b88
|
status.py
|
status.py
|
max_db_int = 4294967295
UNKNOWN_HTTP = max_db_int - 1
UNKNOWN_NON_HTTP = max_db_int - 3
BOT_TIMEOUT = 0.25
CLIENT_TIMEOUT = 2
|
Add file for constants and magic numbers
|
Add file for constants and magic numbers
This is the file where things like default timeouts, statuses to write
to a database, and other magic values will be stored.
This makes the code more readable, and the constants importable from any other file.
|
Python
|
mit
|
Zloool/manyfaced-honeypot
|
Add file for constants and magic numbers
This is the file where things like default timeouts, statuses to write
to a database, and other magic values will be stored.
This makes the code more readable, and the constants importable from any other file.
|
max_db_int = 4294967295
UNKNOWN_HTTP = max_db_int - 1
UNKNOWN_NON_HTTP = max_db_int - 3
BOT_TIMEOUT = 0.25
CLIENT_TIMEOUT = 2
|
<commit_before><commit_msg>Add file for constants and magic numbers
This is the file where things like default timeouts, statuses to write
to a database, and other magic values will be stored.
This makes the code more readable, and the constants importable from any other file.<commit_after>
|
max_db_int = 4294967295
UNKNOWN_HTTP = max_db_int - 1
UNKNOWN_NON_HTTP = max_db_int - 3
BOT_TIMEOUT = 0.25
CLIENT_TIMEOUT = 2
|
Add file for constants and magic numbers
This is the file where things like default timeouts, statuses to write
to a database, and other magic values will be stored.
This makes the code more readable, and the constants importable from any other file.max_db_int = 4294967295
UNKNOWN_HTTP = max_db_int - 1
UNKNOWN_NON_HTTP = max_db_int - 3
BOT_TIMEOUT = 0.25
CLIENT_TIMEOUT = 2
|
<commit_before><commit_msg>Add file for constants and magic numbers
This is the file where things like default timeouts, statuses to write
to a database, and other magic values will be stored.
This makes the code more readable, and the constants importable from any other file.<commit_after>max_db_int = 4294967295
UNKNOWN_HTTP = max_db_int - 1
UNKNOWN_NON_HTTP = max_db_int - 3
BOT_TIMEOUT = 0.25
CLIENT_TIMEOUT = 2
|
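A sketch of how such a constants module is consumed; the surrounding honeypot code here is hypothetical:
```
# Hypothetical consumer of the constants module above (saved as status.py).
import status

def classify(sock, looks_like_http):
    sock.settimeout(status.CLIENT_TIMEOUT)  # shared timeout, one place to tune
    return status.UNKNOWN_HTTP if looks_like_http else status.UNKNOWN_NON_HTTP
```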
|
42415eb9bab1d4e5ac5b4c5bafe5234d0a617367
|
migrations/versions/770_set_submitted_at_for_old_brief_responses.py
|
migrations/versions/770_set_submitted_at_for_old_brief_responses.py
|
"""set submitted at for old brief responses
Revision ID: 770
Revises: 760
Create Date: 2016-10-25 11:10:53.245586
"""
# revision identifiers, used by Alembic.
revision = '770'
down_revision = '760'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.execute("""
UPDATE brief_responses
SET submitted_at = created_at
WHERE submitted_at IS NULL
""")
def downgrade():
# No downgrade
pass
|
Add migration that sets `submitted_at` for old brief responses
|
Add migration that sets `submitted_at` for old brief responses
For any brief response that does not have a `submitted_at` time,
we add one based on the `created_at` time. As we will now be looking
at `submitted_at` rather than `created_at` to indicate a submitted
brief response, we need to migrate all older brief responses to have
a `submitted_at` time.
|
Python
|
mit
|
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
|
Add migration that sets `submitted_at` for old brief responses
For any brief response that does not have a `submitted_at` time,
we add one based on the `created_at` time. As we will now be looking
at `submitted_at` rather than `created_at` to indicate a submitted
brief response, we need to migrate all older brief responses to have
a `submitted_at` time.
|
"""set submitted at for old brief responses
Revision ID: 770
Revises: 760
Create Date: 2016-10-25 11:10:53.245586
"""
# revision identifiers, used by Alembic.
revision = '770'
down_revision = '760'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.execute("""
UPDATE brief_responses
SET submitted_at = created_at
WHERE submitted_at IS NULL
""")
def downgrade():
# No downgrade
pass
|
<commit_before><commit_msg>Add migration that sets `submitted_at` for old brief responses
For any brief response that does not have a `submitted_at` time,
we add one based on the `created_at` time. As we will now be looking
at `submitted_at` rather than `created_at` to indicate a submitted
brief response, we need to migrate all older brief responses to have
a `submitted_at` time.<commit_after>
|
"""set submitted at for old brief responses
Revision ID: 770
Revises: 760
Create Date: 2016-10-25 11:10:53.245586
"""
# revision identifiers, used by Alembic.
revision = '770'
down_revision = '760'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.execute("""
UPDATE brief_responses
SET submitted_at = created_at
WHERE submitted_at IS NULL
""")
def downgrade():
# No downgrade
pass
|
Add migration that sets `submitted_at` for old brief responses
For any brief response that does not have a `submitted_at` time,
we add one based on the `created_at` time. As we will now be looking
at `submitted_at` rather than `created_at` to indicate a submitted
brief response, we need to migrate all older brief responses to have
a `submitted_at` time."""set submitted at for old brief responses
Revision ID: 770
Revises: 760
Create Date: 2016-10-25 11:10:53.245586
"""
# revision identifiers, used by Alembic.
revision = '770'
down_revision = '760'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.execute("""
UPDATE brief_responses
SET submitted_at = created_at
WHERE submitted_at IS NULL
""")
def downgrade():
# No downgrade
pass
|
<commit_before><commit_msg>Add migration that sets `submitted_at` for old brief responses
For any brief response that does not have a `submitted_at` time,
we add one based on the `created_at` time. As we will now be looking
at `submitted_at` rather than `created_at` to indicate a submitted
brief response, we need to migrate all older brief responses to have
a `submitted_at` time.<commit_after>"""set submitted at for old brief responses
Revision ID: 770
Revises: 760
Create Date: 2016-10-25 11:10:53.245586
"""
# revision identifiers, used by Alembic.
revision = '770'
down_revision = '760'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.execute("""
UPDATE brief_responses
SET submitted_at = created_at
WHERE submitted_at IS NULL
""")
def downgrade():
# No downgrade
pass
|
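The raw SQL above could also be expressed with SQLAlchemy Core; a sketch, only meaningful inside an Alembic migration like the one it mirrors, with illustrative table/column handles:
```
# Hypothetical SQLAlchemy Core equivalent of the raw UPDATE above.
from alembic import op
from sqlalchemy.sql import table, column

brief_responses = table(
    'brief_responses', column('submitted_at'), column('created_at'))

def upgrade():
    op.execute(
        brief_responses.update()
        .where(brief_responses.c.submitted_at.is_(None))
        .values(submitted_at=brief_responses.c.created_at)
    )
```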
|
efdb5b328ccf2597ab05ff2c225c36793199ec50
|
webapp/request_api.py
|
webapp/request_api.py
|
from django.db import connection
import requests
def get(http_url, user_id):
headers = {
'Authorization': __auth_token(user_id)
}
r = requests.get(http_url, headers=headers)
return r.json()
def post(http_url, user_id, json_data):
headers = {
'Authorization': __auth_token(user_id)
}
r = requests.post(http_url, headers=headers, data=json_data)
return r.json()
def __auth_token(user_id):
cursor = connection.cursor()
cursor.execute('SELECT * FROM authtoken_token WHERE user_id = %s',
str(user_id))
return 'Token ' + cursor.fetchone()[0]
|
Implement library to use API
|
Implement library to use API
|
Python
|
apache-2.0
|
deka108/meas_deka,deka108/mathqa-server,deka108/meas_deka,deka108/mathqa-server,deka108/meas_deka,deka108/meas_deka,deka108/mathqa-server,deka108/mathqa-server
|
Implement library to use API
|
from django.db import connection
import requests
def get(http_url, user_id):
headers = {
'Authorization': __auth_token(user_id)
}
r = requests.get(http_url, headers=headers)
return r.json()
def post(http_url, user_id, json_data):
headers = {
'Authorization': __auth_token(user_id)
}
r = requests.post(http_url, headers=headers, data=json_data)
return r.json()
def __auth_token(user_id):
cursor = connection.cursor()
cursor.execute('SELECT * FROM authtoken_token WHERE user_id = %s',
str(user_id))
return 'Token ' + cursor.fetchone()[0]
|
<commit_before><commit_msg>Implement library to use API<commit_after>
|
from django.db import connection
import requests
def get(http_url, user_id):
headers = {
'Authorization': __auth_token(user_id)
}
r = requests.get(http_url, headers=headers)
return r.json()
def post(http_url, user_id, json_data):
headers = {
'Authorization': __auth_token(user_id)
}
r = requests.post(http_url, headers=headers, data=json_data)
return r.json()
def __auth_token(user_id):
cursor = connection.cursor()
cursor.execute('SELECT * FROM authtoken_token WHERE user_id = %s',
str(user_id))
return 'Token ' + cursor.fetchone()[0]
|
Implement library to use APIfrom django.db import connection
import requests
def get(http_url, user_id):
headers = {
'Authorization': __auth_token(user_id)
}
r = requests.get(http_url, headers=headers)
return r.json()
def post(http_url, user_id, json_data):
headers = {
'Authorization': __auth_token(user_id)
}
r = requests.post(http_url, headers=headers, data=json_data)
return r.json()
def __auth_token(user_id):
cursor = connection.cursor()
cursor.execute('SELECT * FROM authtoken_token WHERE user_id = %s',
str(user_id))
return 'Token ' + cursor.fetchone()[0]
|
<commit_before><commit_msg>Implement library to use API<commit_after>from django.db import connection
import requests
def get(http_url, user_id):
headers = {
'Authorization': __auth_token(user_id)
}
r = requests.get(http_url, headers=headers)
return r.json()
def post(http_url, user_id, json_data):
headers = {
'Authorization': __auth_token(user_id)
}
r = requests.post(http_url, headers=headers, data=json_data)
return r.json()
def __auth_token(user_id):
cursor = connection.cursor()
cursor.execute('SELECT * FROM authtoken_token WHERE user_id = %s',
str(user_id))
return 'Token ' + cursor.fetchone()[0]
|
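A sketch of calling this wrapper; the URL, user id, and payload are invented. Note also that `cursor.execute` expects a sequence of parameters, so passing `[user_id]` rather than `str(user_id)` would be the conventional form.
```
# Hypothetical usage of the wrapper above; URL, user id and payload invented.
import request_api

questions = request_api.get('http://localhost:8000/api/questions/', 1)
created = request_api.post('http://localhost:8000/api/questions/', 1,
                           '{"title": "What is 2 + 2?"}')
```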
|
11f92aaee9c8f9f902ddc56203dfa2e7af94ea63
|
tuneme/migrations/0002_add_language_relation.py
|
tuneme/migrations/0002_add_language_relation.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def add_language_relation(apps, schema_editor):
from molo.core.models import SiteLanguage, LanguageRelation
from wagtail.wagtailcore.models import Page
if not (SiteLanguage.objects.filter(is_main_language=True)).exists():
from molo.core.models import LanguagePage
current_language = LanguagePage.objects.live().first()
if current_language:
main_lang = SiteLanguage.objects.create(
locale=current_language.code)
for p in Page.objects.all().descendant_of(current_language):
LanguageRelation.objects.create(page=p, language=main_lang)
class Migration(migrations.Migration):
dependencies = [
('tuneme', '0001_initial'),
('polls', '0025_create_polls_index_pages'),
('yourwords', '0006_create_your_words_index_pages'),
]
operations = [
migrations.RunPython(add_language_relation),
]
|
Add language relation to existing pages
|
Add language relation to existing pages
|
Python
|
bsd-2-clause
|
praekelt/molo-tuneme,praekelt/molo-tuneme,praekelt/molo-tuneme,praekelt/molo-tuneme
|
Add language relation to existing pages
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def add_language_relation(apps, schema_editor):
from molo.core.models import SiteLanguage, LanguageRelation
from wagtail.wagtailcore.models import Page
if not (SiteLanguage.objects.filter(is_main_language=True)).exists():
from molo.core.models import LanguagePage
current_language = LanguagePage.objects.live().first()
if current_language:
main_lang = SiteLanguage.objects.create(
locale=current_language.code)
for p in Page.objects.all().descendant_of(current_language):
LanguageRelation.objects.create(page=p, language=main_lang)
class Migration(migrations.Migration):
dependencies = [
('tuneme', '0001_initial'),
('polls', '0025_create_polls_index_pages'),
('yourwords', '0006_create_your_words_index_pages'),
]
operations = [
migrations.RunPython(add_language_relation),
]
|
<commit_before><commit_msg>Add language relation to existing pages<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def add_language_relation(apps, schema_editor):
from molo.core.models import SiteLanguage, LanguageRelation
from wagtail.wagtailcore.models import Page
if not (SiteLanguage.objects.filter(is_main_language=True)).exists():
from molo.core.models import LanguagePage
current_language = LanguagePage.objects.live().first()
if current_language:
main_lang = SiteLanguage.objects.create(
locale=current_language.code)
for p in Page.objects.all().descendant_of(current_language):
LanguageRelation.objects.create(page=p, language=main_lang)
class Migration(migrations.Migration):
dependencies = [
('tuneme', '0001_initial'),
('polls', '0025_create_polls_index_pages'),
('yourwords', '0006_create_your_words_index_pages'),
]
operations = [
migrations.RunPython(add_language_relation),
]
|
Add language relation to existing pages# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def add_language_relation(apps, schema_editor):
from molo.core.models import SiteLanguage, LanguageRelation
from wagtail.wagtailcore.models import Page
if not (SiteLanguage.objects.filter(is_main_language=True)).exists():
from molo.core.models import LanguagePage
current_language = LanguagePage.objects.live().first()
if current_language:
main_lang = SiteLanguage.objects.create(
locale=current_language.code)
for p in Page.objects.all().descendant_of(current_language):
LanguageRelation.objects.create(page=p, language=main_lang)
class Migration(migrations.Migration):
dependencies = [
('tuneme', '0001_initial'),
('polls', '0025_create_polls_index_pages'),
('yourwords', '0006_create_your_words_index_pages'),
]
operations = [
migrations.RunPython(add_language_relation),
]
|
<commit_before><commit_msg>Add language relation to existing pages<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def add_language_relation(apps, schema_editor):
from molo.core.models import SiteLanguage, LanguageRelation
from wagtail.wagtailcore.models import Page
if not (SiteLanguage.objects.filter(is_main_language=True)).exists():
from molo.core.models import LanguagePage
current_language = LanguagePage.objects.live().first()
if current_language:
main_lang = SiteLanguage.objects.create(
locale=current_language.code)
for p in Page.objects.all().descendant_of(current_language):
LanguageRelation.objects.create(page=p, language=main_lang)
class Migration(migrations.Migration):
dependencies = [
('tuneme', '0001_initial'),
('polls', '0025_create_polls_index_pages'),
('yourwords', '0006_create_your_words_index_pages'),
]
operations = [
migrations.RunPython(add_language_relation),
]
|
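Note that this migration imports the concrete models directly; Django's documented convention for data migrations is to use the historical models instead, roughly as below (the 'core' app label is an assumption):
```
# Sketch using historical models via apps.get_model, the documented
# convention for data migrations; the 'core' app label is an assumption.
def add_language_relation(apps, schema_editor):
    SiteLanguage = apps.get_model('core', 'SiteLanguage')
    LanguageRelation = apps.get_model('core', 'LanguageRelation')
    Page = apps.get_model('wagtailcore', 'Page')
    # ...same logic as above, run against the historical model state...
```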
|
d3a4153edcc25b358cfe3b0f0b315cbf064266bd
|
lintcode/Medium/105_Copy_List_with_Random_Pointer.py
|
lintcode/Medium/105_Copy_List_with_Random_Pointer.py
|
# Definition for singly-linked list with a random pointer.
# class RandomListNode:
# def __init__(self, x):
# self.label = x
# self.next = None
# self.random = None
class Solution:
# @param head: A RandomListNode
# @return: A RandomListNode
def copyRandomList(self, head):
# write your code here
# Solution 1
# dummy = RandomListNode(0)
# tmp = dummy
# tmp2 = head
# hashmap = {}
# while (tmp2):
# tmp.next = RandomListNode(tmp2.label)
# hashmap[tmp2.label] = tmp.next
# tmp = tmp.next
# tmp2 = tmp2.next
# tmp2 = head
# tmp = dummy
# while (tmp2):
# tmp.next.random = hashmap[tmp2.random.label] if tmp2.random else None
# tmp = tmp.next
# tmp2 = tmp2.next
# return dummy.next
# Solution 2
if (not head):
return None
tmp = head
while (tmp):
rest = tmp.next
tmp.next = RandomListNode(tmp.label)
tmp.next.next = rest
tmp = tmp.next.next
tmp = head
while (tmp):
if (tmp.random):
tmp.next.random = tmp.random.next
tmp = tmp.next.next
dummy = RandomListNode(0)
tmp = dummy
while (head):
tmp.next = head.next
head = head.next.next
tmp = tmp.next
return dummy.next
|
Add solution to lintcode question 105
|
Add solution to lintcode question 105
|
Python
|
mit
|
Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode
|
Add solution to lintcode question 105
|
# Definition for singly-linked list with a random pointer.
# class RandomListNode:
# def __init__(self, x):
# self.label = x
# self.next = None
# self.random = None
class Solution:
# @param head: A RandomListNode
# @return: A RandomListNode
def copyRandomList(self, head):
# write your code here
# Solution 1
# dummy = RandomListNode(0)
# tmp = dummy
# tmp2 = head
# hashmap = {}
# while (tmp2):
# tmp.next = RandomListNode(tmp2.label)
# hashmap[tmp2.label] = tmp.next
# tmp = tmp.next
# tmp2 = tmp2.next
# tmp2 = head
# tmp = dummy
# while (tmp2):
# tmp.next.random = hashmap[tmp2.random.label] if tmp2.random else None
# tmp = tmp.next
# tmp2 = tmp2.next
# return dummy.next
# Solution 2
if (not head):
return None
tmp = head
while (tmp):
rest = tmp.next
tmp.next = RandomListNode(tmp.label)
tmp.next.next = rest
tmp = tmp.next.next
tmp = head
while (tmp):
if (tmp.random):
tmp.next.random = tmp.random.next
tmp = tmp.next.next
dummy = RandomListNode(0)
tmp = dummy
while (head):
tmp.next = head.next
head = head.next.next
tmp = tmp.next
return dummy.next
|
<commit_before><commit_msg>Add solution to lintcode question 105<commit_after>
|
# Definition for singly-linked list with a random pointer.
# class RandomListNode:
# def __init__(self, x):
# self.label = x
# self.next = None
# self.random = None
class Solution:
# @param head: A RandomListNode
# @return: A RandomListNode
def copyRandomList(self, head):
# write your code here
# Solution 1
# dummy = RandomListNode(0)
# tmp = dummy
# tmp2 = head
# hashmap = {}
# while (tmp2):
# tmp.next = RandomListNode(tmp2.label)
# hashmap[tmp2.label] = tmp.next
# tmp = tmp.next
# tmp2 = tmp2.next
# tmp2 = head
# tmp = dummy
# while (tmp2):
# tmp.next.random = hashmap[tmp2.random.label] if tmp2.random else None
# tmp = tmp.next
# tmp2 = tmp2.next
# return dummy.next
# Solution 2
if (not head):
return None
tmp = head
while (tmp):
rest = tmp.next
tmp.next = RandomListNode(tmp.label)
tmp.next.next = rest
tmp = tmp.next.next
tmp = head
while (tmp):
if (tmp.random):
tmp.next.random = tmp.random.next
tmp = tmp.next.next
dummy = RandomListNode(0)
tmp = dummy
while (head):
tmp.next = head.next
head = head.next.next
tmp = tmp.next
return dummy.next
|
Add solution to lintcode question 105# Definition for singly-linked list with a random pointer.
# class RandomListNode:
# def __init__(self, x):
# self.label = x
# self.next = None
# self.random = None
class Solution:
# @param head: A RandomListNode
# @return: A RandomListNode
def copyRandomList(self, head):
# write your code here
# Solution 1
# dummy = RandomListNode(0)
# tmp = dummy
# tmp2 = head
# hashmap = {}
# while (tmp2):
# tmp.next = RandomListNode(tmp2.label)
# hashmap[tmp2.label] = tmp.next
# tmp = tmp.next
# tmp2 = tmp2.next
# tmp2 = head
# tmp = dummy
# while (tmp2):
# tmp.next.random = hashmap[tmp2.random.label] if tmp2.random else None
# tmp = tmp.next
# tmp2 = tmp2.next
# return dummy.next
# Solution 2
if (not head):
return None
tmp = head
while (tmp):
rest = tmp.next
tmp.next = RandomListNode(tmp.label)
tmp.next.next = rest
tmp = tmp.next.next
tmp = head
while (tmp):
if (tmp.random):
tmp.next.random = tmp.random.next
tmp = tmp.next.next
dummy = RandomListNode(0)
tmp = dummy
while (head):
tmp.next = head.next
head = head.next.next
tmp = tmp.next
return dummy.next
|
<commit_before><commit_msg>Add solution to lintcode question 105<commit_after># Definition for singly-linked list with a random pointer.
# class RandomListNode:
# def __init__(self, x):
# self.label = x
# self.next = None
# self.random = None
class Solution:
# @param head: A RandomListNode
# @return: A RandomListNode
def copyRandomList(self, head):
# write your code here
# Solution 1
# dummy = RandomListNode(0)
# tmp = dummy
# tmp2 = head
# hashmap = {}
# while (tmp2):
# tmp.next = RandomListNode(tmp2.label)
# hashmap[tmp2.label] = tmp.next
# tmp = tmp.next
# tmp2 = tmp2.next
# tmp2 = head
# tmp = dummy
# while (tmp2):
# tmp.next.random = hashmap[tmp2.random.label] if tmp2.random else None
# tmp = tmp.next
# tmp2 = tmp2.next
# return dummy.next
# Solution 2
if (not head):
return None
tmp = head
while (tmp):
rest = tmp.next
tmp.next = RandomListNode(tmp.label)
tmp.next.next = rest
tmp = tmp.next.next
tmp = head
while (tmp):
if (tmp.random):
tmp.next.random = tmp.random.next
tmp = tmp.next.next
dummy = RandomListNode(0)
tmp = dummy
while (head):
tmp.next = head.next
head = head.next.next
tmp = tmp.next
return dummy.next
|
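Solution 2 is the O(1)-extra-space technique: pass 1 interleaves each node with its copy (A, A', B, B'), pass 2 wires `copy.random = original.random.next`, pass 3 extracts the copies. A hypothetical driver to check it (note the extraction pass leaves the original's `next` pointers aimed at the copies; a fourth restoration step would be needed to leave the input intact):
```
# Hypothetical driver for Solution 2: two nodes whose randoms point at
# each other; verify the copy is structurally identical but independent.
class RandomListNode:  # same shape as the commented definition above
    def __init__(self, x):
        self.label, self.next, self.random = x, None, None

a, b = RandomListNode(1), RandomListNode(2)
a.next, a.random, b.random = b, b, a

copy = Solution().copyRandomList(a)
assert copy is not a and copy.label == 1 and copy.next.label == 2
assert copy.random is copy.next and copy.next.random is copy
```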
|
53ac7ef266899651fa9b73b402baa35cf920a31d
|
scripts/fix_nodes_templated_from_registration.py
|
scripts/fix_nodes_templated_from_registration.py
|
# -*- coding: utf-8 -*-
import sys
import logging
from website.app import setup_django, init_app
from scripts import utils as script_utils
from django.db import transaction
setup_django()
from osf.models import AbstractNode
logger = logging.getLogger(__name__)
def do_migration():
nodes = AbstractNode.objects.filter(template_node__type='osf.registration', type='osf.registration')
# Avoid updating date_modified for migration
date_modified_field = AbstractNode._meta.get_field('date_modified')
date_modified_field.auto_now = False
for node in nodes:
logger.info('Casting Registration {} to a Node'.format(node._id))
node._is_templated_clone = True
node.recast('osf.node')
node.save()
date_modified_field.auto_now = True
logger.info('Migrated {} nodes'.format(nodes.count()))
def main(dry=True):
init_app(routes=False)
with transaction.atomic():
do_migration()
if dry:
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
# Finally run the migration
main(dry=dry)
|
Add script to clean up nodes that were templated from registrations
|
Add script to clean up nodes that were templated from registrations
[OSF-7956]
|
Python
|
apache-2.0
|
TomBaxter/osf.io,caneruguz/osf.io,binoculars/osf.io,chennan47/osf.io,CenterForOpenScience/osf.io,pattisdr/osf.io,aaxelb/osf.io,cslzchen/osf.io,aaxelb/osf.io,saradbowman/osf.io,caseyrollins/osf.io,mfraezz/osf.io,pattisdr/osf.io,felliott/osf.io,chrisseto/osf.io,leb2dg/osf.io,mattclark/osf.io,TomBaxter/osf.io,cslzchen/osf.io,icereval/osf.io,sloria/osf.io,caseyrollins/osf.io,chrisseto/osf.io,HalcyonChimera/osf.io,icereval/osf.io,leb2dg/osf.io,caneruguz/osf.io,crcresearch/osf.io,HalcyonChimera/osf.io,caneruguz/osf.io,icereval/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,cwisecarver/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,Johnetordoff/osf.io,sloria/osf.io,cwisecarver/osf.io,erinspace/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,adlius/osf.io,baylee-d/osf.io,binoculars/osf.io,cwisecarver/osf.io,Johnetordoff/osf.io,caneruguz/osf.io,chennan47/osf.io,CenterForOpenScience/osf.io,leb2dg/osf.io,chrisseto/osf.io,CenterForOpenScience/osf.io,mattclark/osf.io,baylee-d/osf.io,felliott/osf.io,HalcyonChimera/osf.io,binoculars/osf.io,erinspace/osf.io,aaxelb/osf.io,caseyrollins/osf.io,adlius/osf.io,baylee-d/osf.io,felliott/osf.io,chrisseto/osf.io,laurenrevere/osf.io,mfraezz/osf.io,laurenrevere/osf.io,mfraezz/osf.io,HalcyonChimera/osf.io,laurenrevere/osf.io,mattclark/osf.io,adlius/osf.io,saradbowman/osf.io,aaxelb/osf.io,crcresearch/osf.io,mfraezz/osf.io,cwisecarver/osf.io,chennan47/osf.io,felliott/osf.io,crcresearch/osf.io,adlius/osf.io,brianjgeiger/osf.io,leb2dg/osf.io,brianjgeiger/osf.io,sloria/osf.io,erinspace/osf.io,Johnetordoff/osf.io,pattisdr/osf.io,TomBaxter/osf.io
|
Add script to clean up nodes that were templated from registrations
[OSF-7956]
|
# -*- coding: utf-8 -*-
import sys
import logging
from website.app import setup_django, init_app
from scripts import utils as script_utils
from django.db import transaction
setup_django()
from osf.models import AbstractNode
logger = logging.getLogger(__name__)
def do_migration():
nodes = AbstractNode.objects.filter(template_node__type='osf.registration', type='osf.registration')
# Avoid updating date_modified for migration
date_modified_field = AbstractNode._meta.get_field('date_modified')
date_modified_field.auto_now = False
for node in nodes:
logger.info('Casting Registration {} to a Node'.format(node._id))
node._is_templated_clone = True
node.recast('osf.node')
node.save()
date_modified_field.auto_now = True
logger.info('Migrated {} nodes'.format(nodes.count()))
def main(dry=True):
init_app(routes=False)
with transaction.atomic():
do_migration()
if dry:
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
# Finally run the migration
main(dry=dry)
|
<commit_before><commit_msg>Add script to clean up nodes that were templated from registrations
[OSF-7956]<commit_after>
|
# -*- coding: utf-8 -*-
import sys
import logging
from website.app import setup_django, init_app
from scripts import utils as script_utils
from django.db import transaction
setup_django()
from osf.models import AbstractNode
logger = logging.getLogger(__name__)
def do_migration():
nodes = AbstractNode.objects.filter(template_node__type='osf.registration', type='osf.registration')
# Avoid updating date_modified for migration
date_modified_field = AbstractNode._meta.get_field('date_modified')
date_modified_field.auto_now = False
for node in nodes:
logger.info('Casting Registration {} to a Node'.format(node._id))
node._is_templated_clone = True
node.recast('osf.node')
node.save()
date_modified_field.auto_now = True
logger.info('Migrated {} nodes'.format(nodes.count()))
def main(dry=True):
init_app(routes=False)
with transaction.atomic():
do_migration()
if dry:
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
# Finally run the migration
main(dry=dry)
|
Add script to clean up nodes that were templated from registrations
[OSF-7956]# -*- coding: utf-8 -*-
import sys
import logging
from website.app import setup_django, init_app
from scripts import utils as script_utils
from django.db import transaction
setup_django()
from osf.models import AbstractNode
logger = logging.getLogger(__name__)
def do_migration():
nodes = AbstractNode.objects.filter(template_node__type='osf.registration', type='osf.registration')
# Avoid updating date_modified for migration
date_modified_field = AbstractNode._meta.get_field('date_modified')
date_modified_field.auto_now = False
for node in nodes:
logger.info('Casting Registration {} to a Node'.format(node._id))
node._is_templated_clone = True
node.recast('osf.node')
node.save()
date_modified_field.auto_now = True
logger.info('Migrated {} nodes'.format(nodes.count()))
def main(dry=True):
init_app(routes=False)
with transaction.atomic():
do_migration()
if dry:
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
# Finally run the migration
main(dry=dry)
|
<commit_before><commit_msg>Add script to clean up nodes that were templated from registrations
[OSF-7956]<commit_after># -*- coding: utf-8 -*-
import sys
import logging
from website.app import setup_django, init_app
from scripts import utils as script_utils
from django.db import transaction
setup_django()
from osf.models import AbstractNode
logger = logging.getLogger(__name__)
def do_migration():
nodes = AbstractNode.objects.filter(template_node__type='osf.registration', type='osf.registration')
# Avoid updating date_modified for migration
date_modified_field = AbstractNode._meta.get_field('date_modified')
date_modified_field.auto_now = False
for node in nodes:
logger.info('Casting Registration {} to a Node'.format(node._id))
node._is_templated_clone = True
node.recast('osf.node')
node.save()
date_modified_field.auto_now = True
logger.info('Migrated {} nodes'.format(nodes.count()))
def main(dry=True):
init_app(routes=False)
with transaction.atomic():
do_migration()
if dry:
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
# Finally run the migration
main(dry=dry)
|
|
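The `auto_now` toggle above is the key trick: Django field instances are shared per model class, so flipping the flag off suppresses the timestamp bump for every save until it is restored. A generic sketch of the same pattern as a context manager (model and field names are placeholders):
```
# Generic sketch of the auto_now toggle used above; model/field names
# are placeholders. Field instances are shared per class, so restore it.
from contextlib import contextmanager

@contextmanager
def frozen_auto_now(model, field_name='date_modified'):
    field = model._meta.get_field(field_name)
    field.auto_now = False
    try:
        yield
    finally:
        field.auto_now = True
```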
79aa5fb22c71590f367ee4b0e8906df2a8693c27
|
saleor/core/tests/test_anonymize.py
|
saleor/core/tests/test_anonymize.py
|
from ..anonymize import obfuscate_address, obfuscate_email, obfuscate_string
def test_obfuscate_email():
# given
email = "abc@gmail.com"
# when
result = obfuscate_email(email)
# then
assert result == "a...@example.com"
def test_obfuscate_email_example_email():
# given
email = "abc@example.com"
# when
result = obfuscate_email(email)
# then
assert result == "abc@example.com"
def test_obfuscate_email_no_at_in_email():
# given
email = "abcgmail.com"
# when
result = obfuscate_email(email)
# then
assert result == "a..........."
def test_obfuscate_string():
# given
value = "AbcDef"
# when
result = obfuscate_string(value)
# then
assert result == "A....."
def test_obfuscate_string_empty_string():
# given
value = ""
# when
result = obfuscate_string(value)
# then
assert result == value
def test_obfuscate_string_phone_string():
# given
value = "+40123123123"
# when
result = obfuscate_string(value, phone=True)
# then
assert result == "+40........."
def test_obfuscate_address(address):
# given
first_name = address.first_name
last_name = address.last_name
company_name = address.company_name
street_address_1 = address.street_address_1
phone = str(address.phone)
# when
result = obfuscate_address(address)
# then
assert result.first_name == first_name[0] + "." * (len(first_name) - 1)
assert result.last_name == last_name[0] + "." * (len(last_name) - 1)
assert result.company_name == company_name[0] + "." * (len(company_name) - 1)
assert result.street_address_1 == street_address_1[0] + "." * (
len(street_address_1) - 1
)
assert result.street_address_2 == ""
assert result.phone == phone[:3] + "." * (len(phone) - 3)
|
Add tests for anonymize methods
|
Add tests for anonymize methods
|
Python
|
bsd-3-clause
|
mociepka/saleor,mociepka/saleor,mociepka/saleor
|
Add tests for anonymize methods
|
from ..anonymize import obfuscate_address, obfuscate_email, obfuscate_string
def test_obfuscate_email():
# given
email = "abc@gmail.com"
# when
result = obfuscate_email(email)
# then
assert result == "a...@example.com"
def test_obfuscate_email_example_email():
# given
email = "abc@example.com"
# when
result = obfuscate_email(email)
# then
assert result == "abc@example.com"
def test_obfuscate_email_no_at_in_email():
# given
email = "abcgmail.com"
# when
result = obfuscate_email(email)
# then
assert result == "a..........."
def test_obfuscate_string():
# given
value = "AbcDef"
# when
result = obfuscate_string(value)
# then
assert result == "A....."
def test_obfuscate_string_empty_string():
# given
value = ""
# when
result = obfuscate_string(value)
# then
assert result == value
def test_obfuscate_string_phone_string():
# given
value = "+40123123123"
# when
result = obfuscate_string(value, phone=True)
# then
assert result == "+40........."
def test_obfuscate_address(address):
# given
first_name = address.first_name
last_name = address.last_name
company_name = address.company_name
street_address_1 = address.street_address_1
phone = str(address.phone)
# when
result = obfuscate_address(address)
# then
assert result.first_name == first_name[0] + "." * (len(first_name) - 1)
assert result.last_name == last_name[0] + "." * (len(last_name) - 1)
assert result.company_name == company_name[0] + "." * (len(company_name) - 1)
assert result.street_address_1 == street_address_1[0] + "." * (
len(street_address_1) - 1
)
assert result.street_address_2 == ""
assert result.phone == phone[:3] + "." * (len(phone) - 3)
|
<commit_before><commit_msg>Add tests for anonymize methods<commit_after>
|
from ..anonymize import obfuscate_address, obfuscate_email, obfuscate_string
def test_obfuscate_email():
# given
email = "abc@gmail.com"
# when
result = obfuscate_email(email)
# then
assert result == "a...@example.com"
def test_obfuscate_email_example_email():
# given
email = "abc@example.com"
# when
result = obfuscate_email(email)
# then
assert result == "abc@example.com"
def test_obfuscate_email_no_at_in_email():
# given
email = "abcgmail.com"
# when
result = obfuscate_email(email)
# then
assert result == "a..........."
def test_obfuscate_string():
# given
value = "AbcDef"
# when
result = obfuscate_string(value)
# then
assert result == "A....."
def test_obfuscate_string_empty_string():
# given
value = ""
# when
result = obfuscate_string(value)
# then
assert result == value
def test_obfuscate_string_phone_string():
# given
value = "+40123123123"
# when
result = obfuscate_string(value, phone=True)
# then
assert result == "+40........."
def test_obfuscate_address(address):
# given
first_name = address.first_name
last_name = address.last_name
company_name = address.company_name
street_address_1 = address.street_address_1
phone = str(address.phone)
# when
result = obfuscate_address(address)
# then
assert result.first_name == first_name[0] + "." * (len(first_name) - 1)
assert result.last_name == last_name[0] + "." * (len(last_name) - 1)
assert result.company_name == company_name[0] + "." * (len(company_name) - 1)
assert result.street_address_1 == street_address_1[0] + "." * (
len(street_address_1) - 1
)
assert result.street_address_2 == ""
assert result.phone == phone[:3] + "." * (len(phone) - 3)
|
Add tests for anonymize methodsfrom ..anonymize import obfuscate_address, obfuscate_email, obfuscate_string
def test_obfuscate_email():
# given
email = "abc@gmail.com"
# when
result = obfuscate_email(email)
# then
assert result == "a...@example.com"
def test_obfuscate_email_example_email():
# given
email = "abc@example.com"
# when
result = obfuscate_email(email)
# then
assert result == "abc@example.com"
def test_obfuscate_email_no_at_in_email():
# given
email = "abcgmail.com"
# when
result = obfuscate_email(email)
# then
assert result == "a..........."
def test_obfuscate_string():
# given
value = "AbcDef"
# when
result = obfuscate_string(value)
# then
assert result == "A....."
def test_obfuscate_string_empty_string():
# given
value = ""
# when
result = obfuscate_string(value)
# then
assert result == value
def test_obfuscate_string_phone_string():
# given
value = "+40123123123"
# when
result = obfuscate_string(value, phone=True)
# then
assert result == "+40........."
def test_obfuscate_address(address):
# given
first_name = address.first_name
last_name = address.last_name
company_name = address.company_name
street_address_1 = address.street_address_1
phone = str(address.phone)
# when
result = obfuscate_address(address)
# then
assert result.first_name == first_name[0] + "." * (len(first_name) - 1)
assert result.last_name == last_name[0] + "." * (len(last_name) - 1)
assert result.company_name == company_name[0] + "." * (len(company_name) - 1)
assert result.street_address_1 == street_address_1[0] + "." * (
len(street_address_1) - 1
)
assert result.street_address_2 == ""
assert result.phone == phone[:3] + "." * (len(phone) - 3)
|
<commit_before><commit_msg>Add tests for anonymize methods<commit_after>from ..anonymize import obfuscate_address, obfuscate_email, obfuscate_string
def test_obfuscate_email():
# given
email = "abc@gmail.com"
# when
result = obfuscate_email(email)
# then
assert result == "a...@example.com"
def test_obfuscate_email_example_email():
# given
email = "abc@example.com"
# when
result = obfuscate_email(email)
# then
assert result == "abc@example.com"
def test_obfuscate_email_no_at_in_email():
# given
email = "abcgmail.com"
# when
result = obfuscate_email(email)
# then
assert result == "a..........."
def test_obfuscate_string():
# given
value = "AbcDef"
# when
result = obfuscate_string(value)
# then
assert result == "A....."
def test_obfuscate_string_empty_string():
# given
value = ""
# when
result = obfuscate_string(value)
# then
assert result == value
def test_obfuscate_string_phone_string():
# given
value = "+40123123123"
# when
result = obfuscate_string(value, phone=True)
# then
assert result == "+40........."
def test_obfuscate_address(address):
# given
first_name = address.first_name
last_name = address.last_name
company_name = address.company_name
street_address_1 = address.street_address_1
phone = str(address.phone)
# when
result = obfuscate_address(address)
# then
assert result.first_name == first_name[0] + "." * (len(first_name) - 1)
assert result.last_name == last_name[0] + "." * (len(last_name) - 1)
assert result.company_name == company_name[0] + "." * (len(company_name) - 1)
assert result.street_address_1 == street_address_1[0] + "." * (
len(street_address_1) - 1
)
assert result.street_address_2 == ""
assert result.phone == phone[:3] + "." * (len(phone) - 3)
|
|
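The obfuscation helpers exercised by these tests are imported from `..anonymize` but never shown in this record. A minimal sketch that would satisfy the string and email assertions above could look like the following; everything here is inferred from the tests, not the actual Saleor implementation, and the address helper is omitted for brevity.

def obfuscate_string(value, phone=False):
    # Keep the first character (or the '+XX' prefix when phone=True)
    # and replace the rest with dots, as the assertions expect.
    if not value:
        return value
    head = 3 if phone else 1
    return value[:head] + "." * (len(value) - head)

def obfuscate_email(email):
    # Addresses already at example.com pass through unchanged; anything
    # else keeps one character of the local part plus the safe domain.
    if "@" not in email:
        return obfuscate_string(email)
    local, domain = email.split("@", 1)
    if domain == "example.com":
        return email
    return local[0] + "...@example.com"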
7bf86f0ef0572e86370726ff25479d051b3fbd3e
|
scripts/check_dataset_integrity.py
|
scripts/check_dataset_integrity.py
|
import os
from collections import defaultdict
import click
import dtoolcore
@click.command()
@click.argument('dataset_path')
def main(dataset_path):
uri = "disk:{}".format(dataset_path)
proto_dataset = dtoolcore.ProtoDataSet.from_uri(uri)
overlays = defaultdict(dict)
for handle in proto_dataset._storage_broker.iter_item_handles():
identifier = dtoolcore.utils.generate_identifier(handle)
item_metadata = proto_dataset._storage_broker.get_item_metadata(handle)
for k, v in item_metadata.items():
overlays[k][identifier] = v
print overlays.keys()
# for handle in proto_dataset._storage_broker.iter_item_handles():
# print(handle)
for overlay in overlays:
print len(overlays[overlay])
if __name__ == '__main__':
main()
|
Add script to check dataset integrity
|
Add script to check dataset integrity
|
Python
|
mit
|
JIC-Image-Analysis/senescence-in-field,JIC-Image-Analysis/senescence-in-field,JIC-Image-Analysis/senescence-in-field
|
Add script to check dataset integrity
|
import os
from collections import defaultdict
import click
import dtoolcore
@click.command()
@click.argument('dataset_path')
def main(dataset_path):
uri = "disk:{}".format(dataset_path)
proto_dataset = dtoolcore.ProtoDataSet.from_uri(uri)
overlays = defaultdict(dict)
for handle in proto_dataset._storage_broker.iter_item_handles():
identifier = dtoolcore.utils.generate_identifier(handle)
item_metadata = proto_dataset._storage_broker.get_item_metadata(handle)
for k, v in item_metadata.items():
overlays[k][identifier] = v
print overlays.keys()
# for handle in proto_dataset._storage_broker.iter_item_handles():
# print(handle)
for overlay in overlays:
print len(overlays[overlay])
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to check dataset integrity<commit_after>
|
import os
from collections import defaultdict
import click
import dtoolcore
@click.command()
@click.argument('dataset_path')
def main(dataset_path):
uri = "disk:{}".format(dataset_path)
proto_dataset = dtoolcore.ProtoDataSet.from_uri(uri)
overlays = defaultdict(dict)
for handle in proto_dataset._storage_broker.iter_item_handles():
identifier = dtoolcore.utils.generate_identifier(handle)
item_metadata = proto_dataset._storage_broker.get_item_metadata(handle)
for k, v in item_metadata.items():
overlays[k][identifier] = v
print overlays.keys()
# for handle in proto_dataset._storage_broker.iter_item_handles():
# print(handle)
for overlay in overlays:
print len(overlays[overlay])
if __name__ == '__main__':
main()
|
Add script to check dataset integrityimport os
from collections import defaultdict
import click
import dtoolcore
@click.command()
@click.argument('dataset_path')
def main(dataset_path):
uri = "disk:{}".format(dataset_path)
proto_dataset = dtoolcore.ProtoDataSet.from_uri(uri)
overlays = defaultdict(dict)
for handle in proto_dataset._storage_broker.iter_item_handles():
identifier = dtoolcore.utils.generate_identifier(handle)
item_metadata = proto_dataset._storage_broker.get_item_metadata(handle)
for k, v in item_metadata.items():
overlays[k][identifier] = v
print overlays.keys()
# for handle in proto_dataset._storage_broker.iter_item_handles():
# print(handle)
for overlay in overlays:
print len(overlays[overlay])
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to check dataset integrity<commit_after>import os
from collections import defaultdict
import click
import dtoolcore
@click.command()
@click.argument('dataset_path')
def main(dataset_path):
uri = "disk:{}".format(dataset_path)
proto_dataset = dtoolcore.ProtoDataSet.from_uri(uri)
overlays = defaultdict(dict)
for handle in proto_dataset._storage_broker.iter_item_handles():
identifier = dtoolcore.utils.generate_identifier(handle)
item_metadata = proto_dataset._storage_broker.get_item_metadata(handle)
for k, v in item_metadata.items():
overlays[k][identifier] = v
print overlays.keys()
# for handle in proto_dataset._storage_broker.iter_item_handles():
# print(handle)
for overlay in overlays:
print len(overlays[overlay])
if __name__ == '__main__':
main()
|
|
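The script above is Python 2 (bare print statements) and carries an unused `import os`. A Python 3 rendering of the same integrity check, still leaning on the private `_storage_broker` API exactly as the original does, might read as follows; treat it as a sketch rather than a tested replacement.

from collections import defaultdict

import click
import dtoolcore

@click.command()
@click.argument('dataset_path')
def main(dataset_path):
    uri = "disk:{}".format(dataset_path)
    proto_dataset = dtoolcore.ProtoDataSet.from_uri(uri)
    overlays = defaultdict(dict)
    for handle in proto_dataset._storage_broker.iter_item_handles():
        identifier = dtoolcore.utils.generate_identifier(handle)
        item_metadata = proto_dataset._storage_broker.get_item_metadata(handle)
        for k, v in item_metadata.items():
            overlays[k][identifier] = v
    # Every overlay should cover the same number of items, so print the
    # per-overlay counts to make mismatches stand out.
    print(list(overlays.keys()))
    for name, mapping in overlays.items():
        print(name, len(mapping))

if __name__ == '__main__':
    main()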
34af85c3ed74ef40f20b22d874facbca48f89d13
|
adhocracy/migration/versions/029_add_user_badges.py
|
adhocracy/migration/versions/029_add_user_badges.py
|
from datetime import datetime
from sqlalchemy import MetaData, Column, ForeignKey, Table
from sqlalchemy import DateTime, Integer, Unicode
metadata = MetaData()
badge_table = Table('badge', metadata,
Column('id', Integer, primary_key=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('title', Unicode(40), nullable=False),
Column('color', Unicode(7), nullable=False))
user_badges_table = Table('user_badges', metadata,
Column('id', Integer, primary_key=True),
Column('badge_id', Integer, ForeignKey('badge.id'),
nullable=False),
Column('user_id', Integer, ForeignKey('user.id'),
nullable=False),
Column('create_time', DateTime, default=datetime.utcnow),
Column('creator_id', Integer, ForeignKey('user.id'), nullable=False))
def upgrade(migrate_engine):
metadata.bind = migrate_engine
badge_table.create()
user_badges_table.create()
def downgrade(migrate_engine):
raise NotImplementedError()
|
Add migration script for user badges
|
Add migration script for user badges
|
Python
|
agpl-3.0
|
DanielNeugebauer/adhocracy,SysTheron/adhocracy,alkadis/vcv,SysTheron/adhocracy,liqd/adhocracy,alkadis/vcv,DanielNeugebauer/adhocracy,phihag/adhocracy,alkadis/vcv,liqd/adhocracy,phihag/adhocracy,phihag/adhocracy,liqd/adhocracy,SysTheron/adhocracy,phihag/adhocracy,phihag/adhocracy,alkadis/vcv,DanielNeugebauer/adhocracy,alkadis/vcv,DanielNeugebauer/adhocracy,DanielNeugebauer/adhocracy,liqd/adhocracy
|
Add migration script for user badges
|
from datetime import datetime
from sqlalchemy import MetaData, Column, ForeignKey, Table
from sqlalchemy import DateTime, Integer, Unicode
metadata = MetaData()
badge_table = Table('badge', metadata,
Column('id', Integer, primary_key=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('title', Unicode(40), nullable=False),
Column('color', Unicode(7), nullable=False))
user_badges_table = Table('user_badges', metadata,
Column('id', Integer, primary_key=True),
Column('badge_id', Integer, ForeignKey('badge.id'),
nullable=False),
Column('user_id', Integer, ForeignKey('user.id'),
nullable=False),
Column('create_time', DateTime, default=datetime.utcnow),
Column('creator_id', Integer, ForeignKey('user.id'), nullable=False))
def upgrade(migrate_engine):
metadata.bind = migrate_engine
badge_table.create()
user_badges_table.create()
def downgrade(migrate_engine):
raise NotImplementedError()
|
<commit_before><commit_msg>Add migration script for user badges<commit_after>
|
from datetime import datetime
from sqlalchemy import MetaData, Column, ForeignKey, Table
from sqlalchemy import DateTime, Integer, Unicode
metadata = MetaData()
badge_table = Table('badge', metadata,
Column('id', Integer, primary_key=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('title', Unicode(40), nullable=False),
Column('color', Unicode(7), nullable=False))
user_badges_table = Table('user_badges', metadata,
Column('id', Integer, primary_key=True),
Column('badge_id', Integer, ForeignKey('badge.id'),
nullable=False),
Column('user_id', Integer, ForeignKey('user.id'),
nullable=False),
Column('create_time', DateTime, default=datetime.utcnow),
Column('creator_id', Integer, ForeignKey('user.id'), nullable=False))
def upgrade(migrate_engine):
metadata.bind = migrate_engine
badge_table.create()
user_badges_table.create()
def downgrade(migrate_engine):
raise NotImplementedError()
|
Add migration script for user badgesfrom datetime import datetime
from sqlalchemy import MetaData, Column, ForeignKey, Table
from sqlalchemy import DateTime, Integer, Unicode
metadata = MetaData()
badge_table = Table('badge', metadata,
Column('id', Integer, primary_key=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('title', Unicode(40), nullable=False),
Column('color', Unicode(7), nullable=False))
user_badges_table = Table('user_badges', metadata,
Column('id', Integer, primary_key=True),
Column('badge_id', Integer, ForeignKey('badge.id'),
nullable=False),
Column('user_id', Integer, ForeignKey('user.id'),
nullable=False),
Column('create_time', DateTime, default=datetime.utcnow),
Column('creator_id', Integer, ForeignKey('user.id'), nullable=False))
def upgrade(migrate_engine):
metadata.bind = migrate_engine
badge_table.create()
user_badges_table.create()
def downgrade(migrate_engine):
raise NotImplementedError()
|
<commit_before><commit_msg>Add migration script for user badges<commit_after>from datetime import datetime
from sqlalchemy import MetaData, Column, ForeignKey, Table
from sqlalchemy import DateTime, Integer, Unicode
metadata = MetaData()
badge_table = Table('badge', metadata,
Column('id', Integer, primary_key=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('title', Unicode(40), nullable=False),
Column('color', Unicode(7), nullable=False))
user_badges_table = Table('user_badges', metadata,
Column('id', Integer, primary_key=True),
Column('badge_id', Integer, ForeignKey('badge.id'),
nullable=False),
Column('user_id', Integer, ForeignKey('user.id'),
nullable=False),
Column('create_time', DateTime, default=datetime.utcnow),
Column('creator_id', Integer, ForeignKey('user.id'), nullable=False))
def upgrade(migrate_engine):
metadata.bind = migrate_engine
badge_table.create()
user_badges_table.create()
def downgrade(migrate_engine):
raise NotImplementedError()
|
|
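The migration leaves `downgrade` unimplemented. If a rollback were wanted, the usual sqlalchemy-migrate pattern is to drop the tables in reverse dependency order, along the lines of this untested sketch:

def downgrade(migrate_engine):
    metadata.bind = migrate_engine
    # Drop the dependent table first so its foreign keys to badge and
    # user are released before badge itself is removed.
    user_badges_table.drop()
    badge_table.drop()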
a388af55a88602318159b8a52150a6a49e1be6f7
|
requests_oauth2/services.py
|
requests_oauth2/services.py
|
from requests_oauth2 import OAuth2
class GoogleClient(OAuth2):
site = "https://accounts.google.com"
authorization_url = "/o/oauth2/auth"
token_url = "/o/oauth2/token"
scope_sep = " "
class FacebookClient(OAuth2):
site = "https://www.facebook.com/"
authorization_url = "/dialog/oauth"
token_url = "/oauth/access_token"
scope_sep = " "
class InstagramClient(OAuth2):
site = "https://api.instagram.com"
authorization_url = "/oauth/authorize"
token_url = "/oauth/access_token"
scope_sep = " "
|
Add some shortcuts for Google, Facebook, Instagram
|
Add some shortcuts for Google, Facebook, Instagram
|
Python
|
bsd-3-clause
|
maraujop/requests-oauth2
|
Add some shortcuts for Google, Facebook, Instagram
|
from requests_oauth2 import OAuth2
class GoogleClient(OAuth2):
site = "https://accounts.google.com"
authorization_url = "/o/oauth2/auth"
token_url = "/o/oauth2/token"
scope_sep = " "
class FacebookClient(OAuth2):
site = "https://www.facebook.com/"
authorization_url = "/dialog/oauth"
token_url = "/oauth/access_token"
scope_sep = " "
class InstagramClient(OAuth2):
site = "https://api.instagram.com"
authorization_url = "/oauth/authorize"
token_url = "/oauth/access_token"
scope_sep = " "
|
<commit_before><commit_msg>Add some shortcuts for Google, Facebook, Instagram<commit_after>
|
from requests_oauth2 import OAuth2
class GoogleClient(OAuth2):
site = "https://accounts.google.com"
authorization_url = "/o/oauth2/auth"
token_url = "/o/oauth2/token"
scope_sep = " "
class FacebookClient(OAuth2):
site = "https://www.facebook.com/"
authorization_url = "/dialog/oauth"
token_url = "/oauth/access_token"
scope_sep = " "
class InstagramClient(OAuth2):
site = "https://api.instagram.com"
authorization_url = "/oauth/authorize"
token_url = "/oauth/access_token"
scope_sep = " "
|
Add some shortcuts for Google, Facebook, Instagramfrom requests_oauth2 import OAuth2
class GoogleClient(OAuth2):
site = "https://accounts.google.com"
authorization_url = "/o/oauth2/auth"
token_url = "/o/oauth2/token"
scope_sep = " "
class FacebookClient(OAuth2):
site = "https://www.facebook.com/"
authorization_url = "/dialog/oauth"
token_url = "/oauth/access_token"
scope_sep = " "
class InstagramClient(OAuth2):
site = "https://api.instagram.com"
authorization_url = "/oauth/authorize"
token_url = "/oauth/access_token"
scope_sep = " "
|
<commit_before><commit_msg>Add some shortcuts for Google, Facebook, Instagram<commit_after>from requests_oauth2 import OAuth2
class GoogleClient(OAuth2):
site = "https://accounts.google.com"
authorization_url = "/o/oauth2/auth"
token_url = "/o/oauth2/token"
scope_sep = " "
class FacebookClient(OAuth2):
site = "https://www.facebook.com/"
authorization_url = "/dialog/oauth"
token_url = "/oauth/access_token"
scope_sep = " "
class InstagramClient(OAuth2):
site = "https://api.instagram.com"
authorization_url = "/oauth/authorize"
token_url = "/oauth/access_token"
scope_sep = " "
|
|
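The record never shows these subclasses in use. Assuming the `OAuth2` base class accepts the usual client credentials and exposes an authorization-URL helper (both assumptions; check the library for the exact signatures), usage might look roughly like:

# Hypothetical usage; the constructor arguments and method name below
# are assumptions about the OAuth2 base class, not confirmed here.
from requests_oauth2.services import GoogleClient

google = GoogleClient(
    client_id="my-client-id",                           # placeholder
    redirect_uri="https://example.com/oauth/callback",  # placeholder
)
url = google.authorize_url(scope=["email", "profile"], response_type="code")
print(url)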
0816398dae4fd7f35a4eba3b4a6545798703ca44
|
hacker-rank/fb-hack-2018/degenerate_triangle.py
|
hacker-rank/fb-hack-2018/degenerate_triangle.py
|
def triangleOrNot(a, b, c):
n = len(a)
result = []
for i in range(n):
side_a = a[i]
side_b = b[i]
side_c = c[i]
print(side_a,side_b,side_c)
sort_arr = qsort([side_a,side_b,side_c])
print(sort_arr)
result.append('No' if (sort_arr[0] + sort_arr[1] <= sort_arr[2]) else 'Yes')
return result
def qsort(inlist):
if inlist == []:
return []
else:
pivot = inlist[0]
lesser = qsort([x for x in inlist[1:] if x < pivot])
greater = qsort([x for x in inlist[1:] if x >= pivot])
return lesser + [pivot] + greater
print('Result : ',triangleOrNot([7,10,7],[2,3,4],[2,7,4]))
|
Add solution for FB Melbourne 2018 Hackathon Preliminary Challenge
|
Add solution for FB Melbourne 2018 Hackathon Preliminary Challenge
|
Python
|
mit
|
martindavid/code-sandbox,martindavid/code-sandbox,martindavid/code-sandbox,martindavid/code-sandbox,martindavid/code-sandbox,martindavid/code-sandbox,martindavid/code-sandbox,martindavid/code-sandbox,martindavid/code-sandbox
|
Add solution for FB Melbourne 2018 Hackathon Preliminary Challenge
|
def triangleOrNot(a, b, c):
n = len(a)
result = []
for i in range(n):
side_a = a[i]
side_b = b[i]
side_c = c[i]
print(side_a,side_b,side_c)
sort_arr = qsort([side_a,side_b,side_c])
print(sort_arr)
result.append('No' if (sort_arr[0] + sort_arr[1] <= sort_arr[2]) else 'Yes')
return result
def qsort(inlist):
if inlist == []:
return []
else:
pivot = inlist[0]
lesser = qsort([x for x in inlist[1:] if x < pivot])
greater = qsort([x for x in inlist[1:] if x >= pivot])
return lesser + [pivot] + greater
print('Result : ',triangleOrNot([7,10,7],[2,3,4],[2,7,4]))
|
<commit_before><commit_msg>Add solution for FB Melbourne 2018 Hackathon Preliminary Challenge<commit_after>
|
def triangleOrNot(a, b, c):
n = len(a)
result = []
for i in range(n):
side_a = a[i]
side_b = b[i]
side_c = c[i]
print(side_a,side_b,side_c)
sort_arr = qsort([side_a,side_b,side_c])
print(sort_arr)
result.append('No' if (sort_arr[0] + sort_arr[1] <= sort_arr[2]) else 'Yes')
return result
def qsort(inlist):
if inlist == []:
return []
else:
pivot = inlist[0]
lesser = qsort([x for x in inlist[1:] if x < pivot])
greater = qsort([x for x in inlist[1:] if x >= pivot])
return lesser + [pivot] + greater
print('Result : ',triangleOrNot([7,10,7],[2,3,4],[2,7,4]))
|
Add solution for FB Melbourne 2018 Hackathon Preliminary Challengedef triangleOrNot(a, b, c):
n = len(a)
result = []
for i in range(n):
side_a = a[i]
side_b = b[i]
side_c = c[i]
print(side_a,side_b,side_c)
sort_arr = qsort([side_a,side_b,side_c])
print(sort_arr)
result.append('No' if (sort_arr[0] + sort_arr[1] <= sort_arr[2]) else 'Yes')
return result
def qsort(inlist):
if inlist == []:
return []
else:
pivot = inlist[0]
lesser = qsort([x for x in inlist[1:] if x < pivot])
greater = qsort([x for x in inlist[1:] if x >= pivot])
return lesser + [pivot] + greater
print('Result : ',triangleOrNot([7,10,7],[2,3,4],[2,7,4]))
|
<commit_before><commit_msg>Add solution for FB Melbourne 2018 Hackathon Preliminary Challenge<commit_after>def triangleOrNot(a, b, c):
n = len(a)
result = []
for i in range(n):
side_a = a[i]
side_b = b[i]
side_c = c[i]
print(side_a,side_b,side_c)
sort_arr = qsort([side_a,side_b,side_c])
print(sort_arr)
result.append('No' if (sort_arr[0] + sort_arr[1] <= sort_arr[2]) else 'Yes')
return result
def qsort(inlist):
if inlist == []:
return []
else:
pivot = inlist[0]
lesser = qsort([x for x in inlist[1:] if x < pivot])
greater = qsort([x for x in inlist[1:] if x >= pivot])
return lesser + [pivot] + greater
print('Result : ',triangleOrNot([7,10,7],[2,3,4],[2,7,4]))
|
|
fae8889ae24ab5dcb8ea28af1664fb5a54fdbdfb
|
junction/schedule/migrations/0004_auto_20150917_2017.py
|
junction/schedule/migrations/0004_auto_20150917_2017.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('schedule', '0003_scheduleitemtype'),
]
operations = [
migrations.AlterField(
model_name='scheduleitem',
name='type',
field=models.CharField(default=b'Talk', max_length=20, choices=[(b'Talk', b'Talk'), (b'Lunch', b'Lunch'), (b'Break', b'Break'), (b'Workshop', b'Workshop'), (b'Poster', b'Poster'), (b'Open Space', b'Open Space'), (b'Introduction', b'Introduction')]),
preserve_default=True,
),
]
|
Add migration for session choices
|
Add migration for session choices
|
Python
|
mit
|
ChillarAnand/junction,farhaanbukhsh/junction,nava45/junction,pythonindia/junction,ChillarAnand/junction,nava45/junction,ChillarAnand/junction,pythonindia/junction,pythonindia/junction,farhaanbukhsh/junction,farhaanbukhsh/junction,pythonindia/junction,ChillarAnand/junction,nava45/junction,farhaanbukhsh/junction,nava45/junction
|
Add migration for session choices
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('schedule', '0003_scheduleitemtype'),
]
operations = [
migrations.AlterField(
model_name='scheduleitem',
name='type',
field=models.CharField(default=b'Talk', max_length=20, choices=[(b'Talk', b'Talk'), (b'Lunch', b'Lunch'), (b'Break', b'Break'), (b'Workshop', b'Workshop'), (b'Poster', b'Poster'), (b'Open Space', b'Open Space'), (b'Introduction', b'Introduction')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration for session choices<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('schedule', '0003_scheduleitemtype'),
]
operations = [
migrations.AlterField(
model_name='scheduleitem',
name='type',
field=models.CharField(default=b'Talk', max_length=20, choices=[(b'Talk', b'Talk'), (b'Lunch', b'Lunch'), (b'Break', b'Break'), (b'Workshop', b'Workshop'), (b'Poster', b'Poster'), (b'Open Space', b'Open Space'), (b'Introduction', b'Introduction')]),
preserve_default=True,
),
]
|
Add migration for session choices# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('schedule', '0003_scheduleitemtype'),
]
operations = [
migrations.AlterField(
model_name='scheduleitem',
name='type',
field=models.CharField(default=b'Talk', max_length=20, choices=[(b'Talk', b'Talk'), (b'Lunch', b'Lunch'), (b'Break', b'Break'), (b'Workshop', b'Workshop'), (b'Poster', b'Poster'), (b'Open Space', b'Open Space'), (b'Introduction', b'Introduction')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration for session choices<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('schedule', '0003_scheduleitemtype'),
]
operations = [
migrations.AlterField(
model_name='scheduleitem',
name='type',
field=models.CharField(default=b'Talk', max_length=20, choices=[(b'Talk', b'Talk'), (b'Lunch', b'Lunch'), (b'Break', b'Break'), (b'Workshop', b'Workshop'), (b'Poster', b'Poster'), (b'Open Space', b'Open Space'), (b'Introduction', b'Introduction')]),
preserve_default=True,
),
]
|
|
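A migration like this normally mirrors a choices tuple on the model so schema and validation stay in sync. Inferred from the field definition above (the real model lives in junction/schedule/models.py and may differ in detail), the model side would read roughly:

from django.db import models

SESSION_TYPE_CHOICES = (
    ('Talk', 'Talk'),
    ('Lunch', 'Lunch'),
    ('Break', 'Break'),
    ('Workshop', 'Workshop'),
    ('Poster', 'Poster'),
    ('Open Space', 'Open Space'),
    ('Introduction', 'Introduction'),
)

class ScheduleItem(models.Model):
    # Sketch inferred from the migration; not the actual junction model.
    type = models.CharField(default='Talk', max_length=20,
                            choices=SESSION_TYPE_CHOICES)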
2a198df61a420e97b746d1a27a0f622be56e386c
|
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}.py
|
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}.py
|
# -*- coding: utf-8 -*-
"""
{{cookiecutter.repo_name}}
============================
The root of :class:`{{cookiecutter.app_class_name}}` is created from the kv file.
"""
import kivy
kivy.require('{{cookiecutter.kivy_version}}')
from kivy.app import App
class {{cookiecutter.app_class_name}}(App):
"""Basic Kivy App with a user defined title."""
title = '{{cookiecutter.app_title}}'
def build(self):
return self.root
|
Implement a basic kivy application
|
Implement a basic kivy application
|
Python
|
mit
|
hackebrot/cookiedozer,hackebrot/cookiedozer
|
Implement a basic kivy application
|
# -*- coding: utf-8 -*-
"""
{{cookiecutter.repo_name}}
============================
The root of :class:`{{cookiecutter.app_class_name}}` is created from the kv file.
"""
import kivy
kivy.require('{{cookiecutter.kivy_version}}')
from kivy.app import App
class {{cookiecutter.app_class_name}}(App):
"""Basic Kivy App with a user defined title."""
title = '{{cookiecutter.app_title}}'
def build(self):
return self.root
|
<commit_before><commit_msg>Implement a basic kivy application<commit_after>
|
# -*- coding: utf-8 -*-
"""
{{cookiecutter.repo_name}}
============================
The root of :class:`{{cookiecutter.app_class_name}}` is created from the kv file.
"""
import kivy
kivy.require('{{cookiecutter.kivy_version}}')
from kivy.app import App
class {{cookiecutter.app_class_name}}(App):
"""Basic Kivy App with a user defined title."""
title = '{{cookiecutter.app_title}}'
def build(self):
return self.root
|
Implement a basic kivy application# -*- coding: utf-8 -*-
"""
{{cookiecutter.repo_name}}
============================
The root of :class:`{{cookiecutter.app_class_name}}` is created from the kv file.
"""
import kivy
kivy.require('{{cookiecutter.kivy_version}}')
from kivy.app import App
class {{cookiecutter.app_class_name}}(App):
"""Basic Kivy App with a user defined title."""
title = '{{cookiecutter.app_title}}'
def build(self):
return self.root
|
<commit_before><commit_msg>Implement a basic kivy application<commit_after># -*- coding: utf-8 -*-
"""
{{cookiecutter.repo_name}}
============================
The root of :class:`{{cookiecutter.app_class_name}}` is created from the kv file.
"""
import kivy
kivy.require('{{cookiecutter.kivy_version}}')
from kivy.app import App
class {{cookiecutter.app_class_name}}(App):
"""Basic Kivy App with a user defined title."""
title = '{{cookiecutter.app_title}}'
def build(self):
return self.root
|
|
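Since `build` returns `self.root`, the rendered app relies on Kivy's convention of auto-loading a kv file named after the App class (lowercased, minus the trailing 'App'). A sketch of the rendered result for a hypothetical app_class_name of DemoApp:

# Hypothetical rendering; Kivy's App.load_kv() would look for a
# sibling demo.kv file to build the root widget tree.
import kivy
kivy.require('1.9.1')  # placeholder kivy_version

from kivy.app import App

class DemoApp(App):
    title = 'Demo'  # placeholder app_title

    def build(self):
        # self.root was already populated from demo.kv.
        return self.root

if __name__ == '__main__':
    DemoApp().run()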
0c79848797b5bbe89579144f16845daf3cee5da6
|
server/crashmanager/urls.py
|
server/crashmanager/urls.py
|
from django.conf.urls import patterns, include, url
from rest_framework import routers
from crashmanager import views
router = routers.DefaultRouter()
router.register(r'signatures', views.BucketViewSet)
router.register(r'crashes', views.CrashEntryViewSet)
urlpatterns = patterns('',
url(r'^rest/api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^logout/$', views.logout_view, name='logout'),
url(r'^$', views.index, name='index'),
url(r'^signatures/$', views.signatures, name='signatures'),
url(r'^signatures/new/$', views.newSignature, name='signew'),
url(r'^signatures/(?P<sigid>\d+)/edit/$', views.editSignature, name='sigedit'),
url(r'^signatures/(?P<sigid>\d+)/linkextbug/$', views.editSignature, name='linkextbug'),
url(r'^signatures/(?P<sigid>\d+)/view/$', views.viewSignature, name='sigview'),
url(r'^signatures/(?P<sigid>\d+)/delete/$', views.deleteSignature, name='sigdel'),
url(r'^crashes/$', views.crashes, name='crashes'),
url(r'^crashes/(?P<crashid>\d+)/$', views.viewCrashEntry, name='crashview'),
url(r'^rest/', include(router.urls)),
)
|
from django.conf.urls import patterns, include, url
from rest_framework import routers
from crashmanager import views
router = routers.DefaultRouter()
router.register(r'signatures', views.BucketViewSet)
router.register(r'crashes', views.CrashEntryViewSet)
urlpatterns = patterns('',
url(r'^rest/api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^logout/$', views.logout_view, name='logout'),
url(r'^$', views.index, name='index'),
url(r'^signatures/$', views.signatures, name='signatures'),
url(r'^signatures/new/$', views.newSignature, name='signew'),
url(r'^signatures/(?P<sigid>\d+)/edit/$', views.editSignature, name='sigedit'),
url(r'^signatures/(?P<sigid>\d+)/linkextbug/$', views.editSignature, name='linkextbug'),
url(r'^signatures/(?P<sigid>\d+)/$', views.viewSignature, name='sigview'),
url(r'^signatures/(?P<sigid>\d+)/delete/$', views.deleteSignature, name='sigdel'),
url(r'^crashes/$', views.crashes, name='crashes'),
url(r'^crashes/(?P<crashid>\d+)/$', views.viewCrashEntry, name='crashview'),
url(r'^rest/', include(router.urls)),
)
|
Remove the "view/" part for viewing signatures
|
Remove the "view/" part for viewing signatures
|
Python
|
mpl-2.0
|
lazyparser/FuzzManager,cihatix/FuzzManager,sigma-random/FuzzManager,cihatix/FuzzManager,sigma-random/FuzzManager,MozillaSecurity/FuzzManager,cihatix/FuzzManager,sigma-random/FuzzManager,MozillaSecurity/FuzzManager,sigma-random/FuzzManager,cihatix/FuzzManager,lazyparser/FuzzManager,lazyparser/FuzzManager,MozillaSecurity/FuzzManager,MozillaSecurity/FuzzManager,lazyparser/FuzzManager
|
from django.conf.urls import patterns, include, url
from rest_framework import routers
from crashmanager import views
router = routers.DefaultRouter()
router.register(r'signatures', views.BucketViewSet)
router.register(r'crashes', views.CrashEntryViewSet)
urlpatterns = patterns('',
url(r'^rest/api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^logout/$', views.logout_view, name='logout'),
url(r'^$', views.index, name='index'),
url(r'^signatures/$', views.signatures, name='signatures'),
url(r'^signatures/new/$', views.newSignature, name='signew'),
url(r'^signatures/(?P<sigid>\d+)/edit/$', views.editSignature, name='sigedit'),
url(r'^signatures/(?P<sigid>\d+)/linkextbug/$', views.editSignature, name='linkextbug'),
url(r'^signatures/(?P<sigid>\d+)/view/$', views.viewSignature, name='sigview'),
url(r'^signatures/(?P<sigid>\d+)/delete/$', views.deleteSignature, name='sigdel'),
url(r'^crashes/$', views.crashes, name='crashes'),
url(r'^crashes/(?P<crashid>\d+)/$', views.viewCrashEntry, name='crashview'),
url(r'^rest/', include(router.urls)),
)Remove the "view/" part for viewing signatures
|
from django.conf.urls import patterns, include, url
from rest_framework import routers
from crashmanager import views
router = routers.DefaultRouter()
router.register(r'signatures', views.BucketViewSet)
router.register(r'crashes', views.CrashEntryViewSet)
urlpatterns = patterns('',
url(r'^rest/api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^logout/$', views.logout_view, name='logout'),
url(r'^$', views.index, name='index'),
url(r'^signatures/$', views.signatures, name='signatures'),
url(r'^signatures/new/$', views.newSignature, name='signew'),
url(r'^signatures/(?P<sigid>\d+)/edit/$', views.editSignature, name='sigedit'),
url(r'^signatures/(?P<sigid>\d+)/linkextbug/$', views.editSignature, name='linkextbug'),
url(r'^signatures/(?P<sigid>\d+)/$', views.viewSignature, name='sigview'),
url(r'^signatures/(?P<sigid>\d+)/delete/$', views.deleteSignature, name='sigdel'),
url(r'^crashes/$', views.crashes, name='crashes'),
url(r'^crashes/(?P<crashid>\d+)/$', views.viewCrashEntry, name='crashview'),
url(r'^rest/', include(router.urls)),
)
|
<commit_before>from django.conf.urls import patterns, include, url
from rest_framework import routers
from crashmanager import views
router = routers.DefaultRouter()
router.register(r'signatures', views.BucketViewSet)
router.register(r'crashes', views.CrashEntryViewSet)
urlpatterns = patterns('',
url(r'^rest/api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^logout/$', views.logout_view, name='logout'),
url(r'^$', views.index, name='index'),
url(r'^signatures/$', views.signatures, name='signatures'),
url(r'^signatures/new/$', views.newSignature, name='signew'),
url(r'^signatures/(?P<sigid>\d+)/edit/$', views.editSignature, name='sigedit'),
url(r'^signatures/(?P<sigid>\d+)/linkextbug/$', views.editSignature, name='linkextbug'),
url(r'^signatures/(?P<sigid>\d+)/view/$', views.viewSignature, name='sigview'),
url(r'^signatures/(?P<sigid>\d+)/delete/$', views.deleteSignature, name='sigdel'),
url(r'^crashes/$', views.crashes, name='crashes'),
url(r'^crashes/(?P<crashid>\d+)/$', views.viewCrashEntry, name='crashview'),
url(r'^rest/', include(router.urls)),
)<commit_msg>Remove the "view/" part for viewing signatures<commit_after>
|
from django.conf.urls import patterns, include, url
from rest_framework import routers
from crashmanager import views
router = routers.DefaultRouter()
router.register(r'signatures', views.BucketViewSet)
router.register(r'crashes', views.CrashEntryViewSet)
urlpatterns = patterns('',
url(r'^rest/api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^logout/$', views.logout_view, name='logout'),
url(r'^$', views.index, name='index'),
url(r'^signatures/$', views.signatures, name='signatures'),
url(r'^signatures/new/$', views.newSignature, name='signew'),
url(r'^signatures/(?P<sigid>\d+)/edit/$', views.editSignature, name='sigedit'),
url(r'^signatures/(?P<sigid>\d+)/linkextbug/$', views.editSignature, name='linkextbug'),
url(r'^signatures/(?P<sigid>\d+)/$', views.viewSignature, name='sigview'),
url(r'^signatures/(?P<sigid>\d+)/delete/$', views.deleteSignature, name='sigdel'),
url(r'^crashes/$', views.crashes, name='crashes'),
url(r'^crashes/(?P<crashid>\d+)/$', views.viewCrashEntry, name='crashview'),
url(r'^rest/', include(router.urls)),
)
|
from django.conf.urls import patterns, include, url
from rest_framework import routers
from crashmanager import views
router = routers.DefaultRouter()
router.register(r'signatures', views.BucketViewSet)
router.register(r'crashes', views.CrashEntryViewSet)
urlpatterns = patterns('',
url(r'^rest/api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^logout/$', views.logout_view, name='logout'),
url(r'^$', views.index, name='index'),
url(r'^signatures/$', views.signatures, name='signatures'),
url(r'^signatures/new/$', views.newSignature, name='signew'),
url(r'^signatures/(?P<sigid>\d+)/edit/$', views.editSignature, name='sigedit'),
url(r'^signatures/(?P<sigid>\d+)/linkextbug/$', views.editSignature, name='linkextbug'),
url(r'^signatures/(?P<sigid>\d+)/view/$', views.viewSignature, name='sigview'),
url(r'^signatures/(?P<sigid>\d+)/delete/$', views.deleteSignature, name='sigdel'),
url(r'^crashes/$', views.crashes, name='crashes'),
url(r'^crashes/(?P<crashid>\d+)/$', views.viewCrashEntry, name='crashview'),
url(r'^rest/', include(router.urls)),
)Remove the "view/" part for viewing signaturesfrom django.conf.urls import patterns, include, url
from rest_framework import routers
from crashmanager import views
router = routers.DefaultRouter()
router.register(r'signatures', views.BucketViewSet)
router.register(r'crashes', views.CrashEntryViewSet)
urlpatterns = patterns('',
url(r'^rest/api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^logout/$', views.logout_view, name='logout'),
url(r'^$', views.index, name='index'),
url(r'^signatures/$', views.signatures, name='signatures'),
url(r'^signatures/new/$', views.newSignature, name='signew'),
url(r'^signatures/(?P<sigid>\d+)/edit/$', views.editSignature, name='sigedit'),
url(r'^signatures/(?P<sigid>\d+)/linkextbug/$', views.editSignature, name='linkextbug'),
url(r'^signatures/(?P<sigid>\d+)/$', views.viewSignature, name='sigview'),
url(r'^signatures/(?P<sigid>\d+)/delete/$', views.deleteSignature, name='sigdel'),
url(r'^crashes/$', views.crashes, name='crashes'),
url(r'^crashes/(?P<crashid>\d+)/$', views.viewCrashEntry, name='crashview'),
url(r'^rest/', include(router.urls)),
)
|
<commit_before>from django.conf.urls import patterns, include, url
from rest_framework import routers
from crashmanager import views
router = routers.DefaultRouter()
router.register(r'signatures', views.BucketViewSet)
router.register(r'crashes', views.CrashEntryViewSet)
urlpatterns = patterns('',
url(r'^rest/api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^logout/$', views.logout_view, name='logout'),
url(r'^$', views.index, name='index'),
url(r'^signatures/$', views.signatures, name='signatures'),
url(r'^signatures/new/$', views.newSignature, name='signew'),
url(r'^signatures/(?P<sigid>\d+)/edit/$', views.editSignature, name='sigedit'),
url(r'^signatures/(?P<sigid>\d+)/linkextbug/$', views.editSignature, name='linkextbug'),
url(r'^signatures/(?P<sigid>\d+)/view/$', views.viewSignature, name='sigview'),
url(r'^signatures/(?P<sigid>\d+)/delete/$', views.deleteSignature, name='sigdel'),
url(r'^crashes/$', views.crashes, name='crashes'),
url(r'^crashes/(?P<crashid>\d+)/$', views.viewCrashEntry, name='crashview'),
url(r'^rest/', include(router.urls)),
)<commit_msg>Remove the "view/" part for viewing signatures<commit_after>from django.conf.urls import patterns, include, url
from rest_framework import routers
from crashmanager import views
router = routers.DefaultRouter()
router.register(r'signatures', views.BucketViewSet)
router.register(r'crashes', views.CrashEntryViewSet)
urlpatterns = patterns('',
url(r'^rest/api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^logout/$', views.logout_view, name='logout'),
url(r'^$', views.index, name='index'),
url(r'^signatures/$', views.signatures, name='signatures'),
url(r'^signatures/new/$', views.newSignature, name='signew'),
url(r'^signatures/(?P<sigid>\d+)/edit/$', views.editSignature, name='sigedit'),
url(r'^signatures/(?P<sigid>\d+)/linkextbug/$', views.editSignature, name='linkextbug'),
url(r'^signatures/(?P<sigid>\d+)/$', views.viewSignature, name='sigview'),
url(r'^signatures/(?P<sigid>\d+)/delete/$', views.deleteSignature, name='sigdel'),
url(r'^crashes/$', views.crashes, name='crashes'),
url(r'^crashes/(?P<crashid>\d+)/$', views.viewCrashEntry, name='crashview'),
url(r'^rest/', include(router.urls)),
)
|
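Because only the path changed and the pattern name `sigview` was kept, any code or template that reverses the URL by name keeps working after this commit. A quick check in the Django 1.x style that matches the `patterns()` syntax above, assuming the app's URLconf is mounted at the site root:

from django.core.urlresolvers import reverse  # django.urls in Django 2+

url = reverse('sigview', kwargs={'sigid': 42})
assert url == '/signatures/42/'  # previously '/signatures/42/view/'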
21e02b839b058fbc8069d9140278b9a2ffc7d6d6
|
tests/services/playlists_service.py
|
tests/services/playlists_service.py
|
from tests.base import ApiDBTestCase
from zou.app.models.playlist import Playlist
from zou.app.services import (
files_service,
playlists_service,
tasks_service
)
class PlaylistsServiceTestCase(ApiDBTestCase):
def setUp(self):
super(PlaylistsServiceTestCase, self).setUp()
self.generate_fixture_project_status()
self.generate_fixture_project_standard()
self.generate_fixture_project()
self.generate_fixture_asset_type()
self.generate_fixture_asset()
self.episode_2 = self.generate_fixture_episode("E02")
self.generate_fixture_episode()
self.generate_fixture_sequence()
self.generate_fixture_shot()
self.sequence_dict = self.sequence.serialize()
self.project_dict = self.sequence.serialize()
def generate_fixture_playlist(self):
self.playlist = Playlist.create(
name="Playlist 1",
shots={},
project_id=self.project.id,
episode_id=self.episode.id
)
Playlist.create(
name="Playlist 2",
shots={},
project_id=self.project_standard.id
)
self.playlist = Playlist.create(
name="Playlist 3",
shots={},
project_id=self.project.id,
episode_id=self.episode_2.id
)
return self.playlist.serialize()
def test_get_playlist_for_project(self):
self.generate_fixture_playlist()
playlists = playlists_service.all_playlists_for_project(self.project.id)
self.assertEquals(len(playlists), 2)
self.assertTrue(
"Playlist 2" not in [playlists[0]["name"], playlists[1]["name"]]
)
def test_get_playlist_for_episode(self):
self.generate_fixture_playlist()
playlists = playlists_service.all_playlists_for_episode(self.episode.id)
self.assertEquals(len(playlists), 1)
self.assertEquals(playlists[0]["name"], "Playlist 1")
def test_get_playlist(self):
pass
"""
def test_retrieve_playlist_tmp_files(self):
def test_get_playlist_with_preview_file_revisions(self):
def test_set_preview_files_for_shots(self):
def test_build_playlist_zip_file(self):
def test_build_playlist_movie_file(self):
def test_start_build_job(self):
def test_end_build_job(self):
def test_build_playlist_job(self):
def test_get_playlist_file_name(self):
def test_get_playlist_movie_file_path(self):
def test_get_playlist_zip_file_path(self):
def test_get_build_job(self):
def test_remove_build_job(self):
"""
|
Add some tests to playlists service
|
Add some tests to playlists service
|
Python
|
agpl-3.0
|
cgwire/zou
|
Add some tests to playlists service
|
from tests.base import ApiDBTestCase
from zou.app.models.playlist import Playlist
from zou.app.services import (
files_service,
playlists_service,
tasks_service
)
class PlaylistsServiceTestCase(ApiDBTestCase):
def setUp(self):
super(PlaylistsServiceTestCase, self).setUp()
self.generate_fixture_project_status()
self.generate_fixture_project_standard()
self.generate_fixture_project()
self.generate_fixture_asset_type()
self.generate_fixture_asset()
self.episode_2 = self.generate_fixture_episode("E02")
self.generate_fixture_episode()
self.generate_fixture_sequence()
self.generate_fixture_shot()
self.sequence_dict = self.sequence.serialize()
self.project_dict = self.sequence.serialize()
def generate_fixture_playlist(self):
self.playlist = Playlist.create(
name="Playlist 1",
shots={},
project_id=self.project.id,
episode_id=self.episode.id
)
Playlist.create(
name="Playlist 2",
shots={},
project_id=self.project_standard.id
)
self.playlist = Playlist.create(
name="Playlist 3",
shots={},
project_id=self.project.id,
episode_id=self.episode_2.id
)
return self.playlist.serialize()
def test_get_playlist_for_project(self):
self.generate_fixture_playlist()
playlists = playlists_service.all_playlists_for_project(self.project.id)
self.assertEquals(len(playlists), 2)
self.assertTrue(
"Playlist 2" not in [playlists[0]["name"], playlists[1]["name"]]
)
def test_get_playlist_for_episode(self):
self.generate_fixture_playlist()
playlists = playlists_service.all_playlists_for_episode(self.episode.id)
self.assertEquals(len(playlists), 1)
self.assertEquals(playlists[0]["name"], "Playlist 1")
def test_get_playlist(self):
pass
"""
def test_retrieve_playlist_tmp_files(self):
def test_get_playlist_with_preview_file_revisions(self):
def test_set_preview_files_for_shots(self):
def test_build_playlist_zip_file(self):
def test_build_playlist_movie_file(self):
def test_start_build_job(self):
def test_end_build_job(self):
def test_build_playlist_job(self):
def test_get_playlist_file_name(self):
def test_get_playlist_movie_file_path(self):
def test_get_playlist_zip_file_path(self):
def test_get_build_job(self):
def test_remove_build_job(self):
"""
|
<commit_before><commit_msg>Add some tests to playlists service<commit_after>
|
from tests.base import ApiDBTestCase
from zou.app.models.playlist import Playlist
from zou.app.services import (
files_service,
playlists_service,
tasks_service
)
class PlaylistsServiceTestCase(ApiDBTestCase):
def setUp(self):
super(PlaylistsServiceTestCase, self).setUp()
self.generate_fixture_project_status()
self.generate_fixture_project_standard()
self.generate_fixture_project()
self.generate_fixture_asset_type()
self.generate_fixture_asset()
self.episode_2 = self.generate_fixture_episode("E02")
self.generate_fixture_episode()
self.generate_fixture_sequence()
self.generate_fixture_shot()
self.sequence_dict = self.sequence.serialize()
self.project_dict = self.sequence.serialize()
def generate_fixture_playlist(self):
self.playlist = Playlist.create(
name="Playlist 1",
shots={},
project_id=self.project.id,
episode_id=self.episode.id
)
Playlist.create(
name="Playlist 2",
shots={},
project_id=self.project_standard.id
)
self.playlist = Playlist.create(
name="Playlist 3",
shots={},
project_id=self.project.id,
episode_id=self.episode_2.id
)
return self.playlist.serialize()
def test_get_playlist_for_project(self):
self.generate_fixture_playlist()
playlists = playlists_service.all_playlists_for_project(self.project.id)
self.assertEquals(len(playlists), 2)
self.assertTrue(
"Playlist 2" not in [playlists[0]["name"], playlists[1]["name"]]
)
def test_get_playlist_for_episode(self):
self.generate_fixture_playlist()
playlists = playlists_service.all_playlists_for_episode(self.episode.id)
self.assertEquals(len(playlists), 1)
self.assertEquals(playlists[0]["name"], "Playlist 1")
def test_get_playlist(self):
pass
"""
def test_retrieve_playlist_tmp_files(self):
def test_get_playlist_with_preview_file_revisions(self):
def test_set_preview_files_for_shots(self):
def test_build_playlist_zip_file(self):
def test_build_playlist_movie_file(self):
def test_start_build_job(self):
def test_end_build_job(self):
def test_build_playlist_job(self):
def test_get_playlist_file_name(self):
def test_get_playlist_movie_file_path(self):
def test_get_playlist_zip_file_path(self):
def test_get_build_job(self):
def test_remove_build_job(self):
"""
|
Add some tests to playlists servicefrom tests.base import ApiDBTestCase
from zou.app.models.playlist import Playlist
from zou.app.services import (
files_service,
playlists_service,
tasks_service
)
class PlaylistsServiceTestCase(ApiDBTestCase):
def setUp(self):
super(PlaylistsServiceTestCase, self).setUp()
self.generate_fixture_project_status()
self.generate_fixture_project_standard()
self.generate_fixture_project()
self.generate_fixture_asset_type()
self.generate_fixture_asset()
self.episode_2 = self.generate_fixture_episode("E02")
self.generate_fixture_episode()
self.generate_fixture_sequence()
self.generate_fixture_shot()
self.sequence_dict = self.sequence.serialize()
self.project_dict = self.sequence.serialize()
def generate_fixture_playlist(self):
self.playlist = Playlist.create(
name="Playlist 1",
shots={},
project_id=self.project.id,
episode_id=self.episode.id
)
Playlist.create(
name="Playlist 2",
shots={},
project_id=self.project_standard.id
)
self.playlist = Playlist.create(
name="Playlist 3",
shots={},
project_id=self.project.id,
episode_id=self.episode_2.id
)
return self.playlist.serialize()
def test_get_playlist_for_project(self):
self.generate_fixture_playlist()
playlists = playlists_service.all_playlists_for_project(self.project.id)
self.assertEquals(len(playlists), 2)
self.assertTrue(
"Playlist 2" not in [playlists[0]["name"], playlists[1]["name"]]
)
def test_get_playlist_for_episode(self):
self.generate_fixture_playlist()
playlists = playlists_service.all_playlists_for_episode(self.episode.id)
self.assertEquals(len(playlists), 1)
self.assertEquals(playlists[0]["name"], "Playlist 1")
def test_get_playlist(self):
pass
"""
def test_retrieve_playlist_tmp_files(self):
def test_get_playlist_with_preview_file_revisions(self):
def test_set_preview_files_for_shots(self):
def test_build_playlist_zip_file(self):
def test_build_playlist_movie_file(self):
def test_start_build_job(self):
def test_end_build_job(self):
def test_build_playlist_job(self):
def test_get_playlist_file_name(self):
def test_get_playlist_movie_file_path(self):
def test_get_playlist_zip_file_path(self):
def test_get_build_job(self):
def test_remove_build_job(self):
"""
|
<commit_before><commit_msg>Add some tests to playlists service<commit_after>from tests.base import ApiDBTestCase
from zou.app.models.playlist import Playlist
from zou.app.services import (
files_service,
playlists_service,
tasks_service
)
class PlaylistsServiceTestCase(ApiDBTestCase):
def setUp(self):
super(PlaylistsServiceTestCase, self).setUp()
self.generate_fixture_project_status()
self.generate_fixture_project_standard()
self.generate_fixture_project()
self.generate_fixture_asset_type()
self.generate_fixture_asset()
self.episode_2 = self.generate_fixture_episode("E02")
self.generate_fixture_episode()
self.generate_fixture_sequence()
self.generate_fixture_shot()
self.sequence_dict = self.sequence.serialize()
self.project_dict = self.sequence.serialize()
def generate_fixture_playlist(self):
self.playlist = Playlist.create(
name="Playlist 1",
shots={},
project_id=self.project.id,
episode_id=self.episode.id
)
Playlist.create(
name="Playlist 2",
shots={},
project_id=self.project_standard.id
)
self.playlist = Playlist.create(
name="Playlist 3",
shots={},
project_id=self.project.id,
episode_id=self.episode_2.id
)
return self.playlist.serialize()
def test_get_playlist_for_project(self):
self.generate_fixture_playlist()
playlists = playlists_service.all_playlists_for_project(self.project.id)
self.assertEquals(len(playlists), 2)
self.assertTrue(
"Playlist 2" not in [playlists[0]["name"], playlists[1]["name"]]
)
def test_get_playlist_for_episode(self):
self.generate_fixture_playlist()
playlists = playlists_service.all_playlists_for_episode(self.episode.id)
self.assertEquals(len(playlists), 1)
self.assertEquals(playlists[0]["name"], "Playlist 1")
def test_get_playlist(self):
pass
"""
def test_retrieve_playlist_tmp_files(self):
def test_get_playlist_with_preview_file_revisions(self):
def test_set_preview_files_for_shots(self):
def test_build_playlist_zip_file(self):
def test_build_playlist_movie_file(self):
def test_start_build_job(self):
def test_end_build_job(self):
def test_build_playlist_job(self):
def test_get_playlist_file_name(self):
def test_get_playlist_movie_file_path(self):
def test_get_playlist_zip_file_path(self):
def test_get_build_job(self):
def test_remove_build_job(self):
"""
|
|
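`test_get_playlist` above is left as a bare `pass`. One plausible completion, assuming the service exposes a `get_playlist(playlist_id)` lookup that returns a serialized dict (an assumption; the record only imports the module), would be a method like:

def test_get_playlist(self):
    # Hypothetical: get_playlist() is assumed to exist on the service.
    playlist = self.generate_fixture_playlist()  # serializes "Playlist 3"
    result = playlists_service.get_playlist(playlist["id"])
    self.assertEqual(result["name"], "Playlist 3")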
6ab9f5c047c5cb0d76ed5115fa4307e436696699
|
tests/test_composite_association.py
|
tests/test_composite_association.py
|
# flake8: noqa F401,F811
from gaphor import UML
from gaphor.core.modeling import Diagram
from gaphor.diagram.tests.fixtures import (
connect,
create,
diagram,
element_factory,
event_manager,
)
from gaphor.UML.classes import AssociationItem, ClassItem
from gaphor.UML.classes.classespropertypages import AssociationPropertyPage
from gaphor.UML.classes.classestoolbox import composite_association_config
def test_connect_composite_association(create, diagram):
c1 = create(ClassItem, UML.Class)
c2 = create(ClassItem, UML.Class)
a = create(AssociationItem, UML.Association)
composite_association_config(a)
property_page = AssociationPropertyPage(a)
widget = property_page.construct()
connect(a, a.head, c1)
connect(a, a.tail, c2)
|
Add test for composite association with property pages
|
Add test for composite association with property pages
|
Python
|
lgpl-2.1
|
amolenaar/gaphor,amolenaar/gaphor
|
Add test for composite association with property pages
|
# flake8: noqa F401,F811
from gaphor import UML
from gaphor.core.modeling import Diagram
from gaphor.diagram.tests.fixtures import (
connect,
create,
diagram,
element_factory,
event_manager,
)
from gaphor.UML.classes import AssociationItem, ClassItem
from gaphor.UML.classes.classespropertypages import AssociationPropertyPage
from gaphor.UML.classes.classestoolbox import composite_association_config
def test_connect_composite_association(create, diagram):
c1 = create(ClassItem, UML.Class)
c2 = create(ClassItem, UML.Class)
a = create(AssociationItem, UML.Association)
composite_association_config(a)
property_page = AssociationPropertyPage(a)
widget = property_page.construct()
connect(a, a.head, c1)
connect(a, a.tail, c2)
|
<commit_before><commit_msg>Add test for composite association with property pages<commit_after>
|
# flake8: noqa F401,F811
from gaphor import UML
from gaphor.core.modeling import Diagram
from gaphor.diagram.tests.fixtures import (
connect,
create,
diagram,
element_factory,
event_manager,
)
from gaphor.UML.classes import AssociationItem, ClassItem
from gaphor.UML.classes.classespropertypages import AssociationPropertyPage
from gaphor.UML.classes.classestoolbox import composite_association_config
def test_connect_composite_association(create, diagram):
c1 = create(ClassItem, UML.Class)
c2 = create(ClassItem, UML.Class)
a = create(AssociationItem, UML.Association)
composite_association_config(a)
property_page = AssociationPropertyPage(a)
widget = property_page.construct()
connect(a, a.head, c1)
connect(a, a.tail, c2)
|
Add test for composite association with property pages# flake8: noqa F401,F811
from gaphor import UML
from gaphor.core.modeling import Diagram
from gaphor.diagram.tests.fixtures import (
connect,
create,
diagram,
element_factory,
event_manager,
)
from gaphor.UML.classes import AssociationItem, ClassItem
from gaphor.UML.classes.classespropertypages import AssociationPropertyPage
from gaphor.UML.classes.classestoolbox import composite_association_config
def test_connect_composite_association(create, diagram):
c1 = create(ClassItem, UML.Class)
c2 = create(ClassItem, UML.Class)
a = create(AssociationItem, UML.Association)
composite_association_config(a)
property_page = AssociationPropertyPage(a)
widget = property_page.construct()
connect(a, a.head, c1)
connect(a, a.tail, c2)
|
<commit_before><commit_msg>Add test for composite association with property pages<commit_after># flake8: noqa F401,F811
from gaphor import UML
from gaphor.core.modeling import Diagram
from gaphor.diagram.tests.fixtures import (
connect,
create,
diagram,
element_factory,
event_manager,
)
from gaphor.UML.classes import AssociationItem, ClassItem
from gaphor.UML.classes.classespropertypages import AssociationPropertyPage
from gaphor.UML.classes.classestoolbox import composite_association_config
def test_connect_composite_association(create, diagram):
c1 = create(ClassItem, UML.Class)
c2 = create(ClassItem, UML.Class)
a = create(AssociationItem, UML.Association)
composite_association_config(a)
property_page = AssociationPropertyPage(a)
widget = property_page.construct()
connect(a, a.head, c1)
connect(a, a.tail, c2)
|
|
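The test connects both ends but asserts nothing about the result. If the toolbox config is expected to mark the association as composite, an assertion along these lines could close the loop; the exact attribute path in Gaphor's UML model is an assumption here:

# Hypothetical assertions appended to the test body; UML stores the
# aggregation kind on a member end, but which end carries it is assumed.
assert a.subject is not None
assert any(end.aggregation == "composite" for end in a.subject.memberEnd)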
7435cf43180e32fe0b17f3ca839c030d09ab09d5
|
skyfield/tests/test_topos.py
|
skyfield/tests/test_topos.py
|
from skyfield.api import load
from skyfield.positionlib import Geocentric
from skyfield.toposlib import Topos
def ts():
yield load.timescale()
def test_beneath(ts):
t = ts.utc(2018, 1, 19, 14, 37, 55)
def f(xyz): return str(Topos.beneath(Geocentric(xyz, None, t)))
assert f([1, 0, 0]) == 'Topos 00deg 00\' 00.0" N 21deg 46\' 59.4" E'
assert f([0, 0, 1]) == 'Topos 90deg 00\' 00.0" N 21deg 46\' 59.4" E'
assert f([1, 0, 1]) == 'Topos 45deg 00\' 00.0" N 21deg 46\' 59.4" E'
|
Add basic test of Topos.beneath()
|
Add basic test of Topos.beneath()
|
Python
|
mit
|
skyfielders/python-skyfield,skyfielders/python-skyfield
|
Add basic test of Topos.beneath()
|
from skyfield.api import load
from skyfield.positionlib import Geocentric
from skyfield.toposlib import Topos
def ts():
yield load.timescale()
def test_beneath(ts):
t = ts.utc(2018, 1, 19, 14, 37, 55)
def f(xyz): return str(Topos.beneath(Geocentric(xyz, None, t)))
assert f([1, 0, 0]) == 'Topos 00deg 00\' 00.0" N 21deg 46\' 59.4" E'
assert f([0, 0, 1]) == 'Topos 90deg 00\' 00.0" N 21deg 46\' 59.4" E'
assert f([1, 0, 1]) == 'Topos 45deg 00\' 00.0" N 21deg 46\' 59.4" E'
|
<commit_before><commit_msg>Add basic test of Topos.beneath()<commit_after>
|
from skyfield.api import load
from skyfield.positionlib import Geocentric
from skyfield.toposlib import Topos
def ts():
yield load.timescale()
def test_beneath(ts):
t = ts.utc(2018, 1, 19, 14, 37, 55)
def f(xyz): return str(Topos.beneath(Geocentric(xyz, None, t)))
assert f([1, 0, 0]) == 'Topos 00deg 00\' 00.0" N 21deg 46\' 59.4" E'
assert f([0, 0, 1]) == 'Topos 90deg 00\' 00.0" N 21deg 46\' 59.4" E'
assert f([1, 0, 1]) == 'Topos 45deg 00\' 00.0" N 21deg 46\' 59.4" E'
|
Add basic test of Topos.beneath()from skyfield.api import load
from skyfield.positionlib import Geocentric
from skyfield.toposlib import Topos
def ts():
yield load.timescale()
def test_beneath(ts):
t = ts.utc(2018, 1, 19, 14, 37, 55)
def f(xyz): return str(Topos.beneath(Geocentric(xyz, None, t)))
assert f([1, 0, 0]) == 'Topos 00deg 00\' 00.0" N 21deg 46\' 59.4" E'
assert f([0, 0, 1]) == 'Topos 90deg 00\' 00.0" N 21deg 46\' 59.4" E'
assert f([1, 0, 1]) == 'Topos 45deg 00\' 00.0" N 21deg 46\' 59.4" E'
|
<commit_before><commit_msg>Add basic test of Topos.beneath()<commit_after>from skyfield.api import load
from skyfield.positionlib import Geocentric
from skyfield.toposlib import Topos
def ts():
yield load.timescale()
def test_beneath(ts):
t = ts.utc(2018, 1, 19, 14, 37, 55)
def f(xyz): return str(Topos.beneath(Geocentric(xyz, None, t)))
assert f([1, 0, 0]) == 'Topos 00deg 00\' 00.0" N 21deg 46\' 59.4" E'
assert f([0, 0, 1]) == 'Topos 90deg 00\' 00.0" N 21deg 46\' 59.4" E'
assert f([1, 0, 1]) == 'Topos 45deg 00\' 00.0" N 21deg 46\' 59.4" E'
|
|
9e067b8f53c8ee8afae63996e725614e5766059f
|
tests/aggregate/test_many_to_many_relationships.py
|
tests/aggregate/test_many_to_many_relationships.py
|
import sqlalchemy as sa
from sqlalchemy_utils.aggregates import aggregated
from tests import TestCase
class TestAggregatesWithManyToManyRelationships(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
user_group = sa.Table('user_group', self.Base.metadata,
sa.Column('user_id', sa.Integer, sa.ForeignKey('user.id')),
sa.Column('group_id', sa.Integer, sa.ForeignKey('group.id'))
)
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
@aggregated('groups', sa.Column(sa.Integer, default=0))
def group_count(self):
return sa.func.count('1')
groups = sa.orm.relationship(
'Group',
backref='users',
secondary=user_group
)
class Group(self.Base):
__tablename__ = 'group'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
self.User = User
self.Group = Group
def test_assigns_aggregates_on_insert(self):
user = self.User(
name=u'John Matrix'
)
self.session.add(user)
self.session.commit()
group = self.Group(
name=u'Some group',
users=[user]
)
self.session.add(group)
self.session.commit()
self.session.refresh(user)
assert user.group_count == 1
def test_updates_aggregates_on_delete(self):
user = self.User(
name=u'John Matrix'
)
self.session.add(user)
self.session.commit()
group = self.Group(
name=u'Some group',
users=[user]
)
self.session.add(group)
self.session.commit()
self.session.refresh(user)
user.groups = []
self.session.commit()
self.session.refresh(user)
assert user.group_count == 0
|
Add tests for many to many aggregates
|
Add tests for many to many aggregates
|
Python
|
bsd-3-clause
|
JackWink/sqlalchemy-utils,rmoorman/sqlalchemy-utils,marrybird/sqlalchemy-utils,konstantinoskostis/sqlalchemy-utils,cheungpat/sqlalchemy-utils,joshfriend/sqlalchemy-utils,tonyseek/sqlalchemy-utils,tonyseek/sqlalchemy-utils,joshfriend/sqlalchemy-utils,spoqa/sqlalchemy-utils
|
Add tests for many to many aggregates
|
import sqlalchemy as sa
from sqlalchemy_utils.aggregates import aggregated
from tests import TestCase
class TestAggregatesWithManyToManyRelationships(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
user_group = sa.Table('user_group', self.Base.metadata,
sa.Column('user_id', sa.Integer, sa.ForeignKey('user.id')),
sa.Column('group_id', sa.Integer, sa.ForeignKey('group.id'))
)
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
@aggregated('groups', sa.Column(sa.Integer, default=0))
def group_count(self):
return sa.func.count('1')
groups = sa.orm.relationship(
'Group',
backref='users',
secondary=user_group
)
class Group(self.Base):
__tablename__ = 'group'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
self.User = User
self.Group = Group
def test_assigns_aggregates_on_insert(self):
user = self.User(
name=u'John Matrix'
)
self.session.add(user)
self.session.commit()
group = self.Group(
name=u'Some group',
users=[user]
)
self.session.add(group)
self.session.commit()
self.session.refresh(user)
assert user.group_count == 1
def test_updates_aggregates_on_delete(self):
user = self.User(
name=u'John Matrix'
)
self.session.add(user)
self.session.commit()
group = self.Group(
name=u'Some group',
users=[user]
)
self.session.add(group)
self.session.commit()
self.session.refresh(user)
user.groups = []
self.session.commit()
self.session.refresh(user)
assert user.group_count == 0
|
<commit_before><commit_msg>Add tests for many to many aggregates<commit_after>
|
import sqlalchemy as sa
from sqlalchemy_utils.aggregates import aggregated
from tests import TestCase
class TestAggregatesWithManyToManyRelationships(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
user_group = sa.Table('user_group', self.Base.metadata,
sa.Column('user_id', sa.Integer, sa.ForeignKey('user.id')),
sa.Column('group_id', sa.Integer, sa.ForeignKey('group.id'))
)
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
@aggregated('groups', sa.Column(sa.Integer, default=0))
def group_count(self):
return sa.func.count('1')
groups = sa.orm.relationship(
'Group',
backref='users',
secondary=user_group
)
class Group(self.Base):
__tablename__ = 'group'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
self.User = User
self.Group = Group
def test_assigns_aggregates_on_insert(self):
user = self.User(
name=u'John Matrix'
)
self.session.add(user)
self.session.commit()
group = self.Group(
name=u'Some group',
users=[user]
)
self.session.add(group)
self.session.commit()
self.session.refresh(user)
assert user.group_count == 1
def test_updates_aggregates_on_delete(self):
user = self.User(
name=u'John Matrix'
)
self.session.add(user)
self.session.commit()
group = self.Group(
name=u'Some group',
users=[user]
)
self.session.add(group)
self.session.commit()
self.session.refresh(user)
user.groups = []
self.session.commit()
self.session.refresh(user)
assert user.group_count == 0
|
Add tests for many to many aggregatesimport sqlalchemy as sa
from sqlalchemy_utils.aggregates import aggregated
from tests import TestCase
class TestAggregatesWithManyToManyRelationships(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
user_group = sa.Table('user_group', self.Base.metadata,
sa.Column('user_id', sa.Integer, sa.ForeignKey('user.id')),
sa.Column('group_id', sa.Integer, sa.ForeignKey('group.id'))
)
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
@aggregated('groups', sa.Column(sa.Integer, default=0))
def group_count(self):
return sa.func.count('1')
groups = sa.orm.relationship(
'Group',
backref='users',
secondary=user_group
)
class Group(self.Base):
__tablename__ = 'group'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
self.User = User
self.Group = Group
def test_assigns_aggregates_on_insert(self):
user = self.User(
name=u'John Matrix'
)
self.session.add(user)
self.session.commit()
group = self.Group(
name=u'Some group',
users=[user]
)
self.session.add(group)
self.session.commit()
self.session.refresh(user)
assert user.group_count == 1
def test_updates_aggregates_on_delete(self):
user = self.User(
name=u'John Matrix'
)
self.session.add(user)
self.session.commit()
group = self.Group(
name=u'Some group',
users=[user]
)
self.session.add(group)
self.session.commit()
self.session.refresh(user)
user.groups = []
self.session.commit()
self.session.refresh(user)
assert user.group_count == 0
|
<commit_before><commit_msg>Add tests for many to many aggregates<commit_after>import sqlalchemy as sa
from sqlalchemy_utils.aggregates import aggregated
from tests import TestCase
class TestAggregatesWithManyToManyRelationships(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
user_group = sa.Table('user_group', self.Base.metadata,
sa.Column('user_id', sa.Integer, sa.ForeignKey('user.id')),
sa.Column('group_id', sa.Integer, sa.ForeignKey('group.id'))
)
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
@aggregated('groups', sa.Column(sa.Integer, default=0))
def group_count(self):
return sa.func.count('1')
groups = sa.orm.relationship(
'Group',
backref='users',
secondary=user_group
)
class Group(self.Base):
__tablename__ = 'group'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
self.User = User
self.Group = Group
def test_assigns_aggregates_on_insert(self):
user = self.User(
name=u'John Matrix'
)
self.session.add(user)
self.session.commit()
group = self.Group(
name=u'Some group',
users=[user]
)
self.session.add(group)
self.session.commit()
self.session.refresh(user)
assert user.group_count == 1
def test_updates_aggregates_on_delete(self):
user = self.User(
name=u'John Matrix'
)
self.session.add(user)
self.session.commit()
group = self.Group(
name=u'Some group',
users=[user]
)
self.session.add(group)
self.session.commit()
self.session.refresh(user)
user.groups = []
self.session.commit()
self.session.refresh(user)
assert user.group_count == 0
|
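Aside: the two tests above cover the first insert and the removal of all groups. A natural third case, sketched below against the same User/Group models, would pin down whether the counter keeps tracking additions made after the first commit; that behaviour is assumed here, not taken from the library's docs:
def test_updates_aggregates_on_later_insert(self):
    user = self.User(name=u'John Matrix')
    self.session.add(user)
    self.session.commit()
    # Attach the user to two groups in separate transactions.
    for name in (u'First group', u'Second group'):
        self.session.add(self.Group(name=name, users=[user]))
        self.session.commit()
    self.session.refresh(user)
    assert user.group_count == 2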
|
06d6f989abacb048e54585847dec37cb55064685
|
benchmark/datasets/musicbrainz/extract-random-queries.py
|
benchmark/datasets/musicbrainz/extract-random-queries.py
|
#!/usr/bin/env python
"""
Script to extract and then generate random queries for fuzzy searching.
Usage:
./extract-random-queries.py <infile> <outfile>
"""
import os
from random import choice, randint, random
import string
from subprocess import call
import sys
from tempfile import mkstemp
__author__ = "Uwe L. Korn"
__license__ = "MIT"
input_file = sys.argv[1]
output_file = sys.argv[2]
# Randomly select 1000 lines from the input file and store them temporarily.
temp_f, temp_file = mkstemp()
call(['shuf', '-n', '1000', input_file, '-o', temp_file])
# Modify these queries so that they have a non-zero edit distance.
with open(temp_file, 'r') as f:
with open(output_file, 'w') as out:
for line in f.readlines():
if random() > 0.75:
pos = randint(0, len(line) - 2)
line = line[0:pos] + choice(string.ascii_lowercase) + line[pos + 1:]
if random() > 0.25:
pos = randint(0, len(line) - 2)
line = line[0:pos] + choice(string.ascii_lowercase) + line[pos + 1:]
out.write(line)
# Remove the temporary file again.
os.unlink(temp_file)
|
Add script to generate random Levenshtein queries from a set of strings
|
Add script to generate random Levenshtein queries from a set of strings
|
Python
|
mit
|
xhochy/libfuzzymatch,xhochy/libfuzzymatch
|
Add script to generate random Levenshtein queries from a set of strings
|
#!/usr/bin/env python
"""
Script to extract and then generate random queries for fuzzy searching.
Usage:
./extract-random-queries.py <infile> <outfile>
"""
import os
from random import choice, randint, random
import string
from subprocess import call
import sys
from tempfile import mkstemp
__author__ = "Uwe L. Korn"
__license__ = "MIT"
input_file = sys.argv[1]
output_file = sys.argv[2]
# Randomly select 1000 lines from the input file and store them temporarily.
temp_f, temp_file = mkstemp()
call(['shuf', '-n', '1000', input_file, '-o', temp_file])
# Modify these queries so that they have a non-zero edit distance.
with open(temp_file, 'r') as f:
with open(output_file, 'w') as out:
for line in f.readlines():
if random() > 0.75:
pos = randint(0, len(line) - 2)
line = line[0:pos] + choice(string.ascii_lowercase) + line[pos + 1:]
if random() > 0.25:
pos = randint(0, len(line) - 2)
line = line[0:pos] + choice(string.ascii_lowercase) + line[pos + 1:]
out.write(line)
# Remove the temporary file again.
os.unlink(temp_file)
|
<commit_before><commit_msg>Add script to generate random Levenshtein queries from a set of strings<commit_after>
|
#!/usr/bin/env python
"""
Script to extract and then generate random queries for fuzzy searching.
Usage:
./extract-random-queries.py <infile> <outfile>
"""
import os
from random import choice, randint, random
import string
from subprocess import call
import sys
from tempfile import mkstemp
__author__ = "Uwe L. Korn"
__license__ = "MIT"
input_file = sys.argv[1]
output_file = sys.argv[2]
# Randomly select 1000 lines from the input file and store them temporarily.
temp_f, temp_file = mkstemp()
call(['shuf', '-n', '1000', input_file, '-o', temp_file])
# Modify these queries so that they have a non-zero edit distance.
with open(temp_file, 'r') as f:
with open(output_file, 'w') as out:
for line in f.readlines():
if random() > 0.75:
pos = randint(0, len(line) - 2)
line = line[0:pos] + choice(string.ascii_lowercase) + line[pos + 1:]
if random() > 0.25:
pos = randint(0, len(line) - 2)
line = line[0:pos] + choice(string.ascii_lowercase) + line[pos + 1:]
out.write(line)
# Remove the temporary file again.
os.unlink(temp_file)
|
Add script to generate random Levenshtein queries from a set of strings#!/usr/bin/env python
"""
Script to extract and then generate random queries for fuzzy searching.
Usage:
./extract-random-queries.py <infile> <outfile>
"""
import os
from random import choice, randint, random
import string
from subprocess import call
import sys
from tempfile import mkstemp
__author__ = "Uwe L. Korn"
__license__ = "MIT"
input_file = sys.argv[1]
output_file = sys.argv[2]
# Randomly select 1000 lines from the input file and store them temporarily.
temp_f, temp_file = mkstemp()
call(['shuf', '-n', '1000', input_file, '-o', temp_file])
# Modify these queries so that they have a non-zero edit distance.
with open(temp_file, 'r') as f:
with open(output_file, 'w') as out:
for line in f.readlines():
if random() > 0.75:
pos = randint(0, len(line) - 2)
line = line[0:pos] + choice(string.ascii_lowercase) + line[pos + 1:]
if random() > 0.25:
pos = randint(0, len(line) - 2)
line = line[0:pos] + choice(string.ascii_lowercase) + line[pos + 1:]
out.write(line)
# Remove the temporary file again.
os.unlink(temp_file)
|
<commit_before><commit_msg>Add script to generate random Levenshtein queries from a set of strings<commit_after>#!/usr/bin/env python
"""
Script to extract and then generate random queries for fuzzy searching.
Usage:
./extract-random-queries.py <infile> <outfile>
"""
import os
from random import choice, randint, random
import string
from subprocess import call
import sys
from tempfile import mkstemp
__author__ = "Uwe L. Korn"
__license__ = "MIT"
input_file = sys.argv[1]
output_file = sys.argv[2]
# Randomly select 1000 lines from the input file and store them temporarily.
temp_f, temp_file = mkstemp()
call(['shuf', '-n', '1000', input_file, '-o', temp_file])
# Modify these queries so that they have a non-zero edit distance.
with open(temp_file, 'r') as f:
with open(output_file, 'w') as out:
for line in f.readlines():
if random() > 0.75:
pos = randint(0, len(line) - 2)
line = line[0:pos] + choice(string.ascii_lowercase) + line[pos + 1:]
if random() > 0.25:
pos = randint(0, len(line) - 2)
line = line[0:pos] + choice(string.ascii_lowercase) + line[pos + 1:]
out.write(line)
# Remove the temporary file again.
os.unlink(temp_file)
|
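Aside: the mutation step above does not quite guarantee a non-zero edit distance: about 19% of lines (0.75 * 0.25) take neither branch, and choice() can draw the very character already at pos. A self-contained variant that always changes the line, assuming lines are non-empty and newline-terminated (the helper name is illustrative, not from the repository):
from random import choice, randint
import string

def mutate_line(line):
    # Pick any position except the trailing newline, mirroring the
    # randint(0, len(line) - 2) bound used in the script above.
    pos = randint(0, len(line) - 2)
    # Re-draw so the substitute always differs from the original character.
    new = choice([c for c in string.ascii_lowercase if c != line[pos]])
    return line[:pos] + new + line[pos + 1:]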
|
02568861b778728f53fbec3a2d06875add0861de
|
csunplugged/tests/general/urls/test_health_check.py
|
csunplugged/tests/general/urls/test_health_check.py
|
from tests.BaseTestWithDB import BaseTestWithDB
from django.urls import reverse
class HealthCheckURLTest(BaseTestWithDB):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.language = 'en'
def test_valid_health_check_request(self):
response = self.client.get('/_ah/health')
self.assertEqual(200, response.status_code)
|
Test health check URL response
|
Test health check URL response
|
Python
|
mit
|
uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged
|
Test health check URL response
|
from tests.BaseTestWithDB import BaseTestWithDB
from django.urls import reverse
class HealthCheckURLTest(BaseTestWithDB):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.language = 'en'
def test_valid_health_check_request(self):
response = self.client.get('/_ah/health')
self.assertEqual(200, response.status_code)
|
<commit_before><commit_msg>Test health check URL response<commit_after>
|
from tests.BaseTestWithDB import BaseTestWithDB
from django.urls import reverse
class HealthCheckURLTest(BaseTestWithDB):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.language = 'en'
def test_valid_health_check_request(self):
response = self.client.get('/_ah/health')
self.assertEqual(200, response.status_code)
|
Test health check URL responsefrom tests.BaseTestWithDB import BaseTestWithDB
from django.urls import reverse
class HealthCheckURLTest(BaseTestWithDB):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.language = 'en'
def test_valid_health_check_request(self):
response = self.client.get('/_ah/health')
self.assertEqual(200, response.status_code)
|
<commit_before><commit_msg>Test health check URL response<commit_after>from tests.BaseTestWithDB import BaseTestWithDB
from django.urls import reverse
class HealthCheckURLTest(BaseTestWithDB):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.language = 'en'
def test_valid_health_check_request(self):
response = self.client.get('/_ah/health')
self.assertEqual(200, response.status_code)
|
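Aside: /_ah/health is the path Google App Engine's legacy health checker polls, so the view under test only has to answer 200. The sketch below is not taken from the repository; it shows the minimal kind of route such a test implies, with an illustrative view name:
from django.http import HttpResponse
from django.urls import path

def health_check(request):
    # App Engine only cares about the status code, not the body.
    return HttpResponse(status=200)

urlpatterns = [
    path('_ah/health', health_check),
]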
|
fa77d7d83ed9150670ac374f1494b38f2338217a
|
migrations/versions/0028_add_default_permissions.py
|
migrations/versions/0028_add_default_permissions.py
|
"""empty message
Revision ID: 0028_add_default_permissions
Revises: 0027_add_service_permission
Create Date: 2016-02-26 10:33:20.536362
"""
# revision identifiers, used by Alembic.
revision = '0028_add_default_permissions'
down_revision = '0027_add_service_permission'
import uuid
from datetime import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
user_services = conn.execute("SELECT * FROM user_to_service").fetchall()
for entry in user_services:
id_ = uuid.uuid4()
created_at = datetime.now().isoformat().replace('T', ' ')
conn.execute((
"INSERT INTO permissions (id, user_id, service_id, permission, created_at)"
" VALUES ('{}', '{}', '{}', 'manage_service', '{}')").format(id_, entry[0], entry[1], created_at))
id_ = uuid.uuid4()
conn.execute((
"INSERT INTO permissions (id, user_id, service_id, permission, created_at)"
" VALUES ('{}', '{}', '{}', 'send_messages', '{}')").format(id_, entry[0], entry[1], created_at))
id_ = uuid.uuid4()
conn.execute((
"INSERT INTO permissions (id, user_id, service_id, permission, created_at)"
" VALUES ('{}', '{}', '{}', 'manage_api_keys', '{}')").format(id_, entry[0], entry[1], created_at))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
conn.execute("DELETE FROM permissions")
### end Alembic commands ###
|
Add default permissions for existing services.
|
Add default permissions for existing services.
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add default permissions for existing services.
|
"""empty message
Revision ID: 0028_add_default_permissions
Revises: 0027_add_service_permission
Create Date: 2016-02-26 10:33:20.536362
"""
# revision identifiers, used by Alembic.
revision = '0028_add_default_permissions'
down_revision = '0027_add_service_permission'
import uuid
from datetime import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
user_services = conn.execute("SELECT * FROM user_to_service").fetchall()
for entry in user_services:
id_ = uuid.uuid4()
created_at = datetime.now().isoformat().replace('T', ' ')
conn.execute((
"INSERT INTO permissions (id, user_id, service_id, permission, created_at)"
" VALUES ('{}', '{}', '{}', 'manage_service', '{}')").format(id_, entry[0], entry[1], created_at))
id_ = uuid.uuid4()
conn.execute((
"INSERT INTO permissions (id, user_id, service_id, permission, created_at)"
" VALUES ('{}', '{}', '{}', 'send_messages', '{}')").format(id_, entry[0], entry[1], created_at))
id_ = uuid.uuid4()
conn.execute((
"INSERT INTO permissions (id, user_id, service_id, permission, created_at)"
" VALUES ('{}', '{}', '{}', 'manage_api_keys', '{}')").format(id_, entry[0], entry[1], created_at))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
conn.execute("DELETE FROM permissions")
### end Alembic commands ###
|
<commit_before><commit_msg>Add default permissions for existing services.<commit_after>
|
"""empty message
Revision ID: 0028_add_default_permissions
Revises: 0027_add_service_permission
Create Date: 2016-02-26 10:33:20.536362
"""
# revision identifiers, used by Alembic.
revision = '0028_add_default_permissions'
down_revision = '0027_add_service_permission'
import uuid
from datetime import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
user_services = conn.execute("SELECT * FROM user_to_service").fetchall()
for entry in user_services:
id_ = uuid.uuid4()
created_at = datetime.now().isoformat().replace('T', ' ')
conn.execute((
"INSERT INTO permissions (id, user_id, service_id, permission, created_at)"
" VALUES ('{}', '{}', '{}', 'manage_service', '{}')").format(id_, entry[0], entry[1], created_at))
id_ = uuid.uuid4()
conn.execute((
"INSERT INTO permissions (id, user_id, service_id, permission, created_at)"
" VALUES ('{}', '{}', '{}', 'send_messages', '{}')").format(id_, entry[0], entry[1], created_at))
id_ = uuid.uuid4()
conn.execute((
"INSERT INTO permissions (id, user_id, service_id, permission, created_at)"
" VALUES ('{}', '{}', '{}', 'manage_api_keys', '{}')").format(id_, entry[0], entry[1], created_at))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
conn.execute("DELETE FROM permissions")
### end Alembic commands ###
|
Add default permissions for existing services."""empty message
Revision ID: 0028_add_default_permissions
Revises: 0027_add_service_permission
Create Date: 2016-02-26 10:33:20.536362
"""
# revision identifiers, used by Alembic.
revision = '0028_add_default_permissions'
down_revision = '0027_add_service_permission'
import uuid
from datetime import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
user_services = conn.execute("SELECT * FROM user_to_service").fetchall()
for entry in user_services:
id_ = uuid.uuid4()
created_at = datetime.now().isoformat().replace('T', ' ')
conn.execute((
"INSERT INTO permissions (id, user_id, service_id, permission, created_at)"
" VALUES ('{}', '{}', '{}', 'manage_service', '{}')").format(id_, entry[0], entry[1], created_at))
id_ = uuid.uuid4()
conn.execute((
"INSERT INTO permissions (id, user_id, service_id, permission, created_at)"
" VALUES ('{}', '{}', '{}', 'send_messages', '{}')").format(id_, entry[0], entry[1], created_at))
id_ = uuid.uuid4()
conn.execute((
"INSERT INTO permissions (id, user_id, service_id, permission, created_at)"
" VALUES ('{}', '{}', '{}', 'manage_api_keys', '{}')").format(id_, entry[0], entry[1], created_at))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
conn.execute("DELETE FROM permissions")
### end Alembic commands ###
|
<commit_before><commit_msg>Add default permissions for existing services.<commit_after>"""empty message
Revision ID: 0028_add_default_permissions
Revises: 0027_add_service_permission
Create Date: 2016-02-26 10:33:20.536362
"""
# revision identifiers, used by Alembic.
revision = '0028_add_default_permissions'
down_revision = '0027_add_service_permission'
import uuid
from datetime import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
user_services = conn.execute("SELECT * FROM user_to_service").fetchall()
for entry in user_services:
id_ = uuid.uuid4()
created_at = datetime.now().isoformat().replace('T', ' ')
conn.execute((
"INSERT INTO permissions (id, user_id, service_id, permission, created_at)"
" VALUES ('{}', '{}', '{}', 'manage_service', '{}')").format(id_, entry[0], entry[1], created_at))
id_ = uuid.uuid4()
conn.execute((
"INSERT INTO permissions (id, user_id, service_id, permission, created_at)"
" VALUES ('{}', '{}', '{}', 'send_messages', '{}')").format(id_, entry[0], entry[1], created_at))
id_ = uuid.uuid4()
conn.execute((
"INSERT INTO permissions (id, user_id, service_id, permission, created_at)"
" VALUES ('{}', '{}', '{}', 'manage_api_keys', '{}')").format(id_, entry[0], entry[1], created_at))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
conn = op.get_bind()
conn.execute("DELETE FROM permissions")
### end Alembic commands ###
|
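Aside: two details of the migration above are worth noting. The downgrade deletes every row in permissions, not just the rows the upgrade added, so it is lossy if other permissions exist by then. The INSERTs also build SQL with str.format; that is tolerable here because every value is generated server-side (uuid4, an ISO timestamp, ids read back from the database), but bound parameters are the usual idiom. A sketch with an illustrative helper name (the keyword-argument execute style matches the older SQLAlchemy releases this migration targets):
import uuid
from datetime import datetime
from sqlalchemy import text

def insert_permission(conn, user_id, service_id, permission):
    # Values travel as bound parameters instead of being formatted into SQL.
    conn.execute(
        text(
            "INSERT INTO permissions (id, user_id, service_id, permission, created_at) "
            "VALUES (:id, :user_id, :service_id, :permission, :created_at)"
        ),
        id=str(uuid.uuid4()),
        user_id=user_id,
        service_id=service_id,
        permission=permission,
        created_at=datetime.now().isoformat().replace('T', ' '),
    )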
|
8c84bbc10a08c783fed22209402ead4672754f57
|
tests/test_iati_standard.py
|
tests/test_iati_standard.py
|
from web_test_base import *
class TestIATIStandard(WebTestBase):
requests_to_load = {
'IATI Standard Homepage - no www': {
'url': 'http://iatistandard.org'
},
'IATI Standard Homepage - with www': {
'url': 'http://www.iatistandard.org'
}
}
def test_contains_links(self, loaded_request):
"""
Test that each page contains links to the defined URLs.
"""
result = utility.get_links_from_page(loaded_request)
# Selection of header links
assert "/en/news/" in result
assert "/en/about/" in result
assert "/en/iati-standard/" in result
assert "/en/using-data/" in result
# Selection of footer links
assert "/en/contact/" in result
assert "/en/terms-and-conditions/" in result
assert "/en/privacy-policy/" in result
|
Add basic tests for expected links on main site homepage
|
Add basic tests for expected links on main site homepage
|
Python
|
mit
|
IATI/IATI-Website-Tests
|
Add basic tests for expected links on main site homepage
|
from web_test_base import *
class TestIATIStandard(WebTestBase):
requests_to_load = {
'IATI Standard Homepage - no www': {
'url': 'http://iatistandard.org'
},
'IATI Standard Homepage - with www': {
'url': 'http://www.iatistandard.org'
}
}
def test_contains_links(self, loaded_request):
"""
Test that each page contains links to the defined URLs.
"""
result = utility.get_links_from_page(loaded_request)
# Selection of header links
assert "/en/news/" in result
assert "/en/about/" in result
assert "/en/iati-standard/" in result
assert "/en/using-data/" in result
# Selection of footer links
assert "/en/contact/" in result
assert "/en/terms-and-conditions/" in result
assert "/en/privacy-policy/" in result
|
<commit_before><commit_msg>Add basic tests for expected links on main site homepage<commit_after>
|
from web_test_base import *
class TestIATIStandard(WebTestBase):
requests_to_load = {
'IATI Standard Homepage - no www': {
'url': 'http://iatistandard.org'
},
'IATI Standard Homepage - with www': {
'url': 'http://www.iatistandard.org'
}
}
def test_contains_links(self, loaded_request):
"""
Test that each page contains links to the defined URLs.
"""
result = utility.get_links_from_page(loaded_request)
# Selection of header links
assert "/en/news/" in result
assert "/en/about/" in result
assert "/en/iati-standard/" in result
assert "/en/using-data/" in result
# Selection of footer links
assert "/en/contact/" in result
assert "/en/terms-and-conditions/" in result
assert "/en/privacy-policy/" in result
|
Add basic tests for expected links on main site homepagefrom web_test_base import *
class TestIATIStandard(WebTestBase):
requests_to_load = {
'IATI Standard Homepage - no www': {
'url': 'http://iatistandard.org'
},
'IATI Standard Homepage - with www': {
'url': 'http://www.iatistandard.org'
}
}
def test_contains_links(self, loaded_request):
"""
Test that each page contains links to the defined URLs.
"""
result = utility.get_links_from_page(loaded_request)
# Selection of header links
assert "/en/news/" in result
assert "/en/about/" in result
assert "/en/iati-standard/" in result
assert "/en/using-data/" in result
# Selection of footer links
assert "/en/contact/" in result
assert "/en/terms-and-conditions/" in result
assert "/en/privacy-policy/" in result
|
<commit_before><commit_msg>Add basic tests for expected links on main site homepage<commit_after>from web_test_base import *
class TestIATIStandard(WebTestBase):
requests_to_load = {
'IATI Standard Homepage - no www': {
'url': 'http://iatistandard.org'
},
'IATI Standard Homepage - with www': {
'url': 'http://www.iatistandard.org'
}
}
def test_contains_links(self, loaded_request):
"""
Test that each page contains links to the defined URLs.
"""
result = utility.get_links_from_page(loaded_request)
# Selection of header links
assert "/en/news/" in result
assert "/en/about/" in result
assert "/en/iati-standard/" in result
assert "/en/using-data/" in result
# Selection of footer links
assert "/en/contact/" in result
assert "/en/terms-and-conditions/" in result
assert "/en/privacy-policy/" in result
|
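Aside: the assertions above lean on a get_links_from_page() helper whose real implementation lives in the project's utility module (pulled in through the star import). A plausible minimal version, assuming the loaded request behaves like a requests response object:
from bs4 import BeautifulSoup

def get_links_from_page(response):
    # Collect the href of every anchor tag on the fetched page.
    soup = BeautifulSoup(response.text, 'html.parser')
    return [a['href'] for a in soup.find_all('a', href=True)]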
|
7961d4f6b37c8cb40d46793b8016706340779825
|
zerver/lib/push_notifications.py
|
zerver/lib/push_notifications.py
|
from __future__ import absolute_import
from zerver.models import UserProfile, AppleDeviceToken
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.decorator import statsd_increment
from apnsclient import Session, Connection, Message, APNs
from django.conf import settings
import base64, binascii, logging
# Maintain a long-lived Session object to avoid having to re-SSL-handshake
# for each request
session = Session()
connection = session.get_connection(settings.APNS_SANDBOX, cert_file=settings.APNS_CERT_FILE)
def num_push_devices_for_user(user_profile):
return AppleDeviceToken.objects.filter(user=user_profile).count()
# We store the token as b64, but apns-client wants hex strings
def b64_to_hex(data):
return binascii.hexlify(base64.b64decode(data))
def hex_to_b64(data):
return base64.b64encode(binascii.unhexlify(data))
# Send a push notification to the desired clients
# extra_data is a dict that will be passed to the
# mobile app
@statsd_increment("apple_push_notification")
def send_apple_push_notification(user, alert, **extra_data):
    # Sends a push notification to all the PushClients
# Only Apple Push Notifications clients are supported at the moment
tokens = [b64_to_hex(device.token) for device in AppleDeviceToken.objects.filter(user=user)]
logging.info("Sending apple push notification to devices: %s" % (tokens,))
message = Message(tokens, alert=alert, **extra_data)
apns_client = APNs(connection)
ret = apns_client.send(message)
if not ret:
logging.warning("Failed to send push notification for clients %s" % (tokens,))
return
for token, reason in ret.failed.items():
code, errmsg = reason
logging.warning("Failed to deliver APNS notification to %s, reason: %s" % (token, errmsg))
# Check failures not related to devices.
for code, errmsg in ret.errors:
logging.warning("Unknown error when delivering APNS: %s" % (errmsg,))
if ret.needs_retry():
# TODO handle retrying by potentially scheduling a background job
# or re-queueing
logging.warning("APNS delivery needs a retry but ignoring")
# NOTE: This is used by the check_apns_tokens manage.py command. Do not call it otherwise, as the
# feedback() call can take up to 15s
def check_apns_feedback():
apns_client = APNs(connection, tail_timeout=20)
for token, since in apns_client.feedback():
since_date = timestamp_to_datetime(since)
logging.info("Found unavailable token %s, unavailable since %s" % (token, since_date))
AppleDeviceToken.objects.filter(token=hex_to_b64(token), last_updates__lt=since_date).delete()
logging.info("Finished checking feedback for stale tokens")
|
Add a push notification module to handle mobile client notifications
|
Add a push notification module to handle mobile client notifications
(imported from commit 3061a6e2d845226d3dce5bb262deb3a896e54f07)
|
Python
|
apache-2.0
|
deer-hope/zulip,susansls/zulip,ashwinirudrappa/zulip,wangdeshui/zulip,zachallaun/zulip,johnny9/zulip,bluesea/zulip,paxapy/zulip,vikas-parashar/zulip,deer-hope/zulip,nicholasbs/zulip,saitodisse/zulip,jeffcao/zulip,dawran6/zulip,zorojean/zulip,wdaher/zulip,Cheppers/zulip,jimmy54/zulip,sharmaeklavya2/zulip,krtkmj/zulip,PhilSk/zulip,seapasulli/zulip,fw1121/zulip,mdavid/zulip,zacps/zulip,jessedhillon/zulip,shaunstanislaus/zulip,MariaFaBella85/zulip,easyfmxu/zulip,calvinleenyc/zulip,LeeRisk/zulip,j831/zulip,bowlofstew/zulip,ufosky-server/zulip,ericzhou2008/zulip,arpitpanwar/zulip,jerryge/zulip,babbage/zulip,Galexrt/zulip,ApsOps/zulip,jessedhillon/zulip,shaunstanislaus/zulip,MariaFaBella85/zulip,DazWorrall/zulip,Turupawn/website,lutris/website,hengqujushi/zulip,tdr130/zulip,thomasboyt/zulip,wdaher/zulip,peguin40/zulip,umkay/zulip,ahmadassaf/zulip,zhaoweigg/zulip,blaze225/zulip,zorojean/zulip,zorojean/zulip,praveenaki/zulip,hustlzp/zulip,dwrpayne/zulip,pradiptad/zulip,moria/zulip,christi3k/zulip,adnanh/zulip,themass/zulip,xuxiao/zulip,easyfmxu/zulip,tdr130/zulip,jeffcao/zulip,jphilipsen05/zulip,bowlofstew/zulip,jerryge/zulip,levixie/zulip,sonali0901/zulip,bastianh/zulip,hafeez3000/zulip,EasonYi/zulip,natanovia/zulip,lfranchi/zulip,ufosky-server/zulip,Suninus/zulip,krtkmj/zulip,JPJPJPOPOP/zulip,SmartPeople/zulip,niftynei/zulip,johnnygaddarr/zulip,LAndreas/zulip,qq1012803704/zulip,aliceriot/zulip,zhaoweigg/zulip,xuanhan863/zulip,kokoar/zulip,thomasboyt/zulip,hafeez3000/zulip,Frouk/zulip,sharmaeklavya2/zulip,karamcnair/zulip,ahmadassaf/zulip,schatt/zulip,m1ssou/zulip,avastu/zulip,jimmy54/zulip,KingxBanana/zulip,Suninus/zulip,wangdeshui/zulip,joshisa/zulip,joshisa/zulip,itnihao/zulip,levixie/zulip,jerryge/zulip,wweiradio/zulip,KJin99/zulip,isht3/zulip,he15his/zulip,jphilipsen05/zulip,wdaher/zulip,fw1121/zulip,dnmfarrell/zulip,thomasboyt/zulip,shaunstanislaus/zulip,hustlzp/zulip,jonesgithub/zulip,susansls/zulip,RobotCaleb/zulip,souravbadami/zulip,yuvipanda/zulip,rishig/zulip,proliming/zulip,susansls/zulip,bastianh/zulip,amanharitsh123/zulip,hackerkid/zulip,xuanhan863/zulip,JanzTam/zulip,Batterfii/zulip,Vallher/zulip,dhcrzf/zulip,stamhe/zulip,alliejones/zulip,moria/zulip,LeeRisk/zulip,DazWorrall/zulip,souravbadami/zulip,jainayush975/zulip,natanovia/zulip,qq1012803704/zulip,ashwinirudrappa/zulip,thomasboyt/zulip,hustlzp/zulip,EasonYi/zulip,natanovia/zulip,ahmadassaf/zulip,zofuthan/zulip,amanharitsh123/zulip,wavelets/zulip,kou/zulip,rht/zulip,JanzTam/zulip,wdaher/zulip,willingc/zulip,sharmaeklavya2/zulip,seapasulli/zulip,yocome/zulip,amyliu345/zulip,Jianchun1/zulip,eeshangarg/zulip,zorojean/zulip,bluesea/zulip,SmartPeople/zulip,aps-sids/zulip,easyfmxu/zulip,dxq-git/zulip,ashwinirudrappa/zulip,KJin99/zulip,nicholasbs/zulip,Frouk/zulip,jainayush975/zulip,pradiptad/zulip,zofuthan/zulip,kaiyuanheshang/zulip,arpith/zulip,itnihao/zulip,dxq-git/zulip,Cheppers/zulip,ahmadassaf/zulip,hayderimran7/zulip,shrikrishnaholla/zulip,amyliu345/zulip,Juanvulcano/zulip,so0k/zulip,firstblade/zulip,peguin40/zulip,nicholasbs/zulip,saitodisse/zulip,praveenaki/zulip,technicalpickles/zulip,JanzTam/zulip,jackrzhang/zulip,luyifan/zulip,mdavid/zulip,he15his/zulip,vaidap/zulip,ericzhou2008/zulip,amallia/zulip,saitodisse/zulip,swinghu/zulip,vaidap/zulip,arpitpanwar/zulip,rishig/zulip,ipernet/zulip,reyha/zulip,johnny9/zulip,aakash-cr7/zulip,MariaFaBella85/zulip,gkotian/zulip,babbage/zulip,timabbott/zulip,verma-varsha/zulip,hj3938/zulip,PhilSk/zulip,themass/zulip,moria/zulip,mohsenSy/zulip,eastlhu/zulip,littledogboy/zulip,gkotian/zulip,KJin99/zulip,Galexrt/zulip,joshisa/zulip,zacps/zulip,suxinde2009/zulip,firstblade/zulip,johnny9/zulip,isht3/zulip,hj3938/zulip,nicholasbs/zulip,ryansnowboarder/zulip,levixie/zulip,zwily/zulip,cosmicAsymmetry/zulip,showell/zulip,moria/zulip,lfranchi/zulip,andersk/zulip,paxapy/zulip,punchagan/zulip,suxinde2009/zulip,noroot/zulip,m1ssou/zulip,jphilipsen05/zulip,bitemyapp/zulip,bastianh/zulip,synicalsyntax/zulip,esander91/zulip,eeshangarg/zulip,shrikrishnaholla/zulip,joshisa/zulip,schatt/zulip,JanzTam/zulip,willingc/zulip,suxinde2009/zulip,jimmy54/zulip,huangkebo/zulip,DazWorrall/zulip,developerfm/zulip,dattatreya303/zulip,hackerkid/zulip,esander91/zulip,MayB/zulip,Diptanshu8/zulip,udxxabp/zulip,sup95/zulip,moria/zulip,mahim97/zulip,jimmy54/zulip,MariaFaBella85/zulip,samatdav/zulip,brainwane/zulip,bluesea/zulip,SmartPeople/zulip,zachallaun/zulip,vakila/zulip,vabs22/zulip,ApsOps/zulip,vikas-parashar/zulip,praveenaki/zulip,avastu/zulip,proliming/zulip,zofuthan/zulip,ryansnowboarder/zulip,mahim97/zulip,so0k/zulip,Qgap/zulip,atomic-labs/zulip,dxq-git/zulip,babbage/zulip,wweiradio/zulip,vabs22/zulip,zwily/zulip,huangkebo/zulip,souravbadami/zulip,Juanvulcano/zulip,suxinde2009/zulip,esander91/zulip,kaiyuanheshang/zulip,grave-w-grave/zulip,kokoar/zulip,punchagan/zulip,joyhchen/zulip,yuvipanda/zulip,codeKonami/zulip,vaidap/zulip,susansls/zulip,alliejones/zulip,jerryge/zulip,Drooids/zulip,dxq-git/zulip,ikasumiwt/zulip,saitodisse/zulip,karamcnair/zulip,gigawhitlocks/zulip,hackerkid/zulip,zofuthan/zulip,bowlofstew/zulip,udxxabp/zulip,mahim97/zulip,arpitpanwar/zulip,mansilladev/zulip,dnmfarrell/zulip,aps-sids/zulip,MayB/zulip,amanharitsh123/zulip,ipernet/zulip,tiansiyuan/zulip,AZtheAsian/zulip,thomasboyt/zulip,schatt/zulip,Batterfii/zulip,jeffcao/zulip,Vallher/zulip,avastu/zulip,atomic-labs/zulip,ufosky-server/zulip,ryansnowboarder/zulip,DazWorrall/zulip,Frouk/zulip,xuxiao/zulip,shubhamdhama/zulip,jessedhillon/zulip,alliejones/zulip,dhcrzf/zulip,wangdeshui/zulip,Qgap/zulip,timabbott/zulip,guiquanz/zulip,jackrzhang/zulip,bowlofstew/zulip,hayderimran7/zulip,proliming/zulip,Suninus/zulip,sonali0901/zulip,TigorC/zulip,RobotCaleb/zulip,Diptanshu8/zulip,zwily/zulip,jerryge/zulip,sup95/zulip,alliejones/zulip,dattatreya303/zulip,ikasumiwt/zulip,synicalsyntax/zulip,niftynei/zulip,zofuthan/zulip,gkotian/zulip,PaulPetring/zulip,shaunstanislaus/zulip,AZtheAsian/zulip,niftynei/zulip,tommyip/zulip,thomasboyt/zulip,arpitpanwar/zulip,rht/zulip,rishig/zulip,DazWorrall/zulip,jessedhillon/zulip,christi3k/zulip,rht/zulip,so0k/zulip,tdr130/zulip,joyhchen/zulip,eastlhu/zulip,m1ssou/zulip,noroot/zulip,mdavid/zulip,Gabriel0402/zulip,glovebx/zulip,synicalsyntax/zulip,ikasumiwt/zulip,jonesgithub/zulip,samatdav/zulip,udxxabp/zulip,peiwei/zulip,wweiradio/zulip,LAndreas/zulip,karamcnair/zulip,kokoar/zulip,udxxabp/zulip,zorojean/zulip,armooo/zulip,rishig/zulip,gigawhitlocks/zulip,shaunstanislaus/zulip,glovebx/zulip,gigawhitlocks/zulip,akuseru/zulip,kou/zulip,ahmadassaf/zulip,mansilladev/zulip,so0k/zulip,dotcool/zulip,wweiradio/zulip,jessedhillon/zulip,isht3/zulip,easyfmxu/zulip,huangkebo/zulip,littledogboy/zulip,bssrdf/zulip,noroot/zulip,RobotCaleb/zulip,eeshangarg/zulip,fw1121/zulip,JanzTam/zulip,amyliu345/zulip,PaulPetring/zulip,wavelets/zulip,qq1012803704/zulip,zachallaun/zulip,reyha/zulip,itnihao/zulip,dhcrzf/zulip,johnnygaddarr/zulip,bssrdf/zulip,kaiyuanheshang/zulip,ufosky-server/zulip,showell/zulip,fw1121/zulip,wdaher/zulip,rishig/zulip,kou/zulip,kaiyuanheshang/zulip,shubhamdhama/zulip,easyfmxu/zulip,dotcool/zulip,xuxiao/zulip,zachallaun/zulip,developerfm/zulip,shrikrishnaholla/zulip,amallia/zulip,ryanbackman/zulip,tommyip/zulip,souravbadami/zulip,jrowan/zulip,mdavid/zulip,alliejones/zulip,kokoar/zulip,christi3k/zulip,Cheppers/zulip,seapasulli/zulip,littledogboy/zulip,brockwhittaker/zulip,dattatreya303/zulip,mdavid/zulip,SmartPeople/zulip,seapasulli/zulip,aakash-cr7/zulip,punchagan/zulip,mahim97/zulip,m1ssou/zulip,verma-varsha/zulip,yocome/zulip,babbage/zulip,verma-varsha/zulip,hayderimran7/zulip,Frouk/zulip,udxxabp/zulip,bssrdf/zulip,joyhchen/zulip,EasonYi/zulip,PaulPetring/zulip,gigawhitlocks/zulip,johnnygaddarr/zulip,vakila/zulip,EasonYi/zulip,yuvipanda/zulip,vakila/zulip,PaulPetring/zulip,aliceriot/zulip,Frouk/zulip,jeffcao/zulip,m1ssou/zulip,pradiptad/zulip,peguin40/zulip,jainayush975/zulip,proliming/zulip,jainayush975/zulip,dwrpayne/zulip,zacps/zulip,ipernet/zulip,kou/zulip,armooo/zulip,johnny9/zulip,udxxabp/zulip,tbutter/zulip,pradiptad/zulip,jimmy54/zulip,themass/zulip,babbage/zulip,hj3938/zulip,willingc/zulip,arpith/zulip,suxinde2009/zulip,vakila/zulip,mohsenSy/zulip,DazWorrall/zulip,kou/zulip,itnihao/zulip,vabs22/zulip,jimmy54/zulip,johnnygaddarr/zulip,JPJPJPOPOP/zulip,tbutter/zulip,rht/zulip,zulip/zulip,wdaher/zulip,eeshangarg/zulip,jphilipsen05/zulip,PhilSk/zulip,guiquanz/zulip,sup95/zulip,vaidap/zulip,dawran6/zulip,nicholasbs/zulip,nicholasbs/zulip,hafeez3000/zulip,showell/zulip,jackrzhang/zulip,kou/zulip,peiwei/zulip,RobotCaleb/zulip,bluesea/zulip,Galexrt/zulip,dawran6/zulip,amanharitsh123/zulip,natanovia/zulip,proliming/zulip,proliming/zulip,sonali0901/zulip,andersk/zulip,Vallher/zulip,PaulPetring/zulip,niftynei/zulip,firstblade/zulip,dwrpayne/zulip,xuanhan863/zulip,johnnygaddarr/zulip,jessedhillon/zulip,Juanvulcano/zulip,rht/zulip,armooo/zulip,fw1121/zulip,lfranchi/zulip,JPJPJPOPOP/zulip,tbutter/zulip,zachallaun/zulip,adnanh/zulip,vikas-parashar/zulip,yocome/zulip,nicholasbs/zulip,joshisa/zulip,mdavid/zulip,zulip/zulip,he15his/zulip,jphilipsen05/zulip,xuxiao/zulip,wangdeshui/zulip,zorojean/zulip,hj3938/zulip,bastianh/zulip,jainayush975/zulip,tommyip/zulip,arpitpanwar/zulip,ipernet/zulip,guiquanz/zulip,RobotCaleb/zulip,dxq-git/zulip,peiwei/zulip,praveenaki/zulip,amallia/zulip,LAndreas/zulip,voidException/zulip,Cheppers/zulip,amyliu345/zulip,eastlhu/zulip,yocome/zulip,Batterfii/zulip,voidException/zulip,wavelets/zulip,SmartPeople/zulip,Jianchun1/zulip,aps-sids/zulip,hayderimran7/zulip,eeshangarg/zulip,deer-hope/zulip,Cheppers/zulip,samatdav/zulip,zachallaun/zulip,thomasboyt/zulip,andersk/zulip,reyha/zulip,zhaoweigg/zulip,bitemyapp/zulip,hafeez3000/zulip,codeKonami/zulip,jackrzhang/zulip,jrowan/zulip,swinghu/zulip,eeshangarg/zulip,jackrzhang/zulip,timabbott/zulip,hengqujushi/zulip,stamhe/zulip,Suninus/zulip,zacps/zulip,esander91/zulip,dawran6/zulip,ahmadassaf/zulip,tommyip/zulip,ipernet/zulip,technicalpickles/zulip,brainwane/zulip,mansilladev/zulip,aliceriot/zulip,alliejones/zulip,calvinleenyc/zulip,easyfmxu/zulip,Juanvulcano/zulip,shubhamdhama/zulip,hackerkid/zulip,Vallher/zulip,avastu/zulip,tbutter/zulip,aliceriot/zulip,wavelets/zulip,hayderimran7/zulip,Gabriel0402/zulip,eeshangarg/zulip,sonali0901/zulip,tbutter/zulip,wangdeshui/zulip,samatdav/zulip,adnanh/zulip,dhcrzf/zulip,he15his/zulip,Jianchun1/zulip,PhilSk/zulip,eastlhu/zulip,umkay/zulip,RobotCaleb/zulip,ikasumiwt/zulip,jerryge/zulip,ashwinirudrappa/zulip,johnny9/zulip,bitemyapp/zulip,timabbott/zulip,eastlhu/zulip,Drooids/zulip,Qgap/zulip,amyliu345/zulip,akuseru/zulip,aps-sids/zulip,MayB/zulip,voidException/zulip,Drooids/zulip,tiansiyuan/zulip,voidException/zulip,umkay/zulip,armooo/zulip,zwily/zulip,vabs22/zulip,niftynei/zulip,KJin99/zulip,verma-varsha/zulip,joshisa/zulip,hustlzp/zulip,jeffcao/zulip,vabs22/zulip,eastlhu/zulip,LAndreas/zulip,akuseru/zulip,luyifan/zulip,EasonYi/zulip,atomic-labs/zulip,reyha/zulip,deer-hope/zulip,lfranchi/zulip,Cheppers/zulip,synicalsyntax/zulip,brockwhittaker/zulip,vakila/zulip,kokoar/zulip,Galexrt/zulip,armooo/zulip,willingc/zulip,dnmfarrell/zulip,akuseru/zulip,peguin40/zulip,developerfm/zulip,MayB/zulip,glovebx/zulip,ryansnowboarder/zulip,babbage/zulip,zulip/zulip,tbutter/zulip,zulip/zulip,dhcrzf/zulip,johnny9/zulip,grave-w-grave/zulip,peguin40/zulip,themass/zulip,shubhamdhama/zulip,dattatreya303/zulip,dwrpayne/zulip,LAndreas/zulip,hafeez3000/zulip,cosmicAsymmetry/zulip,Batterfii/zulip,ahmadassaf/zulip,jrowan/zulip,zhaoweigg/zulip,LeeRisk/zulip,Suninus/zulip,esander91/zulip,deer-hope/zulip,hj3938/zulip,qq1012803704/zulip,levixie/zulip,glovebx/zulip,yocome/zulip,vikas-parashar/zulip,AZtheAsian/zulip,grave-w-grave/zulip,MayB/zulip,cosmicAsymmetry/zulip,LeeRisk/zulip,glovebx/zulip,ikasumiwt/zulip,swinghu/zulip,ashwinirudrappa/zulip,dawran6/zulip,krtkmj/zulip,gkotian/zulip,calvinleenyc/zulip,jonesgithub/zulip,adnanh/zulip,aakash-cr7/zulip,technicalpickles/zulip,sharmaeklavya2/zulip,voidException/zulip,jonesgithub/zulip,brockwhittaker/zulip,praveenaki/zulip,deer-hope/zulip,technicalpickles/zulip,timabbott/zulip,shrikrishnaholla/zulip,Galexrt/zulip,pradiptad/zulip,gkotian/zulip,brockwhittaker/zulip,dawran6/zulip,lfranchi/zulip,mohsenSy/zulip,samatdav/zulip,reyha/zulip,umkay/zulip,natanovia/zulip,gigawhitlocks/zulip,reyha/zulip,sonali0901/zulip,schatt/zulip,Diptanshu8/zulip,paxapy/zulip,mahim97/zulip,kokoar/zulip,isht3/zulip,ryanbackman/zulip,TigorC/zulip,KJin99/zulip,jackrzhang/zulip,joyhchen/zulip,wavelets/zulip,technicalpickles/zulip,Diptanshu8/zulip,Batterfii/zulip,codeKonami/zulip,grave-w-grave/zulip,schatt/zulip,zulip/zulip,sharmaeklavya2/zulip,christi3k/zulip,Gabriel0402/zulip,dwrpayne/zulip,avastu/zulip,bastianh/zulip,Gabriel0402/zulip,punchagan/zulip,levixie/zulip,littledogboy/zulip,shaunstanislaus/zulip,bssrdf/zulip,akuseru/zulip,synicalsyntax/zulip,paxapy/zulip,JanzTam/zulip,joyhchen/zulip,zwily/zulip,Qgap/zulip,jimmy54/zulip,aakash-cr7/zulip,shrikrishnaholla/zulip,Suninus/zulip,cosmicAsymmetry/zulip,bluesea/zulip,showell/zulip,wangdeshui/zulip,LeeRisk/zulip,ryansnowboarder/zulip,ericzhou2008/zulip,gigawhitlocks/zulip,jeffcao/zulip,johnny9/zulip,he15his/zulip,luyifan/zulip,he15his/zulip,xuanhan863/zulip,shubhamdhama/zulip,ericzhou2008/zulip,bssrdf/zulip,Drooids/zulip,tbutter/zulip,MayB/zulip,brockwhittaker/zulip,suxinde2009/zulip,TigorC/zulip,dnmfarrell/zulip,codeKonami/zulip,babbage/zulip,dhcrzf/zulip,niftynei/zulip,kaiyuanheshang/zulip,timabbott/zulip,blaze225/zulip,arpitpanwar/zulip,hengqujushi/zulip,tiansiyuan/zulip,sup95/zulip,wangdeshui/zulip,karamcnair/zulip,gkotian/zulip,Juanvulcano/zulip,hengqujushi/zulip,synicalsyntax/zulip,kou/zulip,Drooids/zulip,huangkebo/zulip,aliceriot/zulip,bowlofstew/zulip,fw1121/zulip,xuanhan863/zulip,dnmfarrell/zulip,sup95/zulip,wdaher/zulip,verma-varsha/zulip,stamhe/zulip,swinghu/zulip,stamhe/zulip,Qgap/zulip,levixie/zulip,Drooids/zulip,j831/zulip,levixie/zulip,KingxBanana/zulip,andersk/zulip,hengqujushi/zulip,littledogboy/zulip,udxxabp/zulip,tiansiyuan/zulip,krtkmj/zulip,dwrpayne/zulip,bitemyapp/zulip,atomic-labs/zulip,amallia/zulip,wavelets/zulip,andersk/zulip,voidException/zulip,ryanbackman/zulip,xuanhan863/zulip,vakila/zulip,codeKonami/zulip,peiwei/zulip,willingc/zulip,Gabriel0402/zulip,bowlofstew/zulip,willingc/zulip,huangkebo/zulip,KingxBanana/zulip,wavelets/zulip,Vallher/zulip,peiwei/zulip,moria/zulip,dnmfarrell/zulip,arpith/zulip,mdavid/zulip,pradiptad/zulip,lfranchi/zulip,tdr130/zulip,MariaFaBella85/zulip,joyhchen/zulip,TigorC/zulip,hayderimran7/zulip,guiquanz/zulip,Gabriel0402/zulip,zofuthan/zulip,umkay/zulip,cosmicAsymmetry/zulip,Jianchun1/zulip,showell/zulip,grave-w-grave/zulip,LeeRisk/zulip,PaulPetring/zulip,rht/zulip,esander91/zulip,zhaoweigg/zulip,ufosky-server/zulip,LeeRisk/zulip,guiquanz/zulip,zachallaun/zulip,Qgap/zulip,peiwei/zulip,zhaoweigg/zulip,m1ssou/zulip,moria/zulip,vaidap/zulip,dnmfarrell/zulip,amallia/zulip,isht3/zulip,tommyip/zulip,zhaoweigg/zulip,zacps/zulip,natanovia/zulip,shrikrishnaholla/zulip,atomic-labs/zulip,hustlzp/zulip,ryansnowboarder/zulip,blaze225/zulip,tommyip/zulip,dwrpayne/zulip,jessedhillon/zulip,blaze225/zulip,Frouk/zulip,bitemyapp/zulip,j831/zulip,schatt/zulip,swinghu/zulip,yuvipanda/zulip,rht/zulip,noroot/zulip,vaidap/zulip,j831/zulip,bluesea/zulip,Vallher/zulip,Diptanshu8/zulip,j831/zulip,zacps/zulip,amanharitsh123/zulip,jrowan/zulip,KingxBanana/zulip,christi3k/zulip,punchagan/zulip,m1ssou/zulip,armooo/zulip,huangkebo/zulip,ryansnowboarder/zulip,kokoar/zulip,johnnygaddarr/zulip,AZtheAsian/zulip,mansilladev/zulip,alliejones/zulip,armooo/zulip,developerfm/zulip,mansilladev/zulip,Gabriel0402/zulip,itnihao/zulip,gkotian/zulip,RobotCaleb/zulip,calvinleenyc/zulip,bastianh/zulip,brockwhittaker/zulip,developerfm/zulip,developerfm/zulip,joshisa/zulip,Frouk/zulip,bluesea/zulip,tiansiyuan/zulip,esander91/zulip,avastu/zulip,blaze225/zulip,ryanbackman/zulip,luyifan/zulip,eastlhu/zulip,luyifan/zulip,mohsenSy/zulip,xuxiao/zulip,LAndreas/zulip,schatt/zulip,MariaFaBella85/zulip,atomic-labs/zulip,guiquanz/zulip,themass/zulip,PhilSk/zulip,tdr130/zulip,krtkmj/zulip,codeKonami/zulip,brainwane/zulip,dotcool/zulip,kaiyuanheshang/zulip,yocome/zulip,glovebx/zulip,karamcnair/zulip,swinghu/zulip,Cheppers/zulip,Batterfii/zulip,aps-sids/zulip,jainayush975/zulip,blaze225/zulip,mahim97/zulip,amallia/zulip,stamhe/zulip,xuxiao/zulip,krtkmj/zulip,adnanh/zulip,littledogboy/zulip,wweiradio/zulip,so0k/zulip,bssrdf/zulip,ericzhou2008/zulip,dxq-git/zulip,showell/zulip,samatdav/zulip,jessedhillon/zulip,vabs22/zulip,brainwane/zulip,Jianchun1/zulip,arpith/zulip,jonesgithub/zulip,themass/zulip,cosmicAsymmetry/zulip,seapasulli/zulip,hafeez3000/zulip,aakash-cr7/zulip,codeKonami/zulip,jrowan/zulip,ApsOps/zulip,aps-sids/zulip,mansilladev/zulip,tiansiyuan/zulip,dotcool/zulip,bssrdf/zulip,EasonYi/zulip,umkay/zulip,wweiradio/zulip,aliceriot/zulip,ikasumiwt/zulip,amanharitsh123/zulip,TigorC/zulip,adnanh/zulip,firstblade/zulip
|
Add a push notification module to handle mobile client notifications
(imported from commit 3061a6e2d845226d3dce5bb262deb3a896e54f07)
|
from __future__ import absolute_import
from zerver.models import UserProfile, AppleDeviceToken
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.decorator import statsd_increment
from apnsclient import Session, Connection, Message, APNs
from django.conf import settings
import base64, binascii, logging
# Maintain a long-lived Session object to avoid having to re-SSL-handshake
# for each request
session = Session()
connection = session.get_connection(settings.APNS_SANDBOX, cert_file=settings.APNS_CERT_FILE)
def num_push_devices_for_user(user_profile):
return AppleDeviceToken.objects.filter(user=user_profile).count()
# We store the token as b64, but apns-client wants hex strings
def b64_to_hex(data):
return binascii.hexlify(base64.b64decode(data))
def hex_to_b64(data):
return base64.b64encode(binascii.unhexlify(data))
# Send a push notification to the desired clients
# extra_data is a dict that will be passed to the
# mobile app
@statsd_increment("apple_push_notification")
def send_apple_push_notification(user, alert, **extra_data):
    # Sends a push notification to all the PushClients
# Only Apple Push Notifications clients are supported at the moment
tokens = [b64_to_hex(device.token) for device in AppleDeviceToken.objects.filter(user=user)]
logging.info("Sending apple push notification to devices: %s" % (tokens,))
message = Message(tokens, alert=alert, **extra_data)
apns_client = APNs(connection)
ret = apns_client.send(message)
if not ret:
logging.warning("Failed to send push notification for clients %s" % (tokens,))
return
for token, reason in ret.failed.items():
code, errmsg = reason
logging.warning("Failed to deliver APNS notification to %s, reason: %s" % (token, errmsg))
# Check failures not related to devices.
for code, errmsg in ret.errors:
logging.warning("Unknown error when delivering APNS: %s" % (errmsg,))
if ret.needs_retry():
# TODO handle retrying by potentially scheduling a background job
# or re-queueing
logging.warning("APNS delivery needs a retry but ignoring")
# NOTE: This is used by the check_apns_tokens manage.py command. Do not call it otherwise, as the
# feedback() call can take up to 15s
def check_apns_feedback():
apns_client = APNs(connection, tail_timeout=20)
for token, since in apns_client.feedback():
since_date = timestamp_to_datetime(since)
logging.info("Found unavailable token %s, unavailable since %s" % (token, since_date))
AppleDeviceToken.objects.filter(token=hex_to_b64(token), last_updates__lt=since_date).delete()
logging.info("Finished checking feedback for stale tokens")
|
<commit_before><commit_msg>Add a push notification module to handle mobile client notifications
(imported from commit 3061a6e2d845226d3dce5bb262deb3a896e54f07)<commit_after>
|
from __future__ import absolute_import
from zerver.models import UserProfile, AppleDeviceToken
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.decorator import statsd_increment
from apnsclient import Session, Connection, Message, APNs
from django.conf import settings
import base64, binascii, logging
# Maintain a long-lived Session object to avoid having to re-SSL-handshake
# for each request
session = Session()
connection = session.get_connection(settings.APNS_SANDBOX, cert_file=settings.APNS_CERT_FILE)
def num_push_devices_for_user(user_profile):
return AppleDeviceToken.objects.filter(user=user_profile).count()
# We store the token as b64, but apns-client wants hex strings
def b64_to_hex(data):
return binascii.hexlify(base64.b64decode(data))
def hex_to_b64(data):
return base64.b64encode(binascii.unhexlify(data))
# Send a push notification to the desired clients
# extra_data is a dict that will be passed to the
# mobile app
@statsd_increment("apple_push_notification")
def send_apple_push_notification(user, alert, **extra_data):
    # Sends a push notification to all the PushClients
# Only Apple Push Notifications clients are supported at the moment
tokens = [b64_to_hex(device.token) for device in AppleDeviceToken.objects.filter(user=user)]
logging.info("Sending apple push notification to devices: %s" % (tokens,))
message = Message(tokens, alert=alert, **extra_data)
apns_client = APNs(connection)
ret = apns_client.send(message)
if not ret:
logging.warning("Failed to send push notification for clients %s" % (tokens,))
return
for token, reason in ret.failed.items():
code, errmsg = reason
logging.warning("Failed to deliver APNS notification to %s, reason: %s" % (token, errmsg))
# Check failures not related to devices.
for code, errmsg in ret.errors:
logging.warning("Unknown error when delivering APNS: %s" % (errmsg,))
if ret.needs_retry():
# TODO handle retrying by potentially scheduling a background job
# or re-queueing
logging.warning("APNS delivery needs a retry but ignoring")
# NOTE: This is used by the check_apns_tokens manage.py command. Do not call it otherwise, as the
# feedback() call can take up to 15s
def check_apns_feedback():
apns_client = APNs(connection, tail_timeout=20)
for token, since in apns_client.feedback():
since_date = timestamp_to_datetime(since)
logging.info("Found unavailable token %s, unavailable since %s" % (token, since_date))
AppleDeviceToken.objects.filter(token=hex_to_b64(token), last_updates__lt=since_date).delete()
logging.info("Finished checking feedback for stale tokens")
|
Add a push notification module to handle mobile client notifications
(imported from commit 3061a6e2d845226d3dce5bb262deb3a896e54f07)from __future__ import absolute_import
from zerver.models import UserProfile, AppleDeviceToken
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.decorator import statsd_increment
from apnsclient import Session, Connection, Message, APNs
from django.conf import settings
import base64, binascii, logging
# Maintain a long-lived Session object to avoid having to re-SSL-handshake
# for each request
session = Session()
connection = session.get_connection(settings.APNS_SANDBOX, cert_file=settings.APNS_CERT_FILE)
def num_push_devices_for_user(user_profile):
return AppleDeviceToken.objects.filter(user=user_profile).count()
# We store the token as b64, but apns-client wants hex strings
def b64_to_hex(data):
return binascii.hexlify(base64.b64decode(data))
def hex_to_b64(data):
return base64.b64encode(binascii.unhexlify(data))
# Send a push notification to the desired clients
# extra_data is a dict that will be passed to the
# mobile app
@statsd_increment("apple_push_notification")
def send_apple_push_notification(user, alert, **extra_data):
    # Sends a push notification to all the PushClients
    # Only Apple Push Notification clients are supported at the moment
tokens = [b64_to_hex(device.token) for device in AppleDeviceToken.objects.filter(user=user)]
logging.info("Sending apple push notification to devices: %s" % (tokens,))
message = Message(tokens, alert=alert, **extra_data)
apns_client = APNs(connection)
ret = apns_client.send(message)
if not ret:
logging.warning("Failed to send push notification for clients %s" % (tokens,))
return
for token, reason in ret.failed.items():
code, errmsg = reason
logging.warning("Failed to deliver APNS notification to %s, reason: %s" % (token, errmsg))
# Check failures not related to devices.
for code, errmsg in ret.errors:
logging.warning("Unknown error when delivering APNS: %s" % (errmsg,))
if ret.needs_retry():
# TODO handle retrying by potentially scheduling a background job
# or re-queueing
logging.warning("APNS delivery needs a retry but ignoring")
# NOTE: This is used by the check_apns_tokens manage.py command. Do not call it otherwise, as the
# feedback() call can take up to 15s
def check_apns_feedback():
apns_client = APNs(connection, tail_timeout=20)
for token, since in apns_client.feedback():
since_date = timestamp_to_datetime(since)
logging.info("Found unavailable token %s, unavailable since %s" % (token, since_date))
        AppleDeviceToken.objects.filter(token=hex_to_b64(token), last_updated__lt=since_date).delete()
logging.info("Finished checking feedback for stale tokens")
|
<commit_before><commit_msg>Add a push notification module to handle mobile client notifications
(imported from commit 3061a6e2d845226d3dce5bb262deb3a896e54f07)<commit_after>from __future__ import absolute_import
from zerver.models import UserProfile, AppleDeviceToken
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.decorator import statsd_increment
from apnsclient import Session, Connection, Message, APNs
from django.conf import settings
import base64, binascii, logging
# Maintain a long-lived Session object to avoid having to re-SSL-handshake
# for each request
session = Session()
connection = session.get_connection(settings.APNS_SANDBOX, cert_file=settings.APNS_CERT_FILE)
def num_push_devices_for_user(user_profile):
return AppleDeviceToken.objects.filter(user=user_profile).count()
# We store the token as b64, but apns-client wants hex strings
def b64_to_hex(data):
return binascii.hexlify(base64.b64decode(data))
def hex_to_b64(data):
return base64.b64encode(binascii.unhexlify(data))
# Send a push notification to the desired clients
# extra_data is a dict that will be passed to the
# mobile app
@statsd_increment("apple_push_notification")
def send_apple_push_notification(user, alert, **extra_data):
    # Sends a push notification to all the PushClients
    # Only Apple Push Notification clients are supported at the moment
tokens = [b64_to_hex(device.token) for device in AppleDeviceToken.objects.filter(user=user)]
logging.info("Sending apple push notification to devices: %s" % (tokens,))
message = Message(tokens, alert=alert, **extra_data)
apns_client = APNs(connection)
ret = apns_client.send(message)
if not ret:
logging.warning("Failed to send push notification for clients %s" % (tokens,))
return
for token, reason in ret.failed.items():
code, errmsg = reason
logging.warning("Failed to deliver APNS notification to %s, reason: %s" % (token, errmsg))
# Check failures not related to devices.
for code, errmsg in ret.errors:
logging.warning("Unknown error when delivering APNS: %s" % (errmsg,))
if ret.needs_retry():
# TODO handle retrying by potentially scheduling a background job
# or re-queueing
logging.warning("APNS delivery needs a retry but ignoring")
# NOTE: This is used by the check_apns_tokens manage.py command. Do not call it otherwise, as the
# feedback() call can take up to 15s
def check_apns_feedback():
apns_client = APNs(connection, tail_timeout=20)
for token, since in apns_client.feedback():
since_date = timestamp_to_datetime(since)
logging.info("Found unavailable token %s, unavailable since %s" % (token, since_date))
        AppleDeviceToken.objects.filter(token=hex_to_b64(token), last_updated__lt=since_date).delete()
logging.info("Finished checking feedback for stale tokens")
|
|
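A note on the encoding round-trip in the record above: tokens are stored base64-encoded, while apns-client wants hex strings, which is all b64_to_hex/hex_to_b64 do. A minimal self-contained sketch of the same conversion (written for Python 3, unlike the Python 2 record; only the standard library is assumed):

import base64
import binascii

def b64_to_hex(data):
    # stored form (base64) -> the hex string apns-client expects
    return binascii.hexlify(base64.b64decode(data)).decode("ascii")

def hex_to_b64(data):
    # hex string -> the base64 form kept in the database
    return base64.b64encode(binascii.unhexlify(data)).decode("ascii")

token_hex = "0a1b2c3d4e5f"
assert b64_to_hex(hex_to_b64(token_hex)) == token_hex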
4cb217e56b9c65d8411fc1e315922ac7c7c6848c
|
edx_proctoring/migrations/0010_update_backend.py
|
edx_proctoring/migrations/0010_update_backend.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-04-29 15:44
from __future__ import unicode_literals
import logging
from django.db import migrations
def update_backend(apps, schema_editor):
from django.conf import settings
log = logging.getLogger(__name__)
ProctoredExam = apps.get_model('edx_proctoring', 'ProctoredExam')
default_backend = settings.PROCTORING_BACKENDS.get('DEFAULT', None)
if default_backend:
for exam in ProctoredExam.objects.filter(backend__isnull=True):
exam.backend = default_backend
exam.save()
log.info("updated exam %d with backend %d", exam.id, default_backend)
else:
log.warning("settings.PROCTORING_BACKENDS['DEFAULT'] is not set. Unable to do migration.")
class Migration(migrations.Migration):
dependencies = [
('edx_proctoring', '0009_proctoredexamreviewpolicy_remove_rules'),
]
operations = [
migrations.RunPython(update_backend, migrations.RunPython.noop)
]
|
Add migration for missing backend data
|
Add migration for missing backend data
|
Python
|
agpl-3.0
|
edx/edx-proctoring,edx/edx-proctoring,edx/edx-proctoring
|
Add migration for missing backend data
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-04-29 15:44
from __future__ import unicode_literals
import logging
from django.db import migrations
def update_backend(apps, schema_editor):
from django.conf import settings
log = logging.getLogger(__name__)
ProctoredExam = apps.get_model('edx_proctoring', 'ProctoredExam')
default_backend = settings.PROCTORING_BACKENDS.get('DEFAULT', None)
if default_backend:
for exam in ProctoredExam.objects.filter(backend__isnull=True):
exam.backend = default_backend
exam.save()
log.info("updated exam %d with backend %d", exam.id, default_backend)
else:
log.warning("settings.PROCTORING_BACKENDS['DEFAULT'] is not set. Unable to do migration.")
class Migration(migrations.Migration):
dependencies = [
('edx_proctoring', '0009_proctoredexamreviewpolicy_remove_rules'),
]
operations = [
migrations.RunPython(update_backend, migrations.RunPython.noop)
]
|
<commit_before><commit_msg>Add migration for missing backend data<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-04-29 15:44
from __future__ import unicode_literals
import logging
from django.db import migrations
def update_backend(apps, schema_editor):
from django.conf import settings
log = logging.getLogger(__name__)
ProctoredExam = apps.get_model('edx_proctoring', 'ProctoredExam')
default_backend = settings.PROCTORING_BACKENDS.get('DEFAULT', None)
if default_backend:
for exam in ProctoredExam.objects.filter(backend__isnull=True):
exam.backend = default_backend
exam.save()
log.info("updated exam %d with backend %d", exam.id, default_backend)
else:
log.warning("settings.PROCTORING_BACKENDS['DEFAULT'] is not set. Unable to do migration.")
class Migration(migrations.Migration):
dependencies = [
('edx_proctoring', '0009_proctoredexamreviewpolicy_remove_rules'),
]
operations = [
migrations.RunPython(update_backend, migrations.RunPython.noop)
]
|
Add migration for missing backend data# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-04-29 15:44
from __future__ import unicode_literals
import logging
from django.db import migrations
def update_backend(apps, schema_editor):
from django.conf import settings
log = logging.getLogger(__name__)
ProctoredExam = apps.get_model('edx_proctoring', 'ProctoredExam')
default_backend = settings.PROCTORING_BACKENDS.get('DEFAULT', None)
if default_backend:
for exam in ProctoredExam.objects.filter(backend__isnull=True):
exam.backend = default_backend
exam.save()
log.info("updated exam %d with backend %d", exam.id, default_backend)
else:
log.warning("settings.PROCTORING_BACKENDS['DEFAULT'] is not set. Unable to do migration.")
class Migration(migrations.Migration):
dependencies = [
('edx_proctoring', '0009_proctoredexamreviewpolicy_remove_rules'),
]
operations = [
migrations.RunPython(update_backend, migrations.RunPython.noop)
]
|
<commit_before><commit_msg>Add migration for missing backend data<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-04-29 15:44
from __future__ import unicode_literals
import logging
from django.db import migrations
def update_backend(apps, schema_editor):
from django.conf import settings
log = logging.getLogger(__name__)
ProctoredExam = apps.get_model('edx_proctoring', 'ProctoredExam')
default_backend = settings.PROCTORING_BACKENDS.get('DEFAULT', None)
if default_backend:
for exam in ProctoredExam.objects.filter(backend__isnull=True):
exam.backend = default_backend
exam.save()
log.info("updated exam %d with backend %d", exam.id, default_backend)
else:
log.warning("settings.PROCTORING_BACKENDS['DEFAULT'] is not set. Unable to do migration.")
class Migration(migrations.Migration):
dependencies = [
('edx_proctoring', '0009_proctoredexamreviewpolicy_remove_rules'),
]
operations = [
migrations.RunPython(update_backend, migrations.RunPython.noop)
]
|
|
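The migration above is the standard RunPython backfill: look up the historical model with apps.get_model rather than importing it, and pass a noop reverse so the migration stays reversible. A compact sketch of the same pattern (app, model, and field names here are illustrative, not taken from edx_proctoring); a bulk update() replaces the per-row save loop when no per-row logging is needed:

from django.db import migrations

def forwards(apps, schema_editor):
    # historical model: safe even if the real model class changes later
    Exam = apps.get_model("myapp", "Exam")
    Exam.objects.filter(backend__isnull=True).update(backend="default")

class Migration(migrations.Migration):
    dependencies = [("myapp", "0001_initial")]
    operations = [
        # noop reverse keeps the migration reversible
        migrations.RunPython(forwards, migrations.RunPython.noop),
    ]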
26213446116ffef0ee8d528a1a58aab918b16aa7
|
nettests/myip.py
|
nettests/myip.py
|
# -*- encoding: utf-8 -*-
#
# :authors: Arturo Filastò
# :licence: see LICENSE
from ooni.templates import httpt
class MyIP(httpt.HTTPTest):
inputs = ['https://check.torproject.org']
def processResponseBody(self, body):
print "FOOOO"
import re
regexp = "Your IP address appears to be: <b>(.+?)<\/b>"
match = re.search(regexp, body)
try:
self.report['myip'] = match.group(1)
except:
self.report['myip'] = None
|
Add test to obtain the clients IP address via check.tpo
|
Add test to obtain the clients IP address via check.tpo
|
Python
|
bsd-2-clause
|
lordappsec/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,hackerberry/ooni-probe,kdmurray91/ooni-probe,lordappsec/ooni-probe,lordappsec/ooni-probe,Karthikeyan-kkk/ooni-probe,0xPoly/ooni-probe,hackerberry/ooni-probe,kdmurray91/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,0xPoly/ooni-probe,juga0/ooni-probe,lordappsec/ooni-probe,Karthikeyan-kkk/ooni-probe,juga0/ooni-probe,Karthikeyan-kkk/ooni-probe,Karthikeyan-kkk/ooni-probe,juga0/ooni-probe,juga0/ooni-probe
|
Add test to obtain the clients IP address via check.tpo
|
# -*- encoding: utf-8 -*-
#
# :authors: Arturo Filastò
# :licence: see LICENSE
from ooni.templates import httpt
class MyIP(httpt.HTTPTest):
inputs = ['https://check.torproject.org']
def processResponseBody(self, body):
print "FOOOO"
import re
regexp = "Your IP address appears to be: <b>(.+?)<\/b>"
match = re.search(regexp, body)
try:
self.report['myip'] = match.group(1)
except:
self.report['myip'] = None
|
<commit_before><commit_msg>Add test to obtain the clients IP address via check.tpo<commit_after>
|
# -*- encoding: utf-8 -*-
#
# :authors: Arturo Filastò
# :licence: see LICENSE
from ooni.templates import httpt
class MyIP(httpt.HTTPTest):
inputs = ['https://check.torproject.org']
def processResponseBody(self, body):
print "FOOOO"
import re
regexp = "Your IP address appears to be: <b>(.+?)<\/b>"
match = re.search(regexp, body)
try:
self.report['myip'] = match.group(1)
except:
self.report['myip'] = None
|
Add test to obtain the clients IP address via check.tpo# -*- encoding: utf-8 -*-
#
# :authors: Arturo Filastò
# :licence: see LICENSE
from ooni.templates import httpt
class MyIP(httpt.HTTPTest):
inputs = ['https://check.torproject.org']
def processResponseBody(self, body):
print "FOOOO"
import re
regexp = "Your IP address appears to be: <b>(.+?)<\/b>"
match = re.search(regexp, body)
try:
self.report['myip'] = match.group(1)
except:
self.report['myip'] = None
|
<commit_before><commit_msg>Add test to obtain the clients IP address via check.tpo<commit_after># -*- encoding: utf-8 -*-
#
# :authors: Arturo Filastò
# :licence: see LICENSE
from ooni.templates import httpt
class MyIP(httpt.HTTPTest):
inputs = ['https://check.torproject.org']
def processResponseBody(self, body):
print "FOOOO"
import re
regexp = "Your IP address appears to be: <b>(.+?)<\/b>"
match = re.search(regexp, body)
try:
self.report['myip'] = match.group(1)
except:
self.report['myip'] = None
|
|
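Two details of the regexp code above are worth tightening in new code: '/' needs no escaping in a Python regex (and a '\/' escape inside a non-raw string draws an invalid-escape warning on modern Pythons), and an explicit None check is clearer than the bare except around match.group(1). A self-contained sketch:

import re

IP_RE = re.compile(r"Your IP address appears to be: <b>(.+?)</b>")

def extract_ip(body):
    # return the captured address, or None when the marker is absent
    match = IP_RE.search(body)
    return match.group(1) if match else None

assert extract_ip("<p>Your IP address appears to be: <b>203.0.113.7</b></p>") == "203.0.113.7"
assert extract_ip("no address here") is None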
6149a527c34b29713028ad0be4dcbe39f5ee0457
|
test/test_rpc.py
|
test/test_rpc.py
|
# -*- coding: utf8 -*-
# Low-resource message queue framework
# Do RPC
# Copyright (c) 2016 Roman Kharin <romiq.kh@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import json
from lrmq.client.sync import AgentIO
class AgentIOCaller(AgentIO):
"Caller agent"
def init_loop(self):
super().init_loop(True)
print("CALLER", self.myid, file = sys.stderr)
#self.reg_callback(self.myid, self.msg_self)
self.subscribe_check(self.myid + "/.*")
print("CALL",
self.call_aid, self.call_func, self.call_args, file = sys.stderr)
ans = self.call(self.call_aid, self.call_func, self.call_args)
print("RET", ans, file = sys.stderr)
# leave hub
self.exit_check()
self.end_loop()
def start_loop(self, aid, func, args):
self.call_aid = aid
self.call_func = func
self.call_args = args
super().start_loop()
def main():
    if len(sys.argv) < 4:  # script name plus three required positional arguments
print()
print("Usage:")
print("\tpython3 -m lrmq -a python test_rpc.py <aid> <func> <args>")
print("Where:")
print("\taid - agent id")
print("\tfunc - function name")
print("\targs - json formatted aguments ('[1, 2, 3]')")
else:
a = AgentIOCaller()
args = json.loads(sys.argv[3])
a.start_loop(sys.argv[1], sys.argv[2], args)
if __name__ == "__main__":
main()
|
Add test for calling rpc from command line
|
Add test for calling rpc from command line
|
Python
|
mit
|
RomanKharin/lrmq
|
Add test for calling rpc from command line
|
# -*- coding: utf8 -*-
# Low-resource message queue framework
# Do RPC
# Copyright (c) 2016 Roman Kharin <romiq.kh@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import json
from lrmq.client.sync import AgentIO
class AgentIOCaller(AgentIO):
"Caller agent"
def init_loop(self):
super().init_loop(True)
print("CALLER", self.myid, file = sys.stderr)
#self.reg_callback(self.myid, self.msg_self)
self.subscribe_check(self.myid + "/.*")
print("CALL",
self.call_aid, self.call_func, self.call_args, file = sys.stderr)
ans = self.call(self.call_aid, self.call_func, self.call_args)
print("RET", ans, file = sys.stderr)
# leave hub
self.exit_check()
self.end_loop()
def start_loop(self, aid, func, args):
self.call_aid = aid
self.call_func = func
self.call_args = args
super().start_loop()
def main():
    if len(sys.argv) < 4:  # script name plus three required positional arguments
print()
print("Usage:")
print("\tpython3 -m lrmq -a python test_rpc.py <aid> <func> <args>")
print("Where:")
print("\taid - agent id")
print("\tfunc - function name")
print("\targs - json formatted aguments ('[1, 2, 3]')")
else:
a = AgentIOCaller()
args = json.loads(sys.argv[3])
a.start_loop(sys.argv[1], sys.argv[2], args)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add test for calling rpc from command line<commit_after>
|
# -*- coding: utf8 -*-
# Low-resource message queue framework
# Do RPC
# Copyright (c) 2016 Roman Kharin <romiq.kh@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import json
from lrmq.client.sync import AgentIO
class AgentIOCaller(AgentIO):
"Caller agent"
def init_loop(self):
super().init_loop(True)
print("CALLER", self.myid, file = sys.stderr)
#self.reg_callback(self.myid, self.msg_self)
self.subscribe_check(self.myid + "/.*")
print("CALL",
self.call_aid, self.call_func, self.call_args, file = sys.stderr)
ans = self.call(self.call_aid, self.call_func, self.call_args)
print("RET", ans, file = sys.stderr)
# leave hub
self.exit_check()
self.end_loop()
def start_loop(self, aid, func, args):
self.call_aid = aid
self.call_func = func
self.call_args = args
super().start_loop()
def main():
    if len(sys.argv) < 4:  # script name plus three required positional arguments
print()
print("Usage:")
print("\tpython3 -m lrmq -a python test_rpc.py <aid> <func> <args>")
print("Where:")
print("\taid - agent id")
print("\tfunc - function name")
print("\targs - json formatted aguments ('[1, 2, 3]')")
else:
a = AgentIOCaller()
args = json.loads(sys.argv[3])
a.start_loop(sys.argv[1], sys.argv[2], args)
if __name__ == "__main__":
main()
|
Add test for calling rpc from command line# -*- coding: utf8 -*-
# Low-resource message queue framework
# Do RPC
# Copyright (c) 2016 Roman Kharin <romiq.kh@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import json
from lrmq.client.sync import AgentIO
class AgentIOCaller(AgentIO):
"Caller agent"
def init_loop(self):
super().init_loop(True)
print("CALLER", self.myid, file = sys.stderr)
#self.reg_callback(self.myid, self.msg_self)
self.subscribe_check(self.myid + "/.*")
print("CALL",
self.call_aid, self.call_func, self.call_args, file = sys.stderr)
ans = self.call(self.call_aid, self.call_func, self.call_args)
print("RET", ans, file = sys.stderr)
# leave hub
self.exit_check()
self.end_loop()
def start_loop(self, aid, func, args):
self.call_aid = aid
self.call_func = func
self.call_args = args
super().start_loop()
def main():
    if len(sys.argv) < 4:  # script name plus three required positional arguments
print()
print("Usage:")
print("\tpython3 -m lrmq -a python test_rpc.py <aid> <func> <args>")
print("Where:")
print("\taid - agent id")
print("\tfunc - function name")
print("\targs - json formatted aguments ('[1, 2, 3]')")
else:
a = AgentIOCaller()
args = json.loads(sys.argv[3])
a.start_loop(sys.argv[1], sys.argv[2], args)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add test for calling rpc from command line<commit_after># -*- coding: utf8 -*-
# Low-resource message queue framework
# Do RPC
# Copyright (c) 2016 Roman Kharin <romiq.kh@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import json
from lrmq.client.sync import AgentIO
class AgentIOCaller(AgentIO):
"Caller agent"
def init_loop(self):
super().init_loop(True)
print("CALLER", self.myid, file = sys.stderr)
#self.reg_callback(self.myid, self.msg_self)
self.subscribe_check(self.myid + "/.*")
print("CALL",
self.call_aid, self.call_func, self.call_args, file = sys.stderr)
ans = self.call(self.call_aid, self.call_func, self.call_args)
print("RET", ans, file = sys.stderr)
# leave hub
self.exit_check()
self.end_loop()
def start_loop(self, aid, func, args):
self.call_aid = aid
self.call_func = func
self.call_args = args
super().start_loop()
def main():
    if len(sys.argv) < 4:  # script name plus three required positional arguments
print()
print("Usage:")
print("\tpython3 -m lrmq -a python test_rpc.py <aid> <func> <args>")
print("Where:")
print("\taid - agent id")
print("\tfunc - function name")
print("\targs - json formatted aguments ('[1, 2, 3]')")
else:
a = AgentIOCaller()
args = json.loads(sys.argv[3])
a.start_loop(sys.argv[1], sys.argv[2], args)
if __name__ == "__main__":
main()
|
|
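The manual len(sys.argv) check above (corrected to < 4, since three positional arguments follow the script name) is exactly what argparse automates, usage text included. A sketch of the same command-line interface; the argument names mirror the record, but this parser is hypothetical, not part of lrmq:

import argparse
import json

def parse_cli(argv=None):
    parser = argparse.ArgumentParser(description="call an RPC function on an agent")
    parser.add_argument("aid", help="agent id")
    parser.add_argument("func", help="function name")
    # type=json.loads parses and validates the JSON argument in one step
    parser.add_argument("args", type=json.loads,
                        help="JSON-formatted arguments, e.g. '[1, 2, 3]'")
    return parser.parse_args(argv)

ns = parse_cli(["agent1", "ping", "[1, 2, 3]"])
assert (ns.aid, ns.func, ns.args) == ("agent1", "ping", [1, 2, 3])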
777d01e82a63a2480d8ae2f53096d4a4c338409a
|
tests/unit/modules/test_mandrill.py
|
tests/unit/modules/test_mandrill.py
|
# -*- coding: utf-8 -*-
'''
Tests for the Mandrill execution module.
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
MagicMock,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.mandrill as mandrill
# Test data
TEST_SEND = {
'result': True,
'comment': '',
'out': [
{
'status': 'sent',
'_id': 'c4353540a3c123eca112bbdd704ab6',
'email': 'recv@example.com',
'reject_reason': None
}
]
}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MandrillModuleTest(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.mandrill.
'''
def setup_loader_modules(self):
module_globals = {
mandrill: {
'__salt__': {
'config.merge': MagicMock(return_value={
'mandrill': {
'key': '2orgk34kgk34g'
}
})
}
}
}
if mandrill.HAS_REQUESTS is False:
module_globals['sys.modules'] = {'requests': MagicMock()}
return module_globals
def test_send(self):
'''
Test the send function.
'''
mock_cmd = MagicMock(return_value=TEST_SEND)
with patch.object(mandrill, 'send', mock_cmd) as send:
self.assertEqual(
send(message={
'subject': 'Hi',
'from_email': 'test@example.com',
'to': [
{'email': 'recv@example.com', 'type': 'to'}
]
}
),
TEST_SEND
)
|
Add tests for the mandrill execution module
|
Add tests for the mandrill execution module
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add tests for the mandrill execution module
|
# -*- coding: utf-8 -*-
'''
Tests for the Mandrill execution module.
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
MagicMock,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.mandrill as mandrill
# Test data
TEST_SEND = {
'result': True,
'comment': '',
'out': [
{
'status': 'sent',
'_id': 'c4353540a3c123eca112bbdd704ab6',
'email': 'recv@example.com',
'reject_reason': None
}
]
}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MandrillModuleTest(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.mandrill.
'''
def setup_loader_modules(self):
module_globals = {
mandrill: {
'__salt__': {
'config.merge': MagicMock(return_value={
'mandrill': {
'key': '2orgk34kgk34g'
}
})
}
}
}
if mandrill.HAS_REQUESTS is False:
module_globals['sys.modules'] = {'requests': MagicMock()}
return module_globals
def test_send(self):
'''
Test the send function.
'''
mock_cmd = MagicMock(return_value=TEST_SEND)
with patch.object(mandrill, 'send', mock_cmd) as send:
self.assertEqual(
send(message={
'subject': 'Hi',
'from_email': 'test@example.com',
'to': [
{'email': 'recv@example.com', 'type': 'to'}
]
}
),
TEST_SEND
)
|
<commit_before><commit_msg>Add tests for the mandrill execution module<commit_after>
|
# -*- coding: utf-8 -*-
'''
Tests for the Mandrill execution module.
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
MagicMock,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.mandrill as mandrill
# Test data
TEST_SEND = {
'result': True,
'comment': '',
'out': [
{
'status': 'sent',
'_id': 'c4353540a3c123eca112bbdd704ab6',
'email': 'recv@example.com',
'reject_reason': None
}
]
}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MandrillModuleTest(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.mandrill.
'''
def setup_loader_modules(self):
module_globals = {
mandrill: {
'__salt__': {
'config.merge': MagicMock(return_value={
'mandrill': {
'key': '2orgk34kgk34g'
}
})
}
}
}
if mandrill.HAS_REQUESTS is False:
module_globals['sys.modules'] = {'requests': MagicMock()}
return module_globals
def test_send(self):
'''
Test the send function.
'''
mock_cmd = MagicMock(return_value=TEST_SEND)
with patch.object(mandrill, 'send', mock_cmd) as send:
self.assertEqual(
send(message={
'subject': 'Hi',
'from_email': 'test@example.com',
'to': [
{'email': 'recv@example.com', 'type': 'to'}
]
}
),
TEST_SEND
)
|
Add tests for the mandrill execution module# -*- coding: utf-8 -*-
'''
Tests for the Mandrill execution module.
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
MagicMock,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.mandrill as mandrill
# Test data
TEST_SEND = {
'result': True,
'comment': '',
'out': [
{
'status': 'sent',
'_id': 'c4353540a3c123eca112bbdd704ab6',
'email': 'recv@example.com',
'reject_reason': None
}
]
}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MandrillModuleTest(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.mandrill.
'''
def setup_loader_modules(self):
module_globals = {
mandrill: {
'__salt__': {
'config.merge': MagicMock(return_value={
'mandrill': {
'key': '2orgk34kgk34g'
}
})
}
}
}
if mandrill.HAS_REQUESTS is False:
module_globals['sys.modules'] = {'requests': MagicMock()}
return module_globals
def test_send(self):
'''
Test the send function.
'''
mock_cmd = MagicMock(return_value=TEST_SEND)
with patch.object(mandrill, 'send', mock_cmd) as send:
self.assertEqual(
send(message={
'subject': 'Hi',
'from_email': 'test@example.com',
'to': [
{'email': 'recv@example.com', 'type': 'to'}
]
}
),
TEST_SEND
)
|
<commit_before><commit_msg>Add tests for the mandrill execution module<commit_after># -*- coding: utf-8 -*-
'''
Tests for the Mandrill execution module.
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
MagicMock,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.mandrill as mandrill
# Test data
TEST_SEND = {
'result': True,
'comment': '',
'out': [
{
'status': 'sent',
'_id': 'c4353540a3c123eca112bbdd704ab6',
'email': 'recv@example.com',
'reject_reason': None
}
]
}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MandrillModuleTest(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.mandrill.
'''
def setup_loader_modules(self):
module_globals = {
mandrill: {
'__salt__': {
'config.merge': MagicMock(return_value={
'mandrill': {
'key': '2orgk34kgk34g'
}
})
}
}
}
if mandrill.HAS_REQUESTS is False:
module_globals['sys.modules'] = {'requests': MagicMock()}
return module_globals
def test_send(self):
'''
Test the send function.
'''
mock_cmd = MagicMock(return_value=TEST_SEND)
with patch.object(mandrill, 'send', mock_cmd) as send:
self.assertEqual(
send(message={
'subject': 'Hi',
'from_email': 'test@example.com',
'to': [
{'email': 'recv@example.com', 'type': 'to'}
]
}
),
TEST_SEND
)
|
|
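One caveat about test_send above: it patches mandrill.send itself and then calls the patched name, so the assertion can only ever see the mock's return value; the module's real logic is never exercised. The usual remedy is to patch the collaborator the function calls. A self-contained illustration with a toy module (nothing here assumes the real mandrill module's internals):

from types import SimpleNamespace
from unittest.mock import MagicMock, patch

# toy module under test: send() delegates to an HTTP helper _post()
mod = SimpleNamespace()
mod._post = lambda payload: {"status": "real call"}
mod.send = lambda message: mod._post({"message": message})

# patch the collaborator so send()'s own logic still runs in the test
with patch.object(mod, "_post", MagicMock(return_value={"status": "sent"})) as post:
    assert mod.send({"subject": "Hi"}) == {"status": "sent"}
    post.assert_called_once_with({"message": {"subject": "Hi"}})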
2b02519f521f9e3cf177ed9970e9958a0eeb43c3
|
tests/framework/test_setup.py
|
tests/framework/test_setup.py
|
import pytest
from pymt.framework.bmi_setup import _parse_author_info
@pytest.mark.parametrize("key", ("author", "authors"))
def test_author(key):
assert _parse_author_info({key: "John Cleese"}) == ("John Cleese",)
def test_author_empty_list():
assert _parse_author_info({}) == ("",)
@pytest.mark.parametrize("key", ("author", "authors"))
@pytest.mark.parametrize("iter", (tuple, list))
def test_author_as_list(key, iter):
assert _parse_author_info({key: iter(("John Cleese",))}) == ("John Cleese",)
@pytest.mark.parametrize("key", ("author", "authors"))
@pytest.mark.parametrize("iter", (tuple, list))
def test_author_multiple_authors(key, iter):
assert _parse_author_info({key: iter(("John Cleese", "Eric Idle"))}) == ("John Cleese", "Eric Idle")
|
Add unit tests for _parse_author_info.
|
Add unit tests for _parse_author_info.
|
Python
|
mit
|
csdms/coupling,csdms/pymt,csdms/coupling
|
Add unit tests for _parse_author_info.
|
import pytest
from pymt.framework.bmi_setup import _parse_author_info
@pytest.mark.parametrize("key", ("author", "authors"))
def test_author(key):
assert _parse_author_info({key: "John Cleese"}) == ("John Cleese",)
def test_author_empty_list():
assert _parse_author_info({}) == ("",)
@pytest.mark.parametrize("key", ("author", "authors"))
@pytest.mark.parametrize("iter", (tuple, list))
def test_author_as_list(key, iter):
assert _parse_author_info({key: iter(("John Cleese",))}) == ("John Cleese",)
@pytest.mark.parametrize("key", ("author", "authors"))
@pytest.mark.parametrize("iter", (tuple, list))
def test_author_multiple_authors(key, iter):
assert _parse_author_info({key: iter(("John Cleese", "Eric Idle"))}) == ("John Cleese", "Eric Idle")
|
<commit_before><commit_msg>Add unit tests for _parse_author_info.<commit_after>
|
import pytest
from pymt.framework.bmi_setup import _parse_author_info
@pytest.mark.parametrize("key", ("author", "authors"))
def test_author(key):
assert _parse_author_info({key: "John Cleese"}) == ("John Cleese",)
def test_author_empty_list():
assert _parse_author_info({}) == ("",)
@pytest.mark.parametrize("key", ("author", "authors"))
@pytest.mark.parametrize("iter", (tuple, list))
def test_author_as_list(key, iter):
assert _parse_author_info({key: iter(("John Cleese",))}) == ("John Cleese",)
@pytest.mark.parametrize("key", ("author", "authors"))
@pytest.mark.parametrize("iter", (tuple, list))
def test_author_multiple_authors(key, iter):
assert _parse_author_info({key: iter(("John Cleese", "Eric Idle"))}) == ("John Cleese", "Eric Idle")
|
Add unit tests for _parse_author_info.import pytest
from pymt.framework.bmi_setup import _parse_author_info
@pytest.mark.parametrize("key", ("author", "authors"))
def test_author(key):
assert _parse_author_info({key: "John Cleese"}) == ("John Cleese",)
def test_author_empty_list():
assert _parse_author_info({}) == ("",)
@pytest.mark.parametrize("key", ("author", "authors"))
@pytest.mark.parametrize("iter", (tuple, list))
def test_author_as_list(key, iter):
assert _parse_author_info({key: iter(("John Cleese",))}) == ("John Cleese",)
@pytest.mark.parametrize("key", ("author", "authors"))
@pytest.mark.parametrize("iter", (tuple, list))
def test_author_multiple_authors(key, iter):
assert _parse_author_info({key: iter(("John Cleese", "Eric Idle"))}) == ("John Cleese", "Eric Idle")
|
<commit_before><commit_msg>Add unit tests for _parse_author_info.<commit_after>import pytest
from pymt.framework.bmi_setup import _parse_author_info
@pytest.mark.parametrize("key", ("author", "authors"))
def test_author(key):
assert _parse_author_info({key: "John Cleese"}) == ("John Cleese",)
def test_author_empty_list():
assert _parse_author_info({}) == ("",)
@pytest.mark.parametrize("key", ("author", "authors"))
@pytest.mark.parametrize("iter", (tuple, list))
def test_author_as_list(key, iter):
assert _parse_author_info({key: iter(("John Cleese",))}) == ("John Cleese",)
@pytest.mark.parametrize("key", ("author", "authors"))
@pytest.mark.parametrize("iter", (tuple, list))
def test_author_multiple_authors(key, iter):
assert _parse_author_info({key: iter(("John Cleese", "Eric Idle"))}) == ("John Cleese", "Eric Idle")
|
|
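For orientation, here is one implementation consistent with the four tests above; the real pymt function may differ. The behaviour the tests pin down: accept either key, treat a bare string as a single author, default to ("",), and normalize lists and tuples to a tuple:

def _parse_author_info(meta):
    info = meta.get("author", meta.get("authors", ""))
    if isinstance(info, str):
        return (info,)      # single author, or the "" default
    return tuple(info)      # list or tuple of authors

assert _parse_author_info({}) == ("",)
assert _parse_author_info({"author": "John Cleese"}) == ("John Cleese",)
assert _parse_author_info({"authors": ["John Cleese", "Eric Idle"]}) == ("John Cleese", "Eric Idle")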
2b5930ad60c091bef5bb92683b73542b89ab5845
|
viewer_examples/plugins/lineprofile_rgb.py
|
viewer_examples/plugins/lineprofile_rgb.py
|
from skimage import data
from skimage.viewer import ImageViewer
from skimage.viewer.plugins.lineprofile import LineProfile
image = data.chelsea()
viewer = ImageViewer(image)
viewer += LineProfile()
viewer.show()
|
Add viewer example for RGB line profile
|
DOC: Add viewer example for RGB line profile
|
Python
|
bsd-3-clause
|
vighneshbirodkar/scikit-image,michaelpacer/scikit-image,vighneshbirodkar/scikit-image,jwiggins/scikit-image,SamHames/scikit-image,michaelaye/scikit-image,robintw/scikit-image,paalge/scikit-image,pratapvardhan/scikit-image,bsipocz/scikit-image,juliusbierk/scikit-image,ofgulban/scikit-image,warmspringwinds/scikit-image,chriscrosscutler/scikit-image,GaZ3ll3/scikit-image,ajaybhat/scikit-image,GaZ3ll3/scikit-image,robintw/scikit-image,youprofit/scikit-image,vighneshbirodkar/scikit-image,newville/scikit-image,ClinicalGraphics/scikit-image,michaelaye/scikit-image,almarklein/scikit-image,warmspringwinds/scikit-image,rjeli/scikit-image,ClinicalGraphics/scikit-image,dpshelio/scikit-image,newville/scikit-image,oew1v07/scikit-image,bennlich/scikit-image,Britefury/scikit-image,chintak/scikit-image,SamHames/scikit-image,ajaybhat/scikit-image,juliusbierk/scikit-image,chintak/scikit-image,michaelpacer/scikit-image,rjeli/scikit-image,bsipocz/scikit-image,ofgulban/scikit-image,bennlich/scikit-image,chintak/scikit-image,Midafi/scikit-image,oew1v07/scikit-image,emon10005/scikit-image,paalge/scikit-image,pratapvardhan/scikit-image,paalge/scikit-image,WarrenWeckesser/scikits-image,keflavich/scikit-image,chintak/scikit-image,keflavich/scikit-image,Britefury/scikit-image,emon10005/scikit-image,dpshelio/scikit-image,WarrenWeckesser/scikits-image,jwiggins/scikit-image,SamHames/scikit-image,almarklein/scikit-image,rjeli/scikit-image,blink1073/scikit-image,ofgulban/scikit-image,chriscrosscutler/scikit-image,Midafi/scikit-image,blink1073/scikit-image,almarklein/scikit-image,almarklein/scikit-image,Hiyorimi/scikit-image,SamHames/scikit-image,Hiyorimi/scikit-image,youprofit/scikit-image
|
DOC: Add viewer example for RGB line profile
|
from skimage import data
from skimage.viewer import ImageViewer
from skimage.viewer.plugins.lineprofile import LineProfile
image = data.chelsea()
viewer = ImageViewer(image)
viewer += LineProfile()
viewer.show()
|
<commit_before><commit_msg>DOC: Add viewer example for RGB line profile<commit_after>
|
from skimage import data
from skimage.viewer import ImageViewer
from skimage.viewer.plugins.lineprofile import LineProfile
image = data.chelsea()
viewer = ImageViewer(image)
viewer += LineProfile()
viewer.show()
|
DOC: Add viewer example for RGB line profilefrom skimage import data
from skimage.viewer import ImageViewer
from skimage.viewer.plugins.lineprofile import LineProfile
image = data.chelsea()
viewer = ImageViewer(image)
viewer += LineProfile()
viewer.show()
|
<commit_before><commit_msg>DOC: Add viewer example for RGB line profile<commit_after>from skimage import data
from skimage.viewer import ImageViewer
from skimage.viewer.plugins.lineprofile import LineProfile
image = data.chelsea()
viewer = ImageViewer(image)
viewer += LineProfile()
viewer.show()
|
|
3d86c45c74f71dfcc3aede082ba966f677beb934
|
tests/test_notification_messages.py
|
tests/test_notification_messages.py
|
from . import TheInternetTestCase
from helium.api import Text, click
class NotificationMessagesTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/notification_message_rendered"
def test_load_new_message(self):
success = False
while not success:
click("Click here")
failure = Text("Action unsuccesful, please try again").exists()
success = Text("Action successful").exists()
self.assertTrue(failure or success)
|
Add test case for notification messages.
|
Add test case for notification messages.
|
Python
|
mit
|
bugfree-software/the-internet-solution-python
|
Add test case for notification messages.
|
from . import TheInternetTestCase
from helium.api import Text, click
class NotificationMessagesTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/notification_message_rendered"
def test_load_new_message(self):
success = False
while not success:
click("Click here")
failure = Text("Action unsuccesful, please try again").exists()
success = Text("Action successful").exists()
self.assertTrue(failure or success)
|
<commit_before><commit_msg>Add test case for notification messages.<commit_after>
|
from . import TheInternetTestCase
from helium.api import Text, click
class NotificationMessagesTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/notification_message_rendered"
def test_load_new_message(self):
success = False
while not success:
click("Click here")
failure = Text("Action unsuccesful, please try again").exists()
success = Text("Action successful").exists()
self.assertTrue(failure or success)
|
Add test case for notification messages.from . import TheInternetTestCase
from helium.api import Text, click
class NotificationMessagesTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/notification_message_rendered"
def test_load_new_message(self):
success = False
while not success:
click("Click here")
failure = Text("Action unsuccesful, please try again").exists()
success = Text("Action successful").exists()
self.assertTrue(failure or success)
|
<commit_before><commit_msg>Add test case for notification messages.<commit_after>from . import TheInternetTestCase
from helium.api import Text, click
class NotificationMessagesTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/notification_message_rendered"
def test_load_new_message(self):
success = False
while not success:
click("Click here")
failure = Text("Action unsuccesful, please try again").exists()
success = Text("Action successful").exists()
self.assertTrue(failure or success)
|
|
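The while-not-success loop above never terminates if the success banner stops appearing. A bounded retry keeps the same shape without that risk; the callables below stand in for the record's helium click/Text calls:

def click_until_success(click, succeeded, failed, max_attempts=20):
    # retry up to max_attempts, asserting the page is in a known state each pass
    for _ in range(max_attempts):
        click()
        if succeeded():
            return True
        assert failed(), "neither success nor failure message rendered"
    return False

outcomes = iter([False, False, True])
assert click_until_success(lambda: None, lambda: next(outcomes), lambda: True)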
4a1c0ab4e1425b1bfe2ff2843f6ae1ce242997e9
|
st2api/tests/unit/controllers/v1/test_pack_configs.py
|
st2api/tests/unit/controllers/v1/test_pack_configs.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests import FunctionalTest
__all__ = [
'PackConfigsControllerTestCase'
]
class PackConfigsControllerTestCase(FunctionalTest):
register_packs = True
register_pack_configs = True
def test_get_all(self):
resp = self.app.get('/v1/configs')
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(resp.json), 2, '/v1/configs did not return all configs.')
def test_get_one_success(self):
resp = self.app.get('/v1/configs/dummy_pack_1')
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.json['pack'], 'dummy_pack_1')
self.assertEqual(resp.json['values']['api_key'], '{{user.api_key}}')
self.assertEqual(resp.json['values']['region'], 'us-west-1')
def test_get_one_pack_config_doesnt_exist(self):
        # Pack exists, config doesn't
resp = self.app.get('/v1/configs/dummy_pack_2',
expect_errors=True)
self.assertEqual(resp.status_int, 404)
# Pack doesn't exist
resp = self.app.get('/v1/configs/pack_doesnt_exist',
expect_errors=True)
self.assertEqual(resp.status_int, 404)
|
Add some initial tests for pack configs API endpoints.
|
Add some initial tests for pack configs API endpoints.
|
Python
|
apache-2.0
|
nzlosh/st2,peak6/st2,Plexxi/st2,pixelrebel/st2,StackStorm/st2,emedvedev/st2,pixelrebel/st2,lakshmi-kannan/st2,lakshmi-kannan/st2,punalpatel/st2,lakshmi-kannan/st2,nzlosh/st2,pixelrebel/st2,StackStorm/st2,nzlosh/st2,emedvedev/st2,Plexxi/st2,emedvedev/st2,tonybaloney/st2,StackStorm/st2,StackStorm/st2,punalpatel/st2,tonybaloney/st2,tonybaloney/st2,punalpatel/st2,peak6/st2,peak6/st2,nzlosh/st2,Plexxi/st2,Plexxi/st2
|
Add some initial tests for pack configs API endpoints.
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests import FunctionalTest
__all__ = [
'PackConfigsControllerTestCase'
]
class PackConfigsControllerTestCase(FunctionalTest):
register_packs = True
register_pack_configs = True
def test_get_all(self):
resp = self.app.get('/v1/configs')
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(resp.json), 2, '/v1/configs did not return all configs.')
def test_get_one_success(self):
resp = self.app.get('/v1/configs/dummy_pack_1')
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.json['pack'], 'dummy_pack_1')
self.assertEqual(resp.json['values']['api_key'], '{{user.api_key}}')
self.assertEqual(resp.json['values']['region'], 'us-west-1')
def test_get_one_pack_config_doesnt_exist(self):
        # Pack exists, config doesn't
resp = self.app.get('/v1/configs/dummy_pack_2',
expect_errors=True)
self.assertEqual(resp.status_int, 404)
# Pack doesn't exist
resp = self.app.get('/v1/configs/pack_doesnt_exist',
expect_errors=True)
self.assertEqual(resp.status_int, 404)
|
<commit_before><commit_msg>Add some initial tests for pack configs API endpoints.<commit_after>
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests import FunctionalTest
__all__ = [
'PackConfigsControllerTestCase'
]
class PackConfigsControllerTestCase(FunctionalTest):
register_packs = True
register_pack_configs = True
def test_get_all(self):
resp = self.app.get('/v1/configs')
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(resp.json), 2, '/v1/configs did not return all configs.')
def test_get_one_success(self):
resp = self.app.get('/v1/configs/dummy_pack_1')
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.json['pack'], 'dummy_pack_1')
self.assertEqual(resp.json['values']['api_key'], '{{user.api_key}}')
self.assertEqual(resp.json['values']['region'], 'us-west-1')
def test_get_one_pack_config_doesnt_exist(self):
        # Pack exists, config doesn't
resp = self.app.get('/v1/configs/dummy_pack_2',
expect_errors=True)
self.assertEqual(resp.status_int, 404)
# Pack doesn't exist
resp = self.app.get('/v1/configs/pack_doesnt_exist',
expect_errors=True)
self.assertEqual(resp.status_int, 404)
|
Add some initial tests for pack configs API endpoints.# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests import FunctionalTest
__all__ = [
'PackConfigsControllerTestCase'
]
class PackConfigsControllerTestCase(FunctionalTest):
register_packs = True
register_pack_configs = True
def test_get_all(self):
resp = self.app.get('/v1/configs')
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(resp.json), 2, '/v1/configs did not return all configs.')
def test_get_one_success(self):
resp = self.app.get('/v1/configs/dummy_pack_1')
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.json['pack'], 'dummy_pack_1')
self.assertEqual(resp.json['values']['api_key'], '{{user.api_key}}')
self.assertEqual(resp.json['values']['region'], 'us-west-1')
def test_get_one_pack_config_doesnt_exist(self):
        # Pack exists, config doesn't
resp = self.app.get('/v1/configs/dummy_pack_2',
expect_errors=True)
self.assertEqual(resp.status_int, 404)
# Pack doesn't exist
resp = self.app.get('/v1/configs/pack_doesnt_exist',
expect_errors=True)
self.assertEqual(resp.status_int, 404)
|
<commit_before><commit_msg>Add some initial tests for pack configs API endpoints.<commit_after># Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests import FunctionalTest
__all__ = [
'PackConfigsControllerTestCase'
]
class PackConfigsControllerTestCase(FunctionalTest):
register_packs = True
register_pack_configs = True
def test_get_all(self):
resp = self.app.get('/v1/configs')
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(resp.json), 2, '/v1/configs did not return all configs.')
def test_get_one_success(self):
resp = self.app.get('/v1/configs/dummy_pack_1')
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.json['pack'], 'dummy_pack_1')
self.assertEqual(resp.json['values']['api_key'], '{{user.api_key}}')
self.assertEqual(resp.json['values']['region'], 'us-west-1')
def test_get_one_pack_config_doesnt_exist(self):
        # Pack exists, config doesn't
resp = self.app.get('/v1/configs/dummy_pack_2',
expect_errors=True)
self.assertEqual(resp.status_int, 404)
# Pack doesn't exist
resp = self.app.get('/v1/configs/pack_doesnt_exist',
expect_errors=True)
self.assertEqual(resp.status_int, 404)
|
|
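The two 404 checks above share one shape; in plain unittest, a table-driven loop with subTest keeps each failing URL individually reported. A self-contained sketch, with a stub standing in for the webtest app fixture:

import unittest

class FakeApp:
    # stub for the webtest TestApp fixture used in the record
    def get(self, url, expect_errors=False):
        class Resp:
            status_int = 404
        return Resp()

class NotFoundTests(unittest.TestCase):
    app = FakeApp()

    def test_missing_configs(self):
        urls = ("/v1/configs/dummy_pack_2",       # pack exists, config doesn't
                "/v1/configs/pack_doesnt_exist")  # pack doesn't exist at all
        for url in urls:
            with self.subTest(url=url):
                resp = self.app.get(url, expect_errors=True)
                self.assertEqual(resp.status_int, 404)

unittest.main(argv=["fake"], exit=False)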
21d9df797f04cfc2aad4e79d5845486414cffb63
|
tests/astroplpython/data/test_PowerFreqMeasurement.py
|
tests/astroplpython/data/test_PowerFreqMeasurement.py
|
'''
Created on Jul 16, 2014
@author: thomas
'''
import unittest
class TestPowerFrequency (unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_strToXTArray (self):
import astroplpython.data.PowerFrequencyMeasurement as PF
        # test data: strarr mixes integers and floats
#
strarr = ['(1,1)', '(2,2.)', '(2.1,3.)', '(2.018,4.)']
p_f_list = PF.p_f.dbStrToArray(strarr)
#print (str(p_f_list))
self.assertEqual(4, len(p_f_list), "list has right number of elements")
# Check class, return values. In checking values be sure
# to check that we cast back to float
x = [1., 2.0, 2.1, 2.018]
t = [1.000, 2.0, 3.0, 4.0]
i = 0
while (i < 4):
self.assertIsInstance(p_f_list[i], PF.p_f, "is class of PowerFrequency")
self.assertEquals(x[i], p_f_list[i].power, " power value is correct")
self.assertEquals(t[i], p_f_list[i].frequency, "freq value is correct")
i += 1
self.assertEqual(str(p_f_list[0]), "p_f(p:1.0 f:1.0)", "String rep is correct")
if __name__ == "__main__":
unittest.main()
|
Add unit tests for PowerFreqMeasurement and increase
|
Add unit tests for PowerFreqMeasurement and increase
coverage
|
Python
|
mit
|
brianthomas/astroplpython,brianthomas/astroplpython
|
Add unit tests for PowerFreqMeasurement and increase
coverage
|
'''
Created on Jul 16, 2014
@author: thomas
'''
import unittest
class TestPowerFrequency (unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_strToXTArray (self):
import astroplpython.data.PowerFrequencyMeasurement as PF
        # test data: strarr mixes integers and floats
#
strarr = ['(1,1)', '(2,2.)', '(2.1,3.)', '(2.018,4.)']
p_f_list = PF.p_f.dbStrToArray(strarr)
#print (str(p_f_list))
self.assertEqual(4, len(p_f_list), "list has right number of elements")
# Check class, return values. In checking values be sure
# to check that we cast back to float
x = [1., 2.0, 2.1, 2.018]
t = [1.000, 2.0, 3.0, 4.0]
i = 0
while (i < 4):
self.assertIsInstance(p_f_list[i], PF.p_f, "is class of PowerFrequency")
self.assertEquals(x[i], p_f_list[i].power, " power value is correct")
self.assertEquals(t[i], p_f_list[i].frequency, "freq value is correct")
i += 1
self.assertEqual(str(p_f_list[0]), "p_f(p:1.0 f:1.0)", "String rep is correct")
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unit tests for PowerFreqMeasurement and increase
coverage<commit_after>
|
'''
Created on Jul 16, 2014
@author: thomas
'''
import unittest
class TestPowerFrequency (unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_strToXTArray (self):
import astroplpython.data.PowerFrequencyMeasurement as PF
        # test data: strarr mixes integers and floats
#
strarr = ['(1,1)', '(2,2.)', '(2.1,3.)', '(2.018,4.)']
p_f_list = PF.p_f.dbStrToArray(strarr)
#print (str(p_f_list))
self.assertEqual(4, len(p_f_list), "list has right number of elements")
# Check class, return values. In checking values be sure
# to check that we cast back to float
x = [1., 2.0, 2.1, 2.018]
t = [1.000, 2.0, 3.0, 4.0]
i = 0
while (i < 4):
self.assertIsInstance(p_f_list[i], PF.p_f, "is class of PowerFrequency")
self.assertEquals(x[i], p_f_list[i].power, " power value is correct")
self.assertEquals(t[i], p_f_list[i].frequency, "freq value is correct")
i += 1
self.assertEqual(str(p_f_list[0]), "p_f(p:1.0 f:1.0)", "String rep is correct")
if __name__ == "__main__":
unittest.main()
|
Add unit tests for PowerFreqMeasurement and increase
coverage'''
Created on Jul 16, 2014
@author: thomas
'''
import unittest
class TestPowerFrequency (unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_strToXTArray (self):
import astroplpython.data.PowerFrequencyMeasurement as PF
# test data strarr has combination of integer, floats.
#
strarr = ['(1,1)', '(2,2.)', '(2.1,3.)', '(2.018,4.)']
p_f_list = PF.p_f.dbStrToArray(strarr)
#print (str(p_f_list))
self.assertEqual(4, len(p_f_list), "list has right number of elements")
# Check class, return values. In checking values be sure
# to check that we cast back to float
x = [1., 2.0, 2.1, 2.018]
t = [1.000, 2.0, 3.0, 4.0]
i = 0
while (i < 4):
self.assertIsInstance(p_f_list[i], PF.p_f, "is class of PowerFrequency")
self.assertEquals(x[i], p_f_list[i].power, " power value is correct")
self.assertEquals(t[i], p_f_list[i].frequency, "freq value is correct")
i += 1
self.assertEqual(str(p_f_list[0]), "p_f(p:1.0 f:1.0)", "String rep is correct")
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unit tests for PowerFreqMeasurement and increase
coverage<commit_after>'''
Created on Jul 16, 2014
@author: thomas
'''
import unittest
class TestPowerFrequency (unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_strToXTArray (self):
import astroplpython.data.PowerFrequencyMeasurement as PF
# test data strarr has combination of integer, floats.
#
strarr = ['(1,1)', '(2,2.)', '(2.1,3.)', '(2.018,4.)']
p_f_list = PF.p_f.dbStrToArray(strarr)
#print (str(p_f_list))
self.assertEqual(4, len(p_f_list), "list has right number of elements")
# Check class, return values. In checking values be sure
# to check that we cast back to float
x = [1., 2.0, 2.1, 2.018]
t = [1.000, 2.0, 3.0, 4.0]
i = 0
while (i < 4):
self.assertIsInstance(p_f_list[i], PF.p_f, "is class of PowerFrequency")
self.assertEquals(x[i], p_f_list[i].power, " power value is correct")
self.assertEquals(t[i], p_f_list[i].frequency, "freq value is correct")
i += 1
self.assertEqual(str(p_f_list[0]), "p_f(p:1.0 f:1.0)", "String rep is correct")
if __name__ == "__main__":
unittest.main()
|
|
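The tests above drive PF.p_f.dbStrToArray without ever showing it; below is a minimal sketch of a compatible parser, assuming only the class, method, and attribute names the assertions use (the real astroplpython module may differ):

class p_f(object):
    """A single power/frequency measurement parsed from a '(p,f)' string."""
    def __init__(self, power, frequency):
        self.power = float(power)          # cast back to float, as the tests require
        self.frequency = float(frequency)

    def __str__(self):
        return "p_f(p:%s f:%s)" % (self.power, self.frequency)

    @staticmethod
    def dbStrToArray(strarr):
        """Parse a list of '(power,frequency)' strings into p_f objects."""
        result = []
        for s in strarr:
            power, frequency = s.strip("()").split(",")
            result.append(p_f(power, frequency))
        return result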
9cfff243c95490ea09a9ac1eeec47d7089d40d59
|
packages/syft/src/syft/core/node/common/node_manager/ledger_manager.py
|
packages/syft/src/syft/core/node/common/node_manager/ledger_manager.py
|
# stdlib
from typing import Any
from typing import List
# third party
from sqlalchemy.engine import Engine
from sqlalchemy.orm import sessionmaker
# relative
from ..node_table.ledger import Ledger
# from ..exceptions import SetupNotFoundError
from .database_manager import DatabaseManager
class LedgerManager(DatabaseManager):
schema = Ledger
def __init__(self, database: Engine) -> None:
super().__init__(db=database, schema=LedgerManager.schema)
def __setitem__(self, key, value):
if super().contain(entity_name=key):
super().delete(entity_name=key)
super().register(value)
def __getitem__(self, key):
if super().contain(entity_name=key):
            return super().first(entity_name=key)
else:
return None
|
Create new node manager: LedgerManager
|
Create new node manager: LedgerManager
|
Python
|
apache-2.0
|
OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft
|
Create new node manager: LedgerManager
|
# stdlib
from typing import Any
from typing import List
# third party
from sqlalchemy.engine import Engine
from sqlalchemy.orm import sessionmaker
# relative
from ..node_table.ledger import Ledger
# from ..exceptions import SetupNotFoundError
from .database_manager import DatabaseManager
class LedgerManager(DatabaseManager):
schema = Ledger
def __init__(self, database: Engine) -> None:
super().__init__(db=database, schema=LedgerManager.schema)
def __setitem__(self, key, value):
if super().contain(entity_name=key):
super().delete(entity_name=key)
super().register(value)
def __getitem__(self, key):
if super().contain(entity_name=key):
            return super().first(entity_name=key)
else:
return None
|
<commit_before><commit_msg>Create new node manager: LedgerManager<commit_after>
|
# stdlib
from typing import Any
from typing import List
# third party
from sqlalchemy.engine import Engine
from sqlalchemy.orm import sessionmaker
# relative
from ..node_table.ledger import Ledger
# from ..exceptions import SetupNotFoundError
from .database_manager import DatabaseManager
class LedgerManager(DatabaseManager):
schema = Ledger
def __init__(self, database: Engine) -> None:
super().__init__(db=database, schema=LedgerManager.schema)
def __setitem__(self, key, value):
if super().contain(entity_name=key):
super().delete(entity_name=key)
super().register(value)
def __getitem__(self, key):
if super().contain(entity_name=key):
            return super().first(entity_name=key)
else:
return None
|
Create new node manager: LedgerManager# stdlib
from typing import Any
from typing import List
# third party
from sqlalchemy.engine import Engine
from sqlalchemy.orm import sessionmaker
# relative
from ..node_table.ledger import Ledger
# from ..exceptions import SetupNotFoundError
from .database_manager import DatabaseManager
class LedgerManager(DatabaseManager):
schema = Ledger
def __init__(self, database: Engine) -> None:
super().__init__(db=database, schema=LedgerManager.schema)
def __setitem__(self, key, value):
if super().contain(entity_name=key):
super().delete(entity_name=key)
super().register(value)
def __getitem__(self, key):
if super().contain(entity_name=key):
            return super().first(entity_name=key)
else:
return None
|
<commit_before><commit_msg>Create new node manager: LedgerManager<commit_after># stdlib
from typing import Any
from typing import List
# third party
from sqlalchemy.engine import Engine
from sqlalchemy.orm import sessionmaker
# relative
from ..node_table.ledger import Ledger
# from ..exceptions import SetupNotFoundError
from .database_manager import DatabaseManager
class LedgerManager(DatabaseManager):
schema = Ledger
def __init__(self, database: Engine) -> None:
super().__init__(db=database, schema=LedgerManager.schema)
def __setitem__(self, key, value):
if super().contain(entity_name=key):
super().delete(entity_name=key)
super().register(value)
def __getitem__(self, key):
if super().contain(entity_name=key):
            return super().first(entity_name=key)
else:
return None
|
|
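A short usage sketch for the dict-style interface above; the engine URL and the entity_name column are illustrative assumptions about the Ledger schema:

from sqlalchemy import create_engine

engine = create_engine("sqlite://")              # illustrative in-memory engine
ledgers = LedgerManager(engine)

ledgers["alice"] = Ledger(entity_name="alice")   # upsert: any existing row is deleted first
entry = ledgers["alice"]                         # first() lookup; None when the key is absent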
da287a8dd661405b97abefbd209b98dea89388cb
|
temba/flows/migrations/0024_advance_stuck_runs.py
|
temba/flows/migrations/0024_advance_stuck_runs.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def advance_stuck_runs(apps, schema_editor):
# this data migration is not forward-compatible
from temba.flows.models import Flow, FlowStep, FlowRun, RuleSet
from temba.msgs.models import Msg
flows = Flow.objects.filter(flow_type='F')
for flow in flows:
# looking for flows that start with a passive ruleset
ruleset = RuleSet.objects.filter(uuid=flow.entry_uuid, flow=flow).first()
if ruleset and not ruleset.is_pause():
# now see if there are any active steps at our current flow
steps = FlowStep.objects.filter(run__is_active=True, step_uuid=ruleset.uuid, rule_value=None, left_on=None).select_related('contact')
if steps:
flow.ensure_current_version()
steps = FlowStep.objects.filter(run__is_active=True, step_uuid=ruleset.uuid, rule_value=None, left_on=None).select_related('contact')
if steps:
print '\nAdvancing %d steps for %s:%s' % (len(steps), flow.org.name, flow.name)
for idx, step in enumerate(steps):
if (idx+1) % 100 == 0:
print '\n\n *** Step %d of %d\n\n' % (idx+1, len(steps))
# force them to be handled again
msg = Msg(contact=step.contact, text='', id=0)
Flow.handle_destination(ruleset, step, step.run, msg)
class Migration(migrations.Migration):
dependencies = [
('flows', '0023_new_split_dialog'),
]
operations = [
migrations.RunPython(advance_stuck_runs)
]
|
Add migration for stuck runs
|
Add migration for stuck runs
|
Python
|
agpl-3.0
|
tsotetsi/textily-web,ewheeler/rapidpro,tsotetsi/textily-web,reyrodrigues/EU-SMS,praekelt/rapidpro,pulilab/rapidpro,pulilab/rapidpro,ewheeler/rapidpro,tsotetsi/textily-web,praekelt/rapidpro,ewheeler/rapidpro,pulilab/rapidpro,pulilab/rapidpro,reyrodrigues/EU-SMS,tsotetsi/textily-web,reyrodrigues/EU-SMS,praekelt/rapidpro,pulilab/rapidpro,praekelt/rapidpro,ewheeler/rapidpro,tsotetsi/textily-web
|
Add migration for stuck runs
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def advance_stuck_runs(apps, schema_editor):
# this data migration is not forward-compatible
from temba.flows.models import Flow, FlowStep, FlowRun, RuleSet
from temba.msgs.models import Msg
flows = Flow.objects.filter(flow_type='F')
for flow in flows:
# looking for flows that start with a passive ruleset
ruleset = RuleSet.objects.filter(uuid=flow.entry_uuid, flow=flow).first()
if ruleset and not ruleset.is_pause():
# now see if there are any active steps at our current flow
steps = FlowStep.objects.filter(run__is_active=True, step_uuid=ruleset.uuid, rule_value=None, left_on=None).select_related('contact')
if steps:
flow.ensure_current_version()
steps = FlowStep.objects.filter(run__is_active=True, step_uuid=ruleset.uuid, rule_value=None, left_on=None).select_related('contact')
if steps:
print '\nAdvancing %d steps for %s:%s' % (len(steps), flow.org.name, flow.name)
for idx, step in enumerate(steps):
if (idx+1) % 100 == 0:
print '\n\n *** Step %d of %d\n\n' % (idx+1, len(steps))
# force them to be handled again
msg = Msg(contact=step.contact, text='', id=0)
Flow.handle_destination(ruleset, step, step.run, msg)
class Migration(migrations.Migration):
dependencies = [
('flows', '0023_new_split_dialog'),
]
operations = [
migrations.RunPython(advance_stuck_runs)
]
|
<commit_before><commit_msg>Add migration for stuck runs<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def advance_stuck_runs(apps, schema_editor):
# this data migration is not forward-compatible
from temba.flows.models import Flow, FlowStep, FlowRun, RuleSet
from temba.msgs.models import Msg
flows = Flow.objects.filter(flow_type='F')
for flow in flows:
# looking for flows that start with a passive ruleset
ruleset = RuleSet.objects.filter(uuid=flow.entry_uuid, flow=flow).first()
if ruleset and not ruleset.is_pause():
# now see if there are any active steps at our current flow
steps = FlowStep.objects.filter(run__is_active=True, step_uuid=ruleset.uuid, rule_value=None, left_on=None).select_related('contact')
if steps:
flow.ensure_current_version()
steps = FlowStep.objects.filter(run__is_active=True, step_uuid=ruleset.uuid, rule_value=None, left_on=None).select_related('contact')
if steps:
print '\nAdvancing %d steps for %s:%s' % (len(steps), flow.org.name, flow.name)
for idx, step in enumerate(steps):
if (idx+1) % 100 == 0:
print '\n\n *** Step %d of %d\n\n' % (idx+1, len(steps))
# force them to be handled again
msg = Msg(contact=step.contact, text='', id=0)
Flow.handle_destination(ruleset, step, step.run, msg)
class Migration(migrations.Migration):
dependencies = [
('flows', '0023_new_split_dialog'),
]
operations = [
migrations.RunPython(advance_stuck_runs)
]
|
Add migration for stuck runs# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def advance_stuck_runs(apps, schema_editor):
# this data migration is not forward-compatible
from temba.flows.models import Flow, FlowStep, FlowRun, RuleSet
from temba.msgs.models import Msg
flows = Flow.objects.filter(flow_type='F')
for flow in flows:
# looking for flows that start with a passive ruleset
ruleset = RuleSet.objects.filter(uuid=flow.entry_uuid, flow=flow).first()
if ruleset and not ruleset.is_pause():
# now see if there are any active steps at our current flow
steps = FlowStep.objects.filter(run__is_active=True, step_uuid=ruleset.uuid, rule_value=None, left_on=None).select_related('contact')
if steps:
flow.ensure_current_version()
steps = FlowStep.objects.filter(run__is_active=True, step_uuid=ruleset.uuid, rule_value=None, left_on=None).select_related('contact')
if steps:
print '\nAdvancing %d steps for %s:%s' % (len(steps), flow.org.name, flow.name)
for idx, step in enumerate(steps):
if (idx+1) % 100 == 0:
print '\n\n *** Step %d of %d\n\n' % (idx+1, len(steps))
# force them to be handled again
msg = Msg(contact=step.contact, text='', id=0)
Flow.handle_destination(ruleset, step, step.run, msg)
class Migration(migrations.Migration):
dependencies = [
('flows', '0023_new_split_dialog'),
]
operations = [
migrations.RunPython(advance_stuck_runs)
]
|
<commit_before><commit_msg>Add migration for stuck runs<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def advance_stuck_runs(apps, schema_editor):
# this data migration is not forward-compatible
from temba.flows.models import Flow, FlowStep, FlowRun, RuleSet
from temba.msgs.models import Msg
flows = Flow.objects.filter(flow_type='F')
for flow in flows:
# looking for flows that start with a passive ruleset
ruleset = RuleSet.objects.filter(uuid=flow.entry_uuid, flow=flow).first()
if ruleset and not ruleset.is_pause():
# now see if there are any active steps at our current flow
steps = FlowStep.objects.filter(run__is_active=True, step_uuid=ruleset.uuid, rule_value=None, left_on=None).select_related('contact')
if steps:
flow.ensure_current_version()
steps = FlowStep.objects.filter(run__is_active=True, step_uuid=ruleset.uuid, rule_value=None, left_on=None).select_related('contact')
if steps:
print '\nAdvancing %d steps for %s:%s' % (len(steps), flow.org.name, flow.name)
for idx, step in enumerate(steps):
if (idx+1) % 100 == 0:
print '\n\n *** Step %d of %d\n\n' % (idx+1, len(steps))
# force them to be handled again
msg = Msg(contact=step.contact, text='', id=0)
Flow.handle_destination(ruleset, step, step.run, msg)
class Migration(migrations.Migration):
dependencies = [
('flows', '0023_new_split_dialog'),
]
operations = [
migrations.RunPython(advance_stuck_runs)
]
|
|
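One detail worth noting for migrations like this: RunPython without a reverse function makes the migration irreversible, and migrating backwards will raise an error. On Django versions that ship it, the usual pattern is to pass an explicit no-op reverse:

operations = [
    migrations.RunPython(advance_stuck_runs, migrations.RunPython.noop)
]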
fe5a5e4a601a332a2067b8cfe080c6e5cea25dae
|
quilt/cli/meta.py
|
quilt/cli/meta.py
|
# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@googlemail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import os
import sys
from optparse import OptionParser
command_map = dict()
def register_command(name, command_class):
command_map[name] = command_class
def find_command(name):
return command_map.get(name, None)
def list_commands():
return sorted(command_map.items())
class CommandMetaClass(type):
def __new__(meta, name, bases, dict):
cls = type.__new__(meta, name, bases, dict)
if cls.name is not None:
register_command(cls.name, cls)
return cls
class Command(object):
__metaclass__ = CommandMetaClass
min_args = 0
usage = ""
patches_dir = "patches"
pc_dir = ".pc"
name = None
def parse(self, args):
        parser = OptionParser(usage=self.usage)
        self.add_args(parser)
        (options, pargs) = parser.parse_args(args)
        if len(pargs) < self.min_args:
parser.print_usage()
sys.exit(1)
self.run(options, pargs)
def run(self, options, args):
pass
def add_args(self, parser):
pass
def get_patches_dir(self):
patches_dir = os.environ.get("QUILT_PATCHES")
if not patches_dir:
patches_dir = self.patches_dir
return patches_dir
def get_pc_dir(self):
pc_dir = os.environ.get("QUILT_PC")
if not pc_dir:
pc_dir = self.pc_dir
return pc_dir
|
Introduce new quilt cli Command class
|
Introduce new quilt cli Command class
All cli commands should derive from this new class to simplify the
registration of new commands. The registration is done in the Command
class via a Meta class automatically.
|
Python
|
mit
|
bjoernricks/python-quilt,vadmium/python-quilt
|
Introduce new quilt cli Command class
All cli commands should derive from this new class to simplify the
registration of new commands. The registration is done in the Command
class via a Meta class automatically.
|
# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@googlemail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import os
import sys
from optparse import OptionParser
command_map = dict()
def register_command(name, command_class):
command_map[name] = command_class
def find_command(name):
return command_map.get(name, None)
def list_commands():
return sorted(command_map.items())
class CommandMetaClass(type):
def __new__(meta, name, bases, dict):
cls = type.__new__(meta, name, bases, dict)
if cls.name is not None:
register_command(cls.name, cls)
return cls
class Command(object):
__metaclass__ = CommandMetaClass
min_args = 0
usage = ""
patches_dir = "patches"
pc_dir = ".pc"
name = None
def parse(self, args):
        parser = OptionParser(usage=self.usage)
        self.add_args(parser)
        (options, pargs) = parser.parse_args(args)
        if len(pargs) < self.min_args:
parser.print_usage()
sys.exit(1)
self.run(options, pargs)
def run(self, options, args):
pass
def add_args(self, parser):
pass
def get_patches_dir(self):
patches_dir = os.environ.get("QUILT_PATCHES")
if not patches_dir:
patches_dir = self.patches_dir
return patches_dir
def get_pc_dir(self):
pc_dir = os.environ.get("QUILT_PC")
if not pc_dir:
pc_dir = self.pc_dir
return pc_dir
|
<commit_before><commit_msg>Introduce new quilt cli Command class
All cli commands should derive from this new class to simplify the
registration of new commands. The registration is done in the Command
class via a Meta class automatically.<commit_after>
|
# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@googlemail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import os
import sys
from optparse import OptionParser
command_map = dict()
def register_command(name, command_class):
command_map[name] = command_class
def find_command(name):
return command_map.get(name, None)
def list_commands():
return sorted(command_map.items())
class CommandMetaClass(type):
def __new__(meta, name, bases, dict):
cls = type.__new__(meta, name, bases, dict)
if cls.name is not None:
register_command(cls.name, cls)
return cls
class Command(object):
__metaclass__ = CommandMetaClass
min_args = 0
usage = ""
patches_dir = "patches"
pc_dir = ".pc"
name = None
def parse(self, args):
        parser = OptionParser(usage=self.usage)
        self.add_args(parser)
        (options, pargs) = parser.parse_args(args)
        if len(pargs) < self.min_args:
parser.print_usage()
sys.exit(1)
self.run(options, pargs)
def run(self, options, args):
pass
def add_args(self, parser):
pass
def get_patches_dir(self):
patches_dir = os.environ.get("QUILT_PATCHES")
if not patches_dir:
patches_dir = self.patches_dir
return patches_dir
def get_pc_dir(self):
pc_dir = os.environ.get("QUILT_PC")
if not pc_dir:
pc_dir = self.pc_dir
return pc_dir
|
Introduce new quilt cli Command class
All cli commands should derive from this new class to simplify the
registration of new commands. The registration is done in the Command
class via a Meta class automatically.# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@googlemail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import os
import sys
from optparse import OptionParser
command_map = dict()
def register_command(name, command_class):
command_map[name] = command_class
def find_command(name):
return command_map.get(name, None)
def list_commands():
return sorted(command_map.items())
class CommandMetaClass(type):
def __new__(meta, name, bases, dict):
cls = type.__new__(meta, name, bases, dict)
if cls.name is not None:
register_command(cls.name, cls)
return cls
class Command(object):
__metaclass__ = CommandMetaClass
min_args = 0
usage = ""
patches_dir = "patches"
pc_dir = ".pc"
name = None
def parse(self, args):
        parser = OptionParser(usage=self.usage)
        self.add_args(parser)
        (options, pargs) = parser.parse_args(args)
        if len(pargs) < self.min_args:
parser.print_usage()
sys.exit(1)
self.run(options, pargs)
def run(self, options, args):
pass
def add_args(self, parser):
pass
def get_patches_dir(self):
patches_dir = os.environ.get("QUILT_PATCHES")
if not patches_dir:
patches_dir = self.patches_dir
return patches_dir
def get_pc_dir(self):
pc_dir = os.environ.get("QUILT_PC")
if not pc_dir:
pc_dir = self.pc_dir
return pc_dir
|
<commit_before><commit_msg>Introduce new quilt cli Command class
All cli commands should derive from this new class to simplify the
registration of new commands. The registration is done in the Command
class via a Meta class automatically.<commit_after># vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@googlemail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import os
import sys
from optparse import OptionParser
command_map = dict()
def register_command(name, command_class):
command_map[name] = command_class
def find_command(name):
return command_map.get(name, None)
def list_commands():
return sorted(command_map.items())
class CommandMetaClass(type):
def __new__(meta, name, bases, dict):
cls = type.__new__(meta, name, bases, dict)
if cls.name is not None:
register_command(cls.name, cls)
return cls
class Command(object):
__metaclass__ = CommandMetaClass
min_args = 0
usage = ""
patches_dir = "patches"
pc_dir = ".pc"
name = None
def parse(self, args):
        parser = OptionParser(usage=self.usage)
        self.add_args(parser)
        (options, pargs) = parser.parse_args(args)
        if len(pargs) < self.min_args:
parser.print_usage()
sys.exit(1)
self.run(options, pargs)
def run(self, options, args):
pass
def add_args(self, parser):
pass
def get_patches_dir(self):
patches_dir = os.environ.get("QUILT_PATCHES")
if not patches_dir:
patches_dir = self.patches_dir
return patches_dir
def get_pc_dir(self):
pc_dir = os.environ.get("QUILT_PC")
if not pc_dir:
pc_dir = self.pc_dir
return pc_dir
|
|
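A minimal sketch of how the metaclass registration above is consumed; the command name and body are illustrative, only the Command API comes from the record:

class SeriesCommand(Command):
    name = "series"                  # picked up by CommandMetaClass and registered
    usage = "%prog series"

    def run(self, options, args):
        print os.listdir(self.get_patches_dir())

# dispatching from an entry point:
cls = find_command("series")
if cls is not None:
    cls().parse(sys.argv[2:])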
7bda769dc62c7621a7606c5de060852b33cd7595
|
bookmarks/core/migrations/0004_auto_20160901_2322.py
|
bookmarks/core/migrations/0004_auto_20160901_2322.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-01 11:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20160901_2303'),
]
operations = [
migrations.AlterField(
model_name='bookmark',
name='url',
field=models.URLField(max_length=500),
),
]
|
Increase max length of url field.
|
Increase max length of url field.
|
Python
|
mit
|
tom-henderson/bookmarks,tom-henderson/bookmarks,tom-henderson/bookmarks
|
Increase max length of url field.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-01 11:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20160901_2303'),
]
operations = [
migrations.AlterField(
model_name='bookmark',
name='url',
field=models.URLField(max_length=500),
),
]
|
<commit_before><commit_msg>Increase max length of url field.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-01 11:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20160901_2303'),
]
operations = [
migrations.AlterField(
model_name='bookmark',
name='url',
field=models.URLField(max_length=500),
),
]
|
Increase max length of url field.# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-01 11:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20160901_2303'),
]
operations = [
migrations.AlterField(
model_name='bookmark',
name='url',
field=models.URLField(max_length=500),
),
]
|
<commit_before><commit_msg>Increase max length of url field.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-01 11:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20160901_2303'),
]
operations = [
migrations.AlterField(
model_name='bookmark',
name='url',
field=models.URLField(max_length=500),
),
]
|
|
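For context, Django's URLField defaults to max_length=200, so the model declaration matching this migration would read:

class Bookmark(models.Model):
    url = models.URLField(max_length=500)   # the default max_length of 200 was too short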
c120dccb5ca78b7601a1606fef1b25f1b18b3f8c
|
xgds_core/sse-test/test-data-gen.py
|
xgds_core/sse-test/test-data-gen.py
|
#! /usr/bin/env python
import time
from redis import StrictRedis
from flask_sse import Message
import json
count = 0
redis=StrictRedis.from_url("redis://localhost")
print "Sending SSE events..."
while True:
msgBody = {"message":"I can count to %s" % count}
messageObj = Message(msgBody, type='greeting')
msg_json = json.dumps(messageObj.to_dict())
subCount = redis.publish(channel='sse', message=msg_json)
count += 1
time.sleep(0.25)
|
Add test data generator script
|
Add test data generator script
|
Python
|
apache-2.0
|
xgds/xgds_core,xgds/xgds_core,xgds/xgds_core
|
Add test data generator script
|
#! /usr/bin/env python
import time
from redis import StrictRedis
from flask_sse import Message
import json
count = 0
redis=StrictRedis.from_url("redis://localhost")
print "Sending SSE events..."
while True:
msgBody = {"message":"I can count to %s" % count}
messageObj = Message(msgBody, type='greeting')
msg_json = json.dumps(messageObj.to_dict())
subCount = redis.publish(channel='sse', message=msg_json)
count += 1
time.sleep(0.25)
|
<commit_before><commit_msg>Add test data generator script<commit_after>
|
#! /usr/bin/env python
import time
from redis import StrictRedis
from flask_sse import Message
import json
count = 0
redis=StrictRedis.from_url("redis://localhost")
print "Sending SSE events..."
while True:
msgBody = {"message":"I can count to %s" % count}
messageObj = Message(msgBody, type='greeting')
msg_json = json.dumps(messageObj.to_dict())
subCount = redis.publish(channel='sse', message=msg_json)
count += 1
time.sleep(0.25)
|
Add test data generator script#! /usr/bin/env python
import time
from redis import StrictRedis
from flask_sse import Message
import json
count = 0
redis=StrictRedis.from_url("redis://localhost")
print "Sending SSE events..."
while True:
msgBody = {"message":"I can count to %s" % count}
messageObj = Message(msgBody, type='greeting')
msg_json = json.dumps(messageObj.to_dict())
subCount = redis.publish(channel='sse', message=msg_json)
count += 1
time.sleep(0.25)
|
<commit_before><commit_msg>Add test data generator script<commit_after>#! /usr/bin/env python
import time
from redis import StrictRedis
from flask_sse import Message
import json
count = 0
redis=StrictRedis.from_url("redis://localhost")
print "Sending SSE events..."
while True:
msgBody = {"message":"I can count to %s" % count}
messageObj = Message(msgBody, type='greeting')
msg_json = json.dumps(messageObj.to_dict())
subCount = redis.publish(channel='sse', message=msg_json)
count += 1
time.sleep(0.25)
|
|
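A minimal consumer for the generator above, assuming the same Redis URL and the 'sse' channel; flask-sse clients would normally receive these events over the HTTP stream instead:

import json
from redis import StrictRedis

redis = StrictRedis.from_url("redis://localhost")
pubsub = redis.pubsub(ignore_subscribe_messages=True)
pubsub.subscribe("sse")

for item in pubsub.listen():
    event = json.loads(item["data"])
    print event["type"], event["data"]["message"]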
5dee79a170c02c2d2b17899538d5d41c3cf0ef49
|
tests/test_mailparsers_bug_submitted.py
|
tests/test_mailparsers_bug_submitted.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from DebianChangesBot.mailparsers import BugSubmittedParser as p
class TestMailParserBugSubmitted(unittest.TestCase):
def setUp(self):
self.headers = {
'Subject': 'Bug#123456: Bug title',
'From': 'Submitter Name <name@host.tld>',
}
self.body = [
"Package: package-name",
"Version: version-here",
"",
"Description"
]
def testSimple(self):
msg = p.parse(self.headers, self.body)
self.assert_(msg)
self.assertEqual(msg.package, 'package-name')
self.assertEqual(msg.version, 'version-here')
self.assertEqual(msg.by, 'Submitter Name <name@host.tld>')
def testVersionWithSpaces(self):
self.body[1] = "Version: version with spaces"
msg = p.parse(self.headers, self.body)
self.failIf(msg.version)
if __name__ == "__main__":
unittest.main()
|
Add some simple tests for bug_submitted
|
Add some simple tests for bug_submitted
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@chris-lamb.co.uk>
|
Python
|
agpl-3.0
|
lamby/debian-devel-changes-bot,sebastinas/debian-devel-changes-bot,xtaran/debian-devel-changes-bot,lamby/debian-devel-changes-bot,xtaran/debian-devel-changes-bot,lamby/debian-devel-changes-bot
|
Add some simple tests for bug_submitted
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@chris-lamb.co.uk>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from DebianChangesBot.mailparsers import BugSubmittedParser as p
class TestMailParserBugSubmitted(unittest.TestCase):
def setUp(self):
self.headers = {
'Subject': 'Bug#123456: Bug title',
'From': 'Submitter Name <name@host.tld>',
}
self.body = [
"Package: package-name",
"Version: version-here",
"",
"Description"
]
def testSimple(self):
msg = p.parse(self.headers, self.body)
self.assert_(msg)
self.assertEqual(msg.package, 'package-name')
self.assertEqual(msg.version, 'version-here')
self.assertEqual(msg.by, 'Submitter Name <name@host.tld>')
def testVersionWithSpaces(self):
self.body[1] = "Version: version with spaces"
msg = p.parse(self.headers, self.body)
self.failIf(msg.version)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add some simple tests for bug_submitted
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@chris-lamb.co.uk><commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from DebianChangesBot.mailparsers import BugSubmittedParser as p
class TestMailParserBugSubmitted(unittest.TestCase):
def setUp(self):
self.headers = {
'Subject': 'Bug#123456: Bug title',
'From': 'Submitter Name <name@host.tld>',
}
self.body = [
"Package: package-name",
"Version: version-here",
"",
"Description"
]
def testSimple(self):
msg = p.parse(self.headers, self.body)
self.assert_(msg)
self.assertEqual(msg.package, 'package-name')
self.assertEqual(msg.version, 'version-here')
self.assertEqual(msg.by, 'Submitter Name <name@host.tld>')
def testVersionWithSpaces(self):
self.body[1] = "Version: version with spaces"
msg = p.parse(self.headers, self.body)
self.failIf(msg.version)
if __name__ == "__main__":
unittest.main()
|
Add some simple tests for bug_submitted
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@chris-lamb.co.uk>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from DebianChangesBot.mailparsers import BugSubmittedParser as p
class TestMailParserBugSubmitted(unittest.TestCase):
def setUp(self):
self.headers = {
'Subject': 'Bug#123456: Bug title',
'From': 'Submitter Name <name@host.tld>',
}
self.body = [
"Package: package-name",
"Version: version-here",
"",
"Description"
]
def testSimple(self):
msg = p.parse(self.headers, self.body)
self.assert_(msg)
self.assertEqual(msg.package, 'package-name')
self.assertEqual(msg.version, 'version-here')
self.assertEqual(msg.by, 'Submitter Name <name@host.tld>')
def testVersionWithSpaces(self):
self.body[1] = "Version: version with spaces"
msg = p.parse(self.headers, self.body)
self.failIf(msg.version)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add some simple tests for bug_submitted
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@chris-lamb.co.uk><commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from DebianChangesBot.mailparsers import BugSubmittedParser as p
class TestMailParserBugSubmitted(unittest.TestCase):
def setUp(self):
self.headers = {
'Subject': 'Bug#123456: Bug title',
'From': 'Submitter Name <name@host.tld>',
}
self.body = [
"Package: package-name",
"Version: version-here",
"",
"Description"
]
def testSimple(self):
msg = p.parse(self.headers, self.body)
self.assert_(msg)
self.assertEqual(msg.package, 'package-name')
self.assertEqual(msg.version, 'version-here')
self.assertEqual(msg.by, 'Submitter Name <name@host.tld>')
def testVersionWithSpaces(self):
self.body[1] = "Version: version with spaces"
msg = p.parse(self.headers, self.body)
self.failIf(msg.version)
if __name__ == "__main__":
unittest.main()
|
|
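The tests exercise BugSubmittedParser without showing it; a compatible sketch, assuming the attribute names the assertions use and that a Version pseudo-header containing spaces is rejected:

import re

class _Msg(object):
    package = version = by = None

class BugSubmittedParser(object):
    @staticmethod
    def parse(headers, body):
        msg = _Msg()
        msg.by = headers.get('From')
        for line in body:
            m = re.match(r'^Package: (\S+)$', line)
            if m:
                msg.package = m.group(1)
            m = re.match(r'^Version: (\S+)$', line)
            if m:
                msg.version = m.group(1)
        return msg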
6de47a88a1c8a1a17e92958683bf7d6240b4dc0f
|
platforms/m3/programming/snsv7_par_csv_callback.py
|
platforms/m3/programming/snsv7_par_csv_callback.py
|
import csv
from datetime import datetime
import sys
logfile = open('mbus_snoop_log.txt','w')
wr = csv.writer(open('snsv7_snoop.txt','w'), delimiter=',', lineterminator='\n')
wr.writerow(['DATE','TIME','C_MEAS','C_REF','C_REV','C_PAR'])
count = 0
cdc_cmeas = 0
cdc_crev = 0
cdc_cpar = 0
cdc_cref = 0
cdc_date = 0
cdc_time = 0
cdc_group = False
def callback(time, address, data, cb0=-1, cb1=-1):
global count
global cdc_cmeas, cdc_crev, cdc_cpar, cdc_cref, cdc_date, cdc_time, cdc_group
# m3_ice snoop prints this for you now [though needs to add count - can do]
#print("@" + str(count) + " Time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] + " ADDR: 0x" + address.encode('hex') + " DATA: 0x" + data.encode('hex') + " (ACK: " + str(not cb1) + ")")
print >> logfile, "@" + str(count) + " Time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] + " ADDR: 0x" + address.encode('hex') + " DATA: 0x" + data.encode('hex') + " (ACK: " + str(not cb1) + ")"
if (str(int(address.encode('hex'),16))=="116"):
cdc_group = True
cdc_cmeas = int(data.encode('hex'),16)
cdc_time = datetime.now().strftime("%H:%M:%S.%f")[:-3]
cdc_date = datetime.now().strftime("%Y-%m-%d")
elif (str(int(address.encode('hex'),16))=="118"):
if cdc_group:
cdc_cref = int(data.encode('hex'),16)
elif (str(int(address.encode('hex'),16))=="119"):
if cdc_group:
cdc_crev = int(data.encode('hex'),16)
elif (str(int(address.encode('hex'),16))=="121"):
if cdc_group:
cdc_cpar = int(data.encode('hex'),16)
wr.writerow([cdc_date,cdc_time,cdc_cmeas,cdc_cref,cdc_crev,cdc_cpar])
count += 1
cdc_group = False
#if count>args.killcount:
# sys.exit()
# TODO(Pat): I need to add support for custom arguments, which I will do
if count > 1000:
sys.exit()
|
Add example for m3_ice + callback
|
Add example for m3_ice + callback
Usage:
$ m3_ice snoop -c snsv7_par_csv_callback.py
|
Python
|
apache-2.0
|
lab11/M-ulator,lab11/M-ulator,lab11/M-ulator,lab11/M-ulator,lab11/M-ulator,lab11/M-ulator,lab11/M-ulator
|
Add example for m3_ice + callback
Usage:
$ m3_ice snoop -c snsv7_par_csv_callback.py
|
import csv
from datetime import datetime
import sys
logfile = open('mbus_snoop_log.txt','w')
wr = csv.writer(open('snsv7_snoop.txt','w'), delimiter=',', lineterminator='\n')
wr.writerow(['DATE','TIME','C_MEAS','C_REF','C_REV','C_PAR'])
count = 0
cdc_cmeas = 0
cdc_crev = 0
cdc_cpar = 0
cdc_cref = 0
cdc_date = 0
cdc_time = 0
cdc_group = False
def callback(time, address, data, cb0=-1, cb1=-1):
global count
global cdc_cmeas, cdc_crev, cdc_cpar, cdc_cref, cdc_date, cdc_time, cdc_group
# m3_ice snoop prints this for you now [though needs to add count - can do]
#print("@" + str(count) + " Time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] + " ADDR: 0x" + address.encode('hex') + " DATA: 0x" + data.encode('hex') + " (ACK: " + str(not cb1) + ")")
print >> logfile, "@" + str(count) + " Time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] + " ADDR: 0x" + address.encode('hex') + " DATA: 0x" + data.encode('hex') + " (ACK: " + str(not cb1) + ")"
if (str(int(address.encode('hex'),16))=="116"):
cdc_group = True
cdc_cmeas = int(data.encode('hex'),16)
cdc_time = datetime.now().strftime("%H:%M:%S.%f")[:-3]
cdc_date = datetime.now().strftime("%Y-%m-%d")
elif (str(int(address.encode('hex'),16))=="118"):
if cdc_group:
cdc_cref = int(data.encode('hex'),16)
elif (str(int(address.encode('hex'),16))=="119"):
if cdc_group:
cdc_crev = int(data.encode('hex'),16)
elif (str(int(address.encode('hex'),16))=="121"):
if cdc_group:
cdc_cpar = int(data.encode('hex'),16)
wr.writerow([cdc_date,cdc_time,cdc_cmeas,cdc_cref,cdc_crev,cdc_cpar])
count += 1
cdc_group = False
#if count>args.killcount:
# sys.exit()
# TODO(Pat): I need to add support for custom arguments, which I will do
if count > 1000:
sys.exit()
|
<commit_before><commit_msg>Add example for m3_ice + callback
Usage:
$ m3_ice snoop -c snsv7_par_csv_callback.py<commit_after>
|
import csv
from datetime import datetime
import sys
logfile = open('mbus_snoop_log.txt','w')
wr = csv.writer(open('snsv7_snoop.txt','w'), delimiter=',', lineterminator='\n')
wr.writerow(['DATE','TIME','C_MEAS','C_REF','C_REV','C_PAR'])
count = 0
cdc_cmeas = 0
cdc_crev = 0
cdc_cpar = 0
cdc_cref = 0
cdc_date = 0
cdc_time = 0
cdc_group = False
def callback(time, address, data, cb0=-1, cb1=-1):
global count
global cdc_cmeas, cdc_crev, cdc_cpar, cdc_cref, cdc_date, cdc_time, cdc_group
# m3_ice snoop prints this for you now [though needs to add count - can do]
#print("@" + str(count) + " Time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] + " ADDR: 0x" + address.encode('hex') + " DATA: 0x" + data.encode('hex') + " (ACK: " + str(not cb1) + ")")
print >> logfile, "@" + str(count) + " Time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] + " ADDR: 0x" + address.encode('hex') + " DATA: 0x" + data.encode('hex') + " (ACK: " + str(not cb1) + ")"
if (str(int(address.encode('hex'),16))=="116"):
cdc_group = True
cdc_cmeas = int(data.encode('hex'),16)
cdc_time = datetime.now().strftime("%H:%M:%S.%f")[:-3]
cdc_date = datetime.now().strftime("%Y-%m-%d")
elif (str(int(address.encode('hex'),16))=="118"):
if cdc_group:
cdc_cref = int(data.encode('hex'),16)
elif (str(int(address.encode('hex'),16))=="119"):
if cdc_group:
cdc_crev = int(data.encode('hex'),16)
elif (str(int(address.encode('hex'),16))=="121"):
if cdc_group:
cdc_cpar = int(data.encode('hex'),16)
wr.writerow([cdc_date,cdc_time,cdc_cmeas,cdc_cref,cdc_crev,cdc_cpar])
count += 1
cdc_group = False
#if count>args.killcount:
# sys.exit()
# TODO(Pat): I need to add support for custom arguments, which I will do
if count > 1000:
sys.exit()
|
Add example for m3_ice + callback
Usage:
$ m3_ice snoop -c snsv7_par_csv_callback.pyimport csv
from datetime import datetime
import sys
logfile = open('mbus_snoop_log.txt','w')
wr = csv.writer(open('snsv7_snoop.txt','w'), delimiter=',', lineterminator='\n')
wr.writerow(['DATE','TIME','C_MEAS','C_REF','C_REV','C_PAR'])
count = 0
cdc_cmeas = 0
cdc_crev = 0
cdc_cpar = 0
cdc_cref = 0
cdc_date = 0
cdc_time = 0
cdc_group = False
def callback(time, address, data, cb0=-1, cb1=-1):
global count
global cdc_cmeas, cdc_crev, cdc_cpar, cdc_cref, cdc_date, cdc_time, cdc_group
# m3_ice snoop prints this for you now [though needs to add count - can do]
#print("@" + str(count) + " Time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] + " ADDR: 0x" + address.encode('hex') + " DATA: 0x" + data.encode('hex') + " (ACK: " + str(not cb1) + ")")
print >> logfile, "@" + str(count) + " Time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] + " ADDR: 0x" + address.encode('hex') + " DATA: 0x" + data.encode('hex') + " (ACK: " + str(not cb1) + ")"
if (str(int(address.encode('hex'),16))=="116"):
cdc_group = True
cdc_cmeas = int(data.encode('hex'),16)
cdc_time = datetime.now().strftime("%H:%M:%S.%f")[:-3]
cdc_date = datetime.now().strftime("%Y-%m-%d")
elif (str(int(address.encode('hex'),16))=="118"):
if cdc_group:
cdc_cref = int(data.encode('hex'),16)
elif (str(int(address.encode('hex'),16))=="119"):
if cdc_group:
cdc_crev = int(data.encode('hex'),16)
elif (str(int(address.encode('hex'),16))=="121"):
if cdc_group:
cdc_cpar = int(data.encode('hex'),16)
wr.writerow([cdc_date,cdc_time,cdc_cmeas,cdc_cref,cdc_crev,cdc_cpar])
count += 1
cdc_group = False
#if count>args.killcount:
# sys.exit()
# TODO(Pat): I need to add support for custom arguments, which I will do
if count > 1000:
sys.exit()
|
<commit_before><commit_msg>Add example for m3_ice + callback
Usage:
$ m3_ice snoop -c snsv7_par_csv_callback.py<commit_after>import csv
from datetime import datetime
import sys
logfile = open('mbus_snoop_log.txt','w')
wr = csv.writer(open('snsv7_snoop.txt','w'), delimiter=',', lineterminator='\n')
wr.writerow(['DATE','TIME','C_MEAS','C_REF','C_REV','C_PAR'])
count = 0
cdc_cmeas = 0
cdc_crev = 0
cdc_cpar = 0
cdc_cref = 0
cdc_date = 0
cdc_time = 0
cdc_group = False
def callback(time, address, data, cb0=-1, cb1=-1):
global count
global cdc_cmeas, cdc_crev, cdc_cpar, cdc_cref, cdc_date, cdc_time, cdc_group
# m3_ice snoop prints this for you now [though needs to add count - can do]
#print("@" + str(count) + " Time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] + " ADDR: 0x" + address.encode('hex') + " DATA: 0x" + data.encode('hex') + " (ACK: " + str(not cb1) + ")")
print >> logfile, "@" + str(count) + " Time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] + " ADDR: 0x" + address.encode('hex') + " DATA: 0x" + data.encode('hex') + " (ACK: " + str(not cb1) + ")"
if (str(int(address.encode('hex'),16))=="116"):
cdc_group = True
cdc_cmeas = int(data.encode('hex'),16)
cdc_time = datetime.now().strftime("%H:%M:%S.%f")[:-3]
cdc_date = datetime.now().strftime("%Y-%m-%d")
elif (str(int(address.encode('hex'),16))=="118"):
if cdc_group:
cdc_cref = int(data.encode('hex'),16)
elif (str(int(address.encode('hex'),16))=="119"):
if cdc_group:
cdc_crev = int(data.encode('hex'),16)
elif (str(int(address.encode('hex'),16))=="121"):
if cdc_group:
cdc_cpar = int(data.encode('hex'),16)
wr.writerow([cdc_date,cdc_time,cdc_cmeas,cdc_cref,cdc_crev,cdc_cpar])
count += 1
cdc_group = False
#if count>args.killcount:
# sys.exit()
# TODO(Pat): I need to add support for custom arguments, which I will do
if count > 1000:
sys.exit()
|
|
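A porting note on the record above: bytes.encode('hex') exists only on Python 2, so the callback will not run on Python 3 as written. The equivalent big-endian decode there would be:

value = int.from_bytes(data, "big")   # Python 3
# or, keeping the hex round-trip: value = int(data.hex(), 16)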
377526392b3f4a01c3f307c3d30af9f3368ebc47
|
lintcode/Hard/087_Remove_Node_in_Binary_Search_Tree.py
|
lintcode/Hard/087_Remove_Node_in_Binary_Search_Tree.py
|
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: The root of the binary search tree.
@param value: Remove the node with given value.
@return: The root of the binary search tree after removal.
"""
def removeNode(self, root, value):
# write your code here
# Solution 1
# def inorder(bst):
# if (bst is None):
# return []
# return inorder(bst.left) + [bst.val] + inorder(bst.right)
# def buildTree(li):
# if (not li):
# return None
# mid = len(li) / 2
# r = TreeNode(li[mid])
# r.left = buildTree(li[:mid])
# r.right = buildTree(li[mid+1:])
# return r
# arr = inorder(root)
# if value in arr:
# i = arr.index(value)
# arr = arr[:i] + arr[i+1:]
# return buildTree(arr)
# Solution 2
def treeMini(bst):
mini = bst.val
if (bst.left):
mini = treeMini(bst.left)
return mini
parent = None
target = root
dir = None
while (target):
if (target.val > value):
parent = target
dir = 'left'
target = target.left
elif (target.val < value):
parent = target
dir = 'right'
target = target.right
else:
if (dir is None):
if (target.left is None and target.right is None):
return None
elif (target.left and target.right):
target.val = treeMini(target.right)
self.removeNode(target.right, target.val)
else:
if (target.left is None):
return target.right
else:
return target.left
else:
if (target.left is None and target.right is None):
if (dir == 'left'):
parent.left = None
else:
parent.right = None
elif (target.left and target.right):
target.val = treeMini(target.right)
self.removeNode(target.right, target.val)
else:
if (target.left is None):
if (dir == 'left'):
parent.left = target.right
else:
parent.right = target.right
else:
if (dir == 'left'):
parent.left = target.left
else:
parent.right = target.left
break
return root
|
Add solution to lintcode question 87
|
Add solution to lintcode question 87
|
Python
|
mit
|
Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode
|
Add solution to lintcode question 87
|
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: The root of the binary search tree.
@param value: Remove the node with given value.
@return: The root of the binary search tree after removal.
"""
def removeNode(self, root, value):
# write your code here
# Solution 1
# def inorder(bst):
# if (bst is None):
# return []
# return inorder(bst.left) + [bst.val] + inorder(bst.right)
# def buildTree(li):
# if (not li):
# return None
# mid = len(li) / 2
# r = TreeNode(li[mid])
# r.left = buildTree(li[:mid])
# r.right = buildTree(li[mid+1:])
# return r
# arr = inorder(root)
# if value in arr:
# i = arr.index(value)
# arr = arr[:i] + arr[i+1:]
# return buildTree(arr)
# Solution 2
def treeMini(bst):
mini = bst.val
if (bst.left):
mini = treeMini(bst.left)
return mini
parent = None
target = root
dir = None
while (target):
if (target.val > value):
parent = target
dir = 'left'
target = target.left
elif (target.val < value):
parent = target
dir = 'right'
target = target.right
else:
if (dir is None):
if (target.left is None and target.right is None):
return None
elif (target.left and target.right):
target.val = treeMini(target.right)
self.removeNode(target.right, target.val)
else:
if (target.left is None):
return target.right
else:
return target.left
else:
if (target.left is None and target.right is None):
if (dir == 'left'):
parent.left = None
else:
parent.right = None
elif (target.left and target.right):
target.val = treeMini(target.right)
self.removeNode(target.right, target.val)
else:
if (target.left is None):
if (dir == 'left'):
parent.left = target.right
else:
parent.right = target.right
else:
if (dir == 'left'):
parent.left = target.left
else:
parent.right = target.left
break
return root
|
<commit_before><commit_msg>Add solution to lintcode question 87<commit_after>
|
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: The root of the binary search tree.
@param value: Remove the node with given value.
@return: The root of the binary search tree after removal.
"""
def removeNode(self, root, value):
# write your code here
# Solution 1
# def inorder(bst):
# if (bst is None):
# return []
# return inorder(bst.left) + [bst.val] + inorder(bst.right)
# def buildTree(li):
# if (not li):
# return None
# mid = len(li) / 2
# r = TreeNode(li[mid])
# r.left = buildTree(li[:mid])
# r.right = buildTree(li[mid+1:])
# return r
# arr = inorder(root)
# if value in arr:
# i = arr.index(value)
# arr = arr[:i] + arr[i+1:]
# return buildTree(arr)
# Solution 2
def treeMini(bst):
mini = bst.val
if (bst.left):
mini = treeMini(bst.left)
return mini
parent = None
target = root
dir = None
while (target):
if (target.val > value):
parent = target
dir = 'left'
target = target.left
elif (target.val < value):
parent = target
dir = 'right'
target = target.right
else:
if (dir is None):
if (target.left is None and target.right is None):
return None
elif (target.left and target.right):
target.val = treeMini(target.right)
self.removeNode(target.right, target.val)
else:
if (target.left is None):
return target.right
else:
return target.left
else:
if (target.left is None and target.right is None):
if (dir == 'left'):
parent.left = None
else:
parent.right = None
elif (target.left and target.right):
target.val = treeMini(target.right)
self.removeNode(target.right, target.val)
else:
if (target.left is None):
if (dir == 'left'):
parent.left = target.right
else:
parent.right = target.right
else:
if (dir == 'left'):
parent.left = target.left
else:
parent.right = target.left
break
return root
|
Add solution to lintcode question 87"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: The root of the binary search tree.
@param value: Remove the node with given value.
@return: The root of the binary search tree after removal.
"""
def removeNode(self, root, value):
# write your code here
# Solution 1
# def inorder(bst):
# if (bst is None):
# return []
# return inorder(bst.left) + [bst.val] + inorder(bst.right)
# def buildTree(li):
# if (not li):
# return None
# mid = len(li) / 2
# r = TreeNode(li[mid])
# r.left = buildTree(li[:mid])
# r.right = buildTree(li[mid+1:])
# return r
# arr = inorder(root)
# if value in arr:
# i = arr.index(value)
# arr = arr[:i] + arr[i+1:]
# return buildTree(arr)
# Solution 2
def treeMini(bst):
mini = bst.val
if (bst.left):
mini = treeMini(bst.left)
return mini
parent = None
target = root
dir = None
while (target):
if (target.val > value):
parent = target
dir = 'left'
target = target.left
elif (target.val < value):
parent = target
dir = 'right'
target = target.right
else:
if (dir is None):
if (target.left is None and target.right is None):
return None
elif (target.left and target.right):
target.val = treeMini(target.right)
self.removeNode(target.right, target.val)
else:
if (target.left is None):
return target.right
else:
return target.left
else:
if (target.left is None and target.right is None):
if (dir == 'left'):
parent.left = None
else:
parent.right = None
elif (target.left and target.right):
target.val = treeMini(target.right)
self.removeNode(target.right, target.val)
else:
if (target.left is None):
if (dir == 'left'):
parent.left = target.right
else:
parent.right = target.right
else:
if (dir == 'left'):
parent.left = target.left
else:
parent.right = target.left
break
return root
|
<commit_before><commit_msg>Add solution to lintcode question 87<commit_after>"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: The root of the binary search tree.
@param value: Remove the node with given value.
@return: The root of the binary search tree after removal.
"""
def removeNode(self, root, value):
# write your code here
# Solution 1
# def inorder(bst):
# if (bst is None):
# return []
# return inorder(bst.left) + [bst.val] + inorder(bst.right)
# def buildTree(li):
# if (not li):
# return None
# mid = len(li) / 2
# r = TreeNode(li[mid])
# r.left = buildTree(li[:mid])
# r.right = buildTree(li[mid+1:])
# return r
# arr = inorder(root)
# if value in arr:
# i = arr.index(value)
# arr = arr[:i] + arr[i+1:]
# return buildTree(arr)
# Solution 2
def treeMini(bst):
mini = bst.val
if (bst.left):
mini = treeMini(bst.left)
return mini
parent = None
target = root
dir = None
while (target):
if (target.val > value):
parent = target
dir = 'left'
target = target.left
elif (target.val < value):
parent = target
dir = 'right'
target = target.right
else:
if (dir is None):
if (target.left is None and target.right is None):
return None
elif (target.left and target.right):
target.val = treeMini(target.right)
self.removeNode(target.right, target.val)
else:
if (target.left is None):
return target.right
else:
return target.left
else:
if (target.left is None and target.right is None):
if (dir == 'left'):
parent.left = None
else:
parent.right = None
elif (target.left and target.right):
target.val = treeMini(target.right)
self.removeNode(target.right, target.val)
else:
if (target.left is None):
if (dir == 'left'):
parent.left = target.right
else:
parent.right = target.right
else:
if (dir == 'left'):
parent.left = target.left
else:
parent.right = target.left
break
return root
|
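A small usage sketch for Solution 2 above, reusing the record's own TreeNode definition; the values are illustrative:

def inorder(node):
    return inorder(node.left) + [node.val] + inorder(node.right) if node else []

root = TreeNode(5)
root.left, root.right = TreeNode(3), TreeNode(8)
root.left.left = TreeNode(2)

root = Solution().removeNode(root, 3)   # 3 has a single child, so 2 is spliced in
print(inorder(root))                    # [2, 5, 8]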