Dataset schema (16 string columns):

- commit: stringlengths 40 to 40
- old_file: stringlengths 4 to 118
- new_file: stringlengths 4 to 118
- old_contents: stringlengths 0 to 2.94k
- new_contents: stringlengths 1 to 4.43k
- subject: stringlengths 15 to 444
- message: stringlengths 16 to 3.45k
- lang: stringclasses 1 value
- license: stringclasses 13 values
- repos: stringlengths 5 to 43.2k
- prompt: stringlengths 17 to 4.58k
- response: stringlengths 1 to 4.43k
- prompt_tagged: stringlengths 58 to 4.62k
- response_tagged: stringlengths 1 to 4.43k
- text: stringlengths 132 to 7.29k
- text_tagged: stringlengths 173 to 7.33k

The sample rows below are pipe-separated records in this column order.
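One structural detail worth calling out: the prompt_tagged, response_tagged, and text_tagged columns wrap the plain columns in `<commit_before>`, `<commit_msg>`, and `<commit_after>` markers, as can be seen verbatim in the rows below. The following is a minimal sketch of that template, inferred from the dump itself rather than from any official tooling:

```python
def make_text_tagged(old_contents: str, message: str, new_contents: str) -> str:
    # Template inferred from the rows below:
    #   <commit_before>{old}<commit_msg>{message}<commit_after>{new}
    return (
        f"<commit_before>{old_contents}"
        f"<commit_msg>{message}"
        f"<commit_after>{new_contents}"
    )


# For a newly added file, old_contents is empty, matching the first row below;
# the file contents here are a shortened stand-in, not a real dataset value.
print(make_text_tagged("", "Add stub boot trigger handler.", "..."))
```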
b7a78e1b63588412945f24d86b53697eed3fc93d
|
app/handlers/boot_trigger.py
|
app/handlers/boot_trigger.py
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The RequestHandler for /trigger/boot URLs."""
import handlers.base as hbase
import handlers.common as hcommon
import handlers.response as hresponse
import models
class BootTriggerHandler(hbase.BaseHandler):
"""Handle the /trigger/boot URLs."""
def __init__(self, application, request, **kwargs):
super(BootTriggerHandler, self).__init__(
application, request, **kwargs)
@property
def collection(self):
return self.db[models.DEFCONFIG_COLLECTION]
@staticmethod
def _token_validation_func():
return hcommon.valid_token_bh
def _get(self, **kwargs):
response = hresponse.HandlerResponse()
# TODO: validate token and get the lab name.
return response
def _is_valid_token(self):
pass
|
Add stub boot trigger handler.
|
Add stub boot trigger handler.
|
Python
|
lgpl-2.1
|
kernelci/kernelci-backend,kernelci/kernelci-backend,joyxu/kernelci-backend,joyxu/kernelci-backend,joyxu/kernelci-backend
|
Add stub boot trigger handler.
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The RequestHandler for /trigger/boot URLs."""
import handlers.base as hbase
import handlers.common as hcommon
import handlers.response as hresponse
import models
class BootTriggerHandler(hbase.BaseHandler):
"""Handle the /trigger/boot URLs."""
def __init__(self, application, request, **kwargs):
super(BootTriggerHandler, self).__init__(
application, request, **kwargs)
@property
def collection(self):
return self.db[models.DEFCONFIG_COLLECTION]
@staticmethod
def _token_validation_func():
return hcommon.valid_token_bh
def _get(self, **kwargs):
response = hresponse.HandlerResponse()
# TODO: validate token and get the lab name.
return response
def _is_valid_token(self):
pass
|
<commit_before><commit_msg>Add stub boot trigger handler.<commit_after>
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The RequestHandler for /trigger/boot URLs."""
import handlers.base as hbase
import handlers.common as hcommon
import handlers.response as hresponse
import models
class BootTriggerHandler(hbase.BaseHandler):
"""Handle the /trigger/boot URLs."""
def __init__(self, application, request, **kwargs):
super(BootTriggerHandler, self).__init__(
application, request, **kwargs)
@property
def collection(self):
return self.db[models.DEFCONFIG_COLLECTION]
@staticmethod
def _token_validation_func():
return hcommon.valid_token_bh
def _get(self, **kwargs):
response = hresponse.HandlerResponse()
# TODO: validate token and get the lab name.
return response
def _is_valid_token(self):
pass
|
Add stub boot trigger handler.# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The RequestHandler for /trigger/boot URLs."""
import handlers.base as hbase
import handlers.common as hcommon
import handlers.response as hresponse
import models
class BootTriggerHandler(hbase.BaseHandler):
"""Handle the /trigger/boot URLs."""
def __init__(self, application, request, **kwargs):
super(BootTriggerHandler, self).__init__(
application, request, **kwargs)
@property
def collection(self):
return self.db[models.DEFCONFIG_COLLECTION]
@staticmethod
def _token_validation_func():
return hcommon.valid_token_bh
def _get(self, **kwargs):
response = hresponse.HandlerResponse()
# TODO: validate token and get the lab name.
return response
def _is_valid_token(self):
pass
|
<commit_before><commit_msg>Add stub boot trigger handler.<commit_after># This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The RequestHandler for /trigger/boot URLs."""
import handlers.base as hbase
import handlers.common as hcommon
import handlers.response as hresponse
import models
class BootTriggerHandler(hbase.BaseHandler):
"""Handle the /trigger/boot URLs."""
def __init__(self, application, request, **kwargs):
super(BootTriggerHandler, self).__init__(
application, request, **kwargs)
@property
def collection(self):
return self.db[models.DEFCONFIG_COLLECTION]
@staticmethod
def _token_validation_func():
return hcommon.valid_token_bh
def _get(self, **kwargs):
response = hresponse.HandlerResponse()
# TODO: validate token and get the lab name.
return response
def _is_valid_token(self):
pass
|
|
553660f2c67c7d3032b20a937b93262fbbb5e9f5
|
fastq/split_fastq_files.py
|
fastq/split_fastq_files.py
|
"""
Split paired end fastq files into different files
"""
import os
import sys
import argparse
from roblib import stream_fastq, message
__author__ = 'Rob Edwards'
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Split PE fastq files into multiple files')
parser.add_argument('-l', help='fastq file 1', required=True)
parser.add_argument('-r', help='fastq file 2', required=True)
parser.add_argument('-n', help='number of reads per file. Default = 10000', type=int, default=10000)
parser.add_argument('-o', required=True,
help='stub of output file _n_R1.fastq and _n_R2.fastq will be added for files 1 to n')
parser.add_argument('-d', help='Output directory. Default = fastq_split', default='fastq_split')
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
os.makedirs(args.d, exist_ok=True)
reads = {}
filecounter = 1
counter = 0
out = open(os.path.join(args.d, f"{args.o}_{filecounter}.R1.fastq"), 'w')
for sid, seqid, seq, qual in stream_fastq(args.l):
counter += 1
if counter > args.n:
counter = 0
filecounter += 1
out.close()
out = open(os.path.join(args.d, f"{args.o}_{filecounter}.R1.fastq"), 'w')
out.write(f"@{seqid}\n{seq}\n+\n{qual}\n")
reads[sid] = filecounter
out.close()
filecounter = 1
counter = 0
out = open(os.path.join(args.d, f"{args.o}_{filecounter}.R2.fastq"), 'w')
for sid, seqid, seq, qual in stream_fastq(args.r):
counter += 1
if counter > args.n:
counter = 0
filecounter += 1
out.close()
out = open(os.path.join(args.d, f"{args.o}_{filecounter}.R2.fastq"), 'w')
if sid in reads and reads[sid] != filecounter:
message(f"ERROR: Different file locations for {sid}. Left read is in {reads[sid]}. Right read is in {filecounter}. Please ensure your sequences are paired!", "RED")
if sid not in reads:
message(f"ERROR: Found an unpaired read, {sid}")
out.write(f"@{seqid}\n{seq}\n+\n{qual}\n")
reads[sid] = filecounter
out.close()
|
Split some fastq files into smaller files
|
Split some fastq files into smaller files
|
Python
|
mit
|
linsalrob/EdwardsLab,linsalrob/EdwardsLab,linsalrob/EdwardsLab,linsalrob/EdwardsLab,linsalrob/EdwardsLab
|
Split some fastq files into smaller files
|
"""
Split paired end fastq files into different files
"""
import os
import sys
import argparse
from roblib import stream_fastq, message
__author__ = 'Rob Edwards'
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Split PE fastq files into multiple files')
parser.add_argument('-l', help='fastq file 1', required=True)
parser.add_argument('-r', help='fastq file 2', required=True)
parser.add_argument('-n', help='number of reads per file. Default = 10000', type=int, default=10000)
parser.add_argument('-o', required=True,
help='stub of output file _n_R1.fastq and _n_R2.fastq will be added for files 1 to n')
parser.add_argument('-d', help='Output directory. Default = fastq_split', default='fastq_split')
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
os.makedirs(args.d, exist_ok=True)
reads = {}
filecounter = 1
counter = 0
out = open(os.path.join(args.d, f"{args.o}_{filecounter}.R1.fastq"), 'w')
for sid, seqid, seq, qual in stream_fastq(args.l):
counter += 1
if counter > args.n:
counter = 0
filecounter += 1
out.close()
out = open(os.path.join(args.d, f"{args.o}_{filecounter}.R1.fastq"), 'w')
out.write(f"@{seqid}\n{seq}\n+\n{qual}\n")
reads[sid] = filecounter
out.close()
filecounter = 1
counter = 0
out = open(os.path.join(args.d, f"{args.o}_{filecounter}.R2.fastq"), 'w')
for sid, seqid, seq, qual in stream_fastq(args.r):
counter += 1
if counter > args.n:
counter = 0
filecounter += 1
out.close()
out = open(os.path.join(args.d, f"{args.o}_{filecounter}.R2.fastq"), 'w')
if sid in reads and reads[sid] != filecounter:
message(f"ERROR: Different file locations for {sid}. Left read is in {reads[sid]}. Right read is in {filecounter}. Please ensure your sequences are paired!", "RED")
if sid not in reads:
message(f"ERROR: Found an unpaired read, {sid}")
out.write(f"@{seqid}\n{seq}\n+\n{qual}\n")
reads[sid] = filecounter
out.close()
|
<commit_before><commit_msg>Split some fastq files into smaller files<commit_after>
|
"""
Split paired end fastq files into different files
"""
import os
import sys
import argparse
from roblib import stream_fastq, message
__author__ = 'Rob Edwards'
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Split PE fastq files into multiple files')
parser.add_argument('-l', help='fastq file 1', required=True)
parser.add_argument('-r', help='fastq file 2', required=True)
parser.add_argument('-n', help='number of reads per file. Default = 10000', type=int, default=10000)
parser.add_argument('-o', required=True,
help='stub of output file _n_R1.fastq and _n_R2.fastq will be added for files 1 to n')
parser.add_argument('-d', help='Output directory. Default = fastq_split', default='fastq_split')
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
os.makedirs(args.d, exist_ok=True)
reads = {}
filecounter = 1
counter = 0
out = open(os.path.join(args.d, f"{args.o}_{filecounter}.R1.fastq"), 'w')
for sid, seqid, seq, qual in stream_fastq(args.l):
counter += 1
if counter > args.n:
counter = 0
filecounter += 1
out.close()
out = open(os.path.join(args.d, f"{args.o}_{filecounter}.R1.fastq"), 'w')
out.write(f"@{seqid}\n{seq}\n+\n{qual}\n")
reads[sid] = filecounter
out.close()
filecounter = 1
counter = 0
out = open(os.path.join(args.d, f"{args.o}_{filecounter}.R2.fastq"), 'w')
for sid, seqid, seq, qual in stream_fastq(args.r):
counter += 1
if counter > args.n:
counter = 0
filecounter += 1
out.close()
out = open(os.path.join(args.d, f"{args.o}_{filecounter}.R2.fastq"), 'w')
if sid in reads and reads[sid] != filecounter:
message(f"ERROR: Different file locations for {sid}. Left read is in {reads[sid]}. Right read is in {filecounter}. Please ensure your sequences are paired!", "RED")
if sid not in reads:
message(f"ERROR: Found an unpaired read, {sid}")
out.write(f"@{seqid}\n{seq}\n+\n{qual}\n")
reads[sid] = filecounter
out.close()
|
Split some fastq files into smaller files"""
Split paired end fastq files into different files
"""
import os
import sys
import argparse
from roblib import stream_fastq, message
__author__ = 'Rob Edwards'
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Split PE fastq files into multiple files')
parser.add_argument('-l', help='fastq file 1', required=True)
parser.add_argument('-r', help='fastq file 2', required=True)
parser.add_argument('-n', help='number of reads per file. Default = 10000', type=int, default=10000)
parser.add_argument('-o', required=True,
help='stub of output file _n_R1.fastq and _n_R2.fastq will be added for files 1 to n')
parser.add_argument('-d', help='Output directory. Default = fastq_split', default='fastq_split')
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
os.makedirs(args.d, exist_ok=True)
reads = {}
filecounter = 1
counter = 0
out = open(os.path.join(args.d, f"{args.o}_{filecounter}.R1.fastq"), 'w')
for sid, seqid, seq, qual in stream_fastq(args.l):
counter += 1
if counter > args.n:
counter = 0
filecounter += 1
out.close()
out = open(os.path.join(args.d, f"{args.o}_{filecounter}.R1.fastq"), 'w')
out.write(f"@{seqid}\n{seq}\n+\n{qual}\n")
reads[sid] = filecounter
out.close()
filecounter = 1
counter = 0
out = open(os.path.join(args.d, f"{args.o}_{filecounter}.R2.fastq"), 'w')
for sid, seqid, seq, qual in stream_fastq(args.r):
counter += 1
if counter > args.n:
counter = 0
filecounter += 1
out.close()
out = open(os.path.join(args.d, f"{args.o}_{filecounter}.R2.fastq"), 'w')
if sid in reads and reads[sid] != filecounter:
message(f"ERROR: Different file locations for {sid}. Left read is in {reads[sid]}. Right read is in {filecounter}. Please ensure your sequences are paired!", "RED")
if sid not in reads:
message(f"ERROR: Found an unpaired read, {sid}")
out.write(f"@{seqid}\n{seq}\n+\n{qual}\n")
reads[sid] = filecounter
out.close()
|
<commit_before><commit_msg>Split some fastq files into smaller files<commit_after>"""
Split paired end fastq files into different files
"""
import os
import sys
import argparse
from roblib import stream_fastq, message
__author__ = 'Rob Edwards'
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Split PE fastq files into multiple files')
parser.add_argument('-l', help='fastq file 1', required=True)
parser.add_argument('-r', help='fastq file 2', required=True)
parser.add_argument('-n', help='number of reads per file. Default = 10000', type=int, default=10000)
parser.add_argument('-o', required=True,
help='stub of output file _n_R1.fastq and _n_R2.fastq will be added for files 1 to n')
parser.add_argument('-d', help='Output directory. Default = fastq_split', default='fastq_split')
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
os.makedirs(args.d, exist_ok=True)
reads = {}
filecounter = 1
counter = 0
out = open(os.path.join(args.d, f"{args.o}_{filecounter}.R1.fastq"), 'w')
for sid, seqid, seq, qual in stream_fastq(args.l):
counter += 1
if counter > args.n:
counter = 0
filecounter += 1
out.close()
out = open(os.path.join(args.d, f"{args.o}_{filecounter}.R1.fastq"), 'w')
out.write(f"@{seqid}\n{seq}\n+\n{qual}\n")
reads[sid] = filecounter
out.close()
filecounter = 1
counter = 0
out = open(os.path.join(args.d, f"{args.o}_{filecounter}.R2.fastq"), 'w')
for sid, seqid, seq, qual in stream_fastq(args.r):
counter += 1
if counter > args.n:
counter = 0
filecounter += 1
out.close()
out = open(os.path.join(args.d, f"{args.o}_{filecounter}.R2.fastq"), 'w')
if sid in reads and reads[sid] != filecounter:
message(f"ERROR: Different file locations for {sid}. Left read is in {reads[sid]}. Right read is in {filecounter}. Please ensure your sequences are paired!", "RED")
if sid not in reads:
message(f"ERROR: Found an unpaired read, {sid}")
out.write(f"@{seqid}\n{seq}\n+\n{qual}\n")
reads[sid] = filecounter
out.close()
|
|
aca1f53bc42915e8994c76607c86f486fb314a7a
|
py/island-perimeter.py
|
py/island-perimeter.py
|
class Solution(object):
def islandPerimeter(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
r = len(grid)
c = len(grid[0])
perimeter = 0
for i in xrange(r):
for j in xrange(c):
if grid[i][j] == 1:
perimeter += 4
if i > 0 and grid[i - 1][j] == 1:
perimeter -= 1
if i < r - 1 and grid[i + 1][j] == 1:
perimeter -= 1
if j > 0 and grid[i][j - 1] == 1:
perimeter -= 1
if j < c - 1 and grid[i][j + 1] == 1:
perimeter -= 1
return perimeter
|
Add py solution for 463. Island Perimeter
|
Add py solution for 463. Island Perimeter
463. Island Perimeter: https://leetcode.com/problems/island-perimeter/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 463. Island Perimeter
463. Island Perimeter: https://leetcode.com/problems/island-perimeter/
|
class Solution(object):
def islandPerimeter(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
r = len(grid)
c = len(grid[0])
perimeter = 0
for i in xrange(r):
for j in xrange(c):
if grid[i][j] == 1:
perimeter += 4
if i > 0 and grid[i - 1][j] == 1:
perimeter -= 1
if i < r - 1 and grid[i + 1][j] == 1:
perimeter -= 1
if j > 0 and grid[i][j - 1] == 1:
perimeter -= 1
if j < c - 1 and grid[i][j + 1] == 1:
perimeter -= 1
return perimeter
|
<commit_before><commit_msg>Add py solution for 463. Island Perimeter
463. Island Perimeter: https://leetcode.com/problems/island-perimeter/<commit_after>
|
class Solution(object):
def islandPerimeter(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
r = len(grid)
c = len(grid[0])
perimeter = 0
for i in xrange(r):
for j in xrange(c):
if grid[i][j] == 1:
perimeter += 4
if i > 0 and grid[i - 1][j] == 1:
perimeter -= 1
if i < r - 1 and grid[i + 1][j] == 1:
perimeter -= 1
if j > 0 and grid[i][j - 1] == 1:
perimeter -= 1
if j < c - 1 and grid[i][j + 1] == 1:
perimeter -= 1
return perimeter
|
Add py solution for 463. Island Perimeter
463. Island Perimeter: https://leetcode.com/problems/island-perimeter/class Solution(object):
def islandPerimeter(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
r = len(grid)
c = len(grid[0])
perimeter = 0
for i in xrange(r):
for j in xrange(c):
if grid[i][j] == 1:
perimeter += 4
if i > 0 and grid[i - 1][j] == 1:
perimeter -= 1
if i < r - 1 and grid[i + 1][j] == 1:
perimeter -= 1
if j > 0 and grid[i][j - 1] == 1:
perimeter -= 1
if j < c - 1 and grid[i][j + 1] == 1:
perimeter -= 1
return perimeter
|
<commit_before><commit_msg>Add py solution for 463. Island Perimeter
463. Island Perimeter: https://leetcode.com/problems/island-perimeter/<commit_after>class Solution(object):
def islandPerimeter(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
r = len(grid)
c = len(grid[0])
perimeter = 0
for i in xrange(r):
for j in xrange(c):
if grid[i][j] == 1:
perimeter += 4
if i > 0 and grid[i - 1][j] == 1:
perimeter -= 1
if i < r - 1 and grid[i + 1][j] == 1:
perimeter -= 1
if j > 0 and grid[i][j - 1] == 1:
perimeter -= 1
if j < c - 1 and grid[i][j + 1] == 1:
perimeter -= 1
return perimeter
|
|
b1128cd47160ce977bc9c6d90e94cb7a4ca873b5
|
scripts/grant_board_access.py
|
scripts/grant_board_access.py
|
#!/usr/bin/env python
"""Grant access to a board to a user.
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.board import access_control_service, board_service
from byceps.services.board.transfer.models import Board
from byceps.util.system import get_config_filename_from_env_or_exit
from _util import app_context
from _validators import validate_user_screen_name
def validate_board(ctx, param, board_id: str) -> Board:
board = board_service.find_board(board_id)
if not board:
raise click.BadParameter(f'Unknown board ID "{board_id}".')
return board
@click.command()
@click.argument('board', metavar='BOARD_ID', callback=validate_board)
@click.argument('user', metavar='USER_SCREEN_NAME', callback=validate_user_screen_name)
def execute(board, user):
if access_control_service.has_user_access_to_board(user.id, board.id):
click.secho(f'User "{user.screen_name}" already has access '
f'to board "{board.id}".',
fg='yellow')
return
access_control_service.grant_access_to_board(board.id, user.id)
click.secho(f'Access to board "{board.id}" granted '
f'to user "{user.screen_name}".',
fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
Add script to grant board access to user
|
Add script to grant board access to user
|
Python
|
bsd-3-clause
|
homeworkprod/byceps,homeworkprod/byceps,homeworkprod/byceps,m-ober/byceps,m-ober/byceps,m-ober/byceps
|
Add script to grant board access to user
|
#!/usr/bin/env python
"""Grant access to a board to a user.
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.board import access_control_service, board_service
from byceps.services.board.transfer.models import Board
from byceps.util.system import get_config_filename_from_env_or_exit
from _util import app_context
from _validators import validate_user_screen_name
def validate_board(ctx, param, board_id: str) -> Board:
board = board_service.find_board(board_id)
if not board:
raise click.BadParameter(f'Unknown board ID "{board_id}".')
return board
@click.command()
@click.argument('board', metavar='BOARD_ID', callback=validate_board)
@click.argument('user', metavar='USER_SCREEN_NAME', callback=validate_user_screen_name)
def execute(board, user):
if access_control_service.has_user_access_to_board(user.id, board.id):
click.secho(f'User "{user.screen_name}" already has access '
f'to board "{board.id}".',
fg='yellow')
return
access_control_service.grant_access_to_board(board.id, user.id)
click.secho(f'Access to board "{board.id}" granted '
f'to user "{user.screen_name}".',
fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
<commit_before><commit_msg>Add script to grant board access to user<commit_after>
|
#!/usr/bin/env python
"""Grant access to a board to a user.
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.board import access_control_service, board_service
from byceps.services.board.transfer.models import Board
from byceps.util.system import get_config_filename_from_env_or_exit
from _util import app_context
from _validators import validate_user_screen_name
def validate_board(ctx, param, board_id: str) -> Board:
board = board_service.find_board(board_id)
if not board:
raise click.BadParameter(f'Unknown board ID "{board_id}".')
return board
@click.command()
@click.argument('board', metavar='BOARD_ID', callback=validate_board)
@click.argument('user', metavar='USER_SCREEN_NAME', callback=validate_user_screen_name)
def execute(board, user):
if access_control_service.has_user_access_to_board(user.id, board.id):
click.secho(f'User "{user.screen_name}" already has access '
f'to board "{board.id}".',
fg='yellow')
return
access_control_service.grant_access_to_board(board.id, user.id)
click.secho(f'Access to board "{board.id}" granted '
f'to user "{user.screen_name}".',
fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
Add script to grant board access to user#!/usr/bin/env python
"""Grant access to a board to a user.
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.board import access_control_service, board_service
from byceps.services.board.transfer.models import Board
from byceps.util.system import get_config_filename_from_env_or_exit
from _util import app_context
from _validators import validate_user_screen_name
def validate_board(ctx, param, board_id: str) -> Board:
board = board_service.find_board(board_id)
if not board:
raise click.BadParameter(f'Unknown board ID "{board_id}".')
return board
@click.command()
@click.argument('board', metavar='BOARD_ID', callback=validate_board)
@click.argument('user', metavar='USER_SCREEN_NAME', callback=validate_user_screen_name)
def execute(board, user):
if access_control_service.has_user_access_to_board(user.id, board.id):
click.secho(f'User "{user.screen_name}" already has access '
f'to board "{board.id}".',
fg='yellow')
return
access_control_service.grant_access_to_board(board.id, user.id)
click.secho(f'Access to board "{board.id}" granted '
f'to user "{user.screen_name}".',
fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
<commit_before><commit_msg>Add script to grant board access to user<commit_after>#!/usr/bin/env python
"""Grant access to a board to a user.
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.board import access_control_service, board_service
from byceps.services.board.transfer.models import Board
from byceps.util.system import get_config_filename_from_env_or_exit
from _util import app_context
from _validators import validate_user_screen_name
def validate_board(ctx, param, board_id: str) -> Board:
board = board_service.find_board(board_id)
if not board:
raise click.BadParameter(f'Unknown board ID "{board_id}".')
return board
@click.command()
@click.argument('board', metavar='BOARD_ID', callback=validate_board)
@click.argument('user', metavar='USER_SCREEN_NAME', callback=validate_user_screen_name)
def execute(board, user):
if access_control_service.has_user_access_to_board(user.id, board.id):
click.secho(f'User "{user.screen_name}" already has access '
f'to board "{board.id}".',
fg='yellow')
return
access_control_service.grant_access_to_board(board.id, user.id)
click.secho(f'Access to board "{board.id}" granted '
f'to user "{user.screen_name}".',
fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
|
b7c6b9b2a5b7e5d7d8ebc78c53f156d4c53c7bbf
|
models/cuberun.py
|
models/cuberun.py
|
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
model = Sequential()
# conv (64 5x5 kernels, stride size 2x1)
# TODO : 1 channel?
model.add(Convolution2D(64, 5, 5, input_shape=(1, 128, 256), activation="relu", subsample=(2, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (64 5x5 kernels, stride size 1x1)
model.add(Convolution2D(64, 5, 5, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (128 5x5 kernels, stride size 1x1)
model.add(Convolution2D(128, 5, 5, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (256 5x5 kernels, stride size 1x1)
model.add(Convolution2D(256, 5, 5, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (256 3x3 kernels, stride size 1x1)
model.add(Convolution2D(256, 3, 3, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# dense (1024 units)
# flatten conv feature maps before the dense layer
model.add(Flatten())
model.add(Dense(1024))
# soft max (19 units)
model.add(Dense(19, activation='softmax'))
# TODO: compile model
|
Add baseline convolutional neural network model.
|
Add baseline convolutional neural network model.
|
Python
|
mit
|
johnmartinsson/bird-species-classification,johnmartinsson/bird-species-classification
|
Add baseline convolutional neural network model.
|
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
model = Sequential()
# conv (64 5x5 kernels, stride size 2x1)
# TODO : 1 channel?
model.add(Convolution2D(64, 5, 5, input_shape=(1, 128, 256), activation="relu", subsample=(2, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (64 5x5 kernels, stride size 1x1)
model.add(Convolution2D(64, 5, 5, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (128 5x5 kernels, stride size 1x1)
model.add(Convolution2D(128, 5, 5, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (256 5x5 kernels, stride size 1x1)
model.add(Convolution2D(256, 5, 5, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (256 3x3 kernels, stride size 1x1)
model.add(Convolution2D(256, 3, 3, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# dense (1024 units)
# flatten conv feature maps before the dense layer
model.add(Flatten())
model.add(Dense(1024))
# soft max (19 units)
model.add(Dense(19, activation='softmax'))
# TODO: compile model
|
<commit_before><commit_msg>Add baseline convolutional neural network model.<commit_after>
|
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
model = Sequential()
# conv (64 5x5 kernels, stride size 2x1)
# TODO : 1 channel?
model.add(Convolution2D(64, 5, 5, input_shape=(1, 128, 256), activation="relu", subsample=(2, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (64 5x5 kernels, stride size 1x1)
model.add(Convolution2D(64, 5, 5, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (128 5x5 kernels, stride size 1x1)
model.add(Convolution2D(128, 5, 5, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (256 5x5 kernels, stride size 1x1)
model.add(Convolution2D(256, 5, 5, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (256 3x3 kernels, stride size 1x1)
model.add(Convolution2D(256, 3, 3, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# dense (1024 units)
# flatten conv feature maps before the dense layer
model.add(Flatten())
model.add(Dense(1024))
# soft max (19 units)
model.add(Dense(19, activation='softmax'))
# TODO: compile model
|
Add baseline convolutional neural network model.from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
model = Sequential()
# conv (64 5x5 kernels, stride size 2x1)
# TODO : 1 channel?
model.add(Convolution2D(64, 5, 5, input_shape=(1, 128, 256), activation="relu", subsample=(2, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (64 5x5 kernels, stride size 1x1)
model.add(Convolution2D(64, 5, 5, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (128 5x5 kernels, stride size 1x1)
model.add(Convolution2D(128, 5, 5, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (256 5x5 kernels, stride size 1x1)
model.add(Convolution2D(256, 5, 5, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (256 3x3 kernels, stride size 1x1)
model.add(Convolution2D(256, 3, 3, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# dense (1024 units)
# flatten conv feature maps before the dense layer
model.add(Flatten())
model.add(Dense(1024))
# soft max (19 units)
model.add(Dense(19, activation='softmax'))
# TODO: compile model
|
<commit_before><commit_msg>Add baseline convolutional neural network model.<commit_after>from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
model = Sequential()
# conv (64 5x5 kernels, stride size 2x1)
# TODO : 1 channel?
model.add(Convolution2D(64, 5, 5, input_shape=(1, 128, 256), activation="relu", subsample=(2, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (64 5x5 kernels, stride size 1x1)
model.add(Convolution2D(64, 5, 5, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (128 5x5 kernels, stride size 1x1)
model.add(Convolution2D(128, 5, 5, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (256 5x5 kernels, stride size 1x1)
model.add(Convolution2D(256, 5, 5, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# conv (256 3x3 kernels, stride size 1x1)
model.add(Convolution2D(256, 3, 3, activation="relu", subsample=(1, 1)))
# max pooling (2x2 kernels, stride size 2x2)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# dense (1024 units)
# flatten conv feature maps before the dense layer
model.add(Flatten())
model.add(Dense(1024))
# soft max (19 units)
model.add(Dense(19, activation='softmax'))
# TODO: compile model
|
|
3a5a228e25996e2acf39ea1ae5966542f4b27123
|
eve_neo4j/utils.py
|
eve_neo4j/utils.py
|
# -*- coding: utf-8 -*-
from copy import copy
from datetime import datetime
from eve.utils import config
def node_to_dict(node):
node = dict(node)
if config.DATE_CREATED in node:
node[config.DATE_CREATED] = datetime.fromtimestamp(
node[config.DATE_CREATED])
if config.LAST_UPDATED in node:
node[config.LAST_UPDATED] = datetime.fromtimestamp(
node[config.LAST_UPDATED])
return node
def count_selection(selection, with_limit_and_skip=False):
if not with_limit_and_skip:
selection = copy(selection)
selection._skip = None
selection._limit = None
query, params = selection._query_and_parameters
query = query.replace("RETURN _", "RETURN COUNT(_)")
return selection.graph.evaluate(query, params)
|
Convert nodes to dicts and count results of query
|
Convert nodes to dicts and count results of query
|
Python
|
mit
|
Grupo-Abraxas/eve-neo4j,Abraxas-Biosystems/eve-neo4j
|
Convert nodes to dicts and count results of query
|
# -*- coding: utf-8 -*-
from copy import copy
from datetime import datetime
from eve.utils import config
def node_to_dict(node):
node = dict(node)
if config.DATE_CREATED in node:
node[config.DATE_CREATED] = datetime.fromtimestamp(
node[config.DATE_CREATED])
if config.LAST_UPDATED in node:
node[config.LAST_UPDATED] = datetime.fromtimestamp(
node[config.LAST_UPDATED])
return node
def count_selection(selection, with_limit_and_skip=False):
if not with_limit_and_skip:
selection = copy(selection)
selection._skip = None
selection._limit = None
query, params = selection._query_and_parameters
query = query.replace("RETURN _", "RETURN COUNT(_)")
return selection.graph.evaluate(query, params)
|
<commit_before><commit_msg>Convert nodes to dicts and count results of query<commit_after>
|
# -*- coding: utf-8 -*-
from copy import copy
from datetime import datetime
from eve.utils import config
def node_to_dict(node):
node = dict(node)
if config.DATE_CREATED in node:
node[config.DATE_CREATED] = datetime.fromtimestamp(
node[config.DATE_CREATED])
if config.LAST_UPDATED in node:
node[config.LAST_UPDATED] = datetime.fromtimestamp(
node[config.LAST_UPDATED])
return node
def count_selection(selection, with_limit_and_skip=False):
if not with_limit_and_skip:
selection = copy(selection)
selection._skip = None
selection._limit = None
query, params = selection._query_and_parameters
query = query.replace("RETURN _", "RETURN COUNT(_)")
return selection.graph.evaluate(query, params)
|
Convert nodes to dicts and count results of query# -*- coding: utf-8 -*-
from copy import copy
from datetime import datetime
from eve.utils import config
def node_to_dict(node):
node = dict(node)
if config.DATE_CREATED in node:
node[config.DATE_CREATED] = datetime.fromtimestamp(
node[config.DATE_CREATED])
if config.LAST_UPDATED in node:
node[config.LAST_UPDATED] = datetime.fromtimestamp(
node[config.LAST_UPDATED])
return node
def count_selection(selection, with_limit_and_skip=False):
if not with_limit_and_skip:
selection = copy(selection)
selection._skip = None
selection._limit = None
query, params = selection._query_and_parameters
query = query.replace("RETURN _", "RETURN COUNT(_)")
return selection.graph.evaluate(query, params)
|
<commit_before><commit_msg>Convert nodes to dicts and count results of query<commit_after># -*- coding: utf-8 -*-
from copy import copy
from datetime import datetime
from eve.utils import config
def node_to_dict(node):
node = dict(node)
if config.DATE_CREATED in node:
node[config.DATE_CREATED] = datetime.fromtimestamp(
node[config.DATE_CREATED])
if config.LAST_UPDATED in node:
node[config.LAST_UPDATED] = datetime.fromtimestamp(
node[config.LAST_UPDATED])
return node
def count_selection(selection, with_limit_and_skip=False):
if not with_limit_and_skip:
selection = copy(selection)
selection._skip = None
selection._limit = None
query, params = selection._query_and_parameters
query = query.replace("RETURN _", "RETURN COUNT(_)")
return selection.graph.evaluate(query, params)
|
|
09311a99d37c8623a644acb30daf8523a5e7a196
|
django_auth_policy/validators.py
|
django_auth_policy/validators.py
|
from django.core.exceptions import ValidationError
from django_auth_policy import settings as dap_settings
def password_min_length(value):
if dap_settings.PASSWORD_MIN_LENGTH_TEXT is None:
return
if len(value) < dap_settings.PASSWORD_MIN_LENGTH:
msg = dap_settings.PASSWORD_MIN_LENGTH_TEXT.format(
length=dap_settings.PASSWORD_MIN_LENGTH)
raise ValidationError(msg, code='password_min_length')
def password_complexity(value):
if not dap_settings.PASSWORD_COMPLEXITY:
return
pw_set = set(value)
for rule in dap_settings.PASSWORD_COMPLEXITY:
if not pw_set.intersection(rule['chars']):
msg = dap_settings.PASSWORD_COMPLEXITY_TEXT.format(
rule_text=rule['text'])
raise ValidationError(msg, 'password_complexity')
|
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from django_auth_policy import settings as dap_settings
def password_min_length(value):
if dap_settings.PASSWORD_MIN_LENGTH_TEXT is None:
return
if len(value) < dap_settings.PASSWORD_MIN_LENGTH:
msg = _(dap_settings.PASSWORD_MIN_LENGTH_TEXT).format(
length=dap_settings.PASSWORD_MIN_LENGTH)
raise ValidationError(msg, code='password_min_length')
def password_complexity(value):
if not dap_settings.PASSWORD_COMPLEXITY:
return
pw_set = set(value)
for rule in dap_settings.PASSWORD_COMPLEXITY:
if not pw_set.intersection(rule['chars']):
msg = _(dap_settings.PASSWORD_COMPLEXITY_TEXT).format(
rule_text=_(rule['text']))
raise ValidationError(msg, 'password_complexity')
|
Fix translatability of validation messages when defined in custom settings
|
Fix translatability of validation messages when defined in custom settings
|
Python
|
bsd-3-clause
|
mcella/django-auth-policy,mcella/django-auth-policy,Dreamsolution/django-auth-policy,Dreamsolution/django-auth-policy
|
from django.core.exceptions import ValidationError
from django_auth_policy import settings as dap_settings
def password_min_length(value):
if dap_settings.PASSWORD_MIN_LENGTH_TEXT is None:
return
if len(value) < dap_settings.PASSWORD_MIN_LENGTH:
msg = dap_settings.PASSWORD_MIN_LENGTH_TEXT.format(
length=dap_settings.PASSWORD_MIN_LENGTH)
raise ValidationError(msg, code='password_min_length')
def password_complexity(value):
if not dap_settings.PASSWORD_COMPLEXITY:
return
pw_set = set(value)
for rule in dap_settings.PASSWORD_COMPLEXITY:
if not pw_set.intersection(rule['chars']):
msg = dap_settings.PASSWORD_COMPLEXITY_TEXT.format(
rule_text=rule['text'])
raise ValidationError(msg, 'password_complexity')
Fix translatability of validation messages when defined in custom settings
|
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from django_auth_policy import settings as dap_settings
def password_min_length(value):
if dap_settings.PASSWORD_MIN_LENGTH_TEXT is None:
return
if len(value) < dap_settings.PASSWORD_MIN_LENGTH:
msg = _(dap_settings.PASSWORD_MIN_LENGTH_TEXT).format(
length=dap_settings.PASSWORD_MIN_LENGTH)
raise ValidationError(msg, code='password_min_length')
def password_complexity(value):
if not dap_settings.PASSWORD_COMPLEXITY:
return
pw_set = set(value)
for rule in dap_settings.PASSWORD_COMPLEXITY:
if not pw_set.intersection(rule['chars']):
msg = _(dap_settings.PASSWORD_COMPLEXITY_TEXT).format(
rule_text=_(rule['text']))
raise ValidationError(msg, 'password_complexity')
|
<commit_before>from django.core.exceptions import ValidationError
from django_auth_policy import settings as dap_settings
def password_min_length(value):
if dap_settings.PASSWORD_MIN_LENGTH_TEXT is None:
return
if len(value) < dap_settings.PASSWORD_MIN_LENGTH:
msg = dap_settings.PASSWORD_MIN_LENGTH_TEXT.format(
length=dap_settings.PASSWORD_MIN_LENGTH)
raise ValidationError(msg, code='password_min_length')
def password_complexity(value):
if not dap_settings.PASSWORD_COMPLEXITY:
return
pw_set = set(value)
for rule in dap_settings.PASSWORD_COMPLEXITY:
if not pw_set.intersection(rule['chars']):
msg = dap_settings.PASSWORD_COMPLEXITY_TEXT.format(
rule_text=rule['text'])
raise ValidationError(msg, 'password_complexity')
<commit_msg>Fix translatability of validation messages when defined in custom settings<commit_after>
|
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from django_auth_policy import settings as dap_settings
def password_min_length(value):
if dap_settings.PASSWORD_MIN_LENGTH_TEXT is None:
return
if len(value) < dap_settings.PASSWORD_MIN_LENGTH:
msg = _(dap_settings.PASSWORD_MIN_LENGTH_TEXT).format(
length=dap_settings.PASSWORD_MIN_LENGTH)
raise ValidationError(msg, code='password_min_length')
def password_complexity(value):
if not dap_settings.PASSWORD_COMPLEXITY:
return
pw_set = set(value)
for rule in dap_settings.PASSWORD_COMPLEXITY:
if not pw_set.intersection(rule['chars']):
msg = _(dap_settings.PASSWORD_COMPLEXITY_TEXT).format(
rule_text=_(rule['text']))
raise ValidationError(msg, 'password_complexity')
|
from django.core.exceptions import ValidationError
from django_auth_policy import settings as dap_settings
def password_min_length(value):
if dap_settings.PASSWORD_MIN_LENGTH_TEXT is None:
return
if len(value) < dap_settings.PASSWORD_MIN_LENGTH:
msg = dap_settings.PASSWORD_MIN_LENGTH_TEXT.format(
length=dap_settings.PASSWORD_MIN_LENGTH)
raise ValidationError(msg, code='password_min_length')
def password_complexity(value):
if not dap_settings.PASSWORD_COMPLEXITY:
return
pw_set = set(value)
for rule in dap_settings.PASSWORD_COMPLEXITY:
if not pw_set.intersection(rule['chars']):
msg = dap_settings.PASSWORD_COMPLEXITY_TEXT.format(
rule_text=rule['text'])
raise ValidationError(msg, 'password_complexity')
Fix translatability of validation messages when defined in custom settingsfrom django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from django_auth_policy import settings as dap_settings
def password_min_length(value):
if dap_settings.PASSWORD_MIN_LENGTH_TEXT is None:
return
if len(value) < dap_settings.PASSWORD_MIN_LENGTH:
msg = _(dap_settings.PASSWORD_MIN_LENGTH_TEXT).format(
length=dap_settings.PASSWORD_MIN_LENGTH)
raise ValidationError(msg, code='password_min_length')
def password_complexity(value):
if not dap_settings.PASSWORD_COMPLEXITY:
return
pw_set = set(value)
for rule in dap_settings.PASSWORD_COMPLEXITY:
if not pw_set.intersection(rule['chars']):
msg = _(dap_settings.PASSWORD_COMPLEXITY_TEXT).format(
rule_text=_(rule['text']))
raise ValidationError(msg, 'password_complexity')
|
<commit_before>from django.core.exceptions import ValidationError
from django_auth_policy import settings as dap_settings
def password_min_length(value):
if dap_settings.PASSWORD_MIN_LENGTH_TEXT is None:
return
if len(value) < dap_settings.PASSWORD_MIN_LENGTH:
msg = dap_settings.PASSWORD_MIN_LENGTH_TEXT.format(
length=dap_settings.PASSWORD_MIN_LENGTH)
raise ValidationError(msg, code='password_min_length')
def password_complexity(value):
if not dap_settings.PASSWORD_COMPLEXITY:
return
pw_set = set(value)
for rule in dap_settings.PASSWORD_COMPLEXITY:
if not pw_set.intersection(rule['chars']):
msg = dap_settings.PASSWORD_COMPLEXITY_TEXT.format(
rule_text=rule['text'])
raise ValidationError(msg, 'password_complexity')
<commit_msg>Fix translatability of validation messages when defined in custom settings<commit_after>from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from django_auth_policy import settings as dap_settings
def password_min_length(value):
if dap_settings.PASSWORD_MIN_LENGTH_TEXT is None:
return
if len(value) < dap_settings.PASSWORD_MIN_LENGTH:
msg = _(dap_settings.PASSWORD_MIN_LENGTH_TEXT).format(
length=dap_settings.PASSWORD_MIN_LENGTH)
raise ValidationError(msg, code='password_min_length')
def password_complexity(value):
if not dap_settings.PASSWORD_COMPLEXITY:
return
pw_set = set(value)
for rule in dap_settings.PASSWORD_COMPLEXITY:
if not pw_set.intersection(rule['chars']):
msg = _(dap_settings.PASSWORD_COMPLEXITY_TEXT).format(
rule_text=_(rule['text']))
raise ValidationError(msg, 'password_complexity')
|
11ec7ed43fbd5d6dae786f8320d1540080a55d57
|
tools/secret_key_generator.py
|
tools/secret_key_generator.py
|
#!/usr/bin/env python
# encoding: utf-8
from hashlib import md5, sha1
from base64 import urlsafe_b64encode as b64encode
import random
random.seed()
def random_string():
"""
Generate a random string (currently a random number as a string)
"""
return str(random.randint(0,100000))
def generate_key(max_length, data, encoder=b64encode, digester=md5):
"""
Generate a Base64-encoded 'random' key by hashing the data.
data is a tuple of seeding values. Pass arbitrary encoder and
digester for specific hashing and formatting of keys
"""
base = ''
for arg in data:
base += str(arg)
key = encoder(digester(base).digest())
return key[:max_length]
if __name__ == "__main__":
print generate_key(40, (random_string(),))
|
#!/usr/bin/env python
# encoding: utf-8
import sys
from hashlib import md5, sha1
from base64 import urlsafe_b64encode as b64encode
import random
random.seed()
def random_string():
"""
Generate a random string (currently a random number as a string)
"""
return str(random.randint(0,100000))
def generate_key(max_length, data, encoder=b64encode, digester=md5):
"""
Generate a Base64-encoded 'random' key by hashing the data.
data is a tuple of seeding values. Pass arbitrary encoder and
digester for specific hashing and formatting of keys
"""
base = ''
for arg in data:
base += str(arg)
key = encoder(digester(base).digest())
return key[:max_length]
if __name__ == "__main__":
key = generate_key(40, (random_string(),))
if len(sys.argv) == 2:
fp = open(sys.argv[1], 'w')
fp.write("SECRET_KEY = \"%s\"\n" % key)
fp.close()
else:
print key
|
Update script for init seahub_settings.py in Windows
|
Update script for init seahub_settings.py in Windows
|
Python
|
apache-2.0
|
madflow/seahub,madflow/seahub,miurahr/seahub,cloudcopy/seahub,miurahr/seahub,miurahr/seahub,Chilledheart/seahub,Chilledheart/seahub,miurahr/seahub,madflow/seahub,Chilledheart/seahub,cloudcopy/seahub,Chilledheart/seahub,madflow/seahub,cloudcopy/seahub,cloudcopy/seahub,Chilledheart/seahub,madflow/seahub
|
#!/usr/bin/env python
# encoding: utf-8
from hashlib import md5, sha1
from base64 import urlsafe_b64encode as b64encode
import random
random.seed()
def random_string():
"""
Generate a random string (currently a random number as a string)
"""
return str(random.randint(0,100000))
def generate_key(max_length, data, encoder=b64encode, digester=md5):
"""
Generate a Base64-encoded 'random' key by hashing the data.
data is a tuple of seeding values. Pass arbitrary encoder and
digester for specific hashing and formatting of keys
"""
base = ''
for arg in data:
base += str(arg)
key = encoder(digester(base).digest())
return key[:max_length]
if __name__ == "__main__":
print generate_key(40, (random_string(),))
Update script for init seahub_settings.py in Windows
|
#!/usr/bin/env python
# encoding: utf-8
import sys
from hashlib import md5, sha1
from base64 import urlsafe_b64encode as b64encode
import random
random.seed()
def random_string():
"""
Generate a random string (currently a random number as a string)
"""
return str(random.randint(0,100000))
def generate_key(max_length, data, encoder=b64encode, digester=md5):
"""
Generate a Base64-encoded 'random' key by hashing the data.
data is a tuple of seeding values. Pass arbitrary encoder and
digester for specific hashing and formatting of keys
"""
base = ''
for arg in data:
base += str(arg)
key = encoder(digester(base).digest())
return key[:max_length]
if __name__ == "__main__":
key = generate_key(40, (random_string(),))
if len(sys.argv) == 2:
fp = open(sys.argv[1], 'w')
fp.write("SECRET_KEY = \"%s\"\n" % key)
fp.close()
else:
print key
|
<commit_before>#!/usr/bin/env python
# encoding: utf-8
from hashlib import md5, sha1
from base64 import urlsafe_b64encode as b64encode
import random
random.seed()
def random_string():
"""
Generate a random string (currently a random number as a string)
"""
return str(random.randint(0,100000))
def generate_key(max_length, data, encoder=b64encode, digester=md5):
"""
Generate a Base64-encoded 'random' key by hashing the data.
data is a tuple of seeding values. Pass arbitrary encoder and
digester for specific hashing and formatting of keys
"""
base = ''
for arg in data:
base += str(arg)
key = encoder(digester(base).digest())
return key[:max_length]
if __name__ == "__main__":
print generate_key(40, (random_string(),))
<commit_msg>Update script for init seahub_settings.py in Windows<commit_after>
|
#!/usr/bin/env python
# encoding: utf-8
import sys
from hashlib import md5, sha1
from base64 import urlsafe_b64encode as b64encode
import random
random.seed()
def random_string():
"""
Generate a random string (currently a random number as a string)
"""
return str(random.randint(0,100000))
def generate_key(max_length, data, encoder=b64encode, digester=md5):
"""
Generate a Base64-encoded 'random' key by hashing the data.
data is a tuple of seeding values. Pass arbitrary encoder and
digester for specific hashing and formatting of keys
"""
base = ''
for arg in data:
base += str(arg)
key = encoder(digester(base).digest())
return key[:max_length]
if __name__ == "__main__":
key = generate_key(40, (random_string(),))
if len(sys.argv) == 2:
fp = open(sys.argv[1], 'w')
fp.write("SECRET_KEY = \"%s\"\n" % key)
fp.close()
else:
print key
|
#!/usr/bin/env python
# encoding: utf-8
from hashlib import md5, sha1
from base64 import urlsafe_b64encode as b64encode
import random
random.seed()
def random_string():
"""
Generate a random string (currently a random number as a string)
"""
return str(random.randint(0,100000))
def generate_key(max_length, data, encoder=b64encode, digester=md5):
"""
Generate a Base64-encoded 'random' key by hashing the data.
data is a tuple of seeding values. Pass arbitrary encoder and
digester for specific hashing and formatting of keys
"""
base = ''
for arg in data:
base += str(arg)
key = encoder(digester(base).digest())
return key[:max_length]
if __name__ == "__main__":
print generate_key(40, (random_string(),))
Update script for init seahub_settings.py in Windows#!/usr/bin/env python
# encoding: utf-8
import sys
from hashlib import md5, sha1
from base64 import urlsafe_b64encode as b64encode
import random
random.seed()
def random_string():
"""
Generate a random string (currently a random number as a string)
"""
return str(random.randint(0,100000))
def generate_key(max_length, data, encoder=b64encode, digester=md5):
"""
Generate a Base64-encoded 'random' key by hashing the data.
data is a tuple of seeding values. Pass arbitrary encoder and
digester for specific hashing and formatting of keys
"""
base = ''
for arg in data:
base += str(arg)
key = encoder(digester(base).digest())
return key[:max_length]
if __name__ == "__main__":
key = generate_key(40, (random_string(),))
if len(sys.argv) == 2:
fp = open(sys.argv[1], 'w')
fp.write("SECRET_KEY = \"%s\"\n" % key)
fp.close()
else:
print key
|
<commit_before>#!/usr/bin/env python
# encoding: utf-8
from hashlib import md5, sha1
from base64 import urlsafe_b64encode as b64encode
import random
random.seed()
def random_string():
"""
Generate a random string (currently a random number as a string)
"""
return str(random.randint(0,100000))
def generate_key(max_length, data, encoder=b64encode, digester=md5):
"""
Generate a Base64-encoded 'random' key by hashing the data.
data is a tuple of seeding values. Pass arbitrary encoder and
digester for specific hashing and formatting of keys
"""
base = ''
for arg in data:
base += str(arg)
key = encoder(digester(base).digest())
return key[:max_length]
if __name__ == "__main__":
print generate_key(40, (random_string(),))
<commit_msg>Update script for init seahub_settings.py in Windows<commit_after>#!/usr/bin/env python
# encoding: utf-8
import sys
from hashlib import md5, sha1
from base64 import urlsafe_b64encode as b64encode
import random
random.seed()
def random_string():
"""
Generate a random string (currently a random number as a string)
"""
return str(random.randint(0,100000))
def generate_key(max_length, data, encoder=b64encode, digester=md5):
"""
Generate a Base64-encoded 'random' key by hashing the data.
data is a tuple of seeding values. Pass arbitrary encoder and
digester for specific hashing and formatting of keys
"""
base = ''
for arg in data:
base += str(arg)
key = encoder(digester(base).digest())
return key[:max_length]
if __name__ == "__main__":
key = generate_key(40, (random_string(),))
if len(sys.argv) == 2:
fp = open(sys.argv[1], 'w')
fp.write("SECRET_KEY = \"%s\"\n" % key)
fp.close()
else:
print key
|
34b78a3bed13613394b8655b51ec46933cc9746e
|
py/find_intersection.py
|
py/find_intersection.py
|
"""
Find the intersection of two sorted arrays.
"""
def find_intersection(list_a, list_b):
"""
Assumes that both lists are sorted.
"""
# Initialize result list
intersection = []
    # Initialize indices for list_a and list_b
    idx_a = 0
    idx_b = 0
    # Loop while the two indices are within their respective lists
while idx_a < len(list_a) and idx_b < len(list_b):
a = list_a[idx_a]
b = list_b[idx_b]
# If the two values match and the value is not already in the result,
# add the value to the result; else advance the index referencing the
# smaller number since we know the other list does not have a match
if a == b and (len(intersection) == 0 or intersection[-1] != a):
intersection.append(list_a[idx_a])
elif a > b:
idx_b += 1
else:
idx_a += 1
return intersection
def test_find_intersection():
print "Testing empty lists..."
assert(find_intersection([], []) == [])
assert(find_intersection([], [1,2,3]) == [])
assert(find_intersection([1,2,3], []) == [])
print "=== Success ==="
print "Testing no intersection..."
assert(find_intersection([1,2,3,4,5], [6,7,8,9,10]) == [])
assert(find_intersection([1,3,5,7,9], [0,2,4,6,8]) == [])
assert(find_intersection([1,2,3,7,8,9], [4,5,6,10,11,12]) == [])
print "=== Success ==="
print "Testing intersection with no repeats..."
assert(find_intersection([0,1,2,3,7,8,9,10], [3,7,10]) == [3,7,10])
assert(find_intersection([-222,-10,-5,1,7,43,107],
[-234,-32,-30,-10,0,14,34,43,999]) == [-10,43])
print "=== Success ==="
print "Testing intersection with repeats..."
assert(find_intersection([-9,0,1,1,2,4,4,4,6,9,9,9,194,199],
[-10,-3,1,1,1,1,5,6,194,194,298]) == [1,6,194])
print "=== Success ==="
print "Testing large lists..."
assert(find_intersection(range(10050), range(-100, 999)) == range(999))
assert(find_intersection([-10]*999 + [0]*999 + [99]*999,
[-10]*999 + [1]*999 + [99]*999) == [-10, 99])
print "=== Success ==="
if __name__ == "__main__":
test_find_intersection()
|
Add find intersection between two sorted lists
|
Add find intersection between two sorted lists
|
Python
|
mit
|
tdeh/quickies,tdeh/quickies
|
Add find intersection between two sorted lists
|
"""
Find the intersection of two sorted arrays.
"""
def find_intersection(list_a, list_b):
"""
Assumes that both lists are sorted.
"""
# Initialize result list
intersection = []
    # Initialize indices for list_a and list_b
    idx_a = 0
    idx_b = 0
    # Loop while the two indices are within their respective lists
while idx_a < len(list_a) and idx_b < len(list_b):
a = list_a[idx_a]
b = list_b[idx_b]
# If the two values match and the value is not already in the result,
# add the value to the result; else advance the index referencing the
# smaller number since we know the other list does not have a match
if a == b and (len(intersection) == 0 or intersection[-1] != a):
intersection.append(list_a[idx_a])
elif a > b:
idx_b += 1
else:
idx_a += 1
return intersection
def test_find_intersection():
print "Testing empty lists..."
assert(find_intersection([], []) == [])
assert(find_intersection([], [1,2,3]) == [])
assert(find_intersection([1,2,3], []) == [])
print "=== Success ==="
print "Testing no intersection..."
assert(find_intersection([1,2,3,4,5], [6,7,8,9,10]) == [])
assert(find_intersection([1,3,5,7,9], [0,2,4,6,8]) == [])
assert(find_intersection([1,2,3,7,8,9], [4,5,6,10,11,12]) == [])
print "=== Success ==="
print "Testing intersection with no repeats..."
assert(find_intersection([0,1,2,3,7,8,9,10], [3,7,10]) == [3,7,10])
assert(find_intersection([-222,-10,-5,1,7,43,107],
[-234,-32,-30,-10,0,14,34,43,999]) == [-10,43])
print "=== Success ==="
print "Testing intersection with repeats..."
assert(find_intersection([-9,0,1,1,2,4,4,4,6,9,9,9,194,199],
[-10,-3,1,1,1,1,5,6,194,194,298]) == [1,6,194])
print "=== Success ==="
print "Testing large lists..."
assert(find_intersection(range(10050), range(-100, 999)) == range(999))
assert(find_intersection([-10]*999 + [0]*999 + [99]*999,
[-10]*999 + [1]*999 + [99]*999) == [-10, 99])
print "=== Success ==="
if __name__ == "__main__":
test_find_intersection()
|
<commit_before><commit_msg>Add find intersection between two sorted lists<commit_after>
|
"""
Find the intersection of two sorted arrays.
"""
def find_intersection(list_a, list_b):
"""
Assumes that both lists are sorted.
"""
# Initialize result list
intersection = []
    # Initialize indices for list_a and list_b
    idx_a = 0
    idx_b = 0
    # Loop while the two indices are within their respective lists
while idx_a < len(list_a) and idx_b < len(list_b):
a = list_a[idx_a]
b = list_b[idx_b]
# If the two values match and the value is not already in the result,
# add the value to the result; else advance the index referencing the
# smaller number since we know the other list does not have a match
if a == b and (len(intersection) == 0 or intersection[-1] != a):
intersection.append(list_a[idx_a])
elif a > b:
idx_b += 1
else:
idx_a += 1
return intersection
def test_find_intersection():
print "Testing empty lists..."
assert(find_intersection([], []) == [])
assert(find_intersection([], [1,2,3]) == [])
assert(find_intersection([1,2,3], []) == [])
print "=== Success ==="
print "Testing no intersection..."
assert(find_intersection([1,2,3,4,5], [6,7,8,9,10]) == [])
assert(find_intersection([1,3,5,7,9], [0,2,4,6,8]) == [])
assert(find_intersection([1,2,3,7,8,9], [4,5,6,10,11,12]) == [])
print "=== Success ==="
print "Testing intersection with no repeats..."
assert(find_intersection([0,1,2,3,7,8,9,10], [3,7,10]) == [3,7,10])
assert(find_intersection([-222,-10,-5,1,7,43,107],
[-234,-32,-30,-10,0,14,34,43,999]) == [-10,43])
print "=== Success ==="
print "Testing intersection with repeats..."
assert(find_intersection([-9,0,1,1,2,4,4,4,6,9,9,9,194,199],
[-10,-3,1,1,1,1,5,6,194,194,298]) == [1,6,194])
print "=== Success ==="
print "Testing large lists..."
assert(find_intersection(range(10050), range(-100, 999)) == range(999))
assert(find_intersection([-10]*999 + [0]*999 + [99]*999,
[-10]*999 + [1]*999 + [99]*999) == [-10, 99])
print "=== Success ==="
if __name__ == "__main__":
test_find_intersection()
|
Add find intersection between two sorted lists"""
Find the intersection of two sorted arrays.
"""
def find_intersection(list_a, list_b):
"""
Assumes that both lists are sorted.
"""
# Initialize result list
intersection = []
    # Initialize indices for list_a and list_b
    idx_a = 0
    idx_b = 0
    # Loop while the two indices are within their respective lists
while idx_a < len(list_a) and idx_b < len(list_b):
a = list_a[idx_a]
b = list_b[idx_b]
# If the two values match and the value is not already in the result,
# add the value to the result; else advance the index referencing the
# smaller number since we know the other list does not have a match
if a == b and (len(intersection) == 0 or intersection[-1] != a):
intersection.append(list_a[idx_a])
elif a > b:
idx_b += 1
else:
idx_a += 1
return intersection
def test_find_intersection():
print "Testing empty lists..."
assert(find_intersection([], []) == [])
assert(find_intersection([], [1,2,3]) == [])
assert(find_intersection([1,2,3], []) == [])
print "=== Success ==="
print "Testing no intersection..."
assert(find_intersection([1,2,3,4,5], [6,7,8,9,10]) == [])
assert(find_intersection([1,3,5,7,9], [0,2,4,6,8]) == [])
assert(find_intersection([1,2,3,7,8,9], [4,5,6,10,11,12]) == [])
print "=== Success ==="
print "Testing intersection with no repeats..."
assert(find_intersection([0,1,2,3,7,8,9,10], [3,7,10]) == [3,7,10])
assert(find_intersection([-222,-10,-5,1,7,43,107],
[-234,-32,-30,-10,0,14,34,43,999]) == [-10,43])
print "=== Success ==="
print "Testing intersection with repeats..."
assert(find_intersection([-9,0,1,1,2,4,4,4,6,9,9,9,194,199],
[-10,-3,1,1,1,1,5,6,194,194,298]) == [1,6,194])
print "=== Success ==="
print "Testing large lists..."
assert(find_intersection(range(10050), range(-100, 999)) == range(999))
assert(find_intersection([-10]*999 + [0]*999 + [99]*999,
[-10]*999 + [1]*999 + [99]*999) == [-10, 99])
print "=== Success ==="
if __name__ == "__main__":
test_find_intersection()
|
<commit_before><commit_msg>Add find intersection between two sorted lists<commit_after>"""
Find the intersection of two sorted arrays.
"""
def find_intersection(list_a, list_b):
"""
Assumes that both lists are sorted.
"""
# Initialize result list
intersection = []
    # Initialize indices for list_a and list_b
    idx_a = 0
    idx_b = 0
    # Loop while the two indices are within their respective lists
while idx_a < len(list_a) and idx_b < len(list_b):
a = list_a[idx_a]
b = list_b[idx_b]
# If the two values match and the value is not already in the result,
# add the value to the result; else advance the index referencing the
# smaller number since we know the other list does not have a match
if a == b and (len(intersection) == 0 or intersection[-1] != a):
intersection.append(list_a[idx_a])
elif a > b:
idx_b += 1
else:
idx_a += 1
return intersection
def test_find_intersection():
print "Testing empty lists..."
assert(find_intersection([], []) == [])
assert(find_intersection([], [1,2,3]) == [])
assert(find_intersection([1,2,3], []) == [])
print "=== Success ==="
print "Testing no intersection..."
assert(find_intersection([1,2,3,4,5], [6,7,8,9,10]) == [])
assert(find_intersection([1,3,5,7,9], [0,2,4,6,8]) == [])
assert(find_intersection([1,2,3,7,8,9], [4,5,6,10,11,12]) == [])
print "=== Success ==="
print "Testing intersection with no repeats..."
assert(find_intersection([0,1,2,3,7,8,9,10], [3,7,10]) == [3,7,10])
assert(find_intersection([-222,-10,-5,1,7,43,107],
[-234,-32,-30,-10,0,14,34,43,999]) == [-10,43])
print "=== Success ==="
print "Testing intersection with repeats..."
assert(find_intersection([-9,0,1,1,2,4,4,4,6,9,9,9,194,199],
[-10,-3,1,1,1,1,5,6,194,194,298]) == [1,6,194])
print "=== Success ==="
print "Testing large lists..."
assert(find_intersection(range(10050), range(-100, 999)) == range(999))
assert(find_intersection([-10]*999 + [0]*999 + [99]*999,
[-10]*999 + [1]*999 + [99]*999) == [-10, 99])
print "=== Success ==="
if __name__ == "__main__":
test_find_intersection()
|
|
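A quick usage sketch for the two-pointer intersection above (it assumes find_intersection is in scope); the walk runs in O(len(a) + len(b)):

a = [1, 2, 2, 3, 7, 9]
b = [2, 3, 3, 8, 9]
print(find_intersection(a, b))   # [2, 3, 9]
# Cross-check with sets; sorting restores the order the function returns
print(sorted(set(a) & set(b)))   # [2, 3, 9]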
5be9df0eece36d4f5ea29447edec309991f443c1
|
telethon/entity_database.py
|
telethon/entity_database.py
|
from . import utils
from .tl import TLObject
class EntityDatabase:
def __init__(self, enabled=True):
self.enabled = enabled
self._entities = {} # marked_id: user|chat|channel
# TODO Allow disabling some extra mappings
self._username_id = {} # username: marked_id
def add(self, entity):
if not self.enabled:
return
# Adds or updates the given entity
marked_id = utils.get_peer_id(entity, add_mark=True)
try:
old_entity = self._entities[marked_id]
old_entity.__dict__.update(entity) # Keep old references
# Update must delete old username
username = getattr(old_entity, 'username', None)
if username:
del self._username_id[username.lower()]
except KeyError:
# Add new entity
self._entities[marked_id] = entity
# Always update username if any
username = getattr(entity, 'username', None)
if username:
self._username_id[username.lower()] = marked_id
def __getitem__(self, key):
"""Accepts a digit only string as phone number,
otherwise it's treated as an username.
If an integer is given, it's treated as the ID of the desired User.
The ID given won't try to be guessed as the ID of a chat or channel,
as there may be an user with that ID, and it would be unreliable.
If a Peer is given (PeerUser, PeerChat, PeerChannel),
its specific entity is retrieved as User, Chat or Channel.
Note that megagroups are channels with .megagroup = True.
"""
if isinstance(key, str):
# TODO Parse phone properly, currently only usernames
key = key.lstrip('@').lower()
# TODO Use the client to return from username if not found
return self._entities[self._username_id[key]]
if isinstance(key, int):
return self._entities[key] # normal IDs are assumed users
if isinstance(key, TLObject) and type(key).SUBCLASS_OF_ID == 0x2d45687:
return self._entities[utils.get_peer_id(key, add_mark=True)]
raise KeyError(key)
def __delitem__(self, key):
target = self[key]
del self._entities[key]
        if getattr(target, 'username', None):
            del self._username_id[target.username.lower()]
# TODO Allow search by name by tokenizing the input and return a list
def clear(self, target=None):
if target is None:
self._entities.clear()
else:
del self[target]
|
Add a basic EntityDatabase class
|
Add a basic EntityDatabase class
|
Python
|
mit
|
LonamiWebs/Telethon,LonamiWebs/Telethon,LonamiWebs/Telethon,LonamiWebs/Telethon,expectocode/Telethon,andr-04/Telethon
|
Add a basic EntityDatabase class
|
from . import utils
from .tl import TLObject
class EntityDatabase:
def __init__(self, enabled=True):
self.enabled = enabled
self._entities = {} # marked_id: user|chat|channel
# TODO Allow disabling some extra mappings
self._username_id = {} # username: marked_id
def add(self, entity):
if not self.enabled:
return
# Adds or updates the given entity
marked_id = utils.get_peer_id(entity, add_mark=True)
try:
old_entity = self._entities[marked_id]
old_entity.__dict__.update(entity) # Keep old references
# Update must delete old username
username = getattr(old_entity, 'username', None)
if username:
del self._username_id[username.lower()]
except KeyError:
# Add new entity
self._entities[marked_id] = entity
# Always update username if any
username = getattr(entity, 'username', None)
if username:
self._username_id[username.lower()] = marked_id
def __getitem__(self, key):
"""Accepts a digit only string as phone number,
otherwise it's treated as an username.
If an integer is given, it's treated as the ID of the desired User.
The ID given won't try to be guessed as the ID of a chat or channel,
as there may be an user with that ID, and it would be unreliable.
If a Peer is given (PeerUser, PeerChat, PeerChannel),
its specific entity is retrieved as User, Chat or Channel.
Note that megagroups are channels with .megagroup = True.
"""
if isinstance(key, str):
# TODO Parse phone properly, currently only usernames
key = key.lstrip('@').lower()
# TODO Use the client to return from username if not found
return self._entities[self._username_id[key]]
if isinstance(key, int):
return self._entities[key] # normal IDs are assumed users
if isinstance(key, TLObject) and type(key).SUBCLASS_OF_ID == 0x2d45687:
return self._entities[utils.get_peer_id(key, add_mark=True)]
raise KeyError(key)
def __delitem__(self, key):
target = self[key]
del self._entities[key]
        if getattr(target, 'username', None):
            del self._username_id[target.username.lower()]
# TODO Allow search by name by tokenizing the input and return a list
def clear(self, target=None):
if target is None:
self._entities.clear()
else:
del self[target]
|
<commit_before><commit_msg>Add a basic EntityDatabase class<commit_after>
|
from . import utils
from .tl import TLObject
class EntityDatabase:
def __init__(self, enabled=True):
self.enabled = enabled
self._entities = {} # marked_id: user|chat|channel
# TODO Allow disabling some extra mappings
self._username_id = {} # username: marked_id
def add(self, entity):
if not self.enabled:
return
# Adds or updates the given entity
marked_id = utils.get_peer_id(entity, add_mark=True)
try:
old_entity = self._entities[marked_id]
old_entity.__dict__.update(entity) # Keep old references
# Update must delete old username
username = getattr(old_entity, 'username', None)
if username:
del self._username_id[username.lower()]
except KeyError:
# Add new entity
self._entities[marked_id] = entity
# Always update username if any
username = getattr(entity, 'username', None)
if username:
self._username_id[username.lower()] = marked_id
def __getitem__(self, key):
"""Accepts a digit only string as phone number,
otherwise it's treated as an username.
If an integer is given, it's treated as the ID of the desired User.
The ID given won't try to be guessed as the ID of a chat or channel,
as there may be an user with that ID, and it would be unreliable.
If a Peer is given (PeerUser, PeerChat, PeerChannel),
its specific entity is retrieved as User, Chat or Channel.
Note that megagroups are channels with .megagroup = True.
"""
if isinstance(key, str):
# TODO Parse phone properly, currently only usernames
key = key.lstrip('@').lower()
# TODO Use the client to return from username if not found
return self._entities[self._username_id[key]]
if isinstance(key, int):
return self._entities[key] # normal IDs are assumed users
if isinstance(key, TLObject) and type(key).SUBCLASS_OF_ID == 0x2d45687:
return self._entities[utils.get_peer_id(key, add_mark=True)]
raise KeyError(key)
def __delitem__(self, key):
target = self[key]
del self._entities[key]
        if getattr(target, 'username', None):
            del self._username_id[target.username.lower()]
# TODO Allow search by name by tokenizing the input and return a list
def clear(self, target=None):
if target is None:
self._entities.clear()
else:
del self[target]
|
Add a basic EntityDatabase classfrom . import utils
from .tl import TLObject
class EntityDatabase:
def __init__(self, enabled=True):
self.enabled = enabled
self._entities = {} # marked_id: user|chat|channel
# TODO Allow disabling some extra mappings
self._username_id = {} # username: marked_id
def add(self, entity):
if not self.enabled:
return
# Adds or updates the given entity
marked_id = utils.get_peer_id(entity, add_mark=True)
try:
old_entity = self._entities[marked_id]
old_entity.__dict__.update(entity) # Keep old references
# Update must delete old username
username = getattr(old_entity, 'username', None)
if username:
del self._username_id[username.lower()]
except KeyError:
# Add new entity
self._entities[marked_id] = entity
# Always update username if any
username = getattr(entity, 'username', None)
if username:
self._username_id[username.lower()] = marked_id
def __getitem__(self, key):
"""Accepts a digit only string as phone number,
otherwise it's treated as an username.
If an integer is given, it's treated as the ID of the desired User.
The ID given won't try to be guessed as the ID of a chat or channel,
as there may be an user with that ID, and it would be unreliable.
If a Peer is given (PeerUser, PeerChat, PeerChannel),
its specific entity is retrieved as User, Chat or Channel.
Note that megagroups are channels with .megagroup = True.
"""
if isinstance(key, str):
# TODO Parse phone properly, currently only usernames
key = key.lstrip('@').lower()
# TODO Use the client to return from username if not found
return self._entities[self._username_id[key]]
if isinstance(key, int):
return self._entities[key] # normal IDs are assumed users
if isinstance(key, TLObject) and type(key).SUBCLASS_OF_ID == 0x2d45687:
return self._entities[utils.get_peer_id(key, add_mark=True)]
raise KeyError(key)
def __delitem__(self, key):
target = self[key]
del self._entities[key]
        if getattr(target, 'username', None):
            del self._username_id[target.username.lower()]
# TODO Allow search by name by tokenizing the input and return a list
def clear(self, target=None):
if target is None:
self._entities.clear()
else:
del self[target]
|
<commit_before><commit_msg>Add a basic EntityDatabase class<commit_after>from . import utils
from .tl import TLObject
class EntityDatabase:
def __init__(self, enabled=True):
self.enabled = enabled
self._entities = {} # marked_id: user|chat|channel
# TODO Allow disabling some extra mappings
self._username_id = {} # username: marked_id
def add(self, entity):
if not self.enabled:
return
# Adds or updates the given entity
marked_id = utils.get_peer_id(entity, add_mark=True)
try:
old_entity = self._entities[marked_id]
old_entity.__dict__.update(entity) # Keep old references
# Update must delete old username
username = getattr(old_entity, 'username', None)
if username:
del self._username_id[username.lower()]
except KeyError:
# Add new entity
self._entities[marked_id] = entity
# Always update username if any
username = getattr(entity, 'username', None)
if username:
self._username_id[username.lower()] = marked_id
def __getitem__(self, key):
"""Accepts a digit only string as phone number,
otherwise it's treated as an username.
If an integer is given, it's treated as the ID of the desired User.
The ID given won't try to be guessed as the ID of a chat or channel,
as there may be an user with that ID, and it would be unreliable.
If a Peer is given (PeerUser, PeerChat, PeerChannel),
its specific entity is retrieved as User, Chat or Channel.
Note that megagroups are channels with .megagroup = True.
"""
if isinstance(key, str):
# TODO Parse phone properly, currently only usernames
key = key.lstrip('@').lower()
# TODO Use the client to return from username if not found
return self._entities[self._username_id[key]]
if isinstance(key, int):
return self._entities[key] # normal IDs are assumed users
if isinstance(key, TLObject) and type(key).SUBCLASS_OF_ID == 0x2d45687:
return self._entities[utils.get_peer_id(key, add_mark=True)]
raise KeyError(key)
def __delitem__(self, key):
target = self[key]
del self._entities[key]
if getattr(target, 'username'):
del self._username_id[target.username]
# TODO Allow search by name by tokenizing the input and return a list
def clear(self, target=None):
if target is None:
self._entities.clear()
else:
del self[target]
|
|
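The two-index pattern in EntityDatabase (marked id -> entity, lowercased username -> marked id) can be shown in isolation with plain dicts; the class below is an illustrative stand-in, not Telethon's actual API:

class MiniEntityDB:
    def __init__(self):
        self._entities = {}     # marked_id -> entity dict
        self._username_id = {}  # lowercased username -> marked_id

    def add(self, marked_id, entity):
        old = self._entities.get(marked_id)
        if old and old.get('username'):
            # An update may change the username, so drop the stale index
            self._username_id.pop(old['username'].lower(), None)
        self._entities[marked_id] = entity
        if entity.get('username'):
            self._username_id[entity['username'].lower()] = marked_id

    def by_username(self, username):
        return self._entities[self._username_id[username.lstrip('@').lower()]]

db = MiniEntityDB()
db.add(42, {'username': 'Lonami', 'first_name': 'Lonami'})
print(db.by_username('@LONAMI'))  # {'username': 'Lonami', 'first_name': 'Lonami'}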
75fcb95c04bc56729a1521177ac0e2cb4462bbba
|
packs/st2/actions/chatops_format_list_result.py
|
packs/st2/actions/chatops_format_list_result.py
|
from st2actions.runners.pythonrunner import Action
from prettytable import PrettyTable
__all__ = [
'St2ChatOpsFormatListResult'
]
class St2ChatOpsFormatListResult(Action):
def run(self, result, attributes):
table = PrettyTable()
if not result:
return 'No results.'
# Add headers
header = []
for attribute in attributes:
name = attribute.title()
header.append(name)
table.field_names = header
# Add rows
for item in result:
row = []
for attribute in attributes:
value = item.get(attribute, None)
row.append(value)
table.add_row(row)
result = table.get_string()
return result
|
from st2actions.runners.pythonrunner import Action
from prettytable import PrettyTable
__all__ = [
'St2ChatOpsFormatListResult'
]
class St2ChatOpsFormatListResult(Action):
def run(self, result, attributes):
table = PrettyTable()
if not result:
return 'No results.'
# Add headers
header = []
for attribute in attributes:
name = self._get_header_attribute_name(attribute=attribute)
header.append(name)
table.field_names = header
# Add rows
for item in result:
row = []
for attribute in attributes:
value = self._get_attribute_value(attribute=attribute, item=item)
row.append(value)
table.add_row(row)
result = table.get_string()
return result
def _get_header_attribute_name(self, attribute):
name = attribute.replace('_', ' ').replace('.', ' ').title()
return name
def _get_attribute_value(self, attribute, item):
if '.' in attribute:
value = self._get_complex_attribute_value(attribute=attribute, item=item)
else:
value = item.get(attribute, None)
return value
def _get_complex_attribute_value(self, attribute, item):
attribute_names = attribute.split('.')
for index in range(0, (len(attribute_names) - 1)):
attribute_name = attribute_names[index]
item = item.get(attribute_name, {})
attribute_name = attribute_names[-1]
value = item.get(attribute_name, None)
return value
|
Add support for nested attribute lookup and formatting.
|
Add support for nested attribute lookup and formatting.
|
Python
|
apache-2.0
|
dennybaa/st2contrib,pearsontechnology/st2contrib,lmEshoo/st2contrib,meirwah/st2contrib,meirwah/st2contrib,tonybaloney/st2contrib,armab/st2contrib,armab/st2contrib,pidah/st2contrib,dennybaa/st2contrib,psychopenguin/st2contrib,digideskio/st2contrib,StackStorm/st2contrib,digideskio/st2contrib,lmEshoo/st2contrib,tonybaloney/st2contrib,pidah/st2contrib,pearsontechnology/st2contrib,psychopenguin/st2contrib,tonybaloney/st2contrib,armab/st2contrib,pinterb/st2contrib,pearsontechnology/st2contrib,pidah/st2contrib,pinterb/st2contrib,pearsontechnology/st2contrib,StackStorm/st2contrib,StackStorm/st2contrib
|
from st2actions.runners.pythonrunner import Action
from prettytable import PrettyTable
__all__ = [
'St2ChatOpsFormatListResult'
]
class St2ChatOpsFormatListResult(Action):
def run(self, result, attributes):
table = PrettyTable()
if not result:
return 'No results.'
# Add headers
header = []
for attribute in attributes:
name = attribute.title()
header.append(name)
table.field_names = header
# Add rows
for item in result:
row = []
for attribute in attributes:
value = item.get(attribute, None)
row.append(value)
table.add_row(row)
result = table.get_string()
return result
Add support for nested attribute lookup and formatting.
|
from st2actions.runners.pythonrunner import Action
from prettytable import PrettyTable
__all__ = [
'St2ChatOpsFormatListResult'
]
class St2ChatOpsFormatListResult(Action):
def run(self, result, attributes):
table = PrettyTable()
if not result:
return 'No results.'
# Add headers
header = []
for attribute in attributes:
name = self._get_header_attribute_name(attribute=attribute)
header.append(name)
table.field_names = header
# Add rows
for item in result:
row = []
for attribute in attributes:
value = self._get_attribute_value(attribute=attribute, item=item)
row.append(value)
table.add_row(row)
result = table.get_string()
return result
def _get_header_attribute_name(self, attribute):
name = attribute.replace('_', ' ').replace('.', ' ').title()
return name
def _get_attribute_value(self, attribute, item):
if '.' in attribute:
value = self._get_complex_attribute_value(attribute=attribute, item=item)
else:
value = item.get(attribute, None)
return value
def _get_complex_attribute_value(self, attribute, item):
attribute_names = attribute.split('.')
for index in range(0, (len(attribute_names) - 1)):
attribute_name = attribute_names[index]
item = item.get(attribute_name, {})
attribute_name = attribute_names[-1]
value = item.get(attribute_name, None)
return value
|
<commit_before>from st2actions.runners.pythonrunner import Action
from prettytable import PrettyTable
__all__ = [
'St2ChatOpsFormatListResult'
]
class St2ChatOpsFormatListResult(Action):
def run(self, result, attributes):
table = PrettyTable()
if not result:
return 'No results.'
# Add headers
header = []
for attribute in attributes:
name = attribute.title()
header.append(name)
table.field_names = header
# Add rows
for item in result:
row = []
for attribute in attributes:
value = item.get(attribute, None)
row.append(value)
table.add_row(row)
result = table.get_string()
return result
<commit_msg>Add support for nested attribute lookup and formatting.<commit_after>
|
from st2actions.runners.pythonrunner import Action
from prettytable import PrettyTable
__all__ = [
'St2ChatOpsFormatListResult'
]
class St2ChatOpsFormatListResult(Action):
def run(self, result, attributes):
table = PrettyTable()
if not result:
return 'No results.'
# Add headers
header = []
for attribute in attributes:
name = self._get_header_attribute_name(attribute=attribute)
header.append(name)
table.field_names = header
# Add rows
for item in result:
row = []
for attribute in attributes:
value = self._get_attribute_value(attribute=attribute, item=item)
row.append(value)
table.add_row(row)
result = table.get_string()
return result
def _get_header_attribute_name(self, attribute):
name = attribute.replace('_', ' ').replace('.', ' ').title()
return name
def _get_attribute_value(self, attribute, item):
if '.' in attribute:
value = self._get_complex_attribute_value(attribute=attribute, item=item)
else:
value = item.get(attribute, None)
return value
def _get_complex_attribute_value(self, attribute, item):
attribute_names = attribute.split('.')
for index in range(0, (len(attribute_names) - 1)):
attribute_name = attribute_names[index]
item = item.get(attribute_name, {})
attribute_name = attribute_names[-1]
value = item.get(attribute_name, None)
return value
|
from st2actions.runners.pythonrunner import Action
from prettytable import PrettyTable
__all__ = [
'St2ChatOpsFormatListResult'
]
class St2ChatOpsFormatListResult(Action):
def run(self, result, attributes):
table = PrettyTable()
if not result:
return 'No results.'
# Add headers
header = []
for attribute in attributes:
name = attribute.title()
header.append(name)
table.field_names = header
# Add rows
for item in result:
row = []
for attribute in attributes:
value = item.get(attribute, None)
row.append(value)
table.add_row(row)
result = table.get_string()
return result
Add support for nested attribute lookup and formatting.from st2actions.runners.pythonrunner import Action
from prettytable import PrettyTable
__all__ = [
'St2ChatOpsFormatListResult'
]
class St2ChatOpsFormatListResult(Action):
def run(self, result, attributes):
table = PrettyTable()
if not result:
return 'No results.'
# Add headers
header = []
for attribute in attributes:
name = self._get_header_attribute_name(attribute=attribute)
header.append(name)
table.field_names = header
# Add rows
for item in result:
row = []
for attribute in attributes:
value = self._get_attribute_value(attribute=attribute, item=item)
row.append(value)
table.add_row(row)
result = table.get_string()
return result
def _get_header_attribute_name(self, attribute):
name = attribute.replace('_', ' ').replace('.', ' ').title()
return name
def _get_attribute_value(self, attribute, item):
if '.' in attribute:
value = self._get_complex_attribute_value(attribute=attribute, item=item)
else:
value = item.get(attribute, None)
return value
def _get_complex_attribute_value(self, attribute, item):
attribute_names = attribute.split('.')
for index in range(0, (len(attribute_names) - 1)):
attribute_name = attribute_names[index]
item = item.get(attribute_name, {})
attribute_name = attribute_names[-1]
value = item.get(attribute_name, None)
return value
|
<commit_before>from st2actions.runners.pythonrunner import Action
from prettytable import PrettyTable
__all__ = [
'St2ChatOpsFormatListResult'
]
class St2ChatOpsFormatListResult(Action):
def run(self, result, attributes):
table = PrettyTable()
if not result:
return 'No results.'
# Add headers
header = []
for attribute in attributes:
name = attribute.title()
header.append(name)
table.field_names = header
# Add rows
for item in result:
row = []
for attribute in attributes:
value = item.get(attribute, None)
row.append(value)
table.add_row(row)
result = table.get_string()
return result
<commit_msg>Add support for nested attribute lookup and formatting.<commit_after>from st2actions.runners.pythonrunner import Action
from prettytable import PrettyTable
__all__ = [
'St2ChatOpsFormatListResult'
]
class St2ChatOpsFormatListResult(Action):
def run(self, result, attributes):
table = PrettyTable()
if not result:
return 'No results.'
# Add headers
header = []
for attribute in attributes:
name = self._get_header_attribute_name(attribute=attribute)
header.append(name)
table.field_names = header
# Add rows
for item in result:
row = []
for attribute in attributes:
value = self._get_attribute_value(attribute=attribute, item=item)
row.append(value)
table.add_row(row)
result = table.get_string()
return result
def _get_header_attribute_name(self, attribute):
name = attribute.replace('_', ' ').replace('.', ' ').title()
return name
def _get_attribute_value(self, attribute, item):
if '.' in attribute:
value = self._get_complex_attribute_value(attribute=attribute, item=item)
else:
value = item.get(attribute, None)
return value
def _get_complex_attribute_value(self, attribute, item):
attribute_names = attribute.split('.')
for index in range(0, (len(attribute_names) - 1)):
attribute_name = attribute_names[index]
item = item.get(attribute_name, {})
attribute_name = attribute_names[-1]
value = item.get(attribute_name, None)
return value
|
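The nested-attribute lookup above ('a.b.c' walking nested dicts) can be exercised on its own; this sketch requires prettytable to be installed, and the sample rows are made up for illustration:

from prettytable import PrettyTable

def get_value(item, attribute):
    # 'status.code' walks nested dicts; a plain key is a direct lookup
    for name in attribute.split('.'):
        item = item.get(name, {}) if isinstance(item, dict) else {}
    return item or None

rows = [{'name': 'run1', 'status': {'code': 'succeeded'}},
        {'name': 'run2', 'status': {'code': 'failed'}}]
table = PrettyTable()
table.field_names = ['Name', 'Status Code']
for row in rows:
    table.add_row([get_value(row, 'name'), get_value(row, 'status.code')])
print(table.get_string())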
d6daa56be204c6ab481756ea49ac97d89bc4ac3d
|
test/test_state_machines.py
|
test/test_state_machines.py
|
# -*- coding: utf-8 -*-
"""
test_state_machines
~~~~~~~~~~~~~~~~~~~
These tests validate the state machines directly. Writing meaningful tests for
this case can be tricky, so the majority of these tests use Hypothesis to try
to talk about general behaviours rather than specific cases
"""
import h2.connection
import h2.exceptions
import h2.stream
from hypothesis import given
from hypothesis.strategies import sampled_from
class TestConnectionStateMachine(object):
"""
Tests of the connection state machine.
"""
@given(state=sampled_from(h2.connection.ConnectionState),
input_=sampled_from(h2.connection.ConnectionInputs))
def test_state_transitions(self, state, input_):
c = h2.connection.H2ConnectionStateMachine()
c.state = state
try:
c.process_input(input_)
except h2.exceptions.ProtocolError:
assert c.state == h2.connection.ConnectionState.CLOSED
else:
assert c.state in h2.connection.ConnectionState
class TestStreamStateMachine(object):
"""
Tests of the stream state machine.
"""
@given(state=sampled_from(h2.stream.StreamState),
input_=sampled_from(h2.stream.StreamInputs))
def test_state_transitions(self, state, input_):
s = h2.stream.H2StreamStateMachine(stream_id=1)
s.state = state
try:
s.process_input(input_)
except h2.exceptions.ProtocolError:
assert s.state == h2.stream.StreamState.CLOSED
else:
assert s.state in h2.stream.StreamState
|
Add Hypothesis-based state machine tests.
|
Add Hypothesis-based state machine tests.
|
Python
|
mit
|
python-hyper/hyper-h2,vladmunteanu/hyper-h2,Kriechi/hyper-h2,Kriechi/hyper-h2,bhavishyagopesh/hyper-h2,python-hyper/hyper-h2,mhils/hyper-h2,vladmunteanu/hyper-h2
|
Add Hypothesis-based state machine tests.
|
# -*- coding: utf-8 -*-
"""
test_state_machines
~~~~~~~~~~~~~~~~~~~
These tests validate the state machines directly. Writing meaningful tests for
this case can be tricky, so the majority of these tests use Hypothesis to try
to talk about general behaviours rather than specific cases
"""
import h2.connection
import h2.exceptions
import h2.stream
from hypothesis import given
from hypothesis.strategies import sampled_from
class TestConnectionStateMachine(object):
"""
Tests of the connection state machine.
"""
@given(state=sampled_from(h2.connection.ConnectionState),
input_=sampled_from(h2.connection.ConnectionInputs))
def test_state_transitions(self, state, input_):
c = h2.connection.H2ConnectionStateMachine()
c.state = state
try:
c.process_input(input_)
except h2.exceptions.ProtocolError:
assert c.state == h2.connection.ConnectionState.CLOSED
else:
assert c.state in h2.connection.ConnectionState
class TestStreamStateMachine(object):
"""
Tests of the stream state machine.
"""
@given(state=sampled_from(h2.stream.StreamState),
input_=sampled_from(h2.stream.StreamInputs))
def test_state_transitions(self, state, input_):
s = h2.stream.H2StreamStateMachine(stream_id=1)
s.state = state
try:
s.process_input(input_)
except h2.exceptions.ProtocolError:
assert s.state == h2.stream.StreamState.CLOSED
else:
assert s.state in h2.stream.StreamState
|
<commit_before><commit_msg>Add Hypothesis-based state machine tests.<commit_after>
|
# -*- coding: utf-8 -*-
"""
test_state_machines
~~~~~~~~~~~~~~~~~~~
These tests validate the state machines directly. Writing meaningful tests for
this case can be tricky, so the majority of these tests use Hypothesis to try
to talk about general behaviours rather than specific cases
"""
import h2.connection
import h2.exceptions
import h2.stream
from hypothesis import given
from hypothesis.strategies import sampled_from
class TestConnectionStateMachine(object):
"""
Tests of the connection state machine.
"""
@given(state=sampled_from(h2.connection.ConnectionState),
input_=sampled_from(h2.connection.ConnectionInputs))
def test_state_transitions(self, state, input_):
c = h2.connection.H2ConnectionStateMachine()
c.state = state
try:
c.process_input(input_)
except h2.exceptions.ProtocolError:
assert c.state == h2.connection.ConnectionState.CLOSED
else:
assert c.state in h2.connection.ConnectionState
class TestStreamStateMachine(object):
"""
Tests of the stream state machine.
"""
@given(state=sampled_from(h2.stream.StreamState),
input_=sampled_from(h2.stream.StreamInputs))
def test_state_transitions(self, state, input_):
s = h2.stream.H2StreamStateMachine(stream_id=1)
s.state = state
try:
s.process_input(input_)
except h2.exceptions.ProtocolError:
assert s.state == h2.stream.StreamState.CLOSED
else:
assert s.state in h2.stream.StreamState
|
Add Hypothesis-based state machine tests.# -*- coding: utf-8 -*-
"""
test_state_machines
~~~~~~~~~~~~~~~~~~~
These tests validate the state machines directly. Writing meaningful tests for
this case can be tricky, so the majority of these tests use Hypothesis to try
to talk about general behaviours rather than specific cases
"""
import h2.connection
import h2.exceptions
import h2.stream
from hypothesis import given
from hypothesis.strategies import sampled_from
class TestConnectionStateMachine(object):
"""
Tests of the connection state machine.
"""
@given(state=sampled_from(h2.connection.ConnectionState),
input_=sampled_from(h2.connection.ConnectionInputs))
def test_state_transitions(self, state, input_):
c = h2.connection.H2ConnectionStateMachine()
c.state = state
try:
c.process_input(input_)
except h2.exceptions.ProtocolError:
assert c.state == h2.connection.ConnectionState.CLOSED
else:
assert c.state in h2.connection.ConnectionState
class TestStreamStateMachine(object):
"""
Tests of the stream state machine.
"""
@given(state=sampled_from(h2.stream.StreamState),
input_=sampled_from(h2.stream.StreamInputs))
def test_state_transitions(self, state, input_):
s = h2.stream.H2StreamStateMachine(stream_id=1)
s.state = state
try:
s.process_input(input_)
except h2.exceptions.ProtocolError:
assert s.state == h2.stream.StreamState.CLOSED
else:
assert s.state in h2.stream.StreamState
|
<commit_before><commit_msg>Add Hypothesis-based state machine tests.<commit_after># -*- coding: utf-8 -*-
"""
test_state_machines
~~~~~~~~~~~~~~~~~~~
These tests validate the state machines directly. Writing meaningful tests for
this case can be tricky, so the majority of these tests use Hypothesis to try
to talk about general behaviours rather than specific cases
"""
import h2.connection
import h2.exceptions
import h2.stream
from hypothesis import given
from hypothesis.strategies import sampled_from
class TestConnectionStateMachine(object):
"""
Tests of the connection state machine.
"""
@given(state=sampled_from(h2.connection.ConnectionState),
input_=sampled_from(h2.connection.ConnectionInputs))
def test_state_transitions(self, state, input_):
c = h2.connection.H2ConnectionStateMachine()
c.state = state
try:
c.process_input(input_)
except h2.exceptions.ProtocolError:
assert c.state == h2.connection.ConnectionState.CLOSED
else:
assert c.state in h2.connection.ConnectionState
class TestStreamStateMachine(object):
"""
Tests of the stream state machine.
"""
@given(state=sampled_from(h2.stream.StreamState),
input_=sampled_from(h2.stream.StreamInputs))
def test_state_transitions(self, state, input_):
s = h2.stream.H2StreamStateMachine(stream_id=1)
s.state = state
try:
s.process_input(input_)
except h2.exceptions.ProtocolError:
assert s.state == h2.stream.StreamState.CLOSED
else:
assert s.state in h2.stream.StreamState
|
|
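The same Hypothesis pattern, shown on a toy machine: sample every state, feed it one step, and assert the result stays inside the state set. This is illustrative and not part of the hyper-h2 suite:

import enum
from hypothesis import given
from hypothesis.strategies import sampled_from

class Light(enum.Enum):
    RED = 0
    GREEN = 1
    YELLOW = 2

TRANSITIONS = {Light.RED: Light.GREEN,
               Light.GREEN: Light.YELLOW,
               Light.YELLOW: Light.RED}

@given(state=sampled_from(Light))
def test_step_stays_in_state_set(state):
    assert TRANSITIONS[state] in Light

test_step_stays_in_state_set()  # @given-wrapped tests can be called directly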
9fafe695e139e512ec9bd4d181ed0151ab5d7265
|
miniskripts/rescale_mark.py
|
miniskripts/rescale_mark.py
|
#!/usr/bin/env python3
"""
Calculates rescaled mark using hardcoded intervals
Usage example:
$ ./rescale_mark.py 50
Your mark should be 65.0
"""
import sys
if len(sys.argv) != 2:
print("Please use your mark as the only argument")
sys.exit()
else:
try:
old_mark = int(sys.argv[1])
    except ValueError:
        print("Please use a number as an argument")
        sys.exit()
if old_mark not in range(101):
print("Please use a number between 0 and 100")
sys.exit()
if old_mark == 100 or old_mark == 0:
print("Your mark should be " + str(old_mark))
sys.exit()
difference = 15
intervals = {
25:0,
75:25,
100:75,
}
upper = next(i for i in sorted(intervals) if old_mark < i)
lower = intervals[upper]
upper_rescaled = min(upper + difference, 100)
lower_rescaled = lower + difference
new_mark = (lower_rescaled + (old_mark - lower) *
(upper_rescaled - lower_rescaled)/(upper - lower))
print("Your mark should be " + str(new_mark))
|
Add short script to calculate rescaling of marks
|
Add short script to calculate rescaling of marks
|
Python
|
mit
|
liviu-/miniskripts,liviu-/miniskripts
|
Add short script to calculate rescaling of marks
|
#!/usr/bin/env python3
"""
Calculates rescaled mark using hardcoded intervals
Usage example:
$ ./rescale_mark.py 50
Your mark should be 65.0
"""
import sys
if len(sys.argv) != 2:
print("Please use your mark as the only argument")
sys.exit()
else:
try:
old_mark = int(sys.argv[1])
    except ValueError:
        print("Please use a number as an argument")
        sys.exit()
if old_mark not in range(101):
print("Please use a number between 0 and 100")
sys.exit()
if old_mark == 100 or old_mark == 0:
print("Your mark should be " + str(old_mark))
sys.exit()
difference = 15
intervals = {
25:0,
75:25,
100:75,
}
upper = next(i for i in sorted(intervals) if old_mark < i)
lower = intervals[upper]
upper_rescaled = min(upper + difference, 100)
lower_rescaled = lower + difference
new_mark = (lower_rescaled + (old_mark - lower) *
(upper_rescaled - lower_rescaled)/(upper - lower))
print("Your mark should be " + str(new_mark))
|
<commit_before><commit_msg>Add short script to calculate rescaling of marks<commit_after>
|
#!/usr/bin/env python3
"""
Calculates rescaled mark using hardcoded intervals
Usage example:
$ ./rescale_mark.py 50
Your mark should be 65.0
"""
import sys
if len(sys.argv) != 2:
print("Please use your mark as the only argument")
sys.exit()
else:
try:
old_mark = int(sys.argv[1])
    except ValueError:
        print("Please use a number as an argument")
        sys.exit()
if old_mark not in range(101):
print("Please use a number between 0 and 100")
sys.exit()
if old_mark == 100 or old_mark == 0:
print("Your mark should be " + str(old_mark))
sys.exit()
difference = 15
intervals = {
25:0,
75:25,
100:75,
}
upper = next(i for i in sorted(intervals) if old_mark < i)
lower = intervals[upper]
upper_rescaled = min(upper + difference, 100)
lower_rescaled = lower + difference
new_mark = (lower_rescaled + (old_mark - lower) *
(upper_rescaled - lower_rescaled)/(upper - lower))
print("Your mark should be " + str(new_mark))
|
Add short script to calculate rescaling of marks#!/usr/bin/env python3
"""
Calculates rescaled mark using hardcoded intervals
Usage example:
$ ./rescale_mark.py 50
Your mark should be 65.0
"""
import sys
if len(sys.argv) != 2:
print("Please use your mark as the only argument")
sys.exit()
else:
try:
old_mark = int(sys.argv[1])
    except ValueError:
        print("Please use a number as an argument")
        sys.exit()
if old_mark not in range(101):
print("Please use a number between 0 and 100")
sys.exit()
if old_mark == 100 or old_mark == 0:
print("Your mark should be " + str(old_mark))
sys.exit()
difference = 15
intervals = {
25:0,
75:25,
100:75,
}
upper = next(i for i in sorted(intervals) if old_mark < i)
lower = intervals[upper]
upper_rescaled = min(upper + difference, 100)
lower_rescaled = lower + difference
new_mark = (lower_rescaled + (old_mark - lower) *
(upper_rescaled - lower_rescaled)/(upper - lower))
print("Your mark should be " + str(new_mark))
|
<commit_before><commit_msg>Add short script to calculate rescaling of marks<commit_after>#!/usr/bin/env python3
"""
Calculates rescaled mark using hardcoded intervals
Usage example:
$ ./rescale_mark.py 50
Your mark should be 65.0
"""
import sys
if len(sys.argv) != 2:
print("Please use your mark as the only argument")
sys.exit()
else:
try:
old_mark = int(sys.argv[1])
    except ValueError:
        print("Please use a number as an argument")
        sys.exit()
if old_mark not in range(101):
print("Please use a number between 0 and 100")
sys.exit()
if old_mark == 100 or old_mark == 0:
print("Your mark should be " + str(old_mark))
sys.exit()
difference = 15
intervals = {
25:0,
75:25,
100:75,
}
upper = next(i for i in sorted(intervals) if old_mark < i)
lower = intervals[upper]
upper_rescaled = min(upper + difference, 100)
lower_rescaled = lower + difference
new_mark = (lower_rescaled + (old_mark - lower) *
(upper_rescaled - lower_rescaled)/(upper - lower))
print("Your mark should be " + str(new_mark))
|
|
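A worked check of the interpolation used above, assuming a 15-point shift: a raw 50 sits in the (25, 75) interval, which rescales onto (40, 90):

lower, upper = 25, 75
lower_rescaled, upper_rescaled = 25 + 15, min(75 + 15, 100)
old_mark = 50
new_mark = lower_rescaled + (old_mark - lower) * \
    (upper_rescaled - lower_rescaled) / (upper - lower)
print(new_mark)  # 65.0, matching the usage example in the docstring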
974efc82d723d18790e7b759e643c2352ad13325
|
content/3.introduction-crawler/download_page.py
|
content/3.introduction-crawler/download_page.py
|
import requests
def download(method='GET', url=None, tries_num=2, user_agent=None):
    print('Download:', url)
    html = None
    try:
        headers = {'User-Agent': user_agent}
        req = requests.request(method=method, url=url, headers=headers)
        print(req.headers)
        html = req.text
        # Retry on server errors (5xx) while retries remain
        if tries_num > 0 and 500 <= req.status_code < 600:
            return download(method=method, url=url,
                            tries_num=tries_num - 1, user_agent=user_agent)
    except requests.Timeout as e:
        # No response object exists after a timeout, so retry directly
        print('Download error:', e)
        html = None
        if tries_num > 0:
            return download(method=method, url=url,
                            tries_num=tries_num - 1, user_agent=user_agent)
    print(html)
    return html
download(url='http://httpstat.us/200', user_agent='Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36')
|
Add download page py file.
|
Add download page py file.
|
Python
|
mit
|
EscapeLife/web_crawler
|
Add download page py file.
|
import requests
def download(method='GET', url=None, tries_num=2, user_agent=None):
    print('Download:', url)
    html = None
    try:
        headers = {'User-Agent': user_agent}
        req = requests.request(method=method, url=url, headers=headers)
        print(req.headers)
        html = req.text
        # Retry on server errors (5xx) while retries remain
        if tries_num > 0 and 500 <= req.status_code < 600:
            return download(method=method, url=url,
                            tries_num=tries_num - 1, user_agent=user_agent)
    except requests.Timeout as e:
        # No response object exists after a timeout, so retry directly
        print('Download error:', e)
        html = None
        if tries_num > 0:
            return download(method=method, url=url,
                            tries_num=tries_num - 1, user_agent=user_agent)
    print(html)
    return html
download(url='http://httpstat.us/200', user_agent='Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36')
|
<commit_before><commit_msg>Add download page py file.<commit_after>
|
import requests
def download(method='GET', url=None, tries_num=2, user_agent=None):
    print('Download:', url)
    html = None
    try:
        headers = {'User-Agent': user_agent}
        req = requests.request(method=method, url=url, headers=headers)
        print(req.headers)
        html = req.text
        # Retry on server errors (5xx) while retries remain
        if tries_num > 0 and 500 <= req.status_code < 600:
            return download(method=method, url=url,
                            tries_num=tries_num - 1, user_agent=user_agent)
    except requests.Timeout as e:
        # No response object exists after a timeout, so retry directly
        print('Download error:', e)
        html = None
        if tries_num > 0:
            return download(method=method, url=url,
                            tries_num=tries_num - 1, user_agent=user_agent)
    print(html)
    return html
download(url='http://httpstat.us/200', user_agent='Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36')
|
Add download page py file.import requests
def download(method='GET', url=None, tries_num=2, user_agent=None):
    print('Download:', url)
    html = None
    try:
        headers = {'User-Agent': user_agent}
        req = requests.request(method=method, url=url, headers=headers)
        print(req.headers)
        html = req.text
        # Retry on server errors (5xx) while retries remain
        if tries_num > 0 and 500 <= req.status_code < 600:
            return download(method=method, url=url,
                            tries_num=tries_num - 1, user_agent=user_agent)
    except requests.Timeout as e:
        # No response object exists after a timeout, so retry directly
        print('Download error:', e)
        html = None
        if tries_num > 0:
            return download(method=method, url=url,
                            tries_num=tries_num - 1, user_agent=user_agent)
    print(html)
    return html
download(url='http://httpstat.us/200', user_agent='Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36')
|
<commit_before><commit_msg>Add download page py file.<commit_after>import requests
def download(method='GET', url=None, tries_num=2, user_agent=None):
    print('Download:', url)
    html = None
    try:
        headers = {'User-Agent': user_agent}
        req = requests.request(method=method, url=url, headers=headers)
        print(req.headers)
        html = req.text
        # Retry on server errors (5xx) while retries remain
        if tries_num > 0 and 500 <= req.status_code < 600:
            return download(method=method, url=url,
                            tries_num=tries_num - 1, user_agent=user_agent)
    except requests.Timeout as e:
        # No response object exists after a timeout, so retry directly
        print('Download error:', e)
        html = None
        if tries_num > 0:
            return download(method=method, url=url,
                            tries_num=tries_num - 1, user_agent=user_agent)
    print(html)
    return html
download(url='http://httpstat.us/200', user_agent='Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36')
|
|
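A variant sketch of the same retry idea with an explicit timeout and a loop instead of recursion; the parameter values here are illustrative:

import requests

def download_with_retries(url, tries=3, timeout=5, user_agent='my-crawler/0.1'):
    headers = {'User-Agent': user_agent}
    for attempt in range(tries):
        try:
            resp = requests.get(url, headers=headers, timeout=timeout)
        except requests.RequestException as exc:
            print('Download error (attempt %d): %s' % (attempt + 1, exc))
            continue
        if 500 <= resp.status_code < 600:
            continue  # server error: worth retrying
        return resp.text
    return None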
0f43bbb5033e85fdfcd678aba670c1b56582c092
|
scripts/make_db_ndex.py
|
scripts/make_db_ndex.py
|
import glob
import pickle
from indra.db import get_primary_db
from indra.db.util import make_stmts_from_db_list
from indra.assemblers import CxAssembler
data_path = '/pmc/data/db_ndex/'  # trailing slash so batch files land inside the directory
def dump_statement_batch(stmts, fname):
print('Dumping into %s' % fname)
with open(fname, 'wb') as fh:
pickle.dump(stmts, fh)
def load_statements():
fnames = glob.glob(data_path + 'pa_stmts_*.pkl')
all_stmts = []
for fname in fnames:
print('Loading %s' % fname)
with open(fname, 'rb') as fh:
stmts = pickle.load(fh)
all_stmts += stmts
return all_stmts
def assemble_cx(statements):
cxa = CxAssembler(statements)
model = cxa.make_model()
cxa.save_model('model.cx')
if __name__ == '__main__':
db = get_primary_db()
res = db.filter_query(db.PAStatements).yield_per(20000)
stmts = []
for idx, r in enumerate(res):
stmt = make_stmts_from_db_list([r])
stmts.append(stmt[0])
if idx > 0 and idx % 20000 == 0:
dump_statement_batch(stmts, data_path + 'pa_stmts_%d.pkl' % idx)
stmts = []
|
Add script to generate DB NDEx network
|
Add script to generate DB NDEx network
|
Python
|
bsd-2-clause
|
sorgerlab/bioagents,bgyori/bioagents
|
Add script to generate DB NDEx network
|
import glob
import pickle
from indra.db import get_primary_db
from indra.db.util import make_stmts_from_db_list
from indra.assemblers import CxAssembler
data_path = '/pmc/data/db_ndex/'  # trailing slash so batch files land inside the directory
def dump_statement_batch(stmts, fname):
print('Dumping into %s' % fname)
with open(fname, 'wb') as fh:
pickle.dump(stmts, fh)
def load_statements():
fnames = glob.glob(data_path + 'pa_stmts_*.pkl')
all_stmts = []
for fname in fnames:
print('Loading %s' % fname)
with open(fname, 'rb') as fh:
stmts = pickle.load(fh)
all_stmts += stmts
return all_stmts
def assemble_cx(statements):
cxa = CxAssembler(statements)
model = cxa.make_model()
cxa.save_model('model.cx')
if __name__ == '__main__':
db = get_primary_db()
res = db.filter_query(db.PAStatements).yield_per(20000)
stmts = []
for idx, r in enumerate(res):
stmt = make_stmts_from_db_list([r])
stmts.append(stmt[0])
if idx > 0 and idx % 20000 == 0:
dump_statement_batch(stmts, data_path + 'pa_stmts_%d.pkl' % idx)
stmts = []
|
<commit_before><commit_msg>Add script to generate DB NDEx network<commit_after>
|
import glob
import pickle
from indra.db import get_primary_db
from indra.db.util import make_stmts_from_db_list
from indra.assemblers import CxAssembler
data_path = '/pmc/data/db_ndex/'  # trailing slash so batch files land inside the directory
def dump_statement_batch(stmts, fname):
print('Dumping into %s' % fname)
with open(fname, 'wb') as fh:
pickle.dump(stmts, fh)
def load_statements():
fnames = glob.glob(data_path + 'pa_stmts_*.pkl')
all_stmts = []
for fname in fnames:
print('Loading %s' % fname)
with open(fname, 'rb') as fh:
stmts = pickle.load(fh)
all_stmts += stmts
return all_stmts
def assemble_cx(statements):
cxa = CxAssembler(statements)
model = cxa.make_model()
cxa.save_model('model.cx')
if __name__ == '__main__':
db = get_primary_db()
res = db.filter_query(db.PAStatements).yield_per(20000)
stmts = []
for idx, r in enumerate(res):
stmt = make_stmts_from_db_list([r])
stmts.append(stmt[0])
if idx > 0 and idx % 20000 == 0:
dump_statement_batch(stmts, data_path + 'pa_stmts_%d.pkl' % idx)
stmts = []
|
Add script to generate DB NDEx networkimport glob
import pickle
from indra.db import get_primary_db
from indra.db.util import make_stmts_from_db_list
from indra.assemblers import CxAssembler
data_path = '/pmc/data/db_ndex/'  # trailing slash so batch files land inside the directory
def dump_statement_batch(stmts, fname):
print('Dumping into %s' % fname)
with open(fname, 'wb') as fh:
pickle.dump(stmts, fh)
def load_statements():
fnames = glob.glob(data_path + 'pa_stmts_*.pkl')
all_stmts = []
for fname in fnames:
print('Loading %s' % fname)
with open(fname, 'rb') as fh:
stmts = pickle.load(fh)
all_stmts += stmts
return all_stmts
def assemble_cx(statements):
cxa = CxAssembler(statements)
model = cxa.make_model()
cxa.save_model('model.cx')
if __name__ == '__main__':
db = get_primary_db()
res = db.filter_query(db.PAStatements).yield_per(20000)
stmts = []
for idx, r in enumerate(res):
stmt = make_stmts_from_db_list([r])
stmts.append(stmt[0])
if idx > 0 and idx % 20000 == 0:
dump_statement_batch(stmts, data_path + 'pa_stmts_%d.pkl' % idx)
stmts = []
|
<commit_before><commit_msg>Add script to generate DB NDEx network<commit_after>import glob
import pickle
from indra.db import get_primary_db
from indra.db.util import make_stmts_from_db_list
from indra.assemblers import CxAssembler
data_path = '/pmc/data/db_ndex/'  # trailing slash so batch files land inside the directory
def dump_statement_batch(stmts, fname):
print('Dumping into %s' % fname)
with open(fname, 'wb') as fh:
pickle.dump(stmts, fh)
def load_statements():
fnames = glob.glob(data_path + 'pa_stmts_*.pkl')
all_stmts = []
for fname in fnames:
print('Loading %s' % fname)
with open(fname, 'rb') as fh:
stmts = pickle.load(fh)
all_stmts += stmts
return all_stmts
def assemble_cx(statements):
cxa = CxAssembler(statements)
model = cxa.make_model()
cxa.save_model('model.cx')
if __name__ == '__main__':
db = get_primary_db()
res = db.filter_query(db.PAStatements).yield_per(20000)
stmts = []
for idx, r in enumerate(res):
stmt = make_stmts_from_db_list([r])
stmts.append(stmt[0])
if idx > 0 and idx % 20000 == 0:
dump_statement_batch(stmts, data_path + 'pa_stmts_%d.pkl' % idx)
stmts = []
|
|
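The batching idiom used in the __main__ loop above, isolated as a small generator; note this version also flushes the final partial batch, which the loop above silently drops:

def batches(iterable, size):
    batch = []
    for item in iterable:
        batch.append(item)
        if len(batch) == size:
            yield batch
            batch = []
    if batch:  # keep the last, partial batch
        yield batch

for chunk in batches(range(7), 3):
    print(chunk)  # [0, 1, 2] / [3, 4, 5] / [6]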
17401f8fad648cbe4258f257fdc288b327aed9ab
|
integration/simple_test_module.py
|
integration/simple_test_module.py
|
import sys
import argparse
import operator
import threading
import time
from jnius import autoclass, cast
from TripsModule.trips_module import TripsModule
# Declare KQML java classes
KQMLPerformative = autoclass('TRIPS.KQML.KQMLPerformative')
KQMLList = autoclass('TRIPS.KQML.KQMLList')
KQMLObject = autoclass('TRIPS.KQML.KQMLObject')
class FIFO(object):
def __init__(self, lst=None):
if lst is None:
self.lst = []
else:
self.lst = lst
def pop(self):
return self.lst.pop()
def push(self, e):
self.lst = [e] + self.lst
def is_empty(self):
if not self.lst:
return True
return False
class TestModule(TripsModule):
'''
    The Test module is a TRIPS module built to run unit tests. Its
    role is to receive and decode messages and to send responses from
    and to other agents in the system.
'''
def __init__(self, argv):
# Call the constructor of TripsModule
super(TestModule, self).__init__(argv)
self.expected = FIFO()
self.sent = FIFO()
# TODO:make this an input argument
self.test_file = 'integration/test.in'
def init(self):
'''
Initialize TRIPS module
'''
super(TestModule, self).init()
# Send ready message
self.ready()
self.run_tests(self.test_file)
return None
def run_tests(self, test_file):
fh = open(test_file, 'rt')
messages = fh.readlines()
send_msg = messages[0::2]
expect_msg = messages[1::2]
msg_id = 1
for sm, em in zip(send_msg, expect_msg):
msg_id_str = 'IO-%d' % msg_id
# TODO: allow non-request messages?
perf = KQMLPerformative.fromString(
'(request :reply-with %s :content %s)' % (msg_id_str, sm))
self.sent.push(sm)
self.expected.push(em)
self.send(perf)
msg_id += 1
def receive_reply(self, msg, content):
'''
Handle a "reply" message is received.
'''
sent = self.sent.pop().strip()
expected_content = self.expected.pop().strip()
actual_content = content.toString().strip()
print 'sent: ', sent
print 'expected: ', expected_content
print 'actual: ', actual_content
print '---'
assert(expected_content == actual_content)
if self.expected.is_empty():
sys.exit(0)
if __name__ == "__main__":
dm = TestModule(['-name', 'Test'] + sys.argv[1:])
dm.run()
|
Add simple test module for integration testing
|
Add simple test module for integration testing
|
Python
|
bsd-2-clause
|
sorgerlab/bioagents,bgyori/bioagents
|
Add simple test module for integration testing
|
import sys
import argparse
import operator
import threading
import time
from jnius import autoclass, cast
from TripsModule.trips_module import TripsModule
# Declare KQML java classes
KQMLPerformative = autoclass('TRIPS.KQML.KQMLPerformative')
KQMLList = autoclass('TRIPS.KQML.KQMLList')
KQMLObject = autoclass('TRIPS.KQML.KQMLObject')
class FIFO(object):
def __init__(self, lst=None):
if lst is None:
self.lst = []
else:
self.lst = lst
def pop(self):
return self.lst.pop()
def push(self, e):
self.lst = [e] + self.lst
def is_empty(self):
if not self.lst:
return True
return False
class TestModule(TripsModule):
'''
    The Test module is a TRIPS module built to run unit tests. Its
    role is to receive and decode messages and to send responses from
    and to other agents in the system.
'''
def __init__(self, argv):
# Call the constructor of TripsModule
super(TestModule, self).__init__(argv)
self.expected = FIFO()
self.sent = FIFO()
# TODO:make this an input argument
self.test_file = 'integration/test.in'
def init(self):
'''
Initialize TRIPS module
'''
super(TestModule, self).init()
# Send ready message
self.ready()
self.run_tests(self.test_file)
return None
def run_tests(self, test_file):
fh = open(test_file, 'rt')
messages = fh.readlines()
send_msg = messages[0::2]
expect_msg = messages[1::2]
msg_id = 1
for sm, em in zip(send_msg, expect_msg):
msg_id_str = 'IO-%d' % msg_id
# TODO: allow non-request messages?
perf = KQMLPerformative.fromString(
'(request :reply-with %s :content %s)' % (msg_id_str, sm))
self.sent.push(sm)
self.expected.push(em)
self.send(perf)
msg_id += 1
def receive_reply(self, msg, content):
'''
        Handle a "reply" message when it is received.
'''
sent = self.sent.pop().strip()
expected_content = self.expected.pop().strip()
actual_content = content.toString().strip()
print 'sent: ', sent
print 'expected: ', expected_content
print 'actual: ', actual_content
print '---'
assert(expected_content == actual_content)
if self.expected.is_empty():
sys.exit(0)
if __name__ == "__main__":
dm = TestModule(['-name', 'Test'] + sys.argv[1:])
dm.run()
|
<commit_before><commit_msg>Add simple test module for integration testing<commit_after>
|
import sys
import argparse
import operator
import threading
import time
from jnius import autoclass, cast
from TripsModule.trips_module import TripsModule
# Declare KQML java classes
KQMLPerformative = autoclass('TRIPS.KQML.KQMLPerformative')
KQMLList = autoclass('TRIPS.KQML.KQMLList')
KQMLObject = autoclass('TRIPS.KQML.KQMLObject')
class FIFO(object):
def __init__(self, lst=None):
if lst is None:
self.lst = []
else:
self.lst = lst
def pop(self):
return self.lst.pop()
def push(self, e):
self.lst = [e] + self.lst
def is_empty(self):
if not self.lst:
return True
return False
class TestModule(TripsModule):
'''
    The Test module is a TRIPS module built to run unit tests. Its
    role is to receive and decode messages and send responses from
and to other agents in the system.
'''
def __init__(self, argv):
# Call the constructor of TripsModule
super(TestModule, self).__init__(argv)
self.expected = FIFO()
self.sent = FIFO()
        # TODO: make this an input argument
self.test_file = 'integration/test.in'
def init(self):
'''
Initialize TRIPS module
'''
super(TestModule, self).init()
# Send ready message
self.ready()
self.run_tests(self.test_file)
return None
def run_tests(self, test_file):
fh = open(test_file, 'rt')
messages = fh.readlines()
send_msg = messages[0::2]
expect_msg = messages[1::2]
msg_id = 1
for sm, em in zip(send_msg, expect_msg):
msg_id_str = 'IO-%d' % msg_id
# TODO: allow non-request messages?
perf = KQMLPerformative.fromString(
'(request :reply-with %s :content %s)' % (msg_id_str, sm))
self.sent.push(sm)
self.expected.push(em)
self.send(perf)
msg_id += 1
def receive_reply(self, msg, content):
'''
        Handle a "reply" message when it is received.
'''
sent = self.sent.pop().strip()
expected_content = self.expected.pop().strip()
actual_content = content.toString().strip()
print 'sent: ', sent
print 'expected: ', expected_content
print 'actual: ', actual_content
print '---'
assert(expected_content == actual_content)
if self.expected.is_empty():
sys.exit(0)
if __name__ == "__main__":
dm = TestModule(['-name', 'Test'] + sys.argv[1:])
dm.run()
|
Add simple test module for integration testingimport sys
import argparse
import operator
import threading
import time
from jnius import autoclass, cast
from TripsModule.trips_module import TripsModule
# Declare KQML java classes
KQMLPerformative = autoclass('TRIPS.KQML.KQMLPerformative')
KQMLList = autoclass('TRIPS.KQML.KQMLList')
KQMLObject = autoclass('TRIPS.KQML.KQMLObject')
class FIFO(object):
def __init__(self, lst=None):
if lst is None:
self.lst = []
else:
self.lst = lst
def pop(self):
return self.lst.pop()
def push(self, e):
self.lst = [e] + self.lst
def is_empty(self):
if not self.lst:
return True
return False
class TestModule(TripsModule):
'''
    The Test module is a TRIPS module built to run unit tests. Its
    role is to receive and decode messages and send responses from
and to other agents in the system.
'''
def __init__(self, argv):
# Call the constructor of TripsModule
super(TestModule, self).__init__(argv)
self.expected = FIFO()
self.sent = FIFO()
        # TODO: make this an input argument
self.test_file = 'integration/test.in'
def init(self):
'''
Initialize TRIPS module
'''
super(TestModule, self).init()
# Send ready message
self.ready()
self.run_tests(self.test_file)
return None
def run_tests(self, test_file):
fh = open(test_file, 'rt')
messages = fh.readlines()
send_msg = messages[0::2]
expect_msg = messages[1::2]
msg_id = 1
for sm, em in zip(send_msg, expect_msg):
msg_id_str = 'IO-%d' % msg_id
# TODO: allow non-request messages?
perf = KQMLPerformative.fromString(
'(request :reply-with %s :content %s)' % (msg_id_str, sm))
self.sent.push(sm)
self.expected.push(em)
self.send(perf)
msg_id += 1
def receive_reply(self, msg, content):
'''
        Handle a "reply" message when it is received.
'''
sent = self.sent.pop().strip()
expected_content = self.expected.pop().strip()
actual_content = content.toString().strip()
print 'sent: ', sent
print 'expected: ', expected_content
print 'actual: ', actual_content
print '---'
assert(expected_content == actual_content)
if self.expected.is_empty():
sys.exit(0)
if __name__ == "__main__":
dm = TestModule(['-name', 'Test'] + sys.argv[1:])
dm.run()
|
<commit_before><commit_msg>Add simple test module for integration testing<commit_after>import sys
import argparse
import operator
import threading
import time
from jnius import autoclass, cast
from TripsModule.trips_module import TripsModule
# Declare KQML java classes
KQMLPerformative = autoclass('TRIPS.KQML.KQMLPerformative')
KQMLList = autoclass('TRIPS.KQML.KQMLList')
KQMLObject = autoclass('TRIPS.KQML.KQMLObject')
class FIFO(object):
def __init__(self, lst=None):
if lst is None:
self.lst = []
else:
self.lst = lst
def pop(self):
return self.lst.pop()
def push(self, e):
self.lst = [e] + self.lst
def is_empty(self):
if not self.lst:
return True
return False
class TestModule(TripsModule):
'''
    The Test module is a TRIPS module built to run unit tests. Its
    role is to receive and decode messages and send responses from
and to other agents in the system.
'''
def __init__(self, argv):
# Call the constructor of TripsModule
super(TestModule, self).__init__(argv)
self.expected = FIFO()
self.sent = FIFO()
        # TODO: make this an input argument
self.test_file = 'integration/test.in'
def init(self):
'''
Initialize TRIPS module
'''
super(TestModule, self).init()
# Send ready message
self.ready()
self.run_tests(self.test_file)
return None
def run_tests(self, test_file):
fh = open(test_file, 'rt')
messages = fh.readlines()
send_msg = messages[0::2]
expect_msg = messages[1::2]
msg_id = 1
for sm, em in zip(send_msg, expect_msg):
msg_id_str = 'IO-%d' % msg_id
# TODO: allow non-request messages?
perf = KQMLPerformative.fromString(
'(request :reply-with %s :content %s)' % (msg_id_str, sm))
self.sent.push(sm)
self.expected.push(em)
self.send(perf)
msg_id += 1
def receive_reply(self, msg, content):
'''
        Handle a "reply" message when it is received.
'''
sent = self.sent.pop().strip()
expected_content = self.expected.pop().strip()
actual_content = content.toString().strip()
print 'sent: ', sent
print 'expected: ', expected_content
print 'actual: ', actual_content
print '---'
assert(expected_content == actual_content)
if self.expected.is_empty():
sys.exit(0)
if __name__ == "__main__":
dm = TestModule(['-name', 'Test'] + sys.argv[1:])
dm.run()
|
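A note on the FIFO helper above: push() places the newest element at the front of the list, and pop() uses list.pop(), which removes the last element, so the oldest entry always comes out first. A minimal self-contained sketch of that behavior (the class name and messages here are illustrative only):

class Queue(object):  # same semantics as the FIFO class above
    def __init__(self):
        self.lst = []
    def push(self, e):
        self.lst = [e] + self.lst  # newest element sits at the front
    def pop(self):
        return self.lst.pop()      # list.pop() removes the last, i.e. oldest
    def is_empty(self):
        return not self.lst

q = Queue()
for msg in ('first', 'second', 'third'):
    q.push(msg)
assert q.pop() == 'first'     # oldest message first: FIFO order
assert q.pop() == 'second'
assert not q.is_empty()       # 'third' is still queued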
|
e56f42b17e96145d5e4ee5238ca16c4fe1d06df5
|
salt/utils/entrypoints.py
|
salt/utils/entrypoints.py
|
import logging
import sys
import types
USE_IMPORTLIB_METADATA_STDLIB = USE_IMPORTLIB_METADATA = USE_PKG_RESOURCES = False
if sys.version_info >= (3, 10):
# Python 3.10 will include a fix in importlib.metadata which allows us to
# get the distribution of a loaded entry-point
import importlib.metadata # pylint: disable=no-member,no-name-in-module
USE_IMPORTLIB_METADATA_STDLIB = True
else:
if sys.version_info >= (3, 6):
        # importlib_metadata releases available for Python versions lower than
        # 3.6 do not include the functionality we need.
try:
import importlib_metadata
importlib_metadata_version = [
int(part)
for part in importlib_metadata.version("importlib_metadata").split(".")
if part.isdigit()
]
if tuple(importlib_metadata_version) >= (3, 3, 0):
# Version 3.3.0 of importlib_metadata includes a fix which allows us to
# get the distribution of a loaded entry-point
USE_IMPORTLIB_METADATA = True
except ImportError:
# We don't have importlib_metadata but USE_IMPORTLIB_METADATA is set to false by default
pass
if not USE_IMPORTLIB_METADATA_STDLIB and not USE_IMPORTLIB_METADATA:
# Try to use pkg_resources
try:
import pkg_resources
USE_PKG_RESOURCES = True
except ImportError:
# We don't have pkg_resources but USE_PKG_RESOURCES is set to false by default
pass
log = logging.getLogger(__name__)
def iter_entry_points(group, name=None):
entry_points_listing = []
if USE_IMPORTLIB_METADATA_STDLIB:
log.debug("Using importlib.metadata to load entry points")
entry_points = importlib.metadata.entry_points()
elif USE_IMPORTLIB_METADATA:
log.debug("Using importlib_metadata to load entry points")
entry_points = importlib_metadata.entry_points()
elif USE_PKG_RESOURCES:
log.debug("Using pkg_resources to load entry points")
entry_points_listing = list(pkg_resources.iter_entry_points(group, name=name))
else:
return entry_points_listing
if USE_IMPORTLIB_METADATA_STDLIB or USE_IMPORTLIB_METADATA:
for entry_point_group, entry_points_list in entry_points.items():
if entry_point_group != group:
continue
for entry_point in entry_points_list:
if name is not None and entry_point.name != name:
continue
entry_points_listing.append(entry_point)
return entry_points_listing
def name_and_version_from_entry_point(entry_point):
if USE_IMPORTLIB_METADATA_STDLIB or USE_IMPORTLIB_METADATA:
return types.SimpleNamespace(
name=entry_point.dist.metadata["name"], version=entry_point.dist.version,
)
elif USE_PKG_RESOURCES:
return types.SimpleNamespace(
name=entry_point.dist.key, version=entry_point.dist.version
)
|
Add an utility module to load entry-points with whatever lib is available
|
Add an utility module to load entry-points with whatever lib is available
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add an utility module to load entry-points with whatever lib is available
|
import logging
import sys
import types
USE_IMPORTLIB_METADATA_STDLIB = USE_IMPORTLIB_METADATA = USE_PKG_RESOURCES = False
if sys.version_info >= (3, 10):
# Python 3.10 will include a fix in importlib.metadata which allows us to
# get the distribution of a loaded entry-point
import importlib.metadata # pylint: disable=no-member,no-name-in-module
USE_IMPORTLIB_METADATA_STDLIB = True
else:
if sys.version_info >= (3, 6):
        # importlib_metadata releases available for Python versions lower than
        # 3.6 do not include the functionality we need.
try:
import importlib_metadata
importlib_metadata_version = [
int(part)
for part in importlib_metadata.version("importlib_metadata").split(".")
if part.isdigit()
]
if tuple(importlib_metadata_version) >= (3, 3, 0):
# Version 3.3.0 of importlib_metadata includes a fix which allows us to
# get the distribution of a loaded entry-point
USE_IMPORTLIB_METADATA = True
except ImportError:
# We don't have importlib_metadata but USE_IMPORTLIB_METADATA is set to false by default
pass
if not USE_IMPORTLIB_METADATA_STDLIB and not USE_IMPORTLIB_METADATA:
# Try to use pkg_resources
try:
import pkg_resources
USE_PKG_RESOURCES = True
except ImportError:
# We don't have pkg_resources but USE_PKG_RESOURCES is set to false by default
pass
log = logging.getLogger(__name__)
def iter_entry_points(group, name=None):
entry_points_listing = []
if USE_IMPORTLIB_METADATA_STDLIB:
log.debug("Using importlib.metadata to load entry points")
entry_points = importlib.metadata.entry_points()
elif USE_IMPORTLIB_METADATA:
log.debug("Using importlib_metadata to load entry points")
entry_points = importlib_metadata.entry_points()
elif USE_PKG_RESOURCES:
log.debug("Using pkg_resources to load entry points")
entry_points_listing = list(pkg_resources.iter_entry_points(group, name=name))
else:
return entry_points_listing
if USE_IMPORTLIB_METADATA_STDLIB or USE_IMPORTLIB_METADATA:
for entry_point_group, entry_points_list in entry_points.items():
if entry_point_group != group:
continue
for entry_point in entry_points_list:
if name is not None and entry_point.name != name:
continue
entry_points_listing.append(entry_point)
return entry_points_listing
def name_and_version_from_entry_point(entry_point):
if USE_IMPORTLIB_METADATA_STDLIB or USE_IMPORTLIB_METADATA:
return types.SimpleNamespace(
name=entry_point.dist.metadata["name"], version=entry_point.dist.version,
)
elif USE_PKG_RESOURCES:
return types.SimpleNamespace(
name=entry_point.dist.key, version=entry_point.dist.version
)
|
<commit_before><commit_msg>Add an utility module to load entry-points with whatever lib is available<commit_after>
|
import logging
import sys
import types
USE_IMPORTLIB_METADATA_STDLIB = USE_IMPORTLIB_METADATA = USE_PKG_RESOURCES = False
if sys.version_info >= (3, 10):
# Python 3.10 will include a fix in importlib.metadata which allows us to
# get the distribution of a loaded entry-point
import importlib.metadata # pylint: disable=no-member,no-name-in-module
USE_IMPORTLIB_METADATA_STDLIB = True
else:
if sys.version_info >= (3, 6):
        # importlib_metadata releases available for Python versions lower than
        # 3.6 do not include the functionality we need.
try:
import importlib_metadata
importlib_metadata_version = [
int(part)
for part in importlib_metadata.version("importlib_metadata").split(".")
if part.isdigit()
]
if tuple(importlib_metadata_version) >= (3, 3, 0):
# Version 3.3.0 of importlib_metadata includes a fix which allows us to
# get the distribution of a loaded entry-point
USE_IMPORTLIB_METADATA = True
except ImportError:
# We don't have importlib_metadata but USE_IMPORTLIB_METADATA is set to false by default
pass
if not USE_IMPORTLIB_METADATA_STDLIB and not USE_IMPORTLIB_METADATA:
# Try to use pkg_resources
try:
import pkg_resources
USE_PKG_RESOURCES = True
except ImportError:
# We don't have pkg_resources but USE_PKG_RESOURCES is set to false by default
pass
log = logging.getLogger(__name__)
def iter_entry_points(group, name=None):
entry_points_listing = []
if USE_IMPORTLIB_METADATA_STDLIB:
log.debug("Using importlib.metadata to load entry points")
entry_points = importlib.metadata.entry_points()
elif USE_IMPORTLIB_METADATA:
log.debug("Using importlib_metadata to load entry points")
entry_points = importlib_metadata.entry_points()
elif USE_PKG_RESOURCES:
log.debug("Using pkg_resources to load entry points")
entry_points_listing = list(pkg_resources.iter_entry_points(group, name=name))
else:
return entry_points_listing
if USE_IMPORTLIB_METADATA_STDLIB or USE_IMPORTLIB_METADATA:
for entry_point_group, entry_points_list in entry_points.items():
if entry_point_group != group:
continue
for entry_point in entry_points_list:
if name is not None and entry_point.name != name:
continue
entry_points_listing.append(entry_point)
return entry_points_listing
def name_and_version_from_entry_point(entry_point):
if USE_IMPORTLIB_METADATA_STDLIB or USE_IMPORTLIB_METADATA:
return types.SimpleNamespace(
name=entry_point.dist.metadata["name"], version=entry_point.dist.version,
)
elif USE_PKG_RESOURCES:
return types.SimpleNamespace(
name=entry_point.dist.key, version=entry_point.dist.version
)
|
Add an utility module to load entry-points with whatever lib is availableimport logging
import sys
import types
USE_IMPORTLIB_METADATA_STDLIB = USE_IMPORTLIB_METADATA = USE_PKG_RESOURCES = False
if sys.version_info >= (3, 10):
# Python 3.10 will include a fix in importlib.metadata which allows us to
# get the distribution of a loaded entry-point
import importlib.metadata # pylint: disable=no-member,no-name-in-module
USE_IMPORTLIB_METADATA_STDLIB = True
else:
if sys.version_info >= (3, 6):
        # importlib_metadata releases available for Python versions lower than
        # 3.6 do not include the functionality we need.
try:
import importlib_metadata
importlib_metadata_version = [
int(part)
for part in importlib_metadata.version("importlib_metadata").split(".")
if part.isdigit()
]
if tuple(importlib_metadata_version) >= (3, 3, 0):
# Version 3.3.0 of importlib_metadata includes a fix which allows us to
# get the distribution of a loaded entry-point
USE_IMPORTLIB_METADATA = True
except ImportError:
# We don't have importlib_metadata but USE_IMPORTLIB_METADATA is set to false by default
pass
if not USE_IMPORTLIB_METADATA_STDLIB and not USE_IMPORTLIB_METADATA:
# Try to use pkg_resources
try:
import pkg_resources
USE_PKG_RESOURCES = True
except ImportError:
# We don't have pkg_resources but USE_PKG_RESOURCES is set to false by default
pass
log = logging.getLogger(__name__)
def iter_entry_points(group, name=None):
entry_points_listing = []
if USE_IMPORTLIB_METADATA_STDLIB:
log.debug("Using importlib.metadata to load entry points")
entry_points = importlib.metadata.entry_points()
elif USE_IMPORTLIB_METADATA:
log.debug("Using importlib_metadata to load entry points")
entry_points = importlib_metadata.entry_points()
elif USE_PKG_RESOURCES:
log.debug("Using pkg_resources to load entry points")
entry_points_listing = list(pkg_resources.iter_entry_points(group, name=name))
else:
return entry_points_listing
if USE_IMPORTLIB_METADATA_STDLIB or USE_IMPORTLIB_METADATA:
for entry_point_group, entry_points_list in entry_points.items():
if entry_point_group != group:
continue
for entry_point in entry_points_list:
if name is not None and entry_point.name != name:
continue
entry_points_listing.append(entry_point)
return entry_points_listing
def name_and_version_from_entry_point(entry_point):
if USE_IMPORTLIB_METADATA_STDLIB or USE_IMPORTLIB_METADATA:
return types.SimpleNamespace(
name=entry_point.dist.metadata["name"], version=entry_point.dist.version,
)
elif USE_PKG_RESOURCES:
return types.SimpleNamespace(
name=entry_point.dist.key, version=entry_point.dist.version
)
|
<commit_before><commit_msg>Add an utility module to load entry-points with whatever lib is available<commit_after>import logging
import sys
import types
USE_IMPORTLIB_METADATA_STDLIB = USE_IMPORTLIB_METADATA = USE_PKG_RESOURCES = False
if sys.version_info >= (3, 10):
# Python 3.10 will include a fix in importlib.metadata which allows us to
# get the distribution of a loaded entry-point
import importlib.metadata # pylint: disable=no-member,no-name-in-module
USE_IMPORTLIB_METADATA_STDLIB = True
else:
if sys.version_info >= (3, 6):
        # importlib_metadata releases available for Python versions lower than
        # 3.6 do not include the functionality we need.
try:
import importlib_metadata
importlib_metadata_version = [
int(part)
for part in importlib_metadata.version("importlib_metadata").split(".")
if part.isdigit()
]
if tuple(importlib_metadata_version) >= (3, 3, 0):
# Version 3.3.0 of importlib_metadata includes a fix which allows us to
# get the distribution of a loaded entry-point
USE_IMPORTLIB_METADATA = True
except ImportError:
# We don't have importlib_metadata but USE_IMPORTLIB_METADATA is set to false by default
pass
if not USE_IMPORTLIB_METADATA_STDLIB and not USE_IMPORTLIB_METADATA:
# Try to use pkg_resources
try:
import pkg_resources
USE_PKG_RESOURCES = True
except ImportError:
# We don't have pkg_resources but USE_PKG_RESOURCES is set to false by default
pass
log = logging.getLogger(__name__)
def iter_entry_points(group, name=None):
entry_points_listing = []
if USE_IMPORTLIB_METADATA_STDLIB:
log.debug("Using importlib.metadata to load entry points")
entry_points = importlib.metadata.entry_points()
elif USE_IMPORTLIB_METADATA:
log.debug("Using importlib_metadata to load entry points")
entry_points = importlib_metadata.entry_points()
elif USE_PKG_RESOURCES:
log.debug("Using pkg_resources to load entry points")
entry_points_listing = list(pkg_resources.iter_entry_points(group, name=name))
else:
return entry_points_listing
if USE_IMPORTLIB_METADATA_STDLIB or USE_IMPORTLIB_METADATA:
for entry_point_group, entry_points_list in entry_points.items():
if entry_point_group != group:
continue
for entry_point in entry_points_list:
if name is not None and entry_point.name != name:
continue
entry_points_listing.append(entry_point)
return entry_points_listing
def name_and_version_from_entry_point(entry_point):
if USE_IMPORTLIB_METADATA_STDLIB or USE_IMPORTLIB_METADATA:
return types.SimpleNamespace(
name=entry_point.dist.metadata["name"], version=entry_point.dist.version,
)
elif USE_PKG_RESOURCES:
return types.SimpleNamespace(
name=entry_point.dist.key, version=entry_point.dist.version
)
|
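The module above hides three possible metadata backends behind one interface. A hedged usage sketch — it assumes salt.utils.entrypoints is importable, and 'console_scripts' is just a commonly present entry-point group used for illustration:

from salt.utils import entrypoints

for ep in entrypoints.iter_entry_points('console_scripts'):
    info = entrypoints.name_and_version_from_entry_point(ep)
    # The function falls through to None when no backend branch applies.
    if info is not None:
        print('{} is provided by {}=={}'.format(ep.name, info.name, info.version))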
|
43b46f1e3ded3972dede7226cf0255b904d028bd
|
django/notejam/pads/tests.py
|
django/notejam/pads/tests.py
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
class PadTest(TestCase):
def setUp(self):
user_data = {
'email': 'user@example.com',
'password': 'secure_password'
}
user = User.objects.create(username=user_data['email'], **user_data)
user.set_password(user_data['password'])
user.save()
self.client.login(**user_data)
def _get_pad_data(self):
pass
def test_create_pad_success(self):
pass
|
Test improvements. Empty Pad test class added.
|
Django: Test improvements. Empty Pad test class added.
|
Python
|
mit
|
hstaugaard/notejam,nadavge/notejam,lefloh/notejam,lefloh/notejam,williamn/notejam,hstaugaard/notejam,nadavge/notejam,williamn/notejam,hstaugaard/notejam,hstaugaard/notejam,lefloh/notejam,lefloh/notejam,williamn/notejam,nadavge/notejam,lefloh/notejam,hstaugaard/notejam,williamn/notejam,shikhardb/notejam,williamn/notejam,williamn/notejam,hstaugaard/notejam,shikhardb/notejam,lefloh/notejam,hstaugaard/notejam,lefloh/notejam,williamn/notejam,shikhardb/notejam,shikhardb/notejam,nadavge/notejam,hstaugaard/notejam,nadavge/notejam,shikhardb/notejam
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
Django: Test improvements. Empty Pad test class added.
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
class PadTest(TestCase):
def setUp(self):
user_data = {
'email': 'user@example.com',
'password': 'secure_password'
}
user = User.objects.create(username=user_data['email'], **user_data)
user.set_password(user_data['password'])
user.save()
self.client.login(**user_data)
def _get_pad_data(self):
pass
def test_create_pad_success(self):
pass
|
<commit_before>"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
<commit_msg>Django: Test improvements. Empty Pad test class added.<commit_after>
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
class PadTest(TestCase):
def setUp(self):
user_data = {
'email': 'user@example.com',
'password': 'secure_password'
}
user = User.objects.create(username=user_data['email'], **user_data)
user.set_password(user_data['password'])
user.save()
self.client.login(**user_data)
def _get_pad_data(self):
pass
def test_create_pad_success(self):
pass
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
Django: Test improvements. Empty Pad test class added.from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
class PadTest(TestCase):
def setUp(self):
user_data = {
'email': 'user@example.com',
'password': 'secure_password'
}
user = User.objects.create(username=user_data['email'], **user_data)
user.set_password(user_data['password'])
user.save()
self.client.login(**user_data)
def _get_pad_data(self):
pass
def test_create_pad_success(self):
pass
|
<commit_before>"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
<commit_msg>Django: Test improvements. Empty Pad test class added.<commit_after>from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
class PadTest(TestCase):
def setUp(self):
user_data = {
'email': 'user@example.com',
'password': 'secure_password'
}
user = User.objects.create(username=user_data['email'], **user_data)
user.set_password(user_data['password'])
user.save()
self.client.login(**user_data)
def _get_pad_data(self):
pass
def test_create_pad_success(self):
pass
|
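The two test stubs above are placeholders. One plausible completion inside PadTest — the URL name 'create-pad' and the form payload are illustrative assumptions, not taken from the notejam code:

    def _get_pad_data(self):
        # Illustrative payload; the real Pad form fields may differ.
        return {'name': 'Grocery lists'}

    def test_create_pad_success(self):
        response = self.client.post(reverse('create-pad'),
                                    self._get_pad_data())
        # A successful create usually redirects, e.g. back to the pad list.
        self.assertEqual(response.status_code, 302)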
a245a8861eb35af77bb5387a7945f07af8cef017
|
learntools/computer_vision/ex4.py
|
learntools/computer_vision/ex4.py
|
from learntools.core import *
import tensorflow as tf
class Q1(ThoughtExperiment):
_solution = ""
class Q2(ThoughtExperiment):
_solution = ""
class Q3A(ThoughtExperiment):
_hint = ""
_solution = ""
class Q3B(ThoughtExperiment):
_hint = ""
_solution = ""
class Q3C(ThoughtExperiment):
_hint = ""
_solution = ""
Q3 = MultipartProblem(Q3A, Q3B, Q3C)
class Q4(CodingProblem):
_hint = ""
_solution = ""
def check(self):
pass
qvars = bind_exercises(globals(), [
Q1, Q2, Q3, Q4,
],
var_format='q_{n}',
)
__all__ = list(qvars)
|
Add exercise 4 checking code
|
Add exercise 4 checking code
|
Python
|
apache-2.0
|
Kaggle/learntools,Kaggle/learntools
|
Add exercise 4 checking code
|
from learntools.core import *
import tensorflow as tf
class Q1(ThoughtExperiment):
_solution = ""
class Q2(ThoughtExperiment):
_solution = ""
class Q3A(ThoughtExperiment):
_hint = ""
_solution = ""
class Q3B(ThoughtExperiment):
_hint = ""
_solution = ""
class Q3C(ThoughtExperiment):
_hint = ""
_solution = ""
Q3 = MultipartProblem(Q3A, Q3B, Q3C)
class Q4(CodingProblem):
_hint = ""
_solution = ""
def check(self):
pass
qvars = bind_exercises(globals(), [
Q1, Q2, Q3, Q4,
],
var_format='q_{n}',
)
__all__ = list(qvars)
|
<commit_before><commit_msg>Add exercise 4 checking code<commit_after>
|
from learntools.core import *
import tensorflow as tf
class Q1(ThoughtExperiment):
_solution = ""
class Q2(ThoughtExperiment):
_solution = ""
class Q3A(ThoughtExperiment):
_hint = ""
_solution = ""
class Q3B(ThoughtExperiment):
_hint = ""
_solution = ""
class Q3C(ThoughtExperiment):
_hint = ""
_solution = ""
Q3 = MultipartProblem(Q3A, Q3B, Q3C)
class Q4(CodingProblem):
_hint = ""
_solution = ""
def check(self):
pass
qvars = bind_exercises(globals(), [
Q1, Q2, Q3, Q4,
],
var_format='q_{n}',
)
__all__ = list(qvars)
|
Add exercise 4 checking codefrom learntools.core import *
import tensorflow as tf
class Q1(ThoughtExperiment):
_solution = ""
class Q2(ThoughtExperiment):
_solution = ""
class Q3A(ThoughtExperiment):
_hint = ""
_solution = ""
class Q3B(ThoughtExperiment):
_hint = ""
_solution = ""
class Q3C(ThoughtExperiment):
_hint = ""
_solution = ""
Q3 = MultipartProblem(Q3A, Q3B, Q3C)
class Q4(CodingProblem):
_hint = ""
_solution = ""
def check(self):
pass
qvars = bind_exercises(globals(), [
Q1, Q2, Q3, Q4,
],
var_format='q_{n}',
)
__all__ = list(qvars)
|
<commit_before><commit_msg>Add exercise 4 checking code<commit_after>from learntools.core import *
import tensorflow as tf
class Q1(ThoughtExperiment):
_solution = ""
class Q2(ThoughtExperiment):
_solution = ""
class Q3A(ThoughtExperiment):
_hint = ""
_solution = ""
class Q3B(ThoughtExperiment):
_hint = ""
_solution = ""
class Q3C(ThoughtExperiment):
_hint = ""
_solution = ""
Q3 = MultipartProblem(Q3A, Q3B, Q3C)
class Q4(CodingProblem):
_hint = ""
_solution = ""
def check(self):
pass
qvars = bind_exercises(globals(), [
Q1, Q2, Q3, Q4,
],
var_format='q_{n}',
)
__all__ = list(qvars)
|
|
94e1944d160f242ebebe9614395bb795e5074d6d
|
examples/codepage_tables.py
|
examples/codepage_tables.py
|
"""Prints code page tables.
"""
import six
import sys
from escpos import printer
from escpos.constants import *
def main():
dummy = printer.Dummy()
dummy.hw('init')
for codepage in sys.argv[1:] or ['USA']:
dummy.set(height=2, width=2)
dummy._raw(codepage+"\n\n\n")
print_codepage(dummy, codepage)
dummy._raw("\n\n")
dummy.cut()
    print(dummy.output)
def print_codepage(printer, codepage):
if codepage.isdigit():
codepage = int(codepage)
printer._raw(CODEPAGE_CHANGE + six.int2byte(codepage))
printer._raw("after")
else:
printer.charcode(codepage)
sep = ""
# Table header
printer.set(text_type='B')
printer._raw(" %s\n" % sep.join(map(lambda s: hex(s)[2:], range(0,16))))
printer.set()
# The table
for x in range(0,16):
# First column
printer.set(text_type='B')
printer._raw("%s " % hex(x)[2:])
printer.set()
for y in range(0,16):
byte = six.int2byte(x*16+y)
if byte in (ESC, CTL_LF, CTL_FF, CTL_CR, CTL_HT, CTL_VT):
byte = ' '
printer._raw(byte)
printer._raw(sep)
printer._raw('\n')
main()
|
Add script to output codepage tables.
|
Add script to output codepage tables.
|
Python
|
mit
|
python-escpos/python-escpos,belono/python-escpos,braveheuel/python-escpos
|
Add script to output codepage tables.
|
"""Prints code page tables.
"""
import six
import sys
from escpos import printer
from escpos.constants import *
def main():
dummy = printer.Dummy()
dummy.hw('init')
for codepage in sys.argv[1:] or ['USA']:
dummy.set(height=2, width=2)
dummy._raw(codepage+"\n\n\n")
print_codepage(dummy, codepage)
dummy._raw("\n\n")
dummy.cut()
    print(dummy.output)
def print_codepage(printer, codepage):
if codepage.isdigit():
codepage = int(codepage)
printer._raw(CODEPAGE_CHANGE + six.int2byte(codepage))
printer._raw("after")
else:
printer.charcode(codepage)
sep = ""
# Table header
printer.set(text_type='B')
printer._raw(" %s\n" % sep.join(map(lambda s: hex(s)[2:], range(0,16))))
printer.set()
# The table
for x in range(0,16):
# First column
printer.set(text_type='B')
printer._raw("%s " % hex(x)[2:])
printer.set()
for y in range(0,16):
byte = six.int2byte(x*16+y)
if byte in (ESC, CTL_LF, CTL_FF, CTL_CR, CTL_HT, CTL_VT):
byte = ' '
printer._raw(byte)
printer._raw(sep)
printer._raw('\n')
main()
|
<commit_before><commit_msg>Add script to output codepage tables.<commit_after>
|
"""Prints code page tables.
"""
import six
import sys
from escpos import printer
from escpos.constants import *
def main():
dummy = printer.Dummy()
dummy.hw('init')
for codepage in sys.argv[1:] or ['USA']:
dummy.set(height=2, width=2)
dummy._raw(codepage+"\n\n\n")
print_codepage(dummy, codepage)
dummy._raw("\n\n")
dummy.cut()
    print(dummy.output)
def print_codepage(printer, codepage):
if codepage.isdigit():
codepage = int(codepage)
printer._raw(CODEPAGE_CHANGE + six.int2byte(codepage))
printer._raw("after")
else:
printer.charcode(codepage)
sep = ""
# Table header
printer.set(text_type='B')
printer._raw(" %s\n" % sep.join(map(lambda s: hex(s)[2:], range(0,16))))
printer.set()
# The table
for x in range(0,16):
# First column
printer.set(text_type='B')
printer._raw("%s " % hex(x)[2:])
printer.set()
for y in range(0,16):
byte = six.int2byte(x*16+y)
if byte in (ESC, CTL_LF, CTL_FF, CTL_CR, CTL_HT, CTL_VT):
byte = ' '
printer._raw(byte)
printer._raw(sep)
printer._raw('\n')
main()
|
Add script to output codepage tables."""Prints code page tables.
"""
import six
import sys
from escpos import printer
from escpos.constants import *
def main():
dummy = printer.Dummy()
dummy.hw('init')
for codepage in sys.argv[1:] or ['USA']:
dummy.set(height=2, width=2)
dummy._raw(codepage+"\n\n\n")
print_codepage(dummy, codepage)
dummy._raw("\n\n")
dummy.cut()
    print(dummy.output)
def print_codepage(printer, codepage):
if codepage.isdigit():
codepage = int(codepage)
printer._raw(CODEPAGE_CHANGE + six.int2byte(codepage))
printer._raw("after")
else:
printer.charcode(codepage)
sep = ""
# Table header
printer.set(text_type='B')
printer._raw(" %s\n" % sep.join(map(lambda s: hex(s)[2:], range(0,16))))
printer.set()
# The table
for x in range(0,16):
# First column
printer.set(text_type='B')
printer._raw("%s " % hex(x)[2:])
printer.set()
for y in range(0,16):
byte = six.int2byte(x*16+y)
if byte in (ESC, CTL_LF, CTL_FF, CTL_CR, CTL_HT, CTL_VT):
byte = ' '
printer._raw(byte)
printer._raw(sep)
printer._raw('\n')
main()
|
<commit_before><commit_msg>Add script to output codepage tables.<commit_after>"""Prints code page tables.
"""
import six
import sys
from escpos import printer
from escpos.constants import *
def main():
dummy = printer.Dummy()
dummy.hw('init')
for codepage in sys.argv[1:] or ['USA']:
dummy.set(height=2, width=2)
dummy._raw(codepage+"\n\n\n")
print_codepage(dummy, codepage)
dummy._raw("\n\n")
dummy.cut()
    print(dummy.output)
def print_codepage(printer, codepage):
if codepage.isdigit():
codepage = int(codepage)
printer._raw(CODEPAGE_CHANGE + six.int2byte(codepage))
printer._raw("after")
else:
printer.charcode(codepage)
sep = ""
# Table header
printer.set(text_type='B')
printer._raw(" %s\n" % sep.join(map(lambda s: hex(s)[2:], range(0,16))))
printer.set()
# The table
for x in range(0,16):
# First column
printer.set(text_type='B')
printer._raw("%s " % hex(x)[2:])
printer.set()
for y in range(0,16):
byte = six.int2byte(x*16+y)
if byte in (ESC, CTL_LF, CTL_FF, CTL_CR, CTL_HT, CTL_VT):
byte = ' '
printer._raw(byte)
printer._raw(sep)
printer._raw('\n')
main()
|
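print_codepage above lays the 256 byte values out as a 16 x 16 grid: the bold row label carries the high nibble, the column header the low nibble, and control bytes that would disturb the printer are blanked. A rough stdout-only preview of the same layout, no printer required — it blanks all C0 controls plus DEL, and bytes above 0x7f render in the terminal's encoding rather than the printer's codepage:

def preview_codepage():
    print('  ' + ' '.join('%x' % col for col in range(16)))  # column header
    for row in range(16):
        cells = [' ' if row * 16 + col < 0x20 or row * 16 + col == 0x7f
                 else chr(row * 16 + col)
                 for col in range(16)]
        print('%x ' % row + ' '.join(cells))                 # row label + cells

preview_codepage()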
|
eacb6e0ce160e2b29f7be2f50ae05969ec31543d
|
grovekit/ip_lcd.py
|
grovekit/ip_lcd.py
|
#!/usr/bin/env python
#
# Copyright (c) 2015 Max Vilimpoc
#
# References:
# http://stackoverflow.com/questions/24196932/how-can-i-get-the-ip-address-of-eth0-in-python
# https://github.com/intel-iot-devkit/upm/blob/master/examples/python/rgb-lcd.py
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import socket
import fcntl
import struct
import pyupm_i2clcd as lcd
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
# Initialize Jhd1313m1 at 0x3E (LCD_ADDRESS) and 0x62 (RGB_ADDRESS)
myLcd = lcd.Jhd1313m1(0, 0x3E, 0x62)
# Clear
myLcd.clear()
# Green
myLcd.setColor(255, 255, 0)
# Zero the cursor
myLcd.setCursor(0,0)
# Print it.
ip_address = get_ip_address('wlan0')
myLcd.write(ip_address)
|
Add WiFi IP to LCD
|
Add WiFi IP to LCD
|
Python
|
bsd-2-clause
|
ktkirk/HSSI,ktkirk/HSSI,ktkirk/HSSI
|
Add WiFi IP to LCD
|
#!/usr/bin/env python
#
# Copyright (c) 2015 Max Vilimpoc
#
# References:
# http://stackoverflow.com/questions/24196932/how-can-i-get-the-ip-address-of-eth0-in-python
# https://github.com/intel-iot-devkit/upm/blob/master/examples/python/rgb-lcd.py
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import socket
import fcntl
import struct
import pyupm_i2clcd as lcd
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
# Initialize Jhd1313m1 at 0x3E (LCD_ADDRESS) and 0x62 (RGB_ADDRESS)
myLcd = lcd.Jhd1313m1(0, 0x3E, 0x62)
# Clear
myLcd.clear()
# Green
myLcd.setColor(255, 255, 0)
# Zero the cursor
myLcd.setCursor(0,0)
# Print it.
ip_address = get_ip_address('wlan0')
myLcd.write(ip_address)
|
<commit_before><commit_msg>Add WiFi IP to LCD<commit_after>
|
#!/usr/bin/env python
#
# Copyright (c) 2015 Max Vilimpoc
#
# References:
# http://stackoverflow.com/questions/24196932/how-can-i-get-the-ip-address-of-eth0-in-python
# https://github.com/intel-iot-devkit/upm/blob/master/examples/python/rgb-lcd.py
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import socket
import fcntl
import struct
import pyupm_i2clcd as lcd
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
# Initialize Jhd1313m1 at 0x3E (LCD_ADDRESS) and 0x62 (RGB_ADDRESS)
myLcd = lcd.Jhd1313m1(0, 0x3E, 0x62)
# Clear
myLcd.clear()
# Green
myLcd.setColor(255, 255, 0)
# Zero the cursor
myLcd.setCursor(0,0)
# Print it.
ip_address = get_ip_address('wlan0')
myLcd.write(ip_address)
|
Add WiFi IP to LCD#!/usr/bin/env python
#
# Copyright (c) 2015 Max Vilimpoc
#
# References:
# http://stackoverflow.com/questions/24196932/how-can-i-get-the-ip-address-of-eth0-in-python
# https://github.com/intel-iot-devkit/upm/blob/master/examples/python/rgb-lcd.py
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import socket
import fcntl
import struct
import pyupm_i2clcd as lcd
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
# Initialize Jhd1313m1 at 0x3E (LCD_ADDRESS) and 0x62 (RGB_ADDRESS)
myLcd = lcd.Jhd1313m1(0, 0x3E, 0x62)
# Clear
myLcd.clear()
# Green
myLcd.setColor(255, 255, 0)
# Zero the cursor
myLcd.setCursor(0,0)
# Print it.
ip_address = get_ip_address('wlan0')
myLcd.write(ip_address)
|
<commit_before><commit_msg>Add WiFi IP to LCD<commit_after>#!/usr/bin/env python
#
# Copyright (c) 2015 Max Vilimpoc
#
# References:
# http://stackoverflow.com/questions/24196932/how-can-i-get-the-ip-address-of-eth0-in-python
# https://github.com/intel-iot-devkit/upm/blob/master/examples/python/rgb-lcd.py
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import socket
import fcntl
import struct
import pyupm_i2clcd as lcd
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
# Initialize Jhd1313m1 at 0x3E (LCD_ADDRESS) and 0x62 (RGB_ADDRESS)
myLcd = lcd.Jhd1313m1(0, 0x3E, 0x62)
# Clear
myLcd.clear()
# Green
myLcd.setColor(255, 255, 0)
# Zero the cursor
myLcd.setCursor(0,0)
# Print it.
ip_address = get_ip_address('wlan0')
myLcd.write(ip_address)
|
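get_ip_address above asks the kernel directly: the SIOCGIFADDR ioctl (0x8915) fills an ifreq structure whose bytes 20-24 hold the interface's IPv4 address. The recipe as written is Python 2; on Python 3 the interface name must be packed as bytes, roughly like this (Linux only):

import fcntl
import socket
import struct

def get_ip_address_py3(ifname):
    # Same SIOCGIFADDR ioctl as above, with the bytes packing Python 3 needs.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    packed = struct.pack('256s', ifname[:15].encode('ascii'))
    addr = fcntl.ioctl(s.fileno(), 0x8915, packed)[20:24]  # SIOCGIFADDR
    s.close()
    return socket.inet_ntoa(addr)

# e.g. get_ip_address_py3('wlan0') -> '192.168.1.42' on a connected host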
|
42498b014c2ffcd4a507511aa62c4d49250d2a8c
|
tests/test_config.py
|
tests/test_config.py
|
from time import sleep
from tests.base import IntegrationTest, MultipleSourceTest
class TestSimpleColorAssigment(IntegrationTest):
def configure_global_varialbes(self):
super(TestSimpleColorAssigment, self).configure_global_varialbes()
self.command('let g:taskwiki_source_tw_colors="yes"')
# Also setup TW config at this point
self.tw.execute_command(['config', 'color.active', 'color2'])
def execute(self):
assert "ctermfg=2" in self.command("hi TaskWikiTaskActive", silent=False)
|
Add test to test automatic color assignment from TW
|
tests: Add test to test automatic color assignment from TW
|
Python
|
mit
|
Spirotot/taskwiki,phha/taskwiki
|
tests: Add test to test automatic color assignment from TW
|
from time import sleep
from tests.base import IntegrationTest, MultipleSourceTest
class TestSimpleColorAssigment(IntegrationTest):
def configure_global_varialbes(self):
super(TestSimpleColorAssigment, self).configure_global_varialbes()
self.command('let g:taskwiki_source_tw_colors="yes"')
# Also setup TW config at this point
self.tw.execute_command(['config', 'color.active', 'color2'])
def execute(self):
assert "ctermfg=2" in self.command("hi TaskWikiTaskActive", silent=False)
|
<commit_before><commit_msg>tests: Add test to test automatic color assignment from TW<commit_after>
|
from time import sleep
from tests.base import IntegrationTest, MultipleSourceTest
class TestSimpleColorAssigment(IntegrationTest):
def configure_global_varialbes(self):
super(TestSimpleColorAssigment, self).configure_global_varialbes()
self.command('let g:taskwiki_source_tw_colors="yes"')
# Also setup TW config at this point
self.tw.execute_command(['config', 'color.active', 'color2'])
def execute(self):
assert "ctermfg=2" in self.command("hi TaskWikiTaskActive", silent=False)
|
tests: Add test to test automatic color assignment from TWfrom time import sleep
from tests.base import IntegrationTest, MultipleSourceTest
class TestSimpleColorAssigment(IntegrationTest):
def configure_global_varialbes(self):
super(TestSimpleColorAssigment, self).configure_global_varialbes()
self.command('let g:taskwiki_source_tw_colors="yes"')
# Also setup TW config at this point
self.tw.execute_command(['config', 'color.active', 'color2'])
def execute(self):
assert "ctermfg=2" in self.command("hi TaskWikiTaskActive", silent=False)
|
<commit_before><commit_msg>tests: Add test to test automatic color assignment from TW<commit_after>from time import sleep
from tests.base import IntegrationTest, MultipleSourceTest
class TestSimpleColorAssigment(IntegrationTest):
def configure_global_varialbes(self):
super(TestSimpleColorAssigment, self).configure_global_varialbes()
self.command('let g:taskwiki_source_tw_colors="yes"')
# Also setup TW config at this point
self.tw.execute_command(['config', 'color.active', 'color2'])
def execute(self):
assert "ctermfg=2" in self.command("hi TaskWikiTaskActive", silent=False)
|
|
29cc044b50cf0fdc8bfad97f194ea7fa993e08e6
|
tests/test_worker.py
|
tests/test_worker.py
|
from twisted.trial import unittest
from ooni.plugoo import work, tests
class WorkerTestCase(unittest.TestCase):
def testWorkGenerator(self):
class DummyTest:
assets = {}
dummytest = DummyTest()
asset = []
for i in range(10):
asset.append(i)
dummytest.assets['asset'] = asset
wgen = work.WorkGenerator(dummytest)
for j, x in enumerate(wgen):
pass
self.assertEqual(i, j)
|
Write test case for worker
|
Write test case for worker
|
Python
|
bsd-2-clause
|
kdmurray91/ooni-probe,Karthikeyan-kkk/ooni-probe,Karthikeyan-kkk/ooni-probe,0xPoly/ooni-probe,Karthikeyan-kkk/ooni-probe,lordappsec/ooni-probe,kdmurray91/ooni-probe,lordappsec/ooni-probe,0xPoly/ooni-probe,0xPoly/ooni-probe,hackerberry/ooni-probe,juga0/ooni-probe,juga0/ooni-probe,lordappsec/ooni-probe,hackerberry/ooni-probe,Karthikeyan-kkk/ooni-probe,lordappsec/ooni-probe,kdmurray91/ooni-probe,juga0/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,juga0/ooni-probe
|
Write test case for worker
|
from twisted.trial import unittest
from ooni.plugoo import work, tests
class WorkerTestCase(unittest.TestCase):
def testWorkGenerator(self):
class DummyTest:
assets = {}
dummytest = DummyTest()
asset = []
for i in range(10):
asset.append(i)
dummytest.assets['asset'] = asset
wgen = work.WorkGenerator(dummytest)
for j, x in enumerate(wgen):
pass
self.assertEqual(i, j)
|
<commit_before><commit_msg>Write test case for worker<commit_after>
|
from twisted.trial import unittest
from ooni.plugoo import work, tests
class WorkerTestCase(unittest.TestCase):
def testWorkGenerator(self):
class DummyTest:
assets = {}
dummytest = DummyTest()
asset = []
for i in range(10):
asset.append(i)
dummytest.assets['asset'] = asset
wgen = work.WorkGenerator(dummytest)
for j, x in enumerate(wgen):
pass
self.assertEqual(i, j)
|
Write test case for workerfrom twisted.trial import unittest
from ooni.plugoo import work, tests
class WorkerTestCase(unittest.TestCase):
def testWorkGenerator(self):
class DummyTest:
assets = {}
dummytest = DummyTest()
asset = []
for i in range(10):
asset.append(i)
dummytest.assets['asset'] = asset
wgen = work.WorkGenerator(dummytest)
for j, x in enumerate(wgen):
pass
self.assertEqual(i, j)
|
<commit_before><commit_msg>Write test case for worker<commit_after>from twisted.trial import unittest
from ooni.plugoo import work, tests
class WorkerTestCase(unittest.TestCase):
def testWorkGenerator(self):
class DummyTest:
assets = {}
dummytest = DummyTest()
asset = []
for i in range(10):
asset.append(i)
dummytest.assets['asset'] = asset
wgen = work.WorkGenerator(dummytest)
for j, x in enumerate(wgen):
pass
self.assertEqual(i, j)
|
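The closing assertEqual works because Python keeps loop variables alive after their loop: i ends as the last asset index and j as the last index enumerate produced, so i == j means WorkGenerator yielded exactly one work unit per asset entry. The same counting idiom in isolation:

def count_yields(gen):
    # The loop variable survives the loop, exactly as in the test above.
    count = 0
    for count, _ in enumerate(gen, start=1):
        pass
    return count

assert count_yields(x * 2 for x in range(10)) == 10
assert count_yields(iter(())) == 0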
|
0bb35201c93fb364b6521f8b6ef1693e64034f74
|
illumstats.py
|
illumstats.py
|
import h5py
import re
import numpy as np
from image_toolbox.util import regex_from_format_string
class Illumstats:
'''Utility class for an illumination correction statistics file.
The class provides the mean and standard deviation image,
which were precalculated across all images acquired in the same channel,
and the corresponding channel number.
'''
def __init__(self, filename, cfg):
'''
Initialize Illumstats class.
Parameters:
:filename: Path to the statistics file : str.
:cfg: Configuration settings : dict.
'''
self.cfg = cfg
self.filename = filename
self._statistics = None
self._mean_image = None
self._std_image = None
self._channel = None
@property
def statistics(self):
'''
Load precomputed statistics and return mean and standard deviation
images as a tuple of numpy arrays.
By default the statistics files are HDF5 files
with the following structure:
/stat_values Group
/stat_values/mean Dataset
/stat_values/std Dataset
'''
if not self._statistics:
stats = h5py.File(self.filename, 'r')
stats = stats['stat_values']
# Matlab transposes arrays when saving them to HDF5 files
# so we have to transpose them back!
mean_image = np.array(stats['mean'][()], dtype='float64').conj().T
std_image = np.array(stats['std'][()], dtype='float64').conj().T
self._statistics = (mean_image, std_image)
return self._statistics
@property
def channel(self):
if not self._channel:
regexp = regex_from_format_string(self.cfg['STATS_FILE_FORMAT'])
m = re.search(regexp, self.filename)
if not m:
raise Exception('Can\'t determine channel from '
'illumination statistics file "%s"'
% self.filename)
self._channel = int(m.group('channel'))
return self._channel
@property
def mean_image(self):
        # Compare to None: truth-testing a NumPy array raises ValueError.
        if self._mean_image is None:
            self._mean_image = self.statistics[0]
return self._mean_image
@property
def std_image(self):
        if self._std_image is None:
            self._std_image = self.statistics[1]
return self._std_image
|
Add new class for illumination statistics files
|
Add new class for illumination statistics files
|
Python
|
agpl-3.0
|
TissueMAPS/TmLibrary,TissueMAPS/TmLibrary,TissueMAPS/TmLibrary,TissueMAPS/TmLibrary,TissueMAPS/TmLibrary
|
Add new class for illumination statistics files
|
import h5py
import re
import numpy as np
from image_toolbox.util import regex_from_format_string
class Illumstats:
'''Utility class for an illumination correction statistics file.
The class provides the mean and standard deviation image,
which were precalculated across all images acquired in the same channel,
and the corresponding channel number.
'''
def __init__(self, filename, cfg):
'''
Initialize Illumstats class.
Parameters:
:filename: Path to the statistics file : str.
:cfg: Configuration settings : dict.
'''
self.cfg = cfg
self.filename = filename
self._statistics = None
self._mean_image = None
self._std_image = None
self._channel = None
@property
def statistics(self):
'''
Load precomputed statistics and return mean and standard deviation
images as a tuple of numpy arrays.
By default the statistics files are HDF5 files
with the following structure:
/stat_values Group
/stat_values/mean Dataset
/stat_values/std Dataset
'''
if not self._statistics:
stats = h5py.File(self.filename, 'r')
stats = stats['stat_values']
# Matlab transposes arrays when saving them to HDF5 files
# so we have to transpose them back!
mean_image = np.array(stats['mean'][()], dtype='float64').conj().T
std_image = np.array(stats['std'][()], dtype='float64').conj().T
self._statistics = (mean_image, std_image)
return self._statistics
@property
def channel(self):
if not self._channel:
regexp = regex_from_format_string(self.cfg['STATS_FILE_FORMAT'])
m = re.search(regexp, self.filename)
if not m:
raise Exception('Can\'t determine channel from '
'illumination statistics file "%s"'
% self.filename)
self._channel = int(m.group('channel'))
return self._channel
@property
def mean_image(self):
        # Compare to None: truth-testing a NumPy array raises ValueError.
        if self._mean_image is None:
            self._mean_image = self.statistics[0]
return self._mean_image
@property
def std_image(self):
        if self._std_image is None:
            self._std_image = self.statistics[1]
return self._std_image
|
<commit_before><commit_msg>Add new class for illumination statistics files<commit_after>
|
import h5py
import re
import numpy as np
from image_toolbox.util import regex_from_format_string
class Illumstats:
'''Utility class for an illumination correction statistics file.
The class provides the mean and standard deviation image,
which were precalculated across all images acquired in the same channel,
and the corresponding channel number.
'''
def __init__(self, filename, cfg):
'''
Initialize Illumstats class.
Parameters:
:filename: Path to the statistics file : str.
:cfg: Configuration settings : dict.
'''
self.cfg = cfg
self.filename = filename
self._statistics = None
self._mean_image = None
self._std_image = None
self._channel = None
@property
def statistics(self):
'''
Load precomputed statistics and return mean and standard deviation
images as a tuple of numpy arrays.
By default the statistics files are HDF5 files
with the following structure:
/stat_values Group
/stat_values/mean Dataset
/stat_values/std Dataset
'''
if not self._statistics:
stats = h5py.File(self.filename, 'r')
stats = stats['stat_values']
# Matlab transposes arrays when saving them to HDF5 files
# so we have to transpose them back!
mean_image = np.array(stats['mean'][()], dtype='float64').conj().T
std_image = np.array(stats['std'][()], dtype='float64').conj().T
self._statistics = (mean_image, std_image)
return self._statistics
@property
def channel(self):
if not self._channel:
regexp = regex_from_format_string(self.cfg['STATS_FILE_FORMAT'])
m = re.search(regexp, self.filename)
if not m:
raise Exception('Can\'t determine channel from '
'illumination statistics file "%s"'
% self.filename)
self._channel = int(m.group('channel'))
return self._channel
@property
def mean_image(self):
        # Compare to None: truth-testing a NumPy array raises ValueError.
        if self._mean_image is None:
            self._mean_image = self.statistics[0]
return self._mean_image
@property
def std_image(self):
        if self._std_image is None:
            self._std_image = self.statistics[1]
return self._std_image
|
Add new class for illumination statistics filesimport h5py
import re
import numpy as np
from image_toolbox.util import regex_from_format_string
class Illumstats:
'''Utility class for an illumination correction statistics file.
The class provides the mean and standard deviation image,
which were precalculated across all images acquired in the same channel,
and the corresponding channel number.
'''
def __init__(self, filename, cfg):
'''
Initialize Illumstats class.
Parameters:
:filename: Path to the statistics file : str.
:cfg: Configuration settings : dict.
'''
self.cfg = cfg
self.filename = filename
self._statistics = None
self._mean_image = None
self._std_image = None
self._channel = None
@property
def statistics(self):
'''
Load precomputed statistics and return mean and standard deviation
images as a tuple of numpy arrays.
By default the statistics files are HDF5 files
with the following structure:
/stat_values Group
/stat_values/mean Dataset
/stat_values/std Dataset
'''
if not self._statistics:
stats = h5py.File(self.filename, 'r')
stats = stats['stat_values']
# Matlab transposes arrays when saving them to HDF5 files
# so we have to transpose them back!
mean_image = np.array(stats['mean'][()], dtype='float64').conj().T
std_image = np.array(stats['std'][()], dtype='float64').conj().T
self._statistics = (mean_image, std_image)
return self._statistics
@property
def channel(self):
if not self._channel:
regexp = regex_from_format_string(self.cfg['STATS_FILE_FORMAT'])
m = re.search(regexp, self.filename)
if not m:
raise Exception('Can\'t determine channel from '
'illumination statistics file "%s"'
% self.filename)
self._channel = int(m.group('channel'))
return self._channel
@property
def mean_image(self):
if not self._mean_image:
self._mean_image = self._statistics[0]
return self._mean_image
@property
def std_image(self):
if not self._std_image:
self._std_image = self._statistics[1]
return self._std_image
|
<commit_before><commit_msg>Add new class for illumination statistics files<commit_after>import h5py
import re
import numpy as np
from image_toolbox.util import regex_from_format_string
class Illumstats:
'''Utility class for an illumination correction statistics file.
The class provides the mean and standard deviation image,
which were precalculated across all images acquired in the same channel,
and the corresponding channel number.
'''
def __init__(self, filename, cfg):
'''
Initialize Illumstats class.
Parameters:
:filename: Path to the statistics file : str.
:cfg: Configuration settings : dict.
'''
self.cfg = cfg
self.filename = filename
self._statistics = None
self._mean_image = None
self._std_image = None
self._channel = None
@property
def statistics(self):
'''
Load precomputed statistics and return mean and standard deviation
images as a tuple of numpy arrays.
By default the statistics files are HDF5 files
with the following structure:
/stat_values Group
/stat_values/mean Dataset
/stat_values/std Dataset
'''
if not self._statistics:
stats = h5py.File(self.filename, 'r')
stats = stats['stat_values']
# Matlab transposes arrays when saving them to HDF5 files
# so we have to transpose them back!
mean_image = np.array(stats['mean'][()], dtype='float64').conj().T
std_image = np.array(stats['std'][()], dtype='float64').conj().T
self._statistics = (mean_image, std_image)
return self._statistics
@property
def channel(self):
if not self._channel:
regexp = regex_from_format_string(self.cfg['STATS_FILE_FORMAT'])
m = re.search(regexp, self.filename)
if not m:
raise Exception('Can\'t determine channel from '
'illumination statistics file "%s"'
% self.filename)
self._channel = int(m.group('channel'))
return self._channel
@property
def mean_image(self):
if not self._mean_image:
self._mean_image = self._statistics[0]
return self._mean_image
@property
def std_image(self):
if not self._std_image:
self._std_image = self._statistics[1]
return self._std_image
|
|
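A minimal usage sketch for the Illumstats class above; the file name and STATS_FILE_FORMAT value are hypothetical placeholders, a real HDF5 statistics file is assumed to exist on disk, and regex_from_format_string is assumed to yield a pattern with a named 'channel' group, as the channel property requires. Note also that mean_image and std_image test a numpy array with `not`, which raises ValueError on any access after the first; `if self._mean_image is None` would be the safe guard.
import numpy as np
# Hypothetical config; the real format string comes from the pipeline settings.
cfg = {'STATS_FILE_FORMAT': 'illumstats_channel{channel}.h5'}
stats = Illumstats('illumstats_channel1.h5', cfg)
mean, std = stats.statistics   # lazily reads /stat_values/mean and /stat_values/std
print(stats.channel)           # channel number parsed from the file name, here 1
raw = np.ones_like(mean)       # stand-in for an acquired image
corrected = (raw - mean) / std  # a typical illumination-correction step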
2db27459ce1e102038a610e0d432ac5090097d27
|
hdltools/codegen/__init__.py
|
hdltools/codegen/__init__.py
|
"""Code generation primitives."""
import hdltools.verilog.codegen
import hdltools.specc.codegen
BUILTIN_CODE_GENERATORS = {
"verilog": hdltools.verilog.codegen.VerilogCodeGenerator,
"specc": hdltools.specc.codegen.SpecCCodeGenerator,
}
|
Add list of builtin codegenerators
|
Add list of builtin codegenerators
|
Python
|
mit
|
brunosmmm/hdltools,brunosmmm/hdltools
|
Add list of builtin codegenerators
|
"""Code generation primitives."""
import hdltools.verilog.codegen
import hdltools.specc.codegen
BUILTIN_CODE_GENERATORS = {
"verilog": hdltools.verilog.codegen.VerilogCodeGenerator,
"specc": hdltools.specc.codegen.SpecCCodeGenerator,
}
|
<commit_before><commit_msg>Add list of builtin codegenerators<commit_after>
|
"""Code generation primitives."""
import hdltools.verilog.codegen
import hdltools.specc.codegen
BUILTIN_CODE_GENERATORS = {
"verilog": hdltools.verilog.codegen.VerilogCodeGenerator,
"specc": hdltools.specc.codegen.SpecCCodeGenerator,
}
|
Add list of builtin codegenerators
"""Code generation primitives."""
import hdltools.verilog.codegen
import hdltools.specc.codegen
BUILTIN_CODE_GENERATORS = {
"verilog": hdltools.verilog.codegen.VerilogCodeGenerator,
"specc": hdltools.specc.codegen.SpecCCodeGenerator,
}
|
<commit_before><commit_msg>Add list of builtin codegenerators<commit_after>"""Code generation primitives."""
import hdltools.verilog.codegen
import hdltools.specc.codegen
BUILTIN_CODE_GENERATORS = {
"verilog": hdltools.verilog.codegen.VerilogCodeGenerator,
"specc": hdltools.specc.codegen.SpecCCodeGenerator,
}
|
|
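A sketch of how a registry like BUILTIN_CODE_GENERATORS is typically consumed: look the generator class up by target-language name and instantiate it. The helper below is hypothetical, and the real generator constructors may require arguments not shown here.
def get_code_generator(name):
    # An unknown name surfaces as a KeyError, signalling an unsupported target.
    return BUILTIN_CODE_GENERATORS[name]
verilog_cls = get_code_generator('verilog')
generator = verilog_cls()  # assumed zero-argument construction, for illustration only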
ef1d65282771c806f68d717d57172597184db26c
|
rest_framework/tests/test_urlizer.py
|
rest_framework/tests/test_urlizer.py
|
from __future__ import unicode_literals
from django.test import TestCase
from rest_framework.templatetags.rest_framework import urlize_quoted_links
import sys
class URLizerTests(TestCase):
"""
Test if both JSON and YAML URLs are transformed into links well
"""
def _urlize_dict_check(self, data):
"""
For all items in dict test assert that the value is urlized key
"""
for original, urlized in data.items():
assert urlize_quoted_links(original, nofollow=False) == urlized
def test_json_with_url(self):
"""
Test if JSON URLs are transformed into links well
"""
data = {}
data['"url": "http://api/users/1/", '] = \
'"url": "<a href="http://api/users/1/">http://api/users/1/</a>", '
data['"foo_set": [\n "http://api/foos/1/"\n], '] = \
'"foo_set": [\n "<a href="http://api/foos/1/">http://api/foos/1/</a>"\n], '
self._urlize_dict_check(data)
def test_yaml_with_url(self):
"""
Test if YAML URLs are transformed into links well
"""
data = {}
data['''{users: 'http://api/users/'}'''] = \
'''{users: '<a href="http://api/users/">http://api/users/</a>'}'''
data['''foo_set: ['http://api/foos/1/']'''] = \
'''foo_set: ['<a href="http://api/foos/1/">http://api/foos/1/</a>']'''
self._urlize_dict_check(data)
|
Introduce tests for urlize_quoted_links() function
|
Introduce tests for urlize_quoted_links() function
|
Python
|
bsd-2-clause
|
yiyocx/django-rest-framework,buptlsl/django-rest-framework,arpheno/django-rest-framework,jness/django-rest-framework,kylefox/django-rest-framework,jerryhebert/django-rest-framework,jpadilla/django-rest-framework,justanr/django-rest-framework,abdulhaq-e/django-rest-framework,wwj718/django-rest-framework,wwj718/django-rest-framework,zeldalink0515/django-rest-framework,andriy-s/django-rest-framework,kennydude/django-rest-framework,rafaelang/django-rest-framework,abdulhaq-e/django-rest-framework,aericson/django-rest-framework,iheitlager/django-rest-framework,kgeorgy/django-rest-framework,jpadilla/django-rest-framework,tomchristie/django-rest-framework,atombrella/django-rest-framework,James1345/django-rest-framework,d0ugal/django-rest-framework,d0ugal/django-rest-framework,nryoung/django-rest-framework,wedaly/django-rest-framework,xiaotangyuan/django-rest-framework,cheif/django-rest-framework,brandoncazander/django-rest-framework,elim/django-rest-framework,wedaly/django-rest-framework,arpheno/django-rest-framework,wzbozon/django-rest-framework,dmwyatt/django-rest-framework,vstoykov/django-rest-framework,sbellem/django-rest-framework,cyberj/django-rest-framework,kennydude/django-rest-framework,rafaelang/django-rest-framework,waytai/django-rest-framework,thedrow/django-rest-framework-1,linovia/django-rest-framework,jtiai/django-rest-framework,d0ugal/django-rest-framework,rubendura/django-rest-framework,aericson/django-rest-framework,sbellem/django-rest-framework,justanr/django-rest-framework,iheitlager/django-rest-framework,simudream/django-rest-framework,maryokhin/django-rest-framework,ajaali/django-rest-framework,sheppard/django-rest-framework,hnakamur/django-rest-framework,fishky/django-rest-framework,arpheno/django-rest-framework,sehmaschine/django-rest-framework,lubomir/django-rest-framework,ambivalentno/django-rest-framework,damycra/django-rest-framework,kylefox/django-rest-framework,adambain-vokal/django-rest-framework,lubomir/django-rest-framework,maryokhin/django-rest-framework,simudream/django-rest-framework,bluedazzle/django-rest-framework,krinart/django-rest-framework,alacritythief/django-rest-framework,AlexandreProenca/django-rest-framework,callorico/django-rest-framework,ezheidtmann/django-rest-framework,alacritythief/django-rest-framework,linovia/django-rest-framework,kgeorgy/django-rest-framework,krinart/django-rest-framework,atombrella/django-rest-framework,jpulec/django-rest-framework,canassa/django-rest-framework,simudream/django-rest-framework,sbellem/django-rest-framework,dmwyatt/django-rest-framework,nhorelik/django-rest-framework,davesque/django-rest-framework,nhorelik/django-rest-framework,ezheidtmann/django-rest-framework,rafaelcaricio/django-rest-framework,douwevandermeij/django-rest-framework,thedrow/django-rest-framework-1,raphaelmerx/django-rest-framework,delinhabit/django-rest-framework,jness/django-rest-framework,pombredanne/django-rest-framework,canassa/django-rest-framework,pombredanne/django-rest-framework,raphaelmerx/django-rest-framework,ebsaral/django-rest-framework,ossanna16/django-rest-framework,qsorix/django-rest-framework,sheppard/django-rest-framework,jerryhebert/django-rest-framework,andriy-s/django-rest-framework,wzbozon/django-rest-framework,delinhabit/django-rest-framework,paolopaolopaolo/django-rest-framework,cheif/django-rest-framework,zeldalink0515/django-rest-framework,leeahoward/django-rest-framework,davesque/django-rest-framework,edx/django-rest-framework,agconti/django-rest-framework,fishky/django-rest-framework,uploadcare/django-rest-framework,ashishfinoit/django-rest-framework,nryoung/django-rest-framework,canassa/django-rest-framework,fishky/django-rest-framework,VishvajitP/django-rest-framework,adambain-vokal/django-rest-framework,mgaitan/django-rest-framework,brandoncazander/django-rest-framework,tomchristie/django-rest-framework,qsorix/django-rest-framework,kgeorgy/django-rest-framework,antonyc/django-rest-framework,wwj718/django-rest-framework,ambivalentno/django-rest-framework,elim/django-rest-framework,hnarayanan/django-rest-framework,xiaotangyuan/django-rest-framework,MJafarMashhadi/django-rest-framework,hunter007/django-rest-framework,vstoykov/django-rest-framework,rubendura/django-rest-framework,ossanna16/django-rest-framework,tcroiset/django-rest-framework,raphaelmerx/django-rest-framework,rafaelcaricio/django-rest-framework,douwevandermeij/django-rest-framework,callorico/django-rest-framework,lubomir/django-rest-framework,xiaotangyuan/django-rest-framework,vstoykov/django-rest-framework,werthen/django-rest-framework,cheif/django-rest-framework,cyberj/django-rest-framework,kezabelle/django-rest-framework,wangpanjun/django-rest-framework,zeldalink0515/django-rest-framework,ebsaral/django-rest-framework,uruz/django-rest-framework,jness/django-rest-framework,jerryhebert/django-rest-framework,gregmuellegger/django-rest-framework,antonyc/django-rest-framework,hunter007/django-rest-framework,ashishfinoit/django-rest-framework,potpath/django-rest-framework,VishvajitP/django-rest-framework,abdulhaq-e/django-rest-framework,bluedazzle/django-rest-framework,tomchristie/django-rest-framework,hnakamur/django-rest-framework,kezabelle/django-rest-framework,elim/django-rest-framework,justanr/django-rest-framework,tigeraniya/django-rest-framework,buptlsl/django-rest-framework,potpath/django-rest-framework,pombredanne/django-rest-framework,akalipetis/django-rest-framework,HireAnEsquire/django-rest-framework,mgaitan/django-rest-framework,bluedazzle/django-rest-framework,ossanna16/django-rest-framework,linovia/django-rest-framework,adambain-vokal/django-rest-framework,wzbozon/django-rest-framework,HireAnEsquire/django-rest-framework,jpulec/django-rest-framework,werthen/django-rest-framework,ajaali/django-rest-framework,wangpanjun/django-rest-framework,hunter007/django-rest-framework,damycra/django-rest-framework,qsorix/django-rest-framework,damycra/django-rest-framework,atombrella/django-rest-framework,ambivalentno/django-rest-framework,rhblind/django-rest-framework,uruz/django-rest-framework,leeahoward/django-rest-framework,ebsaral/django-rest-framework,paolopaolopaolo/django-rest-framework,wedaly/django-rest-framework,ticosax/django-rest-framework,agconti/django-rest-framework,leeahoward/django-rest-framework,johnraz/django-rest-framework,sheppard/django-rest-framework,rhblind/django-rest-framework,mgaitan/django-rest-framework,johnraz/django-rest-framework,tcroiset/django-rest-framework,wangpanjun/django-rest-framework,callorico/django-rest-framework,hnarayanan/django-rest-framework,YBJAY00000/django-rest-framework,cyberj/django-rest-framework,waytai/django-rest-framework,sehmaschine/django-rest-framework,AlexandreProenca/django-rest-framework,jtiai/django-rest-framework,rafaelang/django-rest-framework
|
Introduce tests for urlize_quoted_links() function
|
from __future__ import unicode_literals
from django.test import TestCase
from rest_framework.templatetags.rest_framework import urlize_quoted_links
import sys
class URLizerTests(TestCase):
"""
Test if both JSON and YAML URLs are transformed into links well
"""
def _urlize_dict_check(self, data):
"""
For all items in dict test assert that the value is urlized key
"""
for original, urlized in data.items():
assert urlize_quoted_links(original, nofollow=False) == urlized
def test_json_with_url(self):
"""
Test if JSON URLs are transformed into links well
"""
data = {}
data['"url": "http://api/users/1/", '] = \
'"url": "<a href="http://api/users/1/">http://api/users/1/</a>", '
data['"foo_set": [\n "http://api/foos/1/"\n], '] = \
'"foo_set": [\n "<a href="http://api/foos/1/">http://api/foos/1/</a>"\n], '
self._urlize_dict_check(data)
def test_yaml_with_url(self):
"""
Test if YAML URLs are transformed into links well
"""
data = {}
data['''{users: 'http://api/users/'}'''] = \
'''{users: '<a href="http://api/users/">http://api/users/</a>'}'''
data['''foo_set: ['http://api/foos/1/']'''] = \
'''foo_set: ['<a href="http://api/foos/1/">http://api/foos/1/</a>']'''
self._urlize_dict_check(data)
|
<commit_before><commit_msg>Introduce tests for urlize_quoted_links() function<commit_after>
|
from __future__ import unicode_literals
from django.test import TestCase
from rest_framework.templatetags.rest_framework import urlize_quoted_links
import sys
class URLizerTests(TestCase):
"""
Test if both JSON and YAML URLs are transformed into links well
"""
def _urlize_dict_check(self, data):
"""
For all items in dict test assert that the value is urlized key
"""
for original, urlized in data.items():
assert urlize_quoted_links(original, nofollow=False) == urlized
def test_json_with_url(self):
"""
Test if JSON URLs are transformed into links well
"""
data = {}
data['"url": "http://api/users/1/", '] = \
'"url": "<a href="http://api/users/1/">http://api/users/1/</a>", '
data['"foo_set": [\n "http://api/foos/1/"\n], '] = \
'"foo_set": [\n "<a href="http://api/foos/1/">http://api/foos/1/</a>"\n], '
self._urlize_dict_check(data)
def test_yaml_with_url(self):
"""
Test if YAML URLs are transformed into links well
"""
data = {}
data['''{users: 'http://api/users/'}'''] = \
'''{users: '<a href="http://api/users/">http://api/users/</a>'}'''
data['''foo_set: ['http://api/foos/1/']'''] = \
'''foo_set: ['<a href="http://api/foos/1/">http://api/foos/1/</a>']'''
self._urlize_dict_check(data)
|
Introduce tests for urlize_quoted_links() function
from __future__ import unicode_literals
from django.test import TestCase
from rest_framework.templatetags.rest_framework import urlize_quoted_links
import sys
class URLizerTests(TestCase):
"""
Test if both JSON and YAML URLs are transformed into links well
"""
def _urlize_dict_check(self, data):
"""
For all items in dict test assert that the value is urlized key
"""
for original, urlized in data.items():
assert urlize_quoted_links(original, nofollow=False) == urlized
def test_json_with_url(self):
"""
Test if JSON URLs are transformed into links well
"""
data = {}
data['"url": "http://api/users/1/", '] = \
'"url": "<a href="http://api/users/1/">http://api/users/1/</a>", '
data['"foo_set": [\n "http://api/foos/1/"\n], '] = \
'"foo_set": [\n "<a href="http://api/foos/1/">http://api/foos/1/</a>"\n], '
self._urlize_dict_check(data)
def test_yaml_with_url(self):
"""
Test if YAML URLs are transformed into links well
"""
data = {}
data['''{users: 'http://api/users/'}'''] = \
'''{users: '<a href="http://api/users/">http://api/users/</a>'}'''
data['''foo_set: ['http://api/foos/1/']'''] = \
'''foo_set: ['<a href="http://api/foos/1/">http://api/foos/1/</a>']'''
self._urlize_dict_check(data)
|
<commit_before><commit_msg>Introduce tests for urlize_quoted_links() function<commit_after>from __future__ import unicode_literals
from django.test import TestCase
from rest_framework.templatetags.rest_framework import urlize_quoted_links
import sys
class URLizerTests(TestCase):
"""
Test if both JSON and YAML URLs are transformed into links well
"""
def _urlize_dict_check(self, data):
"""
For all items in dict test assert that the value is urlized key
"""
for original, urlized in data.items():
assert urlize_quoted_links(original, nofollow=False) == urlized
def test_json_with_url(self):
"""
Test if JSON URLs are transformed into links well
"""
data = {}
data['"url": "http://api/users/1/", '] = \
'"url": "<a href="http://api/users/1/">http://api/users/1/</a>", '
data['"foo_set": [\n "http://api/foos/1/"\n], '] = \
'"foo_set": [\n "<a href="http://api/foos/1/">http://api/foos/1/</a>"\n], '
self._urlize_dict_check(data)
def test_yaml_with_url(self):
"""
Test if YAML URLs are transformed into links well
"""
data = {}
data['''{users: 'http://api/users/'}'''] = \
'''{users: '<a href="http://api/users/">http://api/users/</a>'}'''
data['''foo_set: ['http://api/foos/1/']'''] = \
'''foo_set: ['<a href="http://api/foos/1/">http://api/foos/1/</a>']'''
self._urlize_dict_check(data)
|
|
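The template filter under test can also be exercised directly; a small sketch using one of the JSON fixtures from the tests above, with the expected output in the comment:
from rest_framework.templatetags.rest_framework import urlize_quoted_links
html = urlize_quoted_links('"url": "http://api/users/1/", ', nofollow=False)
# '"url": "<a href="http://api/users/1/">http://api/users/1/</a>", '
print(html)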
a24c6c66642060bf948023462a75fbc25ce6bc64
|
core/migrations/0030_email_flags.py
|
core/migrations/0030_email_flags.py
|
# Generated by Django 1.10.5 on 2017-01-07 17:29
from __future__ import unicode_literals
import datetime
from django.db import migrations
from django.utils import timezone
def add_timestamps_to_event(event):
""" This will:
add a thank-you-email-sent timestamp to all events in the past
add a submit-information-email-sent timestamp to all events in the past
"""
today = datetime.date.today()
if event.date < today:
if (
not all((event.date.year, event.date.month, event.date.day)) and
event.date.month == today.month and
event.date.year == today.year
):
# for events that don't have a firm date, if year and month are the same as today then we can't know
# for sure that this event is already over.
return
event.thank_you_email_sent = timezone.now()
event.submit_information_email_sent = timezone.now()
event.save(update_fields=['thank_you_email_sent', 'submit_information_email_sent'])
def set_default_email_flags(apps, schema_editor):
Event = apps.get_model("core", "Event")
for event in Event.objects.all():
add_timestamps_to_event(event)
class Migration(migrations.Migration):
dependencies = [
('core', '0029_auto_20170107_1539'),
]
operations = [
migrations.RunPython(set_default_email_flags),
]
|
Add a data migration to make sure we don’t send a lot of emails to old events
|
Add a data migration to make sure we don’t send a lot of emails to old events
|
Python
|
bsd-3-clause
|
patjouk/djangogirls,patjouk/djangogirls,patjouk/djangogirls,DjangoGirls/djangogirls,DjangoGirls/djangogirls,DjangoGirls/djangogirls,patjouk/djangogirls
|
Add a data migration to make sure we don’t send a lot of emails to old events
|
# Generated by Django 1.10.5 on 2017-01-07 17:29
from __future__ import unicode_literals
import datetime
from django.db import migrations
from django.utils import timezone
def add_timestamps_to_event(event):
""" This will:
add a thank-you-email-sent timestamp to all events in the past
add a submit-information-email-sent timestamp to all events in the past
"""
today = datetime.date.today()
if event.date < today:
if (
not all((event.date.year, event.date.month, event.date.day)) and
event.date.month == today.month and
event.date.year == today.year
):
# for events that don't have a firm date, if year and month are the same as today then we can't know
# for sure that this event is already over.
return
event.thank_you_email_sent = timezone.now()
event.submit_information_email_sent = timezone.now()
event.save(update_fields=['thank_you_email_sent', 'submit_information_email_sent'])
def set_default_email_flags(apps, schema_editor):
Event = apps.get_model("core", "Event")
for event in Event.objects.all():
add_timestamps_to_event(event)
class Migration(migrations.Migration):
dependencies = [
('core', '0029_auto_20170107_1539'),
]
operations = [
migrations.RunPython(set_default_email_flags),
]
|
<commit_before><commit_msg>Add a data migration to make sure we don’t send a lot of emails to old events<commit_after>
|
# Generated by Django 1.10.5 on 2017-01-07 17:29
from __future__ import unicode_literals
import datetime
from django.db import migrations
from django.utils import timezone
def add_timestamps_to_event(event):
""" This will:
add a thank-you-email-sent timestamp to all events in the past
add a submit-information-email-sent timestamp to all events in the past
"""
today = datetime.date.today()
if event.date < today:
if (
not all((event.date.year, event.date.month, event.date.day)) and
event.date.month == today.month and
event.date.year == today.year
):
# for events that don't have a firm date, if year and month are the same as today then we can't know
# for sure that this event is already over.
return
event.thank_you_email_sent = timezone.now()
event.submit_information_email_sent = timezone.now()
event.save(update_fields=['thank_you_email_sent', 'submit_information_email_sent'])
def set_default_email_flags(apps, schema_editor):
Event = apps.get_model("core", "Event")
for event in Event.objects.all():
add_timestamps_to_event(event)
class Migration(migrations.Migration):
dependencies = [
('core', '0029_auto_20170107_1539'),
]
operations = [
migrations.RunPython(set_default_email_flags),
]
|
Add a data migration to make sure we don’t send a lot of emails to old events
# Generated by Django 1.10.5 on 2017-01-07 17:29
from __future__ import unicode_literals
import datetime
from django.db import migrations
from django.utils import timezone
def add_timestamps_to_event(event):
""" This will:
add a thank-you-email-sent timestamp to all events in the past
add a submit-information-email-sent timestamp to all events in the past
"""
today = datetime.date.today()
if event.date < today:
if (
not all((event.date.year, event.date.month, event.date.day)) and
event.date.month == today.month and
event.date.year == today.year
):
# for events that don't have a firm date, if year and month are the same as today then we can't know
# for sure that this event is already over.
return
event.thank_you_email_sent = timezone.now()
event.submit_information_email_sent = timezone.now()
event.save(update_fields=['thank_you_email_sent', 'submit_information_email_sent'])
def set_default_email_flags(apps, schema_editor):
Event = apps.get_model("core", "Event")
for event in Event.objects.all():
add_timestamps_to_event(event)
class Migration(migrations.Migration):
dependencies = [
('core', '0029_auto_20170107_1539'),
]
operations = [
migrations.RunPython(set_default_email_flags),
]
|
<commit_before><commit_msg>Add a data migration to make sure we don’t send a lot of emails to old events<commit_after># Generated by Django 1.10.5 on 2017-01-07 17:29
from __future__ import unicode_literals
import datetime
from django.db import migrations
from django.utils import timezone
def add_timestamps_to_event(event):
""" This will:
add a thank-you-email-sent timestamp to all events in the past
add a submit-information-email-sent timestamp to all events in the past
"""
today = datetime.date.today()
if event.date < today:
if (
not all((event.date.year, event.date.month, event.date.day)) and
event.date.month == today.month and
event.date.year == today.year
):
# for events that don't have a firm date, if year and month are the same as today then we can't know
# for sure that this event is already over.
return
event.thank_you_email_sent = timezone.now()
event.submit_information_email_sent = timezone.now()
event.save(update_fields=['thank_you_email_sent', 'submit_information_email_sent'])
def set_default_email_flags(apps, schema_editor):
Event = apps.get_model("core", "Event")
for event in Event.objects.all():
add_timestamps_to_event(event)
class Migration(migrations.Migration):
dependencies = [
('core', '0029_auto_20170107_1539'),
]
operations = [
migrations.RunPython(set_default_email_flags),
]
|
|
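As written, the RunPython operation above has no reverse function, so the migration cannot be unapplied. A common variant, sketched here with the data logic unchanged, passes Django's built-in no-op as the reverse callable:
operations = [
    migrations.RunPython(set_default_email_flags, migrations.RunPython.noop),
]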
9f74ebb88ea2eef623849b4e954f086caea306e3
|
andalusian/migrations/0006_auto_20190725_1407.py
|
andalusian/migrations/0006_auto_20190725_1407.py
|
# Generated by Django 2.2.1 on 2019-07-25 14:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('andalusian', '0005_auto_20190717_1211'),
]
operations = [
migrations.AlterField(
model_name='form',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='form',
name='transliterated_name',
field=models.TextField(),
),
migrations.AlterField(
model_name='formtype',
name='type',
field=models.TextField(),
),
migrations.AlterField(
model_name='mizan',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='mizan',
name='transliterated_name',
field=models.TextField(),
),
migrations.AlterField(
model_name='nawba',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='nawba',
name='transliterated_name',
field=models.TextField(),
),
migrations.AlterField(
model_name='tab',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='tab',
name='transliterated_name',
field=models.TextField(),
),
]
|
Add new andalusian migration file
|
Add new andalusian migration file
|
Python
|
agpl-3.0
|
MTG/dunya,MTG/dunya,MTG/dunya,MTG/dunya
|
Add new andalusian migration file
|
# Generated by Django 2.2.1 on 2019-07-25 14:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('andalusian', '0005_auto_20190717_1211'),
]
operations = [
migrations.AlterField(
model_name='form',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='form',
name='transliterated_name',
field=models.TextField(),
),
migrations.AlterField(
model_name='formtype',
name='type',
field=models.TextField(),
),
migrations.AlterField(
model_name='mizan',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='mizan',
name='transliterated_name',
field=models.TextField(),
),
migrations.AlterField(
model_name='nawba',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='nawba',
name='transliterated_name',
field=models.TextField(),
),
migrations.AlterField(
model_name='tab',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='tab',
name='transliterated_name',
field=models.TextField(),
),
]
|
<commit_before><commit_msg>Add new andalusian migration file<commit_after>
|
# Generated by Django 2.2.1 on 2019-07-25 14:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('andalusian', '0005_auto_20190717_1211'),
]
operations = [
migrations.AlterField(
model_name='form',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='form',
name='transliterated_name',
field=models.TextField(),
),
migrations.AlterField(
model_name='formtype',
name='type',
field=models.TextField(),
),
migrations.AlterField(
model_name='mizan',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='mizan',
name='transliterated_name',
field=models.TextField(),
),
migrations.AlterField(
model_name='nawba',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='nawba',
name='transliterated_name',
field=models.TextField(),
),
migrations.AlterField(
model_name='tab',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='tab',
name='transliterated_name',
field=models.TextField(),
),
]
|
Add new andalusian migration file
# Generated by Django 2.2.1 on 2019-07-25 14:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('andalusian', '0005_auto_20190717_1211'),
]
operations = [
migrations.AlterField(
model_name='form',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='form',
name='transliterated_name',
field=models.TextField(),
),
migrations.AlterField(
model_name='formtype',
name='type',
field=models.TextField(),
),
migrations.AlterField(
model_name='mizan',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='mizan',
name='transliterated_name',
field=models.TextField(),
),
migrations.AlterField(
model_name='nawba',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='nawba',
name='transliterated_name',
field=models.TextField(),
),
migrations.AlterField(
model_name='tab',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='tab',
name='transliterated_name',
field=models.TextField(),
),
]
|
<commit_before><commit_msg>Add new andalusian migration file<commit_after># Generated by Django 2.2.1 on 2019-07-25 14:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('andalusian', '0005_auto_20190717_1211'),
]
operations = [
migrations.AlterField(
model_name='form',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='form',
name='transliterated_name',
field=models.TextField(),
),
migrations.AlterField(
model_name='formtype',
name='type',
field=models.TextField(),
),
migrations.AlterField(
model_name='mizan',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='mizan',
name='transliterated_name',
field=models.TextField(),
),
migrations.AlterField(
model_name='nawba',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='nawba',
name='transliterated_name',
field=models.TextField(),
),
migrations.AlterField(
model_name='tab',
name='name',
field=models.TextField(),
),
migrations.AlterField(
model_name='tab',
name='transliterated_name',
field=models.TextField(),
),
]
|
|
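The AlterField operations above imply a model-side change along the lines of the sketch below. The model body is illustrative only: the record does not show the prior field type, though an AlterField to TextField typically replaces a length-limited CharField.
from django.db import models
class Nawba(models.Model):
    name = models.TextField()
    transliterated_name = models.TextField()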
ebb0916a7c63c1aaf383c696c203199ca79f70ac
|
nereid/backend.py
|
nereid/backend.py
|
# -*- coding: UTF-8 -*-
'''
nereid.backend
Backed - Tryton specific features
:copyright: (c) 2010-2012 by Openlabs Technologies & Consulting (P) Ltd.
:license: GPLv3, see LICENSE for more details
'''
class TransactionManager(object):
def __init__(self, database_name, user, context=None):
self.database_name = database_name
self.user = user
self.context = context if context is not None else {}
def __enter__(self):
from trytond.transaction import Transaction
Transaction().start(self.database_name, self.user, self.context.copy())
return Transaction()
def __exit__(self, type, value, traceback):
from trytond.transaction import Transaction
Transaction().stop()
|
# -*- coding: UTF-8 -*-
'''
nereid.backend
Backed - Tryton specific features
:copyright: (c) 2010-2012 by Openlabs Technologies & Consulting (P) Ltd.
:license: GPLv3, see LICENSE for more details
'''
class TransactionManager(object):
def __init__(self, database_name, user, context=None):
self.database_name = database_name
self.user = user
self.context = context if context is not None else {}
def __enter__(self):
from trytond.transaction import Transaction
Transaction().start(
self.database_name, self.user,
readonly=False, context=self.context.copy()
)
return Transaction()
def __exit__(self, type, value, traceback):
from trytond.transaction import Transaction
Transaction().stop()
|
Change the way transaction is initiated as readonly support was introduced in version 2.4
|
Change the way transaction is initiated as readonly support was introduced in version 2.4
|
Python
|
bsd-3-clause
|
riteshshrv/nereid,usudaysingh/nereid,usudaysingh/nereid,riteshshrv/nereid,fulfilio/nereid,fulfilio/nereid,prakashpp/nereid,prakashpp/nereid
|
# -*- coding: UTF-8 -*-
'''
nereid.backend
Backed - Tryton specific features
:copyright: (c) 2010-2012 by Openlabs Technologies & Consulting (P) Ltd.
:license: GPLv3, see LICENSE for more details
'''
class TransactionManager(object):
def __init__(self, database_name, user, context=None):
self.database_name = database_name
self.user = user
self.context = context if context is not None else {}
def __enter__(self):
from trytond.transaction import Transaction
Transaction().start(self.database_name, self.user, self.context.copy())
return Transaction()
def __exit__(self, type, value, traceback):
from trytond.transaction import Transaction
Transaction().stop()
Change the way transaction is initiated as readonly support was introduced in version 2.4
|
# -*- coding: UTF-8 -*-
'''
nereid.backend
Backed - Tryton specific features
:copyright: (c) 2010-2012 by Openlabs Technologies & Consulting (P) Ltd.
:license: GPLv3, see LICENSE for more details
'''
class TransactionManager(object):
def __init__(self, database_name, user, context=None):
self.database_name = database_name
self.user = user
self.context = context if context is not None else {}
def __enter__(self):
from trytond.transaction import Transaction
Transaction().start(
self.database_name, self.user,
readonly=False, context=self.context.copy()
)
return Transaction()
def __exit__(self, type, value, traceback):
from trytond.transaction import Transaction
Transaction().stop()
|
<commit_before># -*- coding: UTF-8 -*-
'''
nereid.backend
Backed - Tryton specific features
:copyright: (c) 2010-2012 by Openlabs Technologies & Consulting (P) Ltd.
:license: GPLv3, see LICENSE for more details
'''
class TransactionManager(object):
def __init__(self, database_name, user, context=None):
self.database_name = database_name
self.user = user
self.context = context if context is not None else {}
def __enter__(self):
from trytond.transaction import Transaction
Transaction().start(self.database_name, self.user, self.context.copy())
return Transaction()
def __exit__(self, type, value, traceback):
from trytond.transaction import Transaction
Transaction().stop()
<commit_msg>Change the way transaction is initiated as readonly support was introduced in version 2.4<commit_after>
|
# -*- coding: UTF-8 -*-
'''
nereid.backend
Backed - Tryton specific features
:copyright: (c) 2010-2012 by Openlabs Technologies & Consulting (P) Ltd.
:license: GPLv3, see LICENSE for more details
'''
class TransactionManager(object):
def __init__(self, database_name, user, context=None):
self.database_name = database_name
self.user = user
self.context = context if context is not None else {}
def __enter__(self):
from trytond.transaction import Transaction
Transaction().start(
self.database_name, self.user,
readonly=False, context=self.context.copy()
)
return Transaction()
def __exit__(self, type, value, traceback):
from trytond.transaction import Transaction
Transaction().stop()
|
# -*- coding: UTF-8 -*-
'''
nereid.backend
Backed - Tryton specific features
:copyright: (c) 2010-2012 by Openlabs Technologies & Consulting (P) Ltd.
:license: GPLv3, see LICENSE for more details
'''
class TransactionManager(object):
def __init__(self, database_name, user, context=None):
self.database_name = database_name
self.user = user
self.context = context if context is not None else {}
def __enter__(self):
from trytond.transaction import Transaction
Transaction().start(self.database_name, self.user, self.context.copy())
return Transaction()
def __exit__(self, type, value, traceback):
from trytond.transaction import Transaction
Transaction().stop()
Change the way transaction is initiated as readonly support was introduced in version 2.4
# -*- coding: UTF-8 -*-
'''
nereid.backend
Backed - Tryton specific features
:copyright: (c) 2010-2012 by Openlabs Technologies & Consulting (P) Ltd.
:license: GPLv3, see LICENSE for more details
'''
class TransactionManager(object):
def __init__(self, database_name, user, context=None):
self.database_name = database_name
self.user = user
self.context = context if context is not None else {}
def __enter__(self):
from trytond.transaction import Transaction
Transaction().start(
self.database_name, self.user,
readonly=False, context=self.context.copy()
)
return Transaction()
def __exit__(self, type, value, traceback):
from trytond.transaction import Transaction
Transaction().stop()
|
<commit_before># -*- coding: UTF-8 -*-
'''
nereid.backend
Backed - Tryton specific features
:copyright: (c) 2010-2012 by Openlabs Technologies & Consulting (P) Ltd.
:license: GPLv3, see LICENSE for more details
'''
class TransactionManager(object):
def __init__(self, database_name, user, context=None):
self.database_name = database_name
self.user = user
self.context = context if context is not None else {}
def __enter__(self):
from trytond.transaction import Transaction
Transaction().start(self.database_name, self.user, self.context.copy())
return Transaction()
def __exit__(self, type, value, traceback):
from trytond.transaction import Transaction
Transaction().stop()
<commit_msg>Change the way transaction is initiated as readonly support was introduced in version 2.4<commit_after># -*- coding: UTF-8 -*-
'''
nereid.backend
Backed - Tryton specific features
:copyright: (c) 2010-2012 by Openlabs Technologies & Consulting (P) Ltd.
:license: GPLv3, see LICENSE for more details
'''
class TransactionManager(object):
def __init__(self, database_name, user, context=None):
self.database_name = database_name
self.user = user
self.context = context if context is not None else {}
def __enter__(self):
from trytond.transaction import Transaction
Transaction().start(
self.database_name, self.user,
readonly=False, context=self.context.copy()
)
return Transaction()
def __exit__(self, type, value, traceback):
from trytond.transaction import Transaction
Transaction().stop()
|
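A usage sketch for the TransactionManager context manager above; the database name, user id, and context values are placeholders, and a running Tryton setup is assumed:
with TransactionManager('tryton_db', 0, context={'language': 'en_US'}) as txn:
    # Tryton ORM calls made here run inside the started transaction;
    # __exit__ stops it on the way out, whether or not an exception was raised.
    pass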
cc109dee42d9a221ebba9caae8cc5c8b6bba4351
|
test/test_normalizedString.py
|
test/test_normalizedString.py
|
from rdflib import *
import unittest
class test_normalisedString(unittest.TestCase):
def test1(self):
lit2 = Literal("\two\nw", datatype=XSD.normalizedString)
lit = Literal("\two\nw", datatype=XSD.string)
self.assertEqual(lit == lit2, False)
def test2(self):
lit = Literal("\tBeing a Doctor Is\n\ta Full-Time Job\r", datatype=XSD.normalizedString)
st = Literal(" Being a Doctor Is a Full-Time Job ", datatype=XSD.string)
self.assertFalse(Literal.eq(st,lit))
def test3(self):
lit=Literal("hey\nthere", datatype=XSD.normalizedString).n3()
print(lit)
self.assertTrue(lit=="\"hey there\"^^<http://www.w3.org/2001/XMLSchema#normalizedString>")
if __name__ == "__main__":
unittest.main()
|
Test cases for normalized string
|
Test cases for normalized string
|
Python
|
bsd-3-clause
|
RDFLib/rdflib,RDFLib/rdflib,RDFLib/rdflib,RDFLib/rdflib
|
Test cases for normalized string
|
from rdflib import *
import unittest
class test_normalisedString(unittest.TestCase):
def test1(self):
lit2 = Literal("\two\nw", datatype=XSD.normalizedString)
lit = Literal("\two\nw", datatype=XSD.string)
self.assertEqual(lit == lit2, False)
def test2(self):
lit = Literal("\tBeing a Doctor Is\n\ta Full-Time Job\r", datatype=XSD.normalizedString)
st = Literal(" Being a Doctor Is a Full-Time Job ", datatype=XSD.string)
self.assertFalse(Literal.eq(st,lit))
def test3(self):
lit=Literal("hey\nthere", datatype=XSD.normalizedString).n3()
print(lit)
self.assertTrue(lit=="\"hey there\"^^<http://www.w3.org/2001/XMLSchema#normalizedString>")
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Test cases for normalized string<commit_after>
|
from rdflib import *
import unittest
class test_normalisedString(unittest.TestCase):
def test1(self):
lit2 = Literal("\two\nw", datatype=XSD.normalizedString)
lit = Literal("\two\nw", datatype=XSD.string)
self.assertEqual(lit == lit2, False)
def test2(self):
lit = Literal("\tBeing a Doctor Is\n\ta Full-Time Job\r", datatype=XSD.normalizedString)
st = Literal(" Being a Doctor Is a Full-Time Job ", datatype=XSD.string)
self.assertFalse(Literal.eq(st,lit))
def test3(self):
lit=Literal("hey\nthere", datatype=XSD.normalizedString).n3()
print(lit)
self.assertTrue(lit=="\"hey there\"^^<http://www.w3.org/2001/XMLSchema#normalizedString>")
if __name__ == "__main__":
unittest.main()
|
Test cases for normalized string
from rdflib import *
import unittest
class test_normalisedString(unittest.TestCase):
def test1(self):
lit2 = Literal("\two\nw", datatype=XSD.normalizedString)
lit = Literal("\two\nw", datatype=XSD.string)
self.assertEqual(lit == lit2, False)
def test2(self):
lit = Literal("\tBeing a Doctor Is\n\ta Full-Time Job\r", datatype=XSD.normalizedString)
st = Literal(" Being a Doctor Is a Full-Time Job ", datatype=XSD.string)
self.assertFalse(Literal.eq(st,lit))
def test3(self):
lit=Literal("hey\nthere", datatype=XSD.normalizedString).n3()
print(lit)
self.assertTrue(lit=="\"hey there\"^^<http://www.w3.org/2001/XMLSchema#normalizedString>")
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Test cases for normalized string<commit_after>from rdflib import *
import unittest
class test_normalisedString(unittest.TestCase):
def test1(self):
lit2 = Literal("\two\nw", datatype=XSD.normalizedString)
lit = Literal("\two\nw", datatype=XSD.string)
self.assertEqual(lit == lit2, False)
def test2(self):
lit = Literal("\tBeing a Doctor Is\n\ta Full-Time Job\r", datatype=XSD.normalizedString)
st = Literal(" Being a Doctor Is a Full-Time Job ", datatype=XSD.string)
self.assertFalse(Literal.eq(st,lit))
def test3(self):
lit=Literal("hey\nthere", datatype=XSD.normalizedString).n3()
print(lit)
self.assertTrue(lit=="\"hey there\"^^<http://www.w3.org/2001/XMLSchema#normalizedString>")
if __name__ == "__main__":
unittest.main()
|
|
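The behaviour asserted in test3 can be reproduced directly: rdflib normalises whitespace characters in xsd:normalizedString literals, which shows up in the n3() serialisation.
from rdflib import Literal, XSD
lit = Literal("hey\nthere", datatype=XSD.normalizedString)
print(lit.n3())
# "hey there"^^<http://www.w3.org/2001/XMLSchema#normalizedString>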
fc43f8e71854aee1ac786bdb8555e8eba1510cd1
|
salt/modules/philips_hue.py
|
salt/modules/philips_hue.py
|
# -*- coding: utf-8 -*-
'''
Philips HUE lamps module for proxy.
'''
from __future__ import absolute_import
import sys
__virtualname__ = 'hue'
__proxyenabled__ = ['philips_hue']
def _proxy():
'''
Get proxy.
'''
return __opts__['proxymodule']
def __virtual__():
'''
Start the Philips HUE only for proxies.
'''
def _mkf(cmd_name, doc):
def _cmd(*args, **kw):
return _proxy()[_proxy().loaded_base_name + "." + cmd_name](*args, **kw)
return _cmd
import salt.proxy.philips_hue as hue
for method in dir(hue):
if method.startswith('call_'):
setattr(sys.modules[__name__], method[5:], _mkf(method, getattr(hue, method).__doc__))
del hue
return _proxy() and __virtualname__ or False
|
Implement Philips HUE wrapper caller for Minion Proxy
|
Implement Philips HUE wrapper caller for Minion Proxy
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Implement Philips HUE wrapper caller for Minion Proxy
|
# -*- coding: utf-8 -*-
'''
Philips HUE lamps module for proxy.
'''
from __future__ import absolute_import
import sys
__virtualname__ = 'hue'
__proxyenabled__ = ['philips_hue']
def _proxy():
'''
Get proxy.
'''
return __opts__['proxymodule']
def __virtual__():
'''
Start the Philips HUE only for proxies.
'''
def _mkf(cmd_name, doc):
def _cmd(*args, **kw):
return _proxy()[_proxy().loaded_base_name + "." + cmd_name](*args, **kw)
return _cmd
import salt.proxy.philips_hue as hue
for method in dir(hue):
if method.startswith('call_'):
setattr(sys.modules[__name__], method[5:], _mkf(method, getattr(hue, method).__doc__))
del hue
return _proxy() and __virtualname__ or False
|
<commit_before><commit_msg>Implement Philips HUE wrapper caller for Minion Proxy<commit_after>
|
# -*- coding: utf-8 -*-
'''
Philips HUE lamps module for proxy.
'''
from __future__ import absolute_import
import sys
__virtualname__ = 'hue'
__proxyenabled__ = ['philips_hue']
def _proxy():
'''
Get proxy.
'''
return __opts__['proxymodule']
def __virtual__():
'''
Start the Philips HUE only for proxies.
'''
def _mkf(cmd_name, doc):
def _cmd(*args, **kw):
return _proxy()[_proxy().loaded_base_name + "." + cmd_name](*args, **kw)
return _cmd
import salt.proxy.philips_hue as hue
for method in dir(hue):
if method.startswith('call_'):
setattr(sys.modules[__name__], method[5:], _mkf(method, getattr(hue, method).__doc__))
del hue
return _proxy() and __virtualname__ or False
|
Implement Philips HUE wrapper caller for Minion Proxy
# -*- coding: utf-8 -*-
'''
Philips HUE lamps module for proxy.
'''
from __future__ import absolute_import
import sys
__virtualname__ = 'hue'
__proxyenabled__ = ['philips_hue']
def _proxy():
'''
Get proxy.
'''
return __opts__['proxymodule']
def __virtual__():
'''
Start the Philips HUE only for proxies.
'''
def _mkf(cmd_name, doc):
def _cmd(*args, **kw):
return _proxy()[_proxy().loaded_base_name + "." + cmd_name](*args, **kw)
return _cmd
import salt.proxy.philips_hue as hue
for method in dir(hue):
if method.startswith('call_'):
setattr(sys.modules[__name__], method[5:], _mkf(method, getattr(hue, method).__doc__))
del hue
return _proxy() and __virtualname__ or False
|
<commit_before><commit_msg>Implement Philips HUE wrapper caller for Minion Proxy<commit_after># -*- coding: utf-8 -*-
'''
Philips HUE lamps module for proxy.
'''
from __future__ import absolute_import
import sys
__virtualname__ = 'hue'
__proxyenabled__ = ['philips_hue']
def _proxy():
'''
Get proxy.
'''
return __opts__['proxymodule']
def __virtual__():
'''
Start the Philips HUE only for proxies.
'''
def _mkf(cmd_name, doc):
def _cmd(*args, **kw):
return _proxy()[_proxy().loaded_base_name + "." + cmd_name](*args, **kw)
return _cmd
import salt.proxy.philips_hue as hue
for method in dir(hue):
if method.startswith('call_'):
setattr(sys.modules[__name__], method[5:], _mkf(method, getattr(hue, method).__doc__))
del hue
return _proxy() and __virtualname__ or False
|
|
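A standalone sketch of the closure pattern used in __virtual__ above: each generated function forwards its arguments to a named callable looked up in a registry. A plain dict stands in for the proxy module here, so the sketch runs on its own.
registry = {'base.call_lights': lambda *a, **kw: ('lights', a, kw)}
def _mkf(cmd_name):
    def _cmd(*args, **kw):
        return registry['base.' + cmd_name](*args, **kw)
    return _cmd
lights = _mkf('call_lights')
print(lights(1, on=True))  # ('lights', (1,), {'on': True})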
30672f8800edec0191835303077e93b0110189e2
|
distributionviewer/api/migrations/0006_on_delete_cascade_dataset.py
|
distributionviewer/api/migrations/0006_on_delete_cascade_dataset.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-12-02 19:20
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0005_dataset_display'),
]
operations = [
migrations.RunSQL(
[
"""
ALTER TABLE api_categorycollection
DROP CONSTRAINT api_categorycollection_dataset_id_ef3ff939_fk_api_dataset_id,
ADD CONSTRAINT api_categorycollection_dataset_id_ef3ff939_fk_api_dataset_id
FOREIGN KEY (dataset_id)
REFERENCES api_dataset(id)
ON DELETE CASCADE
DEFERRABLE INITIALLY DEFERRED;
""",
"""
ALTER TABLE api_categorypoint
DROP CONSTRAINT api_categor_collection_id_5d302f37_fk_api_categorycollection_id,
ADD CONSTRAINT api_categor_collection_id_5d302f37_fk_api_categorycollection_id
FOREIGN KEY (collection_id)
REFERENCES api_categorycollection(id)
ON DELETE CASCADE
DEFERRABLE INITIALLY DEFERRED;
""",
"""
ALTER TABLE api_numericcollection
DROP CONSTRAINT api_numericcollection_dataset_id_55539ea8_fk_api_dataset_id,
ADD CONSTRAINT api_numericcollection_dataset_id_55539ea8_fk_api_dataset_id
FOREIGN KEY (dataset_id)
REFERENCES api_dataset(id)
ON DELETE CASCADE
DEFERRABLE INITIALLY DEFERRED;
""",
"""
ALTER TABLE api_numericpoint
DROP CONSTRAINT api_numericp_collection_id_611f1337_fk_api_numericcollection_id,
ADD CONSTRAINT api_numericp_collection_id_611f1337_fk_api_numericcollection_id
FOREIGN KEY (collection_id)
REFERENCES api_numericcollection(id)
ON DELETE CASCADE
DEFERRABLE INITIALLY DEFERRED;
"""
]
)
]
|
Add ON DELETE CASCADE to more easily remove bad datasets
|
Add ON DELETE CASCADE to more easily remove bad datasets
|
Python
|
mpl-2.0
|
openjck/distribution-viewer,openjck/distribution-viewer,openjck/distribution-viewer,openjck/distribution-viewer
|
Add ON DELETE CASCADE to more easily remove bad datasets
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-12-02 19:20
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0005_dataset_display'),
]
operations = [
migrations.RunSQL(
[
"""
ALTER TABLE api_categorycollection
DROP CONSTRAINT api_categorycollection_dataset_id_ef3ff939_fk_api_dataset_id,
ADD CONSTRAINT api_categorycollection_dataset_id_ef3ff939_fk_api_dataset_id
FOREIGN KEY (dataset_id)
REFERENCES api_dataset(id)
ON DELETE CASCADE
DEFERRABLE INITIALLY DEFERRED;
""",
"""
ALTER TABLE api_categorypoint
DROP CONSTRAINT api_categor_collection_id_5d302f37_fk_api_categorycollection_id,
ADD CONSTRAINT api_categor_collection_id_5d302f37_fk_api_categorycollection_id
FOREIGN KEY (collection_id)
REFERENCES api_categorycollection(id)
ON DELETE CASCADE
DEFERRABLE INITIALLY DEFERRED;
""",
"""
ALTER TABLE api_numericcollection
DROP CONSTRAINT api_numericcollection_dataset_id_55539ea8_fk_api_dataset_id,
ADD CONSTRAINT api_numericcollection_dataset_id_55539ea8_fk_api_dataset_id
FOREIGN KEY (dataset_id)
REFERENCES api_dataset(id)
ON DELETE CASCADE
DEFERRABLE INITIALLY DEFERRED;
""",
"""
ALTER TABLE api_numericpoint
DROP CONSTRAINT api_numericp_collection_id_611f1337_fk_api_numericcollection_id,
ADD CONSTRAINT api_numericp_collection_id_611f1337_fk_api_numericcollection_id
FOREIGN KEY (collection_id)
REFERENCES api_numericcollection(id)
ON DELETE CASCADE
DEFERRABLE INITIALLY DEFERRED;
"""
]
)
]
|
<commit_before><commit_msg>Add ON DELETE CASCADE to more easily remove bad datasets<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-12-02 19:20
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0005_dataset_display'),
]
operations = [
migrations.RunSQL(
[
"""
ALTER TABLE api_categorycollection
DROP CONSTRAINT api_categorycollection_dataset_id_ef3ff939_fk_api_dataset_id,
ADD CONSTRAINT api_categorycollection_dataset_id_ef3ff939_fk_api_dataset_id
FOREIGN KEY (dataset_id)
REFERENCES api_dataset(id)
ON DELETE CASCADE
DEFERRABLE INITIALLY DEFERRED;
""",
"""
ALTER TABLE api_categorypoint
DROP CONSTRAINT api_categor_collection_id_5d302f37_fk_api_categorycollection_id,
ADD CONSTRAINT api_categor_collection_id_5d302f37_fk_api_categorycollection_id
FOREIGN KEY (collection_id)
REFERENCES api_categorycollection(id)
ON DELETE CASCADE
DEFERRABLE INITIALLY DEFERRED;
""",
"""
ALTER TABLE api_numericcollection
DROP CONSTRAINT api_numericcollection_dataset_id_55539ea8_fk_api_dataset_id,
ADD CONSTRAINT api_numericcollection_dataset_id_55539ea8_fk_api_dataset_id
FOREIGN KEY (dataset_id)
REFERENCES api_dataset(id)
ON DELETE CASCADE
DEFERRABLE INITIALLY DEFERRED;
""",
"""
ALTER TABLE api_numericpoint
DROP CONSTRAINT api_numericp_collection_id_611f1337_fk_api_numericcollection_id,
ADD CONSTRAINT api_numericp_collection_id_611f1337_fk_api_numericcollection_id
FOREIGN KEY (collection_id)
REFERENCES api_numericcollection(id)
ON DELETE CASCADE
DEFERRABLE INITIALLY DEFERRED;
"""
]
)
]
|
Add ON DELETE CASCADE to more easily remove bad datasets
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-12-02 19:20
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0005_dataset_display'),
]
operations = [
migrations.RunSQL(
[
"""
ALTER TABLE api_categorycollection
DROP CONSTRAINT api_categorycollection_dataset_id_ef3ff939_fk_api_dataset_id,
ADD CONSTRAINT api_categorycollection_dataset_id_ef3ff939_fk_api_dataset_id
FOREIGN KEY (dataset_id)
REFERENCES api_dataset(id)
ON DELETE CASCADE
DEFERRABLE INITIALLY DEFERRED;
""",
"""
ALTER TABLE api_categorypoint
DROP CONSTRAINT api_categor_collection_id_5d302f37_fk_api_categorycollection_id,
ADD CONSTRAINT api_categor_collection_id_5d302f37_fk_api_categorycollection_id
FOREIGN KEY (collection_id)
REFERENCES api_categorycollection(id)
ON DELETE CASCADE
DEFERRABLE INITIALLY DEFERRED;
""",
"""
ALTER TABLE api_numericcollection
DROP CONSTRAINT api_numericcollection_dataset_id_55539ea8_fk_api_dataset_id,
ADD CONSTRAINT api_numericcollection_dataset_id_55539ea8_fk_api_dataset_id
FOREIGN KEY (dataset_id)
REFERENCES api_dataset(id)
ON DELETE CASCADE
DEFERRABLE INITIALLY DEFERRED;
""",
"""
ALTER TABLE api_numericpoint
DROP CONSTRAINT api_numericp_collection_id_611f1337_fk_api_numericcollection_id,
ADD CONSTRAINT api_numericp_collection_id_611f1337_fk_api_numericcollection_id
FOREIGN KEY (collection_id)
REFERENCES api_numericcollection(id)
ON DELETE CASCADE
DEFERRABLE INITIALLY DEFERRED;
"""
]
)
]
|
<commit_before><commit_msg>Add ON DELETE CASCADE to more easily remove bad datasets<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-12-02 19:20
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0005_dataset_display'),
]
operations = [
migrations.RunSQL(
[
"""
ALTER TABLE api_categorycollection
DROP CONSTRAINT api_categorycollection_dataset_id_ef3ff939_fk_api_dataset_id,
ADD CONSTRAINT api_categorycollection_dataset_id_ef3ff939_fk_api_dataset_id
FOREIGN KEY (dataset_id)
REFERENCES api_dataset(id)
ON DELETE CASCADE
DEFERRABLE INITIALLY DEFERRED;
""",
"""
ALTER TABLE api_categorypoint
DROP CONSTRAINT api_categor_collection_id_5d302f37_fk_api_categorycollection_id,
ADD CONSTRAINT api_categor_collection_id_5d302f37_fk_api_categorycollection_id
FOREIGN KEY (collection_id)
REFERENCES api_categorycollection(id)
ON DELETE CASCADE
DEFERRABLE INITIALLY DEFERRED;
""",
"""
ALTER TABLE api_numericcollection
DROP CONSTRAINT api_numericcollection_dataset_id_55539ea8_fk_api_dataset_id,
ADD CONSTRAINT api_numericcollection_dataset_id_55539ea8_fk_api_dataset_id
FOREIGN KEY (dataset_id)
REFERENCES api_dataset(id)
ON DELETE CASCADE
DEFERRABLE INITIALLY DEFERRED;
""",
"""
ALTER TABLE api_numericpoint
DROP CONSTRAINT api_numericp_collection_id_611f1337_fk_api_numericcollection_id,
ADD CONSTRAINT api_numericp_collection_id_611f1337_fk_api_numericcollection_id
FOREIGN KEY (collection_id)
REFERENCES api_numericcollection(id)
ON DELETE CASCADE
DEFERRABLE INITIALLY DEFERRED;
"""
]
)
]
|
|
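Django ordinarily emulates cascading deletes in Python rather than declaring them in the database, which is why this migration rewrites the constraints with raw SQL. The corresponding model-level declaration looks like the sketch below; the model and table names are taken from the SQL above, and the body is otherwise hypothetical.
from django.db import models
class CategoryCollection(models.Model):
    # The DB-level ON DELETE CASCADE above mirrors this ORM-level declaration.
    dataset = models.ForeignKey('Dataset', on_delete=models.CASCADE)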
9258bdf23f0019826c007cdf63ed956ad178ddbd
|
app/deparse.py
|
app/deparse.py
|
feature_order = ['syllabic',
'stress',
'long',
'consonantal',
'sonorant',
'continuant',
'delayedrelease',
'approximant',
'tap',
'trill',
'nasal',
'voice',
'spreadglottis',
'constrictedglottis',
'labial',
'round',
'labiodental',
'coronal',
'anterior',
'distributed',
'strident',
'lateral',
'dorsal',
'high',
'low',
'front',
'back',
'tense']
def feature_string(segment):
'''Convert a Segment object into a feature string.'''
features = []
for feature in feature_order:
if feature in segment.positive:
features.append('+')
elif feature in segment.negative:
features.append('-')
else:
features.append('0')
return ''.join(features)
def deparse_words(words, segments, diacritics):
pass
|
Add segment conversion to feature string
|
Add segment conversion to feature string
|
Python
|
mit
|
kdelwat/LangEvolve,kdelwat/LangEvolve,kdelwat/LangEvolve
|
Add segment conversion to feature string
|
feature_order = ['syllabic',
'stress',
'long',
'consonantal',
'sonorant',
'continuant',
'delayedrelease',
'approximant',
'tap',
'trill',
'nasal',
'voice',
'spreadglottis',
'constrictedglottis',
'labial',
'round',
'labiodental',
'coronal',
'anterior',
'distributed',
'strident',
'lateral',
'dorsal',
'high',
'low',
'front',
'back',
'tense']
def feature_string(segment):
'''Convert a Segment object into a feature string.'''
features = []
for feature in feature_order:
if feature in segment.positive:
features.append('+')
elif feature in segment.negative:
features.append('-')
else:
features.append('0')
return ''.join(features)
def deparse_words(words, segments, diacritics):
pass
|
<commit_before><commit_msg>Add segment conversion to feature string<commit_after>
|
feature_order = ['syllabic',
'stress',
'long',
'consonantal',
'sonorant',
'continuant',
'delayedrelease',
'approximant',
'tap',
'trill',
'nasal',
'voice',
'spreadglottis',
'constrictedglottis',
'labial',
'round',
'labiodental',
'coronal',
'anterior',
'distributed',
'strident',
'lateral',
'dorsal',
'high',
'low',
'front',
'back',
'tense']
def feature_string(segment):
'''Convert a Segment object into a feature string.'''
features = []
for feature in feature_order:
if feature in segment.positive:
features.append('+')
elif feature in segment.negative:
features.append('-')
else:
features.append('0')
return ''.join(features)
def deparse_words(words, segments, diacritics):
pass
|
Add segment conversion to feature string
feature_order = ['syllabic',
'stress',
'long',
'consonantal',
'sonorant',
'continuant',
'delayedrelease',
'approximant',
'tap',
'trill',
'nasal',
'voice',
'spreadglottis',
'constrictedglottis',
'labial',
'round',
'labiodental',
'coronal',
'anterior',
'distributed',
'strident',
'lateral',
'dorsal',
'high',
'low',
'front',
'back',
'tense']
def feature_string(segment):
'''Convert a Segment object into a feature string.'''
features = []
for feature in feature_order:
if feature in segment.positive:
features.append('+')
elif feature in segment.negative:
features.append('-')
else:
features.append('0')
return ''.join(features)
def deparse_words(words, segments, diacritics):
pass
|
<commit_before><commit_msg>Add segment conversion to feature string<commit_after>feature_order = ['syllabic',
'stress',
'long',
'consonantal',
'sonorant',
'continuant',
'delayedrelease',
'approximant',
'tap',
'trill',
'nasal',
'voice',
'spreadglottis',
'constrictedglottis',
'labial',
'round',
'labiodental',
'coronal',
'anterior',
'distributed',
'strident',
'lateral',
'dorsal',
'high',
'low',
'front',
'back',
'tense']
def feature_string(segment):
'''Convert a Segment object into a feature string.'''
features = []
for feature in feature_order:
if feature in segment.positive:
features.append('+')
elif feature in segment.negative:
features.append('-')
else:
features.append('0')
return ''.join(features)
def deparse_words(words, segments, diacritics):
pass
|
|
e52a45901e3a062d4b55c0f3050f2bd4b6b4d08c
|
mies_nwb_viewer.py
|
mies_nwb_viewer.py
|
import acq4
from neuroanalysis.nwb_viewer import MiesNwbViewer, MiesNwb
acq4.pyqtgraph.dbg()
m = acq4.Manager.Manager(argv=['-D', '-n', '-m', 'Data Manager'])
dm = m.getModule('Data Manager')
v = MiesNwbViewer()
v.show()
def load_from_dm():
v.set_nwb(MiesNwb(m.currentFile.name()))
btn = acq4.pyqtgraph.Qt.QtGui.QPushButton('load from data manager')
v.vsplit.insertWidget(0, btn)
btn.clicked.connect(load_from_dm)
|
Add entry script for mies/acq4 analysis
|
Add entry script for mies/acq4 analysis
|
Python
|
mit
|
campagnola/neuroanalysis
|
Add entry script for mies/acq4 analysis
|
import acq4
from neuroanalysis.nwb_viewer import MiesNwbViewer, MiesNwb
acq4.pyqtgraph.dbg()
m = acq4.Manager.Manager(argv=['-D', '-n', '-m', 'Data Manager'])
dm = m.getModule('Data Manager')
v = MiesNwbViewer()
v.show()
def load_from_dm():
v.set_nwb(MiesNwb(m.currentFile.name()))
btn = acq4.pyqtgraph.Qt.QtGui.QPushButton('load from data manager')
v.vsplit.insertWidget(0, btn)
btn.clicked.connect(load_from_dm)
|
<commit_before><commit_msg>Add entry script for mies/acq4 analysis<commit_after>
|
import acq4
from neuroanalysis.nwb_viewer import MiesNwbViewer, MiesNwb
acq4.pyqtgraph.dbg()
m = acq4.Manager.Manager(argv=['-D', '-n', '-m', 'Data Manager'])
dm = m.getModule('Data Manager')
v = MiesNwbViewer()
v.show()
def load_from_dm():
v.set_nwb(MiesNwb(m.currentFile.name()))
btn = acq4.pyqtgraph.Qt.QtGui.QPushButton('load from data manager')
v.vsplit.insertWidget(0, btn)
btn.clicked.connect(load_from_dm)
|
Add entry script for mies/acq4 analysis
import acq4
from neuroanalysis.nwb_viewer import MiesNwbViewer, MiesNwb
acq4.pyqtgraph.dbg()
m = acq4.Manager.Manager(argv=['-D', '-n', '-m', 'Data Manager'])
dm = m.getModule('Data Manager')
v = MiesNwbViewer()
v.show()
def load_from_dm():
v.set_nwb(MiesNwb(m.currentFile.name()))
btn = acq4.pyqtgraph.Qt.QtGui.QPushButton('load from data manager')
v.vsplit.insertWidget(0, btn)
btn.clicked.connect(load_from_dm)
|
<commit_before><commit_msg>Add entry script for mies/acq4 analysis<commit_after>import acq4
from neuroanalysis.nwb_viewer import MiesNwbViewer, MiesNwb
acq4.pyqtgraph.dbg()
m = acq4.Manager.Manager(argv=['-D', '-n', '-m', 'Data Manager'])
dm = m.getModule('Data Manager')
v = MiesNwbViewer()
v.show()
def load_from_dm():
v.set_nwb(MiesNwb(m.currentFile.name()))
btn = acq4.pyqtgraph.Qt.QtGui.QPushButton('load from data manager')
v.vsplit.insertWidget(0, btn)
btn.clicked.connect(load_from_dm)
|
|
80ee01e15ec62b4286b98ecc85c13c398154d7ac
|
py/redundant-connection.py
|
py/redundant-connection.py
|
from collections import defaultdict
class Solution(object):
def findRedundantConnection(self, edges):
"""
:type edges: List[List[int]]
:rtype: List[int]
"""
visited = set()
parent = dict()
neighbors = defaultdict(list)
edge_idx = {tuple(sorted(e)): i for i, e in enumerate(edges)}
def cycle_edge(v1, v2):
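            # walk parent links from v1 back to v2, keeping the cycle edge
            # with the largest input index (the one appearing last in edges)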
e = tuple(sorted([v1, v2]))
max_idx, ans = edge_idx[e], e
v = v1
while v != v2:
e = tuple(sorted([v, parent[v]]))
if edge_idx[e] > max_idx:
max_idx = edge_idx[e]
ans = e
v = parent[v]
return list(ans)
def dfs(cur):
visited.add(cur)
for neighbor in neighbors[cur]:
if neighbor != parent[cur]:
if neighbor in visited:
yield cycle_edge(cur, neighbor)
else:
parent[neighbor] = cur
for x in dfs(neighbor):
yield x
for v1, v2 in edges:
neighbors[v1].append(v2)
neighbors[v2].append(v1)
parent[v1] = -1
return next(dfs(v1))
|
Add py solution for 684. Redundant Connection
|
Add py solution for 684. Redundant Connection
684. Redundant Connection: https://leetcode.com/problems/redundant-connection/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 684. Redundant Connection
684. Redundant Connection: https://leetcode.com/problems/redundant-connection/
|
from collections import defaultdict
class Solution(object):
def findRedundantConnection(self, edges):
"""
:type edges: List[List[int]]
:rtype: List[int]
"""
visited = set()
parent = dict()
neighbors = defaultdict(list)
edge_idx = {tuple(sorted(e)): i for i, e in enumerate(edges)}
def cycle_edge(v1, v2):
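            # walk parent links from v1 back to v2, keeping the cycle edge
            # with the largest input index (the one appearing last in edges)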
e = tuple(sorted([v1, v2]))
max_idx, ans = edge_idx[e], e
v = v1
while v != v2:
e = tuple(sorted([v, parent[v]]))
if edge_idx[e] > max_idx:
max_idx = edge_idx[e]
ans = e
v = parent[v]
return list(ans)
def dfs(cur):
visited.add(cur)
for neighbor in neighbors[cur]:
if neighbor != parent[cur]:
if neighbor in visited:
yield cycle_edge(cur, neighbor)
else:
parent[neighbor] = cur
for x in dfs(neighbor):
yield x
for v1, v2 in edges:
neighbors[v1].append(v2)
neighbors[v2].append(v1)
parent[v1] = -1
return next(dfs(v1))
|
<commit_before><commit_msg>Add py solution for 684. Redundant Connection
684. Redundant Connection: https://leetcode.com/problems/redundant-connection/<commit_after>
|
from collections import defaultdict
class Solution(object):
def findRedundantConnection(self, edges):
"""
:type edges: List[List[int]]
:rtype: List[int]
"""
visited = set()
parent = dict()
neighbors = defaultdict(list)
edge_idx = {tuple(sorted(e)): i for i, e in enumerate(edges)}
def cycle_edge(v1, v2):
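            # walk parent links from v1 back to v2, keeping the cycle edge
            # with the largest input index (the one appearing last in edges)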
e = tuple(sorted([v1, v2]))
max_idx, ans = edge_idx[e], e
v = v1
while v != v2:
e = tuple(sorted([v, parent[v]]))
if edge_idx[e] > max_idx:
max_idx = edge_idx[e]
ans = e
v = parent[v]
return list(ans)
def dfs(cur):
visited.add(cur)
for neighbor in neighbors[cur]:
if neighbor != parent[cur]:
if neighbor in visited:
yield cycle_edge(cur, neighbor)
else:
parent[neighbor] = cur
for x in dfs(neighbor):
yield x
for v1, v2 in edges:
neighbors[v1].append(v2)
neighbors[v2].append(v1)
parent[v1] = -1
return next(dfs(v1))
|
Add py solution for 684. Redundant Connection
684. Redundant Connection: https://leetcode.com/problems/redundant-connection/
from collections import defaultdict
class Solution(object):
def findRedundantConnection(self, edges):
"""
:type edges: List[List[int]]
:rtype: List[int]
"""
visited = set()
parent = dict()
neighbors = defaultdict(list)
edge_idx = {tuple(sorted(e)): i for i, e in enumerate(edges)}
def cycle_edge(v1, v2):
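            # walk parent links from v1 back to v2, keeping the cycle edge
            # with the largest input index (the one appearing last in edges)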
e = tuple(sorted([v1, v2]))
max_idx, ans = edge_idx[e], e
v = v1
while v != v2:
e = tuple(sorted([v, parent[v]]))
if edge_idx[e] > max_idx:
max_idx = edge_idx[e]
ans = e
v = parent[v]
return list(ans)
def dfs(cur):
visited.add(cur)
for neighbor in neighbors[cur]:
if neighbor != parent[cur]:
if neighbor in visited:
yield cycle_edge(cur, neighbor)
else:
parent[neighbor] = cur
for x in dfs(neighbor):
yield x
for v1, v2 in edges:
neighbors[v1].append(v2)
neighbors[v2].append(v1)
parent[v1] = -1
return next(dfs(v1))
|
<commit_before><commit_msg>Add py solution for 684. Redundant Connection
684. Redundant Connection: https://leetcode.com/problems/redundant-connection/<commit_after>from collections import defaultdict
class Solution(object):
def findRedundantConnection(self, edges):
"""
:type edges: List[List[int]]
:rtype: List[int]
"""
visited = set()
parent = dict()
neighbors = defaultdict(list)
edge_idx = {tuple(sorted(e)): i for i, e in enumerate(edges)}
def cycle_edge(v1, v2):
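            # walk parent links from v1 back to v2, keeping the cycle edge
            # with the largest input index (the one appearing last in edges)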
e = tuple(sorted([v1, v2]))
max_idx, ans = edge_idx[e], e
v = v1
while v != v2:
e = tuple(sorted([v, parent[v]]))
if edge_idx[e] > max_idx:
max_idx = edge_idx[e]
ans = e
v = parent[v]
return list(ans)
def dfs(cur):
visited.add(cur)
for neighbor in neighbors[cur]:
if neighbor != parent[cur]:
if neighbor in visited:
yield cycle_edge(cur, neighbor)
else:
parent[neighbor] = cur
for x in dfs(neighbor):
yield x
for v1, v2 in edges:
neighbors[v1].append(v2)
neighbors[v2].append(v1)
parent[v1] = -1
return next(dfs(v1))
|
|
72b3ffe2ca7f735991aca304d42d495484ee3f3d
|
meshnet/serial/connection.py
|
meshnet/serial/connection.py
|
import asyncio
import logging
from meshnet.serial.messages import SerialMessageConsumer
logger = logging.getLogger(__name__)
class SerialBuffer(object):
def __init__(self):
self._buff = bytearray()
def put(self, data):
        self._buff.extend(data)  # extend(), not append(): data is a bytes chunk
def read(self, max_bytes):
ret = self._buff[:max_bytes]
self._buff = self._buff[max_bytes:]
return bytes(ret)
def available(self):
return len(self._buff)
class AioSerial(asyncio.Protocol):
def __init__(self):
self._consumer = SerialMessageConsumer()
self.transport = None
self._buffer = SerialBuffer()
def connection_made(self, transport):
self.transport = transport
logger.info('serial port opened: %s', transport)
def data_received(self, data):
        logger.debug('data received: %s', repr(data))
self._buffer.put(data)
while self._buffer.available() > 0:
packet = self._consumer.consume(self._buffer, max_len=self._buffer.available())
if packet is not None:
self._on_packet(packet)
def _on_packet(self, packet):
# XXX call packet handlers here
pass
def connection_lost(self, exc):
logger.warning("Serial port closed!")
def pause_writing(self):
logger.debug('pause writing, buffer=%d', self.transport.get_write_buffer_size())
def resume_writing(self):
logger.debug('resume writing, buffer=%d', self.transport.get_write_buffer_size())
|
Add some async protocol handling code
|
Add some async protocol handling code
Signed-off-by: Jan Losinski <577c4104c61edf9f052c616c0c23e67bef4a9955@wh2.tu-dresden.de>
|
Python
|
bsd-3-clause
|
janLo/automation_mesh,janLo/automation_mesh,janLo/automation_mesh
|
Add some async protocol handling code
Signed-off-by: Jan Losinski <577c4104c61edf9f052c616c0c23e67bef4a9955@wh2.tu-dresden.de>
|
import asyncio
import logging
from meshnet.serial.messages import SerialMessageConsumer
logger = logging.getLogger(__name__)
class SerialBuffer(object):
def __init__(self):
self._buff = bytearray()
def put(self, data):
        self._buff.extend(data)  # extend(), not append(): data is a bytes chunk
def read(self, max_bytes):
ret = self._buff[:max_bytes]
self._buff = self._buff[max_bytes:]
return bytes(ret)
def available(self):
return len(self._buff)
class AioSerial(asyncio.Protocol):
def __init__(self):
self._consumer = SerialMessageConsumer()
self.transport = None
self._buffer = SerialBuffer()
def connection_made(self, transport):
self.transport = transport
logger.info('serial port opened: %s', transport)
def data_received(self, data):
        logger.debug('data received: %s', repr(data))
self._buffer.put(data)
while self._buffer.available() > 0:
packet = self._consumer.consume(self._buffer, max_len=self._buffer.available())
if packet is not None:
self._on_packet(packet)
def _on_packet(self, packet):
# XXX call packet handlers here
pass
def connection_lost(self, exc):
logger.warning("Serial port closed!")
def pause_writing(self):
logger.debug('pause writing, buffer=%d', self.transport.get_write_buffer_size())
def resume_writing(self):
logger.debug('resume writing, buffer=%d', self.transport.get_write_buffer_size())
|
<commit_before><commit_msg>Add some async protocol handling code
Signed-off-by: Jan Losinski <577c4104c61edf9f052c616c0c23e67bef4a9955@wh2.tu-dresden.de><commit_after>
|
import asyncio
import logging
from meshnet.serial.messages import SerialMessageConsumer
logger = logging.getLogger(__name__)
class SerialBuffer(object):
def __init__(self):
self._buff = bytearray()
def put(self, data):
        self._buff.extend(data)  # extend(), not append(): data is a bytes chunk
def read(self, max_bytes):
ret = self._buff[:max_bytes]
self._buff = self._buff[max_bytes:]
return bytes(ret)
def available(self):
return len(self._buff)
class AioSerial(asyncio.Protocol):
def __init__(self):
self._consumer = SerialMessageConsumer()
self.transport = None
self._buffer = SerialBuffer()
def connection_made(self, transport):
self.transport = transport
logger.info('serial port opened: %s', transport)
def data_received(self, data):
        logger.debug('data received: %s', repr(data))
self._buffer.put(data)
while self._buffer.available() > 0:
packet = self._consumer.consume(self._buffer, max_len=self._buffer.available())
if packet is not None:
self._on_packet(packet)
def _on_packet(self, packet):
# XXX call packet handlers here
pass
def connection_lost(self, exc):
logger.warning("Serial port closed!")
def pause_writing(self):
logger.debug('pause writing, buffer=%d', self.transport.get_write_buffer_size())
def resume_writing(self):
logger.debug('resume writing, buffer=%d', self.transport.get_write_buffer_size())
|
Add some async protocol handling code
Signed-off-by: Jan Losinski <577c4104c61edf9f052c616c0c23e67bef4a9955@wh2.tu-dresden.de>
import asyncio
import logging
from meshnet.serial.messages import SerialMessageConsumer
logger = logging.getLogger(__name__)
class SerialBuffer(object):
def __init__(self):
self._buff = bytearray()
def put(self, data):
        self._buff.extend(data)  # extend(), not append(): data is a bytes chunk
def read(self, max_bytes):
ret = self._buff[:max_bytes]
self._buff = self._buff[max_bytes:]
return bytes(ret)
def available(self):
return len(self._buff)
class AioSerial(asyncio.Protocol):
def __init__(self):
self._consumer = SerialMessageConsumer()
self.transport = None
self._buffer = SerialBuffer()
def connection_made(self, transport):
self.transport = transport
logger.info('serial port opened: %s', transport)
def data_received(self, data):
        logger.debug('data received: %s', repr(data))
self._buffer.put(data)
while self._buffer.available() > 0:
packet = self._consumer.consume(self._buffer, max_len=self._buffer.available())
if packet is not None:
self._on_packet(packet)
def _on_packet(self, packet):
# XXX call packet handlers here
pass
def connection_lost(self, exc):
logger.warning("Serial port closed!")
def pause_writing(self):
logger.debug('pause writing, buffer=%d', self.transport.get_write_buffer_size())
def resume_writing(self):
logger.debug('resume writing, buffer=%d', self.transport.get_write_buffer_size())
|
<commit_before><commit_msg>Add some async protocol handling code
Signed-off-by: Jan Losinski <577c4104c61edf9f052c616c0c23e67bef4a9955@wh2.tu-dresden.de><commit_after>import asyncio
import logging
from meshnet.serial.messages import SerialMessageConsumer
logger = logging.getLogger(__name__)
class SerialBuffer(object):
def __init__(self):
self._buff = bytearray()
def put(self, data):
        self._buff.extend(data)  # extend(), not append(): data is a bytes chunk
def read(self, max_bytes):
ret = self._buff[:max_bytes]
self._buff = self._buff[max_bytes:]
return bytes(ret)
def available(self):
return len(self._buff)
class AioSerial(asyncio.Protocol):
def __init__(self):
self._consumer = SerialMessageConsumer()
self.transport = None
self._buffer = SerialBuffer()
def connection_made(self, transport):
self.transport = transport
logger.info('serial port opened: %s', transport)
def data_received(self, data):
        logger.debug('data received: %s', repr(data))
self._buffer.put(data)
while self._buffer.available() > 0:
packet = self._consumer.consume(self._buffer, max_len=self._buffer.available())
if packet is not None:
self._on_packet(packet)
def _on_packet(self, packet):
# XXX call packet handlers here
pass
def connection_lost(self, exc):
logger.warning("Serial port closed!")
def pause_writing(self):
logger.debug('pause writing, buffer=%d', self.transport.get_write_buffer_size())
def resume_writing(self):
logger.debug('resume writing, buffer=%d', self.transport.get_write_buffer_size())
|
|
657f9f8c4997b6a9e7021830a229178e0881afba
|
shoop/notify/migrations/0002_notification_identifier.py
|
shoop/notify/migrations/0002_notification_identifier.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import shoop.core.fields
class Migration(migrations.Migration):
dependencies = [
('shoop_notify', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='identifier',
field=shoop.core.fields.InternalIdentifierField(unique=False, editable=False, max_length=64, blank=True, null=True),
),
]
|
Add migration to make notification identifier not unique
|
Notify: Add migration to make notification identifier not unique
Refs SHOOP-1282
|
Python
|
agpl-3.0
|
suutari/shoop,shoopio/shoop,shoopio/shoop,shawnadelic/shuup,hrayr-artunyan/shuup,hrayr-artunyan/shuup,suutari/shoop,shawnadelic/shuup,taedori81/shoop,suutari-ai/shoop,jorge-marques/shoop,suutari-ai/shoop,jorge-marques/shoop,suutari/shoop,shawnadelic/shuup,lawzou/shoop,taedori81/shoop,jorge-marques/shoop,lawzou/shoop,shoopio/shoop,akx/shoop,taedori81/shoop,hrayr-artunyan/shuup,akx/shoop,suutari-ai/shoop,lawzou/shoop,akx/shoop
|
Notify: Add migration to make notification identifier not unique
Refs SHOOP-1282
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import shoop.core.fields
class Migration(migrations.Migration):
dependencies = [
('shoop_notify', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='identifier',
field=shoop.core.fields.InternalIdentifierField(unique=False, editable=False, max_length=64, blank=True, null=True),
),
]
|
<commit_before><commit_msg>Notify: Add migration to make notification identifier not unique
Refs SHOOP-1282<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import shoop.core.fields
class Migration(migrations.Migration):
dependencies = [
('shoop_notify', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='identifier',
field=shoop.core.fields.InternalIdentifierField(unique=False, editable=False, max_length=64, blank=True, null=True),
),
]
|
Notify: Add migration to make notification identifier not unique
Refs SHOOP-1282
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import shoop.core.fields
class Migration(migrations.Migration):
dependencies = [
('shoop_notify', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='identifier',
field=shoop.core.fields.InternalIdentifierField(unique=False, editable=False, max_length=64, blank=True, null=True),
),
]
|
<commit_before><commit_msg>Notify: Add migration to make notification identifier not unique
Refs SHOOP-1282<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import shoop.core.fields
class Migration(migrations.Migration):
dependencies = [
('shoop_notify', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='identifier',
field=shoop.core.fields.InternalIdentifierField(unique=False, editable=False, max_length=64, blank=True, null=True),
),
]
|
|
5de41a702f819fca3cea68470912b68dd82b2888
|
alembic/versions/a3fe8c8a344_associate_groups_wit.py
|
alembic/versions/a3fe8c8a344_associate_groups_wit.py
|
"""Associate groups with files.
Revision ID: a3fe8c8a344
Revises: 525162a280bd
Create Date: 2013-11-05 13:55:04.498181
"""
# revision identifiers, used by Alembic.
revision = 'a3fe8c8a344'
down_revision = '525162a280bd'
from alembic import op
from collections import defaultdict
from sqlalchemy.sql import table, column
import sqlalchemy as sa
submission = table('submission',
column('group_id', sa.Integer),
column('id', sa.Integer))
subtofile = table('submissiontofile',
column('file_id', sa.Integer),
column('submission_id'))
usertofile = table('user_to_file',
column('user_id', sa.Integer),
column('file_id', sa.Integer))
usertogroup = table('user_to_group',
column('group_id', sa.Integer),
column('user_id', sa.Integer))
def upgrade():
conn = op.get_bind()
group_files = defaultdict(set)
group_users = defaultdict(list)
sub_to_group = {}
to_add = set()
user_files = defaultdict(set)
# Fetch mapping of users to files
for (user_id, file_id) in conn.execute(usertofile.select()):
user_files[user_id].add(file_id)
# Fetch mapping of groups to users
for (group_id, user_id) in conn.execute(usertogroup.select()):
group_users[group_id].append(user_id)
# Fetch mapping of submissions to groups
for (group_id, sub_id) in conn.execute(submission.select()):
sub_to_group[sub_id] = group_id
# Build mapping of groups to files
for (file_id, sub_id) in conn.execute(subtofile.select()):
group_files[sub_to_group[sub_id]].add(file_id)
# Build set of user to file associations to add
for group_id, files in group_files.items():
for user_id in group_users[group_id]:
for file_id in files - user_files[user_id]:
to_add.add((user_id, file_id))
if to_add:
op.bulk_insert(usertofile,
[{'user_id': x[0], 'file_id': x[1]} for x in to_add])
def downgrade():
pass
|
Write migration to associate group users with the group's files.
|
Write migration to associate group users with the group's files.
|
Python
|
bsd-2-clause
|
ucsb-cs/submit,ucsb-cs/submit,ucsb-cs/submit,ucsb-cs/submit
|
Write migration to associate group users with the group's files.
|
"""Associate groups with files.
Revision ID: a3fe8c8a344
Revises: 525162a280bd
Create Date: 2013-11-05 13:55:04.498181
"""
# revision identifiers, used by Alembic.
revision = 'a3fe8c8a344'
down_revision = '525162a280bd'
from alembic import op
from collections import defaultdict
from sqlalchemy.sql import table, column
import sqlalchemy as sa
submission = table('submission',
column('group_id', sa.Integer),
column('id', sa.Integer))
subtofile = table('submissiontofile',
column('file_id', sa.Integer),
column('submission_id'))
usertofile = table('user_to_file',
column('user_id', sa.Integer),
column('file_id', sa.Integer))
usertogroup = table('user_to_group',
column('group_id', sa.Integer),
column('user_id', sa.Integer))
def upgrade():
conn = op.get_bind()
group_files = defaultdict(set)
group_users = defaultdict(list)
sub_to_group = {}
to_add = set()
user_files = defaultdict(set)
# Fetch mapping of users to files
for (user_id, file_id) in conn.execute(usertofile.select()):
user_files[user_id].add(file_id)
# Fetch mapping of groups to users
for (group_id, user_id) in conn.execute(usertogroup.select()):
group_users[group_id].append(user_id)
# Fetch mapping of submissions to groups
for (group_id, sub_id) in conn.execute(submission.select()):
sub_to_group[sub_id] = group_id
# Build mapping of groups to files
for (file_id, sub_id) in conn.execute(subtofile.select()):
group_files[sub_to_group[sub_id]].add(file_id)
# Build set of user to file associations to add
for group_id, files in group_files.items():
for user_id in group_users[group_id]:
for file_id in files - user_files[user_id]:
to_add.add((user_id, file_id))
if to_add:
op.bulk_insert(usertofile,
[{'user_id': x[0], 'file_id': x[1]} for x in to_add])
def downgrade():
pass
|
<commit_before><commit_msg>Write migration to associate group users with the group's files.<commit_after>
|
"""Associate groups with files.
Revision ID: a3fe8c8a344
Revises: 525162a280bd
Create Date: 2013-11-05 13:55:04.498181
"""
# revision identifiers, used by Alembic.
revision = 'a3fe8c8a344'
down_revision = '525162a280bd'
from alembic import op
from collections import defaultdict
from sqlalchemy.sql import table, column
import sqlalchemy as sa
submission = table('submission',
column('group_id', sa.Integer),
column('id', sa.Integer))
subtofile = table('submissiontofile',
column('file_id', sa.Integer),
column('submission_id'))
usertofile = table('user_to_file',
column('user_id', sa.Integer),
column('file_id', sa.Integer))
usertogroup = table('user_to_group',
column('group_id', sa.Integer),
column('user_id', sa.Integer))
def upgrade():
conn = op.get_bind()
group_files = defaultdict(set)
group_users = defaultdict(list)
sub_to_group = {}
to_add = set()
user_files = defaultdict(set)
# Fetch mapping of users to files
for (user_id, file_id) in conn.execute(usertofile.select()):
user_files[user_id].add(file_id)
# Fetch mapping of groups to users
for (group_id, user_id) in conn.execute(usertogroup.select()):
group_users[group_id].append(user_id)
# Fetch mapping of submissions to groups
for (group_id, sub_id) in conn.execute(submission.select()):
sub_to_group[sub_id] = group_id
# Build mapping of groups to files
for (file_id, sub_id) in conn.execute(subtofile.select()):
group_files[sub_to_group[sub_id]].add(file_id)
# Build set of user to file associations to add
for group_id, files in group_files.items():
for user_id in group_users[group_id]:
for file_id in files - user_files[user_id]:
to_add.add((user_id, file_id))
if to_add:
op.bulk_insert(usertofile,
[{'user_id': x[0], 'file_id': x[1]} for x in to_add])
def downgrade():
pass
|
Write migration to associate group users with the group's files.
"""Associate groups with files.
Revision ID: a3fe8c8a344
Revises: 525162a280bd
Create Date: 2013-11-05 13:55:04.498181
"""
# revision identifiers, used by Alembic.
revision = 'a3fe8c8a344'
down_revision = '525162a280bd'
from alembic import op
from collections import defaultdict
from sqlalchemy.sql import table, column
import sqlalchemy as sa
submission = table('submission',
column('group_id', sa.Integer),
column('id', sa.Integer))
subtofile = table('submissiontofile',
column('file_id', sa.Integer),
column('submission_id'))
usertofile = table('user_to_file',
column('user_id', sa.Integer),
column('file_id', sa.Integer))
usertogroup = table('user_to_group',
column('group_id', sa.Integer),
column('user_id', sa.Integer))
def upgrade():
conn = op.get_bind()
group_files = defaultdict(set)
group_users = defaultdict(list)
sub_to_group = {}
to_add = set()
user_files = defaultdict(set)
# Fetch mapping of users to files
for (user_id, file_id) in conn.execute(usertofile.select()):
user_files[user_id].add(file_id)
# Fetch mapping of groups to users
for (group_id, user_id) in conn.execute(usertogroup.select()):
group_users[group_id].append(user_id)
# Fetch mapping of submissions to groups
for (group_id, sub_id) in conn.execute(submission.select()):
sub_to_group[sub_id] = group_id
# Build mapping of groups to files
for (file_id, sub_id) in conn.execute(subtofile.select()):
group_files[sub_to_group[sub_id]].add(file_id)
# Build set of user to file associations to add
for group_id, files in group_files.items():
for user_id in group_users[group_id]:
for file_id in files - user_files[user_id]:
to_add.add((user_id, file_id))
if to_add:
op.bulk_insert(usertofile,
[{'user_id': x[0], 'file_id': x[1]} for x in to_add])
def downgrade():
pass
|
<commit_before><commit_msg>Write migration to associate group users with the group's files.<commit_after>"""Associate groups with files.
Revision ID: a3fe8c8a344
Revises: 525162a280bd
Create Date: 2013-11-05 13:55:04.498181
"""
# revision identifiers, used by Alembic.
revision = 'a3fe8c8a344'
down_revision = '525162a280bd'
from alembic import op
from collections import defaultdict
from sqlalchemy.sql import table, column
import sqlalchemy as sa
submission = table('submission',
column('group_id', sa.Integer),
column('id', sa.Integer))
subtofile = table('submissiontofile',
column('file_id', sa.Integer),
column('submission_id'))
usertofile = table('user_to_file',
column('user_id', sa.Integer),
column('file_id', sa.Integer))
usertogroup = table('user_to_group',
column('group_id', sa.Integer),
column('user_id', sa.Integer))
def upgrade():
conn = op.get_bind()
group_files = defaultdict(set)
group_users = defaultdict(list)
sub_to_group = {}
to_add = set()
user_files = defaultdict(set)
# Fetch mapping of users to files
for (user_id, file_id) in conn.execute(usertofile.select()):
user_files[user_id].add(file_id)
# Fetch mapping of groups to users
for (group_id, user_id) in conn.execute(usertogroup.select()):
group_users[group_id].append(user_id)
# Fetch mapping of submissions to groups
for (group_id, sub_id) in conn.execute(submission.select()):
sub_to_group[sub_id] = group_id
# Build mapping of groups to files
for (file_id, sub_id) in conn.execute(subtofile.select()):
group_files[sub_to_group[sub_id]].add(file_id)
# Build set of user to file associations to add
for group_id, files in group_files.items():
for user_id in group_users[group_id]:
for file_id in files - user_files[user_id]:
to_add.add((user_id, file_id))
if to_add:
op.bulk_insert(usertofile,
[{'user_id': x[0], 'file_id': x[1]} for x in to_add])
def downgrade():
pass
|
|
cc57a70be1e5b9da6aca69f4728291214c353469
|
self-post-stream/stream.py
|
self-post-stream/stream.py
|
import praw
r = praw.Reddit(user_agent='stream only self posts from a sub by /u/km97')
posts = [post for post in r.get_subreddit('all').get_hot(limit=50) if post.is_self]
# print(dir(posts[0]))
for post in posts:
print("Title:", post.title)
print("Score: {}, Comments: {}".format(post.score, post.num_comments))
print()
print(post.selftext.replace('**', '').replace('*', ''))
print()
print("Link:", post.permalink)
print('=' * 30)
|
Add praw with post generator for selfposts
|
Add praw with post generator for selfposts
|
Python
|
mit
|
kshvmdn/reddit-bots
|
Add praw with post generator for selfposts
|
import praw
r = praw.Reddit(user_agent='stream only self posts from a sub by /u/km97')
posts = [post for post in r.get_subreddit('all').get_hot(limit=50) if post.is_self]
# print(dir(posts[0]))
for post in posts:
print("Title:", post.title)
print("Score: {}, Comments: {}".format(post.score, post.num_comments))
print()
print(post.selftext.replace('**', '').replace('*', ''))
print()
print("Link:", post.permalink)
print('=' * 30)
|
<commit_before><commit_msg>Add praw with post generator for selfposts<commit_after>
|
import praw
r = praw.Reddit(user_agent='stream only self posts from a sub by /u/km97')
posts = [post for post in r.get_subreddit('all').get_hot(limit=50) if post.is_self]
# print(dir(posts[0]))
for post in posts:
print("Title:", post.title)
print("Score: {}, Comments: {}".format(post.score, post.num_comments))
print()
print(post.selftext.replace('**', '').replace('*', ''))
print()
print("Link:", post.permalink)
print('=' * 30)
|
Add praw with post generator for selfposts
import praw
r = praw.Reddit(user_agent='stream only self posts from a sub by /u/km97')
posts = [post for post in r.get_subreddit('all').get_hot(limit=50) if post.is_self]
# print(dir(posts[0]))
for post in posts:
print("Title:", post.title)
print("Score: {}, Comments: {}".format(post.score, post.num_comments))
print()
print(post.selftext.replace('**', '').replace('*', ''))
print()
print("Link:", post.permalink)
print('=' * 30)
|
<commit_before><commit_msg>Add praw with post generator for selfposts<commit_after>import praw
r = praw.Reddit(user_agent='stream only self posts from a sub by /u/km97')
posts = [post for post in r.get_subreddit('all').get_hot(limit=50) if post.is_self]
# print(dir(posts[0]))
for post in posts:
print("Title:", post.title)
print("Score: {}, Comments: {}".format(post.score, post.num_comments))
print()
print(post.selftext.replace('**', '').replace('*', ''))
print()
print("Link:", post.permalink)
print('=' * 30)
|
|
c0f5554e2259055c423098f6f6a83fc99e4d0789
|
solutions/uri/1015/1015.py
|
solutions/uri/1015/1015.py
|
from math import pow, sqrt
x1, y1 = map(float, input().split())
x2, y2 = map(float, input().split())
distance = sqrt(pow(x2 - x1, 2) + pow(y2 - y1, 2))
print(f"{distance:.4f}")
|
Solve Distance Between Two Points in python
|
Solve Distance Between Two Points in python
|
Python
|
mit
|
deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground
|
Solve Distance Between Two Points in python
|
from math import pow, sqrt
x1, y1 = map(float, input().split())
x2, y2 = map(float, input().split())
distance = sqrt(pow(x2 - x1, 2) + pow(y2 - y1, 2))
print(f"{distance:.4f}")
|
<commit_before><commit_msg>Solve Distance Between Two Points in python<commit_after>
|
from math import pow, sqrt
x1, y1 = map(float, input().split())
x2, y2 = map(float, input().split())
distance = sqrt(pow(x2 - x1, 2) + pow(y2 - y1, 2))
print(f"{distance:.4f}")
|
Solve Distance Between Two Points in python
from math import pow, sqrt
x1, y1 = map(float, input().split())
x2, y2 = map(float, input().split())
distance = sqrt(pow(x2 - x1, 2) + pow(y2 - y1, 2))
print(f"{distance:.4f}")
|
<commit_before><commit_msg>Solve Distance Between Two Points in python<commit_after>from math import pow, sqrt
x1, y1 = map(float, input().split())
x2, y2 = map(float, input().split())
distance = sqrt(pow(x2 - x1, 2) + pow(y2 - y1, 2))
print(f"{distance:.4f}")
|
|
907caa83f4aed45ba31c6aa796e7bcb589521abc
|
tests/test_docker_stream_adapter.py
|
tests/test_docker_stream_adapter.py
|
import unittest
from girder_worker.docker.stream_adapter import DockerStreamPushAdapter
class CaptureAdapter(object):
def __init__(self):
self._captured = ''
def write(self, data):
self._captured += data
def captured(self):
return self._captured
class TestDemultiplexerPushAdapter(unittest.TestCase):
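    # Docker multiplexes streams with an 8-byte frame header: one byte for
    # the stream type (1 = stdout, 2 = stderr), three padding bytes, then a
    # 4-byte big-endian payload length (0x14 == 20 bytes in the data below).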
def testSinglePayload(self):
data = [
'\x02\x00\x00\x00\x00\x00\x00\x14this is stderr data\n'
]
capture = CaptureAdapter()
adapter = DockerStreamPushAdapter(capture)
for d in data:
adapter.write(d)
self.assertEqual(capture.captured(), 'this is stderr data\n')
def testAdapterBrokenUp(self):
data = [
'\x02\x00\x00\x00', '\x00\x00' '\x00\x14', 'this is stderr data\n'
]
capture = CaptureAdapter()
adapter = DockerStreamPushAdapter(capture)
for d in data:
adapter.write(d)
self.assertEqual(capture.captured(), 'this is stderr data\n')
def testMultiplePayload(self):
data = [
'\x02\x00\x00\x00\x00\x00\x00\x14this is stderr data\n',
'\x01\x00\x00\x00\x00\x00\x00\x14this is stdout data\n',
'\x01\x00\x00\x00\x00\x00\x00\x0chello world!'
]
capture = CaptureAdapter()
adapter = DockerStreamPushAdapter(capture)
for d in data:
adapter.write(d)
self.assertEqual(capture.captured(),
'this is stderr data\nthis is stdout data\nhello world!')
def testMultiplePayloadOneRead(self):
data = [
'\x02\x00\x00\x00\x00\x00\x00\x14this is stderr data\n' +
'\x01\x00\x00\x00\x00\x00\x00\x14this is stdout data\n' +
'\x01\x00\x00\x00\x00\x00\x00\x0chello world!'
]
capture = CaptureAdapter()
adapter = DockerStreamPushAdapter(capture)
for d in data:
adapter.write(d)
self.assertEqual(capture.captured(),
'this is stderr data\nthis is stdout data\nhello world!')
|
Move docker_stream_adapter tests from plugins/ into modern testsuite
|
Move docker_stream_adapter tests from plugins/ into modern testsuite
|
Python
|
apache-2.0
|
girder/girder_worker,girder/girder_worker,girder/girder_worker
|
Move docker_stream_adapter tests from plugins/ into modern testsuite
|
import unittest
from girder_worker.docker.stream_adapter import DockerStreamPushAdapter
class CaptureAdapter(object):
def __init__(self):
self._captured = ''
def write(self, data):
self._captured += data
def captured(self):
return self._captured
class TestDemultiplexerPushAdapter(unittest.TestCase):
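    # Docker multiplexes streams with an 8-byte frame header: one byte for
    # the stream type (1 = stdout, 2 = stderr), three padding bytes, then a
    # 4-byte big-endian payload length (0x14 == 20 bytes in the data below).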
def testSinglePayload(self):
data = [
'\x02\x00\x00\x00\x00\x00\x00\x14this is stderr data\n'
]
capture = CaptureAdapter()
adapter = DockerStreamPushAdapter(capture)
for d in data:
adapter.write(d)
self.assertEqual(capture.captured(), 'this is stderr data\n')
def testAdapterBrokenUp(self):
data = [
'\x02\x00\x00\x00', '\x00\x00' '\x00\x14', 'this is stderr data\n'
]
capture = CaptureAdapter()
adapter = DockerStreamPushAdapter(capture)
for d in data:
adapter.write(d)
self.assertEqual(capture.captured(), 'this is stderr data\n')
def testMultiplePayload(self):
data = [
'\x02\x00\x00\x00\x00\x00\x00\x14this is stderr data\n',
'\x01\x00\x00\x00\x00\x00\x00\x14this is stdout data\n',
'\x01\x00\x00\x00\x00\x00\x00\x0chello world!'
]
capture = CaptureAdapter()
adapter = DockerStreamPushAdapter(capture)
for d in data:
adapter.write(d)
self.assertEqual(capture.captured(),
'this is stderr data\nthis is stdout data\nhello world!')
def testMultiplePayloadOneRead(self):
data = [
'\x02\x00\x00\x00\x00\x00\x00\x14this is stderr data\n' +
'\x01\x00\x00\x00\x00\x00\x00\x14this is stdout data\n' +
'\x01\x00\x00\x00\x00\x00\x00\x0chello world!'
]
capture = CaptureAdapter()
adapter = DockerStreamPushAdapter(capture)
for d in data:
adapter.write(d)
self.assertEqual(capture.captured(),
'this is stderr data\nthis is stdout data\nhello world!')
|
<commit_before><commit_msg>Move docker_stream_adapter tests from plugins/ into modern testsuite<commit_after>
|
import unittest
from girder_worker.docker.stream_adapter import DockerStreamPushAdapter
class CaptureAdapter(object):
def __init__(self):
self._captured = ''
def write(self, data):
self._captured += data
def captured(self):
return self._captured
class TestDemultiplexerPushAdapter(unittest.TestCase):
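    # Docker multiplexes streams with an 8-byte frame header: one byte for
    # the stream type (1 = stdout, 2 = stderr), three padding bytes, then a
    # 4-byte big-endian payload length (0x14 == 20 bytes in the data below).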
def testSinglePayload(self):
data = [
'\x02\x00\x00\x00\x00\x00\x00\x14this is stderr data\n'
]
capture = CaptureAdapter()
adapter = DockerStreamPushAdapter(capture)
for d in data:
adapter.write(d)
self.assertEqual(capture.captured(), 'this is stderr data\n')
def testAdapterBrokenUp(self):
data = [
'\x02\x00\x00\x00', '\x00\x00' '\x00\x14', 'this is stderr data\n'
]
capture = CaptureAdapter()
adapter = DockerStreamPushAdapter(capture)
for d in data:
adapter.write(d)
self.assertEqual(capture.captured(), 'this is stderr data\n')
def testMultiplePayload(self):
data = [
'\x02\x00\x00\x00\x00\x00\x00\x14this is stderr data\n',
'\x01\x00\x00\x00\x00\x00\x00\x14this is stdout data\n',
'\x01\x00\x00\x00\x00\x00\x00\x0chello world!'
]
capture = CaptureAdapter()
adapter = DockerStreamPushAdapter(capture)
for d in data:
adapter.write(d)
self.assertEqual(capture.captured(),
'this is stderr data\nthis is stdout data\nhello world!')
def testMultiplePayloadOneRead(self):
data = [
'\x02\x00\x00\x00\x00\x00\x00\x14this is stderr data\n' +
'\x01\x00\x00\x00\x00\x00\x00\x14this is stdout data\n' +
'\x01\x00\x00\x00\x00\x00\x00\x0chello world!'
]
capture = CaptureAdapter()
adapter = DockerStreamPushAdapter(capture)
for d in data:
adapter.write(d)
self.assertEqual(capture.captured(),
'this is stderr data\nthis is stdout data\nhello world!')
|
Move docker_stream_adapter tests from plugins/ into modern testsuite
import unittest
from girder_worker.docker.stream_adapter import DockerStreamPushAdapter
class CaptureAdapter(object):
def __init__(self):
self._captured = ''
def write(self, data):
self._captured += data
def captured(self):
return self._captured
class TestDemultiplexerPushAdapter(unittest.TestCase):
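    # Docker multiplexes streams with an 8-byte frame header: one byte for
    # the stream type (1 = stdout, 2 = stderr), three padding bytes, then a
    # 4-byte big-endian payload length (0x14 == 20 bytes in the data below).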
def testSinglePayload(self):
data = [
'\x02\x00\x00\x00\x00\x00\x00\x14this is stderr data\n'
]
capture = CaptureAdapter()
adapter = DockerStreamPushAdapter(capture)
for d in data:
adapter.write(d)
self.assertEqual(capture.captured(), 'this is stderr data\n')
def testAdapterBrokenUp(self):
data = [
'\x02\x00\x00\x00', '\x00\x00' '\x00\x14', 'this is stderr data\n'
]
capture = CaptureAdapter()
adapter = DockerStreamPushAdapter(capture)
for d in data:
adapter.write(d)
self.assertEqual(capture.captured(), 'this is stderr data\n')
def testMultiplePayload(self):
data = [
'\x02\x00\x00\x00\x00\x00\x00\x14this is stderr data\n',
'\x01\x00\x00\x00\x00\x00\x00\x14this is stdout data\n',
'\x01\x00\x00\x00\x00\x00\x00\x0chello world!'
]
capture = CaptureAdapter()
adapter = DockerStreamPushAdapter(capture)
for d in data:
adapter.write(d)
self.assertEqual(capture.captured(),
'this is stderr data\nthis is stdout data\nhello world!')
def testMultiplePayloadOneRead(self):
data = [
'\x02\x00\x00\x00\x00\x00\x00\x14this is stderr data\n' +
'\x01\x00\x00\x00\x00\x00\x00\x14this is stdout data\n' +
'\x01\x00\x00\x00\x00\x00\x00\x0chello world!'
]
capture = CaptureAdapter()
adapter = DockerStreamPushAdapter(capture)
for d in data:
adapter.write(d)
self.assertEqual(capture.captured(),
'this is stderr data\nthis is stdout data\nhello world!')
|
<commit_before><commit_msg>Move docker_stream_adapter tests from plugins/ into modern testsuite<commit_after>import unittest
from girder_worker.docker.stream_adapter import DockerStreamPushAdapter
class CaptureAdapter(object):
def __init__(self):
self._captured = ''
def write(self, data):
self._captured += data
def captured(self):
return self._captured
class TestDemultiplexerPushAdapter(unittest.TestCase):
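    # Docker multiplexes streams with an 8-byte frame header: one byte for
    # the stream type (1 = stdout, 2 = stderr), three padding bytes, then a
    # 4-byte big-endian payload length (0x14 == 20 bytes in the data below).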
def testSinglePayload(self):
data = [
'\x02\x00\x00\x00\x00\x00\x00\x14this is stderr data\n'
]
capture = CaptureAdapter()
adapter = DockerStreamPushAdapter(capture)
for d in data:
adapter.write(d)
self.assertEqual(capture.captured(), 'this is stderr data\n')
def testAdapterBrokenUp(self):
data = [
'\x02\x00\x00\x00', '\x00\x00' '\x00\x14', 'this is stderr data\n'
]
capture = CaptureAdapter()
adapter = DockerStreamPushAdapter(capture)
for d in data:
adapter.write(d)
self.assertEqual(capture.captured(), 'this is stderr data\n')
def testMultiplePayload(self):
data = [
'\x02\x00\x00\x00\x00\x00\x00\x14this is stderr data\n',
'\x01\x00\x00\x00\x00\x00\x00\x14this is stdout data\n',
'\x01\x00\x00\x00\x00\x00\x00\x0chello world!'
]
capture = CaptureAdapter()
adapter = DockerStreamPushAdapter(capture)
for d in data:
adapter.write(d)
self.assertEqual(capture.captured(),
'this is stderr data\nthis is stdout data\nhello world!')
def testMultiplePayloadOneRead(self):
data = [
'\x02\x00\x00\x00\x00\x00\x00\x14this is stderr data\n' +
'\x01\x00\x00\x00\x00\x00\x00\x14this is stdout data\n' +
'\x01\x00\x00\x00\x00\x00\x00\x0chello world!'
]
capture = CaptureAdapter()
adapter = DockerStreamPushAdapter(capture)
for d in data:
adapter.write(d)
self.assertEqual(capture.captured(),
'this is stderr data\nthis is stdout data\nhello world!')
|
|
733d0ed4b39c2632129d7604474995badfe0321d
|
solutions/uri/1025/1025.py
|
solutions/uri/1025/1025.py
|
import sys
def binary_search(marbles, query):
begin = 0
end = len(marbles) - 1
middle = end // 2
while begin <= end:
if marbles[middle] < query:
begin = middle + 1
middle = (end + begin) // 2
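        # an equal marble at middle - 1 forces the search left, so the
        # returned 1-based position is the first occurrence of the value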
elif marbles[middle] > query or (
middle > 0 and marbles[middle - 1] == query
):
end = middle - 1
middle = (end + begin) // 2
else:
return middle + 1
return -1
case = 1
for line in sys.stdin:
n, q = map(int, line.strip().split())
if n == 0 and q == 0:
break
marbles = []
while n:
marble = int(sys.stdin.readline())
marbles.append(marble)
n -= 1
marbles.sort()
print(f'CASE# {case}:')
while q:
query = int(sys.stdin.readline())
position = binary_search(marbles, query)
if position == -1:
print(f'{query} not found')
else:
print(f'{query} found at {position}')
q -= 1
case += 1
|
Solve Where is the Marble? in python
|
Solve Where is the Marble? in python
|
Python
|
mit
|
deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground
|
Solve Where is the Marble? in python
|
import sys
def binary_search(marbles, query):
begin = 0
end = len(marbles) - 1
middle = end // 2
while begin <= end:
if marbles[middle] < query:
begin = middle + 1
middle = (end + begin) // 2
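        # an equal marble at middle - 1 forces the search left, so the
        # returned 1-based position is the first occurrence of the value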
elif marbles[middle] > query or (
middle > 0 and marbles[middle - 1] == query
):
end = middle - 1
middle = (end + begin) // 2
else:
return middle + 1
return -1
case = 1
for line in sys.stdin:
n, q = map(int, line.strip().split())
if n == 0 and q == 0:
break
marbles = []
while n:
marble = int(sys.stdin.readline())
marbles.append(marble)
n -= 1
marbles.sort()
print(f'CASE# {case}:')
while q:
query = int(sys.stdin.readline())
position = binary_search(marbles, query)
if position == -1:
print(f'{query} not found')
else:
print(f'{query} found at {position}')
q -= 1
case += 1
|
<commit_before><commit_msg>Solve Where is the Marble? in python<commit_after>
|
import sys
def binary_search(marbles, query):
begin = 0
end = len(marbles) - 1
middle = end // 2
while begin <= end:
if marbles[middle] < query:
begin = middle + 1
middle = (end + begin) // 2
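        # an equal marble at middle - 1 forces the search left, so the
        # returned 1-based position is the first occurrence of the value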
elif marbles[middle] > query or (
middle > 0 and marbles[middle - 1] == query
):
end = middle - 1
middle = (end + begin) // 2
else:
return middle + 1
return -1
case = 1
for line in sys.stdin:
n, q = map(int, line.strip().split())
if n == 0 and q == 0:
break
marbles = []
while n:
marble = int(sys.stdin.readline())
marbles.append(marble)
n -= 1
marbles.sort()
print(f'CASE# {case}:')
while q:
query = int(sys.stdin.readline())
position = binary_search(marbles, query)
if position == -1:
print(f'{query} not found')
else:
print(f'{query} found at {position}')
q -= 1
case += 1
|
Solve Where is the Marble? in python
import sys
def binary_search(marbles, query):
begin = 0
end = len(marbles) - 1
middle = end // 2
while begin <= end:
if marbles[middle] < query:
begin = middle + 1
middle = (end + begin) // 2
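        # an equal marble at middle - 1 forces the search left, so the
        # returned 1-based position is the first occurrence of the value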
elif marbles[middle] > query or (
middle > 0 and marbles[middle - 1] == query
):
end = middle - 1
middle = (end + begin) // 2
else:
return middle + 1
return -1
case = 1
for line in sys.stdin:
n, q = map(int, line.strip().split())
if n == 0 and q == 0:
break
marbles = []
while n:
marble = int(sys.stdin.readline())
marbles.append(marble)
n -= 1
marbles.sort()
print(f'CASE# {case}:')
while q:
query = int(sys.stdin.readline())
position = binary_search(marbles, query)
if position == -1:
print(f'{query} not found')
else:
print(f'{query} found at {position}')
q -= 1
case += 1
|
<commit_before><commit_msg>Solve Where is the Marble? in python<commit_after>import sys
def binary_search(marbles, query):
begin = 0
end = len(marbles) - 1
middle = end // 2
while begin <= end:
if marbles[middle] < query:
begin = middle + 1
middle = (end + begin) // 2
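        # an equal marble at middle - 1 forces the search left, so the
        # returned 1-based position is the first occurrence of the value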
elif marbles[middle] > query or (
middle > 0 and marbles[middle - 1] == query
):
end = middle - 1
middle = (end + begin) // 2
else:
return middle + 1
return -1
case = 1
for line in sys.stdin:
n, q = map(int, line.strip().split())
if n == 0 and q == 0:
break
marbles = []
while n:
marble = int(sys.stdin.readline())
marbles.append(marble)
n -= 1
marbles.sort()
print(f'CASE# {case}:')
while q:
query = int(sys.stdin.readline())
position = binary_search(marbles, query)
if position == -1:
print(f'{query} not found')
else:
print(f'{query} found at {position}')
q -= 1
case += 1
|
|
c5852893e3b3dccf94e4c2845d5cb773b07d084f
|
gtkmvco/tests/container_observation.py
|
gtkmvco/tests/container_observation.py
|
# This tests the observation of observables into lists and maps.
# This test should be converted to unittest
import _importer
from gtkmvc import Model, Observer, Observable
# ----------------------------------------------------------------------
# An ad-hoc class which has a changing method 'change'
class MyObservable (Observable):
def __init__(self, name):
# name is used to distinguish instances in verbosity
Observable.__init__(self)
self.name = name
return
@Observable.observed
def change(self):
print "called change:", self.name
return
pass # end of class
# ----------------------------------------------------------------------
class MyModel (Model):
    # this model contains only dynamically declared observables into
    # the list and the map.
    def __init__(self):
        Model.__init__(self)
        # self.list and self.map are not observable here, although
        # they might be made observable of course.
self.list = [ MyObservable("ob.%02d" % i) for i in range(5) ]
self.map = { 'k0' : MyObservable("k0"),
'k1' : [MyObservable("k1[0]"), MyObservable("k1[1]")],
}
for i in range(len(self.list)):
self.register_property("list[%d]" % i)
pass
        # notice the way keys are represented: map[k0], and not
# map['k0']. This may change (TBD):
self.register_property("map[k0]")
self.register_property("map[k1][0]")
self.register_property("map[k1][1]")
return
pass # end of class
# ----------------------------------------------------------------------
class MyObserver (Observer):
# The observer exploits both dynamic and static declaration of
# notification methods.
def __init__(self, m):
        # notice that the observation is delayed here, as in 1.99.1
        # dynamic observation works only before the model registration.
Observer.__init__(self)
# dynamically observes list[0]
self.observe(self.content_changed, "list[0]", before=True)
# dynamically observes map[k0]
self.observe(self.content_changed, "map[k0]", before=True)
self.observe_model(m)
return
# statically observes list[1] and list[3]
@Observer.observe("list[1]", after=True)
@Observer.observe("list[3]", before=True)
@Observer.observe("map[k1][0]", after=True)
def content_changed(self, model, name, info):
print "Observer:", model, name, info
return
pass # end of class
# ----------------------------------------------------------------------
if "__main__" == __name__:
m = MyModel()
o = MyObserver(m)
# change the list's content
for o in m.list: o.change()
# change the map's content
m.map['k0'].change()
for i in range(2): m.map['k1'][i].change()
pass
|
TEST (Still not automatic, to be converted eventually.)
|
TEST
(Still not automatic, to be converted eventually.)
Test/example for experimental feature introduced in r283
|
Python
|
lgpl-2.1
|
roboogle/gtkmvc3,roboogle/gtkmvc3
|
TEST
(Still not automatic, to be converted eventually.)
Test/example for experimental feature introduced in r283
|
# This tests the observation of observables into lists and maps.
# This test should be converted to unittest
import _importer
from gtkmvc import Model, Observer, Observable
# ----------------------------------------------------------------------
# An ad-hoc class which has a changing method 'change'
class MyObservable (Observable):
def __init__(self, name):
# name is used to distinguish instances in verbosity
Observable.__init__(self)
self.name = name
return
@Observable.observed
def change(self):
print "called change:", self.name
return
pass # end of class
# ----------------------------------------------------------------------
class MyModel (Model):
    # this model contains only dynamically declared observables into
    # the list and the map.
    def __init__(self):
        Model.__init__(self)
        # self.list and self.map are not observable here, although
        # they might be made observable of course.
self.list = [ MyObservable("ob.%02d" % i) for i in range(5) ]
self.map = { 'k0' : MyObservable("k0"),
'k1' : [MyObservable("k1[0]"), MyObservable("k1[1]")],
}
for i in range(len(self.list)):
self.register_property("list[%d]" % i)
pass
        # notice the way keys are represented: map[k0], and not
# map['k0']. This may change (TBD):
self.register_property("map[k0]")
self.register_property("map[k1][0]")
self.register_property("map[k1][1]")
return
pass # end of class
# ----------------------------------------------------------------------
class MyObserver (Observer):
# The observer exploits both dynamic and static declaration of
# notification methods.
def __init__(self, m):
        # notice that the observation is delayed here, as in 1.99.1
        # dynamic observation works only before the model registration.
Observer.__init__(self)
# dynamically observes list[0]
self.observe(self.content_changed, "list[0]", before=True)
# dynamically observes map[k0]
self.observe(self.content_changed, "map[k0]", before=True)
self.observe_model(m)
return
# statically observes list[1] and list[3]
@Observer.observe("list[1]", after=True)
@Observer.observe("list[3]", before=True)
@Observer.observe("map[k1][0]", after=True)
def content_changed(self, model, name, info):
print "Observer:", model, name, info
return
pass # end of class
# ----------------------------------------------------------------------
if "__main__" == __name__:
m = MyModel()
o = MyObserver(m)
# change the list's content
for o in m.list: o.change()
# change the map's content
m.map['k0'].change()
for i in range(2): m.map['k1'][i].change()
pass
|
<commit_before><commit_msg>TEST
(Still not automatic, to be converted eventually.)
Test/example for experimental feature introduced in r283<commit_after>
|
# This tests the observation of observables into lists and maps.
# This test should be converted to unittest
import _importer
from gtkmvc import Model, Observer, Observable
# ----------------------------------------------------------------------
# An ad-hoc class which has a changing method 'change'
class MyObservable (Observable):
def __init__(self, name):
# name is used to distinguish instances in verbosity
Observable.__init__(self)
self.name = name
return
@Observable.observed
def change(self):
print "called change:", self.name
return
pass # end of class
# ----------------------------------------------------------------------
class MyModel (Model):
    # this model contains only dynamically declared observables into
    # the list and the map.
    def __init__(self):
        Model.__init__(self)
        # self.list and self.map are not observable here, although
        # they might be made observable of course.
self.list = [ MyObservable("ob.%02d" % i) for i in range(5) ]
self.map = { 'k0' : MyObservable("k0"),
'k1' : [MyObservable("k1[0]"), MyObservable("k1[1]")],
}
for i in range(len(self.list)):
self.register_property("list[%d]" % i)
pass
        # notice the way keys are represented: map[k0], and not
# map['k0']. This may change (TBD):
self.register_property("map[k0]")
self.register_property("map[k1][0]")
self.register_property("map[k1][1]")
return
pass # end of class
# ----------------------------------------------------------------------
class MyObserver (Observer):
# The observer exploits both dynamic and static declaration of
# notification methods.
def __init__(self, m):
        # notice that the observation is delayed here, as in 1.99.1
        # dynamic observation works only before the model registration.
Observer.__init__(self)
# dynamically observes list[0]
self.observe(self.content_changed, "list[0]", before=True)
# dynamically observes map[k0]
self.observe(self.content_changed, "map[k0]", before=True)
self.observe_model(m)
return
# statically observes list[1] and list[3]
@Observer.observe("list[1]", after=True)
@Observer.observe("list[3]", before=True)
@Observer.observe("map[k1][0]", after=True)
def content_changed(self, model, name, info):
print "Observer:", model, name, info
return
pass # end of class
# ----------------------------------------------------------------------
if "__main__" == __name__:
m = MyModel()
o = MyObserver(m)
# change the list's content
for o in m.list: o.change()
# change the map's content
m.map['k0'].change()
for i in range(2): m.map['k1'][i].change()
pass
|
TEST
(Still not automatic, to be converted eventually.)
Test/example for experimental feature introduced in r283
# This tests the observation of observables into lists and maps.
# This test should be converted to unittest
import _importer
from gtkmvc import Model, Observer, Observable
# ----------------------------------------------------------------------
# An ad-hoc class which has a changing method 'change'
class MyObservable (Observable):
def __init__(self, name):
# name is used to distinguish instances in verbosity
Observable.__init__(self)
self.name = name
return
@Observable.observed
def change(self):
print "called change:", self.name
return
pass # end of class
# ----------------------------------------------------------------------
class MyModel (Model):
# this model contains only dynamically declared observables in to
# the list and the map.
def __init__(self):
Model.__init__(self)
# self.list and self.map are not observable here, although
# they could be made observable, of course.
self.list = [ MyObservable("ob.%02d" % i) for i in range(5) ]
self.map = { 'k0' : MyObservable("k0"),
'k1' : [MyObservable("k1[0]"), MyObservable("k1[1]")],
}
for i in range(len(self.list)):
self.register_property("list[%d]" % i)
pass
# notice the way keys are represented: map[k0], and not
# map['k0']. This may change (TBD):
self.register_property("map[k0]")
self.register_property("map[k1][0]")
self.register_property("map[k1][1]")
return
pass # end of class
# ----------------------------------------------------------------------
class MyObserver (Observer):
# The observer exploits both dynamic and static declaration of
# notification methods.
def __init__(self, m):
# notice that the observation is delayed here, as in 1.99.1
# dynamic observation works only before the model registration.
Observer.__init__(self)
# dynamically observes list[0]
self.observe(self.content_changed, "list[0]", before=True)
# dynamically observes map[k0]
self.observe(self.content_changed, "map[k0]", before=True)
self.observe_model(m)
return
# statically observes list[1] and list[3]
@Observer.observe("list[1]", after=True)
@Observer.observe("list[3]", before=True)
@Observer.observe("map[k1][0]", after=True)
def content_changed(self, model, name, info):
print "Observer:", model, name, info
return
pass # end of class
# ----------------------------------------------------------------------
if "__main__" == __name__:
m = MyModel()
o = MyObserver(m)
# change the list's content
for obs in m.list: obs.change()
# change the map's content
m.map['k0'].change()
for i in range(2): m.map['k1'][i].change()
pass
|
<commit_before><commit_msg>TEST
(Still not automatic, to be converted eventually.)
Test/example for experimental feature introduced in r283<commit_after># This tests the observation of observables into lists and maps.
# This test should be converted to unittest
import _importer
from gtkmvc import Model, Observer, Observable
# ----------------------------------------------------------------------
# An ad-hoc class which has a state-changing method 'change'
class MyObservable (Observable):
def __init__(self, name):
# name is used to distinguish instances in verbosity
Observable.__init__(self)
self.name = name
return
@Observable.observed
def change(self):
print "called change:", self.name
return
pass # end of class
# ----------------------------------------------------------------------
class MyModel (Model):
# this model contains only dynamically declared observables in to
# the list and the map.
def __init__(self):
Model.__init__(self)
# self.list and self.map are not observable here, although
# they could be made observable, of course.
self.list = [ MyObservable("ob.%02d" % i) for i in range(5) ]
self.map = { 'k0' : MyObservable("k0"),
'k1' : [MyObservable("k1[0]"), MyObservable("k1[1]")],
}
for i in range(len(self.list)):
self.register_property("list[%d]" % i)
pass
# notice the way keys are represented: map[k0], and not
# map['k0']. This may change (TBD):
self.register_property("map[k0]")
self.register_property("map[k1][0]")
self.register_property("map[k1][1]")
return
pass # end of class
# ----------------------------------------------------------------------
class MyObserver (Observer):
# The observer exploits both dynamic and static declaration of
# notification methods.
def __init__(self, m):
# notice that the observation is delayed here, as in 1.99.1
# dynamic observation works only before the model registration.
Observer.__init__(self)
# dynamically observes list[0]
self.observe(self.content_changed, "list[0]", before=True)
# dynamically observes map[k0]
self.observe(self.content_changed, "map[k0]", before=True)
self.observe_model(m)
return
# statically observes list[1] and list[3]
@Observer.observe("list[1]", after=True)
@Observer.observe("list[3]", before=True)
@Observer.observe("map[k1][0]", after=True)
def content_changed(self, model, name, info):
print "Observer:", model, name, info
return
pass # end of class
# ----------------------------------------------------------------------
if "__main__" == __name__:
m = MyModel()
o = MyObserver(m)
# change the list's content
for obs in m.list: obs.change()
# change the map's content
m.map['k0'].change()
for i in range(2): m.map['k1'][i].change()
pass
|
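The gtkmvc machinery above can obscure the underlying idea. Here is a minimal plain-Python sketch of observing objects held inside a list, with no gtkmvc involved; the subscribe/notify names are made up for illustration only:

class PlainObservable(object):
    def __init__(self, name):
        self.name = name
        self._callbacks = []  # subscribers interested in this instance
    def subscribe(self, callback):
        self._callbacks.append(callback)
    def change(self):
        for callback in self._callbacks:  # notify after the state change
            callback(self)

def on_change(obs):
    print "changed:", obs.name

items = [PlainObservable("ob.%02d" % i) for i in range(3)]
items[0].subscribe(on_change)
for obs in items:
    obs.change()  # only items[0] has a subscriber, so one line is printed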
|
1759446be100c94bee19bfb0c7e1ea800adbb7c4
|
node-test.py
|
node-test.py
|
#!/usr/bin/python
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium import webdriver
driver = webdriver.Remote('http://127.0.0.1:4444/wd/hub', DesiredCapabilities.FIREFOX)
driver.quit()
|
Add a simple node testing script in Python
|
Add a simple node testing script in Python
|
Python
|
mit
|
saikrishna321/selenium-video-node,saikrishna321/selenium-video-node,saikrishna321/selenium-video-node
|
Add a simple node testing script in Python
|
#!/usr/bin/python
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium import webdriver
driver = webdriver.Remote('http://127.0.0.1:4444/wd/hub', DesiredCapabilities.FIREFOX)
driver.quit()
|
<commit_before><commit_msg>Add a simple node testing script in Python<commit_after>
|
#!/usr/bin/python
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium import webdriver
driver = webdriver.Remote('http://127.0.0.1:4444/wd/hub', DesiredCapabilities.FIREFOX)
driver.quit()
|
Add a simple node testing script in Python#!/usr/bin/python
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium import webdriver
driver = webdriver.Remote('http://127.0.0.1:4444/wd/hub', DesiredCapabilities.FIREFOX)
driver.quit()
|
<commit_before><commit_msg>Add a simple node testing script in Python<commit_after>#!/usr/bin/python
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium import webdriver
driver = webdriver.Remote('http://127.0.0.1:4444/wd/hub', DesiredCapabilities.FIREFOX)
driver.quit()
|
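For reference, DesiredCapabilities is deprecated in Selenium 4. A sketch of the same smoke test against a modern grid, assuming Selenium 4 is installed, might look like this:

from selenium import webdriver

options = webdriver.FirefoxOptions()
driver = webdriver.Remote(
    command_executor='http://127.0.0.1:4444/wd/hub',  # same hub URL as above
    options=options,
)
driver.quit()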
|
d666e4f79e6646dc2f084a61b70c6cee3bf90d13
|
tests/health_checks/test_per_gene_AND_ld_snp.py
|
tests/health_checks/test_per_gene_AND_ld_snp.py
|
# ------------------------------------------------
# built-ins
import unittest
# local
from .base import TestPostgapBase
# ------------------------------------------------
class TestPostgapPerGeneANDLdSnp(TestPostgapBase):
def setUp(self):
self.per_gene_and_ld_snp = self.pg.groupby(['gene_id', 'ld_snp_rsID'])
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_VEP(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.VEP
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_GTEx(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.GTEx
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_PCHiC(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.PCHiC
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_DHS(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.DHS
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_Fantom5(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.Fantom5
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_Nearest(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.Nearest
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_Regulome(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.Regulome
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_score(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.score
)
if __name__ == '__main__':
unittest.main()
|
Add tests for gene and ld snp pairs
|
Add tests for gene and ld snp pairs
|
Python
|
apache-2.0
|
Ensembl/cttv024,Ensembl/cttv024
|
Add tests for gene and ld snp pairs
|
# ------------------------------------------------
# built-ins
import unittest
# local
from .base import TestPostgapBase
# ------------------------------------------------
class TestPostgapPerGeneANDLdSnp(TestPostgapBase):
def setUp(self):
self.per_gene_and_ld_snp = self.pg.groupby(['gene_id', 'ld_snp_rsID'])
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_VEP(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.VEP
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_GTEx(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.GTEx
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_PCHiC(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.PCHiC
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_DHS(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.DHS
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_Fantom5(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.Fantom5
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_Nearest(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.Nearest
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_Regulome(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.Regulome
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_score(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.score
)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for gene and ld snp pairs<commit_after>
|
# ------------------------------------------------
# built-ins
import unittest
# local
from .base import TestPostgapBase
# ------------------------------------------------
class TestPostgapPerGeneANDLdSnp(TestPostgapBase):
def setUp(self):
self.per_gene_and_ld_snp = self.pg.groupby(['gene_id', 'ld_snp_rsID'])
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_VEP(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.VEP
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_GTEx(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.GTEx
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_PCHiC(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.PCHiC
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_DHS(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.DHS
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_Fantom5(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.Fantom5
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_Nearest(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.Nearest
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_Regulome(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.Regulome
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_score(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.score
)
if __name__ == '__main__':
unittest.main()
|
Add tests for gene and ld snp pairs# ------------------------------------------------
# built-ins
import unittest
# local
from .base import TestPostgapBase
# ------------------------------------------------
class TestPostgapPerGeneANDLdSnp(TestPostgapBase):
def setUp(self):
self.per_gene_and_ld_snp = self.pg.groupby(['gene_id', 'ld_snp_rsID'])
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_VEP(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.VEP
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_GTEx(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.GTEx
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_PCHiC(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.PCHiC
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_DHS(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.DHS
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_Fantom5(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.Fantom5
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_Nearest(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.Nearest
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_Regulome(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.Regulome
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_score(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.score
)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for gene and ld snp pairs<commit_after># ------------------------------------------------
# built-ins
import unittest
# local
from .base import TestPostgapBase
# ------------------------------------------------
class TestPostgapPerGeneANDLdSnp(TestPostgapBase):
def setUp(self):
self.per_gene_and_ld_snp = self.pg.groupby(['gene_id', 'ld_snp_rsID'])
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_VEP(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.VEP
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_GTEx(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.GTEx
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_PCHiC(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.PCHiC
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_DHS(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.DHS
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_Fantom5(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.Fantom5
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_Nearest(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.Nearest
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_Regulome(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.Regulome
)
def test_each_gene_id_and_ld_snp_rsID_pair_has_unique_score(self):
self.assert_groupby_series_is_unique_per_group(
self.per_gene_and_ld_snp.score
)
if __name__ == '__main__':
unittest.main()
|
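The helper assert_groupby_series_is_unique_per_group lives in the .base module, which is not shown here. A plausible pandas implementation, offered only as a guess and written as a free function rather than a TestCase method, would be:

def assert_groupby_series_is_unique_per_group(series_groupby):
    # nunique(dropna=False) counts distinct values per group, NaN included;
    # every (gene_id, ld_snp_rsID) group should collapse to at most one value
    counts = series_groupby.nunique(dropna=False)
    offenders = counts[counts > 1]
    assert offenders.empty, "non-unique groups:\n%s" % offenders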
|
52e4e2d7511b672ee022fe62ea726fbf2511185f
|
src/testing/drawVtkObject.py
|
src/testing/drawVtkObject.py
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException, NoAlertPresentException
import unittest, time, re
class DrawTest(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://localhost:8000/"
self.verificationErrors = []
self.accept_next_alert = True
def test_draw(self):
driver = self.driver
driver.get(self.base_url + "/")
driver.find_element_by_link_text("testing/").click()
driver.find_element_by_link_text("drawVtkObject.html").click()
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException, e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException, e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
|
Add simple selenium test for vtkObject.
|
Add simple selenium test for vtkObject.
|
Python
|
apache-2.0
|
OpenGeoscience/vgl,OpenGeoscience/vgl,OpenGeoscience/vgl,OpenGeoscience/vgl
|
Add simple selenium test for vtkObject.
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException, NoAlertPresentException
import unittest, time, re
class DrawTest(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://localhost:8000/"
self.verificationErrors = []
self.accept_next_alert = True
def test_draw(self):
driver = self.driver
driver.get(self.base_url + "/")
driver.find_element_by_link_text("testing/").click()
driver.find_element_by_link_text("drawVtkObject.html").click()
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException, e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException, e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add simple selenium test for vtkObject.<commit_after>
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException, NoAlertPresentException
import unittest, time, re
class DrawTest(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://localhost:8000/"
self.verificationErrors = []
self.accept_next_alert = True
def test_draw(self):
driver = self.driver
driver.get(self.base_url + "/")
driver.find_element_by_link_text("testing/").click()
driver.find_element_by_link_text("drawVtkObject.html").click()
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException, e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException, e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
|
Add simple selenium test for vtkObject.from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException, NoAlertPresentException
import unittest, time, re
class DrawTest(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://localhost:8000/"
self.verificationErrors = []
self.accept_next_alert = True
def test_draw(self):
driver = self.driver
driver.get(self.base_url + "/")
driver.find_element_by_link_text("testing/").click()
driver.find_element_by_link_text("drawVtkObject.html").click()
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException, e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException, e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add simple selenium test for vtkObject.<commit_after>from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException, NoAlertPresentException
import unittest, time, re
class DrawTest(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://localhost:8000/"
self.verificationErrors = []
self.accept_next_alert = True
def test_draw(self):
driver = self.driver
driver.get(self.base_url + "/")
driver.find_element_by_link_text("testing/").click()
driver.find_element_by_link_text("drawVtkObject.html").click()
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException, e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException, e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
|
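The test relies on implicitly_wait; if the links render slowly, an explicit wait is usually the more robust choice. A sketch, with driver as created in setUp and the same link the test clicks:

from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

wait = WebDriverWait(driver, 30)
wait.until(EC.presence_of_element_located(
    (By.LINK_TEXT, "drawVtkObject.html"))).click()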
|
8aedd861032d0bf73c5ea397b73827398d278dda
|
tests/test_equivalency.py
|
tests/test_equivalency.py
|
import numpy as np
import pytest
from hypothesis import given
# Check that the replacement code does the same thing as the original:
def test_convolution_indexing():
""" To test equivalence of code to replace for speed"""
wav_extended = np.arange(100, 200)
flux_extended = np.random.random(size=wav_extended.size)
wav_val = 145
R = 8
FWHM = wav_val/R
FWHM_lim = 5
# Replace this code
indexes = [i for i in range(len(wav_extended)) if ((wav_val - FWHM_lim*FWHM) < wav_extended[i] < (wav_val + FWHM_lim*FWHM))]
old_flux_2convolve = flux_extended[indexes[0]:indexes[-1]+1]
old_wav_2convolve = wav_extended[indexes[0]:indexes[-1]+1]
# With this code
# Mask of wavelength range within 5 FWHM of wav
index_mask = ((wav_extended > (wav_val - FWHM_lim*FWHM)) &
(wav_extended < (wav_val + FWHM_lim*FWHM)))
flux_2convolve = flux_extended[index_mask]
wav_2convolve = wav_extended[index_mask]
assert np.all(old_flux_2convolve == flux_2convolve)
assert np.all(old_wav_2convolve == wav_2convolve)
|
Test mask resulted in the same value as comprehension list
|
Test mask resulted in the same value as comprehension list
Former-commit-id: f1e3c2e5a3ddeab54e47746398620c57931170d6
|
Python
|
mit
|
jason-neal/eniric,jason-neal/eniric
|
Test mask resulted in the same value as comprehension list
Former-commit-id: f1e3c2e5a3ddeab54e47746398620c57931170d6
|
import numpy as np
import pytest
from hypothesis import given
# Check that the replacement code does the same thing as the original:
def test_convolution_indexing():
""" To test equivalence of code to replace for speed"""
wav_extended = np.arange(100, 200)
flux_extended = np.random.random(size=wav_extended.size)
wav_val = 145
R = 8
FWHM = wav_val/R
FWHM_lim = 5
# Replace this code
indexes = [i for i in range(len(wav_extended)) if ((wav_val - FWHM_lim*FWHM) < wav_extended[i] < (wav_val + FWHM_lim*FWHM))]
old_flux_2convolve = flux_extended[indexes[0]:indexes[-1]+1]
old_wav_2convolve = wav_extended[indexes[0]:indexes[-1]+1]
# With this code
# Mask of wavelength range within 5 FWHM of wav
index_mask = ((wav_extended > (wav_val - FWHM_lim*FWHM)) &
(wav_extended < (wav_val + FWHM_lim*FWHM)))
flux_2convolve = flux_extended[index_mask]
wav_2convolve = wav_extended[index_mask]
assert np.all(old_flux_2convolve == flux_2convolve)
assert np.all(old_wav_2convolve == wav_2convolve)
|
<commit_before><commit_msg>Test mask resulted in the same value as comprehension list
Former-commit-id: f1e3c2e5a3ddeab54e47746398620c57931170d6<commit_after>
|
import numpy as np
import pytest
from hypothesis import given
# Check that the replacement code does the same thing as the original:
def test_convolution_indexing():
""" To test equivalence of code to replace for speed"""
wav_extended = np.arange(100, 200)
flux_extended = np.random.random(size=wav_extended.size)
wav_val = 145
R = 8
FWHM = wav_val/R
FWHM_lim = 5
# Replace this code
indexes = [i for i in range(len(wav_extended)) if ((wav_val - FWHM_lim*FWHM) < wav_extended[i] < (wav_val + FWHM_lim*FWHM))]
old_flux_2convolve = flux_extended[indexes[0]:indexes[-1]+1]
old_wav_2convolve = wav_extended[indexes[0]:indexes[-1]+1]
# With this code
# Mask of wavelength range within 5 FWHM of wav
index_mask = ((wav_extended > (wav_val - FWHM_lim*FWHM)) &
(wav_extended < (wav_val + FWHM_lim*FWHM)))
flux_2convolve = flux_extended[index_mask]
wav_2convolve = wav_extended[index_mask]
assert np.all(old_flux_2convolve == flux_2convolve)
assert np.all(old_wav_2convolve == wav_2convolve)
|
Test mask resulted in the same value as comprehension list
Former-commit-id: f1e3c2e5a3ddeab54e47746398620c57931170d6
import numpy as np
import pytest
from hypothesis import given
# Check that the replacement code does the same thing as the original:
def test_convolution_indexing():
""" To test equivalence of code to replace for speed"""
wav_extended = np.arange(100, 200)
flux_extended = np.random.random(size=wav_extended.size)
wav_val = 145
R = 8
FWHM = wav_val/R
FWHM_lim = 5
# Replace this code
indexes = [i for i in range(len(wav_extended)) if ((wav_val - FWHM_lim*FWHM) < wav_extended[i] < (wav_val + FWHM_lim*FWHM))]
old_flux_2convolve = flux_extended[indexes[0]:indexes[-1]+1]
old_wav_2convolve = wav_extended[indexes[0]:indexes[-1]+1]
# With this code
# Mask of wavelength range within 5 FWHM of wav
index_mask = ((wav_extended > (wav_val - FWHM_lim*FWHM)) &
(wav_extended < (wav_val + FWHM_lim*FWHM)))
flux_2convolve = flux_extended[index_mask]
wav_2convolve = wav_extended[index_mask]
assert np.all(old_flux_2convolve == flux_2convolve)
assert np.all(old_wav_2convolve == wav_2convolve)
|
<commit_before><commit_msg>Test mask resulted in the same value as comprehension list
Former-commit-id: f1e3c2e5a3ddeab54e47746398620c57931170d6<commit_after>
import numpy as np
import pytest
from hypothesis import given
# Check that the replacement code does the same thing as the original:
def test_convolution_indexing():
""" To test equivalence of code to replace for speed"""
wav_extended = np.arange(100, 200)
flux_extended = np.random.random(size=wav_extended.size)
wav_val = 145
R = 8
FWHM = wav_val/R
FWHM_lim = 5
# Replace this code
indexes = [i for i in range(len(wav_extended)) if ((wav_val - FWHM_lim*FWHM) < wav_extended[i] < (wav_val + FWHM_lim*FWHM))]
old_flux_2convolve = flux_extended[indexes[0]:indexes[-1]+1]
old_wav_2convolve = wav_extended[indexes[0]:indexes[-1]+1]
# With this code
# Mask of wavelength range within 5 FWHM of wav
index_mask = ((wav_extended > (wav_val - FWHM_lim*FWHM)) &
(wav_extended < (wav_val + FWHM_lim*FWHM)))
flux_2convolve = flux_extended[index_mask]
wav_2convolve = wav_extended[index_mask]
assert np.all(old_flux_2convolve == flux_2convolve)
assert np.all(old_wav_2convolve == wav_2convolve)
|
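The speed motivation behind the replacement is easy to check. A rough timeit sketch comparing the two indexing styles (the bounds are arbitrary and the numbers will vary by machine):

import timeit

setup = """
import numpy as np
wav = np.arange(100, 200)
lo, hi = 105.0, 185.0
"""
list_style = ("idx = [i for i in range(len(wav)) if lo < wav[i] < hi];"
              " wav[idx[0]:idx[-1] + 1]")
mask_style = "wav[(wav > lo) & (wav < hi)]"
print(timeit.timeit(list_style, setup=setup, number=10000))
print(timeit.timeit(mask_style, setup=setup, number=10000))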
|
0ae8934d4d1e1a6e57c73eebd85ede01326e8ca1
|
scripts/run_on_swarming_bots/apt-full-upgrade.py
|
scripts/run_on_swarming_bots/apt-full-upgrade.py
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Upgrade a bot via apt-get, then reboot. Aborts on error."""
import subprocess
# Copied from
# https://skia.googlesource.com/buildbot/+/d864d83d992f2968cf4d229cebf2d3104ee11ebf/go/gce/swarming/base-image/setup-script.sh#20
base_cmd = ['sudo', 'DEBIAN_FRONTEND=noninteractive', 'apt',
'-o', 'quiet=2', '--assume-yes',
'-o', 'Dpkg::Options::=--force-confdef',
'-o', 'Dpkg::Options::=--force-confold']
subprocess.check_call(base_cmd + ['update'])
subprocess.check_call(base_cmd + ['full-upgrade'])
subprocess.check_call(base_cmd + ['autoremove'])
subprocess.check_call(['sudo', 'reboot'])
|
Add script to upgrade Debian/Ubuntu bots.
|
Add script to upgrade Debian/Ubuntu bots.
Bug: skia:6890
Change-Id: I30eea5d64c502ac7ef51cf2d8f6ef3c583e60b3e
Reviewed-on: https://skia-review.googlesource.com/28840
Commit-Queue: Ben Wagner <3ef7217be91069877d94f7907ce5479000772cd3@google.com>
Reviewed-by: Eric Boren <0e499112533c8544f0505ea0d08394fb5ad7d8fa@google.com>
|
Python
|
bsd-3-clause
|
google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot
|
Add script to upgrade Debian/Ubuntu bots.
Bug: skia:6890
Change-Id: I30eea5d64c502ac7ef51cf2d8f6ef3c583e60b3e
Reviewed-on: https://skia-review.googlesource.com/28840
Commit-Queue: Ben Wagner <3ef7217be91069877d94f7907ce5479000772cd3@google.com>
Reviewed-by: Eric Boren <0e499112533c8544f0505ea0d08394fb5ad7d8fa@google.com>
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Upgrade a bot via apt-get, then reboot. Aborts on error."""
import subprocess
# Copied from
# https://skia.googlesource.com/buildbot/+/d864d83d992f2968cf4d229cebf2d3104ee11ebf/go/gce/swarming/base-image/setup-script.sh#20
base_cmd = ['sudo', 'DEBIAN_FRONTEND=noninteractive', 'apt',
'-o', 'quiet=2', '--assume-yes',
'-o', 'Dpkg::Options::=--force-confdef',
'-o', 'Dpkg::Options::=--force-confold']
subprocess.check_call(base_cmd + ['update'])
subprocess.check_call(base_cmd + ['full-upgrade'])
subprocess.check_call(base_cmd + ['autoremove'])
subprocess.check_call(['sudo', 'reboot'])
|
<commit_before><commit_msg>Add script to upgrade Debian/Ubuntu bots.
Bug: skia:6890
Change-Id: I30eea5d64c502ac7ef51cf2d8f6ef3c583e60b3e
Reviewed-on: https://skia-review.googlesource.com/28840
Commit-Queue: Ben Wagner <3ef7217be91069877d94f7907ce5479000772cd3@google.com>
Reviewed-by: Eric Boren <0e499112533c8544f0505ea0d08394fb5ad7d8fa@google.com><commit_after>
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Upgrade a bot via apt-get, then reboot. Aborts on error."""
import subprocess
# Copied from
# https://skia.googlesource.com/buildbot/+/d864d83d992f2968cf4d229cebf2d3104ee11ebf/go/gce/swarming/base-image/setup-script.sh#20
base_cmd = ['sudo', 'DEBIAN_FRONTEND=noninteractive', 'apt',
'-o', 'quiet=2', '--assume-yes',
'-o', 'Dpkg::Options::=--force-confdef',
'-o', 'Dpkg::Options::=--force-confold']
subprocess.check_call(base_cmd + ['update'])
subprocess.check_call(base_cmd + ['full-upgrade'])
subprocess.check_call(base_cmd + ['autoremove'])
subprocess.check_call(['sudo', 'reboot'])
|
Add script to upgrade Debian/Ubuntu bots.
Bug: skia:6890
Change-Id: I30eea5d64c502ac7ef51cf2d8f6ef3c583e60b3e
Reviewed-on: https://skia-review.googlesource.com/28840
Commit-Queue: Ben Wagner <3ef7217be91069877d94f7907ce5479000772cd3@google.com>
Reviewed-by: Eric Boren <0e499112533c8544f0505ea0d08394fb5ad7d8fa@google.com>#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Upgrade a bot via apt-get, then reboot. Aborts on error."""
import subprocess
# Copied from
# https://skia.googlesource.com/buildbot/+/d864d83d992f2968cf4d229cebf2d3104ee11ebf/go/gce/swarming/base-image/setup-script.sh#20
base_cmd = ['sudo', 'DEBIAN_FRONTEND=noninteractive', 'apt',
'-o', 'quiet=2', '--assume-yes',
'-o', 'Dpkg::Options::=--force-confdef',
'-o', 'Dpkg::Options::=--force-confold']
subprocess.check_call(base_cmd + ['update'])
subprocess.check_call(base_cmd + ['full-upgrade'])
subprocess.check_call(base_cmd + ['autoremove'])
subprocess.check_call(['sudo', 'reboot'])
|
<commit_before><commit_msg>Add script to upgrade Debian/Ubuntu bots.
Bug: skia:6890
Change-Id: I30eea5d64c502ac7ef51cf2d8f6ef3c583e60b3e
Reviewed-on: https://skia-review.googlesource.com/28840
Commit-Queue: Ben Wagner <3ef7217be91069877d94f7907ce5479000772cd3@google.com>
Reviewed-by: Eric Boren <0e499112533c8544f0505ea0d08394fb5ad7d8fa@google.com><commit_after>#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Upgrade a bot via apt-get, then reboot. Aborts on error."""
import subprocess
# Copied from
# https://skia.googlesource.com/buildbot/+/d864d83d992f2968cf4d229cebf2d3104ee11ebf/go/gce/swarming/base-image/setup-script.sh#20
base_cmd = ['sudo', 'DEBIAN_FRONTEND=noninteractive', 'apt',
'-o', 'quiet=2', '--assume-yes',
'-o', 'Dpkg::Options::=--force-confdef',
'-o', 'Dpkg::Options::=--force-confold']
subprocess.check_call(base_cmd + ['update'])
subprocess.check_call(base_cmd + ['full-upgrade'])
subprocess.check_call(base_cmd + ['autoremove'])
subprocess.check_call(['sudo', 'reboot'])
|
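Since the script reboots unconditionally, a dry-run pass can be worth wiring in first. A sketch reusing base_cmd and subprocess from above; '-s' asks apt to simulate, and treating its output as sufficient review is an assumption, not part of the original script:

def apt(args, simulate=False):
    cmd = list(base_cmd) + (['-s'] if simulate else []) + args
    subprocess.check_call(cmd)

apt(['update'])
apt(['full-upgrade'], simulate=True)  # inspect what would change first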
|
eda1d37c697956c92cf930c640a9d8d1a382bea5
|
tests/matchers/test_contain.py
|
tests/matchers/test_contain.py
|
import unittest
from robber import expect
from robber.matchers.contain import Contain
class TestContain(unittest.TestCase):
def test_matches(self):
expect(Contain({'key': 'value'}, 'key').matches()) == True
expect(Contain({1, 2, 3}, 1).matches()) == True
expect(Contain([1, 2, 3], 2).matches()) == True
expect(Contain((1, 2, 3), 3).matches()) == True
expect(Contain({'key': 'value'}, 'other').matches()) == False
expect(Contain({1, 2, 3}, 4).matches()) == False
expect(Contain([1, 2, 3], 4).matches()) == False
expect(Contain((1, 2, 3), 4).matches()) == False
def test_failure_message(self):
contain = Contain([1, 2, 3], 4)
expect(contain.failure_message()) == 'Expected {} to contain 4'.format([1, 2, 3])
def test_register(self):
expect(expect.matcher('contain')) == Contain
|
Add tests for 'contain' matcher
|
Add tests for 'contain' matcher
|
Python
|
mit
|
vesln/robber.py,taoenator/robber.py
|
Add tests for 'contain' matcher
|
import unittest
from robber import expect
from robber.matchers.contain import Contain
class TestContain(unittest.TestCase):
def test_matches(self):
expect(Contain({'key': 'value'}, 'key').matches()) == True
expect(Contain({1, 2, 3}, 1).matches()) == True
expect(Contain([1, 2, 3], 2).matches()) == True
expect(Contain((1, 2, 3), 3).matches()) == True
expect(Contain({'key': 'value'}, 'other').matches()) == False
expect(Contain({1, 2, 3}, 4).matches()) == False
expect(Contain([1, 2, 3], 4).matches()) == False
expect(Contain((1, 2, 3), 4).matches()) == False
def test_failure_message(self):
contain = Contain([1, 2, 3], 4)
expect(contain.failure_message()) == 'Expected {} to contain 4'.format([1, 2, 3])
def test_register(self):
expect(expect.matcher('contain')) == Contain
|
<commit_before><commit_msg>Add tests for 'contain' matcher<commit_after>
|
import unittest
from robber import expect
from robber.matchers.contain import Contain
class TestContain(unittest.TestCase):
def test_matches(self):
expect(Contain({'key': 'value'}, 'key').matches()) == True
expect(Contain({1, 2, 3}, 1).matches()) == True
expect(Contain([1, 2, 3], 2).matches()) == True
expect(Contain((1, 2, 3), 3).matches()) == True
expect(Contain({'key': 'value'}, 'other').matches()) == False
expect(Contain({1, 2, 3}, 4).matches()) == False
expect(Contain([1, 2, 3], 4).matches()) == False
expect(Contain((1, 2, 3), 4).matches()) == False
def test_failure_message(self):
contain = Contain([1, 2, 3], 4)
expect(contain.failure_message()) == 'Expected {} to contain 4'.format([1, 2, 3])
def test_register(self):
expect(expect.matcher('contain')) == Contain
|
Add tests for 'contain' matcherimport unittest
from robber import expect
from robber.matchers.contain import Contain
class TestContain(unittest.TestCase):
def test_matches(self):
expect(Contain({'key': 'value'}, 'key').matches()) == True
expect(Contain({1, 2, 3}, 1).matches()) == True
expect(Contain([1, 2, 3], 2).matches()) == True
expect(Contain((1, 2, 3), 3).matches()) == True
expect(Contain({'key': 'value'}, 'other').matches()) == False
expect(Contain({1, 2, 3}, 4).matches()) == False
expect(Contain([1, 2, 3], 4).matches()) == False
expect(Contain((1, 2, 3), 4).matches()) == False
def test_failure_message(self):
contain = Contain([1, 2, 3], 4)
expect(contain.failure_message()) == 'Expected {} to contain 4'.format([1, 2, 3])
def test_register(self):
expect(expect.matcher('contain')) == Contain
|
<commit_before><commit_msg>Add tests for 'contain' matcher<commit_after>import unittest
from robber import expect
from robber.matchers.contain import Contain
class TestContain(unittest.TestCase):
def test_matches(self):
expect(Contain({'key': 'value'}, 'key').matches()) == True
expect(Contain({1, 2, 3}, 1).matches()) == True
expect(Contain([1, 2, 3], 2).matches()) == True
expect(Contain((1, 2, 3), 3).matches()) == True
expect(Contain({'key': 'value'}, 'other').matches()) == False
expect(Contain({1, 2, 3}, 4).matches()) == False
expect(Contain([1, 2, 3], 4).matches()) == False
expect(Contain((1, 2, 3), 4).matches()) == False
def test_failure_message(self):
contain = Contain([1, 2, 3], 4)
expect(contain.failure_message()) == 'Expected {} to contain 4'.format([1, 2, 3])
def test_register(self):
expect(expect.matcher('contain')) == Contain
|
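The tests pin down the matcher's interface quite tightly. Reading them back, a minimal Contain could look like the sketch below; the real robber implementation may differ, for example in how it registers itself with expect:

class Contain(object):
    def __init__(self, actual, expected):
        self.actual = actual
        self.expected = expected
    def matches(self):
        # 'in' covers dict keys, sets, lists and tuples alike
        return self.expected in self.actual
    def failure_message(self):
        return 'Expected {0} to contain {1}'.format(self.actual, self.expected)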
|
b53adea2d02458e20bb165c18c856b6816ab1983
|
nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py
|
nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def _get_deleted_expire_index(table):
members = sorted(['deleted', 'expire'])
for idx in table.indexes:
if sorted(idx.columns.keys()) == members:
return idx
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
reservations = Table('reservations', meta, autoload=True)
if _get_deleted_expire_index(reservations):
LOG.info(_('Skipped adding reservations_deleted_expire_idx '
'because an equivalent index already exists.'))
return
# Based on expire_reservations query
# from: nova/db/sqlalchemy/api.py
index = Index('reservations_deleted_expire_idx',
reservations.c.deleted, reservations.c.expire)
index.create(migrate_engine)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
reservations = Table('reservations', meta, autoload=True)
index = _get_deleted_expire_index(reservations)
if index:
index.drop(migrate_engine)
else:
LOG.info(_('Skipped removing reservations_deleted_expire_idx '
'because index does not exist.'))
|
Add expire reservations in backport position.
|
Add expire reservations in backport position.
Change-Id: If0e58da50ebe9b50b414737a9bd81d93752506e2
Related-bug: #1348720
(cherry picked from commit f4454f4c6962dd2c57c08dc7fecfcdebe7924e3b)
|
Python
|
apache-2.0
|
luogangyi/bcec-nova,leilihh/novaha,leilihh/novaha,luogangyi/bcec-nova,leilihh/nova,leilihh/nova
|
Add expire reservations in backport position.
Change-Id: If0e58da50ebe9b50b414737a9bd81d93752506e2
Related-bug: #1348720
(cherry picked from commit f4454f4c6962dd2c57c08dc7fecfcdebe7924e3b)
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def _get_deleted_expire_index(table):
members = sorted(['deleted', 'expire'])
for idx in table.indexes:
if sorted(idx.columns.keys()) == members:
return idx
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
reservations = Table('reservations', meta, autoload=True)
if _get_deleted_expire_index(reservations):
LOG.info(_('Skipped adding reservations_deleted_expire_idx '
'because an equivalent index already exists.'))
return
# Based on expire_reservations query
# from: nova/db/sqlalchemy/api.py
index = Index('reservations_deleted_expire_idx',
reservations.c.deleted, reservations.c.expire)
index.create(migrate_engine)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
reservations = Table('reservations', meta, autoload=True)
index = _get_deleted_expire_index(reservations)
if index:
index.drop(migrate_engine)
else:
LOG.info(_('Skipped removing reservations_deleted_expire_idx '
'because index does not exist.'))
|
<commit_before><commit_msg>Add expire reservations in backport position.
Change-Id: If0e58da50ebe9b50b414737a9bd81d93752506e2
Related-bug: #1348720
(cherry picked from commit f4454f4c6962dd2c57c08dc7fecfcdebe7924e3b)<commit_after>
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def _get_deleted_expire_index(table):
members = sorted(['deleted', 'expire'])
for idx in table.indexes:
if sorted(idx.columns.keys()) == members:
return idx
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
reservations = Table('reservations', meta, autoload=True)
if _get_deleted_expire_index(reservations):
LOG.info(_('Skipped adding reservations_deleted_expire_idx '
'because an equivalent index already exists.'))
return
# Based on expire_reservations query
# from: nova/db/sqlalchemy/api.py
index = Index('reservations_deleted_expire_idx',
reservations.c.deleted, reservations.c.expire)
index.create(migrate_engine)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
reservations = Table('reservations', meta, autoload=True)
index = _get_deleted_expire_index(reservations)
if index:
index.drop(migrate_engine)
else:
LOG.info(_('Skipped removing reservations_deleted_expire_idx '
'because index does not exist.'))
|
Add expire reservations in backport position.
Change-Id: If0e58da50ebe9b50b414737a9bd81d93752506e2
Related-bug: #1348720
(cherry picked from commit f4454f4c6962dd2c57c08dc7fecfcdebe7924e3b)# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def _get_deleted_expire_index(table):
members = sorted(['deleted', 'expire'])
for idx in table.indexes:
if sorted(idx.columns.keys()) == members:
return idx
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
reservations = Table('reservations', meta, autoload=True)
if _get_deleted_expire_index(reservations):
LOG.info(_('Skipped adding reservations_deleted_expire_idx '
'because an equivalent index already exists.'))
return
# Based on expire_reservations query
# from: nova/db/sqlalchemy/api.py
index = Index('reservations_deleted_expire_idx',
reservations.c.deleted, reservations.c.expire)
index.create(migrate_engine)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
reservations = Table('reservations', meta, autoload=True)
index = _get_deleted_expire_index(reservations)
if index:
index.drop(migrate_engine)
else:
LOG.info(_('Skipped removing reservations_deleted_expire_idx '
'because index does not exist.'))
|
<commit_before><commit_msg>Add expire reservations in backport position.
Change-Id: If0e58da50ebe9b50b414737a9bd81d93752506e2
Related-bug: #1348720
(cherry picked from commit f4454f4c6962dd2c57c08dc7fecfcdebe7924e3b)<commit_after># All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def _get_deleted_expire_index(table):
members = sorted(['deleted', 'expire'])
for idx in table.indexes:
if sorted(idx.columns.keys()) == members:
return idx
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
reservations = Table('reservations', meta, autoload=True)
if _get_deleted_expire_index(reservations):
LOG.info(_('Skipped adding reservations_deleted_expire_idx '
'because an equivalent index already exists.'))
return
# Based on expire_reservations query
# from: nova/db/sqlalchemy/api.py
index = Index('reservations_deleted_expire_idx',
reservations.c.deleted, reservations.c.expire)
index.create(migrate_engine)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
reservations = Table('reservations', meta, autoload=True)
index = _get_deleted_expire_index(reservations)
if index:
index.drop(migrate_engine)
else:
LOG.info(_('Skipped removing reservations_deleted_expire_idx '
'because index does not exist.'))
|
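The same existence check can be expressed with SQLAlchemy's runtime inspector. Shown only as a sketch; the migration above predates some of this API, so version support is an assumption:

from sqlalchemy import inspect

def has_deleted_expire_index(engine):
    for idx in inspect(engine).get_indexes('reservations'):
        if sorted(idx['column_names']) == ['deleted', 'expire']:
            return True
    return False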
|
282d6ae5911e4fcf4625a7e82e7024c5dc0722d8
|
tests/test_simple_persistence.py
|
tests/test_simple_persistence.py
|
from __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase
class TestSimplePersistence(FlexGetBase):
__yaml__ = """
tasks:
test:
mock:
- {title: 'irrelevant'}
"""
def test_setdefault(self):
self.execute_task('test')
task = self.task
value1 = task.simple_persistence.setdefault('test', 'abc')
value2 = task.simple_persistence.setdefault('test', 'def')
assert value1 == value2, 'set default broken'
|
from __future__ import unicode_literals, division, absolute_import
from flexget.manager import Session
from flexget.utils.simple_persistence import SimplePersistence
from tests import FlexGetBase
class TestSimplePersistence(FlexGetBase):
__yaml__ = """
tasks:
test:
mock:
- {title: 'irrelevant'}
"""
def test_setdefault(self):
self.execute_task('test')
task = self.task
value1 = task.simple_persistence.setdefault('test', 'abc')
value2 = task.simple_persistence.setdefault('test', 'def')
assert value1 == value2, 'set default broken'
def test_nosession(self):
persist = SimplePersistence('testplugin')
persist['aoeu'] = 'test'
assert persist['aoeu'] == 'test'
# Make sure it commits and actually persists
persist = SimplePersistence('testplugin')
assert persist['aoeu'] == 'test'
def test_withsession(self):
session = Session()
persist = SimplePersistence('testplugin', session=session)
persist['aoeu'] = 'test'
assert persist['aoeu'] == 'test'
# Make sure it didn't commit or close our session
session.rollback()
assert 'aoeu' not in persist
|
Add some more tests for simple_persistence
|
Add some more tests for simple_persistence
|
Python
|
mit
|
cvium/Flexget,OmgOhnoes/Flexget,jawilson/Flexget,patsissons/Flexget,ZefQ/Flexget,tarzasai/Flexget,X-dark/Flexget,qk4l/Flexget,tsnoam/Flexget,Danfocus/Flexget,spencerjanssen/Flexget,crawln45/Flexget,oxc/Flexget,tvcsantos/Flexget,Flexget/Flexget,tarzasai/Flexget,tobinjt/Flexget,qvazzler/Flexget,Pretagonist/Flexget,LynxyssCZ/Flexget,ianstalk/Flexget,thalamus/Flexget,vfrc2/Flexget,LynxyssCZ/Flexget,camon/Flexget,thalamus/Flexget,spencerjanssen/Flexget,sean797/Flexget,offbyone/Flexget,offbyone/Flexget,xfouloux/Flexget,malkavi/Flexget,poulpito/Flexget,sean797/Flexget,dsemi/Flexget,patsissons/Flexget,ZefQ/Flexget,malkavi/Flexget,ZefQ/Flexget,drwyrm/Flexget,malkavi/Flexget,tvcsantos/Flexget,tsnoam/Flexget,grrr2/Flexget,Flexget/Flexget,jacobmetrick/Flexget,OmgOhnoes/Flexget,ibrahimkarahan/Flexget,Pretagonist/Flexget,qk4l/Flexget,ratoaq2/Flexget,gazpachoking/Flexget,drwyrm/Flexget,jawilson/Flexget,tarzasai/Flexget,spencerjanssen/Flexget,lildadou/Flexget,LynxyssCZ/Flexget,qvazzler/Flexget,asm0dey/Flexget,crawln45/Flexget,cvium/Flexget,tobinjt/Flexget,poulpito/Flexget,vfrc2/Flexget,JorisDeRieck/Flexget,jawilson/Flexget,offbyone/Flexget,qvazzler/Flexget,antivirtel/Flexget,xfouloux/Flexget,tobinjt/Flexget,thalamus/Flexget,Danfocus/Flexget,lildadou/Flexget,JorisDeRieck/Flexget,v17al/Flexget,malkavi/Flexget,X-dark/Flexget,lildadou/Flexget,dsemi/Flexget,oxc/Flexget,dsemi/Flexget,cvium/Flexget,v17al/Flexget,jawilson/Flexget,qk4l/Flexget,voriux/Flexget,voriux/Flexget,jacobmetrick/Flexget,antivirtel/Flexget,grrr2/Flexget,ianstalk/Flexget,tobinjt/Flexget,X-dark/Flexget,oxc/Flexget,jacobmetrick/Flexget,poulpito/Flexget,Danfocus/Flexget,ibrahimkarahan/Flexget,crawln45/Flexget,ibrahimkarahan/Flexget,gazpachoking/Flexget,v17al/Flexget,ianstalk/Flexget,tsnoam/Flexget,drwyrm/Flexget,patsissons/Flexget,Flexget/Flexget,ratoaq2/Flexget,JorisDeRieck/Flexget,JorisDeRieck/Flexget,LynxyssCZ/Flexget,sean797/Flexget,asm0dey/Flexget,OmgOhnoes/Flexget,Flexget/Flexget,vfrc2/Flexget,ratoaq2/Flexget,Danfocus/Flexget,grrr2/Flexget,antivirtel/Flexget,asm0dey/Flexget,Pretagonist/Flexget,xfouloux/Flexget,camon/Flexget,crawln45/Flexget
|
from __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase
class TestSimplePersistence(FlexGetBase):
__yaml__ = """
tasks:
test:
mock:
- {title: 'irrelevant'}
"""
def test_setdefault(self):
self.execute_task('test')
task = self.task
value1 = task.simple_persistence.setdefault('test', 'abc')
value2 = task.simple_persistence.setdefault('test', 'def')
assert value1 == value2, 'set default broken'
Add some more tests for simple_persistence
|
from __future__ import unicode_literals, division, absolute_import
from flexget.manager import Session
from flexget.utils.simple_persistence import SimplePersistence
from tests import FlexGetBase
class TestSimplePersistence(FlexGetBase):
__yaml__ = """
tasks:
test:
mock:
- {title: 'irrelevant'}
"""
def test_setdefault(self):
self.execute_task('test')
task = self.task
value1 = task.simple_persistence.setdefault('test', 'abc')
value2 = task.simple_persistence.setdefault('test', 'def')
assert value1 == value2, 'set default broken'
def test_nosession(self):
persist = SimplePersistence('testplugin')
persist['aoeu'] = 'test'
assert persist['aoeu'] == 'test'
# Make sure it commits and actually persists
persist = SimplePersistence('testplugin')
assert persist['aoeu'] == 'test'
def test_withsession(self):
session = Session()
persist = SimplePersistence('testplugin', session=session)
persist['aoeu'] = 'test'
assert persist['aoeu'] == 'test'
# Make sure it didn't commit or close our session
session.rollback()
assert 'aoeu' not in persist
|
<commit_before>from __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase
class TestSimplePersistence(FlexGetBase):
__yaml__ = """
tasks:
test:
mock:
- {title: 'irrelevant'}
"""
def test_setdefault(self):
self.execute_task('test')
task = self.task
value1 = task.simple_persistence.setdefault('test', 'abc')
value2 = task.simple_persistence.setdefault('test', 'def')
assert value1 == value2, 'set default broken'
<commit_msg>Add some more tests for simple_persistence<commit_after>
|
from __future__ import unicode_literals, division, absolute_import
from flexget.manager import Session
from flexget.utils.simple_persistence import SimplePersistence
from tests import FlexGetBase
class TestSimplePersistence(FlexGetBase):
__yaml__ = """
tasks:
test:
mock:
- {title: 'irrelevant'}
"""
def test_setdefault(self):
self.execute_task('test')
task = self.task
value1 = task.simple_persistence.setdefault('test', 'abc')
value2 = task.simple_persistence.setdefault('test', 'def')
assert value1 == value2, 'set default broken'
def test_nosession(self):
persist = SimplePersistence('testplugin')
persist['aoeu'] = 'test'
assert persist['aoeu'] == 'test'
# Make sure it commits and actually persists
persist = SimplePersistence('testplugin')
assert persist['aoeu'] == 'test'
def test_withsession(self):
session = Session()
persist = SimplePersistence('testplugin', session=session)
persist['aoeu'] = 'test'
assert persist['aoeu'] == 'test'
# Make sure it didn't commit or close our session
session.rollback()
assert 'aoeu' not in persist
|
from __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase
class TestSimplePersistence(FlexGetBase):
__yaml__ = """
tasks:
test:
mock:
- {title: 'irrelevant'}
"""
def test_setdefault(self):
self.execute_task('test')
task = self.task
value1 = task.simple_persistence.setdefault('test', 'abc')
value2 = task.simple_persistence.setdefault('test', 'def')
assert value1 == value2, 'set default broken'
Add some more tests for simple_persistencefrom __future__ import unicode_literals, division, absolute_import
from flexget.manager import Session
from flexget.utils.simple_persistence import SimplePersistence
from tests import FlexGetBase
class TestSimplePersistence(FlexGetBase):
__yaml__ = """
tasks:
test:
mock:
- {title: 'irrelevant'}
"""
def test_setdefault(self):
self.execute_task('test')
task = self.task
value1 = task.simple_persistence.setdefault('test', 'abc')
value2 = task.simple_persistence.setdefault('test', 'def')
assert value1 == value2, 'set default broken'
def test_nosession(self):
persist = SimplePersistence('testplugin')
persist['aoeu'] = 'test'
assert persist['aoeu'] == 'test'
# Make sure it commits and actually persists
persist = SimplePersistence('testplugin')
assert persist['aoeu'] == 'test'
def test_withsession(self):
session = Session()
persist = SimplePersistence('testplugin', session=session)
persist['aoeu'] = 'test'
assert persist['aoeu'] == 'test'
# Make sure it didn't commit or close our session
session.rollback()
assert 'aoeu' not in persist
|
<commit_before>from __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase
class TestSimplePersistence(FlexGetBase):
__yaml__ = """
tasks:
test:
mock:
- {title: 'irrelevant'}
"""
def test_setdefault(self):
self.execute_task('test')
task = self.task
value1 = task.simple_persistence.setdefault('test', 'abc')
value2 = task.simple_persistence.setdefault('test', 'def')
assert value1 == value2, 'set default broken'
<commit_msg>Add some more tests for simple_persistence<commit_after>from __future__ import unicode_literals, division, absolute_import
from flexget.manager import Session
from flexget.utils.simple_persistence import SimplePersistence
from tests import FlexGetBase
class TestSimplePersistence(FlexGetBase):
__yaml__ = """
tasks:
test:
mock:
- {title: 'irrelevant'}
"""
def test_setdefault(self):
self.execute_task('test')
task = self.task
value1 = task.simple_persistence.setdefault('test', 'abc')
value2 = task.simple_persistence.setdefault('test', 'def')
assert value1 == value2, 'set default broken'
def test_nosession(self):
persist = SimplePersistence('testplugin')
persist['aoeu'] = 'test'
assert persist['aoeu'] == 'test'
# Make sure it commits and actually persists
persist = SimplePersistence('testplugin')
assert persist['aoeu'] == 'test'
def test_withsession(self):
session = Session()
persist = SimplePersistence('testplugin', session=session)
persist['aoeu'] = 'test'
assert persist['aoeu'] == 'test'
# Make sure it didn't commit or close our session
session.rollback()
assert 'aoeu' not in persist
|
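Aside on the behaviour the tests above pin down: SimplePersistence commits when it owns its session, but must not commit or close a session handed in by the caller. A minimal, hypothetical sketch of that ownership rule (not FlexGet's actual implementation; a plain dict stands in for a real SQLAlchemy session):

class OwnedSessionStore(object):
    """Toy store that only commits sessions it created itself."""
    def __init__(self, session=None):
        self._owns_session = session is None
        self._session = {} if session is None else session
        self.committed = False
    def __setitem__(self, key, value):
        self._session[key] = value
        if self._owns_session:
            self.committed = True  # a real store would call session.commit() here
    def __contains__(self, key):
        return key in self._session

store = OwnedSessionStore()
store['aoeu'] = 'test'
assert 'aoeu' in store and store.committed
shared = {}
store = OwnedSessionStore(session=shared)
store['aoeu'] = 'test'
assert 'aoeu' in store and not store.committed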
0c0ae76c33800951e718611a3c5b84518b43589d
|
repeatWordAThousandTimes.py
|
repeatWordAThousandTimes.py
|
#!/usr/bin/env python
def main():
word = raw_input("Say a word and I will repeat it a thousand times: ").strip()
if (word.count(" ") > 0):
print "More than one word!! Please try again."
main()
return
print " ".join([word for x in range(0, 100)])
main()
|
Add repeat a word a thousand times exercise
|
Add repeat a word a thousand times exercise
|
Python
|
apache-2.0
|
MindCookin/python-exercises
|
Add repeat a word a thousand times exercise
|
#!/usr/bin/env python
def main():
word = raw_input("Say a word and I will repeat it a thousand times: ").strip()
if (word.count(" ") > 0):
print "More than one word!! Please try again."
main()
return
print " ".join([word for x in range(0, 100)])
main()
|
<commit_before><commit_msg>Add repeat a word a thousand times exercise<commit_after>
|
#!/usr/bin/env python
def main():
word = raw_input("Say a word and I will repeat it a thousand times: ").strip()
if (word.count(" ") > 0):
print "More than one word!! Please try again."
main()
return
print " ".join([word for x in range(0, 100)])
main()
|
Add repeat a word a thousand times exercise#!/usr/bin/env python
def main():
word = raw_input("Say a word and I will repeat it a thousand times: ").strip()
if (word.count(" ") > 0):
print "More than one word!! Please try again."
main()
return
print " ".join([word for x in range(0, 100)])
main()
|
<commit_before><commit_msg>Add repeat a word a thousand times exercise<commit_after>#!/usr/bin/env python
def main():
word = raw_input("Say a word and I will repeat it a thousand times: ").strip()
if (word.count(" ") > 0):
print "More than one word!! Please try again."
main()
return
print " ".join([word for x in range(0, 100)])
main()
|
|
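A hedged alternative to the recursive retry in the record above: repeated bad input can eventually exhaust Python's recursion limit, and a plain loop sidesteps that (Python 3 spelling shown here, unlike the Python 2 original):

def main():
    while True:
        word = input("Say a word and I will repeat it a thousand times: ").strip()
        if " " not in word:
            break
        print("More than one word!! Please try again.")
    # repeat exactly a thousand times, matching the prompt
    print(" ".join([word] * 1000))

if __name__ == "__main__":
    main()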
3b35b313fad8e1faf3a2eaa3ca03f64eb31ed421
|
web/management/commands/fixcardsfamily.py
|
web/management/commands/fixcardsfamily.py
|
from django.core.management.base import BaseCommand, CommandError
from web import models
class Command(BaseCommand):
can_import_settings = True
def handle(self, *args, **options):
cards = models.Card.objects.filter(parent__isnull=False)
for card in cards:
card.rarity = card.parent.rarity
card.performer = card.parent.performer
card.attributes = card.parent.attributes
card.save()
print "Updated {}".format(card)
|
Duplicate info in parent and children
|
Duplicate info in parent and children
|
Python
|
apache-2.0
|
SchoolIdolTomodachi/frgl,SchoolIdolTomodachi/frgl,SchoolIdolTomodachi/frgl
|
Duplicate info in parent and children
|
from django.core.management.base import BaseCommand, CommandError
from web import models
class Command(BaseCommand):
can_import_settings = True
def handle(self, *args, **options):
cards = models.Card.objects.filter(parent__isnull=False)
for card in cards:
card.rarity = card.parent.rarity
card.performer = card.parent.performer
card.attributes = card.parent.attributes
card.save()
print "Updated {}".format(card)
|
<commit_before><commit_msg>Duplicate info in parent and children<commit_after>
|
from django.core.management.base import BaseCommand, CommandError
from web import models
class Command(BaseCommand):
can_import_settings = True
def handle(self, *args, **options):
cards = models.Card.objects.filter(parent__isnull=False)
for card in cards:
card.rarity = card.parent.rarity
card.performer = card.parent.performer
card.attributes = card.parent.attributes
card.save()
print "Updated {}".format(card)
|
Duplicate info in parent and childrenfrom django.core.management.base import BaseCommand, CommandError
from web import models
class Command(BaseCommand):
can_import_settings = True
def handle(self, *args, **options):
cards = models.Card.objects.filter(parent__isnull=False)
for card in cards:
card.rarity = card.parent.rarity
card.performer = card.parent.performer
card.attributes = card.parent.attributes
card.save()
print "Updated {}".format(card)
|
<commit_before><commit_msg>Duplicate info in parent and children<commit_after>from django.core.management.base import BaseCommand, CommandError
from web import models
class Command(BaseCommand):
can_import_settings = True
def handle(self, *args, **options):
cards = models.Card.objects.filter(parent__isnull=False)
for card in cards:
card.rarity = card.parent.rarity
card.performer = card.parent.performer
card.attributes = card.parent.attributes
card.save()
print "Updated {}".format(card)
|
|
a1e7a7cff8ee6d15dac1dee67a5ea5bd932252de
|
nemubot/message/printer/test_socket.py
|
nemubot/message/printer/test_socket.py
|
# Nemubot is a smart and modulable IM bot.
# Copyright (C) 2012-2015 Mercier Pierre-Olivier
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from nemubot.message import Command, DirectAsk, Text
from nemubot.message.printer.socket import Socket as SocketVisitor
class TestSocketPrinter(unittest.TestCase):
def setUp(self):
self.msgs = [
# Texts
(
Text(message="TEXT",
),
"TEXT"
),
(
Text(message="TEXT TEXT2",
),
"TEXT TEXT2"
),
(
Text(message="TEXT @ARG=1 TEXT2",
),
"TEXT @ARG=1 TEXT2"
),
# DirectAsk
(
DirectAsk(message="TEXT",
designated="someone",
to=["#somechannel"]
),
"someone: TEXT"
),
(
# Private message to someone
DirectAsk(message="TEXT",
designated="someone",
to=["someone"]
),
"TEXT"
),
# Commands
(
Command(cmd="COMMAND",
),
"!COMMAND"
),
(
Command(cmd="COMMAND",
args=["TEXT"],
),
"!COMMAND TEXT"
),
]
def test_printer(self):
for msg, pp in self.msgs:
sv = SocketVisitor()
msg.accept(sv)
self.assertEqual(sv.pp, pp)
if __name__ == '__main__':
unittest.main()
|
Add test for socket printer
|
Add test for socket printer
|
Python
|
agpl-3.0
|
nemunaire/nemubot,nbr23/nemubot
|
Add test for socket printer
|
# Nemubot is a smart and modulable IM bot.
# Copyright (C) 2012-2015 Mercier Pierre-Olivier
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from nemubot.message import Command, DirectAsk, Text
from nemubot.message.printer.socket import Socket as SocketVisitor
class TestSocketPrinter(unittest.TestCase):
def setUp(self):
self.msgs = [
# Texts
(
Text(message="TEXT",
),
"TEXT"
),
(
Text(message="TEXT TEXT2",
),
"TEXT TEXT2"
),
(
Text(message="TEXT @ARG=1 TEXT2",
),
"TEXT @ARG=1 TEXT2"
),
# DirectAsk
(
DirectAsk(message="TEXT",
designated="someone",
to=["#somechannel"]
),
"someone: TEXT"
),
(
# Private message to someone
DirectAsk(message="TEXT",
designated="someone",
to=["someone"]
),
"TEXT"
),
# Commands
(
Command(cmd="COMMAND",
),
"!COMMAND"
),
(
Command(cmd="COMMAND",
args=["TEXT"],
),
"!COMMAND TEXT"
),
]
def test_printer(self):
for msg, pp in self.msgs:
sv = SocketVisitor()
msg.accept(sv)
self.assertEqual(sv.pp, pp)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for socket printer<commit_after>
|
# Nemubot is a smart and modulable IM bot.
# Copyright (C) 2012-2015 Mercier Pierre-Olivier
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from nemubot.message import Command, DirectAsk, Text
from nemubot.message.printer.socket import Socket as SocketVisitor
class TestSocketPrinter(unittest.TestCase):
def setUp(self):
self.msgs = [
# Texts
(
Text(message="TEXT",
),
"TEXT"
),
(
Text(message="TEXT TEXT2",
),
"TEXT TEXT2"
),
(
Text(message="TEXT @ARG=1 TEXT2",
),
"TEXT @ARG=1 TEXT2"
),
# DirectAsk
(
DirectAsk(message="TEXT",
designated="someone",
to=["#somechannel"]
),
"someone: TEXT"
),
(
# Private message to someone
DirectAsk(message="TEXT",
designated="someone",
to=["someone"]
),
"TEXT"
),
# Commands
(
Command(cmd="COMMAND",
),
"!COMMAND"
),
(
Command(cmd="COMMAND",
args=["TEXT"],
),
"!COMMAND TEXT"
),
]
def test_printer(self):
for msg, pp in self.msgs:
sv = SocketVisitor()
msg.accept(sv)
self.assertEqual(sv.pp, pp)
if __name__ == '__main__':
unittest.main()
|
Add test for socket printer# Nemubot is a smart and modulable IM bot.
# Copyright (C) 2012-2015 Mercier Pierre-Olivier
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from nemubot.message import Command, DirectAsk, Text
from nemubot.message.printer.socket import Socket as SocketVisitor
class TestSocketPrinter(unittest.TestCase):
def setUp(self):
self.msgs = [
# Texts
(
Text(message="TEXT",
),
"TEXT"
),
(
Text(message="TEXT TEXT2",
),
"TEXT TEXT2"
),
(
Text(message="TEXT @ARG=1 TEXT2",
),
"TEXT @ARG=1 TEXT2"
),
# DirectAsk
(
DirectAsk(message="TEXT",
designated="someone",
to=["#somechannel"]
),
"someone: TEXT"
),
(
# Private message to someone
DirectAsk(message="TEXT",
designated="someone",
to=["someone"]
),
"TEXT"
),
# Commands
(
Command(cmd="COMMAND",
),
"!COMMAND"
),
(
Command(cmd="COMMAND",
args=["TEXT"],
),
"!COMMAND TEXT"
),
]
def test_printer(self):
for msg, pp in self.msgs:
sv = SocketVisitor()
msg.accept(sv)
self.assertEqual(sv.pp, pp)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for socket printer<commit_after># Nemubot is a smart and modulable IM bot.
# Copyright (C) 2012-2015 Mercier Pierre-Olivier
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from nemubot.message import Command, DirectAsk, Text
from nemubot.message.printer.socket import Socket as SocketVisitor
class TestSocketPrinter(unittest.TestCase):
def setUp(self):
self.msgs = [
# Texts
(
Text(message="TEXT",
),
"TEXT"
),
(
Text(message="TEXT TEXT2",
),
"TEXT TEXT2"
),
(
Text(message="TEXT @ARG=1 TEXT2",
),
"TEXT @ARG=1 TEXT2"
),
# DirectAsk
(
DirectAsk(message="TEXT",
designated="someone",
to=["#somechannel"]
),
"someone: TEXT"
),
(
# Private message to someone
DirectAsk(message="TEXT",
designated="someone",
to=["someone"]
),
"TEXT"
),
# Commands
(
Command(cmd="COMMAND",
),
"!COMMAND"
),
(
Command(cmd="COMMAND",
args=["TEXT"],
),
"!COMMAND TEXT"
),
]
def test_printer(self):
for msg, pp in self.msgs:
sv = SocketVisitor()
msg.accept(sv)
self.assertEqual(sv.pp, pp)
if __name__ == '__main__':
unittest.main()
|
|
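The table-driven test above exercises a visitor: each message object calls accept(visitor), and the visitor accumulates the pretty-printed text in its pp attribute. A stripped-down sketch of that shape (hypothetical classes, not nemubot's real ones):

class Text(object):
    def __init__(self, message):
        self.message = message
    def accept(self, visitor):
        # dispatch to the visitor method for this node type
        visitor.visit_text(self)

class PrettyPrinter(object):
    def __init__(self):
        self.pp = ''
    def visit_text(self, node):
        self.pp = node.message

printer = PrettyPrinter()
Text('TEXT').accept(printer)
assert printer.pp == 'TEXT'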
abd359086c9ad99becfe1f61c8e22212c76bcc58
|
teuthology/test/test_repo_utils.py
|
teuthology/test/test_repo_utils.py
|
import logging
import os.path
from pytest import raises
import shutil
from .. import repo_utils
repo_utils.log.setLevel(logging.WARNING)
class TestRepoUtils(object):
empty_repo = 'https://github.com/ceph/empty'
local_dir = '/tmp/empty'
def setup(self):
assert not os.path.exists(self.local_dir)
def teardown(self):
shutil.rmtree(self.local_dir, ignore_errors=True)
def test_existing_branch(self):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
def test_non_existing_branch(self):
with raises(repo_utils.BranchNotFoundError):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'blah')
assert not os.path.exists(self.local_dir)
def test_multiple_calls_same_branch(self):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
def test_multiple_calls_different_branches(self):
with raises(repo_utils.BranchNotFoundError):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'blah')
assert not os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
with raises(repo_utils.BranchNotFoundError):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'blah')
assert not os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
|
Add unit tests for repo_utils
|
Add unit tests for repo_utils
Signed-off-by: Zack Cerza <f801c831581d4150a2793939287636221d62131e@inktank.com>
|
Python
|
mit
|
robbat2/teuthology,robbat2/teuthology,caibo2014/teuthology,tchaikov/teuthology,ktdreyer/teuthology,t-miyamae/teuthology,tchaikov/teuthology,SUSE/teuthology,zhouyuan/teuthology,yghannam/teuthology,dreamhost/teuthology,michaelsevilla/teuthology,ivotron/teuthology,ceph/teuthology,ivotron/teuthology,ktdreyer/teuthology,dreamhost/teuthology,michaelsevilla/teuthology,SUSE/teuthology,SUSE/teuthology,yghannam/teuthology,dmick/teuthology,dmick/teuthology,t-miyamae/teuthology,ceph/teuthology,caibo2014/teuthology,zhouyuan/teuthology,dmick/teuthology
|
Add unit tests for repo_utils
Signed-off-by: Zack Cerza <f801c831581d4150a2793939287636221d62131e@inktank.com>
|
import logging
import os.path
from pytest import raises
import shutil
from .. import repo_utils
repo_utils.log.setLevel(logging.WARNING)
class TestRepoUtils(object):
empty_repo = 'https://github.com/ceph/empty'
local_dir = '/tmp/empty'
def setup(self):
assert not os.path.exists(self.local_dir)
def teardown(self):
shutil.rmtree(self.local_dir, ignore_errors=True)
def test_existing_branch(self):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
def test_non_existing_branch(self):
with raises(repo_utils.BranchNotFoundError):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'blah')
assert not os.path.exists(self.local_dir)
def test_multiple_calls_same_branch(self):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
def test_multiple_calls_different_branches(self):
with raises(repo_utils.BranchNotFoundError):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'blah')
assert not os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
with raises(repo_utils.BranchNotFoundError):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'blah')
assert not os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
|
<commit_before><commit_msg>Add unit tests for repo_utils
Signed-off-by: Zack Cerza <f801c831581d4150a2793939287636221d62131e@inktank.com><commit_after>
|
import logging
import os.path
from pytest import raises
import shutil
from .. import repo_utils
repo_utils.log.setLevel(logging.WARNING)
class TestRepoUtils(object):
empty_repo = 'https://github.com/ceph/empty'
local_dir = '/tmp/empty'
def setup(self):
assert not os.path.exists(self.local_dir)
def teardown(self):
shutil.rmtree(self.local_dir, ignore_errors=True)
def test_existing_branch(self):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
def test_non_existing_branch(self):
with raises(repo_utils.BranchNotFoundError):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'blah')
assert not os.path.exists(self.local_dir)
def test_multiple_calls_same_branch(self):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
def test_multiple_calls_different_branches(self):
with raises(repo_utils.BranchNotFoundError):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'blah')
assert not os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
with raises(repo_utils.BranchNotFoundError):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'blah')
assert not os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
|
Add unit tests for repo_utils
Signed-off-by: Zack Cerza <f801c831581d4150a2793939287636221d62131e@inktank.com>import logging
import os.path
from pytest import raises
import shutil
from .. import repo_utils
repo_utils.log.setLevel(logging.WARNING)
class TestRepoUtils(object):
empty_repo = 'https://github.com/ceph/empty'
local_dir = '/tmp/empty'
def setup(self):
assert not os.path.exists(self.local_dir)
def teardown(self):
shutil.rmtree(self.local_dir, ignore_errors=True)
def test_existing_branch(self):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
def test_non_existing_branch(self):
with raises(repo_utils.BranchNotFoundError):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'blah')
assert not os.path.exists(self.local_dir)
def test_multiple_calls_same_branch(self):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
def test_multiple_calls_different_branches(self):
with raises(repo_utils.BranchNotFoundError):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'blah')
assert not os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
with raises(repo_utils.BranchNotFoundError):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'blah')
assert not os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
|
<commit_before><commit_msg>Add unit tests for repo_utils
Signed-off-by: Zack Cerza <f801c831581d4150a2793939287636221d62131e@inktank.com><commit_after>import logging
import os.path
from pytest import raises
import shutil
from .. import repo_utils
repo_utils.log.setLevel(logging.WARNING)
class TestRepoUtils(object):
empty_repo = 'https://github.com/ceph/empty'
local_dir = '/tmp/empty'
def setup(self):
assert not os.path.exists(self.local_dir)
def teardown(self):
shutil.rmtree(self.local_dir, ignore_errors=True)
def test_existing_branch(self):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
def test_non_existing_branch(self):
with raises(repo_utils.BranchNotFoundError):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'blah')
assert not os.path.exists(self.local_dir)
def test_multiple_calls_same_branch(self):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
def test_multiple_calls_different_branches(self):
with raises(repo_utils.BranchNotFoundError):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'blah')
assert not os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
with raises(repo_utils.BranchNotFoundError):
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'blah')
assert not os.path.exists(self.local_dir)
repo_utils.checkout_repo(self.empty_repo, self.local_dir, 'master')
assert os.path.exists(self.local_dir)
|
|
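The tests above depend on pytest's raises() context manager (and on live clones of a GitHub repository). In isolation, the raises() idiom looks like this:

import pytest

def checkout(branch):
    # stand-in for checkout_repo: reject unknown branches
    if branch != 'master':
        raise ValueError(branch)

def test_unknown_branch_rejected():
    with pytest.raises(ValueError):
        checkout('blah')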
7143075a6150134bb61655ca9bef8b9a4092eeab
|
tests/test_skip_comments.py
|
tests/test_skip_comments.py
|
# import pytest
from nodev.specs.generic import FlatContainer
#
# possible evolution of a ``skip_comments`` function
#
def skip_comments_v0(stream):
return [line.partition('#')[0] for line in stream]
def skip_comments_v1(stream):
for line in stream:
yield line.partition('#')[0]
def skip_comments_v2(stream):
for index, line in enumerate(stream):
value = line.partition('#')[0]
if value:
yield index, value
def skip_comments_v3(stream):
for index, line in enumerate(stream):
value, sep, comment = line.partition('#')
if value:
yield index, value, sep + comment
skip_comments = skip_comments_v0
def test_skip_comments_will_break_soon():
assert skip_comments(['# comment']) == ['']
assert skip_comments(['value # comment']) == ['value ']
assert skip_comments(['value 1', '', 'value 2']) == ['value 1', '', 'value 2']
def test_skip_comments_will_break_eventually():
assert 'value ' in skip_comments(['value # comment'])
assert 'value 1' in skip_comments(['value 1', '', 'value 2'])
assert 'value 2' in skip_comments(['value 1', '', 'value 2'])
# @pytest.mark.candidate('skip_comments')
def test_skip_comments_will_not_break():
assert 'value ' in FlatContainer(skip_comments(['value # comment']))
assert 'value 1' in FlatContainer(skip_comments(['value 1', '', 'value 2']))
assert 'value 2' in FlatContainer(skip_comments(['value 1', '', 'value 2']))
|
Add the nodev.specs example code.
|
Add the nodev.specs example code.
|
Python
|
mit
|
nodev-io/nodev-starter-kit,nodev-io/nodev-tutorial,nodev-io/nodev-starter-kit
|
Add the nodev.specs example code.
|
# import pytest
from nodev.specs.generic import FlatContainer
#
# possible evolution of a ``skip_comments`` function
#
def skip_comments_v0(stream):
return [line.partition('#')[0] for line in stream]
def skip_comments_v1(stream):
for line in stream:
yield line.partition('#')[0]
def skip_comments_v2(stream):
for index, line in enumerate(stream):
value = line.partition('#')[0]
if value:
yield index, value
def skip_comments_v3(stream):
for index, line in enumerate(stream):
value, sep, comment = line.partition('#')
if value:
yield index, value, sep + comment
skip_comments = skip_comments_v0
def test_skip_comments_will_break_soon():
assert skip_comments(['# comment']) == ['']
assert skip_comments(['value # comment']) == ['value ']
assert skip_comments(['value 1', '', 'value 2']) == ['value 1', '', 'value 2']
def test_skip_comments_will_break_eventually():
assert 'value ' in skip_comments(['value # comment'])
assert 'value 1' in skip_comments(['value 1', '', 'value 2'])
assert 'value 2' in skip_comments(['value 1', '', 'value 2'])
# @pytest.mark.candidate('skip_comments')
def test_skip_comments_will_not_break():
assert 'value ' in FlatContainer(skip_comments(['value # comment']))
assert 'value 1' in FlatContainer(skip_comments(['value 1', '', 'value 2']))
assert 'value 2' in FlatContainer(skip_comments(['value 1', '', 'value 2']))
|
<commit_before><commit_msg>Add the nodev.specs example code.<commit_after>
|
# import pytest
from nodev.specs.generic import FlatContainer
#
# possible evolution of a ``skip_comments`` function
#
def skip_comments_v0(stream):
return [line.partition('#')[0] for line in stream]
def skip_comments_v1(stream):
for line in stream:
yield line.partition('#')[0]
def skip_comments_v2(stream):
for index, line in enumerate(stream):
value = line.partition('#')[0]
if value:
yield index, value
def skip_comments_v3(stream):
for index, line in enumerate(stream):
value, sep, comment = line.partition('#')
if value:
yield index, value, sep + comment
skip_comments = skip_comments_v0
def test_skip_comments_will_break_soon():
assert skip_comments(['# comment']) == ['']
assert skip_comments(['value # comment']) == ['value ']
assert skip_comments(['value 1', '', 'value 2']) == ['value 1', '', 'value 2']
def test_skip_comments_will_break_eventually():
assert 'value ' in skip_comments(['value # comment'])
assert 'value 1' in skip_comments(['value 1', '', 'value 2'])
assert 'value 2' in skip_comments(['value 1', '', 'value 2'])
# @pytest.mark.candidate('skip_comments')
def test_skip_comments_will_not_break():
assert 'value ' in FlatContainer(skip_comments(['value # comment']))
assert 'value 1' in FlatContainer(skip_comments(['value 1', '', 'value 2']))
assert 'value 2' in FlatContainer(skip_comments(['value 1', '', 'value 2']))
|
Add the nodev.specs example code.
# import pytest
from nodev.specs.generic import FlatContainer
#
# possible evolution of a ``skip_comments`` function
#
def skip_comments_v0(stream):
return [line.partition('#')[0] for line in stream]
def skip_comments_v1(stream):
for line in stream:
yield line.partition('#')[0]
def skip_comments_v2(stream):
for index, line in enumerate(stream):
value = line.partition('#')[0]
if value:
yield index, value
def skip_comments_v3(stream):
for index, line in enumerate(stream):
value, sep, comment = line.partition('#')
if value:
yield index, value, sep + comment
skip_comments = skip_comments_v0
def test_skip_comments_will_break_soon():
assert skip_comments(['# comment']) == ['']
assert skip_comments(['value # comment']) == ['value ']
assert skip_comments(['value 1', '', 'value 2']) == ['value 1', '', 'value 2']
def test_skip_comments_will_break_eventually():
assert 'value ' in skip_comments(['value # comment'])
assert 'value 1' in skip_comments(['value 1', '', 'value 2'])
assert 'value 2' in skip_comments(['value 1', '', 'value 2'])
# @pytest.mark.candidate('skip_comments')
def test_skip_comments_will_not_break():
assert 'value ' in FlatContainer(skip_comments(['value # comment']))
assert 'value 1' in FlatContainer(skip_comments(['value 1', '', 'value 2']))
assert 'value 2' in FlatContainer(skip_comments(['value 1', '', 'value 2']))
|
<commit_before><commit_msg>Add the nodev.specs example code.<commit_after>
# import pytest
from nodev.specs.generic import FlatContainer
#
# possible evolution of a ``skip_comments`` function
#
def skip_comments_v0(stream):
return [line.partition('#')[0] for line in stream]
def skip_comments_v1(stream):
for line in stream:
yield line.partition('#')[0]
def skip_comments_v2(stream):
for index, line in enumerate(stream):
value = line.partition('#')[0]
if value:
yield index, value
def skip_comments_v3(stream):
for index, line in enumerate(stream):
value, sep, comment = line.partition('#')
if value:
yield index, value, sep + comment
skip_comments = skip_comments_v0
def test_skip_comments_will_break_soon():
assert skip_comments(['# comment']) == ['']
assert skip_comments(['value # comment']) == ['value ']
assert skip_comments(['value 1', '', 'value 2']) == ['value 1', '', 'value 2']
def test_skip_comments_will_break_eventually():
assert 'value ' in skip_comments(['value # comment'])
assert 'value 1' in skip_comments(['value 1', '', 'value 2'])
assert 'value 2' in skip_comments(['value 1', '', 'value 2'])
# @pytest.mark.candidate('skip_comments')
def test_skip_comments_will_not_break():
assert 'value ' in FlatContainer(skip_comments(['value # comment']))
assert 'value 1' in FlatContainer(skip_comments(['value 1', '', 'value 2']))
assert 'value 2' in FlatContainer(skip_comments(['value 1', '', 'value 2']))
|
|
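The point of FlatContainer in the record above is to assert that a value appears somewhere in the output, whatever shape each implementation returns: a list, a generator, or tuples of indexed values. A rough sketch of such a flattening membership test (nodev.specs' real FlatContainer may well differ):

def flat_contains(container, item):
    # treat strings as atoms so single characters don't recurse forever
    if isinstance(container, str):
        return container == item
    try:
        elements = iter(container)
    except TypeError:
        return container == item
    return any(el == item or flat_contains(el, item) for el in elements)

assert flat_contains(['value '], 'value ')
assert flat_contains([(1, 'value ', '# comment')], 'value ')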
85340b0453fff6b78a0f8c5aec98eaf359ce0b09
|
tests/test_uploader.py
|
tests/test_uploader.py
|
"""Tests for the uploader module"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from os.path import join
from datapackage import DataPackage
from pytest import fixture
from json import loads
from gobble.config import ASSETS_DIR
from gobble.uploader import Uploader
from gobble.user import User
@fixture
def user():
return User()
@fixture
def package():
filepath = join(ASSETS_DIR, 'mexican-budget-samples', 'datapackage.json')
return DataPackage(filepath)
# noinspection PyShadowingNames
def test_build_payloads(user, package):
uploader = Uploader(user, package)
expected = join(ASSETS_DIR, 'mexican-budget-samples', 'payload.json')
with open(expected) as json:
assert uploader.payload == loads(json.read())
|
Add a test for the payload generator of the Uploader class.
|
Add a test for the payload generator of the Uploader class.
|
Python
|
mit
|
openspending/gobble
|
Add a test for the payload generator of the Uploader class.
|
"""Tests for the uploader module"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from os.path import join
from datapackage import DataPackage
from pytest import fixture
from json import loads
from gobble.config import ASSETS_DIR
from gobble.uploader import Uploader
from gobble.user import User
@fixture
def user():
return User()
@fixture
def package():
filepath = join(ASSETS_DIR, 'mexican-budget-samples', 'datapackage.json')
return DataPackage(filepath)
# noinspection PyShadowingNames
def test_build_payloads(user, package):
uploader = Uploader(user, package)
expected = join(ASSETS_DIR, 'mexican-budget-samples', 'payload.json')
with open(expected) as json:
assert uploader.payload == loads(json.read())
|
<commit_before><commit_msg>Add a test for the payload generator of the Uploader class.<commit_after>
|
"""Tests for the uploader module"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from os.path import join
from datapackage import DataPackage
from pytest import fixture
from json import loads
from gobble.config import ASSETS_DIR
from gobble.uploader import Uploader
from gobble.user import User
@fixture
def user():
return User()
@fixture
def package():
filepath = join(ASSETS_DIR, 'mexican-budget-samples', 'datapackage.json')
return DataPackage(filepath)
# noinspection PyShadowingNames
def test_build_payloads(user, package):
uploader = Uploader(user, package)
expected = join(ASSETS_DIR, 'mexican-budget-samples', 'payload.json')
with open(expected) as json:
assert uploader.payload == loads(json.read())
|
Add a test for the payload generator of the Uploader class."""Tests for the uploader module"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from os.path import join
from datapackage import DataPackage
from pytest import fixture
from json import loads
from gobble.config import ASSETS_DIR
from gobble.uploader import Uploader
from gobble.user import User
@fixture
def user():
return User()
@fixture
def package():
filepath = join(ASSETS_DIR, 'mexican-budget-samples', 'datapackage.json')
return DataPackage(filepath)
# noinspection PyShadowingNames
def test_build_payloads(user, package):
uploader = Uploader(user, package)
expected = join(ASSETS_DIR, 'mexican-budget-samples', 'payload.json')
with open(expected) as json:
assert uploader.payload == loads(json.read())
|
<commit_before><commit_msg>Add a test for the payload generator of the Uploader class.<commit_after>"""Tests for the uploader module"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from os.path import join
from datapackage import DataPackage
from pytest import fixture
from json import loads
from gobble.config import ASSETS_DIR
from gobble.uploader import Uploader
from gobble.user import User
@fixture
def user():
return User()
@fixture
def package():
filepath = join(ASSETS_DIR, 'mexican-budget-samples', 'datapackage.json')
return DataPackage(filepath)
# noinspection PyShadowingNames
def test_build_payloads(user, package):
uploader = Uploader(user, package)
expected = join(ASSETS_DIR, 'mexican-budget-samples', 'payload.json')
with open(expected) as json:
assert uploader.payload == loads(json.read())
|
|
1f55c424c96dd06f21762bbe730bb4302e954539
|
tracking-id-injector.py
|
tracking-id-injector.py
|
#!/usr/bin/python
import sys
if len(sys.argv) < 3:
print('usage: python {} input_filename output_filename'.format(sys.argv[0]))
exit(1)
with open(sys.argv[1], 'r') as infile:
with open(sys.argv[2], 'w') as outfile:
outfile.write(infile.read())
|
Copy input without modification to begin with
|
Copy input without modification to begin with
|
Python
|
apache-2.0
|
msufa/tracking-id-injector,msufa/tracking-id-injector
|
Copy input without modification to begin with
|
#!/usr/bin/python
import sys
if len(sys.argv) < 3:
print('usage: python {} input_filename output_filename'.format(sys.argv[0]))
exit(1)
with open(sys.argv[1], 'r') as infile:
with open(sys.argv[2], 'w') as outfile:
outfile.write(infile.read())
|
<commit_before><commit_msg>Copy input without modification to begin with<commit_after>
|
#!/usr/bin/python
import sys
if len(sys.argv) < 3:
print('usage: python {} input_filename output_filename'.format(sys.argv[0]))
exit(1)
with open(sys.argv[1], 'r') as infile:
with open(sys.argv[2], 'w') as outfile:
outfile.write(infile.read())
|
Copy input without modification to begin with#!/usr/bin/python
import sys
if len(sys.argv) < 3:
print('usage: python {} input_filename output_filename'.format(sys.argv[0]))
exit(1)
with open(sys.argv[1], 'r') as infile:
with open(sys.argv[2], 'w') as outfile:
outfile.write(infile.read())
|
<commit_before><commit_msg>Copy input without modification to begin with<commit_after>#!/usr/bin/python
import sys
if len(sys.argv) < 3:
print('usage: python {} input_filename output_filename'.format(sys.argv[0]))
exit(1)
with open(sys.argv[1], 'r') as infile:
with open(sys.argv[2], 'w') as outfile:
outfile.write(infile.read())
|
|
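The record above slurps the whole input into memory before writing it back out; for large files the standard library streams the copy in chunks. A minimal equivalent with the same CLI shape (Python 3 spelling):

import shutil
import sys

if len(sys.argv) < 3:
    sys.exit('usage: python {} input_filename output_filename'.format(sys.argv[0]))
shutil.copyfile(sys.argv[1], sys.argv[2])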
c01a771e577994bffcbe79f84195f2ea35470c97
|
benchmarks/bench_gameoflife.py
|
benchmarks/bench_gameoflife.py
|
"""
Benchmark a game of life implementation.
"""
import numpy as np
from numba import jit
@jit(nopython=True)
def wrap(k, max_k):
if k == -1:
return max_k - 1
elif k == max_k:
return 0
else:
return k
@jit(nopython=True)
def increment_neighbors(i, j, neighbors):
ni, nj = neighbors.shape
for delta_i in (-1, 0, 1):
neighbor_i = wrap(i + delta_i, ni)
for delta_j in (-1, 0, 1):
if delta_i != 0 or delta_j != 0:
neighbor_j = wrap(j + delta_j, nj)
neighbors[neighbor_i, neighbor_j] += 1
@jit
def numba_life_step(X):
# Compute # of live neighbours per cell
neighbors = np.zeros_like(X, dtype=np.int8)
for i in range(X.shape[0]):
for j in range(X.shape[1]):
if X[i,j]:
increment_neighbors(i, j, neighbors)
# Return next iteration of the game state
return (neighbors == 3) | (X & (neighbors == 2))
start_state = np.random.RandomState(0).random_sample((300, 200)) > 0.5
def run_game(nb_iters):
state = start_state
for i in range(nb_iters):
state = numba_life_step(state)
return state
def setup():
"""
Precompile jitted functions.
"""
run_game(10)
class GameOfLife:
def time_gameoflife(self):
run_game(10)
|
Add a game of life benchmark
|
Add a game of life benchmark
|
Python
|
bsd-2-clause
|
gmarkall/numba-benchmark,numba/numba-benchmark
|
Add a game of life benchmark
|
"""
Benchmark a game of life implementation.
"""
import numpy as np
from numba import jit
@jit(nopython=True)
def wrap(k, max_k):
if k == -1:
return max_k - 1
elif k == max_k:
return 0
else:
return k
@jit(nopython=True)
def increment_neighbors(i, j, neighbors):
ni, nj = neighbors.shape
for delta_i in (-1, 0, 1):
neighbor_i = wrap(i + delta_i, ni)
for delta_j in (-1, 0, 1):
if delta_i != 0 or delta_j != 0:
neighbor_j = wrap(j + delta_j, nj)
neighbors[neighbor_i, neighbor_j] += 1
@jit
def numba_life_step(X):
# Compute # of live neighbours per cell
neighbors = np.zeros_like(X, dtype=np.int8)
for i in range(X.shape[0]):
for j in range(X.shape[1]):
if X[i,j]:
increment_neighbors(i, j, neighbors)
# Return next iteration of the game state
return (neighbors == 3) | (X & (neighbors == 2))
start_state = np.random.RandomState(0).random_sample((300, 200)) > 0.5
def run_game(nb_iters):
state = start_state
for i in range(nb_iters):
state = numba_life_step(state)
return state
def setup():
"""
Precompile jitted functions.
"""
run_game(10)
class GameOfLife:
def time_gameoflife(self):
run_game(10)
|
<commit_before><commit_msg>Add a game of life benchmark<commit_after>
|
"""
Benchmark a game of life implementation.
"""
import numpy as np
from numba import jit
@jit(nopython=True)
def wrap(k, max_k):
if k == -1:
return max_k - 1
elif k == max_k:
return 0
else:
return k
@jit(nopython=True)
def increment_neighbors(i, j, neighbors):
ni, nj = neighbors.shape
for delta_i in (-1, 0, 1):
neighbor_i = wrap(i + delta_i, ni)
for delta_j in (-1, 0, 1):
if delta_i != 0 or delta_j != 0:
neighbor_j = wrap(j + delta_j, nj)
neighbors[neighbor_i, neighbor_j] += 1
@jit
def numba_life_step(X):
# Compute # of live neighbours per cell
neighbors = np.zeros_like(X, dtype=np.int8)
for i in range(X.shape[0]):
for j in range(X.shape[1]):
if X[i,j]:
increment_neighbors(i, j, neighbors)
# Return next iteration of the game state
return (neighbors == 3) | (X & (neighbors == 2))
start_state = np.random.RandomState(0).random_sample((300, 200)) > 0.5
def run_game(nb_iters):
state = start_state
for i in range(nb_iters):
state = numba_life_step(state)
return state
def setup():
"""
Precompile jitted functions.
"""
run_game(10)
class GameOfLife:
def time_gameoflife(self):
run_game(10)
|
Add a game of life benchmark"""
Benchmark a game of life implementation.
"""
import numpy as np
from numba import jit
@jit(nopython=True)
def wrap(k, max_k):
if k == -1:
return max_k - 1
elif k == max_k:
return 0
else:
return k
@jit(nopython=True)
def increment_neighbors(i, j, neighbors):
ni, nj = neighbors.shape
for delta_i in (-1, 0, 1):
neighbor_i = wrap(i + delta_i, ni)
for delta_j in (-1, 0, 1):
if delta_i != 0 or delta_j != 0:
neighbor_j = wrap(j + delta_j, nj)
neighbors[neighbor_i, neighbor_j] += 1
@jit
def numba_life_step(X):
# Compute # of live neighbours per cell
neighbors = np.zeros_like(X, dtype=np.int8)
for i in range(X.shape[0]):
for j in range(X.shape[1]):
if X[i,j]:
increment_neighbors(i, j, neighbors)
# Return next iteration of the game state
return (neighbors == 3) | (X & (neighbors == 2))
start_state = np.random.RandomState(0).random_sample((300, 200)) > 0.5
def run_game(nb_iters):
state = start_state
for i in range(nb_iters):
state = numba_life_step(state)
return state
def setup():
"""
Precompile jitted functions.
"""
run_game(10)
class GameOfLife:
def time_gameoflife(self):
run_game(10)
|
<commit_before><commit_msg>Add a game of life benchmark<commit_after>"""
Benchmark a game of life implementation.
"""
import numpy as np
from numba import jit
@jit(nopython=True)
def wrap(k, max_k):
if k == -1:
return max_k - 1
elif k == max_k:
return 0
else:
return k
@jit(nopython=True)
def increment_neighbors(i, j, neighbors):
ni, nj = neighbors.shape
for delta_i in (-1, 0, 1):
neighbor_i = wrap(i + delta_i, ni)
for delta_j in (-1, 0, 1):
if delta_i != 0 or delta_j != 0:
neighbor_j = wrap(j + delta_j, nj)
neighbors[neighbor_i, neighbor_j] += 1
@jit
def numba_life_step(X):
# Compute # of live neighbours per cell
neighbors = np.zeros_like(X, dtype=np.int8)
for i in range(X.shape[0]):
for j in range(X.shape[1]):
if X[i,j]:
increment_neighbors(i, j, neighbors)
# Return next iteration of the game state
return (neighbors == 3) | (X & (neighbors == 2))
start_state = np.random.RandomState(0).random_sample((300, 200)) > 0.5
def run_game(nb_iters):
state = start_state
for i in range(nb_iters):
state = numba_life_step(state)
return state
def setup():
"""
Precompile jitted functions.
"""
run_game(10)
class GameOfLife:
def time_gameoflife(self):
run_game(10)
|
|
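The time_gameoflife method and module-level setup() above follow the naming conventions of a benchmark runner such as airspeed velocity (asv), which this repository appears to use: the runner discovers time_*-prefixed callables and calls setup() first, so JIT compilation is excluded from the timing. A minimal benchmark in the same shape:

class SumSuite:
    def setup(self):
        # built once per benchmark, outside the timed region
        self.data = list(range(10000))

    def time_builtin_sum(self):
        sum(self.data)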
7334de5358ba1efb942c6f7725114ddddd52af83
|
apps/polls/migrations/0002_auto_20170503_1524.py
|
apps/polls/migrations/0002_auto_20170503_1524.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('meinberlin_polls', '0001_initial'),
]
operations = [
migrations.AlterUniqueTogether(
name='vote',
unique_together=set([]),
),
]
|
Remove falsy unique together constraint
|
Remove falsy unique together constraint
|
Python
|
agpl-3.0
|
liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin
|
Remove falsy unique together constraint
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('meinberlin_polls', '0001_initial'),
]
operations = [
migrations.AlterUniqueTogether(
name='vote',
unique_together=set([]),
),
]
|
<commit_before><commit_msg>Remove falsy unique together constraint<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('meinberlin_polls', '0001_initial'),
]
operations = [
migrations.AlterUniqueTogether(
name='vote',
unique_together=set([]),
),
]
|
Remove falsy unique together constraint# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('meinberlin_polls', '0001_initial'),
]
operations = [
migrations.AlterUniqueTogether(
name='vote',
unique_together=set([]),
),
]
|
<commit_before><commit_msg>Remove falsy unique together constraint<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('meinberlin_polls', '0001_initial'),
]
operations = [
migrations.AlterUniqueTogether(
name='vote',
unique_together=set([]),
),
]
|
|
04d19c4a97836f725d602896aa00a08040660e72
|
python/format_string_precedence.py
|
python/format_string_precedence.py
|
#!/usr/bin/env python
"""Format string precedence"""
class Foo(object):
def __str__(self):
return "i am a __str__"
def __repr__(self):
return "representation of F"
def __format__(self, str_):
# wtf is this argument?
return "serious formatting brah %s" % str_
def foo(self):
return "foo"
if __name__ == "__main__":
print(" __format__ has highest precedence except for %:")
foo = Foo()
print("%s" % foo)
print("{}".format(foo))
delattr(Foo,"__format__")
print("")
print("__str__ next:")
print("%s" % foo)
print("{}".format(foo))
delattr(Foo,"__str__")
print("")
print("__repr__ finally:")
print("%s" % foo)
print("{}".format(foo))
delattr(Foo,"__repr__")
print("")
print("I have no idea where this is defined(in object?):")
print("%s" % foo)
print("{}".format(foo))
|
Create python example on formatting string
|
Create python example on formatting string
|
Python
|
mit
|
brycepg/how-to
|
Create python example on formatting string
|
#!/usr/bin/env python
"""Format string precedence"""
class Foo(object):
def __str__(self):
return "i am a __str__"
def __repr__(self):
return "representation of F"
def __format__(self, str_):
# wtf is this argument?
return "serious formatting brah %s" % str_
def foo(self):
return "foo"
if __name__ == "__main__":
print(" __format__ has highest precedence except for %:")
foo = Foo()
print("%s" % foo)
print("{}".format(foo))
delattr(Foo,"__format__")
print("")
print("__str__ next:")
print("%s" % foo)
print("{}".format(foo))
delattr(Foo,"__str__")
print("")
print("__repr__ finally:")
print("%s" % foo)
print("{}".format(foo))
delattr(Foo,"__repr__")
print("")
print("I have no idea where this is defined(in object?):")
print("%s" % foo)
print("{}".format(foo))
|
<commit_before><commit_msg>Create python example on formatting string<commit_after>
|
#!/usr/bin/env python
"""Format string precedence"""
class Foo(object):
def __str__(self):
return "i am a __str__"
def __repr__(self):
return "representation of F"
def __format__(self, str_):
# wtf is this argument?
return "serious formatting brah %s" % str_
def foo(self):
return "foo"
if __name__ == "__main__":
print(" __format__ has highest precedence except for %:")
foo = Foo()
print("%s" % foo)
print("{}".format(foo))
delattr(Foo,"__format__")
print("")
print("__str__ next:")
print("%s" % foo)
print("{}".format(foo))
delattr(Foo,"__str__")
print("")
print("__repr__ finally:")
print("%s" % foo)
print("{}".format(foo))
delattr(Foo,"__repr__")
print("")
print("I have no idea where this is defined(in object?):")
print("%s" % foo)
print("{}".format(foo))
|
Create python example on formatting string#!/usr/bin/env python
"""Format string precedence"""
class Foo(object):
def __str__(self):
return "i am a __str__"
def __repr__(self):
return "representation of F"
def __format__(self, str_):
# wtf is this argument?
return "serious formatting brah %s" % str_
def foo(self):
return "foo"
if __name__ == "__main__":
print(" __format__ has highest precedence except for %:")
foo = Foo()
print("%s" % foo)
print("{}".format(foo))
delattr(Foo,"__format__")
print("")
print("__str__ next:")
print("%s" % foo)
print("{}".format(foo))
delattr(Foo,"__str__")
print("")
print("__repr__ finally:")
print("%s" % foo)
print("{}".format(foo))
delattr(Foo,"__repr__")
print("")
print("I have no idea where this is defined(in object?):")
print("%s" % foo)
print("{}".format(foo))
|
<commit_before><commit_msg>Create python example on formatting string<commit_after>#!/usr/bin/env python
"""Format string precedence"""
class Foo(object):
def __str__(self):
return "i am a __str__"
def __repr__(self):
return "representation of F"
def __format__(self, str_):
# wtf is this argument?
return "serious formatting brah %s" % str_
def foo(self):
return "foo"
if __name__ == "__main__":
print(" __format__ has highest precedence except for %:")
foo = Foo()
print("%s" % foo)
print("{}".format(foo))
delattr(Foo,"__format__")
print("")
print("__str__ next:")
print("%s" % foo)
print("{}".format(foo))
delattr(Foo,"__str__")
print("")
print("__repr__ finally:")
print("%s" % foo)
print("{}".format(foo))
delattr(Foo,"__repr__")
print("")
print("I have no idea where this is defined(in object?):")
print("%s" % foo)
print("{}".format(foo))
|
|
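To answer the "wtf is this argument?" comment in the record above: __format__ receives the format specification, i.e. whatever follows the colon inside the braces ('' when there is none). A quick demonstration:

class Spec(object):
    def __format__(self, spec):
        return "spec was %r" % spec

print("{:>10.2f}".format(Spec()))  # spec was '>10.2f'
print("{}".format(Spec()))         # spec was ''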
e3d9a92ac816b3406033d30d29eecf4c606e1e54
|
update_snapshot_version.py
|
update_snapshot_version.py
|
from sys import argv
from tempfile import mkstemp
from shutil import move
from os import remove, close
# import os, fileinput
services = ["alchemy", "conversation", "dialog", "discovery", \
"document-conversion", "language-translation", "language-translator",\
"natural-language-classifier", "personality-insights", "retrieve-and-rank",\
"speech-to-text", "text-to-speech", "tone-analyzer", "tradeoff-analytics",\
"visual-recognition"]
print ("What's the current version of the snapshot? Please answer in format 0.0.0")
orig_version = raw_input("> ")
orig_version_string = "version = '%s-SNAPSHOT'" %orig_version
print ("What version would you like to release?")
new_version = raw_input("> ")
new_version_string = "version = '%s-SNAPSHOT'" %new_version
def replace(file_path, pattern, subst):
# Create temp file_path
fh, abs_path = mkstemp()
found = False
with open(abs_path, 'w') as new_file:
with open(file_path) as old_file:
for line in old_file:
new_file.write(line.replace(pattern, subst))
if pattern in line:
found = True
if found:
print "Updating version to %s for service: %s\n" %(subst, file_path)
remove(file_path)
move(abs_path, file_path)
else:
print "\nDid not find version specified. Please check which version you want to replace.\n"
close(fh)
return found
for service in services:
file_path = "%s/build.gradle" %service
success = replace(file_path, orig_version_string, new_version_string)
if not success:
break
|
Add script to update snapshot version.
|
Add script to update snapshot version.
|
Python
|
apache-2.0
|
JoshSharpe/java-sdk,JoshSharpe/java-sdk,JoshSharpe/java-sdk
|
Add script to update snapshot version.
|
from sys import argv
from tempfile import mkstemp
from shutil import move
from os import remove, close
# import os, fileinput
services = ["alchemy", "conversation", "dialog", "discovery", \
"document-conversion", "language-translation", "language-translator",\
"natural-language-classifier", "personality-insights", "retrieve-and-rank",\
"speech-to-text", "text-to-speech", "tone-analyzer", "tradeoff-analytics",\
"visual-recognition"]
print ("What's the current version of the snapshot? Please answer in format 0.0.0")
orig_version = raw_input("> ")
orig_version_string = "version = '%s-SNAPSHOT'" %orig_version
print ("What version would you like to release?")
new_version = raw_input("> ")
new_version_string = "version = '%s-SNAPSHOT'" %new_version
def replace(file_path, pattern, subst):
# Create temp file_path
fh, abs_path = mkstemp()
found = False
with open(abs_path, 'w') as new_file:
with open(file_path) as old_file:
for line in old_file:
new_file.write(line.replace(pattern, subst))
if pattern in line:
found = True
if found:
print "Updating version to %s for service: %s\n" %(subst, file_path)
remove(file_path)
move(abs_path, file_path)
else:
print "\nDid not find version specified. Please check which version you want to replace.\n"
close(fh)
return found
for service in services:
file_path = "%s/build.gradle" %service
success = replace(file_path, orig_version_string, new_version_string)
if not success:
break
|
<commit_before><commit_msg>Add script to update snapshot version.<commit_after>
|
from sys import argv
from tempfile import mkstemp
from shutil import move
from os import remove, close
# import os, fileinput
services = ["alchemy", "conversation", "dialog", "discovery", \
"document-conversion", "language-translation", "language-translator",\
"natural-language-classifier", "personality-insights", "retrieve-and-rank",\
"speech-to-text", "text-to-speech", "tone-analyzer", "tradeoff-analytics",\
"visual-recognition"]
print ("What's the current version of the snapshot? Please answer in format 0.0.0")
orig_version = raw_input("> ")
orig_version_string = "version = '%s-SNAPSHOT'" %orig_version
print ("What version would you like to release?")
new_version = raw_input("> ")
new_version_string = "version = '%s-SNAPSHOT'" %new_version
def replace(file_path, pattern, subst):
# Create temp file_path
fh, abs_path = mkstemp()
found = False
with open(abs_path, 'w') as new_file:
with open(file_path) as old_file:
for line in old_file:
new_file.write(line.replace(pattern, subst))
if pattern in line:
found = True
if found:
print "Updating version to %s for service: %s\n" %(subst, file_path)
remove(file_path)
move(abs_path, file_path)
else:
print "\nDid not find version specified. Please check which version you want to replace.\n"
close(fh)
return found
for service in services:
file_path = "%s/build.gradle" %service
success = replace(file_path, orig_version_string, new_version_string)
if not success:
break
|
Add script to update snapshot version.from sys import argv
from tempfile import mkstemp
from shutil import move
from os import remove, close
# import os, fileinput
services = ["alchemy", "conversation", "dialog", "discovery", \
"document-conversion", "language-translation", "language-translator",\
"natural-language-classifier", "personality-insights", "retrieve-and-rank",\
"speech-to-text", "text-to-speech", "tone-analyzer", "tradeoff-analytics",\
"visual-recognition"]
print ("What's the current version of the snapshot? Please answer in format 0.0.0")
orig_version = raw_input("> ")
orig_version_string = "version = '%s-SNAPSHOT'" %orig_version
print ("What version would you like to release?")
new_version = raw_input("> ")
new_version_string = "version = '%s-SNAPSHOT'" %new_version
def replace(file_path, pattern, subst):
# Create temp file_path
fh, abs_path = mkstemp()
found = False
with open(abs_path, 'w') as new_file:
with open(file_path) as old_file:
for line in old_file:
new_file.write(line.replace(pattern, subst))
if pattern in line:
found = True
if found:
print "Updating version to %s for service: %s\n" %(subst, file_path)
remove(file_path)
move(abs_path, file_path)
else:
print "\nDid not find version specified. Please check which version you want to replace.\n"
close(fh)
return found
for service in services:
file_path = "%s/build.gradle" %service
success = replace(file_path, orig_version_string, new_version_string)
if not success:
break
|
<commit_before><commit_msg>Add script to update snapshot version.<commit_after>from sys import argv
from tempfile import mkstemp
from shutil import move
from os import remove, close
# import os, fileinput
services = ["alchemy", "conversation", "dialog", "discovery", \
"document-conversion", "language-translation", "language-translator",\
"natural-language-classifier", "personality-insights", "retrieve-and-rank",\
"speech-to-text", "text-to-speech", "tone-analyzer", "tradeoff-analytics",\
"visual-recognition"]
print ("What's the current version of the snapshot? Please answer in format 0.0.0")
orig_version = raw_input("> ")
orig_version_string = "version = '%s-SNAPSHOT'" %orig_version
print ("What version would you like to release?")
new_version = raw_input("> ")
new_version_string = "version = '%s-SNAPSHOT'" %new_version
def replace(file_path, pattern, subst):
# Create temp file_path
fh, abs_path = mkstemp()
found = False
with open(abs_path, 'w') as new_file:
with open(file_path) as old_file:
for line in old_file:
new_file.write(line.replace(pattern, subst))
if pattern in line:
found = True
    close(fh)  # always release the file descriptor that mkstemp opened
    if found:
        print "Updating version to %s for service: %s\n" % (subst, file_path)
        remove(file_path)
        move(abs_path, file_path)
    else:
        print "\nDid not find version specified. Please check which version you want to replace.\n"
        remove(abs_path)  # discard the unused temp file
return found
for service in services:
file_path = "%s/build.gradle" %service
success = replace(file_path, orig_version_string, new_version_string)
if not success:
break
|
|
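A minimal check of the replace() helper in the record above, run against a throwaway build.gradle (the directory, file name, and version numbers here are hypothetical; the sketch assumes replace() is defined in the same module):
import os
import tempfile

def check_replace():
    # Write a fake build.gradle containing the current snapshot version.
    workdir = tempfile.mkdtemp()
    path = os.path.join(workdir, "build.gradle")
    with open(path, "w") as f:
        f.write("version = '1.2.3-SNAPSHOT'\n")
    # Swap the version string and confirm the rewrite happened.
    found = replace(path, "version = '1.2.3-SNAPSHOT'", "version = '1.2.4-SNAPSHOT'")
    with open(path) as f:
        assert found and "1.2.4-SNAPSHOT" in f.read()

check_replace()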
86a92e78634ca259daa9f7cc681fd8ffbf67aed6
|
miso-tables.py
|
miso-tables.py
|
"""
write MISO summary tables out to tidy format
"""
from argparse import ArgumentParser
import pandas as pd
import os
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)s %(levelname)s %(message)s',
datefmt='%m-%d-%y %H:%M:%S')
logger = logging.getLogger("miso-compare")
def is_misosummary(filename):
return filename.endswith(".miso_summary")
def get_summarytype(filename):
stem = os.path.basename(os.path.splitext(filename)[0])
return stem.split("-")
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("summarydir")
args = parser.parse_args()
outfile = os.path.join(args.summarydir, "combined-miso.csv")
misofiles = [x for x in os.listdir(args.summarydir) if is_misosummary(x)]
frame = pd.DataFrame()
reslist = []
for misofile in misofiles:
logger.info("Parsing %s." % misofile)
samplename, eventtype = get_summarytype(misofile)
misopath = os.path.join(args.summarydir, misofile)
df = pd.read_table(misopath, sep="\t", header=0)
df['samplename'] = samplename
df['eventtype'] = eventtype
reslist.append(df)
frame = pd.concat(reslist)
logger.info("Writing tidy MISO summaries to %s." % outfile)
frame.to_csv(outfile, index=False)
|
Make a tidy file of all miso summaries.
|
Make a tidy file of all miso summaries.
|
Python
|
mit
|
roryk/junkdrawer,roryk/junkdrawer
|
Make a tidy file of all miso summaries.
|
"""
write MISO summary tables out to tidy format
"""
from argparse import ArgumentParser
import pandas as pd
import os
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)s %(levelname)s %(message)s',
datefmt='%m-%d-%y %H:%M:%S')
logger = logging.getLogger("miso-compare")
def is_misosummary(filename):
return filename.endswith(".miso_summary")
def get_summarytype(filename):
stem = os.path.basename(os.path.splitext(filename)[0])
return stem.split("-")
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("summarydir")
args = parser.parse_args()
outfile = os.path.join(args.summarydir, "combined-miso.csv")
misofiles = [x for x in os.listdir(args.summarydir) if is_misosummary(x)]
frame = pd.DataFrame()
reslist = []
for misofile in misofiles:
logger.info("Parsing %s." % misofile)
samplename, eventtype = get_summarytype(misofile)
misopath = os.path.join(args.summarydir, misofile)
df = pd.read_table(misopath, sep="\t", header=0)
df['samplename'] = samplename
df['eventtype'] = eventtype
reslist.append(df)
frame = pd.concat(reslist)
logger.info("Writing tidy MISO summaries to %s." % outfile)
frame.to_csv(outfile, index=False)
|
<commit_before><commit_msg>Make a tidy file of all miso summaries.<commit_after>
|
"""
write MISO summary tables out to tidy format
"""
from argparse import ArgumentParser
import pandas as pd
import os
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)s %(levelname)s %(message)s',
datefmt='%m-%d-%y %H:%M:%S')
logger = logging.getLogger("miso-compare")
def is_misosummary(filename):
return filename.endswith(".miso_summary")
def get_summarytype(filename):
stem = os.path.basename(os.path.splitext(filename)[0])
return stem.split("-")
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("summarydir")
args = parser.parse_args()
outfile = os.path.join(args.summarydir, "combined-miso.csv")
misofiles = [x for x in os.listdir(args.summarydir) if is_misosummary(x)]
frame = pd.DataFrame()
reslist = []
for misofile in misofiles:
logger.info("Parsing %s." % misofile)
samplename, eventtype = get_summarytype(misofile)
misopath = os.path.join(args.summarydir, misofile)
df = pd.read_table(misopath, sep="\t", header=0)
df['samplename'] = samplename
df['eventtype'] = eventtype
reslist.append(df)
frame = pd.concat(reslist)
logger.info("Writing tidy MISO summaries to %s." % outfile)
frame.to_csv(outfile, index=False)
|
Make a tidy file of all miso summaries.
"""
write MISO summary tables out to tidy format
"""
from argparse import ArgumentParser
import pandas as pd
import os
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)s %(levelname)s %(message)s',
datefmt='%m-%d-%y %H:%M:%S')
logger = logging.getLogger("miso-compare")
def is_misosummary(filename):
return filename.endswith(".miso_summary")
def get_summarytype(filename):
stem = os.path.basename(os.path.splitext(filename)[0])
return stem.split("-")
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("summarydir")
args = parser.parse_args()
outfile = os.path.join(args.summarydir, "combined-miso.csv")
misofiles = [x for x in os.listdir(args.summarydir) if is_misosummary(x)]
frame = pd.DataFrame()
reslist = []
for misofile in misofiles:
logger.info("Parsing %s." % misofile)
samplename, eventtype = get_summarytype(misofile)
misopath = os.path.join(args.summarydir, misofile)
df = pd.read_table(misopath, sep="\t", header=0)
df['samplename'] = samplename
df['eventtype'] = eventtype
reslist.append(df)
frame = pd.concat(reslist)
logger.info("Writing tidy MISO summaries to %s." % outfile)
frame.to_csv(outfile, index=False)
|
<commit_before><commit_msg>Make a tidy file of all miso summaries.<commit_after>"""
write MISO summary tables out to tidy format
"""
from argparse import ArgumentParser
import pandas as pd
import os
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)s %(levelname)s %(message)s',
datefmt='%m-%d-%y %H:%M:%S')
logger = logging.getLogger("miso-compare")
def is_misosummary(filename):
return filename.endswith(".miso_summary")
def get_summarytype(filename):
stem = os.path.basename(os.path.splitext(filename)[0])
return stem.split("-")
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("summarydir")
args = parser.parse_args()
outfile = os.path.join(args.summarydir, "combined-miso.csv")
misofiles = [x for x in os.listdir(args.summarydir) if is_misosummary(x)]
frame = pd.DataFrame()
reslist = []
for misofile in misofiles:
logger.info("Parsing %s." % misofile)
samplename, eventtype = get_summarytype(misofile)
misopath = os.path.join(args.summarydir, misofile)
df = pd.read_table(misopath, sep="\t", header=0)
df['samplename'] = samplename
df['eventtype'] = eventtype
reslist.append(df)
frame = pd.concat(reslist)
logger.info("Writing tidy MISO summaries to %s." % outfile)
frame.to_csv(outfile, index=False)
|
|
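A sketch of consuming the tidy table the script above writes, once it has run (assumes pandas is installed and combined-miso.csv exists in the working directory):
import pandas as pd

# Load the tidy table back and count MISO events per sample and event type.
combined = pd.read_csv("combined-miso.csv")
print(combined.groupby(["samplename", "eventtype"]).size())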
10695f2ce7488184f8c0c306cf33e35533708ef4
|
games/management/commands/populate_popularity.py
|
games/management/commands/populate_popularity.py
|
"""Updates the popularity of all games"""
from django.core.management.base import BaseCommand
from django.db.models import Count
from games.models import Game
class Command(BaseCommand):
"""Command to update the popularity"""
help = "My shiny new management command."
def handle(self, *args, **options):
for game in Game.objects.all():
game.popularity = game.libraries.all().count()
game.save()
|
Add command to update popularity of games
|
Add command to update popularity of games
|
Python
|
agpl-3.0
|
lutris/website,lutris/website,lutris/website,lutris/website
|
Add command to update popularity of games
|
"""Updates the popularity of all games"""
from django.core.management.base import BaseCommand
from django.db.models import Count
from games.models import Game
class Command(BaseCommand):
"""Command to update the popularity"""
help = "My shiny new management command."
def handle(self, *args, **options):
for game in Game.objects.all():
game.popularity = game.libraries.all().count()
game.save()
|
<commit_before><commit_msg>Add command to update popularity of games<commit_after>
|
"""Updates the popularity of all games"""
from django.core.management.base import BaseCommand
from django.db.models import Count
from games.models import Game
class Command(BaseCommand):
"""Command to update the popularity"""
help = "My shiny new management command."
def handle(self, *args, **options):
for game in Game.objects.all():
game.popularity = game.libraries.all().count()
game.save()
|
Add command to update popularity of games"""Updates the popularity of all games"""
from django.core.management.base import BaseCommand
from django.db.models import Count
from games.models import Game
class Command(BaseCommand):
"""Command to update the popularity"""
help = "My shiny new management command."
def handle(self, *args, **options):
for game in Game.objects.all():
game.popularity = game.libraries.all().count()
game.save()
|
<commit_before><commit_msg>Add command to update popularity of games<commit_after>"""Updates the popularity of all games"""
from django.core.management.base import BaseCommand
from django.db.models import Count
from games.models import Game
class Command(BaseCommand):
"""Command to update the popularity"""
help = "My shiny new management command."
def handle(self, *args, **options):
for game in Game.objects.all():
game.popularity = game.libraries.all().count()
game.save()
|
|
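The handle() loop above issues one COUNT query per game; a sketch of the same update using the Count aggregate the command already imports (assumes 'libraries' is the reverse relation name, as the loop's game.libraries suggests):
from django.db.models import Count
from games.models import Game

def update_popularity():
    # One annotated queryset instead of a COUNT query per game.
    for game in Game.objects.annotate(n_libraries=Count("libraries")):
        game.popularity = game.n_libraries
        game.save()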
dab3b241552734d6810013f53d55c2fec3e1e512
|
CodeFights/createDie.py
|
CodeFights/createDie.py
|
#!/usr/local/bin/python
# Code Fights Create Die Problem
import random
def createDie(seed, n):
class Die(object):
pass
class Game(object):
die = Die(seed, n)
return Game.die
def main():
tests = [
[37237, 5, 3],
[36706, 12, 9],
[21498, 10, 10],
[2998, 6, 3],
[5509, 10, 4]
]
for t in tests:
res = createDie(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: createDie({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: createDie({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Set up Code Fights create die problem
|
Set up Code Fights create die problem
|
Python
|
mit
|
HKuz/Test_Code
|
Set up Code Fights create die problem
|
#!/usr/local/bin/python
# Code Fights Create Die Problem
import random
def createDie(seed, n):
class Die(object):
pass
class Game(object):
die = Die(seed, n)
return Game.die
def main():
tests = [
[37237, 5, 3],
[36706, 12, 9],
[21498, 10, 10],
[2998, 6, 3],
[5509, 10, 4]
]
for t in tests:
res = createDie(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: createDie({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: createDie({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Set up Code Fights create die problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Create Die Problem
import random
def createDie(seed, n):
class Die(object):
pass
class Game(object):
die = Die(seed, n)
return Game.die
def main():
tests = [
[37237, 5, 3],
[36706, 12, 9],
[21498, 10, 10],
[2998, 6, 3],
[5509, 10, 4]
]
for t in tests:
res = createDie(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: createDie({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: createDie({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Set up Code Fights create die problem
#!/usr/local/bin/python
# Code Fights Create Die Problem
import random
def createDie(seed, n):
class Die(object):
pass
class Game(object):
die = Die(seed, n)
return Game.die
def main():
tests = [
[37237, 5, 3],
[36706, 12, 9],
[21498, 10, 10],
[2998, 6, 3],
[5509, 10, 4]
]
for t in tests:
res = createDie(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: createDie({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: createDie({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Set up Code Fights create die problem<commit_after>#!/usr/local/bin/python
# Code Fights Create Die Problem
import random
def createDie(seed, n):
class Die(object):
pass
class Game(object):
die = Die(seed, n)
return Game.die
def main():
tests = [
[37237, 5, 3],
[36706, 12, 9],
[21498, 10, 10],
[2998, 6, 3],
[5509, 10, 4]
]
for t in tests:
res = createDie(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: createDie({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: createDie({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
|
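The Die/Game classes above are an intentionally empty scaffold; one common way to complete this CodeFights exercise is to override __new__ so that Die(seed, n) evaluates to a seeded roll. Whether this reproduces the expected values in the record's tests depends on the platform's random module, so treat it as a sketch:
import random

def createDie(seed, n):
    class Die(object):
        def __new__(cls, seed, n):
            # Seed the generator and return a face value instead of an instance.
            random.seed(seed)
            return random.randint(1, n)
    class Game(object):
        die = Die(seed, n)
    return Game.die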
ca32188cf154ed3f32aeb82ef15d42aee472cc77
|
tests/test_data_generator.py
|
tests/test_data_generator.py
|
from chai import Chai
from datetime import datetime
from dfaker.data_generator import dfaker
class Test_Pump_Settings(Chai):
def test_smoke(self):
pass
#def test_type(self):
#start_time = datetime(2015, 1, 1, 0, 0, 0)
#zone_name = 'US/Pacific'
#pump_name = 'Medtronic'
#settings_list = make_pump_settings(start_time, zone_name, pump_name)
#settings_data = settings_list[0]
#self.assert_equals(settings_data['type'], 'pumpSettings')
|
Add test for last commit
|
Add test for last commit
|
Python
|
bsd-2-clause
|
tidepool-org/dfaker
|
Add test for last commit
|
from chai import Chai
from datetime import datetime
from dfaker.data_generator import dfaker
class Test_Pump_Settings(Chai):
def test_smoke(self):
pass
#def test_type(self):
#start_time = datetime(2015, 1, 1, 0, 0, 0)
#zone_name = 'US/Pacific'
#pump_name = 'Medtronic'
#settings_list = make_pump_settings(start_time, zone_name, pump_name)
#settings_data = settings_list[0]
#self.assert_equals(settings_data['type'], 'pumpSettings')
|
<commit_before><commit_msg>Add test for last commit<commit_after>
|
from chai import Chai
from datetime import datetime
from dfaker.data_generator import dfaker
class Test_Pump_Settings(Chai):
def test_smoke(self):
pass
#def test_type(self):
#start_time = datetime(2015, 1, 1, 0, 0, 0)
#zone_name = 'US/Pacific'
#pump_name = 'Medtronic'
#settings_list = make_pump_settings(start_time, zone_name, pump_name)
#settings_data = settings_list[0]
#self.assert_equals(settings_data['type'], 'pumpSettings')
|
Add test for last commit
from chai import Chai
from datetime import datetime
from dfaker.data_generator import dfaker
class Test_Pump_Settings(Chai):
def test_smoke(self):
pass
#def test_type(self):
#start_time = datetime(2015, 1, 1, 0, 0, 0)
#zone_name = 'US/Pacific'
#pump_name = 'Medtronic'
#settings_list = make_pump_settings(start_time, zone_name, pump_name)
#settings_data = settings_list[0]
#self.assert_equals(settings_data['type'], 'pumpSettings')
|
<commit_before><commit_msg>Add test for last commit<commit_after>from chai import Chai
from datetime import datetime
from dfaker.data_generator import dfaker
class Test_Pump_Settings(Chai):
def test_smoke(self):
pass
#def test_type(self):
#start_time = datetime(2015, 1, 1, 0, 0, 0)
#zone_name = 'US/Pacific'
#pump_name = 'Medtronic'
#settings_list = make_pump_settings(start_time, zone_name, pump_name)
#settings_data = settings_list[0]
#self.assert_equals(settings_data['type'], 'pumpSettings')
|
|
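A sketch of what the commented-out test above could look like once the generator is importable; the make_pump_settings location and signature are assumptions read off the comments, not dfaker's actual API:
from datetime import datetime

def test_type_sketch():
    start_time = datetime(2015, 1, 1, 0, 0, 0)
    # Hypothetical call mirroring the commented-out lines above.
    settings_list = make_pump_settings(start_time, 'US/Pacific', 'Medtronic')
    assert settings_list[0]['type'] == 'pumpSettings'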
af83fd1f043696f5408c878306d4cc4928af97ba
|
tests/test_iterable_event.py
|
tests/test_iterable_event.py
|
from unittest import TestCase
import numpy as np
import numpy.testing as npt
from nimble import Events
class TestClassIterable(TestCase):
def setUp(self):
conditional_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_array > 0)
self.events = Events(condition)
def test_start_stop_loc(self):
test_starts = []
test_stops = []
for event in self.events:
test_starts.append(event.start)
test_stops.append(event.stop)
npt.assert_array_equal(self.events.starts, test_starts)
npt.assert_array_equal(self.events.stops, test_stops)
def test_durations(self):
test_durations = []
for event in self.events:
test_durations.append(event.duration)
npt.assert_array_equal(self.events.durations, test_durations)
|
Add unit tests for class as iterable
|
Add unit tests for class as iterable
|
Python
|
mit
|
rwhitt2049/nimble,rwhitt2049/trouve
|
Add unit tests for class as iterable
|
from unittest import TestCase
import numpy as np
import numpy.testing as npt
from nimble import Events
class TestClassIterable(TestCase):
def setUp(self):
conditional_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_array > 0)
self.events = Events(condition)
def test_start_stop_loc(self):
test_starts = []
test_stops = []
for event in self.events:
test_starts.append(event.start)
test_stops.append(event.stop)
npt.assert_array_equal(self.events.starts, test_starts)
npt.assert_array_equal(self.events.stops, test_stops)
def test_durations(self):
test_durations = []
for event in self.events:
test_durations.append(event.duration)
npt.assert_array_equal(self.events.durations, test_durations)
|
<commit_before><commit_msg>Add unit tests for class as iterable<commit_after>
|
from unittest import TestCase
import numpy as np
import numpy.testing as npt
from nimble import Events
class TestClassIterable(TestCase):
def setUp(self):
conditional_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_array > 0)
self.events = Events(condition)
def test_start_stop_loc(self):
test_starts = []
test_stops = []
for event in self.events:
test_starts.append(event.start)
test_stops.append(event.stop)
npt.assert_array_equal(self.events.starts, test_starts)
npt.assert_array_equal(self.events.stops, test_stops)
def test_durations(self):
test_durations = []
for event in self.events:
test_durations.append(event.duration)
npt.assert_array_equal(self.events.durations, test_durations)
|
Add unit tests for class as iterable
from unittest import TestCase
import numpy as np
import numpy.testing as npt
from nimble import Events
class TestClassIterable(TestCase):
def setUp(self):
conditional_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_array > 0)
self.events = Events(condition)
def test_start_stop_loc(self):
test_starts = []
test_stops = []
for event in self.events:
test_starts.append(event.start)
test_stops.append(event.stop)
npt.assert_array_equal(self.events.starts, test_starts)
npt.assert_array_equal(self.events.stops, test_stops)
def test_durations(self):
test_durations = []
for event in self.events:
test_durations.append(event.duration)
npt.assert_array_equal(self.events.durations, test_durations)
|
<commit_before><commit_msg>Add unit tests for class as iterable<commit_after>from unittest import TestCase
import numpy as np
import numpy.testing as npt
from nimble import Events
class TestClassIterable(TestCase):
def setUp(self):
conditional_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_array > 0)
self.events = Events(condition)
def test_start_stop_loc(self):
test_starts = []
test_stops = []
for event in self.events:
test_starts.append(event.start)
test_stops.append(event.stop)
npt.assert_array_equal(self.events.starts, test_starts)
npt.assert_array_equal(self.events.stops, test_stops)
def test_durations(self):
test_durations = []
for event in self.events:
test_durations.append(event.duration)
npt.assert_array_equal(self.events.durations, test_durations)
|
|
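For the condition built in setUp there are three runs of ones; a plain-numpy sketch of the starts and stops the loops above should collect, assuming nimble reports stop indices as exclusive run ends (its convention may differ):
import numpy as np

active = (np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1]) > 0).astype(int)
# Pad with zeros so runs touching the array edges still produce edges.
edges = np.diff(np.concatenate(([0], active, [0])))
starts = np.where(edges == 1)[0]    # array([ 1,  7, 10])
stops = np.where(edges == -1)[0]    # array([ 4,  9, 12])
print(list(zip(starts, stops)))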
0eb9e9731e8f998abe8364fab25f6da01d57e93a
|
tests/test_util.py
|
tests/test_util.py
|
from lib import util
def test_cachedproperty():
class Target:
def __init__(self):
self.call_count = 0
@util.cachedproperty
def prop(self):
self.call_count += 1
return self.call_count
t = Target()
assert t.prop == t.prop == 1
def test_deep_getsizeof():
int_t = util.deep_getsizeof(1)
assert util.deep_getsizeof([1, 1]) > 2 * int_t
assert util.deep_getsizeof({1: 1}) > 2 * int_t
assert util.deep_getsizeof({1: {1: 1}}) > 3 * int_t
class Base:
pass
class A(Base):
pass
class B(Base):
pass
def test_subclasses():
assert util.subclasses(Base) == [A, B]
def test_chunks():
assert list(util.chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
def test_increment_byte_string():
assert util.increment_byte_string(b'1') == b'2'
assert util.increment_byte_string(b'\x01\x01') == b'\x01\x02'
assert util.increment_byte_string(b'\xff\xff') == b'\x01\x00\x00'
|
Add unit tests for the methods in util
|
Add unit tests for the methods in util
|
Python
|
mit
|
bauerj/electrumx,bauerj/electrumx,Groestlcoin/electrumx-grs,shsmith/electrumx,Crowndev/electrumx,erasmospunk/electrumx,Groestlcoin/electrumx-grs,thelazier/electrumx,shsmith/electrumx,thelazier/electrumx,Crowndev/electrumx,erasmospunk/electrumx
|
Add unit tests for the methods in util
|
from lib import util
def test_cachedproperty():
class Target:
def __init__(self):
self.call_count = 0
@util.cachedproperty
def prop(self):
self.call_count += 1
return self.call_count
t = Target()
assert t.prop == t.prop == 1
def test_deep_getsizeof():
int_t = util.deep_getsizeof(1)
assert util.deep_getsizeof([1, 1]) > 2 * int_t
assert util.deep_getsizeof({1: 1}) > 2 * int_t
assert util.deep_getsizeof({1: {1: 1}}) > 3 * int_t
class Base:
pass
class A(Base):
pass
class B(Base):
pass
def test_subclasses():
assert util.subclasses(Base) == [A, B]
def test_chunks():
assert list(util.chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
def test_increment_byte_string():
assert util.increment_byte_string(b'1') == b'2'
assert util.increment_byte_string(b'\x01\x01') == b'\x01\x02'
assert util.increment_byte_string(b'\xff\xff') == b'\x01\x00\x00'
|
<commit_before><commit_msg>Add unit tests for the methods in util<commit_after>
|
from lib import util
def test_cachedproperty():
class Target:
def __init__(self):
self.call_count = 0
@util.cachedproperty
def prop(self):
self.call_count += 1
return self.call_count
t = Target()
assert t.prop == t.prop == 1
def test_deep_getsizeof():
int_t = util.deep_getsizeof(1)
assert util.deep_getsizeof([1, 1]) > 2 * int_t
assert util.deep_getsizeof({1: 1}) > 2 * int_t
assert util.deep_getsizeof({1: {1: 1}}) > 3 * int_t
class Base:
pass
class A(Base):
pass
class B(Base):
pass
def test_subclasses():
assert util.subclasses(Base) == [A, B]
def test_chunks():
assert list(util.chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
def test_increment_byte_string():
assert util.increment_byte_string(b'1') == b'2'
assert util.increment_byte_string(b'\x01\x01') == b'\x01\x02'
assert util.increment_byte_string(b'\xff\xff') == b'\x01\x00\x00'
|
Add unit tests for the methods in util
from lib import util
def test_cachedproperty():
class Target:
def __init__(self):
self.call_count = 0
@util.cachedproperty
def prop(self):
self.call_count += 1
return self.call_count
t = Target()
assert t.prop == t.prop == 1
def test_deep_getsizeof():
int_t = util.deep_getsizeof(1)
assert util.deep_getsizeof([1, 1]) > 2 * int_t
assert util.deep_getsizeof({1: 1}) > 2 * int_t
assert util.deep_getsizeof({1: {1: 1}}) > 3 * int_t
class Base:
pass
class A(Base):
pass
class B(Base):
pass
def test_subclasses():
assert util.subclasses(Base) == [A, B]
def test_chunks():
assert list(util.chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
def test_increment_byte_string():
assert util.increment_byte_string(b'1') == b'2'
assert util.increment_byte_string(b'\x01\x01') == b'\x01\x02'
assert util.increment_byte_string(b'\xff\xff') == b'\x01\x00\x00'
|
<commit_before><commit_msg>Add unit tests for the methods in util<commit_after>from lib import util
def test_cachedproperty():
class Target:
def __init__(self):
self.call_count = 0
@util.cachedproperty
def prop(self):
self.call_count += 1
return self.call_count
t = Target()
assert t.prop == t.prop == 1
def test_deep_getsizeof():
int_t = util.deep_getsizeof(1)
assert util.deep_getsizeof([1, 1]) > 2 * int_t
assert util.deep_getsizeof({1: 1}) > 2 * int_t
assert util.deep_getsizeof({1: {1: 1}}) > 3 * int_t
class Base:
pass
class A(Base):
pass
class B(Base):
pass
def test_subclasses():
assert util.subclasses(Base) == [A, B]
def test_chunks():
assert list(util.chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
def test_increment_byte_string():
assert util.increment_byte_string(b'1') == b'2'
assert util.increment_byte_string(b'\x01\x01') == b'\x01\x02'
assert util.increment_byte_string(b'\xff\xff') == b'\x01\x00\x00'
|
|
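A sketch of an increment_byte_string that satisfies the three assertions above, treating the bytes as a big-endian integer (the actual lib.util implementation may differ):
def increment_byte_string(b):
    n = int.from_bytes(b, 'big') + 1
    # Grow by a byte when the increment overflows, e.g. b'\xff\xff' -> b'\x01\x00\x00'.
    length = max(len(b), (n.bit_length() + 7) // 8)
    return n.to_bytes(length, 'big')

assert increment_byte_string(b'1') == b'2'
assert increment_byte_string(b'\x01\x01') == b'\x01\x02'
assert increment_byte_string(b'\xff\xff') == b'\x01\x00\x00'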
461c008ebb5b1c048fc8117ee1730e84ee3d2a93
|
osf/migrations/0169_merge_20190618_1429.py
|
osf/migrations/0169_merge_20190618_1429.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-06-18 14:29
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0163_populate_conference_submissions'),
('osf', '0168_merge_20190610_2308'),
]
operations = [
]
|
Add merge migration - submissions migrations with develop (guardian).
|
Add merge migration - submissions migrations with develop (guardian).
|
Python
|
apache-2.0
|
Johnetordoff/osf.io,CenterForOpenScience/osf.io,felliott/osf.io,felliott/osf.io,mfraezz/osf.io,felliott/osf.io,cslzchen/osf.io,mfraezz/osf.io,Johnetordoff/osf.io,baylee-d/osf.io,cslzchen/osf.io,mattclark/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,adlius/osf.io,baylee-d/osf.io,mfraezz/osf.io,baylee-d/osf.io,aaxelb/osf.io,saradbowman/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,mattclark/osf.io,aaxelb/osf.io,CenterForOpenScience/osf.io,Johnetordoff/osf.io,adlius/osf.io,brianjgeiger/osf.io,CenterForOpenScience/osf.io,saradbowman/osf.io,mfraezz/osf.io,aaxelb/osf.io,aaxelb/osf.io,mattclark/osf.io,adlius/osf.io,adlius/osf.io,felliott/osf.io,cslzchen/osf.io
|
Add merge migration - submissions migrations with develop (guardian).
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-06-18 14:29
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0163_populate_conference_submissions'),
('osf', '0168_merge_20190610_2308'),
]
operations = [
]
|
<commit_before><commit_msg>Add merge migration - submissions migrations with develop (guardian).<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-06-18 14:29
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0163_populate_conference_submissions'),
('osf', '0168_merge_20190610_2308'),
]
operations = [
]
|
Add merge migration - submissions migrations with develop (guardian).
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-06-18 14:29
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0163_populate_conference_submissions'),
('osf', '0168_merge_20190610_2308'),
]
operations = [
]
|
<commit_before><commit_msg>Add merge migration - submissions migrations with develop (guardian).<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-06-18 14:29
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0163_populate_conference_submissions'),
('osf', '0168_merge_20190610_2308'),
]
operations = [
]
|
|
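Empty merge migrations like the one above are normally generated rather than hand-written; a sketch of the programmatic equivalent of "python manage.py makemigrations --merge osf" (assumes a configured Django settings module):
from django.core.management import call_command

# Let Django locate the two divergent leaf migrations and write the merge node.
call_command("makemigrations", "osf", merge=True, interactive=False)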
63a161f596ab166d54e525eaa6185067ea76c891
|
tornado/test/run_pyversion_tests.py
|
tornado/test/run_pyversion_tests.py
|
#!/usr/bin/env python
"""Runs the tornado test suite with all supported python interpreters."""
import os
import subprocess
import sys
INTERPRETERS = [
"python2.5",
"python2.6",
"python2.7",
"auto2to3",
"pypy",
]
def exists_on_path(filename):
for dir in os.environ["PATH"].split(":"):
if os.path.exists(os.path.join(dir, filename)):
return True
return False
def main():
for interpreter in INTERPRETERS:
print "=================== %s =======================" % interpreter
if not exists_on_path(interpreter):
print "Interpreter not found, skipping..."
continue
args = [interpreter, "-m", "tornado.test.runtests"] + sys.argv[1:]
ret = subprocess.call(args)
if ret != 0:
print "Tests on %s failed with exit code %d" % (interpreter, ret)
sys.exit(ret)
print "All tests passed"
if __name__ == "__main__":
main()
|
Add script to run test suite with multiple python versions at once
|
Add script to run test suite with multiple python versions at once
|
Python
|
apache-2.0
|
djt5019/tornado,fengshao0907/tornado,QuanZag/tornado,bywbilly/tornado,frtmelody/tornado,ovidiucp/tornado,ifduyue/tornado,cyrilMargaria/tornado,liqueur/tornado,allenl203/tornado,gitchs/tornado,AlphaStaxLLC/tornado,zguangyu/tornado,ListFranz/tornado,tornadoweb/tornado,kevinge314gh/tornado,icejoywoo/tornado,0xkag/tornado,frtmelody/tornado,Windsooon/tornado,djt5019/tornado,wujuguang/tornado,xinyu7/tornado,icejoywoo/tornado,nbargnesi/tornado,Geoion/tornado,Snamint/tornado,noxiouz/tornado,insflow/tornado,VShangxiao/tornado,mr-ping/tornado,mlyundin/tornado,VShangxiao/tornado,coderhaoxin/tornado,gitchs/tornado,mivade/tornado,ColorFuzzy/tornado,wxhzk/tornado-1,elijah513/tornado,304471720/tornado,dongpinglai/my_tornado,nordaux/tornado,Geoion/tornado,dsseter/tornado,jparise/tornado,anjan-srivastava/tornado,bdarnell/tornado,xinyu7/tornado,Fydot/tornado,LTD-Beget/tornado,Geoion/tornado,Drooids/tornado,yuyangit/tornado,mehmetkose/tornado,kevinge314gh/tornado,ymero/tornado,importcjj/tornado,z-fork/tornado,cyrusin/tornado,gwillem/tornado,lujinda/tornado,chenxiaba/tornado,Fydot/tornado,kangbiao/tornado,yangkf1985/tornado,Batterfii/tornado,Drooids/tornado,felixonmars/tornado,kangbiao/tornado,mehmetkose/tornado,shaohung001/tornado,codeb2cc/tornado,mlyundin/tornado,ListFranz/tornado,akalipetis/tornado,fengsp/tornado,cyrusin/tornado,ydaniv/tornado,allenl203/tornado,ovidiucp/tornado,leekchan/tornado_test,shaohung001/tornado,elelianghh/tornado,Windsooon/tornado,takeshineshiro/tornado,dsseter/tornado,Snamint/tornado,arthurdarcet/tornado,Drooids/tornado,lsanotes/tornado,wechasing/tornado,jparise/tornado,zguangyu/tornado,legnaleurc/tornado,wujuguang/tornado,futurechallenger/tornado,gwillem/tornado,NoyaInRain/tornado,Acidburn0zzz/tornado,insflow/tornado,wechasing/tornado,hzruandd/tornado,eXcomm/tornado,fengsp/tornado,jsjohnst/tornado,drewmiller/tornado,eklitzke/tornado,ColorFuzzy/tornado,0xkag/tornado,tianyk/tornado-research,djt5019/tornado,mr-ping/tornado,yuezhonghua/tornado,yangkf1985/tornado,xinyu7/tornado,wsyzxcn/tornado,bufferx/tornado,importcjj/tornado,nephics/tornado,Windsooon/tornado,liqueur/tornado,codecov/tornado,sevenguin/tornado,coderhaoxin/tornado,shashankbassi92/tornado,ms7s/tornado,icejoywoo/tornado,hhru/tornado,MjAbuz/tornado,Acidburn0zzz/tornado,Lancher/tornado,gitchs/tornado,mlyundin/tornado,InverseLina/tornado,icejoywoo/tornado,ZhuPeng/tornado,dongpinglai/my_tornado,0xkag/tornado,VShangxiao/tornado,ms7s/tornado,bufferx/tornado,Aaron1992/tornado,zhuochenKIDD/tornado,anandology/tornado,drewmiller/tornado,MjAbuz/tornado,mr-ping/tornado,yuezhonghua/tornado,tornadoweb/tornado,legnaleurc/tornado,Lancher/tornado,elijah513/tornado,0x73/tornado,kaushik94/tornado,kangbiao/tornado,InverseLina/tornado,ovidiucp/tornado,jonashagstedt/tornado,kaushik94/tornado,BencoLee/tornado,anandology/tornado,InverseLina/tornado,NoyaInRain/tornado,bufferx/tornado,304471720/tornado,kevinge314gh/tornado,liqueur/tornado,jonashagstedt/tornado,Polyconseil/tornado,futurechallenger/tornado,arthurdarcet/tornado,dsseter/tornado,QuanZag/tornado,sunjeammy/tornado,bywbilly/tornado,sxfmol/tornado,nordaux/tornado,kevinge314gh/tornado,takeshineshiro/tornado,lsanotes/tornado,andyaguiar/tornado,elelianghh/tornado,zguangyu/tornado,ColorFuzzy/tornado,ajdavis/tornado,anandology/tornado,zhuochenKIDD/tornado,sevenguin/tornado,ajdavis/tornado,shashankbassi92/tornado,obsh/tornado,ydaniv/tornado,allenl203/tornado,codecov/tornado,elelianghh/tornado,hzruandd/tornado,Snamint/tornado,sxfmol/tornado,anandology/tornado,leekchan/tornado_test,ifduyue/tornado,
AlphaStaxLLC/tornado,Windsooon/tornado,Drooids/tornado,yuyangit/tornado,johan--/tornado,SuminAndrew/tornado,djt5019/tornado,erichuang1994/tornado,shashankbassi92/tornado,wujuguang/tornado,QuanZag/tornado,jarrahwu/tornado,wxhzk/tornado-1,anjan-srivastava/tornado,jsjohnst/tornado,Snamint/tornado,nbargnesi/tornado,ajdavis/tornado,Polyconseil/tornado,bdarnell/tornado,dsseter/tornado,jehiah/tornado,whip112/tornado,Callwoola/tornado,ovidiucp/tornado,mivade/tornado,lilydjwg/tornado,lujinda/tornado,kippandrew/tornado,Polyconseil/tornado,eklitzke/tornado,dongpinglai/my_tornado,coderhaoxin/tornado,z-fork/tornado,icejoywoo/tornado,erichuang1994/tornado,304471720/tornado,importcjj/tornado,takeshineshiro/tornado,yangkf1985/tornado,andyaguiar/tornado,0x73/tornado,Geoion/tornado,pombredanne/tornado,chenxiaba/tornado,Batterfii/tornado,nephics/tornado,AlphaStaxLLC/tornado,obsh/tornado,lsanotes/tornado,zguangyu/tornado,gitchs/tornado,jonashagstedt/tornado,icejoywoo/tornado,mr-ping/tornado,mehmetkose/tornado,whip112/tornado,bywbilly/tornado,NoyaInRain/tornado,fengsp/tornado,erichuang1994/tornado,johan--/tornado,ydaniv/tornado,lujinda/tornado,hzruandd/tornado,yuezhonghua/tornado,jampp/tornado,johan--/tornado,fengshao0907/tornado,ymero/tornado,ifduyue/tornado,ZhuPeng/tornado,anandology/tornado,Polyconseil/tornado,wujuguang/tornado,kippandrew/tornado,ColorFuzzy/tornado,yangkf1985/tornado,andyaguiar/tornado,ListFranz/tornado,ovidiucp/tornado,jonashagstedt/tornado,djt5019/tornado,sunjeammy/tornado,coderhaoxin/tornado,pombredanne/tornado,jampp/tornado,gitchs/tornado,0xkag/tornado,kippandrew/tornado,mr-ping/tornado,z-fork/tornado,ydaniv/tornado,bdarnell/tornado,shashankbassi92/tornado,zhuochenKIDD/tornado,drewmiller/tornado,nbargnesi/tornado,sevenguin/tornado,mehmetkose/tornado,kangbiao/tornado,fengshao0907/tornado,lilydjwg/tornado,wsyzxcn/tornado,lilydjwg/tornado,mivade/tornado,eXcomm/tornado,BencoLee/tornado,ListFranz/tornado,futurechallenger/tornado,frtmelody/tornado,wechasing/tornado,felixonmars/tornado,tornadoweb/tornado,dsseter/tornado,ajdavis/tornado,tianyk/tornado-research,hzruandd/tornado,cyrilMargaria/tornado,jparise/tornado,xinyu7/tornado,arthurdarcet/tornado,andyaguiar/tornado,nephics/tornado,gwillem/tornado,anandology/tornado,jsjohnst/tornado,0x73/tornado,eXcomm/tornado,drewmiller/tornado,wsyzxcn/tornado,bdarnell/tornado,InverseLina/tornado,kippandrew/tornado,ms7s/tornado,anjan-srivastava/tornado,tianyk/tornado-research,lsanotes/tornado,nbargnesi/tornado,LTD-Beget/tornado,wujuguang/tornado,gwillem/tornado,codeb2cc/tornado,whip112/tornado,jparise/tornado,chenxiaba/tornado,ZhuPeng/tornado,eklitzke/tornado,ubear/tornado,Polyconseil/tornado,NoyaInRain/tornado,noxiouz/tornado,Acidburn0zzz/tornado,wsyzxcn/tornado,wxhzk/tornado-1,whip112/tornado,eXcomm/tornado,ydaniv/tornado,kevinge314gh/tornado,pombredanne/tornado,elelianghh/tornado,erichuang1994/tornado,mlyundin/tornado,z-fork/tornado,AlphaStaxLLC/tornado,Fydot/tornado,tornadoweb/tornado,legnaleurc/tornado,ovidiucp/tornado,wxhzk/tornado-1,jonashagstedt/tornado,ifduyue/tornado,z-fork/tornado,tianyk/tornado-research,codecov/tornado,leekchan/tornado_test,shaohung001/tornado,jampp/tornado,Snamint/tornado,Aaron1992/tornado,wechasing/tornado,hzruandd/tornado,arthurdarcet/tornado,Windsooon/tornado,frtmelody/tornado,akalipetis/tornado,fengsp/tornado,cyrusin/tornado,johan--/tornado,yuyangit/tornado,kippandrew/tornado,jarrahwu/tornado,LTD-Beget/tornado,SuminAndrew/tornado,lujinda/tornado,anjan-srivastava/tornado,VShangxiao/tornado,wxhzk/tornado-1,sunjeammy/tornado,
BencoLee/tornado,jehiah/tornado,leekchan/tornado_test,codeb2cc/tornado,elijah513/tornado,tianyk/tornado-research,nordaux/tornado,yangkf1985/tornado,ubear/tornado,nordaux/tornado,ZhuPeng/tornado,pombredanne/tornado,insflow/tornado,kangbiao/tornado,andyaguiar/tornado,nbargnesi/tornado,sevenguin/tornado,yangkf1985/tornado,InverseLina/tornado,codeb2cc/tornado,ubear/tornado,0x73/tornado,akalipetis/tornado,Fydot/tornado,mivade/tornado,lujinda/tornado,frtmelody/tornado,akalipetis/tornado,fengshao0907/tornado,liqueur/tornado,jampp/tornado,hzruandd/tornado,InverseLina/tornado,ymero/tornado,takeshineshiro/tornado,NoyaInRain/tornado,lsanotes/tornado,jampp/tornado,Batterfii/tornado,cyrilMargaria/tornado,wechasing/tornado,yuyangit/tornado,ZhuPeng/tornado,dongpinglai/my_tornado,jehiah/tornado,allenl203/tornado,legnaleurc/tornado,Batterfii/tornado,fengsp/tornado,sevenguin/tornado,noxiouz/tornado,futurechallenger/tornado,MjAbuz/tornado,cyrilMargaria/tornado,ubear/tornado,0xkag/tornado,importcjj/tornado,VShangxiao/tornado,LTD-Beget/tornado,pombredanne/tornado,Callwoola/tornado,Batterfii/tornado,ms7s/tornado,obsh/tornado,ms7s/tornado,mivade/tornado,gitchs/tornado,frtmelody/tornado,304471720/tornado,felixonmars/tornado,importcjj/tornado,sunjeammy/tornado,obsh/tornado,bdarnell/tornado,ms7s/tornado,304471720/tornado,johan--/tornado,xinyu7/tornado,Polyconseil/tornado,Lancher/tornado,cyrusin/tornado,arthurdarcet/tornado,ifduyue/tornado,jsjohnst/tornado,obsh/tornado,Callwoola/tornado,wxhzk/tornado-1,liqueur/tornado,andyaguiar/tornado,anjan-srivastava/tornado,coderhaoxin/tornado,leekchan/tornado_test,kippandrew/tornado,dongpinglai/my_tornado,elijah513/tornado,zhuochenKIDD/tornado,johan--/tornado,drewmiller/tornado,mlyundin/tornado,codeb2cc/tornado,hhru/tornado,QuanZag/tornado,eklitzke/tornado,ZhuPeng/tornado,jarrahwu/tornado,cyrilMargaria/tornado,nordaux/tornado,sxfmol/tornado,Drooids/tornado,QuanZag/tornado,zguangyu/tornado,bywbilly/tornado,elijah513/tornado,wsyzxcn/tornado,chenxiaba/tornado,akalipetis/tornado,jparise/tornado,ListFranz/tornado,futurechallenger/tornado,bywbilly/tornado,VShangxiao/tornado,ymero/tornado,Acidburn0zzz/tornado,eklitzke/tornado,kevinge314gh/tornado,SuminAndrew/tornado,elelianghh/tornado,ajdavis/tornado,NoyaInRain/tornado,yuezhonghua/tornado,djt5019/tornado,Aaron1992/tornado,ubear/tornado,shaohung001/tornado,noxiouz/tornado,sxfmol/tornado,Windsooon/tornado,Lancher/tornado,ListFranz/tornado,kaushik94/tornado,jampp/tornado,zhuochenKIDD/tornado,elelianghh/tornado,sunjeammy/tornado,mehmetkose/tornado,mr-ping/tornado,lujinda/tornado,anjan-srivastava/tornado,jsjohnst/tornado,hhru/tornado,whip112/tornado,allenl203/tornado,whip112/tornado,importcjj/tornado,shaohung001/tornado,obsh/tornado,ydaniv/tornado,Geoion/tornado,insflow/tornado,chenxiaba/tornado,cyrilMargaria/tornado,erichuang1994/tornado,Drooids/tornado,yuezhonghua/tornado,Geoion/tornado,wsyzxcn/tornado,gwillem/tornado,noxiouz/tornado,BencoLee/tornado,insflow/tornado,lsanotes/tornado,bufferx/tornado,wsyzxcn/tornado,Snamint/tornado,jarrahwu/tornado,zhuochenKIDD/tornado,cyrusin/tornado,fengshao0907/tornado,elijah513/tornado,yuezhonghua/tornado,ymero/tornado,Batterfii/tornado,AlphaStaxLLC/tornado,takeshineshiro/tornado,MjAbuz/tornado,sxfmol/tornado,BencoLee/tornado,AlphaStaxLLC/tornado,chenxiaba/tornado,codecov/tornado,bywbilly/tornado,fengsp/tornado,shashankbassi92/tornado,felixonmars/tornado,fengshao0907/tornado,shashankbassi92/tornado,MjAbuz/tornado,jehiah/tornado,legnaleurc/tornado,Callwoola/tornado,BencoLee/tornado,felixonmars/tornado,
ymero/tornado,erichuang1994/tornado,ColorFuzzy/tornado,Aaron1992/tornado,futurechallenger/tornado,Lancher/tornado,shaohung001/tornado,jarrahwu/tornado,jehiah/tornado,ubear/tornado,nephics/tornado,QuanZag/tornado,yuyangit/tornado,coderhaoxin/tornado,lilydjwg/tornado,hhru/tornado,kangbiao/tornado,LTD-Beget/tornado,Callwoola/tornado,noxiouz/tornado,kaushik94/tornado,dsseter/tornado,Fydot/tornado,mehmetkose/tornado,SuminAndrew/tornado,hhru/tornado,arthurdarcet/tornado,codeb2cc/tornado,wechasing/tornado,Aaron1992/tornado,z-fork/tornado,jparise/tornado,304471720/tornado,sxfmol/tornado,Callwoola/tornado,kaushik94/tornado,eXcomm/tornado,liqueur/tornado,Fydot/tornado,MjAbuz/tornado,akalipetis/tornado,Acidburn0zzz/tornado,dongpinglai/my_tornado,gwillem/tornado,Acidburn0zzz/tornado,jarrahwu/tornado,0x73/tornado,ColorFuzzy/tornado,drewmiller/tornado,jsjohnst/tornado,mlyundin/tornado,takeshineshiro/tornado,insflow/tornado,pombredanne/tornado,cyrusin/tornado,nephics/tornado,zguangyu/tornado,nbargnesi/tornado,LTD-Beget/tornado,eXcomm/tornado,SuminAndrew/tornado,sevenguin/tornado,bufferx/tornado,xinyu7/tornado
|
Add script to run test suite with multiple python versions at once
|
#!/usr/bin/env python
"""Runs the tornado test suite with all supported python interpreters."""
import os
import subprocess
import sys
INTERPRETERS = [
"python2.5",
"python2.6",
"python2.7",
"auto2to3",
"pypy",
]
def exists_on_path(filename):
for dir in os.environ["PATH"].split(":"):
if os.path.exists(os.path.join(dir, filename)):
return True
return False
def main():
for interpreter in INTERPRETERS:
print "=================== %s =======================" % interpreter
if not exists_on_path(interpreter):
print "Interpreter not found, skipping..."
continue
args = [interpreter, "-m", "tornado.test.runtests"] + sys.argv[1:]
ret = subprocess.call(args)
if ret != 0:
print "Tests on %s failed with exit code %d" % (interpreter, ret)
sys.exit(ret)
print "All tests passed"
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to run test suite with multiple python versions at once<commit_after>
|
#!/usr/bin/env python
"""Runs the tornado test suite with all supported python interpreters."""
import os
import subprocess
import sys
INTERPRETERS = [
"python2.5",
"python2.6",
"python2.7",
"auto2to3",
"pypy",
]
def exists_on_path(filename):
for dir in os.environ["PATH"].split(":"):
if os.path.exists(os.path.join(dir, filename)):
return True
return False
def main():
for interpreter in INTERPRETERS:
print "=================== %s =======================" % interpreter
if not exists_on_path(interpreter):
print "Interpreter not found, skipping..."
continue
args = [interpreter, "-m", "tornado.test.runtests"] + sys.argv[1:]
ret = subprocess.call(args)
if ret != 0:
print "Tests on %s failed with exit code %d" % (interpreter, ret)
sys.exit(ret)
print "All tests passed"
if __name__ == "__main__":
main()
|
Add script to run test suite with multiple python versions at once
#!/usr/bin/env python
"""Runs the tornado test suite with all supported python interpreters."""
import os
import subprocess
import sys
INTERPRETERS = [
"python2.5",
"python2.6",
"python2.7",
"auto2to3",
"pypy",
]
def exists_on_path(filename):
for dir in os.environ["PATH"].split(":"):
if os.path.exists(os.path.join(dir, filename)):
return True
return False
def main():
for interpreter in INTERPRETERS:
print "=================== %s =======================" % interpreter
if not exists_on_path(interpreter):
print "Interpreter not found, skipping..."
continue
args = [interpreter, "-m", "tornado.test.runtests"] + sys.argv[1:]
ret = subprocess.call(args)
if ret != 0:
print "Tests on %s failed with exit code %d" % (interpreter, ret)
sys.exit(ret)
print "All tests passed"
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to run test suite with multiple python versions at once<commit_after>#!/usr/bin/env python
"""Runs the tornado test suite with all supported python interpreters."""
import os
import subprocess
import sys
INTERPRETERS = [
"python2.5",
"python2.6",
"python2.7",
"auto2to3",
"pypy",
]
def exists_on_path(filename):
for dir in os.environ["PATH"].split(":"):
if os.path.exists(os.path.join(dir, filename)):
return True
return False
def main():
for interpreter in INTERPRETERS:
print "=================== %s =======================" % interpreter
if not exists_on_path(interpreter):
print "Interpreter not found, skipping..."
continue
args = [interpreter, "-m", "tornado.test.runtests"] + sys.argv[1:]
ret = subprocess.call(args)
if ret != 0:
print "Tests on %s failed with exit code %d" % (interpreter, ret)
sys.exit(ret)
print "All tests passed"
if __name__ == "__main__":
main()
|
|
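exists_on_path above hard-codes ':' as the PATH separator; a portability-minded variant (identical behavior on POSIX) would lean on os.pathsep:
import os

def exists_on_path(filename):
    # os.pathsep is ':' on POSIX and ';' on Windows.
    for directory in os.environ["PATH"].split(os.pathsep):
        if os.path.exists(os.path.join(directory, filename)):
            return True
    return False

print(exists_on_path("python"))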
6cc9f6b7bcb77ca8dc4a0904bf2cafd01d60028b
|
turbustat/simulator/threeD_pspec.py
|
turbustat/simulator/threeD_pspec.py
|
import numpy as np
def threeD_pspec(arr):
'''
Return a 1D power spectrum from a 3D array.
Parameters
----------
arr : `~numpy.ndarray`
Three dimensional array.
Returns
-------
freq_bins : `~numpy.ndarray`
Radial frequency bins.
ps1D : `~numpy.ndarray`
One-dimensional azimuthally-averaged power spectrum.
ps1D_stderr : `~numpy.ndarray`
Standard deviation of `ps1D`.
'''
if arr.ndim != 3:
raise ValueError("arr must have three dimensions.")
ps3D = np.abs(np.fft.fftn(arr))**2
xfreq = np.fft.fftfreq(arr.shape[0])
yfreq = np.fft.fftfreq(arr.shape[1])
zfreq = np.fft.fftfreq(arr.shape[2])
xx, yy, zz = np.meshgrid(xfreq, yfreq, zfreq, indexing='ij')
rr = np.sqrt(xx**2 + yy**2 + zz**2)
freq_min = 1 / float(max(arr.shape))
freq_max = 1 / 2.
freq_bins = np.arange(freq_min, freq_max, freq_min)
whichbin = np.digitize(rr.flat, freq_bins)
ncount = np.bincount(whichbin)
ps1D = np.zeros(len(ncount) - 1)
ps1D_stderr = np.zeros(len(ncount) - 1)
for n in range(1, len(ncount)):
ps1D[n - 1] = np.mean(ps3D.flat[whichbin == n])
ps1D_stderr[n - 1] = np.std(ps3D.flat[whichbin == n])
return freq_bins, ps1D, ps1D_stderr
|
Add 3D power spectrum for comparing with generated fields
|
Add 3D power spectrum for comparing with generated fields
|
Python
|
mit
|
e-koch/TurbuStat,Astroua/TurbuStat
|
Add 3D power spectrum for comparing with generated fields
|
import numpy as np
def threeD_pspec(arr):
'''
Return a 1D power spectrum from a 3D array.
Parameters
----------
arr : `~numpy.ndarray`
Three dimensional array.
Returns
-------
freq_bins : `~numpy.ndarray`
Radial frequency bins.
ps1D : `~numpy.ndarray`
One-dimensional azimuthally-averaged power spectrum.
ps1D_stderr : `~numpy.ndarray`
Standard deviation of `ps1D`.
'''
if arr.ndim != 3:
raise ValueError("arr must have three dimensions.")
ps3D = np.abs(np.fft.fftn(arr))**2
xfreq = np.fft.fftfreq(arr.shape[0])
yfreq = np.fft.fftfreq(arr.shape[1])
zfreq = np.fft.fftfreq(arr.shape[2])
xx, yy, zz = np.meshgrid(xfreq, yfreq, zfreq, indexing='ij')
rr = np.sqrt(xx**2 + yy**2 + zz**2)
freq_min = 1 / float(max(arr.shape))
freq_max = 1 / 2.
freq_bins = np.arange(freq_min, freq_max, freq_min)
whichbin = np.digitize(rr.flat, freq_bins)
ncount = np.bincount(whichbin)
ps1D = np.zeros(len(ncount) - 1)
ps1D_stderr = np.zeros(len(ncount) - 1)
for n in range(1, len(ncount)):
ps1D[n - 1] = np.mean(ps3D.flat[whichbin == n])
ps1D_stderr[n - 1] = np.std(ps3D.flat[whichbin == n])
return freq_bins, ps1D, ps1D_stderr
|
<commit_before><commit_msg>Add 3D power spectrum for comparing with generated fields<commit_after>
|
import numpy as np
def threeD_pspec(arr):
'''
Return a 1D power spectrum from a 3D array.
Parameters
----------
arr : `~numpy.ndarray`
Three dimensional array.
Returns
-------
freq_bins : `~numpy.ndarray`
Radial frequency bins.
ps1D : `~numpy.ndarray`
One-dimensional azimuthally-averaged power spectrum.
ps1D_stderr : `~numpy.ndarray`
Standard deviation of `ps1D`.
'''
if arr.ndim != 3:
raise ValueError("arr must have three dimensions.")
ps3D = np.abs(np.fft.fftn(arr))**2
xfreq = np.fft.fftfreq(arr.shape[0])
yfreq = np.fft.fftfreq(arr.shape[1])
zfreq = np.fft.fftfreq(arr.shape[2])
xx, yy, zz = np.meshgrid(xfreq, yfreq, zfreq, indexing='ij')
rr = np.sqrt(xx**2 + yy**2 + zz**2)
freq_min = 1 / float(max(arr.shape))
freq_max = 1 / 2.
freq_bins = np.arange(freq_min, freq_max, freq_min)
whichbin = np.digitize(rr.flat, freq_bins)
ncount = np.bincount(whichbin)
ps1D = np.zeros(len(ncount) - 1)
ps1D_stderr = np.zeros(len(ncount) - 1)
for n in range(1, len(ncount)):
ps1D[n - 1] = np.mean(ps3D.flat[whichbin == n])
ps1D_stderr[n - 1] = np.std(ps3D.flat[whichbin == n])
return freq_bins, ps1D, ps1D_stderr
|
Add 3D power spectrum for comparing with generated fields
import numpy as np
def threeD_pspec(arr):
'''
Return a 1D power spectrum from a 3D array.
Parameters
----------
arr : `~numpy.ndarray`
Three dimensional array.
Returns
-------
freq_bins : `~numpy.ndarray`
Radial frequency bins.
ps1D : `~numpy.ndarray`
One-dimensional azimuthally-averaged power spectrum.
ps1D_stderr : `~numpy.ndarray`
Standard deviation of `ps1D`.
'''
if arr.ndim != 3:
raise ValueError("arr must have three dimensions.")
ps3D = np.abs(np.fft.fftn(arr))**2
xfreq = np.fft.fftfreq(arr.shape[0])
yfreq = np.fft.fftfreq(arr.shape[1])
zfreq = np.fft.fftfreq(arr.shape[2])
xx, yy, zz = np.meshgrid(xfreq, yfreq, zfreq, indexing='ij')
rr = np.sqrt(xx**2 + yy**2 + zz**2)
freq_min = 1 / float(max(arr.shape))
freq_max = 1 / 2.
freq_bins = np.arange(freq_min, freq_max, freq_min)
whichbin = np.digitize(rr.flat, freq_bins)
ncount = np.bincount(whichbin)
ps1D = np.zeros(len(ncount) - 1)
ps1D_stderr = np.zeros(len(ncount) - 1)
for n in range(1, len(ncount)):
ps1D[n - 1] = np.mean(ps3D.flat[whichbin == n])
ps1D_stderr[n - 1] = np.std(ps3D.flat[whichbin == n])
return freq_bins, ps1D, ps1D_stderr
|
<commit_before><commit_msg>Add 3D power spectrum for comparing with generated fields<commit_after>
import numpy as np
def threeD_pspec(arr):
'''
Return a 1D power spectrum from a 3D array.
Parameters
----------
arr : `~numpy.ndarray`
Three dimensional array.
Returns
-------
freq_bins : `~numpy.ndarray`
Radial frequency bins.
ps1D : `~numpy.ndarray`
One-dimensional azimuthally-averaged power spectrum.
ps1D_stderr : `~numpy.ndarray`
Standard deviation of `ps1D`.
'''
if arr.ndim != 3:
raise ValueError("arr must have three dimensions.")
ps3D = np.abs(np.fft.fftn(arr))**2
xfreq = np.fft.fftfreq(arr.shape[0])
yfreq = np.fft.fftfreq(arr.shape[1])
zfreq = np.fft.fftfreq(arr.shape[2])
xx, yy, zz = np.meshgrid(xfreq, yfreq, zfreq, indexing='ij')
rr = np.sqrt(xx**2 + yy**2 + zz**2)
freq_min = 1 / float(max(arr.shape))
freq_max = 1 / 2.
freq_bins = np.arange(freq_min, freq_max, freq_min)
whichbin = np.digitize(rr.flat, freq_bins)
ncount = np.bincount(whichbin)
ps1D = np.zeros(len(ncount) - 1)
ps1D_stderr = np.zeros(len(ncount) - 1)
for n in range(1, len(ncount)):
ps1D[n - 1] = np.mean(ps3D.flat[whichbin == n])
ps1D_stderr[n - 1] = np.std(ps3D.flat[whichbin == n])
return freq_bins, ps1D, ps1D_stderr
|
|
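A quick smoke test of threeD_pspec on Gaussian white noise, whose azimuthally averaged spectrum should be roughly flat; the near-zero slope check is a loose heuristic, not part of the module, and assumes threeD_pspec is imported from the file above:
import numpy as np

np.random.seed(0)
freqs, ps1D, ps1D_err = threeD_pspec(np.random.normal(size=(32, 32, 32)))
# Trim defensively in case the bin and spectrum lengths differ by one.
m = min(len(freqs), len(ps1D))
slope = np.polyfit(np.log10(freqs[1:m]), np.log10(ps1D[1:m]), 1)[0]
print("log-log slope:", slope)  # expected to sit near 0 for white noise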
04cbabc2ca4d36ea35ec564bd48927dea7919190
|
virtool/tests/api/test_protected.py
|
virtool/tests/api/test_protected.py
|
import pytest
parameters = [
("post", ("/api/viruses", {})),
("patch", ("/api/viruses/foobar", {})),
("delete", ("/api/viruses/foobar",)),
("post", ("/api/viruses/foobar/isolates", {})),
("patch", ("/api/viruses/foobar/isolates/test", {})),
("delete", ("/api/viruses/foobar/isolates/test",)),
("patch", ("/api/hmm/annotations/foobar", {})),
("get", ("/api/groups",)),
("post", ("/api/groups", {})),
("get", ("/api/groups/foobar",)),
("patch", ("/api/groups/foobar", {})),
("delete", ("/api/groups/foobar",))
]
@pytest.mark.parametrize("method, args", parameters)
async def test_not_authorized(method, args, do_get, do_post, do_patch, do_put, do_delete):
doer = {
"get": do_get,
"post": do_post,
"patch": do_patch,
"put": do_put,
"delete": do_delete
}[method]
resp = await doer(*args)
assert resp.status == 403
assert await resp.json() == {
"message": "Not authorized"
}
@pytest.mark.parametrize("method, args", parameters)
async def test_not_permitted(method, args, do_get, do_post, do_patch, do_put, do_delete):
doer = {
"get": do_get,
"post": do_post,
"patch": do_patch,
"put": do_put,
"delete": do_delete
}[method]
resp = await doer(*args, authorize=True)
assert resp.status == 403
assert await resp.json() == {
"message": "Not permitted"
}
|
Use parametrize to test authorization and permissions
|
Use parametrize to test authorization and permissions
|
Python
|
mit
|
igboyes/virtool,virtool/virtool,igboyes/virtool,virtool/virtool
|
Use parametrize to test authorization and permissions
|
import pytest
parameters = [
("post", ("/api/viruses", {})),
("patch", ("/api/viruses/foobar", {})),
("delete", ("/api/viruses/foobar",)),
("post", ("/api/viruses/foobar/isolates", {})),
("patch", ("/api/viruses/foobar/isolates/test", {})),
("delete", ("/api/viruses/foobar/isolates/test",)),
("patch", ("/api/hmm/annotations/foobar", {})),
("get", ("/api/groups",)),
("post", ("/api/groups", {})),
("get", ("/api/groups/foobar",)),
("patch", ("/api/groups/foobar", {})),
("delete", ("/api/groups/foobar",))
]
@pytest.mark.parametrize("method, args", parameters)
async def test_not_authorized(method, args, do_get, do_post, do_patch, do_put, do_delete):
doer = {
"get": do_get,
"post": do_post,
"patch": do_patch,
"put": do_put,
"delete": do_delete
}[method]
resp = await doer(*args)
assert resp.status == 403
assert await resp.json() == {
"message": "Not authorized"
}
@pytest.mark.parametrize("method, args", parameters)
async def test_not_permitted(method, args, do_get, do_post, do_patch, do_put, do_delete):
doer = {
"get": do_get,
"post": do_post,
"patch": do_patch,
"put": do_put,
"delete": do_delete
}[method]
resp = await doer(*args, authorize=True)
assert resp.status == 403
assert await resp.json() == {
"message": "Not permitted"
}
|
<commit_before><commit_msg>Use parametrize to test authorization and permissions<commit_after>
|
import pytest
parameters = [
("post", ("/api/viruses", {})),
("patch", ("/api/viruses/foobar", {})),
("delete", ("/api/viruses/foobar",)),
("post", ("/api/viruses/foobar/isolates", {})),
("patch", ("/api/viruses/foobar/isolates/test", {})),
("delete", ("/api/viruses/foobar/isolates/test",)),
("patch", ("/api/hmm/annotations/foobar", {})),
("get", ("/api/groups",)),
("post", ("/api/groups", {})),
("get", ("/api/groups/foobar",)),
("patch", ("/api/groups/foobar", {})),
("delete", ("/api/groups/foobar",))
]
@pytest.mark.parametrize("method, args", parameters)
async def test_not_authorized(method, args, do_get, do_post, do_patch, do_put, do_delete):
doer = {
"get": do_get,
"post": do_post,
"patch": do_patch,
"put": do_put,
"delete": do_delete
}[method]
resp = await doer(*args)
assert resp.status == 403
assert await resp.json() == {
"message": "Not authorized"
}
@pytest.mark.parametrize("method, args", parameters)
async def test_not_permitted(method, args, do_get, do_post, do_patch, do_put, do_delete):
doer = {
"get": do_get,
"post": do_post,
"patch": do_patch,
"put": do_put,
"delete": do_delete
}[method]
resp = await doer(*args, authorize=True)
assert resp.status == 403
assert await resp.json() == {
"message": "Not permitted"
}
|
Use parametrize to test authorization and permissions
import pytest
parameters = [
("post", ("/api/viruses", {})),
("patch", ("/api/viruses/foobar", {})),
("delete", ("/api/viruses/foobar",)),
("post", ("/api/viruses/foobar/isolates", {})),
("patch", ("/api/viruses/foobar/isolates/test", {})),
("delete", ("/api/viruses/foobar/isolates/test",)),
("patch", ("/api/hmm/annotations/foobar", {})),
("get", ("/api/groups",)),
("post", ("/api/groups", {})),
("get", ("/api/groups/foobar",)),
("patch", ("/api/groups/foobar", {})),
("delete", ("/api/groups/foobar",))
]
@pytest.mark.parametrize("method, args", parameters)
async def test_not_authorized(method, args, do_get, do_post, do_patch, do_put, do_delete):
doer = {
"get": do_get,
"post": do_post,
"patch": do_patch,
"put": do_put,
"delete": do_delete
}[method]
resp = await doer(*args)
assert resp.status == 403
assert await resp.json() == {
"message": "Not authorized"
}
@pytest.mark.parametrize("method, args", parameters)
async def test_not_permitted(method, args, do_get, do_post, do_patch, do_put, do_delete):
doer = {
"get": do_get,
"post": do_post,
"patch": do_patch,
"put": do_put,
"delete": do_delete
}[method]
resp = await doer(*args, authorize=True)
assert resp.status == 403
assert await resp.json() == {
"message": "Not permitted"
}
|
<commit_before><commit_msg>Use parametrize to test authorization and permissions<commit_after>import pytest
parameters = [
("post", ("/api/viruses", {})),
("patch", ("/api/viruses/foobar", {})),
("delete", ("/api/viruses/foobar",)),
("post", ("/api/viruses/foobar/isolates", {})),
("patch", ("/api/viruses/foobar/isolates/test", {})),
("delete", ("/api/viruses/foobar/isolates/test",)),
("patch", ("/api/hmm/annotations/foobar", {})),
("get", ("/api/groups",)),
("post", ("/api/groups", {})),
("get", ("/api/groups/foobar",)),
("patch", ("/api/groups/foobar", {})),
("delete", ("/api/groups/foobar",))
]
@pytest.mark.parametrize("method, args", parameters)
async def test_not_authorized(method, args, do_get, do_post, do_patch, do_put, do_delete):
doer = {
"get": do_get,
"post": do_post,
"patch": do_patch,
"put": do_put,
"delete": do_delete
}[method]
resp = await doer(*args)
assert resp.status == 403
assert await resp.json() == {
"message": "Not authorized"
}
@pytest.mark.parametrize("method, args", parameters)
async def test_not_permitted(method, args, do_get, do_post, do_patch, do_put, do_delete):
doer = {
"get": do_get,
"post": do_post,
"patch": do_patch,
"put": do_put,
"delete": do_delete
}[method]
resp = await doer(*args, authorize=True)
assert resp.status == 403
assert await resp.json() == {
"message": "Not permitted"
}
|
|
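Both tests above rebuild the same verb-to-fixture mapping; a small sketch of factoring that dictionary into a shared helper (fixture names as in the tests):
def get_doer(method, do_get, do_post, do_patch, do_put, do_delete):
    # Map an HTTP verb onto the matching request fixture.
    return {
        "get": do_get,
        "post": do_post,
        "patch": do_patch,
        "put": do_put,
        "delete": do_delete
    }[method]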
1f141791c31525d16d5281790612d7e8f162f394
|
app/tests/test_models.py
|
app/tests/test_models.py
|
import pytest
from app.models import Submission, Comment
MOCK_SUBMISSION = {
'permalink': (u'https://www.reddit.com/r/fake/comments'
u'/000000/submission_title/'
),
'score': 100,
'author': u'fakeuser1',
'num_comments': 500,
'downs': 0,
'title': u'Submission title',
'created_utc': 1415713246.0,
'subreddit_id': u't5_000000',
'ups': 100,
'selftext': u'',
'fullname': u't3_aaaaaa',
'archived': True,
'id': u'aaaaaa'
}
def test_submission_model(session):
Submission.create(session, **MOCK_SUBMISSION)
db_submissions = session.query(Submission).all()
assert len(db_submissions) == 1
db_s = db_submissions[0]
for k in MOCK_SUBMISSION.keys():
assert getattr(db_s, k) == MOCK_SUBMISSION[k]
|
Add test for Submission model
|
Add test for Submission model
|
Python
|
mit
|
PsyBorgs/redditanalyser,PsyBorgs/redditanalyser
|
Add test for Submission model
|
import pytest
from app.models import Submission, Comment
MOCK_SUBMISSION = {
'permalink': (u'https://www.reddit.com/r/fake/comments'
u'/000000/submission_title/'
),
'score': 100,
'author': u'fakeuser1',
'num_comments': 500,
'downs': 0,
'title': u'Submission title',
'created_utc': 1415713246.0,
'subreddit_id': u't5_000000',
'ups': 100,
'selftext': u'',
'fullname': u't3_aaaaaa',
'archived': True,
'id': u'aaaaaa'
}
def test_submission_model(session):
Submission.create(session, **MOCK_SUBMISSION)
db_submissions = session.query(Submission).all()
assert len(db_submissions) == 1
db_s = db_submissions[0]
for k in MOCK_SUBMISSION.keys():
assert getattr(db_s, k) == MOCK_SUBMISSION[k]
|
<commit_before><commit_msg>Add test for Submission model<commit_after>
|
import pytest
from app.models import Submission, Comment
MOCK_SUBMISSION = {
'permalink': (u'https://www.reddit.com/r/fake/comments'
u'/000000/submission_title/'
),
'score': 100,
'author': u'fakeuser1',
'num_comments': 500,
'downs': 0,
'title': u'Submission title',
'created_utc': 1415713246.0,
'subreddit_id': u't5_000000',
'ups': 100,
'selftext': u'',
'fullname': u't3_aaaaaa',
'archived': True,
'id': u'aaaaaa'
}
def test_submission_model(session):
Submission.create(session, **MOCK_SUBMISSION)
db_submissions = session.query(Submission).all()
assert len(db_submissions) == 1
db_s = db_submissions[0]
for k in MOCK_SUBMISSION.keys():
assert getattr(db_s, k) == MOCK_SUBMISSION[k]
|
Add test for Submission model
import pytest
from app.models import Submission, Comment
MOCK_SUBMISSION = {
'permalink': (u'https://www.reddit.com/r/fake/comments'
u'/000000/submission_title/'
),
'score': 100,
'author': u'fakeuser1',
'num_comments': 500,
'downs': 0,
'title': u'Submission title',
'created_utc': 1415713246.0,
'subreddit_id': u't5_000000',
'ups': 100,
'selftext': u'',
'fullname': u't3_aaaaaa',
'archived': True,
'id': u'aaaaaa'
}
def test_submission_model(session):
Submission.create(session, **MOCK_SUBMISSION)
db_submissions = session.query(Submission).all()
assert len(db_submissions) == 1
db_s = db_submissions[0]
for k in MOCK_SUBMISSION.keys():
assert getattr(db_s, k) == MOCK_SUBMISSION[k]
|
<commit_before><commit_msg>Add test for Submission model<commit_after>import pytest
from app.models import Submission, Comment
MOCK_SUBMISSION = {
'permalink': (u'https://www.reddit.com/r/fake/comments'
u'/000000/submission_title/'
),
'score': 100,
'author': u'fakeuser1',
'num_comments': 500,
'downs': 0,
'title': u'Submission title',
'created_utc': 1415713246.0,
'subreddit_id': u't5_000000',
'ups': 100,
'selftext': u'',
'fullname': u't3_aaaaaa',
'archived': True,
'id': u'aaaaaa'
}
def test_submission_model(session):
Submission.create(session, **MOCK_SUBMISSION)
db_submissions = session.query(Submission).all()
assert len(db_submissions) == 1
db_s = db_submissions[0]
for k in MOCK_SUBMISSION.keys():
assert getattr(db_s, k) == MOCK_SUBMISSION[k]
|
|
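The round-trip pattern used above — create a row from a dict of expected values, reload it, and compare attribute by attribute — is easy to reproduce standalone. A minimal sketch against an in-memory SQLAlchemy 1.4+ model (the `Item` model is hypothetical, not the app's `Submission`, which uses its own `create(session, ...)` helper):

from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import declarative_base, Session

Base = declarative_base()

class Item(Base):
    __tablename__ = "items"
    id = Column(Integer, primary_key=True)
    title = Column(String)
    score = Column(Integer)

EXPECTED = {"id": 1, "title": "Submission title", "score": 100}

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(Item(**EXPECTED))
    session.commit()
    db_item = session.query(Item).one()
    # Compare every expected field against the persisted row.
    for key, value in EXPECTED.items():
        assert getattr(db_item, key) == value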
44bd7e7b5932754c83d987af473479568ae62a16
|
epitran/test/test_malayalam.py
|
epitran/test/test_malayalam.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import unicodedata
import epitran
class TestMalayalamGeneral(unittest.TestCase):
def setUp(self):
self.epi = epitran.Epitran(u'mal-Mlym')
def _assert_trans(self, src, tar):
trans = self.epi.transliterate(src)
trans = unicodedata.normalize('NFD', trans)
src = unicodedata.normalize('NFD', trans)
# print('{}\t{}\t{}'.format(trans, tar, zip(trans, tar)))
self.assertEqual(trans, tar)
def test_malayalam(self):
self._assert_trans('മലയാളം', 'malajaːɭam')
def test_kala(self):
self._assert_trans('കല', 'kala')
def test_eniykk(self):
self._assert_trans('എനിയ്ക്ക്', 'enijkkə')
class TestMalayalamFaDisambiguation(unittest.TestCase):
def setUp(self):
self.epi = epitran.Epitran('mal-Mlym')
def _assert_trans(self, src, tar):
trans = self.epi.transliterate(src)
trans = unicodedata.normalize('NFD', trans)
src = unicodedata.normalize('NFD', trans)
# print('{}\t{}\t{}'.format(trans, tar, zip(trans, tar)))
self.assertEqual(trans, tar)
def test_phalam(self):
self._assert_trans('ഫലം', 'pʰalam')
def test_phalam(self):
self._assert_trans('ഫാൻ', 'faːn')
|
Add test cases for Malayalam transliteration
|
Add test cases for Malayalam transliteration
|
Python
|
mit
|
dmort27/epitran,dmort27/epitran
|
Add test cases for Malayalam transliteration
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import unicodedata
import epitran
class TestMalayalamGeneral(unittest.TestCase):
def setUp(self):
self.epi = epitran.Epitran(u'mal-Mlym')
def _assert_trans(self, src, tar):
trans = self.epi.transliterate(src)
trans = unicodedata.normalize('NFD', trans)
src = unicodedata.normalize('NFD', trans)
# print('{}\t{}\t{}'.format(trans, tar, zip(trans, tar)))
self.assertEqual(trans, tar)
def test_malayalam(self):
self._assert_trans('മലയാളം', 'malajaːɭam')
def test_kala(self):
self._assert_trans('കല', 'kala')
def test_eniykk(self):
self._assert_trans('എനിയ്ക്ക്', 'enijkkə')
class TestMalayalamFaDisambiguation(unittest.TestCase):
def setUp(self):
self.epi = epitran.Epitran('mal-Mlym')
def _assert_trans(self, src, tar):
trans = self.epi.transliterate(src)
trans = unicodedata.normalize('NFD', trans)
src = unicodedata.normalize('NFD', trans)
# print('{}\t{}\t{}'.format(trans, tar, zip(trans, tar)))
self.assertEqual(trans, tar)
def test_phalam(self):
self._assert_trans('ഫലം', 'pʰalam')
def test_phalam(self):
self._assert_trans('ഫാൻ', 'faːn')
|
<commit_before><commit_msg>Add test cases for Malayalam transliteration<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import unicodedata
import epitran
class TestMalayalamGeneral(unittest.TestCase):
def setUp(self):
self.epi = epitran.Epitran(u'mal-Mlym')
def _assert_trans(self, src, tar):
trans = self.epi.transliterate(src)
trans = unicodedata.normalize('NFD', trans)
src = unicodedata.normalize('NFD', trans)
# print('{}\t{}\t{}'.format(trans, tar, zip(trans, tar)))
self.assertEqual(trans, tar)
def test_malayalam(self):
self._assert_trans('മലയാളം', 'malajaːɭam')
def test_kala(self):
self._assert_trans('കല', 'kala')
def test_eniykk(self):
self._assert_trans('എനിയ്ക്ക്', 'enijkkə')
class TestMalayalamFaDisambiguation(unittest.TestCase):
def setUp(self):
self.epi = epitran.Epitran('mal-Mlym')
def _assert_trans(self, src, tar):
trans = self.epi.transliterate(src)
trans = unicodedata.normalize('NFD', trans)
src = unicodedata.normalize('NFD', trans)
# print('{}\t{}\t{}'.format(trans, tar, zip(trans, tar)))
self.assertEqual(trans, tar)
def test_phalam(self):
self._assert_trans('ഫലം', 'pʰalam')
def test_phalam(self):
self._assert_trans('ഫാൻ', 'faːn')
|
Add test cases for Malayalam transliteration
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import unicodedata
import epitran
class TestMalayalamGeneral(unittest.TestCase):
def setUp(self):
self.epi = epitran.Epitran(u'mal-Mlym')
def _assert_trans(self, src, tar):
trans = self.epi.transliterate(src)
trans = unicodedata.normalize('NFD', trans)
src = unicodedata.normalize('NFD', trans)
# print('{}\t{}\t{}'.format(trans, tar, zip(trans, tar)))
self.assertEqual(trans, tar)
def test_malayalam(self):
self._assert_trans('മലയാളം', 'malajaːɭam')
def test_kala(self):
self._assert_trans('കല', 'kala')
def test_eniykk(self):
self._assert_trans('എനിയ്ക്ക്', 'enijkkə')
class TestMalayalamFaDisambiguation(unittest.TestCase):
def setUp(self):
self.epi = epitran.Epitran('mal-Mlym')
def _assert_trans(self, src, tar):
trans = self.epi.transliterate(src)
trans = unicodedata.normalize('NFD', trans)
src = unicodedata.normalize('NFD', trans)
# print('{}\t{}\t{}'.format(trans, tar, zip(trans, tar)))
self.assertEqual(trans, tar)
def test_phalam(self):
self._assert_trans('ഫലം', 'pʰalam')
def test_phalam(self):
self._assert_trans('ഫാൻ', 'faːn')
|
<commit_before><commit_msg>Add test cases for Malayalam transliteration<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import unicodedata
import epitran
class TestMalayalamGeneral(unittest.TestCase):
def setUp(self):
self.epi = epitran.Epitran(u'mal-Mlym')
def _assert_trans(self, src, tar):
trans = self.epi.transliterate(src)
trans = unicodedata.normalize('NFD', trans)
src = unicodedata.normalize('NFD', trans)
# print('{}\t{}\t{}'.format(trans, tar, zip(trans, tar)))
self.assertEqual(trans, tar)
def test_malayalam(self):
self._assert_trans('മലയാളം', 'malajaːɭam')
def test_kala(self):
self._assert_trans('കല', 'kala')
def test_eniykk(self):
self._assert_trans('എനിയ്ക്ക്', 'enijkkə')
class TestMalayalamFaDisambiguation(unittest.TestCase):
def setUp(self):
self.epi = epitran.Epitran('mal-Mlym')
def _assert_trans(self, src, tar):
trans = self.epi.transliterate(src)
trans = unicodedata.normalize('NFD', trans)
src = unicodedata.normalize('NFD', trans)
# print('{}\t{}\t{}'.format(trans, tar, zip(trans, tar)))
self.assertEqual(trans, tar)
def test_phalam(self):
self._assert_trans('ഫലം', 'pʰalam')
def test_phalam(self):
self._assert_trans('ഫാൻ', 'faːn')
|
|
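Both helper classes above normalize to NFD before comparing, so precomposed and decomposed Unicode spellings of the same IPA string compare equal (the line `src = unicodedata.normalize('NFD', trans)` looks like it was meant to normalize `src`; as written it is a dead store, since `src` is never used afterwards). A dependency-free sketch of the comparison idea:

import unicodedata

def ipa_equal(produced, expected):
    # Normalize both sides to NFD so precomposed and decomposed
    # forms of the same string compare equal.
    return (unicodedata.normalize("NFD", produced)
            == unicodedata.normalize("NFD", expected))

# Precomposed 'é' vs 'e' + combining acute accent: equal after NFD.
assert ipa_equal("ép", "e\u0301p")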
a8aef6de4876cceb53da1335ada9163eaa184c6e
|
plugins/plugin_secure_check.py
|
plugins/plugin_secure_check.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
sys.path.insert(0, "..")
import re
from libs.manager import Plugin
from libs.mail import send_mail
class SecureCheck(Plugin):
def __init__(self, **kwargs):
self.keywords = ['secure', 'check']
self.result = {}
def __process_doc(self, **kwargs):
m = re.match("^(Accepted|Failed) password for ([a-z0-9]+) from (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}).*", kwargs['message'])
if m:
#print m.group(1),m.group(2),m.group(3)
if kwargs['host'] in self.result.keys():
if m.group(3) in self.result[kwargs['host']][m.group(1)]:
self.result[kwargs['host']][m.group(1)][m.group(3)] += 1
else:
self.result[kwargs['host']][m.group(1)][m.group(3)] = 1
else:
self.result[kwargs['host']] = {"Accepted":{}, "Failed":{}}
def process(self, **kwargs):
collection = kwargs['collection']
cond = {}
if 'condition' in kwargs:
cond = kwargs['condition']
condition = dict(
cond,
ident = {"$in":['sshd']},
)
#print condition
#print collection.database.name
#print collection.database.command("distinct", "messages", key="ident", q=condition)
# Do more HERE
for log_doc in collection.find(condition):
#print log_doc
self.__process_doc(**log_doc)
def report(self, **kwargs):
#print self.result
print "%-16s|%-16s|%8s|%-8s" % ("Host","IPs","Failed", "Accepted")
print "---------------------------------------------------"
for (host,action) in self.result.items():
ips = list(set(action['Failed'].keys() + action['Accepted'].keys()))
print "%-16s|%-16d|%8d|%8d" % (host,len(ips), sum(action['Failed'].values()), sum(action['Accepted'].values()))
for ip in ips:
fails = 0
accepts = 0
if action['Failed'].has_key(ip):
fails = action['Failed'][ip]
if action['Accepted'].has_key(ip):
accepts = action['Accepted'][ip]
print "%-16s|%-16s|%8d|%8d" % ("", ip, fails, accepts)
print "-----------------------------------------------------"
|
Add example plugin for parser secure logs
|
Add example plugin for parser secure logs
|
Python
|
apache-2.0
|
keepzero/fluent-mongo-parser
|
Add example plugin for parser secure logs
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
sys.path.insert(0, "..")
import re
from libs.manager import Plugin
from libs.mail import send_mail
class SecureCheck(Plugin):
def __init__(self, **kwargs):
self.keywords = ['secure', 'check']
self.result = {}
def __process_doc(self, **kwargs):
m = re.match("^(Accepted|Failed) password for ([a-z0-9]+) from (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}).*", kwargs['message'])
if m:
#print m.group(1),m.group(2),m.group(3)
if kwargs['host'] in self.result.keys():
if m.group(3) in self.result[kwargs['host']][m.group(1)]:
self.result[kwargs['host']][m.group(1)][m.group(3)] += 1
else:
self.result[kwargs['host']][m.group(1)][m.group(3)] = 1
else:
self.result[kwargs['host']] = {"Accepted":{}, "Failed":{}}
def process(self, **kwargs):
collection = kwargs['collection']
cond = {}
if 'condition' in kwargs:
cond = kwargs['condition']
condition = dict(
cond,
ident = {"$in":['sshd']},
)
#print condition
#print collection.database.name
#print collection.database.command("distinct", "messages", key="ident", q=condition)
# Do more HERE
for log_doc in collection.find(condition):
#print log_doc
self.__process_doc(**log_doc)
def report(self, **kwargs):
#print self.result
print "%-16s|%-16s|%8s|%-8s" % ("Host","IPs","Failed", "Accepted")
print "---------------------------------------------------"
for (host,action) in self.result.items():
ips = list(set(action['Failed'].keys() + action['Accepted'].keys()))
print "%-16s|%-16d|%8d|%8d" % (host,len(ips), sum(action['Failed'].values()), sum(action['Accepted'].values()))
for ip in ips:
fails = 0
accepts = 0
if action['Failed'].has_key(ip):
fails = action['Failed'][ip]
if action['Accepted'].has_key(ip):
accepts = action['Accepted'][ip]
print "%-16s|%-16s|%8d|%8d" % ("", ip, fails, accepts)
print "-----------------------------------------------------"
|
<commit_before><commit_msg>Add example plugin for parser secure logs<commit_after>
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
sys.path.insert(0, "..")
import re
from libs.manager import Plugin
from libs.mail import send_mail
class SecureCheck(Plugin):
def __init__(self, **kwargs):
self.keywords = ['secure', 'check']
self.result = {}
def __process_doc(self, **kwargs):
m = re.match("^(Accepted|Failed) password for ([a-z0-9]+) from (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}).*", kwargs['message'])
if m:
#print m.group(1),m.group(2),m.group(3)
if kwargs['host'] in self.result.keys():
if m.group(3) in self.result[kwargs['host']][m.group(1)]:
self.result[kwargs['host']][m.group(1)][m.group(3)] += 1
else:
self.result[kwargs['host']][m.group(1)][m.group(3)] = 1
else:
self.result[kwargs['host']] = {"Accepted":{}, "Failed":{}}
def process(self, **kwargs):
collection = kwargs['collection']
cond = {}
if 'condition' in kwargs:
cond = kwargs['condition']
condition = dict(
cond,
ident = {"$in":['sshd']},
)
#print condition
#print collection.database.name
#print collection.database.command("distinct", "messages", key="ident", q=condition)
# Do more HERE
for log_doc in collection.find(condition):
#print log_doc
self.__process_doc(**log_doc)
def report(self, **kwargs):
#print self.result
print "%-16s|%-16s|%8s|%-8s" % ("Host","IPs","Failed", "Accepted")
print "---------------------------------------------------"
for (host,action) in self.result.items():
ips = list(set(action['Failed'].keys() + action['Accepted'].keys()))
print "%-16s|%-16d|%8d|%8d" % (host,len(ips), sum(action['Failed'].values()), sum(action['Accepted'].values()))
for ip in ips:
fails = 0
accepts = 0
if action['Failed'].has_key(ip):
fails = action['Failed'][ip]
if action['Accepted'].has_key(ip):
accepts = action['Accepted'][ip]
print "%-16s|%-16s|%8d|%8d" % ("", ip, fails, accepts)
print "-----------------------------------------------------"
|
Add example plugin for parser secure logs
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
sys.path.insert(0, "..")
import re
from libs.manager import Plugin
from libs.mail import send_mail
class SecureCheck(Plugin):
def __init__(self, **kwargs):
self.keywords = ['secure', 'check']
self.result = {}
def __process_doc(self, **kwargs):
m = re.match("^(Accepted|Failed) password for ([a-z0-9]+) from (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}).*", kwargs['message'])
if m:
#print m.group(1),m.group(2),m.group(3)
if kwargs['host'] in self.result.keys():
if m.group(3) in self.result[kwargs['host']][m.group(1)]:
self.result[kwargs['host']][m.group(1)][m.group(3)] += 1
else:
self.result[kwargs['host']][m.group(1)][m.group(3)] = 1
else:
self.result[kwargs['host']] = {"Accepted":{}, "Failed":{}}
def process(self, **kwargs):
collection = kwargs['collection']
cond = {}
if 'condition' in kwargs:
cond = kwargs['condition']
condition = dict(
cond,
ident = {"$in":['sshd']},
)
#print condition
#print collection.database.name
#print collection.database.command("distinct", "messages", key="ident", q=condition)
# Do more HERE
for log_doc in collection.find(condition):
#print log_doc
self.__process_doc(**log_doc)
def report(self, **kwargs):
#print self.result
print "%-16s|%-16s|%8s|%-8s" % ("Host","IPs","Failed", "Accepted")
print "---------------------------------------------------"
for (host,action) in self.result.items():
ips = list(set(action['Failed'].keys() + action['Accepted'].keys()))
print "%-16s|%-16d|%8d|%8d" % (host,len(ips), sum(action['Failed'].values()), sum(action['Accepted'].values()))
for ip in ips:
fails = 0
accepts = 0
if action['Failed'].has_key(ip):
fails = action['Failed'][ip]
if action['Accepted'].has_key(ip):
accepts = action['Accepted'][ip]
print "%-16s|%-16s|%8d|%8d" % ("", ip, fails, accepts)
print "-----------------------------------------------------"
|
<commit_before><commit_msg>Add example plugin for parser secure logs<commit_after>#!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
sys.path.insert(0, "..")
import re
from libs.manager import Plugin
from libs.mail import send_mail
class SecureCheck(Plugin):
def __init__(self, **kwargs):
self.keywords = ['secure', 'check']
self.result = {}
def __process_doc(self, **kwargs):
m = re.match("^(Accepted|Failed) password for ([a-z0-9]+) from (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}).*", kwargs['message'])
if m:
#print m.group(1),m.group(2),m.group(3)
if kwargs['host'] in self.result.keys():
if m.group(3) in self.result[kwargs['host']][m.group(1)]:
self.result[kwargs['host']][m.group(1)][m.group(3)] += 1
else:
self.result[kwargs['host']][m.group(1)][m.group(3)] = 1
else:
self.result[kwargs['host']] = {"Accepted":{}, "Failed":{}}
def process(self, **kwargs):
collection = kwargs['collection']
cond = {}
if 'condition' in kwargs:
cond = kwargs['condition']
condition = dict(
cond,
ident = {"$in":['sshd']},
)
#print condition
#print collection.database.name
#print collection.database.command("distinct", "messages", key="ident", q=condition)
# Do more HERE
for log_doc in collection.find(condition):
#print log_doc
self.__process_doc(**log_doc)
def report(self, **kwargs):
#print self.result
print "%-16s|%-16s|%8s|%-8s" % ("Host","IPs","Failed", "Accepted")
print "---------------------------------------------------"
for (host,action) in self.result.items():
ips = list(set(action['Failed'].keys() + action['Accepted'].keys()))
print "%-16s|%-16d|%8d|%8d" % (host,len(ips), sum(action['Failed'].values()), sum(action['Accepted'].values()))
for ip in ips:
fails = 0
accepts = 0
if action['Failed'].has_key(ip):
fails = action['Failed'][ip]
if action['Accepted'].has_key(ip):
accepts = action['Accepted'][ip]
print "%-16s|%-16s|%8d|%8d" % ("", ip, fails, accepts)
print "-----------------------------------------------------"
|
|
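The core of the plugin above is the regex over syslog `message` lines plus per-host counters; that parsing can be exercised without MongoDB at all. A minimal Python 3 sketch (the sample log lines are made up for illustration):

import re
from collections import Counter

PATTERN = re.compile(
    r"^(Accepted|Failed) password for ([a-z0-9]+) "
    r"from (\d{1,3}(?:\.\d{1,3}){3})"
)

lines = [
    "Accepted password for alice from 10.0.0.5 port 51234 ssh2",
    "Failed password for root from 203.0.113.9 port 40022 ssh2",
    "Failed password for root from 203.0.113.9 port 40023 ssh2",
]

counts = Counter()
for line in lines:
    m = PATTERN.match(line)
    if m:
        action, user, ip = m.groups()
        counts[(action, ip)] += 1

for (action, ip), n in sorted(counts.items()):
    print("%-8s %-15s %d" % (action, ip, n))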
fe79933c028d55a6ad2d8f203d33c9ca7939afbe
|
py/total-hamming-distance.py
|
py/total-hamming-distance.py
|
from collections import Counter
class Solution(object):
def totalHammingDistance(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
c = Counter()
for n in nums:
for i in xrange(n.bit_length()):
if n & (1 << i):
c[i] += 1
ans = 0
for k, v in c.iteritems():
ans += v * (len(nums) - v)
return ans
|
Add py solution for 477. Total Hamming Distance
|
Add py solution for 477. Total Hamming Distance
477. Total Hamming Distance: https://leetcode.com/problems/total-hamming-distance/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 477. Total Hamming Distance
477. Total Hamming Distance: https://leetcode.com/problems/total-hamming-distance/
|
from collections import Counter
class Solution(object):
def totalHammingDistance(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
c = Counter()
for n in nums:
for i in xrange(n.bit_length()):
if n & (1 << i):
c[i] += 1
ans = 0
for k, v in c.iteritems():
ans += v * (len(nums) - v)
return ans
|
<commit_before><commit_msg>Add py solution for 477. Total Hamming Distance
477. Total Hamming Distance: https://leetcode.com/problems/total-hamming-distance/<commit_after>
|
from collections import Counter
class Solution(object):
def totalHammingDistance(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
c = Counter()
for n in nums:
for i in xrange(n.bit_length()):
if n & (1 << i):
c[i] += 1
ans = 0
for k, v in c.iteritems():
ans += v * (len(nums) - v)
return ans
|
Add py solution for 477. Total Hamming Distance
477. Total Hamming Distance: https://leetcode.com/problems/total-hamming-distance/
from collections import Counter
class Solution(object):
def totalHammingDistance(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
c = Counter()
for n in nums:
for i in xrange(n.bit_length()):
if n & (1 << i):
c[i] += 1
ans = 0
for k, v in c.iteritems():
ans += v * (len(nums) - v)
return ans
|
<commit_before><commit_msg>Add py solution for 477. Total Hamming Distance
477. Total Hamming Distance: https://leetcode.com/problems/total-hamming-distance/<commit_after>from collections import Counter
class Solution(object):
def totalHammingDistance(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
c = Counter()
for n in nums:
for i in xrange(n.bit_length()):
if n & (1 << i):
c[i] += 1
ans = 0
for k, v in c.iteritems():
ans += v * (len(nums) - v)
return ans
|
|
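The per-bit counting trick above generalizes cleanly: for each bit position, if `v` of the `n` numbers have that bit set, that column contributes `v * (n - v)` differing pairs to the total. A Python 3 version of the same idea (the solution in the record is Python 2 — note `xrange` and `iteritems`):

def total_hamming_distance(nums):
    n = len(nums)
    total = 0
    for i in range(32):  # LeetCode's constraints fit in 32 bits
        ones = sum((x >> i) & 1 for x in nums)
        # Every (one, zero) pair differs at bit i.
        total += ones * (n - ones)
    return total

assert total_hamming_distance([4, 14, 2]) == 6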
ff36ab7972220940a8e4d5396b591cd02c045380
|
south/signals.py
|
south/signals.py
|
"""
South-specific signals
"""
from django.dispatch import Signal
from django.conf import settings
# Sent at the start of the migration of an app
pre_migrate = Signal(providing_args=["app"])
# Sent after each successful migration of an app
post_migrate = Signal(providing_args=["app"])
# Sent after each run of a particular migration in a direction
ran_migration = Signal(providing_args=["app","migration","method"])
# Compatibility code for django.contrib.auth
if 'django.contrib.auth' in settings.INSTALLED_APPS:
def create_permissions_compat(app, **kwargs):
from django.db.models import get_app
from django.contrib.auth.management import create_permissions
create_permissions(get_app(app), (), 0)
post_migrate.connect(create_permissions_compat)
|
"""
South-specific signals
"""
from django.dispatch import Signal
from django.conf import settings
# Sent at the start of the migration of an app
pre_migrate = Signal(providing_args=["app"])
# Sent after each successful migration of an app
post_migrate = Signal(providing_args=["app"])
# Sent after each run of a particular migration in a direction
ran_migration = Signal(providing_args=["app","migration","method"])
# Compatibility code for django.contrib.auth
# Is causing strange errors, removing for now (we might need to fix up orm first)
#if 'django.contrib.auth' in settings.INSTALLED_APPS:
#def create_permissions_compat(app, **kwargs):
#from django.db.models import get_app
#from django.contrib.auth.management import create_permissions
#create_permissions(get_app(app), (), 0)
#post_migrate.connect(create_permissions_compat)
|
Remove the auth contenttypes thing for now, needs improvement
|
Remove the auth contenttypes thing for now, needs improvement
|
Python
|
apache-2.0
|
RaD/django-south,philipn/django-south,philipn/django-south,RaD/django-south,nimnull/django-south,RaD/django-south,nimnull/django-south
|
"""
South-specific signals
"""
from django.dispatch import Signal
from django.conf import settings
# Sent at the start of the migration of an app
pre_migrate = Signal(providing_args=["app"])
# Sent after each successful migration of an app
post_migrate = Signal(providing_args=["app"])
# Sent after each run of a particular migration in a direction
ran_migration = Signal(providing_args=["app","migration","method"])
# Compatibility code for django.contrib.auth
if 'django.contrib.auth' in settings.INSTALLED_APPS:
def create_permissions_compat(app, **kwargs):
from django.db.models import get_app
from django.contrib.auth.management import create_permissions
create_permissions(get_app(app), (), 0)
post_migrate.connect(create_permissions_compat)
Remove the auth contenttypes thing for now, needs improvement
|
"""
South-specific signals
"""
from django.dispatch import Signal
from django.conf import settings
# Sent at the start of the migration of an app
pre_migrate = Signal(providing_args=["app"])
# Sent after each successful migration of an app
post_migrate = Signal(providing_args=["app"])
# Sent after each run of a particular migration in a direction
ran_migration = Signal(providing_args=["app","migration","method"])
# Compatibility code for django.contrib.auth
# Is causing strange errors, removing for now (we might need to fix up orm first)
#if 'django.contrib.auth' in settings.INSTALLED_APPS:
#def create_permissions_compat(app, **kwargs):
#from django.db.models import get_app
#from django.contrib.auth.management import create_permissions
#create_permissions(get_app(app), (), 0)
#post_migrate.connect(create_permissions_compat)
|
<commit_before>"""
South-specific signals
"""
from django.dispatch import Signal
from django.conf import settings
# Sent at the start of the migration of an app
pre_migrate = Signal(providing_args=["app"])
# Sent after each successful migration of an app
post_migrate = Signal(providing_args=["app"])
# Sent after each run of a particular migration in a direction
ran_migration = Signal(providing_args=["app","migration","method"])
# Compatibility code for django.contrib.auth
if 'django.contrib.auth' in settings.INSTALLED_APPS:
def create_permissions_compat(app, **kwargs):
from django.db.models import get_app
from django.contrib.auth.management import create_permissions
create_permissions(get_app(app), (), 0)
post_migrate.connect(create_permissions_compat)
<commit_msg>Remove the auth contenttypes thing for now, needs improvement<commit_after>
|
"""
South-specific signals
"""
from django.dispatch import Signal
from django.conf import settings
# Sent at the start of the migration of an app
pre_migrate = Signal(providing_args=["app"])
# Sent after each successful migration of an app
post_migrate = Signal(providing_args=["app"])
# Sent after each run of a particular migration in a direction
ran_migration = Signal(providing_args=["app","migration","method"])
# Compatibility code for django.contrib.auth
# Is causing strange errors, removing for now (we might need to fix up orm first)
#if 'django.contrib.auth' in settings.INSTALLED_APPS:
#def create_permissions_compat(app, **kwargs):
#from django.db.models import get_app
#from django.contrib.auth.management import create_permissions
#create_permissions(get_app(app), (), 0)
#post_migrate.connect(create_permissions_compat)
|
"""
South-specific signals
"""
from django.dispatch import Signal
from django.conf import settings
# Sent at the start of the migration of an app
pre_migrate = Signal(providing_args=["app"])
# Sent after each successful migration of an app
post_migrate = Signal(providing_args=["app"])
# Sent after each run of a particular migration in a direction
ran_migration = Signal(providing_args=["app","migration","method"])
# Compatibility code for django.contrib.auth
if 'django.contrib.auth' in settings.INSTALLED_APPS:
def create_permissions_compat(app, **kwargs):
from django.db.models import get_app
from django.contrib.auth.management import create_permissions
create_permissions(get_app(app), (), 0)
post_migrate.connect(create_permissions_compat)
Remove the auth contenttypes thing for now, needs improvement
"""
South-specific signals
"""
from django.dispatch import Signal
from django.conf import settings
# Sent at the start of the migration of an app
pre_migrate = Signal(providing_args=["app"])
# Sent after each successful migration of an app
post_migrate = Signal(providing_args=["app"])
# Sent after each run of a particular migration in a direction
ran_migration = Signal(providing_args=["app","migration","method"])
# Compatibility code for django.contrib.auth
# Is causing strange errors, removing for now (we might need to fix up orm first)
#if 'django.contrib.auth' in settings.INSTALLED_APPS:
#def create_permissions_compat(app, **kwargs):
#from django.db.models import get_app
#from django.contrib.auth.management import create_permissions
#create_permissions(get_app(app), (), 0)
#post_migrate.connect(create_permissions_compat)
|
<commit_before>"""
South-specific signals
"""
from django.dispatch import Signal
from django.conf import settings
# Sent at the start of the migration of an app
pre_migrate = Signal(providing_args=["app"])
# Sent after each successful migration of an app
post_migrate = Signal(providing_args=["app"])
# Sent after each run of a particular migration in a direction
ran_migration = Signal(providing_args=["app","migration","method"])
# Compatibility code for django.contrib.auth
if 'django.contrib.auth' in settings.INSTALLED_APPS:
def create_permissions_compat(app, **kwargs):
from django.db.models import get_app
from django.contrib.auth.management import create_permissions
create_permissions(get_app(app), (), 0)
post_migrate.connect(create_permissions_compat)
<commit_msg>Remove the auth contenttypes thing for now, needs improvement<commit_after>"""
South-specific signals
"""
from django.dispatch import Signal
from django.conf import settings
# Sent at the start of the migration of an app
pre_migrate = Signal(providing_args=["app"])
# Sent after each successful migration of an app
post_migrate = Signal(providing_args=["app"])
# Sent after each run of a particular migration in a direction
ran_migration = Signal(providing_args=["app","migration","method"])
# Compatibility code for django.contrib.auth
# Is causing strange errors, removing for now (we might need to fix up orm first)
#if 'django.contrib.auth' in settings.INSTALLED_APPS:
#def create_permissions_compat(app, **kwargs):
#from django.db.models import get_app
#from django.contrib.auth.management import create_permissions
#create_permissions(get_app(app), (), 0)
#post_migrate.connect(create_permissions_compat)
|
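For reference, the commented-out compatibility hook is an ordinary receiver on the `post_migrate` signal defined above. A minimal sketch of defining, connecting, and firing such a signal — the receiver here is hypothetical, and the `providing_args` keyword matches the Django versions of South's era (it was deprecated in Django 3.0 and removed in 4.0):

from django.dispatch import Signal

post_migrate = Signal(providing_args=["app"])

def log_migration(sender=None, app=None, **kwargs):
    # Receivers must accept **kwargs; unknown arguments are ignored.
    print("finished migrating %s" % app)

post_migrate.connect(log_migration)
post_migrate.send(sender=None, app="books")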
874794156b60caef3d6b944835ef11671b9b8c86
|
benchmarks/bench_sets.py
|
benchmarks/bench_sets.py
|
"""
Benchmark various operations on sets.
"""
from __future__ import division
import numpy as np
from numba import jit
# Set benchmarks
# Notes:
# - unless we want to benchmark marshalling a set or list back to Python,
# we return a single value to avoid conversion costs
@jit(nopython=True)
def unique(seq):
l = []
seen = set()
for v in seq:
if v not in seen:
seen.add(v)
l.append(v)
return l[-1]
@jit(nopython=True)
def setops(a, b):
sa = set(a)
sb = set(b)
return len(sa & sb), len(sa | sb), len(sa ^ sb), len(sa - sb), len(sb - sa)
class IntegerSets:
N = 100000
dtype = np.int32
def setup(self):
self.rnd = np.random.RandomState(42)
self.seq = self.duplicates_array(self.N)
self.a = self.sparse_array(self.N)
self.b = self.sparse_array(self.N)
# Warm up
self.run_unique(5)
self.run_setops(5)
def duplicates_array(self, n):
"""
Get a 1d array with many duplicate values.
"""
a = np.arange(int(np.sqrt(n)), dtype=self.dtype)
# XXX rnd.choice() can take an integer to sample from arange()
return self.rnd.choice(a, n)
def sparse_array(self, n):
"""
Get a 1d array with values spread around.
"""
# Note two calls to sparse_array() should generate reasonable overlap
a = np.arange(int(n ** 1.3), dtype=self.dtype)
return self.rnd.choice(a, n)
def run_unique(self, n):
unique(self.seq[:n])
def run_setops(self, n):
setops(self.a[:n], self.b[:n])
def time_unique(self):
self.run_unique(self.N)
def time_setops(self):
self.run_setops(self.N)
|
Add a couple set benchmarks
|
Add a couple set benchmarks
|
Python
|
bsd-2-clause
|
numba/numba-benchmark
|
Add a couple set benchmarks
|
"""
Benchmark various operations on sets.
"""
from __future__ import division
import numpy as np
from numba import jit
# Set benchmarks
# Notes:
# - unless we want to benchmark marshalling a set or list back to Python,
# we return a single value to avoid conversion costs
@jit(nopython=True)
def unique(seq):
l = []
seen = set()
for v in seq:
if v not in seen:
seen.add(v)
l.append(v)
return l[-1]
@jit(nopython=True)
def setops(a, b):
sa = set(a)
sb = set(b)
return len(sa & sb), len(sa | sb), len(sa ^ sb), len(sa - sb), len(sb - sa)
class IntegerSets:
N = 100000
dtype = np.int32
def setup(self):
self.rnd = np.random.RandomState(42)
self.seq = self.duplicates_array(self.N)
self.a = self.sparse_array(self.N)
self.b = self.sparse_array(self.N)
# Warm up
self.run_unique(5)
self.run_setops(5)
def duplicates_array(self, n):
"""
Get a 1d array with many duplicate values.
"""
a = np.arange(int(np.sqrt(n)), dtype=self.dtype)
# XXX rnd.choice() can take an integer to sample from arange()
return self.rnd.choice(a, n)
def sparse_array(self, n):
"""
Get a 1d array with values spread around.
"""
# Note two calls to sparse_array() should generate reasonable overlap
a = np.arange(int(n ** 1.3), dtype=self.dtype)
return self.rnd.choice(a, n)
def run_unique(self, n):
unique(self.seq[:n])
def run_setops(self, n):
setops(self.a[:n], self.b[:n])
def time_unique(self):
self.run_unique(self.N)
def time_setops(self):
self.run_setops(self.N)
|
<commit_before><commit_msg>Add a couple set benchmarks<commit_after>
|
"""
Benchmark various operations on sets.
"""
from __future__ import division
import numpy as np
from numba import jit
# Set benchmarks
# Notes:
# - unless we want to benchmark marshalling a set or list back to Python,
# we return a single value to avoid conversion costs
@jit(nopython=True)
def unique(seq):
l = []
seen = set()
for v in seq:
if v not in seen:
seen.add(v)
l.append(v)
return l[-1]
@jit(nopython=True)
def setops(a, b):
sa = set(a)
sb = set(b)
return len(sa & sb), len(sa | sb), len(sa ^ sb), len(sa - sb), len(sb - sa)
class IntegerSets:
N = 100000
dtype = np.int32
def setup(self):
self.rnd = np.random.RandomState(42)
self.seq = self.duplicates_array(self.N)
self.a = self.sparse_array(self.N)
self.b = self.sparse_array(self.N)
# Warm up
self.run_unique(5)
self.run_setops(5)
def duplicates_array(self, n):
"""
Get a 1d array with many duplicate values.
"""
a = np.arange(int(np.sqrt(n)), dtype=self.dtype)
# XXX rnd.choice() can take an integer to sample from arange()
return self.rnd.choice(a, n)
def sparse_array(self, n):
"""
Get a 1d array with values spread around.
"""
# Note two calls to sparse_array() should generate reasonable overlap
a = np.arange(int(n ** 1.3), dtype=self.dtype)
return self.rnd.choice(a, n)
def run_unique(self, n):
unique(self.seq[:n])
def run_setops(self, n):
setops(self.a[:n], self.b[:n])
def time_unique(self):
self.run_unique(self.N)
def time_setops(self):
self.run_setops(self.N)
|
Add a couple set benchmarks
"""
Benchmark various operations on sets.
"""
from __future__ import division
import numpy as np
from numba import jit
# Set benchmarks
# Notes:
# - unless we want to benchmark marshalling a set or list back to Python,
# we return a single value to avoid conversion costs
@jit(nopython=True)
def unique(seq):
l = []
seen = set()
for v in seq:
if v not in seen:
seen.add(v)
l.append(v)
return l[-1]
@jit(nopython=True)
def setops(a, b):
sa = set(a)
sb = set(b)
return len(sa & sb), len(sa | sb), len(sa ^ sb), len(sa - sb), len(sb - sa)
class IntegerSets:
N = 100000
dtype = np.int32
def setup(self):
self.rnd = np.random.RandomState(42)
self.seq = self.duplicates_array(self.N)
self.a = self.sparse_array(self.N)
self.b = self.sparse_array(self.N)
# Warm up
self.run_unique(5)
self.run_setops(5)
def duplicates_array(self, n):
"""
Get a 1d array with many duplicate values.
"""
a = np.arange(int(np.sqrt(n)), dtype=self.dtype)
# XXX rnd.choice() can take an integer to sample from arange()
return self.rnd.choice(a, n)
def sparse_array(self, n):
"""
Get a 1d array with values spread around.
"""
# Note two calls to sparse_array() should generate reasonable overlap
a = np.arange(int(n ** 1.3), dtype=self.dtype)
return self.rnd.choice(a, n)
def run_unique(self, n):
unique(self.seq[:n])
def run_setops(self, n):
setops(self.a[:n], self.b[:n])
def time_unique(self):
self.run_unique(self.N)
def time_setops(self):
self.run_setops(self.N)
|
<commit_before><commit_msg>Add a couple set benchmarks<commit_after>"""
Benchmark various operations on sets.
"""
from __future__ import division
import numpy as np
from numba import jit
# Set benchmarks
# Notes:
# - unless we want to benchmark marshalling a set or list back to Python,
# we return a single value to avoid conversion costs
@jit(nopython=True)
def unique(seq):
l = []
seen = set()
for v in seq:
if v not in seen:
seen.add(v)
l.append(v)
return l[-1]
@jit(nopython=True)
def setops(a, b):
sa = set(a)
sb = set(b)
return len(sa & sb), len(sa | sb), len(sa ^ sb), len(sa - sb), len(sb - sa)
class IntegerSets:
N = 100000
dtype = np.int32
def setup(self):
self.rnd = np.random.RandomState(42)
self.seq = self.duplicates_array(self.N)
self.a = self.sparse_array(self.N)
self.b = self.sparse_array(self.N)
# Warm up
self.run_unique(5)
self.run_setops(5)
def duplicates_array(self, n):
"""
Get a 1d array with many duplicate values.
"""
a = np.arange(int(np.sqrt(n)), dtype=self.dtype)
# XXX rnd.choice() can take an integer to sample from arange()
return self.rnd.choice(a, n)
def sparse_array(self, n):
"""
Get a 1d array with values spread around.
"""
# Note two calls to sparse_array() should generate reasonable overlap
a = np.arange(int(n ** 1.3), dtype=self.dtype)
return self.rnd.choice(a, n)
def run_unique(self, n):
unique(self.seq[:n])
def run_setops(self, n):
setops(self.a[:n], self.b[:n])
def time_unique(self):
self.run_unique(self.N)
def time_setops(self):
self.run_setops(self.N)
|
|
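The `XXX` note in `duplicates_array` above is correct: `RandomState.choice` accepts a plain int and samples from `arange(n)` directly, so the explicit `np.arange` is avoidable. A small sketch of both array generators without the benchmark-class scaffolding (NumPy only):

import numpy as np

rnd = np.random.RandomState(42)
n = 100000

# Many duplicates: draw n values from only sqrt(n) distinct integers.
dupes = rnd.choice(int(np.sqrt(n)), n)

# Sparse: draw n values from a much larger range, so two independent
# draws still overlap by a reasonable amount.
sparse_a = rnd.choice(int(n ** 1.3), n)
sparse_b = rnd.choice(int(n ** 1.3), n)

print(len(set(dupes)), len(set(sparse_a) & set(sparse_b)))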
d2a1d906b863dcf57eea228282a1badd5274d3b2
|
test/integration/test_node_ping.py
|
test/integration/test_node_ping.py
|
import uuid
from kitten.server import KittenServer
from kitten.request import KittenRequest
from mock import MagicMock
import gevent
class TestPropagation(object):
def setup_method(self, method):
self.local_port = 9812
self.remote_port = 9813
self.request = {
'id': {
'uuid': str(uuid.uuid4()),
'to': '127.0.0.1:{0}'.format(self.remote_port),
'from': '127.0.0.1:{0}'.format(self.local_port),
'kind': 'req',
},
'paradigm': 'node',
'method': 'ping',
}
self.servers = []
for port in (self.local_port, self.remote_port):
ns = MagicMock()
ns.port = port
self.servers.append(KittenServer(ns))
def test_node_ping(self):
"""
Check that two servers can ping each other.
1) Spin up two Servers
2) Add a ping request into the local server queue
3) Make sure the local server made the request and got an ack
4) Make sure the remote server made the response and got an ack
"""
return
map(lambda s: s.start(), self.servers)
gevent.sleep(0.1) # Let them start
request = KittenRequest(self.request)
self.servers[0].queue.put(request)
assert self.servers[1].queue.empty()
gevent.sleep(0.5)
self.fail()
|
Add disabled Ping integration test
|
Add disabled Ping integration test
|
Python
|
mit
|
thiderman/network-kitten
|
Add disabled Ping integration test
|
import uuid
from kitten.server import KittenServer
from kitten.request import KittenRequest
from mock import MagicMock
import gevent
class TestPropagation(object):
def setup_method(self, method):
self.local_port = 9812
self.remote_port = 9813
self.request = {
'id': {
'uuid': str(uuid.uuid4()),
'to': '127.0.0.1:{0}'.format(self.remote_port),
'from': '127.0.0.1:{0}'.format(self.local_port),
'kind': 'req',
},
'paradigm': 'node',
'method': 'ping',
}
self.servers = []
for port in (self.local_port, self.remote_port):
ns = MagicMock()
ns.port = port
self.servers.append(KittenServer(ns))
def test_node_ping(self):
"""
Check that two servers can ping each other.
1) Spin up two Servers
2) Add a ping request into the local server queue
3) Make sure the local server made the request and got an ack
4) Make sure the remote server made the response and got an ack
"""
return
map(lambda s: s.start(), self.servers)
gevent.sleep(0.1) # Let them start
request = KittenRequest(self.request)
self.servers[0].queue.put(request)
assert self.servers[1].queue.empty()
gevent.sleep(0.5)
self.fail()
|
<commit_before><commit_msg>Add disabled Ping integration test<commit_after>
|
import uuid
from kitten.server import KittenServer
from kitten.request import KittenRequest
from mock import MagicMock
import gevent
class TestPropagation(object):
def setup_method(self, method):
self.local_port = 9812
self.remote_port = 9813
self.request = {
'id': {
'uuid': str(uuid.uuid4()),
'to': '127.0.0.1:{0}'.format(self.remote_port),
'from': '127.0.0.1:{0}'.format(self.local_port),
'kind': 'req',
},
'paradigm': 'node',
'method': 'ping',
}
self.servers = []
for port in (self.local_port, self.remote_port):
ns = MagicMock()
ns.port = port
self.servers.append(KittenServer(ns))
def test_node_ping(self):
"""
Check that two servers can ping each other.
1) Spin up two Servers
2) Add a ping request into the local server queue
3) Make sure the local server made the request and got an ack
4) Make sure the remote server made the response and got an ack
"""
return
map(lambda s: s.start(), self.servers)
gevent.sleep(0.1) # Let them start
request = KittenRequest(self.request)
self.servers[0].queue.put(request)
assert self.servers[1].queue.empty()
gevent.sleep(0.5)
self.fail()
|
Add disabled Ping integration test
import uuid
from kitten.server import KittenServer
from kitten.request import KittenRequest
from mock import MagicMock
import gevent
class TestPropagation(object):
def setup_method(self, method):
self.local_port = 9812
self.remote_port = 9813
self.request = {
'id': {
'uuid': str(uuid.uuid4()),
'to': '127.0.0.1:{0}'.format(self.remote_port),
'from': '127.0.0.1:{0}'.format(self.local_port),
'kind': 'req',
},
'paradigm': 'node',
'method': 'ping',
}
self.servers = []
for port in (self.local_port, self.remote_port):
ns = MagicMock()
ns.port = port
self.servers.append(KittenServer(ns))
def test_node_ping(self):
"""
Check that two servers can ping each other.
1) Spin up two Servers
2) Add a ping request into the local server queue
3) Make sure the local server made the request and got an ack
4) Make sure the remote server made the response and got an ack
"""
return
map(lambda s: s.start(), self.servers)
gevent.sleep(0.1) # Let them start
request = KittenRequest(self.request)
self.servers[0].queue.put(request)
assert self.servers[1].queue.empty()
gevent.sleep(0.5)
self.fail()
|
<commit_before><commit_msg>Add disabled Ping integration test<commit_after>import uuid
from kitten.server import KittenServer
from kitten.request import KittenRequest
from mock import MagicMock
import gevent
class TestPropagation(object):
def setup_method(self, method):
self.local_port = 9812
self.remote_port = 9813
self.request = {
'id': {
'uuid': str(uuid.uuid4()),
'to': '127.0.0.1:{0}'.format(self.remote_port),
'from': '127.0.0.1:{0}'.format(self.local_port),
'kind': 'req',
},
'paradigm': 'node',
'method': 'ping',
}
self.servers = []
for port in (self.local_port, self.remote_port):
ns = MagicMock()
ns.port = port
self.servers.append(KittenServer(ns))
def test_node_ping(self):
"""
Check that two servers can ping each other.
1) Spin up two Servers
2) Add a ping request into the local server queue
3) Make sure the local server made the request and got an ack
4) Make sure the remote server made the response and got an ack
"""
return
map(lambda s: s.start(), self.servers)
gevent.sleep(0.1) # Let them start
request = KittenRequest(self.request)
self.servers[0].queue.put(request)
assert self.servers[1].queue.empty()
gevent.sleep(0.5)
self.fail()
|
|
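The bare `return` at the top of `test_node_ping` is what disables the test: everything after it is dead code, including the Python 2-style `map(lambda s: s.start(), ...)`, which on Python 3 would be a lazy iterator and start nothing, and `self.fail()`, which does not exist on a plain (non-unittest) class. The idiomatic way to park a test like this is an explicit marker:

import pytest

@pytest.mark.skip(reason="ping round-trip not implemented yet")
def test_node_ping():
    ...

# Or keep it running but tolerate failure while the feature lands:
@pytest.mark.xfail(reason="ping round-trip not implemented yet")
def test_node_ping_xfail():
    ...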
993025327f83e47c0b996690f661d4b54f8d2146
|
scripts/checkpoints/average.py
|
scripts/checkpoints/average.py
|
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import argparse
import numpy as np
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', nargs='+', required=True,
help="models to average")
parser.add_argument('-o', '--output', required=True,
help="output path")
args = parser.parse_args()
# *average* holds the model matrix
average = dict()
# No. of models.
n = len(args.model)
for filename in args.model:
print("Loading {}".format(filename))
with open(filename, "rb") as mfile:
# Loads matrix from model file
m = np.load(mfile)
for k in m:
if k != "history_errs":
# Initialize the key
if k not in average:
average[k] = m[k]
# Add to the appropriate value
elif average[k].shape == m[k].shape and "special" not in k:
average[k] += m[k]
# Actual averaging
for k in average:
if "special" not in k:
average[k] /= n
# Save averaged model to file
print("Saving to {}".format(args.output))
np.savez(args.output, **average)
|
Add script for checkpoint averaging
|
Add script for checkpoint averaging
|
Python
|
mit
|
emjotde/amunn,emjotde/amunn,emjotde/Marian,emjotde/amunn,emjotde/amunmt,marian-nmt/marian-train,emjotde/Marian,emjotde/amunmt,marian-nmt/marian-train,emjotde/amunmt,marian-nmt/marian-train,emjotde/amunn,emjotde/amunmt,marian-nmt/marian-train,marian-nmt/marian-train
|
Add script for checkpoint averaging
|
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import argparse
import numpy as np
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', nargs='+', required=True,
help="models to average")
parser.add_argument('-o', '--output', required=True,
help="output path")
args = parser.parse_args()
# *average* holds the model matrix
average = dict()
# No. of models.
n = len(args.model)
for filename in args.model:
print("Loading {}".format(filename))
with open(filename, "rb") as mfile:
# Loads matrix from model file
m = np.load(mfile)
for k in m:
if k != "history_errs":
# Initialize the key
if k not in average:
average[k] = m[k]
# Add to the appropriate value
elif average[k].shape == m[k].shape and "special" not in k:
average[k] += m[k]
# Actual averaging
for k in average:
if "special" not in k:
average[k] /= n
# Save averaged model to file
print("Saving to {}".format(args.output))
np.savez(args.output, **average)
|
<commit_before><commit_msg>Add script for checkpoint averaging<commit_after>
|
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import argparse
import numpy as np
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', nargs='+', required=True,
help="models to average")
parser.add_argument('-o', '--output', required=True,
help="output path")
args = parser.parse_args()
# *average* holds the model matrix
average = dict()
# No. of models.
n = len(args.model)
for filename in args.model:
print("Loading {}".format(filename))
with open(filename, "rb") as mfile:
# Loads matrix from model file
m = np.load(mfile)
for k in m:
if k != "history_errs":
# Initialize the key
if k not in average:
average[k] = m[k]
# Add to the appropriate value
elif average[k].shape == m[k].shape and "special" not in k:
average[k] += m[k]
# Actual averaging
for k in average:
if "special" not in k:
average[k] /= n
# Save averaged model to file
print("Saving to {}".format(args.output))
np.savez(args.output, **average)
|
Add script for checkpoint averaging
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import argparse
import numpy as np
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', nargs='+', required=True,
help="models to average")
parser.add_argument('-o', '--output', required=True,
help="output path")
args = parser.parse_args()
# *average* holds the model matrix
average = dict()
# No. of models.
n = len(args.model)
for filename in args.model:
print("Loading {}".format(filename))
with open(filename, "rb") as mfile:
# Loads matrix from model file
m = np.load(mfile)
for k in m:
if k != "history_errs":
# Initialize the key
if k not in average:
average[k] = m[k]
# Add to the appropriate value
elif average[k].shape == m[k].shape and "special" not in k:
average[k] += m[k]
# Actual averaging
for k in average:
if "special" not in k:
average[k] /= n
# Save averaged model to file
print("Saving to {}".format(args.output))
np.savez(args.output, **average)
|
<commit_before><commit_msg>Add script for checkpoint averaging<commit_after>#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import argparse
import numpy as np
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', nargs='+', required=True,
help="models to average")
parser.add_argument('-o', '--output', required=True,
help="output path")
args = parser.parse_args()
# *average* holds the model matrix
average = dict()
# No. of models.
n = len(args.model)
for filename in args.model:
print("Loading {}".format(filename))
with open(filename, "rb") as mfile:
# Loads matrix from model file
m = np.load(mfile)
for k in m:
if k != "history_errs":
# Initialize the key
if k not in average:
average[k] = m[k]
# Add to the appropriate value
elif average[k].shape == m[k].shape and "special" not in k:
average[k] += m[k]
# Actual averaging
for k in average:
if "special" not in k:
average[k] /= n
# Save averaged model to file
print("Saving to {}".format(args.output))
np.savez(args.output, **average)
|
|
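One subtlety in the averaging loop above: the in-place `average[k] /= n` assumes float arrays — with integer checkpoints it would floor-divide on Python 2 (the script does not import `division` from `__future__`) and raise a casting error under Python 3 NumPy. A compact sketch of the same averaging that sidesteps this, assuming every .npz checkpoint contains the same set of arrays:

import numpy as np

def average_checkpoints(paths):
    total, n = {}, 0
    for path in paths:
        with np.load(path) as ckpt:
            for key in ckpt.files:
                if key == "history_errs":
                    continue  # bookkeeping array, not model weights
                arr = ckpt[key].astype(np.float64)  # avoid int division
                total[key] = total.get(key, 0.0) + arr
        n += 1
    return {k: v / n for k, v in total.items()}

# avg = average_checkpoints(["model.iter10000.npz", "model.iter20000.npz"])
# np.savez("model.avg.npz", **avg)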
144738aecf4e593ebd8cbbc60f53692d783b1799
|
pronto_feedback/feedback/migrations/0003_feedback_tags.py
|
pronto_feedback/feedback/migrations/0003_feedback_tags.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-27 01:39
from __future__ import unicode_literals
from django.db import migrations
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('feedback', '0002_auto_20160826_2327'),
]
operations = [
migrations.AddField(
model_name='feedback',
name='tags',
field=taggit.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
),
]
|
Add migration for feedback tags
|
Add migration for feedback tags
|
Python
|
mit
|
zkan/pronto-feedback,zkan/pronto-feedback
|
Add migration for feedback tags
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-27 01:39
from __future__ import unicode_literals
from django.db import migrations
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('feedback', '0002_auto_20160826_2327'),
]
operations = [
migrations.AddField(
model_name='feedback',
name='tags',
field=taggit.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
),
]
|
<commit_before><commit_msg>Add migration for feedback tags<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-27 01:39
from __future__ import unicode_literals
from django.db import migrations
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('feedback', '0002_auto_20160826_2327'),
]
operations = [
migrations.AddField(
model_name='feedback',
name='tags',
field=taggit.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
),
]
|
Add migration for feedback tags
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-27 01:39
from __future__ import unicode_literals
from django.db import migrations
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('feedback', '0002_auto_20160826_2327'),
]
operations = [
migrations.AddField(
model_name='feedback',
name='tags',
field=taggit.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
),
]
|
<commit_before><commit_msg>Add migration for feedback tags<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-27 01:39
from __future__ import unicode_literals
from django.db import migrations
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('feedback', '0002_auto_20160826_2327'),
]
operations = [
migrations.AddField(
model_name='feedback',
name='tags',
field=taggit.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
),
]
|
|
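The migration above wires django-taggit's `TaggableManager` onto the model. The corresponding model declaration and typical usage are simply the following — a sketch assuming django-taggit is installed, `'taggit'` is in `INSTALLED_APPS`, and the model lives inside an installed app:

from django.db import models
from taggit.managers import TaggableManager

class Feedback(models.Model):
    text = models.TextField()
    tags = TaggableManager(blank=True)

# Usage:
# fb = Feedback.objects.create(text="Great sprint demo")
# fb.tags.add("pronto", "retro")
# Feedback.objects.filter(tags__name__in=["retro"])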
dbafa0d69c5c13282ed5f4c41ccb7550f1575c74
|
example/run_example_cad.py
|
example/run_example_cad.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs hello_world.py, through hello_world.isolate, locally in a temporary
directory with the files fetched from the remote Content-Addressed Datastore.
"""
import hashlib
import os
import shutil
import subprocess
import sys
import tempfile
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
def run(cmd):
print('Running: %s' % ' '.join(cmd))
cmd = [sys.executable, os.path.join(ROOT_DIR, '..', cmd[0])] + cmd[1:]
subprocess.check_call(cmd)
def main():
# Uncomment to make isolate.py to output logs.
#os.environ['ISOLATE_DEBUG'] = '1'
cad_server = 'http://isolateserver.appspot.com/'
try:
# All the files are put in a temporary directory. This is optional and
# simply done so the current directory doesn't have the following files
# created:
# - hello_world.isolated
# - hello_world.isolated.state
# - cache/
tempdir = tempfile.mkdtemp(prefix='hello_world')
cachedir = os.path.join(tempdir, 'cache')
isolateddir = os.path.join(tempdir, 'isolated')
isolated = os.path.join(isolateddir, 'hello_world.isolated')
os.mkdir(isolateddir)
print('Archiving')
run(
[
'isolate.py',
'hashtable',
'--isolate', os.path.join(ROOT_DIR, 'hello_world.isolate'),
'--result', isolated,
'--outdir', cad_server,
])
print('\nRunning')
hashval = hashlib.sha1(open(isolated, 'rb').read()).hexdigest()
run(
[
'run_isolated.py',
'--cache', cachedir,
'--remote', cad_server + 'content/retrieve?hash_key=',
'--hash', hashval,
])
finally:
shutil.rmtree(tempdir)
return 0
if __name__ == '__main__':
sys.exit(main())
|
Add example of running an isolated step without using Swarm
|
Add example of running an isolated step without using Swarm
TBR=csharp@chromium.org
BUG=
Review URL: https://chromiumcodereview.appspot.com/11070006
git-svn-id: d5a9b8648c52d490875de6588c8ee7ca688f9ed1@160586 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
apache-2.0
|
luci/luci-py,luci/luci-py,luci/luci-py,luci/luci-py
|
|
0e88b500e3412b86696c892abe8207f14fb73b3a
|
tensorflow_datasets/scripts/print_num_configs.py
|
tensorflow_datasets/scripts/print_num_configs.py
|
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script that prints number of configs for a dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import tensorflow_datasets as tfds
FLAGS = flags.FLAGS
flags.DEFINE_string("dataset", None, "DatasetBuilder to print num configs for")
def main(_):
print(len(tfds.builder(FLAGS.dataset).BUILDER_CONFIGS))
if __name__ == "__main__":
app.run(main)
|
Add script to print num configs
|
Add script to print num configs
PiperOrigin-RevId: 240189946
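As a hedged illustration of what the script measures, a programmatic equivalent for one arbitrary dataset name ("mnist" is only an example here; any registered builder works):
import tensorflow_datasets as tfds
# Same measurement the script performs: BUILDER_CONFIGS is the list
# whose length gets printed. tfds.builder() only constructs the
# builder object, so nothing is downloaded by this call.
print(len(tfds.builder("mnist").BUILDER_CONFIGS))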
|
Python
|
apache-2.0
|
tensorflow/datasets,tensorflow/datasets,tensorflow/datasets,tensorflow/datasets,tensorflow/datasets
|
|
d83f702a6cf0ec86c457edf8668078a53ae86579
|
restlib2/params.py
|
restlib2/params.py
|
def clean_bool(value, allow_none=False):
if isinstance(value, bool):
return value
if isinstance(value, basestring):
value = value.lower()
if value in ('t', 'true', '1', 'yes'):
return True
if value in ('f', 'false', '0', 'no'):
return False
if allow_none and value is None:
return
raise ValueError
class ParametizerMetaclass(type):
def __new__(cls, name, bases, attrs):
new_cls = type.__new__(cls, name, bases, attrs)
if hasattr(new_cls, 'param_defaults'):
defaults = new_cls.param_defaults.copy()
else:
defaults = {}
for key, value in attrs.items():
if not callable(value) and not key.startswith('__'):
defaults[key] = value
new_cls.param_defaults = defaults
return new_cls
class Parametizer(object):
__metaclass__ = ParametizerMetaclass
def clean(self, params=None, defaults=None):
if params is None:
params = {}
if defaults is None:
defaults = self.param_defaults
cleaned = defaults.copy()
cleaned.update(params)
for key in cleaned:
method = 'clean_{0}'.format(key)
if hasattr(self, method):
value = cleaned[key]
cleaner = getattr(self, method)
# If any kind of error occurs while cleaning, revert to
# the default value
try:
# Unpack single item lists
if isinstance(value, (list, tuple)):
value = map(cleaner, value)
if len(value) == 1:
value = value[0]
else:
value = cleaner(value)
except Exception:
value = defaults.get(key)
cleaned[key] = value
return cleaned
|
Implement Parametizer class for defining and cleaning GET parameters
|
Implement Parametizer class for defining and cleaning GET parameters
A subclass can define class-level variables and clean_* methods for cleaning
input parameter values
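A minimal sketch of that usage, with hypothetical parameter names (page and per_page are illustrative, not from the commit): the class-level attributes become param_defaults via the metaclass, and each clean_* method is applied to the matching key.
class PageParametizer(Parametizer):
    page = 1
    per_page = 20
    def clean_page(self, value):
        return int(value)
    def clean_per_page(self, value):
        return int(value)
# PageParametizer().clean({'page': '3'}) -> {'page': 3, 'per_page': 20}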
|
Python
|
bsd-2-clause
|
bruth/restlib2
|
|
60753aae90e1cba2e4dcb2dec94f9da530db5542
|
contrib/queue_monitor.py
|
contrib/queue_monitor.py
|
import logging
logger = logging.getLogger(__name__)
from arke.collect import Collect
class queue_monitor(Collect):
default_config = {'interval': 60,
}
def gather_data(self):
logger.info("persist_queue: %i, collect_pool: %i" % (self.persist_queue.qsize(), len(self._pool)))
|
Add a debugging plugin to log the size of the queue.
|
Add a debugging plugin to log the size of the queue.
|
Python
|
apache-2.0
|
geodelic/arke,geodelic/arke
|
|
f0413bbd3798361c6602deacb27a9da25d0c20d6
|
examples/plot_random_vs_gp.py
|
examples/plot_random_vs_gp.py
|
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.base import clone
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
from skopt.dummy_opt import dummy_minimize
from skopt.gp_opt import gp_minimize
digits = load_digits()
X, y = digits.data, digits.target
rfc = RandomForestClassifier(random_state=10)
def compute_mean_validation_score(forest_params):
forest_params = [int(param) for param in forest_params]
max_depth, max_features, mss, msl = forest_params
params = {
'max_depth': [max_depth], 'max_features': [max_features],
'min_samples_split': [mss], 'min_samples_leaf': [msl]}
gscv = GridSearchCV(rfc, params, n_jobs=-1)
gscv.fit(X, y)
return -gscv.best_score_
# Bounds inspired by
# http://scikit-learn.org/dev/auto_examples/model_selection/randomized_search.html#example-model-selection-randomized-search-py
bounds = [(3, 50), (1, 12), (1, 12), (1, 12)]
print("Doing a random search for the best random forest hyperparameter.")
t = time()
dummy_model = dummy_minimize(
compute_mean_validation_score, bounds, maxiter=100, random_state=0)
print(time() - t)
print("Best score obtained, %0.4f" % -dummy_model.fun)
print("Doing a gp-based search for the best random forest hyperparameter.")
t = time()
gp_model = gp_minimize(
compute_mean_validation_score, bounds, maxiter=100, random_state=0,
n_start=1
)
print(time() - t)
print("Best score obtained, %0.4f" % -gp_model.fun)
best_dummy_scores = [-np.min(dummy_model.func_vals[:i]) for i in range(1, 101)]
best_gp_scores = [-np.min(gp_model.func_vals[:i]) for i in range(1, 101)]
plt.title("Best score obtained at every iteration")
plt.plot(range(1, 101), best_dummy_scores, label="Dummy search")
plt.plot(range(1, 101), best_gp_scores, label="GP search")
plt.legend(loc="best")
plt.xlabel("Number of iterations.")
plt.ylabel("Mean accuracy score")
plt.ylim([0.885, 0.920])
plt.show()
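# A note on the sign convention above, as an illustrative sketch (the
# point [10, 5, 2, 1] is arbitrary but lies inside `bounds`): both
# searches minimize, so the objective returns the negated CV score and
# the accuracy is recovered by negating again.
score = compute_mean_validation_score([10, 5, 2, 1])
print("accuracy at this point: %0.4f" % -score)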
|
Add example to show random search vs gp
|
Add example to show random search vs gp
|
Python
|
bsd-3-clause
|
glouppe/scikit-optimize,ccauet/scikit-optimize,betatim/BlackBox,scikit-optimize/scikit-optimize,betatim/BlackBox,scikit-optimize/scikit-optimize,glouppe/scikit-optimize
|
|
4850f232528f797539ce444a11dc248ae7696842
|
numpy/core/tests/test_print.py
|
numpy/core/tests/test_print.py
|
import numpy as np
from numpy.testing import *
class TestPrint(TestCase):
def test_float_types(self) :
""" Check formatting.
This is only for the str function, and only for simple types.
The precision of np.float and np.longdouble aren't the same as the
python float precision.
"""
for t in [np.float, np.double, np.longdouble] :
for x in [0, 1,-1, 1e10, 1e20] :
assert_equal(str(t(x)), str(float(x)))
def test_complex_types(self) :
"""Check formatting.
This is only for the str function, and only for simple types.
The precision of np.float and np.longdouble aren't the same as the
python float precision.
"""
for t in [np.cfloat, np.cdouble, np.clongdouble] :
for x in [0, 1,-1, 1e10, 1e20] :
assert_equal(str(t(x)), str(complex(x)))
assert_equal(str(t(x*1j)), str(complex(x*1j)))
assert_equal(str(t(x + x*1j)), str(complex(x + x*1j)))
if __name__ == "__main__":
run_module_suite()
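# Illustrative aside, not part of the original tests: the values above
# are chosen so the numpy scalar and the Python float print alike, e.g.
# str(np.double(1e10)) == str(float(1e10)) holds, while arbitrary
# decimals on np.longdouble may print with extra digits -- which is the
# precision caveat the docstrings mention.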
|
Add basic tests of number str() formatting.
|
Add basic tests of number str() formatting.
git-svn-id: 77a43f9646713b91fea7788fad5dfbf67e151ece@5396 94b884b6-d6fd-0310-90d3-974f1d3f35e1
|
Python
|
bsd-3-clause
|
illume/numpy3k,Ademan/NumPy-GSoC,jasonmccampbell/numpy-refactor-sprint,illume/numpy3k,jasonmccampbell/numpy-refactor-sprint,illume/numpy3k,teoliphant/numpy-refactor,efiring/numpy-work,illume/numpy3k,chadnetzer/numpy-gaurdro,Ademan/NumPy-GSoC,chadnetzer/numpy-gaurdro,efiring/numpy-work,Ademan/NumPy-GSoC,jasonmccampbell/numpy-refactor-sprint,teoliphant/numpy-refactor,efiring/numpy-work,teoliphant/numpy-refactor,jasonmccampbell/numpy-refactor-sprint,teoliphant/numpy-refactor,teoliphant/numpy-refactor,chadnetzer/numpy-gaurdro,efiring/numpy-work,chadnetzer/numpy-gaurdro,Ademan/NumPy-GSoC
|
|
acd14b9239b60ab27ae38a95bd6a832bce1c4af4
|
pontoon/base/migrations/0021_auto_20150904_1007.py
|
pontoon/base/migrations/0021_auto_20150904_1007.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def remove_pa_fy(apps, schema_editor):
Locale = apps.get_model('base', 'Locale')
for code in ['pa', 'fy']:
l = Locale.objects.get(code=code)
l.delete()
class Migration(migrations.Migration):
dependencies = [
('base', '0020_auto_20150904_0857'),
]
operations = [
migrations.RunPython(remove_pa_fy)
]
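# A hedged refinement, not in the original commit: passing a no-op
# reverse callable would let this data migration be unapplied, e.g.
#   migrations.RunPython(remove_pa_fy, migrations.RunPython.noop)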
|
Remove obsolete locales, pa and fy
|
Remove obsolete locales, pa and fy
|
Python
|
bsd-3-clause
|
mathjazz/pontoon,participedia/pontoon,m8ttyB/pontoon,vivekanand1101/pontoon,yfdyh000/pontoon,sudheesh001/pontoon,mastizada/pontoon,mozilla/pontoon,yfdyh000/pontoon,mastizada/pontoon,mathjazz/pontoon,jotes/pontoon,mozilla/pontoon,sudheesh001/pontoon,mathjazz/pontoon,jotes/pontoon,mozilla/pontoon,mastizada/pontoon,participedia/pontoon,vivekanand1101/pontoon,jotes/pontoon,vivekanand1101/pontoon,participedia/pontoon,yfdyh000/pontoon,mozilla/pontoon,mathjazz/pontoon,participedia/pontoon,mastizada/pontoon,m8ttyB/pontoon,m8ttyB/pontoon,sudheesh001/pontoon,vivekanand1101/pontoon,yfdyh000/pontoon,sudheesh001/pontoon,jotes/pontoon,mozilla/pontoon,mathjazz/pontoon,m8ttyB/pontoon
|
|
486613951ac8b1584720b23c2a03d427bdf38162
|
apps/core/tests/test_admin.py
|
apps/core/tests/test_admin.py
|
from django.contrib.admin.sites import AdminSite
from django.test import TestCase
from .. import admin
from .. import factories
from .. import models
class UUIDModelAdminMixinTestCase(TestCase):
def test_get_short_uuid(self):
omics_unit = factories.OmicsUnitFactory()
mixin = admin.UUIDModelAdminMixin()
self.assertEqual(mixin.get_short_uuid(omics_unit), str(omics_unit))
class OmicsUnitAdminTestCase(TestCase):
def setUp(self):
site = AdminSite()
self.omics_unit = factories.OmicsUnitFactory()
self.omics_unit_admin = admin.OmicsUnitAdmin(models.OmicsUnit, site)
def test_get_species(self):
self.assertEqual(
self.omics_unit_admin.get_species(self.omics_unit),
self.omics_unit.strain.species.name
)
def test_get_reference_identifier(self):
self.assertEqual(
self.omics_unit_admin.get_reference_identifier(self.omics_unit),
self.omics_unit.reference.identifier
)
|
Add example tests for core ModelAdmin
|
Add example tests for core ModelAdmin
|
Python
|
bsd-3-clause
|
Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel
|
|
400d2b85e1b50405539ae7f1e12db484f8545353
|
src/main/fsize.py
|
src/main/fsize.py
|
fsize_all = 0
with open("qdisksync.cache") as fp:
    for line in fp:
        items = line.split("\t")
        fsize_all += int(items[1])
print("Size:" + str(fsize_all / 1024.0 / 1024.0) + "MB")
|
Add a files total size script
|
Add a files total size script
|
Python
|
apache-2.0
|
jemygraw/qdiskbundle,jemygraw/qdiskbundle,jemygraw/qdisksync,jemygraw/qdisksync,jemygraw/qdisksync
|
|
23a626124adb9882e78744a81d13996fe1d2f5aa
|
tests/functional/test_utils.py
|
tests/functional/test_utils.py
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import shutil
import stat
import socket
import tempfile
from tests import unittest
from tests import skip_if_windows
from s3transfer.utils import OSUtils
@skip_if_windows('Windows does not support UNIX special files')
class TestOSUtilsSpecialFiles(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.filename = os.path.join(self.tempdir, 'myfile')
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_character_device(self):
self.assertTrue(OSUtils().is_special_file('/dev/null'))
def test_fifo(self):
mode = 0o600 | stat.S_IFIFO
os.mknod(self.filename, mode)
self.assertTrue(OSUtils().is_special_file(self.filename))
def test_socket(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.bind(self.filename)
self.assertTrue(OSUtils().is_special_file(self.filename))
|
Add functional test for OSUtils
|
Add functional test for OSUtils
Test involved detecting special UNIX files
|
Python
|
apache-2.0
|
boto/s3transfer
|
|
e45cbf1dcaf6ba6e142e7674d3a869f1492d43c7
|
tests/unit/utils/cloud_test.py
|
tests/unit/utils/cloud_test.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
:copyright: © 2013 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
tests.unit.utils.cloud_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test the salt-cloud utilities module
'''
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
from salt.utils import cloud
class CloudUtilsTestCase(TestCase):
def test_ssh_password_regex(self):
'''Test matching ssh password patterns'''
for pattern in ('Password for root@127.0.0.1:',
'password for root@127.0.0.1:',
'root@127.0.0.1 Password:',
'root@127.0.0.1 password:',
' Password:',
' password:'):
self.assertNotEqual(
cloud.SSH_PASSWORD_PROMP_RE.match(pattern), None
)
if __name__ == '__main__':
from integration import run_tests
run_tests(CloudUtilsTestCase, needs_daemon=False)
|
Add unit test for the ssh password regex matching
|
Add unit test for the ssh password regex matching
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
<commit_before><commit_msg>Add unit test for the ssh password regex matching<commit_after># -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
:copyright: © 2013 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
tests.unit.utils.cloud_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test the salt-cloud utilities module
'''
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
from salt.utils import cloud
class CloudUtilsTestCase(TestCase):
def test_ssh_password_regex(self):
'''Test matching ssh password patterns'''
for pattern in ('Password for root@127.0.0.1:',
'password for root@127.0.0.1:',
'root@127.0.0.1 Password:',
'root@127.0.0.1 password:',
' Password:',
' password:'):
self.assertNotEqual(
cloud.SSH_PASSWORD_PROMP_RE.match(pattern), None
)
if __name__ == '__main__':
from integration import run_tests
run_tests(CloudUtilsTestCase, needs_daemon=False)
|
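For reference, a pattern that accepts every prompt the test feeds it might look like the sketch below; this is an illustration consistent with the test cases, not necessarily the exact regex salt ships as SSH_PASSWORD_PROMP_RE.

import re

# Accepts 'Password for root@host:', 'root@host password:', '  password:', etc.
PROMPT_RE = re.compile(r'(?:.*)[Pp]assword(?: for .*)?:')

for prompt in ('Password for root@127.0.0.1:',
               'root@127.0.0.1 password:',
               '   Password:'):
    assert PROMPT_RE.match(prompt) is not None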
|
e4a50aece7071091f5801c3e3239382b4b70256c
|
tools/state_updates.py
|
tools/state_updates.py
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
def update_accu_one(U, X):
U = 1*U + l*(u - 1*X)
X = (1*U)
return (U, X)
def update_accu_both(U, X):
U = (1-l)*U + l*(u - 1*X)
X = (1-l)*X + l*(1*U)
return (U, X)
def update_interp_both(U, X):
U = (1-l)*U + l*(u - 1*X)
X = (1-l)*X + l*(1*U)
return (U, X)
def update_interp_1(U, X):
U = (1. - l) * U + l * (u - 1. * X)
X = (1. * U)
return (U, X)
updates = [update_accu_one,
update_accu_both,
update_interp_both,
update_interp_1]
x = range(30)
for j in range(len(updates)):
    plt.subplot(len(updates), 1, j + 1)  # subplot indices are 1-based
l = 0.3
U = 20
X = 0
u = 10
y1 = []
y2 = []
for i in x:
U, X = updates[j](U, X)
print "U: %.1f, X: %.1f, u: %.2f" % (U, X, u)
y1.append(U)
y2.append(X)
plt.plot(x, y1)
plt.plot(x, y2)
plt.show()
|
Add a simple script for plotting iteration behaviors
|
Add a simple script for plotting iteration behaviors
|
Python
|
mit
|
arasmus/eca
|
Add a simple script for plotting iteration behaviors
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
def update_accu_one(U, X):
U = 1*U + l*(u - 1*X)
X = (1*U)
return (U, X)
def update_accu_both(U, X):
U = (1-l)*U + l*(u - 1*X)
X = (1-l)*X + l*(1*U)
return (U, X)
def update_interp_both(U, X):
U = (1-l)*U + l*(u - 1*X)
X = (1-l)*X + l*(1*U)
return (U, X)
def update_interp_1(U, X):
U = (1. - l) * U + l * (u - 1. * X)
X = (1. * U)
return (U, X)
updates = [update_accu_one,
update_accu_both,
update_interp_both,
update_interp_1]
x = range(30)
for j in range(len(updates)):
    plt.subplot(len(updates), 1, j + 1)  # subplot indices are 1-based
l = 0.3
U = 20
X = 0
u = 10
y1 = []
y2 = []
for i in x:
U, X = updates[j](U, X)
print "U: %.1f, X: %.1f, u: %.2f" % (U, X, u)
y1.append(U)
y2.append(X)
plt.plot(x, y1)
plt.plot(x, y2)
plt.show()
|
<commit_before><commit_msg>Add a simple script for plotting iteration behaviors<commit_after>
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
def update_accu_one(U, X):
U = 1*U + l*(u - 1*X)
X = (1*U)
return (U, X)
def update_accu_both(U, X):
U = (1-l)*U + l*(u - 1*X)
X = (1-l)*X + l*(1*U)
return (U, X)
def update_interp_both(U, X):
U = (1-l)*U + l*(u - 1*X)
X = (1-l)*X + l*(1*U)
return (U, X)
def update_interp_1(U, X):
U = (1. - l) * U + l * (u - 1. * X)
X = (1. * U)
return (U, X)
updates = [update_accu_one,
update_accu_both,
update_interp_both,
update_interp_1]
x = range(30)
for j in range(len(updates)):
    plt.subplot(len(updates), 1, j + 1)  # subplot indices are 1-based
l = 0.3
U = 20
X = 0
u = 10
y1 = []
y2 = []
for i in x:
U, X = updates[j](U, X)
print "U: %.1f, X: %.1f, u: %.2f" % (U, X, u)
y1.append(U)
y2.append(X)
plt.plot(x, y1)
plt.plot(x, y2)
plt.show()
|
Add a simple script for plotting iteration behaviors#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
def update_accu_one(U, X):
U = 1*U + l*(u - 1*X)
X = (1*U)
return (U, X)
def update_accu_both(U, X):
U = (1-l)*U + l*(u - 1*X)
X = (1-l)*X + l*(1*U)
return (U, X)
def update_interp_both(U, X):
U = (1-l)*U + l*(u - 1*X)
X = (1-l)*X + l*(1*U)
return (U, X)
def update_interp_1(U, X):
U = (1. - l) * U + l * (u - 1. * X)
X = (1. * U)
return (U, X)
updates = [update_accu_one,
update_accu_both,
update_interp_both,
update_interp_1]
x = range(30)
for j in range(len(updates)):
    plt.subplot(len(updates), 1, j + 1)  # subplot indices are 1-based
l = 0.3
U = 20
X = 0
u = 10
y1 = []
y2 = []
for i in x:
U, X = updates[j](U, X)
print "U: %.1f, X: %.1f, u: %.2f" % (U, X, u)
y1.append(U)
y2.append(X)
plt.plot(x, y1)
plt.plot(x, y2)
plt.show()
|
<commit_before><commit_msg>Add a simple script for plotting iteration behaviors<commit_after>#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
def update_accu_one(U, X):
U = 1*U + l*(u - 1*X)
X = (1*U)
return (U, X)
def update_accu_both(U, X):
U = (1-l)*U + l*(u - 1*X)
X = (1-l)*X + l*(1*U)
return (U, X)
def update_interp_both(U, X):
U = (1-l)*U + l*(u - 1*X)
X = (1-l)*X + l*(1*U)
return (U, X)
def update_interp_1(U, X):
U = (1. - l) * U + l * (u - 1. * X)
X = (1. * U)
return (U, X)
updates = [update_accu_one,
update_accu_both,
update_interp_both,
update_interp_1]
x = range(30)
for j in range(len(updates)):
    plt.subplot(len(updates), 1, j + 1)  # subplot indices are 1-based
l = 0.3
U = 20
X = 0
u = 10
y1 = []
y2 = []
for i in x:
U, X = updates[j](U, X)
print "U: %.1f, X: %.1f, u: %.2f" % (U, X, u)
y1.append(U)
y2.append(X)
plt.plot(x, y1)
plt.plot(x, y2)
plt.show()
|
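As a sanity check on the curves the script plots, the interpolating update has a closed-form fixed point: at equilibrium U = u - X and X = U, so U* = X* = u/2. A short sketch with the script's parameters (l = 0.3, u = 10) confirming convergence to 5.0:

l, u = 0.3, 10.0
U, X = 20.0, 0.0
for _ in range(200):
    U = (1 - l) * U + l * (u - X)  # same sequential order as update_interp_both
    X = (1 - l) * X + l * U
print(U, X)  # both approach u / 2 == 5.0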
|
750f9c3d243ad024d54b6f4903657ab35aa6feb9
|
learntools/computer_vision/ex6.py
|
learntools/computer_vision/ex6.py
|
from learntools.core import *
import tensorflow as tf
# Free
class Q1(CodingProblem):
_solution = ""
_hint = ""
def check(self):
pass
class Q2A(ThoughtExperiment):
_hint = "Remember that whatever transformation you apply needs at least to keep the classes distinct, but otherwise should more or less keep the appearance of the images the same. If you rotated a picture of a forest, would it still look like a forest?"
_solution = """It seems to this author that any of the transformations from the first problem might be appropriate, provided the parameter values were reasonable. A picture of a forest that had been rotated or shifted or stretched would still look like a forest, and contrast adjustments could perhaps make up for differences in light and shadow. Rotations especially could be taken through the full range, since there's no real concept of "up or down" for pictures taken straight overhead."""
class Q2B(ThoughtExperiment):
_hint = "Remember that whatever transformation you apply needs at least to keep the classes distinct, but otherwise should more or less keep the appearance of the images the same. If you rotated a picture of a forest, would it still look like a forest?"
_solution = "It seems to this author that any of the transformations from the first problem might be appropriate, provided the parameter values were reasonable. A picture of a forest that had been rotated or shifted or stretched would still look like a forest, and contrast adjustments could perhaps make up for differences in light and shadow."
Q2 = MultipartProblem(Q2A, Q2B)
class Q3(CodingProblem):
pass
qvars = bind_exercises(globals(), [
Q1, Q2, Q3,
],
var_format='q_{n}',
)
__all__ = list(qvars)
|
Add exercise 6 checking code
|
Add exercise 6 checking code
|
Python
|
apache-2.0
|
Kaggle/learntools,Kaggle/learntools
|
Add exercise 6 checking code
|
from learntools.core import *
import tensorflow as tf
# Free
class Q1(CodingProblem):
_solution = ""
_hint = ""
def check(self):
pass
class Q2A(ThoughtExperiment):
_hint = "Remember that whatever transformation you apply needs at least to keep the classes distinct, but otherwise should more or less keep the appearance of the images the same. If you rotated a picture of a forest, would it still look like a forest?"
_solution = """It seems to this author that any of the transformations from the first problem might be appropriate, provided the parameter values were reasonable. A picture of a forest that had been rotated or shifted or stretched would still look like a forest, and contrast adjustments could perhaps make up for differences in light and shadow. Rotations especially could be taken through the full range, since there's no real concept of "up or down" for pictures taken straight overhead."""
class Q2B(ThoughtExperiment):
_hint = "Remember that whatever transformation you apply needs at least to keep the classes distinct, but otherwise should more or less keep the appearance of the images the same. If you rotated a picture of a forest, would it still look like a forest?"
_solution = "It seems to this author that any of the transformations from the first problem might be appropriate, provided the parameter values were reasonable. A picture of a forest that had been rotated or shifted or stretched would still look like a forest, and contrast adjustments could perhaps make up for differences in light and shadow."
Q2 = MultipartProblem(Q2A, Q2B)
class Q3(CodingProblem):
pass
qvars = bind_exercises(globals(), [
Q1, Q2, Q3,
],
var_format='q_{n}',
)
__all__ = list(qvars)
|
<commit_before><commit_msg>Add exercise 6 checking code<commit_after>
|
from learntools.core import *
import tensorflow as tf
# Free
class Q1(CodingProblem):
_solution = ""
_hint = ""
def check(self):
pass
class Q2A(ThoughtExperiment):
_hint = "Remember that whatever transformation you apply needs at least to keep the classes distinct, but otherwise should more or less keep the appearance of the images the same. If you rotated a picture of a forest, would it still look like a forest?"
_solution = """It seems to this author that any of the transformations from the first problem might be appropriate, provided the parameter values were reasonable. A picture of a forest that had been rotated or shifted or stretched would still look like a forest, and contrast adjustments could perhaps make up for differences in light and shadow. Rotations especially could be taken through the full range, since there's no real concept of "up or down" for pictures taken straight overhead."""
class Q2B(ThoughtExperiment):
_hint = "Remember that whatever transformation you apply needs at least to keep the classes distinct, but otherwise should more or less keep the appearance of the images the same. If you rotated a picture of a forest, would it still look like a forest?"
_solution = "It seems to this author that any of the transformations from the first problem might be appropriate, provided the parameter values were reasonable. A picture of a forest that had been rotated or shifted or stretched would still look like a forest, and contrast adjustments could perhaps make up for differences in light and shadow."
Q2 = MultipartProblem(Q2A, Q2B)
class Q3(CodingProblem):
pass
qvars = bind_exercises(globals(), [
Q1, Q2, Q3,
],
var_format='q_{n}',
)
__all__ = list(qvars)
|
Add exercise 6 checking codefrom learntools.core import *
import tensorflow as tf
# Free
class Q1(CodingProblem):
_solution = ""
_hint = ""
def check(self):
pass
class Q2A(ThoughtExperiment):
_hint = "Remember that whatever transformation you apply needs at least to keep the classes distinct, but otherwise should more or less keep the appearance of the images the same. If you rotated a picture of a forest, would it still look like a forest?"
_solution = """It seems to this author that any of the transformations from the first problem might be appropriate, provided the parameter values were reasonable. A picture of a forest that had been rotated or shifted or stretched would still look like a forest, and contrast adjustments could perhaps make up for differences in light and shadow. Rotations especially could be taken through the full range, since there's no real concept of "up or down" for pictures taken straight overhead."""
class Q2B(ThoughtExperiment):
_hint = "Remember that whatever transformation you apply needs at least to keep the classes distinct, but otherwise should more or less keep the appearance of the images the same. If you rotated a picture of a forest, would it still look like a forest?"
_solution = "It seems to this author that any of the transformations from the first problem might be appropriate, provided the parameter values were reasonable. A picture of a forest that had been rotated or shifted or stretched would still look like a forest, and contrast adjustments could perhaps make up for differences in light and shadow."
Q2 = MultipartProblem(Q2A, Q2B)
class Q3(CodingProblem):
pass
qvars = bind_exercises(globals(), [
Q1, Q2, Q3,
],
var_format='q_{n}',
)
__all__ = list(qvars)
|
<commit_before><commit_msg>Add exercise 6 checking code<commit_after>from learntools.core import *
import tensorflow as tf
# Free
class Q1(CodingProblem):
_solution = ""
_hint = ""
def check(self):
pass
class Q2A(ThoughtExperiment):
_hint = "Remember that whatever transformation you apply needs at least to keep the classes distinct, but otherwise should more or less keep the appearance of the images the same. If you rotated a picture of a forest, would it still look like a forest?"
_solution = """It seems to this author that any of the transformations from the first problem might be appropriate, provided the parameter values were reasonable. A picture of a forest that had been rotated or shifted or stretched would still look like a forest, and contrast adjustments could perhaps make up for differences in light and shadow. Rotations especially could be taken through the full range, since there's no real concept of "up or down" for pictures taken straight overhead."""
class Q2B(ThoughtExperiment):
_hint = "Remember that whatever transformation you apply needs at least to keep the classes distinct, but otherwise should more or less keep the appearance of the images the same. If you rotated a picture of a forest, would it still look like a forest?"
_solution = "It seems to this author that any of the transformations from the first problem might be appropriate, provided the parameter values were reasonable. A picture of a forest that had been rotated or shifted or stretched would still look like a forest, and contrast adjustments could perhaps make up for differences in light and shadow."
Q2 = MultipartProblem(Q2A, Q2B)
class Q3(CodingProblem):
pass
qvars = bind_exercises(globals(), [
Q1, Q2, Q3,
],
var_format='q_{n}',
)
__all__ = list(qvars)
|
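The transformations the hints and solutions discuss (rotation, shifts, contrast) map directly onto Keras preprocessing layers; a minimal sketch, assuming TF 2.6+ and with parameter values chosen purely for illustration:

import tensorflow as tf

# Full-range rotation is defensible for overhead imagery, where there is
# no fixed notion of "up or down".
augment = tf.keras.Sequential([
    tf.keras.layers.RandomRotation(factor=0.5),      # up to +/- 180 degrees
    tf.keras.layers.RandomTranslation(0.1, 0.1),     # modest shifts
    tf.keras.layers.RandomContrast(factor=0.2),      # light/shadow variation
])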
|
b4ab495f5addc50b885d07150de9ce3d6309c1bd
|
breadcrumbs/default_config.py
|
breadcrumbs/default_config.py
|
# Copy this file to config.py and fill in your own values
SECRET_KEY = 'Genjric Noguchen'
DEBUG = True
FACEBOOK_APP_ID = '1234567890'
FACEBOOK_APP_SECRET = '10abef0bc0'
|
Add a default configuration with instructions
|
Add a default configuration with instructions
|
Python
|
isc
|
breadcrumbs-app/breadcrumbs,breadcrumbs-app/breadcrumbs,breadcrumbs-app/breadcrumbs
|
Add a default configuration with instructions
|
# Copy this file to config.py and fill in your own values
SECRET_KEY = 'Genjric Noguchen'
DEBUG = True
FACEBOOK_APP_ID = '1234567890'
FACEBOOK_APP_SECRET = '10abef0bc0'
|
<commit_before><commit_msg>Add a default configuration with instructions<commit_after>
|
# Copy this file to config.py and fill in your own values
SECRET_KEY = 'Genjric Noguchen'
DEBUG = True
FACEBOOK_APP_ID = '1234567890'
FACEBOOK_APP_SECRET = '10abef0bc0'
|
Add a default configuration with instructions# Copy this file to config.py and fill in your own values
SECRET_KEY = 'Genjric Noguchen'
DEBUG = True
FACEBOOK_APP_ID = '1234567890'
FACEBOOK_APP_SECRET = '10abef0bc0'
|
<commit_before><commit_msg>Add a default configuration with instructions<commit_after># Copy this file to config.py and fill in your own values
SECRET_KEY = 'Genjric Noguchen'
DEBUG = True
FACEBOOK_APP_ID = '1234567890'
FACEBOOK_APP_SECRET = '10abef0bc0'
|
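For context, a Flask application would typically consume the copied config.py along these lines (a sketch; the app module itself is not part of this commit):

from flask import Flask

app = Flask(__name__)
# Pulls in SECRET_KEY, DEBUG, FACEBOOK_APP_ID and FACEBOOK_APP_SECRET.
app.config.from_pyfile('config.py')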
|
86c41ea896031cbcdf3894835f2bd5c8a91de079
|
test/rigid/cubes/create_restart.py
|
test/rigid/cubes/create_restart.py
|
from hoomd_script import *
init.read_xml('cubes.xml')
lj = pair.lj(r_cut=2**(1.0/6.0));
lj.pair_coeff.set('A', 'A', sigma=1.0, epsilon=1.0)
lj.set_params(mode='shift')
nlist.reset_exclusions(exclusions=['body'])
integrate.mode_standard(dt=0.005)
bdnvt = integrate.bdnvt_rigid(group=group.all(), T=1.2)
dcd = dump.dcd(filename='nve.dcd', period=100, overwrite=True)
log = analyze.log(filename="nve_basic.log", period=100, overwrite=True, quantities=['potential_energy', 'kinetic_energy'])
run(50000)
bdnvt.disable()
nve = integrate.nve_rigid(group=group.all())
run(50000)
xml = dump.xml()
xml.set_params(all=True)
xml.write(filename="continue.xml")
dcd = dump.dcd(filename='baseline.dcd', period=100)
run(50000)
|
Test script to create an xml restart file
|
Test script to create an xml restart file
git-svn-id: 5cda8128732f5b679951344c24659131be7d2dfc@2733 fa922fa7-2fde-0310-acd8-f43f465a7996
|
Python
|
bsd-3-clause
|
joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue
|
Test script to create an xml restart file
git-svn-id: 5cda8128732f5b679951344c24659131be7d2dfc@2733 fa922fa7-2fde-0310-acd8-f43f465a7996
|
from hoomd_script import *
init.read_xml('cubes.xml')
lj = pair.lj(r_cut=2**(1.0/6.0));
lj.pair_coeff.set('A', 'A', sigma=1.0, epsilon=1.0)
lj.set_params(mode='shift')
nlist.reset_exclusions(exclusions=['body'])
integrate.mode_standard(dt=0.005)
bdnvt = integrate.bdnvt_rigid(group=group.all(), T=1.2)
dcd = dump.dcd(filename='nve.dcd', period=100, overwrite=True)
log = analyze.log(filename="nve_basic.log", period=100, overwrite=True, quantities=['potential_energy', 'kinetic_energy'])
run(50000)
bdnvt.disable()
nve = integrate.nve_rigid(group=group.all())
run(50000)
xml = dump.xml()
xml.set_params(all=True)
xml.write(filename="continue.xml")
dcd = dump.dcd(filename='baseline.dcd', period=100)
run(50000)
|
<commit_before><commit_msg>Test script to create an xml restart file
git-svn-id: 5cda8128732f5b679951344c24659131be7d2dfc@2733 fa922fa7-2fde-0310-acd8-f43f465a7996<commit_after>
|
from hoomd_script import *
init.read_xml('cubes.xml')
lj = pair.lj(r_cut=2**(1.0/6.0));
lj.pair_coeff.set('A', 'A', sigma=1.0, epsilon=1.0)
lj.set_params(mode='shift')
nlist.reset_exclusions(exclusions=['body'])
integrate.mode_standard(dt=0.005)
bdnvt = integrate.bdnvt_rigid(group=group.all(), T=1.2)
dcd = dump.dcd(filename='nve.dcd', period=100, overwrite=True)
log = analyze.log(filename="nve_basic.log", period=100, overwrite=True, quantities=['potential_energy', 'kinetic_energy'])
run(50000)
bdnvt.disable()
nve = integrate.nve_rigid(group=group.all())
run(50000)
xml = dump.xml()
xml.set_params(all=True)
xml.write(filename="continue.xml")
dcd = dump.dcd(filename='baseline.dcd', period=100)
run(50000)
|
Test script to create an xml restart file
git-svn-id: 5cda8128732f5b679951344c24659131be7d2dfc@2733 fa922fa7-2fde-0310-acd8-f43f465a7996from hoomd_script import *
init.read_xml('cubes.xml')
lj = pair.lj(r_cut=2**(1.0/6.0));
lj.pair_coeff.set('A', 'A', sigma=1.0, epsilon=1.0)
lj.set_params(mode='shift')
nlist.reset_exclusions(exclusions=['body'])
integrate.mode_standard(dt=0.005)
bdnvt = integrate.bdnvt_rigid(group=group.all(), T=1.2)
dcd = dump.dcd(filename='nve.dcd', period=100, overwrite=True)
log = analyze.log(filename="nve_basic.log", period=100, overwrite=True, quantities=['potential_energy', 'kinetic_energy'])
run(50000)
bdnvt.disable()
nve = integrate.nve_rigid(group=group.all())
run(50000)
xml = dump.xml()
xml.set_params(all=True)
xml.write(filename="continue.xml")
dcd = dump.dcd(filename='baseline.dcd', period=100)
run(50000)
|
<commit_before><commit_msg>Test script to create an xml restart file
git-svn-id: 5cda8128732f5b679951344c24659131be7d2dfc@2733 fa922fa7-2fde-0310-acd8-f43f465a7996<commit_after>from hoomd_script import *
init.read_xml('cubes.xml')
lj = pair.lj(r_cut=2**(1.0/6.0));
lj.pair_coeff.set('A', 'A', sigma=1.0, epsilon=1.0)
lj.set_params(mode='shift')
nlist.reset_exclusions(exclusions=['body'])
integrate.mode_standard(dt=0.005)
bdnvt = integrate.bdnvt_rigid(group=group.all(), T=1.2)
dcd = dump.dcd(filename='nve.dcd', period=100, overwrite=True)
log = analyze.log(filename="nve_basic.log", period=100, overwrite=True, quantities=['potential_energy', 'kinetic_energy'])
run(50000)
bdnvt.disable()
nve = integrate.nve_rigid(group=group.all())
run(50000)
xml = dump.xml()
xml.set_params(all=True)
xml.write(filename="continue.xml")
dcd = dump.dcd(filename='baseline.dcd', period=100)
run(50000)
|
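A companion run would then restart from the dumped state and compare its trajectory against baseline.dcd; a sketch using the same legacy hoomd_script calls as the script above (the output file name here is ours):

from hoomd_script import *

init.read_xml('continue.xml')
lj = pair.lj(r_cut=2**(1.0/6.0))
lj.pair_coeff.set('A', 'A', sigma=1.0, epsilon=1.0)
lj.set_params(mode='shift')
nlist.reset_exclusions(exclusions=['body'])
integrate.mode_standard(dt=0.005)
nve = integrate.nve_rigid(group=group.all())
dcd = dump.dcd(filename='restarted.dcd', period=100)
run(50000)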
|
ac5696eb25063a753ac608c5765a5f7b804e95a2
|
locations/spiders/merrilllynch.py
|
locations/spiders/merrilllynch.py
|
# -*- coding: utf-8 -*-
import scrapy
from locations.items import GeojsonPointItem
import json
class MerrillLynchSpider(scrapy.Spider):
name = 'merrilllynch'
allowed_domains = ['ml.com']
start_urls = ('https://fa.ml.com/',)
def parse_branch(self, response):
data = json.loads(response.body_as_unicode())
for location in data["Results"]:
properties = {
'ref': location["UniqueId"],
'name': location["Company"],
'addr_full': location["Address1"].strip(),
'city': location["City"],
'state': location["Region"],
'country': location["Country"],
'postcode': location["PostalCode"],
'lat': float(location["GeoLat"]),
'lon': float(location["GeoLon"]),
'website': location["XmlData"]["parameters"].get("Url"),
'extras': {
'unit': location.get("Address2") or None
}
}
yield GeojsonPointItem(**properties)
def parse(self, response):
states = response.xpath('//section[@class="state-view"]//li/a/@data-state-abbrev').extract()
for state in states:
url = 'https://fa.ml.com/locator/api/InternalSearch'
payload = {
"Locator":"MER-WM-Offices",
"Region":state,
"Company":None,
"ProfileTypes":"Branch",
"DoFuzzyNameSearch":0,
"SearchRadius":100
}
yield scrapy.Request(url,
method='POST',
body=json.dumps(payload),
headers={'Content-Type':'application/json'},
callback=self.parse_branch)
|
Add spider for Merrill Lynch offices
|
Add spider for Merrill Lynch offices
|
Python
|
mit
|
iandees/all-the-places,iandees/all-the-places,iandees/all-the-places
|
Add spider for Merrill Lynch offices
|
# -*- coding: utf-8 -*-
import scrapy
from locations.items import GeojsonPointItem
import json
class MerrillLynchSpider(scrapy.Spider):
name = 'merrilllynch'
allowed_domains = ['ml.com']
start_urls = ('https://fa.ml.com/',)
def parse_branch(self, response):
data = json.loads(response.body_as_unicode())
for location in data["Results"]:
properties = {
'ref': location["UniqueId"],
'name': location["Company"],
'addr_full': location["Address1"].strip(),
'city': location["City"],
'state': location["Region"],
'country': location["Country"],
'postcode': location["PostalCode"],
'lat': float(location["GeoLat"]),
'lon': float(location["GeoLon"]),
'website': location["XmlData"]["parameters"].get("Url"),
'extras': {
'unit': location.get("Address2") or None
}
}
yield GeojsonPointItem(**properties)
def parse(self, response):
states = response.xpath('//section[@class="state-view"]//li/a/@data-state-abbrev').extract()
for state in states:
url = 'https://fa.ml.com/locator/api/InternalSearch'
payload = {
"Locator":"MER-WM-Offices",
"Region":state,
"Company":None,
"ProfileTypes":"Branch",
"DoFuzzyNameSearch":0,
"SearchRadius":100
}
yield scrapy.Request(url,
method='POST',
body=json.dumps(payload),
headers={'Content-Type':'application/json'},
callback=self.parse_branch)
|
<commit_before><commit_msg>Add spider for Merrill Lynch offices<commit_after>
|
# -*- coding: utf-8 -*-
import scrapy
from locations.items import GeojsonPointItem
import json
class MerrillLynchSpider(scrapy.Spider):
name = 'merrilllynch'
allowed_domains = ['ml.com']
start_urls = ('https://fa.ml.com/',)
def parse_branch(self, response):
data = json.loads(response.body_as_unicode())
for location in data["Results"]:
properties = {
'ref': location["UniqueId"],
'name': location["Company"],
'addr_full': location["Address1"].strip(),
'city': location["City"],
'state': location["Region"],
'country': location["Country"],
'postcode': location["PostalCode"],
'lat': float(location["GeoLat"]),
'lon': float(location["GeoLon"]),
'website': location["XmlData"]["parameters"].get("Url"),
'extras': {
'unit': location.get("Address2") or None
}
}
yield GeojsonPointItem(**properties)
def parse(self, response):
states = response.xpath('//section[@class="state-view"]//li/a/@data-state-abbrev').extract()
for state in states:
url = 'https://fa.ml.com/locator/api/InternalSearch'
payload = {
"Locator":"MER-WM-Offices",
"Region":state,
"Company":None,
"ProfileTypes":"Branch",
"DoFuzzyNameSearch":0,
"SearchRadius":100
}
yield scrapy.Request(url,
method='POST',
body=json.dumps(payload),
headers={'Content-Type':'application/json'},
callback=self.parse_branch)
|
Add spider for Merrill Lynch offices# -*- coding: utf-8 -*-
import scrapy
from locations.items import GeojsonPointItem
import json
class MerrillLynchSpider(scrapy.Spider):
name = 'merrilllynch'
allowed_domains = ['ml.com']
start_urls = ('https://fa.ml.com/',)
def parse_branch(self, response):
data = json.loads(response.body_as_unicode())
for location in data["Results"]:
properties = {
'ref': location["UniqueId"],
'name': location["Company"],
'addr_full': location["Address1"].strip(),
'city': location["City"],
'state': location["Region"],
'country': location["Country"],
'postcode': location["PostalCode"],
'lat': float(location["GeoLat"]),
'lon': float(location["GeoLon"]),
'website': location["XmlData"]["parameters"].get("Url"),
'extras': {
'unit': location.get("Address2") or None
}
}
yield GeojsonPointItem(**properties)
def parse(self, response):
states = response.xpath('//section[@class="state-view"]//li/a/@data-state-abbrev').extract()
for state in states:
url = 'https://fa.ml.com/locator/api/InternalSearch'
payload = {
"Locator":"MER-WM-Offices",
"Region":state,
"Company":None,
"ProfileTypes":"Branch",
"DoFuzzyNameSearch":0,
"SearchRadius":100
}
yield scrapy.Request(url,
method='POST',
body=json.dumps(payload),
headers={'Content-Type':'application/json'},
callback=self.parse_branch)
|
<commit_before><commit_msg>Add spider for Merrill Lynch offices<commit_after># -*- coding: utf-8 -*-
import scrapy
from locations.items import GeojsonPointItem
import json
class MerrillLynchSpider(scrapy.Spider):
name = 'merrilllynch'
allowed_domains = ['ml.com']
start_urls = ('https://fa.ml.com/',)
def parse_branch(self, response):
data = json.loads(response.body_as_unicode())
for location in data["Results"]:
properties = {
'ref': location["UniqueId"],
'name': location["Company"],
'addr_full': location["Address1"].strip(),
'city': location["City"],
'state': location["Region"],
'country': location["Country"],
'postcode': location["PostalCode"],
'lat': float(location["GeoLat"]),
'lon': float(location["GeoLon"]),
'website': location["XmlData"]["parameters"].get("Url"),
'extras': {
'unit': location.get("Address2") or None
}
}
yield GeojsonPointItem(**properties)
def parse(self, response):
states = response.xpath('//section[@class="state-view"]//li/a/@data-state-abbrev').extract()
for state in states:
url = 'https://fa.ml.com/locator/api/InternalSearch'
payload = {
"Locator":"MER-WM-Offices",
"Region":state,
"Company":None,
"ProfileTypes":"Branch",
"DoFuzzyNameSearch":0,
"SearchRadius":100
}
yield scrapy.Request(url,
method='POST',
body=json.dumps(payload),
headers={'Content-Type':'application/json'},
callback=self.parse_branch)
|
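Outside scrapy, the same locator endpoint can be exercised with requests for quick debugging; the sketch below is built only from the fields the spider already sends, and the endpoint's behavior is not independently verified here:

import json
import requests

payload = {
    "Locator": "MER-WM-Offices",
    "Region": "NY",  # any state abbreviation scraped from the landing page
    "Company": None,
    "ProfileTypes": "Branch",
    "DoFuzzyNameSearch": 0,
    "SearchRadius": 100,
}
resp = requests.post(
    'https://fa.ml.com/locator/api/InternalSearch',
    data=json.dumps(payload),
    headers={'Content-Type': 'application/json'},
)
for location in resp.json()["Results"]:
    print(location["UniqueId"], location["City"], location["Region"])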
|
50618c1f5cce31612d1278e7be0bfd14a670736a
|
facilities/migrations/set_facility_code_sequence_min_value.py
|
facilities/migrations/set_facility_code_sequence_min_value.py
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
from facilities.models import Facility
def set_min_code_value(apps, schema_editor):
from django.db import connection
cursor = connection.cursor()
sql = """
ALTER SEQUENCE facilities_facility_code_seq restart 100000 start 100000 minvalue 100000
"""
    cursor.execute(sql)
class Migration(migrations.Migration):
dependencies = [
('facilities', '0001_initial'),
]
operations = [
migrations.RunPython(set_min_code_value),
]
|
Add back facility code sequence migrations
|
Add back facility code sequence migrations
|
Python
|
mit
|
MasterFacilityList/mfl_api,MasterFacilityList/mfl_api,MasterFacilityList/mfl_api,MasterFacilityList/mfl_api,MasterFacilityList/mfl_api
|
Add back facility code sequence migrations
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
from facilities.models import Facility
def set_min_code_value(apps, schema_editor):
from django.db import connection
cursor = connection.cursor()
sql = """
ALTER SEQUENCE facilities_facility_code_seq restart 100000 start 100000 minvalue 100000
"""
    cursor.execute(sql)
class Migration(migrations.Migration):
dependencies = [
('facilities', '0001_initial'),
]
operations = [
migrations.RunPython(set_min_code_value),
]
|
<commit_before><commit_msg>Add back facility code sequence migrations<commit_after>
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
from facilities.models import Facility
def set_min_code_value(apps, schema_editor):
from django.db import connection
cursor = connection.cursor()
sql = """
ALTER SEQUENCE facilities_facility_code_seq restart 100000 start 100000 minvalue 100000
"""
    cursor.execute(sql)
class Migration(migrations.Migration):
dependencies = [
('facilities', '0001_initial'),
]
operations = [
migrations.RunPython(set_min_code_value),
]
|
Add back facility code sequence migrations# -*- coding: utf-8 -*-
from django.db import models, migrations
from facilities.models import Facility
def set_min_code_value(apps, schema_editor):
from django.db import connection
cursor = connection.cursor()
sql = """
ALTER SEQUENCE facilities_facility_code_seq restart 100000 start 100000 minvalue 100000
"""
    cursor.execute(sql)
class Migration(migrations.Migration):
dependencies = [
('facilities', '0001_initial'),
]
operations = [
migrations.RunPython(set_min_code_value),
]
|
<commit_before><commit_msg>Add back facility code sequence migrations<commit_after># -*- coding: utf-8 -*-
from django.db import models, migrations
from facilities.models import Facility
def set_min_code_value(apps, schema_editor):
from django.db import connection
cursor = connection.cursor()
sql = """
ALTER SEQUENCE facilities_facility_code_seq restart 100000 start 100000 minvalue 100000
"""
    cursor.execute(sql)
class Migration(migrations.Migration):
dependencies = [
('facilities', '0001_initial'),
]
operations = [
migrations.RunPython(set_min_code_value),
]
|
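One refinement worth noting: RunPython accepts a reverse callable, so the data migration can be made formally reversible with Django's built-in no-op. A sketch (reversing deliberately leaves the sequence untouched):

from django.db import migrations

def set_min_code_value(apps, schema_editor):
    schema_editor.execute(
        "ALTER SEQUENCE facilities_facility_code_seq "
        "restart 100000 start 100000 minvalue 100000"
    )

class Migration(migrations.Migration):
    dependencies = [('facilities', '0001_initial')]
    operations = [
        # The no-op reverse lets `migrate` roll back past this migration.
        migrations.RunPython(set_min_code_value, migrations.RunPython.noop),
    ]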
|
9a5c959bc8d83b0b5c93666d66111b3337e937fc
|
orchestra/test/test_integration.py
|
orchestra/test/test_integration.py
|
from .. import monkey; monkey.patch_all()
from nose.tools import eq_ as eq
import os
import nose
from .. import connection, run
from .util import assert_raises
def setup():
try:
host = os.environ['ORCHESTRA_TEST_HOST']
except KeyError:
raise nose.SkipTest(
'To run integration tests, set environment '
+ 'variable ORCHESTRA_TEST_HOST to user@host to use.',
)
global HOST
HOST = host
def test_crash():
ssh = connection.connect(HOST)
e = assert_raises(
run.CommandCrashedError,
run.run,
client=ssh,
args=['sh', '-c', 'kill -ABRT $$'],
)
eq(e.command, "sh -c 'kill -ABRT $$'")
eq(str(e), "Command crashed: \"sh -c 'kill -ABRT $$'\"")
def test_lost():
ssh = connection.connect(HOST)
e = assert_raises(
run.ConnectionLostError,
run.run,
client=ssh,
args=['sh', '-c', 'kill -ABRT $PPID'],
)
eq(e.command, "sh -c 'kill -ABRT $PPID'")
eq(str(e), "SSH connection was lost: \"sh -c 'kill -ABRT $PPID'\"")
|
Add integration tests for signals and connection loss.
|
Add integration tests for signals and connection loss.
|
Python
|
mit
|
ivotron/teuthology,robbat2/teuthology,dreamhost/teuthology,SUSE/teuthology,yghannam/teuthology,ktdreyer/teuthology,tchaikov/teuthology,caibo2014/teuthology,ktdreyer/teuthology,dmick/teuthology,dreamhost/teuthology,dmick/teuthology,t-miyamae/teuthology,SUSE/teuthology,tchaikov/teuthology,ceph/teuthology,robbat2/teuthology,zhouyuan/teuthology,ceph/teuthology,ivotron/teuthology,zhouyuan/teuthology,yghannam/teuthology,t-miyamae/teuthology,SUSE/teuthology,caibo2014/teuthology,michaelsevilla/teuthology,dmick/teuthology,michaelsevilla/teuthology
|
Add integration tests for signals and connection loss.
|
from .. import monkey; monkey.patch_all()
from nose.tools import eq_ as eq
import os
import nose
from .. import connection, run
from .util import assert_raises
def setup():
try:
host = os.environ['ORCHESTRA_TEST_HOST']
except KeyError:
raise nose.SkipTest(
'To run integration tests, set environment '
+ 'variable ORCHESTRA_TEST_HOST to user@host to use.',
)
global HOST
HOST = host
def test_crash():
ssh = connection.connect(HOST)
e = assert_raises(
run.CommandCrashedError,
run.run,
client=ssh,
args=['sh', '-c', 'kill -ABRT $$'],
)
eq(e.command, "sh -c 'kill -ABRT $$'")
eq(str(e), "Command crashed: \"sh -c 'kill -ABRT $$'\"")
def test_lost():
ssh = connection.connect(HOST)
e = assert_raises(
run.ConnectionLostError,
run.run,
client=ssh,
args=['sh', '-c', 'kill -ABRT $PPID'],
)
eq(e.command, "sh -c 'kill -ABRT $PPID'")
eq(str(e), "SSH connection was lost: \"sh -c 'kill -ABRT $PPID'\"")
|
<commit_before><commit_msg>Add integration tests for signals and connection loss.<commit_after>
|
from .. import monkey; monkey.patch_all()
from nose.tools import eq_ as eq
import os
import nose
from .. import connection, run
from .util import assert_raises
def setup():
try:
host = os.environ['ORCHESTRA_TEST_HOST']
except KeyError:
raise nose.SkipTest(
'To run integration tests, set environment '
+ 'variable ORCHESTRA_TEST_HOST to user@host to use.',
)
global HOST
HOST = host
def test_crash():
ssh = connection.connect(HOST)
e = assert_raises(
run.CommandCrashedError,
run.run,
client=ssh,
args=['sh', '-c', 'kill -ABRT $$'],
)
eq(e.command, "sh -c 'kill -ABRT $$'")
eq(str(e), "Command crashed: \"sh -c 'kill -ABRT $$'\"")
def test_lost():
ssh = connection.connect(HOST)
e = assert_raises(
run.ConnectionLostError,
run.run,
client=ssh,
args=['sh', '-c', 'kill -ABRT $PPID'],
)
eq(e.command, "sh -c 'kill -ABRT $PPID'")
eq(str(e), "SSH connection was lost: \"sh -c 'kill -ABRT $PPID'\"")
|
Add integration tests for signals and connection loss.from .. import monkey; monkey.patch_all()
from nose.tools import eq_ as eq
import os
import nose
from .. import connection, run
from .util import assert_raises
def setup():
try:
host = os.environ['ORCHESTRA_TEST_HOST']
except KeyError:
raise nose.SkipTest(
'To run integration tests, set environment '
+ 'variable ORCHESTRA_TEST_HOST to user@host to use.',
)
global HOST
HOST = host
def test_crash():
ssh = connection.connect(HOST)
e = assert_raises(
run.CommandCrashedError,
run.run,
client=ssh,
args=['sh', '-c', 'kill -ABRT $$'],
)
eq(e.command, "sh -c 'kill -ABRT $$'")
eq(str(e), "Command crashed: \"sh -c 'kill -ABRT $$'\"")
def test_lost():
ssh = connection.connect(HOST)
e = assert_raises(
run.ConnectionLostError,
run.run,
client=ssh,
args=['sh', '-c', 'kill -ABRT $PPID'],
)
eq(e.command, "sh -c 'kill -ABRT $PPID'")
eq(str(e), "SSH connection was lost: \"sh -c 'kill -ABRT $PPID'\"")
|
<commit_before><commit_msg>Add integration tests for signals and connection loss.<commit_after>from .. import monkey; monkey.patch_all()
from nose.tools import eq_ as eq
import os
import nose
from .. import connection, run
from .util import assert_raises
def setup():
try:
host = os.environ['ORCHESTRA_TEST_HOST']
except KeyError:
raise nose.SkipTest(
'To run integration tests, set environment '
+ 'variable ORCHESTRA_TEST_HOST to user@host to use.',
)
global HOST
HOST = host
def test_crash():
ssh = connection.connect(HOST)
e = assert_raises(
run.CommandCrashedError,
run.run,
client=ssh,
args=['sh', '-c', 'kill -ABRT $$'],
)
eq(e.command, "sh -c 'kill -ABRT $$'")
eq(str(e), "Command crashed: \"sh -c 'kill -ABRT $$'\"")
def test_lost():
ssh = connection.connect(HOST)
e = assert_raises(
run.ConnectionLostError,
run.run,
client=ssh,
args=['sh', '-c', 'kill -ABRT $PPID'],
)
eq(e.command, "sh -c 'kill -ABRT $PPID'")
eq(str(e), "SSH connection was lost: \"sh -c 'kill -ABRT $PPID'\"")
|
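The tests lean on a small assert_raises helper imported from .util; a minimal sketch of what such a helper does, which may differ from the project's actual version:

def assert_raises(exc_class, callable_obj, *args, **kwargs):
    # Invoke the callable and hand back the exception so the caller
    # can assert on its attributes (command, str(), etc.).
    try:
        callable_obj(*args, **kwargs)
    except exc_class as e:
        return e
    raise AssertionError('%s was not raised' % exc_class.__name__)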
|
877406927bc4754daeab10b9bfb0f7879e8f6092
|
perftest.py
|
perftest.py
|
"""
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
Add a very simple performance testing tool.
|
Add a very simple performance testing tool.
|
Python
|
bsd-3-clause
|
infinit/couchdb-python,djc/couchdb-python,djc/couchdb-python,Roger/couchdb-python
|
Add a very simple performance testing tool.
|
"""
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a very simple performance testing tool.<commit_after>
|
"""
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
Add a very simple performance testing tool."""
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a very simple performance testing tool.<commit_after>"""
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
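On modern Python the timing core of _run reduces to a few lines; a Python 3 sketch of just the measurement (time.perf_counter is monotonic and higher-resolution than time.time):

import time

def timed(func, *args, **kwargs):
    # Returns elapsed wall-clock seconds for one call.
    start = time.perf_counter()
    func(*args, **kwargs)
    return time.perf_counter() - start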