Dataset schema, as reported by the dataset viewer (column name, type, and min/max value length or number of distinct values):

| Column | Type | Lengths / values |
|---|---|---|
| commit | string | 40-40 |
| old_file | string | 4-118 |
| new_file | string | 4-118 |
| old_contents | string | 0-2.94k |
| new_contents | string | 1-4.43k |
| subject | string | 15-444 |
| message | string | 16-3.45k |
| lang | string (categorical) | 1 value |
| license | string (categorical) | 13 values |
| repos | string | 5-43.2k |
| prompt | string | 17-4.58k |
| response | string | 1-4.43k |
| prompt_tagged | string | 58-4.62k |
| response_tagged | string | 1-4.43k |
| text | string | 132-7.29k |
| text_tagged | string | 173-7.33k |
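Each record below lists its cells in the column order above, with bare `|` lines separating cells. For working with the records programmatically, a minimal loading sketch follows. The dataset id is a placeholder (this dump does not name the published dataset), and the final check encodes an assumption, inferred from the records below, about how the `*_tagged` columns are assembled; neither comes from pipeline code in this dump.

```python
# Minimal sketch, assuming these records are published as a Hugging Face
# dataset. "user/commit-dataset" is a hypothetical id, not named in this dump.
from datasets import load_dataset

ds = load_dataset("user/commit-dataset", split="train")
print(ds.column_names)  # should match the schema table above

# The *_tagged columns appear to follow the
# <commit_before>...<commit_msg>...<commit_after> template visible in the
# records; this reconstruction is an assumption, not code from this dump.
row = ds[0]
assumed_prompt = (
    "<commit_before>" + row["old_contents"]
    + "<commit_msg>" + row["message"]
    + "<commit_after>"
)
print(assumed_prompt == row["prompt_tagged"])
```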
fb5f6bf999b2cd8b674bc2c89f74f1413fc8ee1e
|
command_line_tic_tac_toe.py
|
command_line_tic_tac_toe.py
|
#!/usr/bin/env python3
import cmd
from tictactoe.ai_player import AIPlayer
from tictactoe.human_player import HumanPlayer
from tictactoe.game_controller import GameController
from tictactoe.board_stringification import BoardStringification
class CommandLineTicTacToe(cmd.Cmd):
def __init__(self,
intro="Tic Tac Toe CLI. Type help for help.\n\nHuman. You are X. Good luck. Your move\n\n",
prompt="→ "):
cmd.Cmd.__init__(self)
self.intro = intro
self.prompt = prompt
self._human = HumanPlayer("X", self._notify_move)
self._ai = AIPlayer("O", "X")
self._controller = GameController(self._human, self._ai, self._won_notification, self._draw_notification)
def _won_notification(self):
print("Game over. It was won\n\n")
self._print_board()
self.do_reset(None)
def _draw_notification(self):
print("Game over. It was a draw\n\n")
self._print_board()
self.do_reset(None)
def do_end(self, args):
return True
def help_end(self):
print("End session")
do_EOF = do_end
help_EOF = help_end
def do_reset(self, args):
self.do_human_start(None)
def help_reset(self):
print("Reset the current game")
def do_move(self, args):
print("Move passed in is: {0}".format(args))
try:
self._controller.place_move(self._human, int(args))
except ValueError as e:
print("Sorry, can't make that move: {0}".format(e.args[0]))
def help_move(self):
print("move x: Make a move at position x on the board")
def do_show_board(self, args):
print("Current game state\n")
self._print_board()
def help_show_board(self):
print("Shows the current state of the game")
def do_ai_start(self, args):
self._controller = GameController(self._ai, self._human, self._won_notification, self._draw_notification)
self._controller.notify_play()
def help_ai_start(self):
print("Initiate a new game where the AI starts")
def do_human_start(self, args):
self._controller = GameController(self._human, self._ai, self._won_notification, self._draw_notification)
self._controller.notify_play()
def help_human_start(self):
print("Initiate a new game where the AI starts")
def _notify_move(self):
print("Human, your move:\n")
self._print_board()
def _print_board(self):
print(BoardStringification().print_game_positions(self._controller._board))
if __name__ == '__main__':
cli = CommandLineTicTacToe()
cli.cmdloop()
|
Add command line interface to play
|
Add command line interface to play
|
Python
|
mit
|
rickerbh/tictactoe_py
|
Add command line interface to play
|
#!/usr/bin/env python3
import cmd
from tictactoe.ai_player import AIPlayer
from tictactoe.human_player import HumanPlayer
from tictactoe.game_controller import GameController
from tictactoe.board_stringification import BoardStringification
class CommandLineTicTacToe(cmd.Cmd):
def __init__(self,
intro="Tic Tac Toe CLI. Type help for help.\n\nHuman. You are X. Good luck. Your move\n\n",
prompt="→ "):
cmd.Cmd.__init__(self)
self.intro = intro
self.prompt = prompt
self._human = HumanPlayer("X", self._notify_move)
self._ai = AIPlayer("O", "X")
self._controller = GameController(self._human, self._ai, self._won_notification, self._draw_notification)
def _won_notification(self):
print("Game over. It was won\n\n")
self._print_board()
self.do_reset(None)
def _draw_notification(self):
print("Game over. It was a draw\n\n")
self._print_board()
self.do_reset(None)
def do_end(self, args):
return True
def help_end(self):
print("End session")
do_EOF = do_end
help_EOF = help_end
def do_reset(self, args):
self.do_human_start(None)
def help_reset(self):
print("Reset the current game")
def do_move(self, args):
print("Move passed in is: {0}".format(args))
try:
self._controller.place_move(self._human, int(args))
except ValueError as e:
print("Sorry, can't make that move: {0}".format(e.args[0]))
def help_move(self):
print("move x: Make a move at position x on the board")
def do_show_board(self, args):
print("Current game state\n")
self._print_board()
def help_show_board(self):
print("Shows the current state of the game")
def do_ai_start(self, args):
self._controller = GameController(self._ai, self._human, self._won_notification, self._draw_notification)
self._controller.notify_play()
def help_ai_start(self):
print("Initiate a new game where the AI starts")
def do_human_start(self, args):
self._controller = GameController(self._human, self._ai, self._won_notification, self._draw_notification)
self._controller.notify_play()
def help_human_start(self):
print("Initiate a new game where the AI starts")
def _notify_move(self):
print("Human, your move:\n")
self._print_board()
def _print_board(self):
print(BoardStringification().print_game_positions(self._controller._board))
if __name__ == '__main__':
cli = CommandLineTicTacToe()
cli.cmdloop()
|
<commit_before><commit_msg>Add command line interface to play<commit_after>
|
#!/usr/bin/env python3
import cmd
from tictactoe.ai_player import AIPlayer
from tictactoe.human_player import HumanPlayer
from tictactoe.game_controller import GameController
from tictactoe.board_stringification import BoardStringification
class CommandLineTicTacToe(cmd.Cmd):
def __init__(self,
intro="Tic Tac Toe CLI. Type help for help.\n\nHuman. You are X. Good luck. Your move\n\n",
prompt="→ "):
cmd.Cmd.__init__(self)
self.intro = intro
self.prompt = prompt
self._human = HumanPlayer("X", self._notify_move)
self._ai = AIPlayer("O", "X")
self._controller = GameController(self._human, self._ai, self._won_notification, self._draw_notification)
def _won_notification(self):
print("Game over. It was won\n\n")
self._print_board()
self.do_reset(None)
def _draw_notification(self):
print("Game over. It was a draw\n\n")
self._print_board()
self.do_reset(None)
def do_end(self, args):
return True
def help_end(self):
print("End session")
do_EOF = do_end
help_EOF = help_end
def do_reset(self, args):
self.do_human_start(None)
def help_reset(self):
print("Reset the current game")
def do_move(self, args):
print("Move passed in is: {0}".format(args))
try:
self._controller.place_move(self._human, int(args))
except ValueError as e:
print("Sorry, can't make that move: {0}".format(e.args[0]))
def help_move(self):
print("move x: Make a move at position x on the board")
def do_show_board(self, args):
print("Current game state\n")
self._print_board()
def help_show_board(self):
print("Shows the current state of the game")
def do_ai_start(self, args):
self._controller = GameController(self._ai, self._human, self._won_notification, self._draw_notification)
self._controller.notify_play()
def help_ai_start(self):
print("Initiate a new game where the AI starts")
def do_human_start(self, args):
self._controller = GameController(self._human, self._ai, self._won_notification, self._draw_notification)
self._controller.notify_play()
def help_human_start(self):
print("Initiate a new game where the AI starts")
def _notify_move(self):
print("Human, your move:\n")
self._print_board()
def _print_board(self):
print(BoardStringification().print_game_positions(self._controller._board))
if __name__ == '__main__':
cli = CommandLineTicTacToe()
cli.cmdloop()
|
Add command line interface to play
#!/usr/bin/env python3
import cmd
from tictactoe.ai_player import AIPlayer
from tictactoe.human_player import HumanPlayer
from tictactoe.game_controller import GameController
from tictactoe.board_stringification import BoardStringification
class CommandLineTicTacToe(cmd.Cmd):
def __init__(self,
intro="Tic Tac Toe CLI. Type help for help.\n\nHuman. You are X. Good luck. Your move\n\n",
prompt="→ "):
cmd.Cmd.__init__(self)
self.intro = intro
self.prompt = prompt
self._human = HumanPlayer("X", self._notify_move)
self._ai = AIPlayer("O", "X")
self._controller = GameController(self._human, self._ai, self._won_notification, self._draw_notification)
def _won_notification(self):
print("Game over. It was won\n\n")
self._print_board()
self.do_reset(None)
def _draw_notification(self):
print("Game over. It was a draw\n\n")
self._print_board()
self.do_reset(None)
def do_end(self, args):
return True
def help_end(self):
print("End session")
do_EOF = do_end
help_EOF = help_end
def do_reset(self, args):
self.do_human_start(None)
def help_reset(self):
print("Reset the current game")
def do_move(self, args):
print("Move passed in is: {0}".format(args))
try:
self._controller.place_move(self._human, int(args))
except ValueError as e:
print("Sorry, can't make that move: {0}".format(e.args[0]))
def help_move(self):
print("move x: Make a move at position x on the board")
def do_show_board(self, args):
print("Current game state\n")
self._print_board()
def help_show_board(self):
print("Shows the current state of the game")
def do_ai_start(self, args):
self._controller = GameController(self._ai, self._human, self._won_notification, self._draw_notification)
self._controller.notify_play()
def help_ai_start(self):
print("Initiate a new game where the AI starts")
def do_human_start(self, args):
self._controller = GameController(self._human, self._ai, self._won_notification, self._draw_notification)
self._controller.notify_play()
def help_human_start(self):
print("Initiate a new game where the AI starts")
def _notify_move(self):
print("Human, your move:\n")
self._print_board()
def _print_board(self):
print(BoardStringification().print_game_positions(self._controller._board))
if __name__ == '__main__':
cli = CommandLineTicTacToe()
cli.cmdloop()
|
<commit_before><commit_msg>Add command line interface to play<commit_after>#!/usr/bin/env python3
import cmd
from tictactoe.ai_player import AIPlayer
from tictactoe.human_player import HumanPlayer
from tictactoe.game_controller import GameController
from tictactoe.board_stringification import BoardStringification
class CommandLineTicTacToe(cmd.Cmd):
def __init__(self,
intro="Tic Tac Toe CLI. Type help for help.\n\nHuman. You are X. Good luck. Your move\n\n",
prompt="→ "):
cmd.Cmd.__init__(self)
self.intro = intro
self.prompt = prompt
self._human = HumanPlayer("X", self._notify_move)
self._ai = AIPlayer("O", "X")
self._controller = GameController(self._human, self._ai, self._won_notification, self._draw_notification)
def _won_notification(self):
print("Game over. It was won\n\n")
self._print_board()
self.do_reset(None)
def _draw_notification(self):
print("Game over. It was a draw\n\n")
self._print_board()
self.do_reset(None)
def do_end(self, args):
return True
def help_end(self):
print("End session")
do_EOF = do_end
help_EOF = help_end
def do_reset(self, args):
self.do_human_start(None)
def help_reset(self):
print("Reset the current game")
def do_move(self, args):
print("Move passed in is: {0}".format(args))
try:
self._controller.place_move(self._human, int(args))
except ValueError as e:
print("Sorry, can't make that move: {0}".format(e.args[0]))
def help_move(self):
print("move x: Make a move at position x on the board")
def do_show_board(self, args):
print("Current game state\n")
self._print_board()
def help_show_board(self):
print("Shows the current state of the game")
def do_ai_start(self, args):
self._controller = GameController(self._ai, self._human, self._won_notification, self._draw_notification)
self._controller.notify_play()
def help_ai_start(self):
print("Initiate a new game where the AI starts")
def do_human_start(self, args):
self._controller = GameController(self._human, self._ai, self._won_notification, self._draw_notification)
self._controller.notify_play()
def help_human_start(self):
print("Initiate a new game where the AI starts")
def _notify_move(self):
print("Human, your move:\n")
self._print_board()
def _print_board(self):
print(BoardStringification().print_game_positions(self._controller._board))
if __name__ == '__main__':
cli = CommandLineTicTacToe()
cli.cmdloop()
|
|
b1ef133904540b7f49e22ac52a0f844963be829e
|
nose2/tests/functional/test_discovery_loader.py
|
nose2/tests/functional/test_discovery_loader.py
|
from nose2.tests._common import FunctionalTestCase, support_file
from nose2 import events, loader, session
from nose2.plugins.loader.discovery import DiscoveryLoader
class Watcher(events.Plugin):
def __init__(self):
self.called = []
def loadTestsFromModule(self, event):
self.called.append(event)
class DiscoveryFunctionalTest(FunctionalTestCase):
def setUp(self):
self.session = session.Session()
self.plug = DiscoveryLoader(session=self.session)
self.loader = loader.PluggableTestLoader(self.session)
def test_createTests_hook(self):
self.plug.start_dir = support_file('scenario/tests_in_package')
watcher = Watcher(session=self.session)
watcher.register()
event = events.CreateTestsEvent(self.loader, None, None)
result = self.session.hooks.createTests(event)
assert isinstance(result, self.loader.suiteClass)
assert watcher.called
|
Add basic test for discovery loader
|
Add basic test for discovery loader
|
Python
|
bsd-2-clause
|
ojengwa/nose2,ezigman/nose2,little-dude/nose2,ojengwa/nose2,leth/nose2,ptthiem/nose2,ptthiem/nose2,little-dude/nose2,leth/nose2,ezigman/nose2
|
Add basic test for discovery loader
|
from nose2.tests._common import FunctionalTestCase, support_file
from nose2 import events, loader, session
from nose2.plugins.loader.discovery import DiscoveryLoader
class Watcher(events.Plugin):
def __init__(self):
self.called = []
def loadTestsFromModule(self, event):
self.called.append(event)
class DiscoveryFunctionalTest(FunctionalTestCase):
def setUp(self):
self.session = session.Session()
self.plug = DiscoveryLoader(session=self.session)
self.loader = loader.PluggableTestLoader(self.session)
def test_createTests_hook(self):
self.plug.start_dir = support_file('scenario/tests_in_package')
watcher = Watcher(session=self.session)
watcher.register()
event = events.CreateTestsEvent(self.loader, None, None)
result = self.session.hooks.createTests(event)
assert isinstance(result, self.loader.suiteClass)
assert watcher.called
|
<commit_before><commit_msg>Add basic test for discovery loader<commit_after>
|
from nose2.tests._common import FunctionalTestCase, support_file
from nose2 import events, loader, session
from nose2.plugins.loader.discovery import DiscoveryLoader
class Watcher(events.Plugin):
def __init__(self):
self.called = []
def loadTestsFromModule(self, event):
self.called.append(event)
class DiscoveryFunctionalTest(FunctionalTestCase):
def setUp(self):
self.session = session.Session()
self.plug = DiscoveryLoader(session=self.session)
self.loader = loader.PluggableTestLoader(self.session)
def test_createTests_hook(self):
self.plug.start_dir = support_file('scenario/tests_in_package')
watcher = Watcher(session=self.session)
watcher.register()
event = events.CreateTestsEvent(self.loader, None, None)
result = self.session.hooks.createTests(event)
assert isinstance(result, self.loader.suiteClass)
assert watcher.called
|
Add basic test for discovery loader
from nose2.tests._common import FunctionalTestCase, support_file
from nose2 import events, loader, session
from nose2.plugins.loader.discovery import DiscoveryLoader
class Watcher(events.Plugin):
def __init__(self):
self.called = []
def loadTestsFromModule(self, event):
self.called.append(event)
class DiscoveryFunctionalTest(FunctionalTestCase):
def setUp(self):
self.session = session.Session()
self.plug = DiscoveryLoader(session=self.session)
self.loader = loader.PluggableTestLoader(self.session)
def test_createTests_hook(self):
self.plug.start_dir = support_file('scenario/tests_in_package')
watcher = Watcher(session=self.session)
watcher.register()
event = events.CreateTestsEvent(self.loader, None, None)
result = self.session.hooks.createTests(event)
assert isinstance(result, self.loader.suiteClass)
assert watcher.called
|
<commit_before><commit_msg>Add basic test for discovery loader<commit_after>from nose2.tests._common import FunctionalTestCase, support_file
from nose2 import events, loader, session
from nose2.plugins.loader.discovery import DiscoveryLoader
class Watcher(events.Plugin):
def __init__(self):
self.called = []
def loadTestsFromModule(self, event):
self.called.append(event)
class DiscoveryFunctionalTest(FunctionalTestCase):
def setUp(self):
self.session = session.Session()
self.plug = DiscoveryLoader(session=self.session)
self.loader = loader.PluggableTestLoader(self.session)
def test_createTests_hook(self):
self.plug.start_dir = support_file('scenario/tests_in_package')
watcher = Watcher(session=self.session)
watcher.register()
event = events.CreateTestsEvent(self.loader, None, None)
result = self.session.hooks.createTests(event)
assert isinstance(result, self.loader.suiteClass)
assert watcher.called
|
|
681cc0a4160373fe82de59946b52e0e21611af84
|
linkLister.py
|
linkLister.py
|
import requests
import re
url = raw_input("Enter URL with http or https prefix : " )
print url
website = requests.get(url)
html = website.text
print html
linklist = re.findall('"((http|ftp)s?://.*?)"',html)
print linklist
for link in linklist:
print link[0]
|
Print out all links on a page
|
Print out all links on a page
|
Python
|
mit
|
NilanjanaLodh/PyScripts,NilanjanaLodh/PyScripts
|
Print out all links on a page
|
import requests
import re
url = raw_input("Enter URL with http or https prefix : " )
print url
website = requests.get(url)
html = website.text
print html
linklist = re.findall('"((http|ftp)s?://.*?)"',html)
print linklist
for link in linklist:
print link[0]
|
<commit_before><commit_msg>Print out all links on a page<commit_after>
|
import requests
import re
url = raw_input("Enter URL with http or https prefix : " )
print url
website = requests.get(url)
html = website.text
print html
linklist = re.findall('"((http|ftp)s?://.*?)"',html)
print linklist
for link in linklist:
print link[0]
|
Print out all links on a page
import requests
import re
url = raw_input("Enter URL with http or https prefix : " )
print url
website = requests.get(url)
html = website.text
print html
linklist = re.findall('"((http|ftp)s?://.*?)"',html)
print linklist
for link in linklist:
print link[0]
|
<commit_before><commit_msg>Print out all links on a page<commit_after>import requests
import re
url = raw_input("Enter URL with http or https prefix : " )
print url
website = requests.get(url)
html = website.text
print html
linklist = re.findall('"((http|ftp)s?://.*?)"',html)
print linklist
for link in linklist:
print link[0]
|
|
27622185e04bb652284597783287262e23bafa7d
|
plenum/test/node_request/test_apply_stashed_partially_ordered.py
|
plenum/test/node_request/test_apply_stashed_partially_ordered.py
|
import pytest
from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.common.startable import Mode
from plenum.common.txn_util import reqToTxn
from plenum.test.delayers import cDelay
from plenum.test.helper import sdk_get_and_check_replies, sdk_send_random_requests, logger
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.stasher import delay_rules
from plenum.test.test_node import getNonPrimaryReplicas
from stp_core.loop.eventually import eventually
TOTAL_REQUESTS = 10
@pytest.fixture(scope="module")
def tconf(tconf):
old_max_batch_wait = tconf.Max3PCBatchWait
old_max_batch_size = tconf.Max3PCBatchSize
tconf.Max3PCBatchWait = 1000
tconf.Max3PCBatchSize = TOTAL_REQUESTS
yield tconf
tconf.Max3PCBatchWait = old_max_batch_wait
tconf.Max3PCBatchSize = old_max_batch_size
def test_apply_stashed_partially_ordered(looper,
txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client):
test_node = getNonPrimaryReplicas(txnPoolNodeSet)[0].node
test_stasher = test_node.nodeIbStasher
ledger_size = max(node.domainLedger.size for node in txnPoolNodeSet)
def check_pool_ordered_some_requests():
assert max(node.domainLedger.size for node in txnPoolNodeSet) > ledger_size
def check_test_node_has_stashed_ordered_requests():
assert len(test_node.stashedOrderedReqs) > 0
with delay_rules(test_stasher, cDelay()):
reqs = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, TOTAL_REQUESTS)
looper.run(eventually(check_pool_ordered_some_requests))
test_node.mode = Mode.syncing
looper.run(eventually(check_test_node_has_stashed_ordered_requests))
req_idr = test_node.stashedOrderedReqs[0].reqIdr
req_idr = req_idr[:len(req_idr) // 2]
assert len(req_idr) > 1
ledger_info = test_node.ledgerManager.getLedgerInfoByType(DOMAIN_LEDGER_ID)
for id in req_idr:
txn = reqToTxn(test_node.requests[id].finalised)
ledger_info.ledger.add(txn)
ledger_info.postTxnAddedToLedgerClbk(DOMAIN_LEDGER_ID, txn)
test_node.mode = Mode.participating
test_node.processStashedOrderedReqs()
ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
sdk_get_and_check_replies(looper, reqs)
|
Add minimal test case (failing)
|
INDY-1405: Add minimal test case (failing)
Signed-off-by: Sergey Khoroshavin <b770466c7a06c5fe47531d5f0e31684f1131354d@dsr-corporation.com>
|
Python
|
apache-2.0
|
evernym/zeno,evernym/plenum
|
INDY-1405: Add minimal test case (failing)
Signed-off-by: Sergey Khoroshavin <b770466c7a06c5fe47531d5f0e31684f1131354d@dsr-corporation.com>
|
import pytest
from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.common.startable import Mode
from plenum.common.txn_util import reqToTxn
from plenum.test.delayers import cDelay
from plenum.test.helper import sdk_get_and_check_replies, sdk_send_random_requests, logger
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.stasher import delay_rules
from plenum.test.test_node import getNonPrimaryReplicas
from stp_core.loop.eventually import eventually
TOTAL_REQUESTS = 10
@pytest.fixture(scope="module")
def tconf(tconf):
old_max_batch_wait = tconf.Max3PCBatchWait
old_max_batch_size = tconf.Max3PCBatchSize
tconf.Max3PCBatchWait = 1000
tconf.Max3PCBatchSize = TOTAL_REQUESTS
yield tconf
tconf.Max3PCBatchWait = old_max_batch_wait
tconf.Max3PCBatchSize = old_max_batch_size
def test_apply_stashed_partially_ordered(looper,
txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client):
test_node = getNonPrimaryReplicas(txnPoolNodeSet)[0].node
test_stasher = test_node.nodeIbStasher
ledger_size = max(node.domainLedger.size for node in txnPoolNodeSet)
def check_pool_ordered_some_requests():
assert max(node.domainLedger.size for node in txnPoolNodeSet) > ledger_size
def check_test_node_has_stashed_ordered_requests():
assert len(test_node.stashedOrderedReqs) > 0
with delay_rules(test_stasher, cDelay()):
reqs = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, TOTAL_REQUESTS)
looper.run(eventually(check_pool_ordered_some_requests))
test_node.mode = Mode.syncing
looper.run(eventually(check_test_node_has_stashed_ordered_requests))
req_idr = test_node.stashedOrderedReqs[0].reqIdr
req_idr = req_idr[:len(req_idr) // 2]
assert len(req_idr) > 1
ledger_info = test_node.ledgerManager.getLedgerInfoByType(DOMAIN_LEDGER_ID)
for id in req_idr:
txn = reqToTxn(test_node.requests[id].finalised)
ledger_info.ledger.add(txn)
ledger_info.postTxnAddedToLedgerClbk(DOMAIN_LEDGER_ID, txn)
test_node.mode = Mode.participating
test_node.processStashedOrderedReqs()
ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
sdk_get_and_check_replies(looper, reqs)
|
<commit_before><commit_msg>INDY-1405: Add minimal test case (failing)
Signed-off-by: Sergey Khoroshavin <b770466c7a06c5fe47531d5f0e31684f1131354d@dsr-corporation.com><commit_after>
|
import pytest
from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.common.startable import Mode
from plenum.common.txn_util import reqToTxn
from plenum.test.delayers import cDelay
from plenum.test.helper import sdk_get_and_check_replies, sdk_send_random_requests, logger
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.stasher import delay_rules
from plenum.test.test_node import getNonPrimaryReplicas
from stp_core.loop.eventually import eventually
TOTAL_REQUESTS = 10
@pytest.fixture(scope="module")
def tconf(tconf):
old_max_batch_wait = tconf.Max3PCBatchWait
old_max_batch_size = tconf.Max3PCBatchSize
tconf.Max3PCBatchWait = 1000
tconf.Max3PCBatchSize = TOTAL_REQUESTS
yield tconf
tconf.Max3PCBatchWait = old_max_batch_wait
tconf.Max3PCBatchSize = old_max_batch_size
def test_apply_stashed_partially_ordered(looper,
txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client):
test_node = getNonPrimaryReplicas(txnPoolNodeSet)[0].node
test_stasher = test_node.nodeIbStasher
ledger_size = max(node.domainLedger.size for node in txnPoolNodeSet)
def check_pool_ordered_some_requests():
assert max(node.domainLedger.size for node in txnPoolNodeSet) > ledger_size
def check_test_node_has_stashed_ordered_requests():
assert len(test_node.stashedOrderedReqs) > 0
with delay_rules(test_stasher, cDelay()):
reqs = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, TOTAL_REQUESTS)
looper.run(eventually(check_pool_ordered_some_requests))
test_node.mode = Mode.syncing
looper.run(eventually(check_test_node_has_stashed_ordered_requests))
req_idr = test_node.stashedOrderedReqs[0].reqIdr
req_idr = req_idr[:len(req_idr) // 2]
assert len(req_idr) > 1
ledger_info = test_node.ledgerManager.getLedgerInfoByType(DOMAIN_LEDGER_ID)
for id in req_idr:
txn = reqToTxn(test_node.requests[id].finalised)
ledger_info.ledger.add(txn)
ledger_info.postTxnAddedToLedgerClbk(DOMAIN_LEDGER_ID, txn)
test_node.mode = Mode.participating
test_node.processStashedOrderedReqs()
ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
sdk_get_and_check_replies(looper, reqs)
|
INDY-1405: Add minimal test case (failing)
Signed-off-by: Sergey Khoroshavin <b770466c7a06c5fe47531d5f0e31684f1131354d@dsr-corporation.com>
import pytest
from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.common.startable import Mode
from plenum.common.txn_util import reqToTxn
from plenum.test.delayers import cDelay
from plenum.test.helper import sdk_get_and_check_replies, sdk_send_random_requests, logger
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.stasher import delay_rules
from plenum.test.test_node import getNonPrimaryReplicas
from stp_core.loop.eventually import eventually
TOTAL_REQUESTS = 10
@pytest.fixture(scope="module")
def tconf(tconf):
old_max_batch_wait = tconf.Max3PCBatchWait
old_max_batch_size = tconf.Max3PCBatchSize
tconf.Max3PCBatchWait = 1000
tconf.Max3PCBatchSize = TOTAL_REQUESTS
yield tconf
tconf.Max3PCBatchWait = old_max_batch_wait
tconf.Max3PCBatchSize = old_max_batch_size
def test_apply_stashed_partially_ordered(looper,
txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client):
test_node = getNonPrimaryReplicas(txnPoolNodeSet)[0].node
test_stasher = test_node.nodeIbStasher
ledger_size = max(node.domainLedger.size for node in txnPoolNodeSet)
def check_pool_ordered_some_requests():
assert max(node.domainLedger.size for node in txnPoolNodeSet) > ledger_size
def check_test_node_has_stashed_ordered_requests():
assert len(test_node.stashedOrderedReqs) > 0
with delay_rules(test_stasher, cDelay()):
reqs = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, TOTAL_REQUESTS)
looper.run(eventually(check_pool_ordered_some_requests))
test_node.mode = Mode.syncing
looper.run(eventually(check_test_node_has_stashed_ordered_requests))
req_idr = test_node.stashedOrderedReqs[0].reqIdr
req_idr = req_idr[:len(req_idr) // 2]
assert len(req_idr) > 1
ledger_info = test_node.ledgerManager.getLedgerInfoByType(DOMAIN_LEDGER_ID)
for id in req_idr:
txn = reqToTxn(test_node.requests[id].finalised)
ledger_info.ledger.add(txn)
ledger_info.postTxnAddedToLedgerClbk(DOMAIN_LEDGER_ID, txn)
test_node.mode = Mode.participating
test_node.processStashedOrderedReqs()
ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
sdk_get_and_check_replies(looper, reqs)
|
<commit_before><commit_msg>INDY-1405: Add minimal test case (failing)
Signed-off-by: Sergey Khoroshavin <b770466c7a06c5fe47531d5f0e31684f1131354d@dsr-corporation.com><commit_after>import pytest
from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.common.startable import Mode
from plenum.common.txn_util import reqToTxn
from plenum.test.delayers import cDelay
from plenum.test.helper import sdk_get_and_check_replies, sdk_send_random_requests, logger
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.stasher import delay_rules
from plenum.test.test_node import getNonPrimaryReplicas
from stp_core.loop.eventually import eventually
TOTAL_REQUESTS = 10
@pytest.fixture(scope="module")
def tconf(tconf):
old_max_batch_wait = tconf.Max3PCBatchWait
old_max_batch_size = tconf.Max3PCBatchSize
tconf.Max3PCBatchWait = 1000
tconf.Max3PCBatchSize = TOTAL_REQUESTS
yield tconf
tconf.Max3PCBatchWait = old_max_batch_wait
tconf.Max3PCBatchSize = old_max_batch_size
def test_apply_stashed_partially_ordered(looper,
txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client):
test_node = getNonPrimaryReplicas(txnPoolNodeSet)[0].node
test_stasher = test_node.nodeIbStasher
ledger_size = max(node.domainLedger.size for node in txnPoolNodeSet)
def check_pool_ordered_some_requests():
assert max(node.domainLedger.size for node in txnPoolNodeSet) > ledger_size
def check_test_node_has_stashed_ordered_requests():
assert len(test_node.stashedOrderedReqs) > 0
with delay_rules(test_stasher, cDelay()):
reqs = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, TOTAL_REQUESTS)
looper.run(eventually(check_pool_ordered_some_requests))
test_node.mode = Mode.syncing
looper.run(eventually(check_test_node_has_stashed_ordered_requests))
req_idr = test_node.stashedOrderedReqs[0].reqIdr
req_idr = req_idr[:len(req_idr) // 2]
assert len(req_idr) > 1
ledger_info = test_node.ledgerManager.getLedgerInfoByType(DOMAIN_LEDGER_ID)
for id in req_idr:
txn = reqToTxn(test_node.requests[id].finalised)
ledger_info.ledger.add(txn)
ledger_info.postTxnAddedToLedgerClbk(DOMAIN_LEDGER_ID, txn)
test_node.mode = Mode.participating
test_node.processStashedOrderedReqs()
ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
sdk_get_and_check_replies(looper, reqs)
|
|
e8a6c0adc3aa77f8e0b1399fe076b43720acb823
|
tests/test_api.py
|
tests/test_api.py
|
# -*- coding: utf-8 -*-
import subprocess
import requests
from unittest import TestCase
from nose.tools import assert_equal
class Test(TestCase):
def setUp(self):
self.process = subprocess.Popen("openfisca-serve")
def tearDown(self):
self.process.terminate()
def test_response(self):
assert_equal(
requests.get("http://localhost:2000").status_code,
200
)
|
Test the API can run
|
Test the API can run
|
Python
|
agpl-3.0
|
antoinearnoud/openfisca-france,antoinearnoud/openfisca-france,sgmap/openfisca-france,sgmap/openfisca-france
|
Test the API can run
|
# -*- coding: utf-8 -*-
import subprocess
import requests
from unittest import TestCase
from nose.tools import assert_equal
class Test(TestCase):
def setUp(self):
self.process = subprocess.Popen("openfisca-serve")
def tearDown(self):
self.process.terminate()
def test_response(self):
assert_equal(
requests.get("http://localhost:2000").status_code,
200
)
|
<commit_before><commit_msg>Test the API can run<commit_after>
|
# -*- coding: utf-8 -*-
import subprocess
import requests
from unittest import TestCase
from nose.tools import assert_equal
class Test(TestCase):
def setUp(self):
self.process = subprocess.Popen("openfisca-serve")
def tearDown(self):
self.process.terminate()
def test_response(self):
assert_equal(
requests.get("http://localhost:2000").status_code,
200
)
|
Test the API can run
# -*- coding: utf-8 -*-
import subprocess
import requests
from unittest import TestCase
from nose.tools import assert_equal
class Test(TestCase):
def setUp(self):
self.process = subprocess.Popen("openfisca-serve")
def tearDown(self):
self.process.terminate()
def test_response(self):
assert_equal(
requests.get("http://localhost:2000").status_code,
200
)
|
<commit_before><commit_msg>Test the API can run<commit_after># -*- coding: utf-8 -*-
import subprocess
import requests
from unittest import TestCase
from nose.tools import assert_equal
class Test(TestCase):
def setUp(self):
self.process = subprocess.Popen("openfisca-serve")
def tearDown(self):
self.process.terminate()
def test_response(self):
assert_equal(
requests.get("http://localhost:2000").status_code,
200
)
|
|
690c08b2b35df2d81dc0977d8bd593c45806e1c2
|
tests/test_log.py
|
tests/test_log.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from flask import url_for
def test_view_build_log(test_client):
test_client.get(url_for('log.build_log', sha='123456'))
def test_view_lint_log(test_client):
test_client.get(url_for('log.lint_log', sha='123456'))
|
Add dumb log view test cases
|
Add dumb log view test cases
|
Python
|
mit
|
bosondata/badwolf,bosondata/badwolf,bosondata/badwolf
|
Add dumb log view test cases
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from flask import url_for
def test_view_build_log(test_client):
test_client.get(url_for('log.build_log', sha='123456'))
def test_view_lint_log(test_client):
test_client.get(url_for('log.lint_log', sha='123456'))
|
<commit_before><commit_msg>Add dumb log view test cases<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from flask import url_for
def test_view_build_log(test_client):
test_client.get(url_for('log.build_log', sha='123456'))
def test_view_lint_log(test_client):
test_client.get(url_for('log.lint_log', sha='123456'))
|
Add dumb log view test cases
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from flask import url_for
def test_view_build_log(test_client):
test_client.get(url_for('log.build_log', sha='123456'))
def test_view_lint_log(test_client):
test_client.get(url_for('log.lint_log', sha='123456'))
|
<commit_before><commit_msg>Add dumb log view test cases<commit_after># -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from flask import url_for
def test_view_build_log(test_client):
test_client.get(url_for('log.build_log', sha='123456'))
def test_view_lint_log(test_client):
test_client.get(url_for('log.lint_log', sha='123456'))
|
|
4d500d9abe2da28cdd9bd95019048de445aac265
|
docs/source/tutorial/v5/history_demo.py
|
docs/source/tutorial/v5/history_demo.py
|
# coding: utf-8
from deprecated.history import deprecated
from deprecated.history import versionadded
from deprecated.history import versionchanged
@deprecated(
reason="""
This is deprecated, really. So you need to use another function.
But I don\'t know which one.
- The first,
- The second.
Just guess!
""",
version='0.3.0')
@versionchanged(
reason='Well, I add a new feature in this function. '
'It is very useful as you can see in the example below, so try it. '
'This is a very very very very very long sentence.',
version='0.2.0')
@versionadded(
reason='Here is my new function.',
version='0.1.0')
def successor(n):
"""
Calculate the successor of a number.
:param n: a number
:return: number + 1
"""
return n + 1
help(successor)
|
Add a history demo in documentation.
|
Add a history demo in documentation.
|
Python
|
mit
|
tantale/deprecated
|
Add a history demo in documentation.
|
# coding: utf-8
from deprecated.history import deprecated
from deprecated.history import versionadded
from deprecated.history import versionchanged
@deprecated(
reason="""
This is deprecated, really. So you need to use another function.
But I don\'t know which one.
- The first,
- The second.
Just guess!
""",
version='0.3.0')
@versionchanged(
reason='Well, I add a new feature in this function. '
'It is very useful as you can see in the example below, so try it. '
'This is a very very very very very long sentence.',
version='0.2.0')
@versionadded(
reason='Here is my new function.',
version='0.1.0')
def successor(n):
"""
Calculate the successor of a number.
:param n: a number
:return: number + 1
"""
return n + 1
help(successor)
|
<commit_before><commit_msg>Add a history demo in documentation.<commit_after>
|
# coding: utf-8
from deprecated.history import deprecated
from deprecated.history import versionadded
from deprecated.history import versionchanged
@deprecated(
reason="""
This is deprecated, really. So you need to use another function.
But I don\'t know which one.
- The first,
- The second.
Just guess!
""",
version='0.3.0')
@versionchanged(
reason='Well, I add a new feature in this function. '
'It is very useful as you can see in the example below, so try it. '
'This is a very very very very very long sentence.',
version='0.2.0')
@versionadded(
reason='Here is my new function.',
version='0.1.0')
def successor(n):
"""
Calculate the successor of a number.
:param n: a number
:return: number + 1
"""
return n + 1
help(successor)
|
Add a history demo in documentation.
# coding: utf-8
from deprecated.history import deprecated
from deprecated.history import versionadded
from deprecated.history import versionchanged
@deprecated(
reason="""
This is deprecated, really. So you need to use another function.
But I don\'t know which one.
- The first,
- The second.
Just guess!
""",
version='0.3.0')
@versionchanged(
reason='Well, I add a new feature in this function. '
'It is very useful as you can see in the example below, so try it. '
'This is a very very very very very long sentence.',
version='0.2.0')
@versionadded(
reason='Here is my new function.',
version='0.1.0')
def successor(n):
"""
Calculate the successor of a number.
:param n: a number
:return: number + 1
"""
return n + 1
help(successor)
|
<commit_before><commit_msg>Add a history demo in documentation.<commit_after># coding: utf-8
from deprecated.history import deprecated
from deprecated.history import versionadded
from deprecated.history import versionchanged
@deprecated(
reason="""
This is deprecated, really. So you need to use another function.
But I don\'t know which one.
- The first,
- The second.
Just guess!
""",
version='0.3.0')
@versionchanged(
reason='Well, I add a new feature in this function. '
'It is very useful as you can see in the example below, so try it. '
'This is a very very very very very long sentence.',
version='0.2.0')
@versionadded(
reason='Here is my new function.',
version='0.1.0')
def successor(n):
"""
Calculate the successor of a number.
:param n: a number
:return: number + 1
"""
return n + 1
help(successor)
|
|
c39c086f51963678769c1066637ca573c721e827
|
static_gallery.py
|
static_gallery.py
|
from . import flag
#from go import html
from go import os
from go import path/filepath
def ReadAlbumDirs(input_dir):
f = os.Open(input_dir)
with defer f.Close():
names = f.Readdirnames(-1)
for name in names:
stat = os.Stat(filepath.Join(input_dir, name))
if stat.IsDir():
yield name
def RenderDir(album_names, output_dir):
index = filepath.Join(output_dir, 'index.html')
f = os.Create(index)
with defer f.Close():
f.Write('<html><body><h3>Gallery %s</h3> <ul>\n' % output_dir)
for name in album_names:
f.Write('<li><a href="%s">%q</a></li>\n' % (name, name))
def ReadPhotosInDir(input_dir):
f = os.Open(input_dir)
with defer f.Close():
names = f.Readdirnames(-1)
for name in names:
stat = os.Stat(filepath.Join(input_dir, name))
if stat.IsDir() == False:
yield name
def RenderAlbum(photo_names, output_dir):
index = filepath.Join(output_dir, 'index.html')
f = os.Create(index)
with defer f.Close():
f.Write('<html><body><h3>Album %s</h3> <ul>\n' % output_dir)
for name in photo_names:
f.Write('<li><a href="%s"><img src="%s" /></a></li>\n' % (name, name))
def LinkPhotos(photo_names, input_dir, output_dir):
for photo in photo_names:
photo_orig = filepath.Join(input_dir, photo)
photo_dest = filepath.Join(output_dir, photo)
os.Link(photo_orig, photo_dest)
input_dir = flag.String('input', '', 'The input directory.')
output_dir = flag.String('output', '', 'The output directory.')
def main(argv):
argv = flag.Munch(argv)
album_dirs = list(ReadAlbumDirs(input_dir.X))
RenderDir(album_dirs, output_dir.X)
for dir in album_dirs:
photo_dir = filepath.Join(input_dir.X, dir)
# Use a distinct name so the output_dir flag is not clobbered inside the loop.
album_out = filepath.Join(output_dir.X, dir)
photos = list(ReadPhotosInDir(photo_dir))
os.MkdirAll(album_out, os.ModePerm)
RenderAlbum(photos, album_out)
LinkPhotos(photos, photo_dir, album_out)
|
Create a simple static gallery script.
|
Create a simple static gallery script.
|
Python
|
mit
|
strickyak/aphid,strickyak/aphid,strickyak/aphid,strickyak/aphid,strickyak/aphid,strickyak/aphid,strickyak/aphid
|
Create a simple static gallery script.
|
from . import flag
#from go import html
from go import os
from go import path/filepath
def ReadAlbumDirs(input_dir):
f = os.Open(input_dir)
with defer f.Close():
names = f.Readdirnames(-1)
for name in names:
stat = os.Stat(filepath.Join(input_dir, name))
if stat.IsDir():
yield name
def RenderDir(album_names, output_dir):
index = filepath.Join(output_dir, 'index.html')
f = os.Create(index)
with defer f.Close():
f.Write('<html><body><h3>Gallery %s</h3> <ul>\n' % output_dir)
for name in album_names:
f.Write('<li><a href="%s">%q</a></li>\n' % (name, name))
def ReadPhotosInDir(input_dir):
f = os.Open(input_dir)
with defer f.Close():
names = f.Readdirnames(-1)
for name in names:
stat = os.Stat(filepath.Join(input_dir, name))
if stat.IsDir() == False:
yield name
def RenderAlbum(photo_names, output_dir):
index = filepath.Join(output_dir, 'index.html')
f = os.Create(index)
with defer f.Close():
f.Write('<html><body><h3>Album %s</h3> <ul>\n' % output_dir)
for name in photo_names:
f.Write('<li><a href="%s"><img src="%s" /></a></li>\n' % (name, name))
def LinkPhotos(photo_names, input_dir, output_dir):
for photo in photo_names:
photo_orig = filepath.Join(input_dir, photo)
photo_dest = filepath.Join(output_dir, photo)
os.Link(photo_orig, photo_dest)
input_dir = flag.String('input', '', 'The input directory.')
output_dir = flag.String('output', '', 'The output directory.')
def main(argv):
argv = flag.Munch(argv)
album_dirs = list(ReadAlbumDirs(input_dir.X))
RenderDir(album_dirs, output_dir.X)
for dir in album_dirs:
photo_dir = filepath.Join(input_dir.X, dir)
# Use a distinct name so the output_dir flag is not clobbered inside the loop.
album_out = filepath.Join(output_dir.X, dir)
photos = list(ReadPhotosInDir(photo_dir))
os.MkdirAll(album_out, os.ModePerm)
RenderAlbum(photos, album_out)
LinkPhotos(photos, photo_dir, album_out)
|
<commit_before><commit_msg>Create a simple static gallery script.<commit_after>
|
from . import flag
#from go import html
from go import os
from go import path/filepath
def ReadAlbumDirs(input_dir):
f = os.Open(input_dir)
with defer f.Close():
names = f.Readdirnames(-1)
for name in names:
stat = os.Stat(filepath.Join(input_dir, name))
if stat.IsDir():
yield name
def RenderDir(album_names, output_dir):
index = filepath.Join(output_dir, 'index.html')
f = os.Create(index)
with defer f.Close():
f.Write('<html><body><h3>Gallery %s</h3> <ul>\n' % output_dir)
for name in album_names:
f.Write('<li><a href="%s">%q</a></li>\n' % (name, name))
def ReadPhotosInDir(input_dir):
f = os.Open(input_dir)
with defer f.Close():
names = f.Readdirnames(-1)
for name in names:
stat = os.Stat(filepath.Join(input_dir, name))
if stat.IsDir() == False:
yield name
def RenderAlbum(photo_names, output_dir):
index = filepath.Join(output_dir, 'index.html')
f = os.Create(index)
with defer f.Close():
f.Write('<html><body><h3>Album %s</h3> <ul>\n' % output_dir)
for name in photo_names:
f.Write('<li><a href="%s"><img src="%s" /></a></li>\n' % (name, name))
def LinkPhotos(photo_names, input_dir, output_dir):
for photo in photo_names:
photo_orig = filepath.Join(input_dir, photo)
photo_dest = filepath.Join(output_dir, photo)
os.Link(photo_orig, photo_dest)
input_dir = flag.String('input', '', 'The input directory.')
output_dir = flag.String('output', '', 'The output directory.')
def main(argv):
argv = flag.Munch(argv)
album_dirs = list(ReadAlbumDirs(input_dir.X))
RenderDir(album_dirs, output_dir.X)
for dir in album_dirs:
photo_dir = filepath.Join(input_dir.X, dir)
# Use a distinct name so the output_dir flag is not clobbered inside the loop.
album_out = filepath.Join(output_dir.X, dir)
photos = list(ReadPhotosInDir(photo_dir))
os.MkdirAll(album_out, os.ModePerm)
RenderAlbum(photos, album_out)
LinkPhotos(photos, photo_dir, album_out)
|
Create a simple static gallery script.
from . import flag
#from go import html
from go import os
from go import path/filepath
def ReadAlbumDirs(input_dir):
f = os.Open(input_dir)
with defer f.Close():
names = f.Readdirnames(-1)
for name in names:
stat = os.Stat(filepath.Join(input_dir, name))
if stat.IsDir():
yield name
def RenderDir(album_names, output_dir):
index = filepath.Join(output_dir, 'index.html')
f = os.Create(index)
with defer f.Close():
f.Write('<html><body><h3>Gallery %s</h3> <ul>\n' % output_dir)
for name in album_names:
f.Write('<li><a href="%s">%q</a></li>\n' % (name, name))
def ReadPhotosInDir(input_dir):
f = os.Open(input_dir)
with defer f.Close():
names = f.Readdirnames(-1)
for name in names:
stat = os.Stat(filepath.Join(input_dir, name))
if stat.IsDir() == False:
yield name
def RenderAlbum(photo_names, output_dir):
index = filepath.Join(output_dir, 'index.html')
f = os.Create(index)
with defer f.Close():
f.Write('<html><body><h3>Album %s</h3> <ul>\n' % output_dir)
for name in photo_names:
f.Write('<li><a href="%s"><img src="%s" /></a></li>\n' % (name, name))
def LinkPhotos(photo_names, input_dir, output_dir):
for photo in photo_names:
photo_orig = filepath.Join(input_dir, photo)
photo_dest = filepath.Join(output_dir, photo)
os.Link(photo_orig, photo_dest)
input_dir = flag.String('input', '', 'The input directory.')
output_dir = flag.String('output', '', 'The output directory.')
def main(argv):
argv = flag.Munch(argv)
album_dirs = list(ReadAlbumDirs(input_dir.X))
RenderDir(album_dirs, output_dir.X)
for dir in album_dirs:
photo_dir = filepath.Join(input_dir.X, dir)
# Use a distinct name so the output_dir flag is not clobbered inside the loop.
album_out = filepath.Join(output_dir.X, dir)
photos = list(ReadPhotosInDir(photo_dir))
os.MkdirAll(album_out, os.ModePerm)
RenderAlbum(photos, album_out)
LinkPhotos(photos, photo_dir, album_out)
|
<commit_before><commit_msg>Create a simple static gallery script.<commit_after>from . import flag
#from go import html
from go import os
from go import path/filepath
def ReadAlbumDirs(input_dir):
f = os.Open(input_dir)
with defer f.Close():
names = f.Readdirnames(-1)
for name in names:
stat = os.Stat(filepath.Join(input_dir, name))
if stat.IsDir():
yield name
def RenderDir(album_names, output_dir):
index = filepath.Join(output_dir, 'index.html')
f = os.Create(index)
with defer f.Close():
f.Write('<html><body><h3>Gallery %s</h3> <ul>\n' % output_dir)
for name in album_names:
f.Write('<li><a href="%s">%q</a></li>\n' % (name, name))
def ReadPhotosInDir(input_dir):
f = os.Open(input_dir)
with defer f.Close():
names = f.Readdirnames(-1)
for name in names:
stat = os.Stat(filepath.Join(input_dir, name))
if stat.IsDir() == False:
yield name
def RenderAlbum(photo_names, output_dir):
index = filepath.Join(output_dir, 'index.html')
f = os.Create(index)
with defer f.Close():
f.Write('<html><body><h3>Album %s</h3> <ul>\n' % output_dir)
for name in photo_names:
f.Write('<li><a href="%s"><img src="%s" /></a></li>\n' % (name, name))
def LinkPhotos(photo_names, input_dir, output_dir):
for photo in photo_names:
photo_orig = filepath.Join(input_dir, photo)
photo_dest = filepath.Join(output_dir, photo)
os.Link(photo_orig, photo_dest)
input_dir = flag.String('input', '', 'The input directory.')
output_dir = flag.String('output', '', 'The output directory.')
def main(argv):
argv = flag.Munch(argv)
album_dirs = list(ReadAlbumDirs(input_dir.X))
RenderDir(album_dirs, output_dir.X)
for dir in album_dirs:
photo_dir = filepath.Join(input_dir.X, dir)
# Use a distinct name so the output_dir flag is not clobbered inside the loop.
album_out = filepath.Join(output_dir.X, dir)
photos = list(ReadPhotosInDir(photo_dir))
os.MkdirAll(album_out, os.ModePerm)
RenderAlbum(photos, album_out)
LinkPhotos(photos, photo_dir, album_out)
|
|
8139dc9e04025da001323122521951f5ed2c391b
|
users/migrations/0010_users-profile-encoding.py
|
users/migrations/0010_users-profile-encoding.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-25 01:43
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0009_remove_profile_active'),
]
operations = [
migrations.RunSQL("ALTER DATABASE default CHARACTER SET = utf8mb4 COLLATE = utf8mb4_unicode_ci;"),
migrations.RunSQL("ALTER TABLE users_profile CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"),
migrations.RunSQL("ALTER TABLE users_profile MODIFY reason LONGTEXT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"),
]
|
Fix mysql encoding for users.profile.reason
|
Fix mysql encoding for users.profile.reason
|
Python
|
mit
|
sbuss/voteswap,sbuss/voteswap,sbuss/voteswap,sbuss/voteswap
|
Fix mysql encoding for users.profile.reason
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-25 01:43
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0009_remove_profile_active'),
]
operations = [
migrations.RunSQL("ALTER DATABASE default CHARACTER SET = utf8mb4 COLLATE = utf8mb4_unicode_ci;"),
migrations.RunSQL("ALTER TABLE users_profile CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"),
migrations.RunSQL("ALTER TABLE users_profile MODIFY reason LONGTEXT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"),
]
|
<commit_before><commit_msg>Fix mysql encoding for users.profile.reason<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-25 01:43
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0009_remove_profile_active'),
]
operations = [
migrations.RunSQL("ALTER DATABASE default CHARACTER SET = utf8mb4 COLLATE = utf8mb4_unicode_ci;"),
migrations.RunSQL("ALTER TABLE users_profile CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"),
migrations.RunSQL("ALTER TABLE users_profile MODIFY reason LONGTEXT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"),
]
|
Fix mysql encoding for users.profile.reason
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-25 01:43
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0009_remove_profile_active'),
]
operations = [
migrations.RunSQL("ALTER DATABASE default CHARACTER SET = utf8mb4 COLLATE = utf8mb4_unicode_ci;"),
migrations.RunSQL("ALTER TABLE users_profile CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"),
migrations.RunSQL("ALTER TABLE users_profile MODIFY reason LONGTEXT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"),
]
|
<commit_before><commit_msg>Fix mysql encoding for users.profile.reason<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-25 01:43
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0009_remove_profile_active'),
]
operations = [
migrations.RunSQL("ALTER DATABASE default CHARACTER SET = utf8mb4 COLLATE = utf8mb4_unicode_ci;"),
migrations.RunSQL("ALTER TABLE users_profile CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"),
migrations.RunSQL("ALTER TABLE users_profile MODIFY reason LONGTEXT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"),
]
|
|
328901c74d1ee103a1ee5b2f26aa391ddeda465b
|
tests/test_web.py
|
tests/test_web.py
|
"""Test the AutoCMS web reporting functionality."""
import os
import shutil
import unittest
import re
from autocms.core import load_configuration
from autocms.web import (
produce_default_webpage
)
class TestWebPageCreation(unittest.TestCase):
"""Test the accurate creation of test webpages."""
def setUp(self):
self.config = load_configuration('autocms.cfg.example')
self.config['AUTOCMS_WEBDIR'] = self.config['AUTOCMS_BASEDIR']
# call the scratch directory 'uscratch' instead of 'scratch'
# so that in pathological cases one does not resolve to
# /scratch which is often used.
self.testdir = os.path.join(self.config['AUTOCMS_BASEDIR'],
'uscratch')
os.makedirs(self.testdir)
self.page_description = 'AutoCMS Web Unit Test Description'
description_file = os.path.join(self.testdir, 'description.html')
with open(description_file, 'w') as description_filehandle:
description_filehandle.write(self.page_description)
def tearDown(self):
shutil.rmtree(os.path.join(self.config['AUTOCMS_BASEDIR'],
'uscratch'))
def test_create_webpage_with_description(self):
"""Test that a default webpage is created with description."""
records = []
produce_default_webpage(records, 'uscratch', self.config)
webpage_path = os.path.join(self.config['AUTOCMS_WEBDIR'],
'uscratch/index.html')
self.assertTrue(os.path.isfile(webpage_path))
with open(webpage_path) as webpage:
webpage_contents = webpage.read()
self.assertTrue(re.search(self.page_description, webpage_contents))
if __name__ == '__main__':
unittest.main()
|
Add unit test for webpage creation and description
|
Add unit test for webpage creation and description
|
Python
|
mit
|
appeltel/AutoCMS,appeltel/AutoCMS,appeltel/AutoCMS
|
Add unit test for webpage creation and description
|
"""Test the AutoCMS web reporting functionality."""
import os
import shutil
import unittest
import re
from autocms.core import load_configuration
from autocms.web import (
produce_default_webpage
)
class TestWebPageCreation(unittest.TestCase):
"""Test the accurate creation of test webpages."""
def setUp(self):
self.config = load_configuration('autocms.cfg.example')
self.config['AUTOCMS_WEBDIR'] = self.config['AUTOCMS_BASEDIR']
# call the scratch directory 'uscratch' instead of 'scratch'
# so that in pathological cases one does not resolve to
# /scratch which is often used.
self.testdir = os.path.join(self.config['AUTOCMS_BASEDIR'],
'uscratch')
os.makedirs(self.testdir)
self.page_description = 'AutoCMS Web Unit Test Description'
description_file = os.path.join(self.testdir, 'description.html')
with open(description_file, 'w') as description_filehandle:
description_filehandle.write(self.page_description)
def tearDown(self):
shutil.rmtree(os.path.join(self.config['AUTOCMS_BASEDIR'],
'uscratch'))
def test_create_webpage_with_description(self):
"""Test that a default webpage is created with description."""
records = []
produce_default_webpage(records, 'uscratch', self.config)
webpage_path = os.path.join(self.config['AUTOCMS_WEBDIR'],
'uscratch/index.html')
self.assertTrue(os.path.isfile(webpage_path))
with open(webpage_path) as webpage:
webpage_contents = webpage.read()
self.assertTrue(re.search(self.page_description, webpage_contents))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for webpage creation and description<commit_after>
|
"""Test the AutoCMS web reporting functionality."""
import os
import shutil
import unittest
import re
from autocms.core import load_configuration
from autocms.web import (
produce_default_webpage
)
class TestWebPageCreation(unittest.TestCase):
"""Test the accurate creation of test webpages."""
def setUp(self):
self.config = load_configuration('autocms.cfg.example')
self.config['AUTOCMS_WEBDIR'] = self.config['AUTOCMS_BASEDIR']
# call the scratch directory 'uscratch' instead of 'scratch'
# so that in pathological cases one does not resolve to
# /scratch which is often used.
self.testdir = os.path.join(self.config['AUTOCMS_BASEDIR'],
'uscratch')
os.makedirs(self.testdir)
self.page_description = 'AutoCMS Web Unit Test Description'
description_file = os.path.join(self.testdir, 'description.html')
with open(description_file, 'w') as description_filehandle:
description_filehandle.write(self.page_description)
def tearDown(self):
shutil.rmtree(os.path.join(self.config['AUTOCMS_BASEDIR'],
'uscratch'))
def test_create_webpage_with_description(self):
"""Test that a default webpage is created with description."""
records = []
produce_default_webpage(records, 'uscratch', self.config)
webpage_path = os.path.join(self.config['AUTOCMS_WEBDIR'],
'uscratch/index.html')
self.assertTrue(os.path.isfile(webpage_path))
with open(webpage_path) as webpage:
webpage_contents = webpage.read()
self.assertTrue(re.search(self.page_description, webpage_contents))
if __name__ == '__main__':
unittest.main()
|
Add unit test for webpage creation and description"""Test the AutoCMS web reporting functionality."""
import os
import shutil
import unittest
import re
from autocms.core import load_configuration
from autocms.web import (
produce_default_webpage
)
class TestWebPageCreation(unittest.TestCase):
"""Test the accurate creation of test webpages."""
def setUp(self):
self.config = load_configuration('autocms.cfg.example')
self.config['AUTOCMS_WEBDIR'] = self.config['AUTOCMS_BASEDIR']
# call the scratch directory 'uscratch' instead of 'scratch'
# so that in pathological cases one does not resolve to
# /scratch which is often used.
self.testdir = os.path.join(self.config['AUTOCMS_BASEDIR'],
'uscratch')
os.makedirs(self.testdir)
self.page_description = 'AutoCMS Web Unit Test Description'
description_file = os.path.join(self.testdir, 'description.html')
with open(description_file, 'w') as description_filehandle:
description_filehandle.write(self.page_description)
def tearDown(self):
shutil.rmtree(os.path.join(self.config['AUTOCMS_BASEDIR'],
'uscratch'))
def test_create_webpage_with_description(self):
"""Test that a default webpage is created with description."""
records = []
produce_default_webpage(records, 'uscratch', self.config)
webpage_path = os.path.join(self.config['AUTOCMS_WEBDIR'],
'uscratch/index.html')
self.assertTrue(os.path.isfile(webpage_path))
with open(webpage_path) as webpage:
webpage_contents = webpage.read()
self.assertTrue(re.search(self.page_description, webpage_contents))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for webpage creation and description<commit_after>"""Test the AutoCMS web reporting functionality."""
import os
import shutil
import unittest
import re
from autocms.core import load_configuration
from autocms.web import (
produce_default_webpage
)
class TestWebPageCreation(unittest.TestCase):
"""Test the accurate creation of test webpages."""
def setUp(self):
self.config = load_configuration('autocms.cfg.example')
self.config['AUTOCMS_WEBDIR'] = self.config['AUTOCMS_BASEDIR']
# call the scratch directory 'uscratch' instead of 'scratch'
# so that in pathological cases one does not resolve to
# /scratch which is often used.
self.testdir = os.path.join(self.config['AUTOCMS_BASEDIR'],
'uscratch')
os.makedirs(self.testdir)
self.page_description = 'AutoCMS Web Unit Test Description'
description_file = os.path.join(self.testdir, 'description.html')
with open(description_file, 'w') as description_filehandle:
description_filehandle.write(self.page_description)
def tearDown(self):
shutil.rmtree(os.path.join(self.config['AUTOCMS_BASEDIR'],
'uscratch'))
def test_create_webpage_with_description(self):
"""Test that a default webpage is created with description."""
records = []
produce_default_webpage(records, 'uscratch', self.config)
webpage_path = os.path.join(self.config['AUTOCMS_WEBDIR'],
'uscratch/index.html')
self.assertTrue(os.path.isfile(webpage_path))
with open(webpage_path) as webpage:
webpage_contents = webpage.read()
self.assertTrue(re.search(self.page_description, webpage_contents))
if __name__ == '__main__':
unittest.main()
|
|
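The AutoCMS record above follows the standard unittest setUp/tearDown pattern around a scratch directory. As a quick illustration, here is a minimal sketch of running just that case programmatically; the module name test_web is an assumption (the record does not say where the file lives in the tree):

import unittest

# Load and run the single case from the record above; 'test_web' is a
# hypothetical module name -- adjust it to the file's real location.
suite = unittest.defaultTestLoader.loadTestsFromName(
    'test_web.TestWebPageCreation.test_create_webpage_with_description')
unittest.TextTestRunner(verbosity=2).run(suite)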
3133bbfcb5ee56c88ea20be21778519bffe77299
|
literotica.py
|
literotica.py
|
from common import *
from sys import argv
from urlgrab import Cache
from re import compile, DOTALL, MULTILINE
cache = Cache()
url = argv[1]
titlePattern = compile("<h1>([^<]+)</h1>")
contentPattern = compile("<div class=\"b-story-body-x x-r15\">(.+?)</div><div class=\"b-story-stats-block\">" , DOTALL|MULTILINE)
nextPattern = compile("\"([^\"]+)\">Next</a>")
page = cache.get(url, max_age = -1)
data = page.read()
open("dump", "wb").write(data)
title = titlePattern.findall(data)
print title
title = title[0]
content = u""
while True:
contentMatch = contentPattern.findall(data)
print page.headers.headers
print type(data)
content += contentMatch[0]
#print content
nextMatch = nextPattern.findall(data)
if nextMatch == []:
break
nextURL = nextMatch[0]
print nextURL
page = cache.get(nextURL, max_age=-1)
data = page.read()
toc = tocStart(title)
generatePage(url, title, content, title, toc)
tocEnd(toc)
|
Add another type of book
|
Add another type of book
|
Python
|
agpl-3.0
|
palfrey/book-blog
|
Add another type of book
|
from common import *
from sys import argv
from urlgrab import Cache
from re import compile, DOTALL, MULTILINE
cache = Cache()
url = argv[1]
titlePattern = compile("<h1>([^<]+)</h1>")
contentPattern = compile("<div class=\"b-story-body-x x-r15\">(.+?)</div><div class=\"b-story-stats-block\">" , DOTALL|MULTILINE)
nextPattern = compile("\"([^\"]+)\">Next</a>")
page = cache.get(url, max_age = -1)
data = page.read()
open("dump", "wb").write(data)
title = titlePattern.findall(data)
print title
title = title[0]
content = u""
while True:
contentMatch = contentPattern.findall(data)
print page.headers.headers
print type(data)
content += contentMatch[0]
#print content
nextMatch = nextPattern.findall(data)
if nextMatch == []:
break
nextURL = nextMatch[0]
print nextURL
page = cache.get(nextURL, max_age=-1)
data = page.read()
toc = tocStart(title)
generatePage(url, title, content, title, toc)
tocEnd(toc)
|
<commit_before><commit_msg>Add another type of book<commit_after>
|
from common import *
from sys import argv
from urlgrab import Cache
from re import compile, DOTALL, MULTILINE
cache = Cache()
url = argv[1]
titlePattern = compile("<h1>([^<]+)</h1>")
contentPattern = compile("<div class=\"b-story-body-x x-r15\">(.+?)</div><div class=\"b-story-stats-block\">" , DOTALL|MULTILINE)
nextPattern = compile("\"([^\"]+)\">Next</a>")
page = cache.get(url, max_age = -1)
data = page.read()
open("dump", "wb").write(data)
title = titlePattern.findall(data)
print title
title = title[0]
content = u""
while True:
contentMatch = contentPattern.findall(data)
print page.headers.headers
print type(data)
content += contentMatch[0]
#print content
nextMatch = nextPattern.findall(data)
if nextMatch == []:
break
nextURL = nextMatch[0]
print nextURL
page = cache.get(nextURL, max_age=-1)
data = page.read()
toc = tocStart(title)
generatePage(url, title, content, title, toc)
tocEnd(toc)
|
Add another type of bookfrom common import *
from sys import argv
from urlgrab import Cache
from re import compile, DOTALL, MULTILINE
cache = Cache()
url = argv[1]
titlePattern = compile("<h1>([^<]+)</h1>")
contentPattern = compile("<div class=\"b-story-body-x x-r15\">(.+?)</div><div class=\"b-story-stats-block\">" , DOTALL|MULTILINE)
nextPattern = compile("\"([^\"]+)\">Next</a>")
page = cache.get(url, max_age = -1)
data = page.read()
open("dump", "wb").write(data)
title = titlePattern.findall(data)
print title
title = title[0]
content = u""
while True:
contentMatch = contentPattern.findall(data)
print page.headers.headers
print type(data)
content += contentMatch[0]
#print content
nextMatch = nextPattern.findall(data)
if nextMatch == []:
break
nextURL = nextMatch[0]
print nextURL
page = cache.get(nextURL, max_age=-1)
data = page.read()
toc = tocStart(title)
generatePage(url, title, content, title, toc)
tocEnd(toc)
|
<commit_before><commit_msg>Add another type of book<commit_after>from common import *
from sys import argv
from urlgrab import Cache
from re import compile, DOTALL, MULTILINE
cache = Cache()
url = argv[1]
titlePattern = compile("<h1>([^<]+)</h1>")
contentPattern = compile("<div class=\"b-story-body-x x-r15\">(.+?)</div><div class=\"b-story-stats-block\">" , DOTALL|MULTILINE)
nextPattern = compile("\"([^\"]+)\">Next</a>")
page = cache.get(url, max_age = -1)
data = page.read()
open("dump", "wb").write(data)
title = titlePattern.findall(data)
print title
title = title[0]
content = u""
while True:
contentMatch = contentPattern.findall(data)
print page.headers.headers
print type(data)
content += contentMatch[0]
#print content
nextMatch = nextPattern.findall(data)
if nextMatch == []:
break
nextURL = nextMatch[0]
print nextURL
page = cache.get(nextURL, max_age=-1)
data = page.read()
toc = tocStart(title)
generatePage(url, title, content, title, toc)
tocEnd(toc)
|
|
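The scraper above targets Python 2 (bare print statements) and leans on the repository's urlgrab.Cache helper. For illustration, a self-contained Python 3 sketch of the same follow-the-Next-link pagination idea, with urllib standing in for that cache (an assumption) and the regex-over-HTML style kept from the original:

from re import compile
from urllib.request import urlopen

# Keep appending pages and following the Next link until none is found,
# mirroring the while-loop in the record above.
nextPattern = compile(r'"([^"]+)">Next</a>')

def fetch_all(url):
    html = ""
    while url:
        page = urlopen(url).read().decode("utf-8", "replace")
        html += page
        matches = nextPattern.findall(page)
        url = matches[0] if matches else None
    return html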
893679baff0367538bdf3b52b04f8bae72732be8
|
zerver/migrations/0031_remove_system_avatar_source.py
|
zerver/migrations/0031_remove_system_avatar_source.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0030_realm_org_type'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='avatar_source',
field=models.CharField(choices=[('G', 'Hosted by Gravatar'), ('U', 'Uploaded by user')], max_length=1, default='G'),
),
]
|
Add migration to remove system avatar source.
|
Add migration to remove system avatar source.
Fixes the previous commit, which broke master.
|
Python
|
apache-2.0
|
sharmaeklavya2/zulip,isht3/zulip,joyhchen/zulip,susansls/zulip,zulip/zulip,punchagan/zulip,blaze225/zulip,PhilSk/zulip,samatdav/zulip,hackerkid/zulip,christi3k/zulip,brainwane/zulip,mahim97/zulip,kou/zulip,jackrzhang/zulip,amyliu345/zulip,paxapy/zulip,niftynei/zulip,samatdav/zulip,vikas-parashar/zulip,shubhamdhama/zulip,vabs22/zulip,AZtheAsian/zulip,vabs22/zulip,SmartPeople/zulip,Galexrt/zulip,jainayush975/zulip,j831/zulip,jrowan/zulip,susansls/zulip,dattatreya303/zulip,aakash-cr7/zulip,jackrzhang/zulip,paxapy/zulip,reyha/zulip,Jianchun1/zulip,ryanbackman/zulip,rht/zulip,susansls/zulip,zulip/zulip,vikas-parashar/zulip,souravbadami/zulip,verma-varsha/zulip,TigorC/zulip,amanharitsh123/zulip,niftynei/zulip,brockwhittaker/zulip,jphilipsen05/zulip,christi3k/zulip,rht/zulip,isht3/zulip,j831/zulip,zacps/zulip,brockwhittaker/zulip,amanharitsh123/zulip,jphilipsen05/zulip,sonali0901/zulip,AZtheAsian/zulip,zulip/zulip,cosmicAsymmetry/zulip,dhcrzf/zulip,tommyip/zulip,souravbadami/zulip,j831/zulip,jackrzhang/zulip,SmartPeople/zulip,blaze225/zulip,sharmaeklavya2/zulip,eeshangarg/zulip,vaidap/zulip,SmartPeople/zulip,ryanbackman/zulip,Jianchun1/zulip,susansls/zulip,rishig/zulip,kou/zulip,dawran6/zulip,zulip/zulip,rishig/zulip,peguin40/zulip,ryanbackman/zulip,tommyip/zulip,TigorC/zulip,eeshangarg/zulip,hackerkid/zulip,AZtheAsian/zulip,aakash-cr7/zulip,timabbott/zulip,zulip/zulip,Diptanshu8/zulip,synicalsyntax/zulip,calvinleenyc/zulip,blaze225/zulip,verma-varsha/zulip,peguin40/zulip,zacps/zulip,grave-w-grave/zulip,christi3k/zulip,j831/zulip,andersk/zulip,synicalsyntax/zulip,arpith/zulip,zacps/zulip,brainwane/zulip,JPJPJPOPOP/zulip,aakash-cr7/zulip,christi3k/zulip,JPJPJPOPOP/zulip,brainwane/zulip,arpith/zulip,PhilSk/zulip,susansls/zulip,andersk/zulip,amyliu345/zulip,brainwane/zulip,synicalsyntax/zulip,TigorC/zulip,AZtheAsian/zulip,jainayush975/zulip,calvinleenyc/zulip,samatdav/zulip,JPJPJPOPOP/zulip,zacps/zulip,dattatreya303/zulip,brockwhittaker/zulip,amyliu345/zulip,Juanvulcano/zulip,cosmicAsymmetry/zulip,SmartPeople/zulip,hackerkid/zulip,verma-varsha/zulip,reyha/zulip,reyha/zulip,jainayush975/zulip,punchagan/zulip,j831/zulip,PhilSk/zulip,dawran6/zulip,souravbadami/zulip,calvinleenyc/zulip,jrowan/zulip,reyha/zulip,Diptanshu8/zulip,timabbott/zulip,eeshangarg/zulip,KingxBanana/zulip,eeshangarg/zulip,sonali0901/zulip,aakash-cr7/zulip,punchagan/zulip,vabs22/zulip,TigorC/zulip,rht/zulip,andersk/zulip,KingxBanana/zulip,peguin40/zulip,kou/zulip,Diptanshu8/zulip,cosmicAsymmetry/zulip,timabbott/zulip,brainwane/zulip,souravbadami/zulip,paxapy/zulip,shubhamdhama/zulip,niftynei/zulip,ryanbackman/zulip,dattatreya303/zulip,arpith/zulip,vaidap/zulip,cosmicAsymmetry/zulip,tommyip/zulip,KingxBanana/zulip,tommyip/zulip,amyliu345/zulip,verma-varsha/zulip,mahim97/zulip,brockwhittaker/zulip,punchagan/zulip,dhcrzf/zulip,jainayush975/zulip,jphilipsen05/zulip,vaidap/zulip,samatdav/zulip,SmartPeople/zulip,JPJPJPOPOP/zulip,vabs22/zulip,grave-w-grave/zulip,kou/zulip,dawran6/zulip,joyhchen/zulip,dhcrzf/zulip,mahim97/zulip,vaidap/zulip,peguin40/zulip,susansls/zulip,brockwhittaker/zulip,showell/zulip,grave-w-grave/zulip,TigorC/zulip,synicalsyntax/zulip,rishig/zulip,eeshangarg/zulip,showell/zulip,isht3/zulip,andersk/zulip,dattatreya303/zulip,jackrzhang/zulip,j831/zulip,joyhchen/zulip,sharmaeklavya2/zulip,synicalsyntax/zulip,sonali0901/zulip,zacps/zulip,sharmaeklavya2/zulip,Galexrt/zulip,paxapy/zulip,joyhchen/zulip,jrowan/zulip,aakash-cr7/zulip,samatdav/zulip,arpith/zulip,shubhamdhama/zulip,souravbadami/zulip,jphilipsen05/zulip,hackerkid/z
ulip,joyhchen/zulip,rht/zulip,vaidap/zulip,niftynei/zulip,rht/zulip,synicalsyntax/zulip,Diptanshu8/zulip,jackrzhang/zulip,jackrzhang/zulip,timabbott/zulip,timabbott/zulip,shubhamdhama/zulip,rishig/zulip,showell/zulip,vaidap/zulip,jainayush975/zulip,blaze225/zulip,rht/zulip,verma-varsha/zulip,cosmicAsymmetry/zulip,sharmaeklavya2/zulip,Diptanshu8/zulip,Juanvulcano/zulip,calvinleenyc/zulip,vabs22/zulip,hackerkid/zulip,paxapy/zulip,grave-w-grave/zulip,shubhamdhama/zulip,brainwane/zulip,JPJPJPOPOP/zulip,calvinleenyc/zulip,zulip/zulip,showell/zulip,aakash-cr7/zulip,arpith/zulip,andersk/zulip,Juanvulcano/zulip,showell/zulip,jphilipsen05/zulip,showell/zulip,zacps/zulip,vikas-parashar/zulip,christi3k/zulip,dhcrzf/zulip,KingxBanana/zulip,eeshangarg/zulip,blaze225/zulip,hackerkid/zulip,dattatreya303/zulip,Galexrt/zulip,Jianchun1/zulip,Juanvulcano/zulip,arpith/zulip,PhilSk/zulip,andersk/zulip,dhcrzf/zulip,grave-w-grave/zulip,sonali0901/zulip,rishig/zulip,PhilSk/zulip,synicalsyntax/zulip,jrowan/zulip,mahim97/zulip,amanharitsh123/zulip,jackrzhang/zulip,punchagan/zulip,PhilSk/zulip,jrowan/zulip,jainayush975/zulip,Jianchun1/zulip,brockwhittaker/zulip,Galexrt/zulip,Galexrt/zulip,vikas-parashar/zulip,kou/zulip,JPJPJPOPOP/zulip,sharmaeklavya2/zulip,dattatreya303/zulip,KingxBanana/zulip,mahim97/zulip,brainwane/zulip,rishig/zulip,AZtheAsian/zulip,sonali0901/zulip,isht3/zulip,sonali0901/zulip,joyhchen/zulip,peguin40/zulip,showell/zulip,andersk/zulip,reyha/zulip,niftynei/zulip,Jianchun1/zulip,amanharitsh123/zulip,paxapy/zulip,vabs22/zulip,cosmicAsymmetry/zulip,verma-varsha/zulip,amyliu345/zulip,isht3/zulip,kou/zulip,tommyip/zulip,tommyip/zulip,amanharitsh123/zulip,punchagan/zulip,souravbadami/zulip,tommyip/zulip,mahim97/zulip,hackerkid/zulip,dhcrzf/zulip,AZtheAsian/zulip,dawran6/zulip,calvinleenyc/zulip,timabbott/zulip,dawran6/zulip,amyliu345/zulip,timabbott/zulip,ryanbackman/zulip,grave-w-grave/zulip,punchagan/zulip,amanharitsh123/zulip,ryanbackman/zulip,Juanvulcano/zulip,reyha/zulip,shubhamdhama/zulip,vikas-parashar/zulip,blaze225/zulip,rishig/zulip,christi3k/zulip,dhcrzf/zulip,shubhamdhama/zulip,Juanvulcano/zulip,Galexrt/zulip,eeshangarg/zulip,rht/zulip,kou/zulip,isht3/zulip,samatdav/zulip,peguin40/zulip,Diptanshu8/zulip,SmartPeople/zulip,vikas-parashar/zulip,TigorC/zulip,zulip/zulip,jphilipsen05/zulip,Galexrt/zulip,niftynei/zulip,Jianchun1/zulip,jrowan/zulip,dawran6/zulip,KingxBanana/zulip
|
Add migration to remove system avatar source.
Fixes the previous commit, which broke master.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0030_realm_org_type'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='avatar_source',
field=models.CharField(choices=[('G', 'Hosted by Gravatar'), ('U', 'Uploaded by user')], max_length=1, default='G'),
),
]
|
<commit_before><commit_msg>Add migration to remove system avatar source.
Fixes the previous commit, which broke master.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0030_realm_org_type'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='avatar_source',
field=models.CharField(choices=[('G', 'Hosted by Gravatar'), ('U', 'Uploaded by user')], max_length=1, default='G'),
),
]
|
Add migration to remove system avatar source.
Fixes the previous commit, which broke master.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0030_realm_org_type'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='avatar_source',
field=models.CharField(choices=[('G', 'Hosted by Gravatar'), ('U', 'Uploaded by user')], max_length=1, default='G'),
),
]
|
<commit_before><commit_msg>Add migration to remove system avatar source.
Fixes the previous commit, which broke master.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0030_realm_org_type'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='avatar_source',
field=models.CharField(choices=[('G', 'Hosted by Gravatar'), ('U', 'Uploaded by user')], max_length=1, default='G'),
),
]
|
|
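For context, an AlterField operation like the one above only records the new state of a field. A hypothetical sketch of the model declaration it corresponds to (the real UserProfile has many more fields, and the tuple name is an assumption; presumably a system-avatar choice was dropped, per the subject line):

from django.db import models

AVATAR_SOURCES = (
    ('G', 'Hosted by Gravatar'),
    ('U', 'Uploaded by user'),
)

class UserProfile(models.Model):
    # Field state encoded by the migration above.
    avatar_source = models.CharField(
        choices=AVATAR_SOURCES, max_length=1, default='G')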
b51398d602a157ce55fd7e08eedd953051f716a1
|
backend/scripts/updatedf.py
|
backend/scripts/updatedf.py
|
#!/usr/bin/env python
#import hashlib
import os
def main():
for root, dirs, files in os.walk("/mcfs/data/materialscommons"):
for f in files:
print f
if __name__ == "__main__":
main()
|
Add script to update uploaded files.
|
Add script to update uploaded files.
|
Python
|
mit
|
materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org
|
Add script to update uploaded files.
|
#!/usr/bin/env python
#import hashlib
import os
def main():
for root, dirs, files in os.walk("/mcfs/data/materialscommons"):
for f in files:
print f
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to update uploaded files.<commit_after>
|
#!/usr/bin/env python
#import hashlib
import os
def main():
for root, dirs, files in os.walk("/mcfs/data/materialscommons"):
for f in files:
print f
if __name__ == "__main__":
main()
|
Add script to update uploaded files.#!/usr/bin/env python
#import hashlib
import os
def main():
for root, dirs, files in os.walk("/mcfs/data/materialscommons"):
for f in files:
print f
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to update uploaded files.<commit_after>#!/usr/bin/env python
#import hashlib
import os
def main():
for root, dirs, files in os.walk("/mcfs/data/materialscommons"):
for f in files:
print f
if __name__ == "__main__":
main()
|
|
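The commented-out hashlib import in the record hints that the eventual goal is to fingerprint each uploaded file rather than just print names. A sketch of that next step (written for Python 3; the goal itself is an assumption drawn only from that commented import):

import hashlib
import os

def file_md5(path, chunk_size=65536):
    # Hash in chunks so large uploads need not fit in memory at once.
    md5 = hashlib.md5()
    with open(path, 'rb') as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest()

def main():
    for root, dirs, files in os.walk("/mcfs/data/materialscommons"):
        for name in files:
            path = os.path.join(root, name)
            print(path, file_md5(path))

if __name__ == "__main__":
    main()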
fbf36a2fb52b5ed1aceaec4c1d1075448584a97d
|
tests/test_imports.py
|
tests/test_imports.py
|
"""Test that all modules/packages in the lektor tree are importable in any order
Here we import each module by itself, one at a time, each in a new
python interpreter.
"""
import pkgutil
import sys
from subprocess import run
import pytest
import lektor
def iter_lektor_modules():
for module in pkgutil.walk_packages(lektor.__path__, f"{lektor.__name__}."):
yield module.name
@pytest.fixture(params=iter_lektor_modules())
def module(request):
return request.param
def test_import(module):
python = sys.executable
assert run([python, "-c", f"import {module}"], check=False).returncode == 0
|
Test that modules can be imported in any order
|
Test that modules can be imported in any order
This exercises a tricky import cycle introduced in:
7c68f3f78 more imports at top level
|
Python
|
bsd-3-clause
|
lektor/lektor,lektor/lektor,lektor/lektor,lektor/lektor
|
Test that modules can be imported in any order
This exercises a tricky import cycle introduced in:
7c68f3f78 more imports at top level
|
"""Test that all modules/packages in the lektor tree are importable in any order
Here we import each module by itself, one at a time, each in a new
python interpreter.
"""
import pkgutil
import sys
from subprocess import run
import pytest
import lektor
def iter_lektor_modules():
for module in pkgutil.walk_packages(lektor.__path__, f"{lektor.__name__}."):
yield module.name
@pytest.fixture(params=iter_lektor_modules())
def module(request):
return request.param
def test_import(module):
python = sys.executable
assert run([python, "-c", f"import {module}"], check=False).returncode == 0
|
<commit_before><commit_msg>Test that modules can be imported in any order
This exercises a tricky import cycle introduced in:
7c68f3f78 more imports at top level<commit_after>
|
"""Test that all modules/packages in the lektor tree are importable in any order
Here we import each module by itself, one at a time, each in a new
python interpreter.
"""
import pkgutil
import sys
from subprocess import run
import pytest
import lektor
def iter_lektor_modules():
for module in pkgutil.walk_packages(lektor.__path__, f"{lektor.__name__}."):
yield module.name
@pytest.fixture(params=iter_lektor_modules())
def module(request):
return request.param
def test_import(module):
python = sys.executable
assert run([python, "-c", f"import {module}"], check=False).returncode == 0
|
Test that modules can be imported in any order
This exercises a tricky import cycle introduced in:
7c68f3f78 more imports at top level"""Test that all modules/packages in the lektor tree are importable in any order
Here we import each module by itself, one at a time, each in a new
python interpreter.
"""
import pkgutil
import sys
from subprocess import run
import pytest
import lektor
def iter_lektor_modules():
for module in pkgutil.walk_packages(lektor.__path__, f"{lektor.__name__}."):
yield module.name
@pytest.fixture(params=iter_lektor_modules())
def module(request):
return request.param
def test_import(module):
python = sys.executable
assert run([python, "-c", f"import {module}"], check=False).returncode == 0
|
<commit_before><commit_msg>Test that modules can be imported in any order
This exercises a tricky import cycle introduced in:
7c68f3f78 more imports at top level<commit_after>"""Test that all modules/packages in the lektor tree are importable in any order
Here we import each module by itself, one at a time, each in a new
python interpreter.
"""
import pkgutil
import sys
from subprocess import run
import pytest
import lektor
def iter_lektor_modules():
for module in pkgutil.walk_packages(lektor.__path__, f"{lektor.__name__}."):
yield module.name
@pytest.fixture(params=iter_lektor_modules())
def module(request):
return request.param
def test_import(module):
python = sys.executable
assert run([python, "-c", f"import {module}"], check=False).returncode == 0
|
|
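Spawning one interpreter per module is what makes the test above order-independent: every import starts from a clean sys.modules, so a cycle cannot be masked by an earlier import. A minimal standalone version of the same check, assuming lektor is installed in the current environment:

import pkgutil
import sys
from subprocess import run

import lektor

# Import every lektor submodule in a fresh interpreter; collect failures.
failures = [
    mod.name
    for mod in pkgutil.walk_packages(lektor.__path__, f"{lektor.__name__}.")
    if run([sys.executable, "-c", f"import {mod.name}"], check=False).returncode != 0
]
print("import failures:", failures or "none")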
85202173cf120caad603315cd57fa66857a88b0b
|
feder/institutions/migrations/0013_auto_20170810_2118.py
|
feder/institutions/migrations/0013_auto_20170810_2118.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-10 21:18
from __future__ import unicode_literals
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('institutions', '0012_auto_20170808_0309'),
]
operations = [
migrations.AlterField(
model_name='institution',
name='extra',
field=jsonfield.fields.JSONField(blank=True, verbose_name=b'Unorganized additional information'),
),
]
|
Add missing migrations for institutions
|
Add missing migrations for institutions
|
Python
|
mit
|
watchdogpolska/feder,watchdogpolska/feder,watchdogpolska/feder,watchdogpolska/feder
|
Add missing migrations for institutions
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-10 21:18
from __future__ import unicode_literals
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('institutions', '0012_auto_20170808_0309'),
]
operations = [
migrations.AlterField(
model_name='institution',
name='extra',
field=jsonfield.fields.JSONField(blank=True, verbose_name=b'Unorganized additional information'),
),
]
|
<commit_before><commit_msg>Add missing migrations for institutions<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-10 21:18
from __future__ import unicode_literals
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('institutions', '0012_auto_20170808_0309'),
]
operations = [
migrations.AlterField(
model_name='institution',
name='extra',
field=jsonfield.fields.JSONField(blank=True, verbose_name=b'Unorganized additional information'),
),
]
|
Add missing migrations for institutions# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-10 21:18
from __future__ import unicode_literals
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('institutions', '0012_auto_20170808_0309'),
]
operations = [
migrations.AlterField(
model_name='institution',
name='extra',
field=jsonfield.fields.JSONField(blank=True, verbose_name=b'Unorganized additional information'),
),
]
|
<commit_before><commit_msg>Add missing migrations for institutions<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-10 21:18
from __future__ import unicode_literals
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('institutions', '0012_auto_20170808_0309'),
]
operations = [
migrations.AlterField(
model_name='institution',
name='extra',
field=jsonfield.fields.JSONField(blank=True, verbose_name=b'Unorganized additional information'),
),
]
|
|
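As above, this AlterField mirrors a model change. A hypothetical excerpt of the matching declaration (the real Institution model has more fields; the bytes verbose_name in the migration reflects the Python 2 era of the codebase):

from django.db import models
from jsonfield.fields import JSONField

class Institution(models.Model):
    # Field state recorded by the migration above (sketch only).
    extra = JSONField(blank=True,
                      verbose_name='Unorganized additional information')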
c95234c130435ddd116784ad1829f7bdaa9182c5
|
100_to_199/euler_138.py
|
100_to_199/euler_138.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Problem 138
Consider the isosceles triangle with base length, b = 16, and legs, L = 17.
By using the Pythagorean theorem it can be seen that the height of the triangle, h = √(17² − 8²) = 15, which is one less than the base length.
With b = 272 and L = 305, we get h = 273, which is one more than the base length, and this is the second smallest isosceles triangle with the property that h = b ± 1.
Find ∑ L for the twelve smallest isosceles triangles for which h = b ± 1 and b, L are positive integers.
'''
from decimal import Decimal
from math import modf
# Pythagorean approximations
# http://oeis.org/A195615 (FORMULA)
def a(n):
if n == 0:
return 15
if n == 1:
return 273
if n == 2:
return 4895
return 17 * a(n - 1) + 17 * a(n - 2) - a(n - 3)
def p138():
highs = [a(i) for i in range(0, 12)]
result = []
for h in highs:
hd = h ** 2
bd = ((h - 1) // 2) ** 2
ret = Decimal(hd + bd).sqrt()
ret_float, ret_int = modf(ret)
if ret_float == 0.0:
# print('[-]', [h], ret, ret_float, ret_int)
result.append(int(ret_int))
continue
bd = ((h + 1) // 2) ** 2
ret = Decimal(hd + bd).sqrt()
ret_float, ret_int = modf(ret)
if ret_float == 0.0:
# print('[+]', [h], ret, ret_float, ret_int)
result.append(int(ret_int))
print(sum(result))
p138()
|
ADD 138 solutions with A195615 (OEIS)
|
ADD 138 solutions with A195615 (OEIS)
|
Python
|
mit
|
byung-u/ProjectEuler
|
ADD 138 solutions with A195615 (OEIS)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Problem 138
Consider the isosceles triangle with base length, b = 16, and legs, L = 17.
By using the Pythagorean theorem it can be seen that the height of the triangle, h = √(17² − 8²) = 15, which is one less than the base length.
With b = 272 and L = 305, we get h = 273, which is one more than the base length, and this is the second smallest isosceles triangle with the property that h = b ± 1.
Find ∑ L for the twelve smallest isosceles triangles for which h = b ± 1 and b, L are positive integers.
'''
from decimal import Decimal
from math import modf
# Pythagorean approximations
# http://oeis.org/A195615 (FORMULA)
def a(n):
if n == 0:
return 15
if n == 1:
return 273
if n == 2:
return 4895
return 17 * a(n - 1) + 17 * a(n - 2) - a(n - 3)
def p138():
highs = [a(i) for i in range(0, 12)]
result = []
for h in highs:
hd = h ** 2
bd = ((h - 1) // 2) ** 2
ret = Decimal(hd + bd).sqrt()
ret_float, ret_int = modf(ret)
if ret_float == 0.0:
# print('[-]', [h], ret, ret_float, ret_int)
result.append(int(ret_int))
continue
bd = ((h + 1) // 2) ** 2
ret = Decimal(hd + bd).sqrt()
ret_float, ret_int = modf(ret)
if ret_float == 0.0:
# print('[+]', [h], ret, ret_float, ret_int)
result.append(int(ret_int))
print(sum(result))
p138()
|
<commit_before><commit_msg>ADD 138 solutions with A195615 (OEIS)<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Problem 138
Consider the isosceles triangle with base length, b = 16, and legs, L = 17.
By using the Pythagorean theorem it can be seen that the height of the triangle, h = √(17² − 8²) = 15, which is one less than the base length.
With b = 272 and L = 305, we get h = 273, which is one more than the base length, and this is the second smallest isosceles triangle with the property that h = b ± 1.
Find ∑ L for the twelve smallest isosceles triangles for which h = b ± 1 and b, L are positive integers.
'''
from decimal import Decimal
from math import modf
# Pythagorean approximations
# http://oeis.org/A195615 (FORMULA)
def a(n):
if n == 0:
return 15
if n == 1:
return 273
if n == 2:
return 4895
return 17 * a(n - 1) + 17 * a(n - 2) - a(n - 3)
def p138():
highs = [a(i) for i in range(0, 12)]
result = []
for h in highs:
hd = h ** 2
bd = ((h - 1) // 2) ** 2
ret = Decimal(hd + bd).sqrt()
ret_float, ret_int = modf(ret)
if ret_float == 0.0:
# print('[-]', [h], ret, ret_float, ret_int)
result.append(int(ret_int))
continue
bd = ((h + 1) // 2) ** 2
ret = Decimal(hd + bd).sqrt()
ret_float, ret_int = modf(ret)
if ret_float == 0.0:
# print('[+]', [h], ret, ret_float, ret_int)
result.append(int(ret_int))
print(sum(result))
p138()
|
ADD 138 solutions with A195615 (OEIS)#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Problem 138
Consider the isosceles triangle with base length, b = 16, and legs, L = 17.
By using the Pythagorean theorem it can be seen that the height of the triangle, h = √(17² − 8²) = 15, which is one less than the base length.
With b = 272 and L = 305, we get h = 273, which is one more than the base length, and this is the second smallest isosceles triangle with the property that h = b ± 1.
Find ∑ L for the twelve smallest isosceles triangles for which h = b ± 1 and b, L are positive integers.
'''
from decimal import Decimal
from math import modf
# Pythagorean approximations
# http://oeis.org/A195615 (FORMULA)
def a(n):
if n == 0:
return 15
if n == 1:
return 273
if n == 2:
return 4895
return 17 * a(n - 1) + 17 * a(n - 2) - a(n - 3)
def p138():
highs = [a(i) for i in range(0, 12)]
result = []
for h in highs:
hd = h ** 2
bd = ((h - 1) // 2) ** 2
ret = Decimal(hd + bd).sqrt()
ret_float, ret_int = modf(ret)
if ret_float == 0.0:
# print('[-]', [h], ret, ret_float, ret_int)
result.append(int(ret_int))
continue
bd = ((h + 1) // 2) ** 2
ret = Decimal(hd + bd).sqrt()
ret_float, ret_int = modf(ret)
if ret_float == 0.0:
# print('[+]', [h], ret, ret_float, ret_int)
result.append(int(ret_int))
print(sum(result))
p138()
|
<commit_before><commit_msg>ADD 138 solutions with A195615 (OEIS)<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Problem 138
Consider the isosceles triangle with base length, b = 16, and legs, L = 17.
By using the Pythagorean theorem it can be seen that the height of the triangle, h = √(17² − 8²) = 15, which is one less than the base length.
With b = 272 and L = 305, we get h = 273, which is one more than the base length, and this is the second smallest isosceles triangle with the property that h = b ± 1.
Find ∑ L for the twelve smallest isosceles triangles for which h = b ± 1 and b, L are positive integers.
'''
from decimal import Decimal
from math import modf
# Pythagorean approximations
# http://oeis.org/A195615 (FORMULA)
def a(n):
if n == 0:
return 15
if n == 1:
return 273
if n == 2:
return 4895
return 17 * a(n - 1) + 17 * a(n - 2) - a(n - 3)
def p138():
highs = [a(i) for i in range(0, 12)]
result = []
for h in highs:
hd = h ** 2
bd = ((h - 1) // 2) ** 2
ret = Decimal(hd + bd).sqrt()
ret_float, ret_int = modf(ret)
if ret_float == 0.0:
# print('[-]', [h], ret, ret_float, ret_int)
result.append(int(ret_int))
continue
bd = ((h + 1) // 2) ** 2
ret = Decimal(hd + bd).sqrt()
ret_float, ret_int = modf(ret)
if ret_float == 0.0:
# print('[+]', [h], ret, ret_float, ret_int)
result.append(int(ret_int))
print(sum(result))
p138()
|
|
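As a quick sanity check of the problem's seed cases (a worked example, not part of the record), the Pythagorean relation for half the base gives

\[
L^2 = h^2 + \left(\tfrac{b}{2}\right)^2, \qquad
\sqrt{17^2 - 8^2} = \sqrt{225} = 15 = b - 1 \ (b = 16), \qquad
\sqrt{305^2 - 136^2} = \sqrt{74529} = 273 = b + 1 \ (b = 272),
\]

which is exactly what p138() tests: for each height h from A195615 it asks whether h² + ((h ∓ 1)/2)² is a perfect square, and sums the resulting legs L.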
b01bd1b21f1b12c9120845ec8a85355b038d6b20
|
inventory_control/storage.py
|
inventory_control/storage.py
|
"""
This is the Storage engine. It's how everything should talk to the database
layer that sits on the inside of the inventory-control system.
"""
import MySQLdb
class StorageEngine(object):
"""
Instantiate a DB access object, create all the necessary hooks and
then the accessors to a SQL database.
"""
def __init__(self, config):
self.config = config
self.db = MySQLdb.connect(host=self.config['host'],
user=self.config['user'],
passwd=self.config['password'],
db=self.config['db'])
self.cursor = self.db.cursor()
|
Add a basic Storage engine to talk to the DB
|
Add a basic Storage engine to talk to the DB
|
Python
|
mit
|
codeforsanjose/inventory-control,worldcomputerxchange/inventory-control
|
Add a basic Storage engine to talk to the DB
|
"""
This is the Storage engine. It's how everything should talk to the database
layer that sits on the inside of the inventory-control system.
"""
import MySQLdb
class StorageEngine(object):
"""
Instantiate a DB access object, create all the necessary hooks and
then the accessors to a SQL database.
"""
def __init__(self, config):
self.config = config
self.db = MySQLdb.connect(host=self.config['host'],
user=self.config['user'],
passwd=self.config['password'],
db=self.config['db'])
self.cursor = self.db.cursor()
|
<commit_before><commit_msg>Add a basic Storage engine to talk to the DB<commit_after>
|
"""
This is the Storage engine. It's how everything should talk to the database
layer that sits on the inside of the inventory-control system.
"""
import MySQLdb
class StorageEngine(object):
"""
Instantiate a DB access object, create all the necessary hooks and
then the accessors to a SQL database.
"""
def __init__(self, config):
self.config = config
self.db = MySQLdb.connect(host=self.config['host'],
user=self.config['user'],
passwd=self.config['password'],
db=self.config['db'])
self.cursor = self.db.cursor()
|
Add a basic Storage engine to talk to the DB"""
This is the Storage engine. It's how everything should talk to the database
layer that sits on the inside of the inventory-control system.
"""
import MySQLdb
class StorageEngine(object):
"""
Instantiate a DB access object, create all the necessary hooks and
then the accessors to a SQL database.
"""
def __init__(self, config):
self.config = config
self.db = MySQLdb.connect(host=self.config['host'],
user=self.config['user'],
passwd=self.config['password'],
db=self.config['db'])
self.cursor = self.db.cursor()
|
<commit_before><commit_msg>Add a basic Storage engine to talk to the DB<commit_after>"""
This is the Storage engine. It's how everything should talk to the database
layer that sits on the inside of the inventory-control system.
"""
import MySQLdb
class StorageEngine(object):
"""
Instantiate a DB access object, create all the necessary hooks and
then the accessors to a SQL database.
"""
def __init__(self, config):
self.config = config
self.db = MySQLdb.connect(host=self.config['host'],
user=self.config['user'],
passwd=self.config['password'],
db=self.config['db'])
self.cursor = self.db.cursor()
|
|
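A minimal usage sketch for the engine above; the connection values are placeholders, and the query merely shows the cursor is live:

from inventory_control.storage import StorageEngine

# Placeholder credentials -- take real values from your deployment config.
config = {
    'host': 'localhost',
    'user': 'inventory',
    'password': 'secret',
    'db': 'inventory_control',
}
engine = StorageEngine(config)
engine.cursor.execute("SELECT VERSION()")
print(engine.cursor.fetchone())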
399daa8ebec14bc4d7ee6c08135e525190e1eb6f
|
collections/show-test/print-divs.py
|
collections/show-test/print-divs.py
|
# print-divs.py
def printDivs(num):
for i in range(num):
print('<div class="item">Item ' + str(i+1) + '</div>')
printDivs(20)
|
Add short Python script that prints as many dummy divs as needed.
|
Add short Python script that prints as many dummy divs as needed.
Used for idea testing; tracking rather than deleting because we may still need to populate a layout with dozens of test divs, since many collections have 50+ items.
|
Python
|
apache-2.0
|
scholarslab/takeback,scholarslab/takeback,scholarslab/takeback,scholarslab/takeback,scholarslab/takeback
|
Add short Python script that prints as many dummy divs as needed.
Used for idea testing; tracking rather than deleting because we may still need to populate a layout with dozens of test divs, since many collections have 50+ items.
|
# print-divs.py
def printDivs(num):
for i in range(num):
print('<div class="item">Item ' + str(i+1) + '</div>')
printDivs(20)
|
<commit_before><commit_msg>Add short Python script that prints as many dummy divs as needed.
Used for idea testing; tracking rather than deleting because we may still need to populate a layout with dozens of test divs, since many collections have 50+ items.<commit_after>
|
# print-divs.py
def printDivs(num):
for i in range(num):
print('<div class="item">Item ' + str(i+1) + '</div>')
printDivs(20)
|
Add short Python script that prints as many dummy divs as needed.
Used for idea testing; tracking rather than deleting because we may still need to populate a layout with dozens of test divs, since many collections have 50+ items.# print-divs.py
def printDivs(num):
for i in range(num):
print('<div class="item">Item ' + str(i+1) + '</div>')
printDivs(20)
|
<commit_before><commit_msg>Add short Python script that prints as many dummy divs as needed.
Used for idea testing; tracking rather than deleting because we may still need to populate a layout with dozens of test divs, since many collections have 50+ items.<commit_after># print-divs.py
def printDivs(num):
for i in range(num):
print('<div class="item">Item ' + str(i+1) + '</div>')
printDivs(20)
|
|
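The helper above is a plain loop; an equivalent one-liner sketch for comparison (f-strings need Python 3.6+, so this is a stylistic alternative rather than a drop-in for older interpreters):

# Same twenty dummy divs, built as one string.
print('\n'.join(f'<div class="item">Item {i + 1}</div>' for i in range(20)))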
2b80b358edd5bcf914d0c709369dbbcfd748772b
|
common/djangoapps/mitxmako/tests.py
|
common/djangoapps/mitxmako/tests.py
|
from django.test import TestCase
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.conf import settings
from mitxmako.shortcuts import marketing_link
from mock import patch
class ShortcutsTests(TestCase):
"""
Test the mitxmako shortcuts file
"""
@override_settings(MKTG_URLS={'ROOT': 'dummy-root', 'ABOUT': '/about-us'})
@override_settings(MKTG_URL_LINK_MAP={'ABOUT': 'about_edx'})
def test_marketing_link(self):
# test marketing site on
with patch.dict('django.conf.settings.MITX_FEATURES', {'ENABLE_MKTG_SITE': True}):
expected_link = 'dummy-root/about-us'
link = marketing_link('ABOUT')
self.assertEquals(link, expected_link)
# test marketing site off
with patch.dict('django.conf.settings.MITX_FEATURES', {'ENABLE_MKTG_SITE': False}):
expected_link = reverse('about_edx')
link = marketing_link('ABOUT')
self.assertEquals(link, expected_link)
|
Add in a test for the marketing_link function in mitxmako
|
Add in a test for the marketing_link function in mitxmako
|
Python
|
agpl-3.0
|
polimediaupv/edx-platform,praveen-pal/edx-platform,pdehaye/theming-edx-platform,raccoongang/edx-platform,defance/edx-platform,atsolakid/edx-platform,Softmotions/edx-platform,IONISx/edx-platform,ampax/edx-platform,LICEF/edx-platform,IndonesiaX/edx-platform,abdoosh00/edraak,chudaol/edx-platform,morpheby/levelup-by,raccoongang/edx-platform,eduNEXT/edunext-platform,dkarakats/edx-platform,peterm-itr/edx-platform,Edraak/circleci-edx-platform,caesar2164/edx-platform,Semi-global/edx-platform,cpennington/edx-platform,waheedahmed/edx-platform,xuxiao19910803/edx-platform,pabloborrego93/edx-platform,EduPepperPDTesting/pepper2013-testing,SravanthiSinha/edx-platform,solashirai/edx-platform,rhndg/openedx,DefyVentures/edx-platform,cognitiveclass/edx-platform,pepeportela/edx-platform,cecep-edu/edx-platform,inares/edx-platform,J861449197/edx-platform,ampax/edx-platform-backup,eemirtekin/edx-platform,arifsetiawan/edx-platform,msegado/edx-platform,peterm-itr/edx-platform,antoviaque/edx-platform,mushtaqak/edx-platform,franosincic/edx-platform,PepperPD/edx-pepper-platform,hamzehd/edx-platform,kmoocdev/edx-platform,OmarIthawi/edx-platform,beacloudgenius/edx-platform,deepsrijit1105/edx-platform,itsjeyd/edx-platform,JCBarahona/edX,chauhanhardik/populo,bigdatauniversity/edx-platform,DNFcode/edx-platform,edry/edx-platform,rationalAgent/edx-platform-custom,iivic/BoiseStateX,atsolakid/edx-platform,cselis86/edx-platform,ahmadiga/min_edx,kursitet/edx-platform,SivilTaram/edx-platform,wwj718/ANALYSE,jbassen/edx-platform,jelugbo/tundex,devs1991/test_edx_docmode,yokose-ks/edx-platform,jelugbo/tundex,pelikanchik/edx-platform,cecep-edu/edx-platform,ahmadiga/min_edx,prarthitm/edxplatform,edry/edx-platform,vismartltd/edx-platform,doganov/edx-platform,devs1991/test_edx_docmode,miptliot/edx-platform,EDUlib/edx-platform,shashank971/edx-platform,atsolakid/edx-platform,angelapper/edx-platform,rue89-tech/edx-platform,mjg2203/edx-platform-seas,appliedx/edx-platform,jazztpt/edx-platform,chrisndodge/edx-platform,zadgroup/edx-platform,EduPepperPDTesting/pepper2013-testing,ovnicraft/edx-platform,stvstnfrd/edx-platform,devs1991/test_edx_docmode,etzhou/edx-platform,vasyarv/edx-platform,doganov/edx-platform,mushtaqak/edx-platform,pepeportela/edx-platform,pku9104038/edx-platform,alexthered/kienhoc-platform,ESOedX/edx-platform,kamalx/edx-platform,nttks/jenkins-test,Ayub-Khan/edx-platform,PepperPD/edx-pepper-platform,lduarte1991/edx-platform,cselis86/edx-platform,Edraak/circleci-edx-platform,zubair-arbi/edx-platform,cecep-edu/edx-platform,xinjiguaike/edx-platform,eestay/edx-platform,procangroup/edx-platform,ahmadiga/min_edx,morenopc/edx-platform,SivilTaram/edx-platform,proversity-org/edx-platform,dsajkl/123,jolyonb/edx-platform,hkawasaki/kawasaki-aio8-2,Shrhawk/edx-platform,analyseuc3m/ANALYSE-v1,tiagochiavericosta/edx-platform,dsajkl/123,nikolas/edx-platform,Ayub-Khan/edx-platform,WatanabeYasumasa/edx-platform,adoosii/edx-platform,TeachAtTUM/edx-platform,jbzdak/edx-platform,simbs/edx-platform,vikas1885/test1,dkarakats/edx-platform,inares/edx-platform,cyanna/edx-platform,jazztpt/edx-platform,cpennington/edx-platform,utecuy/edx-platform,amir-qayyum-khan/edx-platform,edx-solutions/edx-platform,zofuthan/edx-platform,nanolearning/edx-platform,edry/edx-platform,stvstnfrd/edx-platform,zubair-arbi/edx-platform,EduPepperPD/pepper2013,louyihua/edx-platform,syjeon/new_edx,AkA84/edx-platform,olexiim/edx-platform,eduNEXT/edunext-platform,openfun/edx-platform,andyzsf/edx,synergeticsedx/deployment-wipro,doismellburning/edx-platform,LICEF/edx-platform,carsonge
e/edx-platform,dsajkl/reqiop,CourseTalk/edx-platform,fly19890211/edx-platform,UXE/local-edx,xingyepei/edx-platform,cecep-edu/edx-platform,martynovp/edx-platform,CourseTalk/edx-platform,fintech-circle/edx-platform,Edraak/circleci-edx-platform,stvstnfrd/edx-platform,morenopc/edx-platform,shubhdev/edxOnBaadal,chand3040/cloud_that,10clouds/edx-platform,zerobatu/edx-platform,bigdatauniversity/edx-platform,tanmaykm/edx-platform,cognitiveclass/edx-platform,franosincic/edx-platform,jolyonb/edx-platform,ferabra/edx-platform,martynovp/edx-platform,morenopc/edx-platform,LearnEra/LearnEraPlaftform,vikas1885/test1,nanolearning/edx-platform,Lektorium-LLC/edx-platform,cyanna/edx-platform,olexiim/edx-platform,zofuthan/edx-platform,torchingloom/edx-platform,benpatterson/edx-platform,vasyarv/edx-platform,kxliugang/edx-platform,openfun/edx-platform,abdoosh00/edx-rtl-final,arbrandes/edx-platform,TsinghuaX/edx-platform,vismartltd/edx-platform,nttks/jenkins-test,ampax/edx-platform,Lektorium-LLC/edx-platform,proversity-org/edx-platform,olexiim/edx-platform,shubhdev/edx-platform,deepsrijit1105/edx-platform,playm2mboy/edx-platform,jazkarta/edx-platform,jamiefolsom/edx-platform,kmoocdev/edx-platform,shubhdev/edx-platform,alu042/edx-platform,mitocw/edx-platform,sudheerchintala/LearnEraPlatForm,hkawasaki/kawasaki-aio8-1,adoosii/edx-platform,Unow/edx-platform,Ayub-Khan/edx-platform,WatanabeYasumasa/edx-platform,jonathan-beard/edx-platform,dsajkl/reqiop,mjirayu/sit_academy,cognitiveclass/edx-platform,antoviaque/edx-platform,jbassen/edx-platform,fly19890211/edx-platform,beacloudgenius/edx-platform,ahmadio/edx-platform,tiagochiavericosta/edx-platform,alu042/edx-platform,doismellburning/edx-platform,jzoldak/edx-platform,leansoft/edx-platform,ESOedX/edx-platform,zhenzhai/edx-platform,PepperPD/edx-pepper-platform,etzhou/edx-platform,jbzdak/edx-platform,JioEducation/edx-platform,ZLLab-Mooc/edx-platform,OmarIthawi/edx-platform,gsehub/edx-platform,xuxiao19910803/edx,eestay/edx-platform,zerobatu/edx-platform,etzhou/edx-platform,praveen-pal/edx-platform,ampax/edx-platform-backup,bitifirefly/edx-platform,ampax/edx-platform-backup,auferack08/edx-platform,bitifirefly/edx-platform,jswope00/griffinx,shurihell/testasia,dsajkl/123,shubhdev/openedx,jazkarta/edx-platform,Endika/edx-platform,TsinghuaX/edx-platform,chauhanhardik/populo,rismalrv/edx-platform,RPI-OPENEDX/edx-platform,nanolearningllc/edx-platform-cypress-2,nttks/edx-platform,xingyepei/edx-platform,xingyepei/edx-platform,TeachAtTUM/edx-platform,hastexo/edx-platform,itsjeyd/edx-platform,martynovp/edx-platform,kmoocdev2/edx-platform,10clouds/edx-platform,longmen21/edx-platform,antonve/s4-project-mooc,zerobatu/edx-platform,UXE/local-edx,morpheby/levelup-by,xinjiguaike/edx-platform,angelapper/edx-platform,vasyarv/edx-platform,IndonesiaX/edx-platform,MSOpenTech/edx-platform,Kalyzee/edx-platform,xuxiao19910803/edx-platform,edry/edx-platform,chudaol/edx-platform,shashank971/edx-platform,dcosentino/edx-platform,EduPepperPD/pepper2013,rationalAgent/edx-platform-custom,kxliugang/edx-platform,peterm-itr/edx-platform,CourseTalk/edx-platform,xuxiao19910803/edx,jamiefolsom/edx-platform,chauhanhardik/populo,don-github/edx-platform,abdoosh00/edraak,inares/edx-platform,halvertoluke/edx-platform,zadgroup/edx-platform,eemirtekin/edx-platform,dsajkl/reqiop,xuxiao19910803/edx,xingyepei/edx-platform,marcore/edx-platform,mahendra-r/edx-platform,rue89-tech/edx-platform,AkA84/edx-platform,jzoldak/edx-platform,shubhdev/edxOnBaadal,4eek/edx-platform,antonve/s4-project-mooc,Livit/Livit.Learn.EdX,motion201
5/edx-platform,polimediaupv/edx-platform,ZLLab-Mooc/edx-platform,torchingloom/edx-platform,hkawasaki/kawasaki-aio8-0,ovnicraft/edx-platform,kalebhartje/schoolboost,shubhdev/openedx,SivilTaram/edx-platform,JioEducation/edx-platform,yokose-ks/edx-platform,simbs/edx-platform,Endika/edx-platform,hkawasaki/kawasaki-aio8-0,carsongee/edx-platform,gymnasium/edx-platform,waheedahmed/edx-platform,Livit/Livit.Learn.EdX,xinjiguaike/edx-platform,Stanford-Online/edx-platform,Softmotions/edx-platform,miptliot/edx-platform,tanmaykm/edx-platform,rhndg/openedx,playm2mboy/edx-platform,CredoReference/edx-platform,longmen21/edx-platform,Stanford-Online/edx-platform,alexthered/kienhoc-platform,eemirtekin/edx-platform,OmarIthawi/edx-platform,EDUlib/edx-platform,dsajkl/123,prarthitm/edxplatform,IITBinterns13/edx-platform-dev,kalebhartje/schoolboost,y12uc231/edx-platform,kalebhartje/schoolboost,AkA84/edx-platform,jamiefolsom/edx-platform,JCBarahona/edX,leansoft/edx-platform,zubair-arbi/edx-platform,B-MOOC/edx-platform,yokose-ks/edx-platform,ZLLab-Mooc/edx-platform,zadgroup/edx-platform,dsajkl/123,jjmiranda/edx-platform,cyanna/edx-platform,arifsetiawan/edx-platform,mitocw/edx-platform,wwj718/ANALYSE,MakeHer/edx-platform,stvstnfrd/edx-platform,nagyistoce/edx-platform,rue89-tech/edx-platform,B-MOOC/edx-platform,Softmotions/edx-platform,TeachAtTUM/edx-platform,jswope00/griffinx,AkA84/edx-platform,mjg2203/edx-platform-seas,morpheby/levelup-by,pomegranited/edx-platform,MakeHer/edx-platform,unicri/edx-platform,4eek/edx-platform,motion2015/edx-platform,don-github/edx-platform,CredoReference/edx-platform,iivic/BoiseStateX,ferabra/edx-platform,Stanford-Online/edx-platform,halvertoluke/edx-platform,procangroup/edx-platform,shurihell/testasia,SravanthiSinha/edx-platform,syjeon/new_edx,dkarakats/edx-platform,cecep-edu/edx-platform,polimediaupv/edx-platform,RPI-OPENEDX/edx-platform,hastexo/edx-platform,appliedx/edx-platform,rhndg/openedx,JCBarahona/edX,fly19890211/edx-platform,Endika/edx-platform,motion2015/edx-platform,apigee/edx-platform,hmcmooc/muddx-platform,kmoocdev/edx-platform,amir-qayyum-khan/edx-platform,SravanthiSinha/edx-platform,TsinghuaX/edx-platform,torchingloom/edx-platform,IITBinterns13/edx-platform-dev,eemirtekin/edx-platform,Edraak/edx-platform,nanolearningllc/edx-platform-cypress,UOMx/edx-platform,doismellburning/edx-platform,mahendra-r/edx-platform,BehavioralInsightsTeam/edx-platform,DNFcode/edx-platform,halvertoluke/edx-platform,valtech-mooc/edx-platform,jbzdak/edx-platform,nttks/jenkins-test,jruiperezv/ANALYSE,EduPepperPDTesting/pepper2013-testing,praveen-pal/edx-platform,ferabra/edx-platform,sameetb-cuelogic/edx-platform-test,IONISx/edx-platform,MSOpenTech/edx-platform,pomegranited/edx-platform,philanthropy-u/edx-platform,procangroup/edx-platform,longmen21/edx-platform,beni55/edx-platform,UOMx/edx-platform,kamalx/edx-platform,Shrhawk/edx-platform,pdehaye/theming-edx-platform,bdero/edx-platform,caesar2164/edx-platform,shashank971/edx-platform,devs1991/test_edx_docmode,pelikanchik/edx-platform,hamzehd/edx-platform,AkA84/edx-platform,jruiperezv/ANALYSE,msegado/edx-platform,nttks/jenkins-test,andyzsf/edx,cognitiveclass/edx-platform,appliedx/edx-platform,IONISx/edx-platform,Ayub-Khan/edx-platform,edx/edx-platform,Kalyzee/edx-platform,Shrhawk/edx-platform,EduPepperPDTesting/pepper2013-testing,devs1991/test_edx_docmode,simbs/edx-platform,philanthropy-u/edx-platform,morenopc/edx-platform,ak2703/edx-platform,jazkarta/edx-platform-for-isc,jruiperezv/ANALYSE,JCBarahona/edX,knehez/edx-platform,simbs/edx-platform,atsol
akid/edx-platform,philanthropy-u/edx-platform,openfun/edx-platform,don-github/edx-platform,sameetb-cuelogic/edx-platform-test,mcgachey/edx-platform,don-github/edx-platform,jbzdak/edx-platform,kxliugang/edx-platform,pomegranited/edx-platform,dcosentino/edx-platform,nanolearning/edx-platform,kursitet/edx-platform,DNFcode/edx-platform,shubhdev/edxOnBaadal,louyihua/edx-platform,zofuthan/edx-platform,ubc/edx-platform,tiagochiavericosta/edx-platform,Unow/edx-platform,marcore/edx-platform,devs1991/test_edx_docmode,SivilTaram/edx-platform,inares/edx-platform,mjirayu/sit_academy,JioEducation/edx-platform,vismartltd/edx-platform,jamiefolsom/edx-platform,PepperPD/edx-pepper-platform,caesar2164/edx-platform,10clouds/edx-platform,edry/edx-platform,ferabra/edx-platform,syjeon/new_edx,pabloborrego93/edx-platform,knehez/edx-platform,edx-solutions/edx-platform,CredoReference/edx-platform,chrisndodge/edx-platform,4eek/edx-platform,fintech-circle/edx-platform,hkawasaki/kawasaki-aio8-1,vikas1885/test1,y12uc231/edx-platform,ubc/edx-platform,olexiim/edx-platform,a-parhom/edx-platform,hkawasaki/kawasaki-aio8-2,raccoongang/edx-platform,ahmadio/edx-platform,solashirai/edx-platform,mjg2203/edx-platform-seas,wwj718/edx-platform,mjirayu/sit_academy,andyzsf/edx,motion2015/a3,pepeportela/edx-platform,Kalyzee/edx-platform,shabab12/edx-platform,analyseuc3m/ANALYSE-v1,vasyarv/edx-platform,kalebhartje/schoolboost,alexthered/kienhoc-platform,hkawasaki/kawasaki-aio8-1,auferack08/edx-platform,motion2015/edx-platform,chauhanhardik/populo_2,antoviaque/edx-platform,BehavioralInsightsTeam/edx-platform,BehavioralInsightsTeam/edx-platform,wwj718/edx-platform,shubhdev/openedx,kalebhartje/schoolboost,nikolas/edx-platform,bigdatauniversity/edx-platform,Edraak/circleci-edx-platform,pomegranited/edx-platform,y12uc231/edx-platform,abdoosh00/edx-rtl-final,xingyepei/edx-platform,wwj718/ANALYSE,chauhanhardik/populo_2,miptliot/edx-platform,cpennington/edx-platform,mahendra-r/edx-platform,valtech-mooc/edx-platform,ESOedX/edx-platform,shabab12/edx-platform,LearnEra/LearnEraPlaftform,kursitet/edx-platform,fintech-circle/edx-platform,dkarakats/edx-platform,mitocw/edx-platform,hkawasaki/kawasaki-aio8-2,jjmiranda/edx-platform,alu042/edx-platform,arifsetiawan/edx-platform,playm2mboy/edx-platform,dcosentino/edx-platform,mtlchun/edx,knehez/edx-platform,appsembler/edx-platform,LICEF/edx-platform,Unow/edx-platform,ahmedaljazzar/edx-platform,romain-li/edx-platform,mahendra-r/edx-platform,DefyVentures/edx-platform,RPI-OPENEDX/edx-platform,ovnicraft/edx-platform,nikolas/edx-platform,halvertoluke/edx-platform,eemirtekin/edx-platform,ahmedaljazzar/edx-platform,mcgachey/edx-platform,hkawasaki/kawasaki-aio8-2,benpatterson/edx-platform,olexiim/edx-platform,wwj718/edx-platform,jamesblunt/edx-platform,jzoldak/edx-platform,edx-solutions/edx-platform,apigee/edx-platform,dkarakats/edx-platform,jelugbo/tundex,J861449197/edx-platform,appsembler/edx-platform,adoosii/edx-platform,jswope00/griffinx,pelikanchik/edx-platform,msegado/edx-platform,eestay/edx-platform,apigee/edx-platform,Edraak/edx-platform,proversity-org/edx-platform,nanolearningllc/edx-platform-cypress-2,a-parhom/edx-platform,shubhdev/edxOnBaadal,J861449197/edx-platform,solashirai/edx-platform,Ayub-Khan/edx-platform,Edraak/edx-platform,auferack08/edx-platform,lduarte1991/edx-platform,cselis86/edx-platform,antonve/s4-project-mooc,MakeHer/edx-platform,SivilTaram/edx-platform,Livit/Livit.Learn.EdX,naresh21/synergetics-edx-platform,pepeportela/edx-platform,naresh21/synergetics-edx-platform,nttks/jenkins-test,do
ismellburning/edx-platform,tanmaykm/edx-platform,openfun/edx-platform,EduPepperPD/pepper2013,jswope00/griffinx,jjmiranda/edx-platform,dcosentino/edx-platform,arifsetiawan/edx-platform,rhndg/openedx,nanolearningllc/edx-platform-cypress,ubc/edx-platform,hamzehd/edx-platform,ahmedaljazzar/edx-platform,wwj718/ANALYSE,kamalx/edx-platform,ubc/edx-platform,Shrhawk/edx-platform,benpatterson/edx-platform,playm2mboy/edx-platform,bigdatauniversity/edx-platform,teltek/edx-platform,ovnicraft/edx-platform,auferack08/edx-platform,gsehub/edx-platform,iivic/BoiseStateX,pdehaye/theming-edx-platform,MSOpenTech/edx-platform,jamiefolsom/edx-platform,mtlchun/edx,IONISx/edx-platform,kmoocdev/edx-platform,xuxiao19910803/edx,vasyarv/edx-platform,shubhdev/edxOnBaadal,eduNEXT/edx-platform,cselis86/edx-platform,utecuy/edx-platform,shurihell/testasia,marcore/edx-platform,chauhanhardik/populo_2,a-parhom/edx-platform,ahmedaljazzar/edx-platform,nanolearning/edx-platform,jolyonb/edx-platform,y12uc231/edx-platform,sudheerchintala/LearnEraPlatForm,edx-solutions/edx-platform,jazztpt/edx-platform,louyihua/edx-platform,EduPepperPDTesting/pepper2013-testing,bitifirefly/edx-platform,franosincic/edx-platform,valtech-mooc/edx-platform,hamzehd/edx-platform,eduNEXT/edx-platform,jazkarta/edx-platform,B-MOOC/edx-platform,IONISx/edx-platform,mjirayu/sit_academy,alexthered/kienhoc-platform,antonve/s4-project-mooc,Shrhawk/edx-platform,ESOedX/edx-platform,xuxiao19910803/edx-platform,romain-li/edx-platform,appsembler/edx-platform,msegado/edx-platform,apigee/edx-platform,romain-li/edx-platform,halvertoluke/edx-platform,xuxiao19910803/edx-platform,PepperPD/edx-pepper-platform,jonathan-beard/edx-platform,zadgroup/edx-platform,MSOpenTech/edx-platform,analyseuc3m/ANALYSE-v1,nagyistoce/edx-platform,Unow/edx-platform,nikolas/edx-platform,Edraak/circleci-edx-platform,Semi-global/edx-platform,synergeticsedx/deployment-wipro,iivic/BoiseStateX,chauhanhardik/populo_2,J861449197/edx-platform,romain-li/edx-platform,UOMx/edx-platform,cyanna/edx-platform,Endika/edx-platform,shurihell/testasia,Lektorium-LLC/edx-platform,EduPepperPD/pepper2013,teltek/edx-platform,4eek/edx-platform,polimediaupv/edx-platform,mtlchun/edx,EduPepperPD/pepper2013,chauhanhardik/populo,beni55/edx-platform,itsjeyd/edx-platform,gymnasium/edx-platform,appliedx/edx-platform,abdoosh00/edx-rtl-final,mitocw/edx-platform,hkawasaki/kawasaki-aio8-0,cpennington/edx-platform,sudheerchintala/LearnEraPlatForm,unicri/edx-platform,chand3040/cloud_that,nanolearningllc/edx-platform-cypress,arbrandes/edx-platform,mcgachey/edx-platform,dsajkl/reqiop,defance/edx-platform,amir-qayyum-khan/edx-platform,waheedahmed/edx-platform,jonathan-beard/edx-platform,gymnasium/edx-platform,vikas1885/test1,rismalrv/edx-platform,zerobatu/edx-platform,gsehub/edx-platform,praveen-pal/edx-platform,ak2703/edx-platform,rue89-tech/edx-platform,Edraak/edx-platform,xuxiao19910803/edx-platform,nagyistoce/edx-platform,jamesblunt/edx-platform,adoosii/edx-platform,angelapper/edx-platform,OmarIthawi/edx-platform,RPI-OPENEDX/edx-platform,chauhanhardik/populo,jswope00/GAI,longmen21/edx-platform,jolyonb/edx-platform,bigdatauniversity/edx-platform,simbs/edx-platform,xuxiao19910803/edx,alu042/edx-platform,arbrandes/edx-platform,SravanthiSinha/edx-platform,chudaol/edx-platform,dcosentino/edx-platform,Edraak/edraak-platform,bitifirefly/edx-platform,rhndg/openedx,antoviaque/edx-platform,doismellburning/edx-platform,Semi-global/edx-platform,don-github/edx-platform,jruiperezv/ANALYSE,sameetb-cuelogic/edx-platform-test,nanolearningllc/edx-plat
form-cypress-2,appsembler/edx-platform,jazztpt/edx-platform,proversity-org/edx-platform,beni55/edx-platform,inares/edx-platform,beni55/edx-platform,hastexo/edx-platform,Semi-global/edx-platform,DefyVentures/edx-platform,wwj718/edx-platform,ubc/edx-platform,rationalAgent/edx-platform-custom,BehavioralInsightsTeam/edx-platform,abdoosh00/edraak,jbassen/edx-platform,zofuthan/edx-platform,etzhou/edx-platform,yokose-ks/edx-platform,TsinghuaX/edx-platform,jonathan-beard/edx-platform,beacloudgenius/edx-platform,unicri/edx-platform,solashirai/edx-platform,zhenzhai/edx-platform,WatanabeYasumasa/edx-platform,doganov/edx-platform,nikolas/edx-platform,IndonesiaX/edx-platform,TeachAtTUM/edx-platform,nanolearningllc/edx-platform-cypress,jazkarta/edx-platform,Softmotions/edx-platform,ak2703/edx-platform,eestay/edx-platform,mahendra-r/edx-platform,alexthered/kienhoc-platform,longmen21/edx-platform,jonathan-beard/edx-platform,vikas1885/test1,mushtaqak/edx-platform,J861449197/edx-platform,defance/edx-platform,benpatterson/edx-platform,DNFcode/edx-platform,hmcmooc/muddx-platform,fly19890211/edx-platform,cselis86/edx-platform,eduNEXT/edunext-platform,nttks/edx-platform,raccoongang/edx-platform,itsjeyd/edx-platform,jswope00/GAI,jelugbo/tundex,hastexo/edx-platform,jamesblunt/edx-platform,yokose-ks/edx-platform,a-parhom/edx-platform,louyihua/edx-platform,MakeHer/edx-platform,sameetb-cuelogic/edx-platform-test,franosincic/edx-platform,abdoosh00/edraak,edx/edx-platform,mtlchun/edx,LICEF/edx-platform,kursitet/edx-platform,analyseuc3m/ANALYSE-v1,utecuy/edx-platform,4eek/edx-platform,knehez/edx-platform,mjg2203/edx-platform-seas,edx/edx-platform,mjirayu/sit_academy,cognitiveclass/edx-platform,deepsrijit1105/edx-platform,shubhdev/openedx,martynovp/edx-platform,jbassen/edx-platform,kmoocdev2/edx-platform,beacloudgenius/edx-platform,motion2015/a3,devs1991/test_edx_docmode,doganov/edx-platform,tiagochiavericosta/edx-platform,hmcmooc/muddx-platform,polimediaupv/edx-platform,zhenzhai/edx-platform,fintech-circle/edx-platform,WatanabeYasumasa/edx-platform,vismartltd/edx-platform,jazkarta/edx-platform-for-isc,Softmotions/edx-platform,shabab12/edx-platform,zerobatu/edx-platform,mbareta/edx-platform-ft,morpheby/levelup-by,eduNEXT/edx-platform,tiagochiavericosta/edx-platform,zubair-arbi/edx-platform,unicri/edx-platform,mtlchun/edx,eestay/edx-platform,rismalrv/edx-platform,mushtaqak/edx-platform,jruiperezv/ANALYSE,valtech-mooc/edx-platform,iivic/BoiseStateX,pku9104038/edx-platform,playm2mboy/edx-platform,mbareta/edx-platform-ft,adoosii/edx-platform,nagyistoce/edx-platform,kmoocdev2/edx-platform,shubhdev/edx-platform,hkawasaki/kawasaki-aio8-1,LearnEra/LearnEraPlaftform,amir-qayyum-khan/edx-platform,chrisndodge/edx-platform,knehez/edx-platform,y12uc231/edx-platform,defance/edx-platform,pabloborrego93/edx-platform,waheedahmed/edx-platform,chudaol/edx-platform,ahmadio/edx-platform,DNFcode/edx-platform,ovnicraft/edx-platform,ampax/edx-platform,DefyVentures/edx-platform,nanolearningllc/edx-platform-cypress-2,jjmiranda/edx-platform,Kalyzee/edx-platform,EDUlib/edx-platform,atsolakid/edx-platform,msegado/edx-platform,shubhdev/edx-platform,nttks/edx-platform,Edraak/edraak-platform,doganov/edx-platform,shashank971/edx-platform,tanmaykm/edx-platform,kursitet/edx-platform,motion2015/a3,naresh21/synergetics-edx-platform,jazkarta/edx-platform-for-isc,CourseTalk/edx-platform,ampax/edx-platform-backup,ak2703/edx-platform,eduNEXT/edx-platform,pku9104038/edx-platform,synergeticsedx/deployment-wipro,mbareta/edx-platform-ft,kmoocdev/edx-platform,beni55
/edx-platform,shubhdev/edx-platform,pdehaye/theming-edx-platform,zubair-arbi/edx-platform,kxliugang/edx-platform,prarthitm/edxplatform,prarthitm/edxplatform,chauhanhardik/populo_2,mushtaqak/edx-platform,ak2703/edx-platform,shurihell/testasia,gsehub/edx-platform,zofuthan/edx-platform,Livit/Livit.Learn.EdX,rue89-tech/edx-platform,IndonesiaX/edx-platform,martynovp/edx-platform,nttks/edx-platform,vismartltd/edx-platform,ahmadiga/min_edx,Semi-global/edx-platform,waheedahmed/edx-platform,teltek/edx-platform,B-MOOC/edx-platform,shubhdev/openedx,pelikanchik/edx-platform,shashank971/edx-platform,pomegranited/edx-platform,pabloborrego93/edx-platform,abdoosh00/edx-rtl-final,synergeticsedx/deployment-wipro,valtech-mooc/edx-platform,romain-li/edx-platform,jazztpt/edx-platform,carsongee/edx-platform,EduPepperPDTesting/pepper2013-testing,gymnasium/edx-platform,xinjiguaike/edx-platform,JCBarahona/edX,mcgachey/edx-platform,devs1991/test_edx_docmode,CredoReference/edx-platform,chand3040/cloud_that,marcore/edx-platform,leansoft/edx-platform,ferabra/edx-platform,Edraak/edx-platform,kmoocdev2/edx-platform,torchingloom/edx-platform,ahmadiga/min_edx,kxliugang/edx-platform,UXE/local-edx,motion2015/a3,chrisndodge/edx-platform,MakeHer/edx-platform,chudaol/edx-platform,ahmadio/edx-platform,leansoft/edx-platform,jamesblunt/edx-platform,arbrandes/edx-platform,sudheerchintala/LearnEraPlatForm,jazkarta/edx-platform,openfun/edx-platform,solashirai/edx-platform,IITBinterns13/edx-platform-dev,ZLLab-Mooc/edx-platform,bdero/edx-platform,jbzdak/edx-platform,bitifirefly/edx-platform,RPI-OPENEDX/edx-platform,rationalAgent/edx-platform-custom,Edraak/edraak-platform,jzoldak/edx-platform,deepsrijit1105/edx-platform,philanthropy-u/edx-platform,bdero/edx-platform,unicri/edx-platform,10clouds/edx-platform,Edraak/edraak-platform,syjeon/new_edx,shabab12/edx-platform,mcgachey/edx-platform,ahmadio/edx-platform,bdero/edx-platform,jbassen/edx-platform,nanolearningllc/edx-platform-cypress-2,leansoft/edx-platform,B-MOOC/edx-platform,andyzsf/edx,eduNEXT/edunext-platform,hmcmooc/muddx-platform,LICEF/edx-platform,kamalx/edx-platform,EDUlib/edx-platform,chand3040/cloud_that,zhenzhai/edx-platform,procangroup/edx-platform,utecuy/edx-platform,lduarte1991/edx-platform,DefyVentures/edx-platform,jelugbo/tundex,chand3040/cloud_that,ampax/edx-platform,benpatterson/edx-platform,pku9104038/edx-platform,morenopc/edx-platform,kmoocdev2/edx-platform,edx/edx-platform,JioEducation/edx-platform,wwj718/ANALYSE,naresh21/synergetics-edx-platform,jswope00/GAI,utecuy/edx-platform,mbareta/edx-platform-ft,cyanna/edx-platform,rismalrv/edx-platform,franosincic/edx-platform,zhenzhai/edx-platform,jamesblunt/edx-platform,nanolearning/edx-platform,xinjiguaike/edx-platform,jazkarta/edx-platform-for-isc,appliedx/edx-platform,rismalrv/edx-platform,UXE/local-edx,hkawasaki/kawasaki-aio8-0,antonve/s4-project-mooc,Kalyzee/edx-platform,carsongee/edx-platform,jswope00/GAI,rationalAgent/edx-platform-custom,motion2015/a3,etzhou/edx-platform,hamzehd/edx-platform,IndonesiaX/edx-platform,beacloudgenius/edx-platform,SravanthiSinha/edx-platform,peterm-itr/edx-platform,motion2015/edx-platform,lduarte1991/edx-platform,jazkarta/edx-platform-for-isc,miptliot/edx-platform,caesar2164/edx-platform,jswope00/griffinx,nanolearningllc/edx-platform-cypress,Lektorium-LLC/edx-platform,ampax/edx-platform-backup,UOMx/edx-platform,arifsetiawan/edx-platform,sameetb-cuelogic/edx-platform-test,Stanford-Online/edx-platform,nagyistoce/edx-platform,zadgroup/edx-platform,MSOpenTech/edx-platform,nttks/edx-platform,
torchingloom/edx-platform,LearnEra/LearnEraPlaftform,ZLLab-Mooc/edx-platform,IITBinterns13/edx-platform-dev,wwj718/edx-platform,teltek/edx-platform,angelapper/edx-platform,kamalx/edx-platform,fly19890211/edx-platform
|
Add in a test for the marketing_link function in mitxmako
|
from django.test import TestCase
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.conf import settings
from mitxmako.shortcuts import marketing_link
from mock import patch
class ShortcutsTests(TestCase):
"""
Test the mitxmako shortcuts file
"""
@override_settings(MKTG_URLS={'ROOT': 'dummy-root', 'ABOUT': '/about-us'})
@override_settings(MKTG_URL_LINK_MAP={'ABOUT': 'about_edx'})
def test_marketing_link(self):
# test marketing site on
with patch.dict('django.conf.settings.MITX_FEATURES', {'ENABLE_MKTG_SITE': True}):
expected_link = 'dummy-root/about-us'
link = marketing_link('ABOUT')
self.assertEquals(link, expected_link)
# test marketing site off
with patch.dict('django.conf.settings.MITX_FEATURES', {'ENABLE_MKTG_SITE': False}):
expected_link = reverse('about_edx')
link = marketing_link('ABOUT')
self.assertEquals(link, expected_link)
|
<commit_before><commit_msg>Add in a test for the marketing_link function in mitxmako<commit_after>
|
from django.test import TestCase
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.conf import settings
from mitxmako.shortcuts import marketing_link
from mock import patch
class ShortcutsTests(TestCase):
"""
Test the mitxmako shortcuts file
"""
@override_settings(MKTG_URLS={'ROOT': 'dummy-root', 'ABOUT': '/about-us'})
@override_settings(MKTG_URL_LINK_MAP={'ABOUT': 'about_edx'})
def test_marketing_link(self):
# test marketing site on
with patch.dict('django.conf.settings.MITX_FEATURES', {'ENABLE_MKTG_SITE': True}):
expected_link = 'dummy-root/about-us'
link = marketing_link('ABOUT')
self.assertEquals(link, expected_link)
# test marketing site off
with patch.dict('django.conf.settings.MITX_FEATURES', {'ENABLE_MKTG_SITE': False}):
expected_link = reverse('about_edx')
link = marketing_link('ABOUT')
self.assertEquals(link, expected_link)
|
Add in a test for the marketing_link function in mitxmakofrom django.test import TestCase
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.conf import settings
from mitxmako.shortcuts import marketing_link
from mock import patch
class ShortcutsTests(TestCase):
"""
Test the mitxmako shortcuts file
"""
@override_settings(MKTG_URLS={'ROOT': 'dummy-root', 'ABOUT': '/about-us'})
@override_settings(MKTG_URL_LINK_MAP={'ABOUT': 'about_edx'})
def test_marketing_link(self):
# test marketing site on
with patch.dict('django.conf.settings.MITX_FEATURES', {'ENABLE_MKTG_SITE': True}):
expected_link = 'dummy-root/about-us'
link = marketing_link('ABOUT')
self.assertEquals(link, expected_link)
# test marketing site off
with patch.dict('django.conf.settings.MITX_FEATURES', {'ENABLE_MKTG_SITE': False}):
expected_link = reverse('about_edx')
link = marketing_link('ABOUT')
self.assertEquals(link, expected_link)
|
<commit_before><commit_msg>Add in a test for the marketing_link function in mitxmako<commit_after>from django.test import TestCase
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.conf import settings
from mitxmako.shortcuts import marketing_link
from mock import patch
class ShortcutsTests(TestCase):
"""
Test the mitxmako shortcuts file
"""
@override_settings(MKTG_URLS={'ROOT': 'dummy-root', 'ABOUT': '/about-us'})
@override_settings(MKTG_URL_LINK_MAP={'ABOUT': 'about_edx'})
def test_marketing_link(self):
# test marketing site on
with patch.dict('django.conf.settings.MITX_FEATURES', {'ENABLE_MKTG_SITE': True}):
expected_link = 'dummy-root/about-us'
link = marketing_link('ABOUT')
self.assertEquals(link, expected_link)
# test marketing site off
with patch.dict('django.conf.settings.MITX_FEATURES', {'ENABLE_MKTG_SITE': False}):
expected_link = reverse('about_edx')
link = marketing_link('ABOUT')
self.assertEquals(link, expected_link)
|
|
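A minimal sketch of the behavior the marketing_link test above pins down, reconstructed from its assertions alone; this is an assumption, not the actual mitxmako implementation. It uses only the names visible in the record (MKTG_URLS, MKTG_URL_LINK_MAP, the ENABLE_MKTG_SITE feature flag and Django's reverse):
from django.conf import settings
from django.core.urlresolvers import reverse

def marketing_link(name):
    """Return the marketing-site URL for `name`, or a reversed Django URL."""
    if settings.MITX_FEATURES.get('ENABLE_MKTG_SITE', False):
        # Marketing site enabled: join the configured root with the named path.
        return settings.MKTG_URLS['ROOT'] + settings.MKTG_URLS[name]
    # Marketing site disabled: fall back to the mapped Django view name.
    return reverse(settings.MKTG_URL_LINK_MAP[name])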
e9f2e966361d8a23c83fbbbb4a4b3d4046203a16
|
CERR_core/Contouring/models/heart/test/test.py
|
CERR_core/Contouring/models/heart/test/test.py
|
#Test script for the heart container: verifies that all the imports are successful
import sys
import os
import numpy as np
import h5py
import fnmatch
from modeling.sync_batchnorm.replicate import patch_replication_callback
from modeling.deeplab import *
from torchvision.utils import make_grid
from dataloaders.utils import decode_seg_map_sequence
from skimage.transform import resize
from dataloaders import custom_transforms as tr
from PIL import Image
from torchvision import transforms
input_size = 512
def main(argv):
print("All imports done. Test Successful")
if __name__ == "__main__":
main(sys.argv)
|
Test script for the heart container
|
Test script for the heart container
|
Python
|
lgpl-2.1
|
cerr/CERR,cerr/CERR,cerr/CERR,aditiiyer/CERR,aditiiyer/CERR,aditiiyer/CERR,aditiiyer/CERR,cerr/CERR,cerr/CERR,cerr/CERR,aditiiyer/CERR,cerr/CERR,aditiiyer/CERR,cerr/CERR,aditiiyer/CERR,cerr/CERR,aditiiyer/CERR
|
Test script for the heart container
|
#Test script for the heart container: verifies that all the imports are successful
import sys
import os
import numpy as np
import h5py
import fnmatch
from modeling.sync_batchnorm.replicate import patch_replication_callback
from modeling.deeplab import *
from torchvision.utils import make_grid
from dataloaders.utils import decode_seg_map_sequence
from skimage.transform import resize
from dataloaders import custom_transforms as tr
from PIL import Image
from torchvision import transforms
input_size = 512
def main(argv):
print("All imports done. Test Successful")
if __name__ == "__main__":
main(sys.argv)
|
<commit_before><commit_msg>Test script for the heart container<commit_after>
|
#Test script for the heart container: verifies that all the imports are successful
import sys
import os
import numpy as np
import h5py
import fnmatch
from modeling.sync_batchnorm.replicate import patch_replication_callback
from modeling.deeplab import *
from torchvision.utils import make_grid
from dataloaders.utils import decode_seg_map_sequence
from skimage.transform import resize
from dataloaders import custom_transforms as tr
from PIL import Image
from torchvision import transforms
input_size = 512
def main(argv):
print("All imports done. Test Successful")
if __name__ == "__main__":
main(sys.argv)
|
Test script for the heart container#Test script for the heart container: verifies that all the imports are successful
import sys
import os
import numpy as np
import h5py
import fnmatch
from modeling.sync_batchnorm.replicate import patch_replication_callback
from modeling.deeplab import *
from torchvision.utils import make_grid
from dataloaders.utils import decode_seg_map_sequence
from skimage.transform import resize
from dataloaders import custom_transforms as tr
from PIL import Image
from torchvision import transforms
input_size = 512
def main(argv):
print("All imports done. Test Successful")
if __name__ == "__main__":
main(sys.argv)
|
<commit_before><commit_msg>Test script for the heart container<commit_after>#Test script for the heart container: verifies that all the imports are successful
import sys
import os
import numpy as np
import h5py
import fnmatch
from modeling.sync_batchnorm.replicate import patch_replication_callback
from modeling.deeplab import *
from torchvision.utils import make_grid
from dataloaders.utils import decode_seg_map_sequence
from skimage.transform import resize
from dataloaders import custom_transforms as tr
from PIL import Image
from torchvision import transforms
input_size = 512
def main(argv):
print("All imports done. Test Successful")
if __name__ == "__main__":
main(sys.argv)
|
|
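The smoke test above dies on the first failing import. A hedged variant that reports every unavailable module instead, using only the standard library; the module list below is an illustrative subset of the record's imports, not the container's full dependency set:
import importlib

MODULES = ['numpy', 'h5py', 'torch', 'torchvision', 'skimage', 'PIL']

def check_imports(modules=MODULES):
    # Collect (name, error) pairs instead of stopping at the first ImportError.
    failed = []
    for name in modules:
        try:
            importlib.import_module(name)
        except ImportError as exc:
            failed.append((name, str(exc)))
    return failed

if __name__ == '__main__':
    for name, err in check_imports():
        print('FAILED: {0}: {1}'.format(name, err))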
b223c8be2bcb11d529a07997c05a9c5ab2b183b2
|
csunplugged/tests/resources/generators/test_run_length_encoding.py
|
csunplugged/tests/resources/generators/test_run_length_encoding.py
|
from unittest import mock
from django.http import QueryDict
from django.test import tag
from resources.generators.RunLengthEncodingResourceGenerator import RunLengthEncodingResourceGenerator
from tests.resources.generators.utils import BaseGeneratorTest
@tag("resource")
class RunLengthEncodingResourceGeneratorTest(BaseGeneratorTest):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.language = "en"
def test_worksheet_version_values(self):
query = QueryDict("worksheet_type=student-basic&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.run_parameter_smoke_tests(generator, "worksheet_type")
def test_subtitle_student_basic_a4(self):
query = QueryDict("worksheet_type=student-basic&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Kid Fax - a4"
)
def test_subtitle_student_basic_letter(self):
query = QueryDict("worksheet_type=student-basic&paper_size=letter")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Kid Fax - letter"
)
def test_subtitle_student_create_a4(self):
query = QueryDict("worksheet_type=student-create&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Create your own - a4"
)
def test_subtitle_student_create_letter(self):
query = QueryDict("worksheet_type=student-create&paper_size=letter")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Create your own - letter"
)
def test_subtitle_student_create_colour_a4(self):
query = QueryDict("worksheet_type=student-create-colour&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Create your own in colour - a4"
)
def test_subtitle_student_create_colour_letter(self):
query = QueryDict("worksheet_type=student-create-colour&paper_size=letter")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Create your own in colour - letter"
)
def test_subtitle_student_teacher_a4(self):
query = QueryDict("worksheet_type=teacher&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Teacher Worksheet - a4"
)
def test_subtitle_student_teacher_letter(self):
query = QueryDict("worksheet_type=teacher&paper_size=letter")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Teacher Worksheet - letter"
)
|
Add basic tests for run length encoding printable
|
Add basic tests for run length encoding printable
|
Python
|
mit
|
uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged
|
Add basic tests for run length encoding printable
|
from unittest import mock
from django.http import QueryDict
from django.test import tag
from resources.generators.RunLengthEncodingResourceGenerator import RunLengthEncodingResourceGenerator
from tests.resources.generators.utils import BaseGeneratorTest
@tag("resource")
class RunLengthEncodingResourceGeneratorTest(BaseGeneratorTest):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.language = "en"
def test_worksheet_version_values(self):
query = QueryDict("worksheet_type=student-basic&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.run_parameter_smoke_tests(generator, "worksheet_type")
def test_subtitle_student_basic_a4(self):
query = QueryDict("worksheet_type=student-basic&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Kid Fax - a4"
)
def test_subtitle_student_basic_letter(self):
query = QueryDict("worksheet_type=student-basic&paper_size=letter")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Kid Fax - letter"
)
def test_subtitle_student_create_a4(self):
query = QueryDict("worksheet_type=student-create&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Create your own - a4"
)
def test_subtitle_student_create_letter(self):
query = QueryDict("worksheet_type=student-create&paper_size=letter")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Create your own - letter"
)
def test_subtitle_student_create_colour_a4(self):
query = QueryDict("worksheet_type=student-create-colour&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Create your own in colour - a4"
)
def test_subtitle_student_create_colour_letter(self):
query = QueryDict("worksheet_type=student-create-colour&paper_size=letter")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Create your own in colour - letter"
)
def test_subtitle_student_teacher_a4(self):
query = QueryDict("worksheet_type=teacher&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Teacher Worksheet - a4"
)
def test_subtitle_student_teacher_letter(self):
query = QueryDict("worksheet_type=teacher&paper_size=letter")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Teacher Worksheet - letter"
)
|
<commit_before><commit_msg>Add basic tests for run length encoding printable<commit_after>
|
from unittest import mock
from django.http import QueryDict
from django.test import tag
from resources.generators.RunLengthEncodingResourceGenerator import RunLengthEncodingResourceGenerator
from tests.resources.generators.utils import BaseGeneratorTest
@tag("resource")
class RunLengthEncodingResourceGeneratorTest(BaseGeneratorTest):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.language = "en"
def test_worksheet_version_values(self):
query = QueryDict("worksheet_type=student-basic&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.run_parameter_smoke_tests(generator, "worksheet_type")
def test_subtitle_student_basic_a4(self):
query = QueryDict("worksheet_type=student-basic&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Kid Fax - a4"
)
def test_subtitle_student_basic_letter(self):
query = QueryDict("worksheet_type=student-basic&paper_size=letter")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Kid Fax - letter"
)
def test_subtitle_student_create_a4(self):
query = QueryDict("worksheet_type=student-create&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Create your own - a4"
)
def test_subtitle_student_create_letter(self):
query = QueryDict("worksheet_type=student-create&paper_size=letter")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Create your own - letter"
)
def test_subtitle_student_create_colour_a4(self):
query = QueryDict("worksheet_type=student-create-colour&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Create your own in colour - a4"
)
def test_subtitle_student_create_colour_letter(self):
query = QueryDict("worksheet_type=student-create-colour&paper_size=letter")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Create your own in colour - letter"
)
def test_subtitle_student_teacher_a4(self):
query = QueryDict("worksheet_type=teacher&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Teacher Worksheet - a4"
)
def test_subtitle_student_teacher_letter(self):
query = QueryDict("worksheet_type=teacher&paper_size=letter")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Teacher Worksheet - letter"
)
|
Add basic tests for run length encoding printablefrom unittest import mock
from django.http import QueryDict
from django.test import tag
from resources.generators.RunLengthEncodingResourceGenerator import RunLengthEncodingResourceGenerator
from tests.resources.generators.utils import BaseGeneratorTest
@tag("resource")
class RunLengthEncodingResourceGeneratorTest(BaseGeneratorTest):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.language = "en"
def test_worksheet_version_values(self):
query = QueryDict("worksheet_type=student-basic&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.run_parameter_smoke_tests(generator, "worksheet_type")
def test_subtitle_student_basic_a4(self):
query = QueryDict("worksheet_type=student-basic&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Kid Fax - a4"
)
def test_subtitle_student_basic_letter(self):
query = QueryDict("worksheet_type=student-basic&paper_size=letter")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Kid Fax - letter"
)
def test_subtitle_student_create_a4(self):
query = QueryDict("worksheet_type=student-create&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Create your own - a4"
)
def test_subtitle_student_create_letter(self):
query = QueryDict("worksheet_type=student-create&paper_size=letter")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Create your own - letter"
)
def test_subtitle_student_create_colour_a4(self):
query = QueryDict("worksheet_type=student-create-colour&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Create your own in colour - a4"
)
def test_subtitle_student_create_colour_letter(self):
query = QueryDict("worksheet_type=student-create-colour&paper_size=letter")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Create your own in colour - letter"
)
def test_subtitle_student_teacher_a4(self):
query = QueryDict("worksheet_type=teacher&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Teacher Worksheet - a4"
)
def test_subtitle_student_teacher_letter(self):
query = QueryDict("worksheet_type=teacher&paper_size=letter")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Teacher Worksheet - letter"
)
|
<commit_before><commit_msg>Add basic tests for run length encoding printable<commit_after>from unittest import mock
from django.http import QueryDict
from django.test import tag
from resources.generators.RunLengthEncodingResourceGenerator import RunLengthEncodingResourceGenerator
from tests.resources.generators.utils import BaseGeneratorTest
@tag("resource")
class RunLengthEncodingResourceGeneratorTest(BaseGeneratorTest):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.language = "en"
def test_worksheet_version_values(self):
query = QueryDict("worksheet_type=student-basic&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.run_parameter_smoke_tests(generator, "worksheet_type")
def test_subtitle_student_basic_a4(self):
query = QueryDict("worksheet_type=student-basic&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Kid Fax - a4"
)
def test_subtitle_student_basic_letter(self):
query = QueryDict("worksheet_type=student-basic&paper_size=letter")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Kid Fax - letter"
)
def test_subtitle_student_create_a4(self):
query = QueryDict("worksheet_type=student-create&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Create your own - a4"
)
def test_subtitle_student_create_letter(self):
query = QueryDict("worksheet_type=student-create&paper_size=letter")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Create your own - letter"
)
def test_subtitle_student_create_colour_a4(self):
query = QueryDict("worksheet_type=student-create-colour&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Create your own in colour - a4"
)
def test_subtitle_student_create_colour_letter(self):
query = QueryDict("worksheet_type=student-create-colour&paper_size=letter")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Student Worksheet - Create your own in colour - letter"
)
def test_subtitle_student_teacher_a4(self):
query = QueryDict("worksheet_type=teacher&paper_size=a4")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Teacher Worksheet - a4"
)
def test_subtitle_student_teacher_letter(self):
query = QueryDict("worksheet_type=teacher&paper_size=letter")
generator = RunLengthEncodingResourceGenerator(query)
self.assertEqual(
generator.subtitle,
"Teacher Worksheet - letter"
)
|
|
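The eight assertions above all follow one "<worksheet label> - <paper size>" shape. A hypothetical sketch of the subtitle logic they imply, with the labels read straight off the expected strings; the real RunLengthEncodingResourceGenerator may build this differently:
WORKSHEET_LABELS = {
    'student-basic': 'Student Worksheet - Kid Fax',
    'student-create': 'Student Worksheet - Create your own',
    'student-create-colour': 'Student Worksheet - Create your own in colour',
    'teacher': 'Teacher Worksheet',
}

def subtitle(worksheet_type, paper_size):
    # Matches every expected string asserted in the tests above.
    return '{0} - {1}'.format(WORKSHEET_LABELS[worksheet_type], paper_size)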
4569c22d2d0245641e0c2696f798f273405c6bee
|
test_add_group.py
|
test_add_group.py
|
# -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("group_002")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("another_header")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("another_footer")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
Test recorded and exported to the project
|
Test recorded and exported to the project
|
Python
|
mit
|
spcartman/python_qa
|
Test recorded and exported to the project
|
# -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("group_002")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("another_header")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("another_footer")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test recorded and exported to the project<commit_after>
|
# -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("group_002")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("another_header")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("another_footer")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
Test recorded and exported to the project# -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("group_002")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("another_header")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("another_footer")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test recorded and exported to the project<commit_after># -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("group_002")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("another_header")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("another_footer")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
|
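The recorded test repeats the click/clear/send_keys triple for every field. A hedged refactor that pulls group creation into one helper so further tests can vary the data; the element names come from the recorded script, the helper itself is an assumption:
def create_group(wd, name, header, footer):
    # Navigate to the group form and fill it with the given values.
    wd.find_element_by_link_text('groups').click()
    wd.find_element_by_name('new').click()
    for field, value in (('group_name', name),
                         ('group_header', header),
                         ('group_footer', footer)):
        element = wd.find_element_by_name(field)
        element.clear()
        element.send_keys(value)
    wd.find_element_by_name('submit').click()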
c16620dffd2cd6396eb6b7db76a9c29849a16500
|
components/lie_structures/lie_structures/cheminfo_descriptors.py
|
components/lie_structures/lie_structures/cheminfo_descriptors.py
|
# -*- coding: utf-8 -*-
"""
file: cheminfo_descriptors.py
Cinfony driven cheminformatics descriptor functions
"""
import logging
from twisted.logger import Logger
from . import toolkits
logging = Logger()
def available_descriptors():
"""
List available molecular descriptors for all active cheminformatics
packages
The webel toolkit has a descriptor service but the supported
descriptors are not listed in Cinfony. The toolkit is available
however.
:rtype: dict
"""
available_descs = {'webel': None}
for toolkit, obj in toolkits.items():
if hasattr(obj, 'descs'):
available_descs[toolkit] = obj.descs
return available_descs
|
Add support for cheminformatics descriptors
|
Add support for cheminformatics descriptors
|
Python
|
apache-2.0
|
MD-Studio/MDStudio,MD-Studio/MDStudio,MD-Studio/MDStudio,MD-Studio/MDStudio,MD-Studio/MDStudio
|
Add support for cheminformatics descriptors
|
# -*- coding: utf-8 -*-
"""
file: cheminfo_descriptors.py
Cinfony driven cheminformatics descriptor functions
"""
import logging
from twisted.logger import Logger
from . import toolkits
logging = Logger()
def available_descriptors():
"""
List available molecular descriptors for all active cheminformatics
packages
The webel toolkit has a descriptor service but the supported
descriptors are not listed in Cinfony. The toolkit is available
however.
:rtype: dict
"""
available_descs = {'webel': None}
for toolkit, obj in toolkits.items():
if hasattr(obj, 'descs'):
available_descs[toolkit] = obj.descs
return available_descs
|
<commit_before><commit_msg>Add support for cheminformatics descriptors<commit_after>
|
# -*- coding: utf-8 -*-
"""
file: cheminfo_descriptors.py
Cinfony driven cheminformatics descriptor functions
"""
import logging
from twisted.logger import Logger
from . import toolkits
logging = Logger()
def available_descriptors():
"""
List available molecular descriptors for all active cheminformatics
packages
The webel toolkit has a descriptor service but the supported
descriptors are not listed in Cinfony. The toolkit is available
however.
:rtype: dict
"""
available_descs = {'webel': None}
for toolkit, obj in toolkits.items():
if hasattr(obj, 'descs'):
available_descs[toolkit] = obj.descs
return available_descs
|
Add support for cheminformatics descriptors# -*- coding: utf-8 -*-
"""
file: cheminfo_descriptors.py
Cinfony driven cheminformatics descriptor functions
"""
import logging
from twisted.logger import Logger
from . import toolkits
logging = Logger()
def available_descriptors():
"""
List available molecular descriptors for all active cheminformatics
packages
The webel toolkit has a descriptor service but the supported
descriptors are not listed in Cinfony. The toolkit is available
however.
:rtype: dict
"""
available_descs = {'webel': None}
for toolkit, obj in toolkits.items():
if hasattr(obj, 'descs'):
available_descs[toolkit] = obj.descs
return available_descs
|
<commit_before><commit_msg>Add support for cheminformatics descriptors<commit_after># -*- coding: utf-8 -*-
"""
file: cheminfo_descriptors.py
Cinfony driven cheminformatics descriptor functions
"""
import logging
from twisted.logger import Logger
from . import toolkits
logging = Logger()
def available_descriptors():
"""
List available molecular descriptors for all active cheminformatics
packages
The webel toolkit has a descriptor service but the supported
descriptors are not listed in Cinfony. The toolkit is available
however.
:rtype: dict
"""
available_descs = {'webel': None}
for toolkit, obj in toolkits.items():
if hasattr(obj, 'descs'):
available_descs[toolkit] = obj.descs
return available_descs
|
|
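A short usage sketch for available_descriptors(); the import path is inferred from the record's file location and is an assumption:
from lie_structures.cheminfo_descriptors import available_descriptors

for toolkit, descs in available_descriptors().items():
    if descs is None:
        # webel exposes a descriptor service but no list via Cinfony.
        print('{0}: descriptor list not exposed'.format(toolkit))
    else:
        print('{0}: {1} descriptors'.format(toolkit, len(descs)))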
ee3e04d32e39d6ac7ef4ac7abc2363a1ac9b8917
|
example_music.py
|
example_music.py
|
from screenfactory import create_screen
from modules.music import Music
import config
import time
import pygame
screen = create_screen()
music = Music(screen)
music.start()
while True:
if config.virtual_hardware:
pygame.time.wait(10)
for event in pygame.event.get():
pass
else:
time.sleep(0.01)
|
Add an example for the music module
|
Add an example for the music module
|
Python
|
mit
|
derblub/pixelpi,marian42/pixelpi,derblub/pixelpi,derblub/pixelpi,marian42/pixelpi
|
Add an example for the music module
|
from screenfactory import create_screen
from modules.music import Music
import config
import time
import pygame
screen = create_screen()
music = Music(screen)
music.start()
while True:
if config.virtual_hardware:
pygame.time.wait(10)
for event in pygame.event.get():
pass
else:
time.sleep(0.01)
|
<commit_before><commit_msg>Add an example for the music module<commit_after>
|
from screenfactory import create_screen
from modules.music import Music
import config
import time
import pygame
screen = create_screen()
music = Music(screen)
music.start()
while True:
if config.virtual_hardware:
pygame.time.wait(10)
for event in pygame.event.get():
pass
else:
time.sleep(0.01)
|
Add an example for the music modulefrom screenfactory import create_screen
from modules.music import Music
import config
import time
import pygame
screen = create_screen()
music = Music(screen)
music.start()
while True:
if config.virtual_hardware:
pygame.time.wait(10)
for event in pygame.event.get():
pass
else:
time.sleep(0.01)
|
<commit_before><commit_msg>Add an example for the music module<commit_after>from screenfactory import create_screen
from modules.music import Music
import config
import time
import pygame
screen = create_screen()
music = Music(screen)
music.start()
while True:
if config.virtual_hardware:
pygame.time.wait(10)
for event in pygame.event.get():
pass
else:
time.sleep(0.01)
|
|
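The busy-wait at the end keeps the process alive and, on virtual hardware, drains the pygame event queue so the emulated screen stays responsive. A hedged sketch of that loop body as a reusable helper; only the pygame and time calls come from the example:
import time
import pygame

def idle(virtual_hardware, delay_ms=10):
    if virtual_hardware:
        pygame.time.wait(delay_ms)
        pygame.event.get()  # drain the queue so the window keeps repainting
    else:
        time.sleep(delay_ms / 1000.0)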
05c7d62e0e26000440e72d0700c9806d7a409744
|
games/migrations/0023_auto_20171104_2246.py
|
games/migrations/0023_auto_20171104_2246.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-11-04 21:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('games', '0022_installer_reason'),
]
operations = [
migrations.AddField(
model_name='game',
name='change_for',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='games.Game'),
),
migrations.AddField(
model_name='gamesubmission',
name='reason',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='game',
name='slug',
field=models.SlugField(blank=True, null=True, unique=True),
),
]
|
Add migrations for game change suggestions
|
Add migrations for game change suggestions
|
Python
|
agpl-3.0
|
lutris/website,lutris/website,lutris/website,lutris/website
|
Add migrations for game change suggestions
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-11-04 21:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('games', '0022_installer_reason'),
]
operations = [
migrations.AddField(
model_name='game',
name='change_for',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='games.Game'),
),
migrations.AddField(
model_name='gamesubmission',
name='reason',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='game',
name='slug',
field=models.SlugField(blank=True, null=True, unique=True),
),
]
|
<commit_before><commit_msg>Add migrations for game change suggestions<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-11-04 21:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('games', '0022_installer_reason'),
]
operations = [
migrations.AddField(
model_name='game',
name='change_for',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='games.Game'),
),
migrations.AddField(
model_name='gamesubmission',
name='reason',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='game',
name='slug',
field=models.SlugField(blank=True, null=True, unique=True),
),
]
|
Add migrations for game change suggestions# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-11-04 21:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('games', '0022_installer_reason'),
]
operations = [
migrations.AddField(
model_name='game',
name='change_for',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='games.Game'),
),
migrations.AddField(
model_name='gamesubmission',
name='reason',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='game',
name='slug',
field=models.SlugField(blank=True, null=True, unique=True),
),
]
|
<commit_before><commit_msg>Add migrations for game change suggestions<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-11-04 21:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('games', '0022_installer_reason'),
]
operations = [
migrations.AddField(
model_name='game',
name='change_for',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='games.Game'),
),
migrations.AddField(
model_name='gamesubmission',
name='reason',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='game',
name='slug',
field=models.SlugField(blank=True, null=True, unique=True),
),
]
|
|
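A hypothetical reconstruction of the model fields this migration implies, read off its AddField/AlterField operations; the real games/models.py certainly carries more fields than shown:
from django.db import models

class Game(models.Model):
    slug = models.SlugField(blank=True, null=True, unique=True)
    # A change suggestion points at the game it proposes to change.
    change_for = models.ForeignKey('self', blank=True, null=True,
                                   on_delete=models.CASCADE)

class GameSubmission(models.Model):
    reason = models.TextField(blank=True, null=True)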
db41bce3d90cfada9916baa8f9267cd9e6160a94
|
examples/open_file.py
|
examples/open_file.py
|
import numpy as np
import pyh5md
f = pyh5md.H5MD_File('poc.h5', 'r')
at = f.trajectory('atoms')
at_pos = at.data('position')
r = at_pos.v.value
print r
f.f.close()
|
Add an example for opening a file.
|
Add an example for opening a file.
|
Python
|
bsd-3-clause
|
MrTheodor/pyh5md,khinsen/pyh5md
|
Add an example for opening a file.
|
import numpy as np
import pyh5md
f = pyh5md.H5MD_File('poc.h5', 'r')
at = f.trajectory('atoms')
at_pos = at.data('position')
r = at_pos.v.value
print r
f.f.close()
|
<commit_before><commit_msg>Add an example for opening a file.<commit_after>
|
import numpy as np
import pyh5md
f = pyh5md.H5MD_File('poc.h5', 'r')
at = f.trajectory('atoms')
at_pos = at.data('position')
r = at_pos.v.value
print r
f.f.close()
|
Add an example for opening a file.import numpy as np
import pyh5md
f = pyh5md.H5MD_File('poc.h5', 'r')
at = f.trajectory('atoms')
at_pos = at.data('position')
r = at_pos.v.value
print r
f.f.close()
|
<commit_before><commit_msg>Add an example for opening a file.<commit_after>import numpy as np
import pyh5md
f = pyh5md.H5MD_File('poc.h5', 'r')
at = f.trajectory('atoms')
at_pos = at.data('position')
r = at_pos.v.value
print r
f.f.close()
|
|
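The example above is Python 2 ('print r') and reads the dataset through h5py's long-deprecated .value attribute. A hedged Python 3 translation, assuming the same pyh5md 0.x API and that at_pos.v is a plain h5py dataset:
import pyh5md

f = pyh5md.H5MD_File('poc.h5', 'r')
at = f.trajectory('atoms')
at_pos = at.data('position')
r = at_pos.v[...]  # slicing reads the whole dataset, replacing .value
print(r)
f.f.close()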
10eb703867fd10df543a141837c2a57d1052ba2c
|
ideascube/conf/kb_civ_babylab.py
|
ideascube/conf/kb_civ_babylab.py
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
from django.utils.translation import ugettext_lazy as _
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'BabyLab'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'bsfcampus',
},
{
'id': 'khanacademy',
},
{
'id': 'wikistage',
'languages': ['fr']
},
{
'id': 'wikimooc',
'languages': ['fr']
},
{
'id': 'vikidia',
'languages': ['fr']
},
{
'id': 'universcience',
'languages': ['fr']
},
{
'id': 'e-penser',
'languages': ['fr']
},
{
'id': 'deus-ex-silicium',
'languages': ['fr']
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'wikipedia',
'languages': ['fr']
},
{
'id': 'wikiversity',
'languages': ['fr']
},
{
'id': 'ted',
'sessions': [
('tedxgeneva2014.fr', 'Geneva 2014'),
('tedxlausanne2012.fr', 'Lausanne 2012'),
('tedxlausanne2013.fr', 'Lausanne 2013'),
('tedxlausanne2014.fr', 'Lausanne 2014'),
]
},
]
|
Rename file with correct pattern
|
Rename file with correct pattern
|
Python
|
agpl-3.0
|
ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube
|
Rename file with correct pattern
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
from django.utils.translation import ugettext_lazy as _
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'BabyLab'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'bsfcampus',
},
{
'id': 'khanacademy',
},
{
'id': 'wikistage',
'languages': ['fr']
},
{
'id': 'wikimooc',
'languages': ['fr']
},
{
'id': 'vikidia',
'languages': ['fr']
},
{
'id': 'universcience',
'languages': ['fr']
},
{
'id': 'e-penser',
'languages': ['fr']
},
{
'id': 'deus-ex-silicium',
'languages': ['fr']
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'wikipedia',
'languages': ['fr']
},
{
'id': 'wikiversity',
'languages': ['fr']
},
{
'id': 'ted',
'sessions': [
('tedxgeneva2014.fr', 'Geneva 2014'),
('tedxlausanne2012.fr', 'Lausanne 2012'),
('tedxlausanne2013.fr', 'Lausanne 2013'),
('tedxlausanne2014.fr', 'Lausanne 2014'),
]
},
]
|
<commit_before><commit_msg>Rename file with correct pattern<commit_after>
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
from django.utils.translation import ugettext_lazy as _
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'BabyLab'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'bsfcampus',
},
{
'id': 'khanacademy',
},
{
'id': 'wikistage',
'languages': ['fr']
},
{
'id': 'wikimooc',
'languages': ['fr']
},
{
'id': 'vikidia',
'languages': ['fr']
},
{
'id': 'universcience',
'languages': ['fr']
},
{
'id': 'e-penser',
'languages': ['fr']
},
{
'id': 'deus-ex-silicium',
'languages': ['fr']
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'wikipedia',
'languages': ['fr']
},
{
'id': 'wikiversity',
'languages': ['fr']
},
{
'id': 'ted',
'sessions': [
('tedxgeneva2014.fr', 'Geneva 2014'),
('tedxlausanne2012.fr', 'Lausanne 2012'),
('tedxlausanne2013.fr', 'Lausanne 2013'),
('tedxlausanne2014.fr', 'Lausanne 2014'),
]
},
]
|
Rename file with correct pattern# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
from django.utils.translation import ugettext_lazy as _
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'BabyLab'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'bsfcampus',
},
{
'id': 'khanacademy',
},
{
'id': 'wikistage',
'languages': ['fr']
},
{
'id': 'wikimooc',
'languages': ['fr']
},
{
'id': 'vikidia',
'languages': ['fr']
},
{
'id': 'universcience',
'languages': ['fr']
},
{
'id': 'e-penser',
'languages': ['fr']
},
{
'id': 'deus-ex-silicium',
'languages': ['fr']
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'wikipedia',
'languages': ['fr']
},
{
'id': 'wikiversity',
'languages': ['fr']
},
{
'id': 'ted',
'sessions': [
('tedxgeneva2014.fr', 'Geneva 2014'),
('tedxlausanne2012.fr', 'Lausanne 2012'),
('tedxlausanne2013.fr', 'Lausanne 2013'),
('tedxlausanne2014.fr', 'Lausanne 2014'),
]
},
]
|
<commit_before><commit_msg>Rename file with correct pattern<commit_after># -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
from django.utils.translation import ugettext_lazy as _
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'BabyLab'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'bsfcampus',
},
{
'id': 'khanacademy',
},
{
'id': 'wikistage',
'languages': ['fr']
},
{
'id': 'wikimooc',
'languages': ['fr']
},
{
'id': 'vikidia',
'languages': ['fr']
},
{
'id': 'universcience',
'languages': ['fr']
},
{
'id': 'e-penser',
'languages': ['fr']
},
{
'id': 'deus-ex-silicium',
'languages': ['fr']
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'wikipedia',
'languages': ['fr']
},
{
'id': 'wikiversity',
'languages': ['fr']
},
{
'id': 'ted',
'sessions': [
('tedxgeneva2014.fr', 'Geneva 2014'),
('tedxlausanne2012.fr', 'Lausanne 2012'),
('tedxlausanne2013.fr', 'Lausanne 2013'),
('tedxlausanne2014.fr', 'Lausanne 2014'),
]
},
]
|
|
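Several cards above carry a 'languages' list while others omit it. A hedged sketch of how such entries could be filtered for the active locale; HOME_CARDS is the structure from the record, the helper is an assumption, not ideascube code:
def cards_for_language(cards, lang):
    # Keep untagged cards and cards explicitly tagged for this language.
    return [card for card in cards
            if 'languages' not in card or lang in card['languages']]

# cards_for_language(HOME_CARDS, 'fr') keeps every entry defined above.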
35317e778b2fe1d238e21954df1eac0c5380b00b
|
generate_horoscope.py
|
generate_horoscope.py
|
#!/usr/bin/env python3
# encoding: utf-8
import argparse
import sqlite3
import sys
"""generate_horoscope.py: Generates horoscopes based provided corpuses"""
__author__ = "Project Zodiacy"
__copyright__ = "Copyright 2015, Project Zodiacy"
_parser = argparse.ArgumentParser(description="Awesome SQLite importer")
_parser.add_argument('-d', '--database', dest='database', required=True, help='sqlite database file')
_parser.add_argument('-s', '--sign', dest='sign', help='zodiac sign to generate', default=None)
_parser.add_argument('-k', '--keyword', dest='keyword', help='keyword for the horoscope', default=None)
_parser.add_argument('-t', '--threshold', dest='threshold', help='minimum count of horoscopes for the given filters', default=10)
def keyword_valid(cursor, keyword, threshold=10):
""" Checks whether enough horoscopes are present for the keyword """
# TODO implement
return True
def get_corpuses(cursor, with_rating=False, zodiac_sign=None, keyword=None):
""" Returns a cursor with all horoscopes for the given parameters """
# ugly code =(
zodiac_signs = dict(zip(['general', 'aries', 'taurus', 'gemini', 'cancer', 'leo', 'virgo', 'libra', 'scorpio', 'sagittarius', 'capricorn', 'aquarius', 'pisces'], range(13)))
if zodiac_sign not in zodiac_signs:
if zodiac_sign is not None:
raise ValueError('Invalid zodiac sign')
else:
zodiac_sign_ordinal = zodiac_signs[zodiac_sign]
base_stmt = 'SELECT interp%s from horoscopes' % (',rating' if with_rating else '')
if zodiac_sign is None:
if keyword is None:
return cursor.execute(base_stmt)
else:
return cursor.execute(base_stmt + ' WHERE keyword=?', (keyword,))
else:
if keyword is None:
return cursor.execute(base_stmt + ' WHERE sign=?', (str(zodiac_sign_ordinal),))
else:
return cursor.execute(base_stmt + ' WHERE sign=? and keyword=?', (str(zodiac_sign_ordinal), keyword))
if __name__ == '__main__':
args = _parser.parse_args()
with sqlite3.connect(args.database) as conn:
        if not keyword_valid(conn.cursor(), args.keyword, args.threshold):
            print('Not enough horoscopes for the given keyword', file=sys.stderr)
sys.exit(1)
corpuses = get_corpuses(conn.cursor(), zodiac_sign=None, keyword='enthusiasm')
print(corpuses.fetchone())
|
Add corpus fetch from database
|
Add corpus fetch from database
Former-commit-id: 2241247b60e2829003059f8c9d7d7c72b3e49951
|
Python
|
mit
|
greenify/zodiacy,greenify/zodiacy
|
Add corpus fetch from database
Former-commit-id: 2241247b60e2829003059f8c9d7d7c72b3e49951
|
#!/usr/bin/env python3
# encoding: utf-8
import argparse
import sqlite3
import sys
"""generate_horoscope.py: Generates horoscopes based provided corpuses"""
__author__ = "Project Zodiacy"
__copyright__ = "Copyright 2015, Project Zodiacy"
_parser = argparse.ArgumentParser(description="Awesome SQLite importer")
_parser.add_argument('-d', '--database', dest='database', required=True, help='sqlite database file')
_parser.add_argument('-s', '--sign', dest='sign', help='zodiac sign to generate', default=None)
_parser.add_argument('-k', '--keyword', dest='keyword', help='keyword for the horoscope', default=None)
_parser.add_argument('-t', '--threshold', dest='threshold', help='minimum count of horoscopes for the given filters', default=10)
def keyword_valid(cursor, keyword, threshold=10):
""" Checks whether enough horoscopes are present for the keyword """
# TODO implement
return True
def get_corpuses(cursor, with_rating=False, zodiac_sign=None, keyword=None):
""" Returns a cursor with all horoscopes for the given parameters """
# ugly code =(
zodiac_signs = dict(zip(['general', 'aries', 'taurus', 'gemini', 'cancer', 'leo', 'virgo', 'libra', 'scorpio', 'sagittarius', 'capricorn', 'aquarius', 'pisces'], range(13)))
if zodiac_sign not in zodiac_signs:
if zodiac_sign is not None:
raise ValueError('Invalid zodiac sign')
else:
zodiac_sign_ordinal = zodiac_signs[zodiac_sign]
base_stmt = 'SELECT interp%s from horoscopes' % (',rating' if with_rating else '')
if zodiac_sign is None:
if keyword is None:
return cursor.execute(base_stmt)
else:
return cursor.execute(base_stmt + ' WHERE keyword=?', (keyword,))
else:
if keyword is None:
return cursor.execute(base_stmt + ' WHERE sign=?', (str(zodiac_sign_ordinal),))
else:
return cursor.execute(base_stmt + ' WHERE sign=? and keyword=?', (str(zodiac_sign_ordinal), keyword))
if __name__ == '__main__':
args = _parser.parse_args()
with sqlite3.connect(args.database) as conn:
        if not keyword_valid(conn.cursor(), args.keyword, args.threshold):
            print('Not enough horoscopes for the given keyword', file=sys.stderr)
sys.exit(1)
corpuses = get_corpuses(conn.cursor(), zodiac_sign=None, keyword='enthusiasm')
print(corpuses.fetchone())
|
<commit_before><commit_msg>Add corpus fetch from database
Former-commit-id: 2241247b60e2829003059f8c9d7d7c72b3e49951<commit_after>
|
#!/usr/bin/env python3
# encoding: utf-8
import argparse
import sqlite3
import sys
"""generate_horoscope.py: Generates horoscopes based provided corpuses"""
__author__ = "Project Zodiacy"
__copyright__ = "Copyright 2015, Project Zodiacy"
_parser = argparse.ArgumentParser(description="Awesome SQLite importer")
_parser.add_argument('-d', '--database', dest='database', required=True, help='sqlite database file')
_parser.add_argument('-s', '--sign', dest='sign', help='zodiac sign to generate', default=None)
_parser.add_argument('-k', '--keyword', dest='keyword', help='keyword for the horoscope', default=None)
_parser.add_argument('-t', '--threshold', dest='threshold', help='minimum count of horoscopes for the given filters', default=10)
def keyword_valid(cursor, keyword, threshold=10):
""" Checks whether enough horoscopes are present for the keyword """
# TODO implement
return True
def get_corpuses(cursor, with_rating=False, zodiac_sign=None, keyword=None):
""" Returns a cursor with all horoscopes for the given parameters """
# ugly code =(
zodiac_signs = dict(zip(['general', 'aries', 'taurus', 'gemini', 'cancer', 'leo', 'virgo', 'libra', 'scorpio', 'sagittarius', 'capricorn', 'aquarius', 'pisces'], range(13)))
if zodiac_sign not in zodiac_signs:
if zodiac_sign is not None:
raise ValueError('Invalid zodiac sign')
else:
zodiac_sign_ordinal = zodiac_signs[zodiac_sign]
base_stmt = 'SELECT interp%s from horoscopes' % (',rating' if with_rating else '')
if zodiac_sign is None:
if keyword is None:
return cursor.execute(base_stmt)
else:
return cursor.execute(base_stmt + ' WHERE keyword=?', (keyword,))
else:
if keyword is None:
return cursor.execute(base_stmt + ' WHERE sign=?', (str(zodiac_sign_ordinal),))
else:
return cursor.execute(base_stmt + ' WHERE sign=? and keyword=?', (str(zodiac_sign_ordinal), keyword))
if __name__ == '__main__':
args = _parser.parse_args()
with sqlite3.connect(args.database) as conn:
        if not keyword_valid(conn.cursor(), args.keyword, args.threshold):
            print('Not enough horoscopes for the given keyword', file=sys.stderr)
sys.exit(1)
corpuses = get_corpuses(conn.cursor(), zodiac_sign=None, keyword='enthusiasm')
print(corpuses.fetchone())
|
Add corpus fetch from database
Former-commit-id: 2241247b60e2829003059f8c9d7d7c72b3e49951#!/usr/bin/env python3
# encoding: utf-8
import argparse
import sqlite3
import sys
"""generate_horoscope.py: Generates horoscopes based provided corpuses"""
__author__ = "Project Zodiacy"
__copyright__ = "Copyright 2015, Project Zodiacy"
_parser = argparse.ArgumentParser(description="Awesome horoscope generator")
_parser.add_argument('-d', '--database', dest='database', required=True, help='sqlite database file')
_parser.add_argument('-s', '--sign', dest='sign', help='zodiac sign to generate', default=None)
_parser.add_argument('-k', '--keyword', dest='keyword', help='keyword for the horoscope', default=None)
_parser.add_argument('-t', '--threshold', dest='threshold', help='minimum count of horoscopes for the given filters', default=10)
def keyword_valid(cursor, keyword, threshold=10):
""" Checks whether enough horoscopes are present for the keyword """
# TODO implement
return True
def get_corpuses(cursor, with_rating=False, zodiac_sign=None, keyword=None):
""" Returns a cursor with all horoscopes for the given parameters """
# ugly code =(
zodiac_signs = dict(zip(['general', 'aries', 'taurus', 'gemini', 'cancer', 'leo', 'virgo', 'libra', 'scorpio', 'sagittarius', 'capricorn', 'aquarius', 'pisces'], range(13)))
if zodiac_sign not in zodiac_signs:
if zodiac_sign is not None:
raise ValueError('Invalid zodiac sign')
else:
zodiac_sign_ordinal = zodiac_signs[zodiac_sign]
base_stmt = 'SELECT interp%s from horoscopes' % (',rating' if with_rating else '')
if zodiac_sign is None:
if keyword is None:
return cursor.execute(base_stmt)
else:
return cursor.execute(base_stmt + ' WHERE keyword=?', (keyword,))
else:
if keyword is None:
return cursor.execute(base_stmt + ' WHERE sign=?', (str(zodiac_sign_ordinal),))
else:
return cursor.execute(base_stmt + ' WHERE sign=? and keyword=?', (str(zodiac_sign_ordinal), keyword))
if __name__ == '__main__':
args = _parser.parse_args()
with sqlite3.connect(args.database) as conn:
        if not keyword_valid(conn.cursor(), args.keyword, args.threshold):
            print('Not enough horoscopes for the given keyword', file=sys.stderr)
sys.exit(1)
corpuses = get_corpuses(conn.cursor(), zodiac_sign=None, keyword='enthusiasm')
print(corpuses.fetchone())
|
<commit_before><commit_msg>Add corpus fetch from database
Former-commit-id: 2241247b60e2829003059f8c9d7d7c72b3e49951<commit_after>#!/usr/bin/env python3
# encoding: utf-8
import argparse
import sqlite3
import sys
"""generate_horoscope.py: Generates horoscopes based provided corpuses"""
__author__ = "Project Zodiacy"
__copyright__ = "Copyright 2015, Project Zodiacy"
_parser = argparse.ArgumentParser(description="Awesome horoscope generator")
_parser.add_argument('-d', '--database', dest='database', required=True, help='sqlite database file')
_parser.add_argument('-s', '--sign', dest='sign', help='zodiac sign to generate', default=None)
_parser.add_argument('-k', '--keyword', dest='keyword', help='keyword for the horoscope', default=None)
_parser.add_argument('-t', '--threshold', dest='threshold', help='minimum count of horoscopes for the given filters', default=10)
def keyword_valid(cursor, keyword, threshold=10):
""" Checks whether enough horoscopes are present for the keyword """
# TODO implement
return True
def get_corpuses(cursor, with_rating=False, zodiac_sign=None, keyword=None):
""" Returns a cursor with all horoscopes for the given parameters """
# ugly code =(
zodiac_signs = dict(zip(['general', 'aries', 'taurus', 'gemini', 'cancer', 'leo', 'virgo', 'libra', 'scorpio', 'sagittarius', 'capricorn', 'aquarius', 'pisces'], range(13)))
if zodiac_sign not in zodiac_signs:
if zodiac_sign is not None:
raise ValueError('Invalid zodiac sign')
else:
zodiac_sign_ordinal = zodiac_signs[zodiac_sign]
base_stmt = 'SELECT interp%s from horoscopes' % (',rating' if with_rating else '')
if zodiac_sign is None:
if keyword is None:
return cursor.execute(base_stmt)
else:
return cursor.execute(base_stmt + ' WHERE keyword=?', (keyword,))
else:
if keyword is None:
return cursor.execute(base_stmt + ' WHERE sign=?', (str(zodiac_sign_ordinal),))
else:
return cursor.execute(base_stmt + ' WHERE sign=? and keyword=?', (str(zodiac_sign_ordinal), keyword))
if __name__ == '__main__':
args = _parser.parse_args()
with sqlite3.connect(args.database) as conn:
        if not keyword_valid(conn.cursor(), args.keyword, args.threshold):
            print('Not enough horoscopes for the given keyword', file=sys.stderr)
sys.exit(1)
corpuses = get_corpuses(conn.cursor(), zodiac_sign=None, keyword='enthusiasm')
print(corpuses.fetchone())
|
|
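A quick usage sketch for the corpus fetcher in the record above. Everything named here is illustrative rather than part of the commit: it assumes get_corpuses from the script is in scope, that 'horoscopes.db' exists with the interp/rating/sign/keyword columns the queries imply, and that 'enthusiasm' is a keyword present in the data.

import sqlite3

conn = sqlite3.connect('horoscopes.db')  # hypothetical database file
cursor = get_corpuses(conn.cursor(), with_rating=True,
                      zodiac_sign='aries', keyword='enthusiasm')
for interp, rating in cursor:            # with_rating=True selects both columns
    print(rating, interp[:60])           # rating plus the first 60 chars of text
conn.close()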
c0ab9b755b4906129988348b2247452b6dfc157f
|
plugins/modules/dedicated_server_display_name.py
|
plugins/modules/dedicated_server_display_name.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
DOCUMENTATION = '''
---
module: dedicated_server_display_name
short_description: Modify the server display name in ovh manager
description:
- Modify the server display name in ovh manager, to help you find your server with your own naming
author: Synthesio SRE Team
requirements:
- ovh >= 0.5.0
options:
service_name:
required: true
description: The service name
display_name:
required: true
description: The display name to set
'''
EXAMPLES = '''
synthesio.ovh.dedicated_server_display_name:
service_name: "{{ ovhname }}"
display_name: "{{ ansible_hostname }}"
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
try:
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def run_module():
module_args = ovh_argument_spec()
module_args.update(dict(
display_name=dict(required=True),
service_name=dict(required=True)
))
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
client = ovh_api_connect(module)
display_name = module.params['display_name']
service_name = module.params['service_name']
if module.check_mode:
module.exit_json(msg="display_name has been set to {} ! - (dry run mode)".format(display_name), changed=True)
try:
result = client.get('/dedicated/server/%s/serviceInfos' % service_name)
except APIError as api_error:
return module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
service_id = result["serviceId"]
resource = {
"resource": {
'displayName': display_name,
'name': service_name}}
try:
client.put(
'/service/%s' % service_id,
**resource
)
module.exit_json(
msg="displayName succesfully set to {} for {} !".format(display_name, service_name),
changed=True)
except APIError as api_error:
return module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
def main():
run_module()
if __name__ == '__main__':
main()
|
Add a module to set the "display name" of a dedicated server
|
INFRA-6896: Add a module to set the "display name" of a dedicated server
- Change the "display name" in ovh manager, so you can use your internal
naming for example
- No need to set internal names in reverse dns anymore
- Change is only visible in OVH manager (and API, of course)
|
Python
|
mit
|
synthesio/infra-ovh-ansible-module
|
INFRA-6896: Add a module to set the "display name" of a dedicated server
- Change the "display name" in ovh manager, so you can use your internal
naming for example
- No need to set internal names in reverse dns anymore
- Change is only visible in OVH manager (and API, of course)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
DOCUMENTATION = '''
---
module: dedicated_server_display_name
short_description: Modify the server display name in ovh manager
description:
- Modify the server display name in ovh manager, to help you find your server with your own naming
author: Synthesio SRE Team
requirements:
- ovh >= 0.5.0
options:
service_name:
required: true
description: The service name
display_name:
required: true
description: The display name to set
'''
EXAMPLES = '''
synthesio.ovh.dedicated_server_display_name:
service_name: "{{ ovhname }}"
display_name: "{{ ansible_hostname }}"
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
try:
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def run_module():
module_args = ovh_argument_spec()
module_args.update(dict(
display_name=dict(required=True),
service_name=dict(required=True)
))
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
client = ovh_api_connect(module)
display_name = module.params['display_name']
service_name = module.params['service_name']
if module.check_mode:
module.exit_json(msg="display_name has been set to {} ! - (dry run mode)".format(display_name), changed=True)
try:
result = client.get('/dedicated/server/%s/serviceInfos' % service_name)
except APIError as api_error:
return module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
service_id = result["serviceId"]
resource = {
"resource": {
'displayName': display_name,
'name': service_name}}
try:
client.put(
'/service/%s' % service_id,
**resource
)
module.exit_json(
msg="displayName succesfully set to {} for {} !".format(display_name, service_name),
changed=True)
except APIError as api_error:
return module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
def main():
run_module()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>INFRA-6896: Add a module to set the "display name" of a dedicated server
- Change the "display name" in ovh manager, so you can use your internal
naming for example
- No need to set internal names in reverse dns anymore
- Change is only visible in OVH manager (and API, of course)<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
DOCUMENTATION = '''
---
module: dedicated_server_display_name
short_description: Modify the server display name in ovh manager
description:
- Modify the server display name in ovh manager, to help you find your server with your own naming
author: Synthesio SRE Team
requirements:
- ovh >= 0.5.0
options:
service_name:
required: true
description: The service name
display_name:
required: true
description: The display name to set
'''
EXAMPLES = '''
synthesio.ovh.dedicated_server_display_name:
service_name: "{{ ovhname }}"
display_name: "{{ ansible_hostname }}"
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
try:
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def run_module():
module_args = ovh_argument_spec()
module_args.update(dict(
display_name=dict(required=True),
service_name=dict(required=True)
))
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
client = ovh_api_connect(module)
display_name = module.params['display_name']
service_name = module.params['service_name']
if module.check_mode:
module.exit_json(msg="display_name has been set to {} ! - (dry run mode)".format(display_name), changed=True)
try:
result = client.get('/dedicated/server/%s/serviceInfos' % service_name)
except APIError as api_error:
return module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
service_id = result["serviceId"]
resource = {
"resource": {
'displayName': display_name,
'name': service_name}}
try:
client.put(
'/service/%s' % service_id,
**resource
)
module.exit_json(
msg="displayName succesfully set to {} for {} !".format(display_name, service_name),
changed=True)
except APIError as api_error:
return module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
def main():
run_module()
if __name__ == '__main__':
main()
|
INFRA-6896: Add a module to set the "display name" of a dedicated server
- Change the "display name" in ovh manager, so you can use your internal
naming for example
- No need to set internal names in reverse dns anymore
- Change is only visible in OVH manager (and API, of course)#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
DOCUMENTATION = '''
---
module: dedicated_server_display_name
short_description: Modify the server display name in ovh manager
description:
- Modify the server display name in ovh manager, to help you find your server with your own naming
author: Synthesio SRE Team
requirements:
- ovh >= 0.5.0
options:
service_name:
required: true
description: The service name
display_name:
required: true
description: The display name to set
'''
EXAMPLES = '''
synthesio.ovh.dedicated_server_display_name:
service_name: "{{ ovhname }}"
display_name: "{{ ansible_hostname }}"
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
try:
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def run_module():
module_args = ovh_argument_spec()
module_args.update(dict(
display_name=dict(required=True),
service_name=dict(required=True)
))
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
client = ovh_api_connect(module)
display_name = module.params['display_name']
service_name = module.params['service_name']
if module.check_mode:
module.exit_json(msg="display_name has been set to {} ! - (dry run mode)".format(display_name), changed=True)
try:
result = client.get('/dedicated/server/%s/serviceInfos' % service_name)
except APIError as api_error:
return module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
service_id = result["serviceId"]
resource = {
"resource": {
'displayName': display_name,
'name': service_name}}
try:
client.put(
'/service/%s' % service_id,
**resource
)
module.exit_json(
msg="displayName succesfully set to {} for {} !".format(display_name, service_name),
changed=True)
except APIError as api_error:
return module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
def main():
run_module()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>INFRA-6896: Add a module to set the "display name" of a dedicated server
- Change the "display name" in ovh manager, so you can use your internal
naming for example
- No need to set internal names in reverse dns anymore
- Change is only visible in OVH manager (and API, of course)<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
DOCUMENTATION = '''
---
module: dedicated_server_display_name
short_description: Modify the server display name in ovh manager
description:
- Modify the server display name in ovh manager, to help you find your server with your own naming
author: Synthesio SRE Team
requirements:
- ovh >= 0.5.0
options:
service_name:
required: true
description: The service name
display_name:
required: true
description: The display name to set
'''
EXAMPLES = '''
synthesio.ovh.dedicated_server_display_name:
service_name: "{{ ovhname }}"
display_name: "{{ ansible_hostname }}"
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
try:
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def run_module():
module_args = ovh_argument_spec()
module_args.update(dict(
display_name=dict(required=True),
service_name=dict(required=True)
))
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
client = ovh_api_connect(module)
display_name = module.params['display_name']
service_name = module.params['service_name']
if module.check_mode:
module.exit_json(msg="display_name has been set to {} ! - (dry run mode)".format(display_name), changed=True)
try:
result = client.get('/dedicated/server/%s/serviceInfos' % service_name)
except APIError as api_error:
return module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
service_id = result["serviceId"]
resource = {
"resource": {
'displayName': display_name,
'name': service_name}}
try:
client.put(
'/service/%s' % service_id,
**resource
)
module.exit_json(
msg="displayName succesfully set to {} for {} !".format(display_name, service_name),
changed=True)
except APIError as api_error:
return module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
def main():
run_module()
if __name__ == '__main__':
main()
|
|
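For readers who want to see the raw API traffic the Ansible module above wraps, here is a hedged sketch of the same two calls made with the python-ovh client directly. The endpoint, credentials, server name and display name are all placeholders.

import ovh

client = ovh.Client(endpoint='ovh-eu',
                    application_key='<app key>',
                    application_secret='<app secret>',
                    consumer_key='<consumer key>')
server = 'ns123456.ip-1-2-3.eu'                       # placeholder service name
info = client.get('/dedicated/server/%s/serviceInfos' % server)
client.put('/service/%s' % info['serviceId'],         # same payload the module builds
           resource={'displayName': 'web-frontend-01', 'name': server})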
8fbc5877fa97b6b8df621ff7afe7515b501660fc
|
LeetCode/ConvertStringToCamelCase.py
|
LeetCode/ConvertStringToCamelCase.py
|
def to_camel_case(text):
if len(text) < 2:
return text
capped_camel = "".join([word.title() for word in text.replace('-','_').split('_')])
return capped_camel if text[0].isupper() else capped_camel[0].lower()+capped_camel[1:]
|
Convert string to camel case
|
Codewars: Convert string to camel case
|
Python
|
unlicense
|
SelvorWhim/competitive,SelvorWhim/competitive,SelvorWhim/competitive,SelvorWhim/competitive
|
Codewars: Convert string to camel case
|
def to_camel_case(text):
if len(text) < 2:
return text
capped_camel = "".join([word.title() for word in text.replace('-','_').split('_')])
return capped_camel if text[0].isupper() else capped_camel[0].lower()+capped_camel[1:]
|
<commit_before><commit_msg>Codewars: Convert string to camel case<commit_after>
|
def to_camel_case(text):
if len(text) < 2:
return text
capped_camel = "".join([word.title() for word in text.replace('-','_').split('_')])
return capped_camel if text[0].isupper() else capped_camel[0].lower()+capped_camel[1:]
|
Codewars: Convert string to camel casedef to_camel_case(text):
if len(text) < 2:
return text
capped_camel = "".join([word.title() for word in text.replace('-','_').split('_')])
return capped_camel if text[0].isupper() else capped_camel[0].lower()+capped_camel[1:]
|
<commit_before><commit_msg>Codewars: Convert string to camel case<commit_after>def to_camel_case(text):
if len(text) < 2:
return text
capped_camel = "".join([word.title() for word in text.replace('-','_').split('_')])
return capped_camel if text[0].isupper() else capped_camel[0].lower()+capped_camel[1:]
|
|
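A few spot checks for to_camel_case from the record above (assuming the function is in scope); the expected outputs follow directly from the implementation.

print(to_camel_case('the-stealth-warrior'))  # theStealthWarrior
print(to_camel_case('The_Stealth_Warrior'))  # TheStealthWarrior (first char kept upper)
print(to_camel_case('A'))                    # A (len < 2 short-circuit)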
4fbb9ca1b055b040214c82dc307f69793947b800
|
api/sync_wallet.py
|
api/sync_wallet.py
|
import urlparse
import os, sys
import json
tools_dir = os.environ.get('TOOLSDIR')
lib_path = os.path.abspath(tools_dir)
sys.path.append(lib_path)
from msc_apps import *
data_dir_root = os.environ.get('DATADIR')
def sync_wallet_response(request_dict):
if not request_dict.has_key('type'):
return (None, 'No field type in response dict '+str(request_dict))
print request_dict
req_type = request_dict['type'][0].upper()
if req_type == "SYNCWALLET":
response_data = syncWallets(request_dict['masterWallets'][0])
else:
return (None, req_type + ' is not supported')
response = { 'status': 'OK', 'data': response_data }
return (json.dumps(response), None)
def syncWallets(master_wallets_json):
master_wallets = json.loads(master_wallets_json)
print master_wallets
for wallet in master_wallets:
uuid = wallet['uuid']
filename = data_dir_root + '/wallets/' + uuid + '.json'
with open(filename, 'w') as f:
json.dump(wallet, f)
return "OK"
def sync_wallet_handler(environ, start_response):
return general_handler(environ, start_response, sync_wallet_response)
|
Add handler for syncing wallets to server
|
Add handler for syncing wallets to server
|
Python
|
agpl-3.0
|
habibmasuro/omniwallet,VukDukic/omniwallet,dexX7/omniwallet,FuzzyBearBTC/omniwallet,Nevtep/omniwallet,OmniLayer/omniwallet,arowser/omniwallet,FuzzyBearBTC/omniwallet,ripper234/omniwallet,achamely/omniwallet,FuzzyBearBTC/omniwallet,arowser/omniwallet,OmniLayer/omniwallet,OmniLayer/omniwallet,maran/omniwallet,maran/omniwallet,curtislacy/omniwallet,achamely/omniwallet,achamely/omniwallet,VukDukic/omniwallet,dexX7/omniwallet,dexX7/omniwallet,maran/omniwallet,habibmasuro/omniwallet,habibmasuro/omniwallet,arowser/omniwallet,Nevtep/omniwallet,Nevtep/omniwallet,Nevtep/omniwallet,VukDukic/omniwallet,habibmasuro/omniwallet,achamely/omniwallet,curtislacy/omniwallet,OmniLayer/omniwallet,curtislacy/omniwallet,ripper234/omniwallet,ripper234/omniwallet
|
Add handler for syncing wallets to server
|
import urlparse
import os, sys
import json
tools_dir = os.environ.get('TOOLSDIR')
lib_path = os.path.abspath(tools_dir)
sys.path.append(lib_path)
from msc_apps import *
data_dir_root = os.environ.get('DATADIR')
def sync_wallet_response(request_dict):
if not request_dict.has_key('type'):
return (None, 'No field type in response dict '+str(request_dict))
print request_dict
req_type = request_dict['type'][0].upper()
if req_type == "SYNCWALLET":
response_data = syncWallets(request_dict['masterWallets'][0])
else:
return (None, req_type + ' is not supported')
response = { 'status': 'OK', 'data': response_data }
return (json.dumps(response), None)
def syncWallets(master_wallets_json):
master_wallets = json.loads(master_wallets_json)
print master_wallets
for wallet in master_wallets:
uuid = wallet['uuid']
filename = data_dir_root + '/wallets/' + uuid + '.json'
with open(filename, 'w') as f:
json.dump(wallet, f)
return "OK"
def sync_wallet_handler(environ, start_response):
return general_handler(environ, start_response, sync_wallet_response)
|
<commit_before><commit_msg>Add handler for syncing wallets to server<commit_after>
|
import urlparse
import os, sys
import json
tools_dir = os.environ.get('TOOLSDIR')
lib_path = os.path.abspath(tools_dir)
sys.path.append(lib_path)
from msc_apps import *
data_dir_root = os.environ.get('DATADIR')
def sync_wallet_response(request_dict):
if not request_dict.has_key('type'):
return (None, 'No field type in response dict '+str(request_dict))
print request_dict
req_type = request_dict['type'][0].upper()
if req_type == "SYNCWALLET":
response_data = syncWallets(request_dict['masterWallets'][0])
else:
return (None, req_type + ' is not supported')
response = { 'status': 'OK', 'data': response_data }
return (json.dumps(response), None)
def syncWallets(master_wallets_json):
master_wallets = json.loads(master_wallets_json)
print master_wallets
for wallet in master_wallets:
uuid = wallet['uuid']
filename = data_dir_root + '/wallets/' + uuid + '.json'
with open(filename, 'w') as f:
json.dump(wallet, f)
return "OK"
def sync_wallet_handler(environ, start_response):
return general_handler(environ, start_response, sync_wallet_response)
|
Add handler for syncing wallets to serverimport urlparse
import os, sys
import json
tools_dir = os.environ.get('TOOLSDIR')
lib_path = os.path.abspath(tools_dir)
sys.path.append(lib_path)
from msc_apps import *
data_dir_root = os.environ.get('DATADIR')
def sync_wallet_response(request_dict):
if not request_dict.has_key('type'):
return (None, 'No field type in response dict '+str(request_dict))
print request_dict
req_type = request_dict['type'][0].upper()
if req_type == "SYNCWALLET":
response_data = syncWallets(request_dict['masterWallets'][0])
else:
return (None, req_type + ' is not supported')
response = { 'status': 'OK', 'data': response_data }
return (json.dumps(response), None)
def syncWallets(master_wallets_json):
master_wallets = json.loads(master_wallets_json)
print master_wallets
for wallet in master_wallets:
uuid = wallet['uuid']
filename = data_dir_root + '/wallets/' + uuid + '.json'
with open(filename, 'w') as f:
json.dump(wallet, f)
return "OK"
def sync_wallet_handler(environ, start_response):
return general_handler(environ, start_response, sync_wallet_response)
|
<commit_before><commit_msg>Add handler for syncing wallets to server<commit_after>import urlparse
import os, sys
import json
tools_dir = os.environ.get('TOOLSDIR')
lib_path = os.path.abspath(tools_dir)
sys.path.append(lib_path)
from msc_apps import *
data_dir_root = os.environ.get('DATADIR')
def sync_wallet_response(request_dict):
if not request_dict.has_key('type'):
return (None, 'No field type in response dict '+str(request_dict))
print request_dict
req_type = request_dict['type'][0].upper()
if req_type == "SYNCWALLET":
response_data = syncWallets(request_dict['masterWallets'][0])
else:
return (None, req_type + ' is not supported')
response = { 'status': 'OK', 'data': response_data }
return (json.dumps(response), None)
def syncWallets(master_wallets_json):
master_wallets = json.loads(master_wallets_json)
print master_wallets
for wallet in master_wallets:
uuid = wallet['uuid']
filename = data_dir_root + '/wallets/' + uuid + '.json'
with open(filename, 'w') as f:
json.dump(wallet, f)
return "OK"
def sync_wallet_handler(environ, start_response):
return general_handler(environ, start_response, sync_wallet_response)
|
|
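A hedged sketch of the request shape sync_wallet_response expects, matching how the handler indexes the dict. It assumes the module above is importable, that DATADIR points at a writable directory with a wallets/ subfolder, and the UUID is made up. The module is Python 2, so the sketch sticks to syntax valid in both Python 2 and 3.

import json

request_dict = {
    'type': ['syncwallet'],            # handler upper-cases this to SYNCWALLET
    'masterWallets': [json.dumps([
        {'uuid': '0f1e2d3c-4b5a-6978-8897-a6b5c4d3e2f1', 'name': 'demo'},
    ])],                               # each wallet lands in wallets/<uuid>.json
}
body, error = sync_wallet_response(request_dict)
print(body or error)                   # '{"status": "OK", "data": "OK"}' on success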
613a0056e12a28232542aaf561831d276868e413
|
programs/kinbody-creator/openraveMapGenerator.py
|
programs/kinbody-creator/openraveMapGenerator.py
|
#!/usr/bin/python
#import lxml.etree
#import lxml.builder
from lxml import etree
#E = lxml.builder.ElementMaker()
#KINBODY=E.KinBody
#BODY=E.Body
#GEOM=E.Geom
#EXTENTS=E.Extents
#TRANSLATION=E.Translation
#DIFUSSECOLOR=E.diffuseColor
# User variables
nX = 3
nY = 2
boxHeight = 1.0
resolution = 2.0 # Just to make similar to MATLAB [pixel/meter]
meterPerPixel = 1 / resolution # [meter/pixel]
# Program
Ez = boxHeight / 2.0 # Box size is actually double the extent
Ex = meterPerPixel / 2.0
Ey = meterPerPixel / 2.0
KinBody = etree.Element("KinBody", name="map")
for iY in range(nY):
# print "iY:",iY
for iX in range(nX):
# print "* iX:",iX
#-- Add E___ to each to force begin at 0,0,0 (centered by default)
x = Ex + iX*meterPerPixel
y = Ey + iY*meterPerPixel
z = Ez # Add this to raise to floor level (centered by default)
Number = iX + (iY * nX)
#Create pixel
Body = etree.SubElement(KinBody, "Body", name="square"+str(Number), type="static")
Geom = etree.SubElement(Body, "Geom", type="box")
Extents = etree.SubElement(Geom, "Extents").text= str(Ex)+" "+ str(Ey)+" "+str(Ez)
Translation = etree.SubElement(Geom, "Translation").text= str(x)+" "+str(y)+" "+str(z)
DifusseColor = etree.SubElement(Geom, "diffuseColor").text= ".5 .5 .5"
'''
the_doc = KINBODY(
BODY(
GEOM(
EXTENTS("0.001 0.115 0.065"),
TRANSLATION("0.6 "+ "-0.8 0.32"),
DIFUSSECOLOR(".5 .5 .5"),
type="box",
),
name="square"+str(i), type="static"
),
name="wall",
)
'''
myStr = etree.tostring(KinBody, pretty_print=True)
outFile = open('map.kinbody.xml', 'w')
outFile.write(myStr)
outFile.close()
|
Add parametric map generator, good for wrinkles
|
Add parametric map generator, good for wrinkles
|
Python
|
lgpl-2.1
|
roboticslab-uc3m/xgnitive,roboticslab-uc3m/xgnitive,roboticslab-uc3m/xgnitive
|
Add parametric map generator, good for wrinkles
|
#!/usr/bin/python
#import lxml.etree
#import lxml.builder
from lxml import etree
#E = lxml.builder.ElementMaker()
#KINBODY=E.KinBody
#BODY=E.Body
#GEOM=E.Geom
#EXTENTS=E.Extents
#TRANSLATION=E.Translation
#DIFUSSECOLOR=E.diffuseColor
# User variables
nX = 3
nY = 2
boxHeight = 1.0
resolution = 2.0 # Just to make similar to MATLAB [pixel/meter]
meterPerPixel = 1 / resolution # [meter/pixel]
# Program
Ez = boxHeight / 2.0 # Box size is actually double the extent
Ex = meterPerPixel / 2.0
Ey = meterPerPixel / 2.0
KinBody = etree.Element("KinBody", name="map")
for iY in range(nY):
# print "iY:",iY
for iX in range(nX):
# print "* iX:",iX
#-- Add E___ to each to force begin at 0,0,0 (centered by default)
x = Ex + iX*meterPerPixel
y = Ey + iY*meterPerPixel
z = Ez # Add this to raise to floor level (centered by default)
Number = iX + (iY * nX)
#Create pixel
Body = etree.SubElement(KinBody, "Body", name="square"+str(Number), type="static")
Geom = etree.SubElement(Body, "Geom", type="box")
Extents = etree.SubElement(Geom, "Extents").text= str(Ex)+" "+ str(Ey)+" "+str(Ez)
Translation = etree.SubElement(Geom, "Translation").text= str(x)+" "+str(y)+" "+str(z)
DifusseColor = etree.SubElement(Geom, "diffuseColor").text= ".5 .5 .5"
'''
the_doc = KINBODY(
BODY(
GEOM(
EXTENTS("0.001 0.115 0.065"),
TRANSLATION("0.6 "+ "-0.8 0.32"),
DIFUSSECOLOR(".5 .5 .5"),
type="box",
),
name="square"+str(i), type="static"
),
name="wall",
)
'''
myStr = etree.tostring(KinBody, pretty_print=True)
outFile = open('map.kinbody.xml', 'w')
outFile.write(myStr)
outFile.close()
|
<commit_before><commit_msg>Add parametric map generator, good for wrinkles<commit_after>
|
#!/usr/bin/python
#import lxml.etree
#import lxml.builder
from lxml import etree
#E = lxml.builder.ElementMaker()
#KINBODY=E.KinBody
#BODY=E.Body
#GEOM=E.Geom
#EXTENTS=E.Extents
#TRANSLATION=E.Translation
#DIFUSSECOLOR=E.diffuseColor
# User variables
nX = 3
nY = 2
boxHeight = 1.0
resolution = 2.0 # Just to make similar to MATLAB [pixel/meter]
meterPerPixel = 1 / resolution # [meter/pixel]
# Program
Ez = boxHeight / 2.0 # Box size is actually double the extent
Ex = meterPerPixel / 2.0
Ey = meterPerPixel / 2.0
KinBody = etree.Element("KinBody", name="map")
for iY in range(nY):
# print "iY:",iY
for iX in range(nX):
# print "* iX:",iX
#-- Add E___ to each to force begin at 0,0,0 (centered by default)
x = Ex + iX*meterPerPixel
y = Ey + iY*meterPerPixel
z = Ez # Add this to raise to floor level (centered by default)
Number = iX + (iY * nX)
#Create pixel
Body = etree.SubElement(KinBody, "Body", name="square"+str(Number), type="static")
Geom = etree.SubElement(Body, "Geom", type="box")
Extents = etree.SubElement(Geom, "Extents").text= str(Ex)+" "+ str(Ey)+" "+str(Ez)
Translation = etree.SubElement(Geom, "Translation").text= str(x)+" "+str(y)+" "+str(z)
DifusseColor = etree.SubElement(Geom, "diffuseColor").text= ".5 .5 .5"
'''
the_doc = KINBODY(
BODY(
GEOM(
EXTENTS("0.001 0.115 0.065"),
TRANSLATION("0.6 "+ "-0.8 0.32"),
DIFUSSECOLOR(".5 .5 .5"),
type="box",
),
name="square"+str(i), type="static"
),
name="wall",
)
'''
myStr = etree.tostring(KinBody, pretty_print=True)
outFile = open('map.kinbody.xml', 'w')
outFile.write(myStr)
outFile.close()
|
Add parametric map generator, good for wrinkles#!/usr/bin/python
#import lxml.etree
#import lxml.builder
from lxml import etree
#E = lxml.builder.ElementMaker()
#KINBODY=E.KinBody
#BODY=E.Body
#GEOM=E.Geom
#EXTENTS=E.Extents
#TRANSLATION=E.Translation
#DIFUSSECOLOR=E.diffuseColor
# User variables
nX = 3
nY = 2
boxHeight = 1.0
resolution = 2.0 # Just to make similar to MATLAB [pixel/meter]
meterPerPixel = 1 / resolution # [meter/pixel]
# Program
Ez = boxHeight / 2.0 # Box size is actually double the extent
Ex = meterPerPixel / 2.0
Ey = meterPerPixel / 2.0
KinBody = etree.Element("KinBody", name="map")
for iY in range(nY):
# print "iY:",iY
for iX in range(nX):
# print "* iX:",iX
#-- Add E___ to each to force begin at 0,0,0 (centered by default)
x = Ex + iX*meterPerPixel
y = Ey + iY*meterPerPixel
z = Ez # Add this to raise to floor level (centered by default)
Number = iX + (iY * nX)
#Create pixel
Body = etree.SubElement(KinBody, "Body", name="square"+str(Number), type="static")
Geom = etree.SubElement(Body, "Geom", type="box")
Extents = etree.SubElement(Geom, "Extents").text= str(Ex)+" "+ str(Ey)+" "+str(Ez)
Translation = etree.SubElement(Geom, "Translation").text= str(x)+" "+str(y)+" "+str(z)
DifusseColor = etree.SubElement(Geom, "diffuseColor").text= ".5 .5 .5"
'''
the_doc = KINBODY(
BODY(
GEOM(
EXTENTS("0.001 0.115 0.065"),
TRANSLATION("0.6 "+ "-0.8 0.32"),
DIFUSSECOLOR(".5 .5 .5"),
type="box",
),
name="square"+str(i), type="static"
),
name="wall",
)
'''
myStr = etree.tostring(KinBody, pretty_print=True)
outFile = open('map.kinbody.xml', 'w')
outFile.write(myStr)
outFile.close()
|
<commit_before><commit_msg>Add parametric map generator, good for wrinkles<commit_after>#!/usr/bin/python
#import lxml.etree
#import lxml.builder
from lxml import etree
#E = lxml.builder.ElementMaker()
#KINBODY=E.KinBody
#BODY=E.Body
#GEOM=E.Geom
#EXTENTS=E.Extents
#TRANSLATION=E.Translation
#DIFUSSECOLOR=E.diffuseColor
# User variables
nX = 3
nY = 2
boxHeight = 1.0
resolution = 2.0 # Just to make similar to MATLAB [pixel/meter]
meterPerPixel = 1 / resolution # [meter/pixel]
# Program
Ez = boxHeight / 2.0 # Box size is actually double the extent
Ex = meterPerPixel / 2.0
Ey = meterPerPixel / 2.0
KinBody = etree.Element("KinBody", name="map")
for iY in range(nY):
# print "iY:",iY
for iX in range(nX):
# print "* iX:",iX
#-- Add E___ to each to force begin at 0,0,0 (centered by default)
x = Ex + iX*meterPerPixel
y = Ey + iY*meterPerPixel
z = Ez # Add this to raise to floor level (centered by default)
Number = iX + (iY * nX)
#Create pixel
Body = etree.SubElement(KinBody, "Body", name="square"+str(Number), type="static")
Geom = etree.SubElement(Body, "Geom", type="box")
Extents = etree.SubElement(Geom, "Extents").text= str(Ex)+" "+ str(Ey)+" "+str(Ez)
Translation = etree.SubElement(Geom, "Translation").text= str(x)+" "+str(y)+" "+str(z)
DifusseColor = etree.SubElement(Geom, "diffuseColor").text= ".5 .5 .5"
'''
the_doc = KINBODY(
BODY(
GEOM(
EXTENTS("0.001 0.115 0.065"),
TRANSLATION("0.6 "+ "-0.8 0.32"),
DIFUSSECOLOR(".5 .5 .5"),
type="box",
),
name="square"+str(i), type="static"
),
name="wall",
)
'''
myStr = etree.tostring(KinBody, pretty_print=True)
outFile = open('map.kinbody.xml', 'w')
outFile.write(myStr)
outFile.close()
|
|
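A quick check of the grid arithmetic the generator uses: with resolution = 2.0 each map pixel is 0.5 m wide, the half-extent is 0.25 m per axis, and box centers step by one pixel starting at the half-extent so the grid begins flush at the origin.

resolution = 2.0
meterPerPixel = 1 / resolution       # 0.5 m per pixel
Ex = meterPerPixel / 2.0             # 0.25 m half-extent per axis
for iX in range(3):                  # nX = 3 columns
    x = Ex + iX * meterPerPixel      # centers at 0.25, 0.75, 1.25
    print('square%d centered at x=%.2f' % (iX, x))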
e262d176ecd7d8871a9e06ebc542cf473acf0925
|
reports/migrations/0004_transnational_weights.py
|
reports/migrations/0004_transnational_weights.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django_countries import countries
def populate_weights(apps, schema_editor):
Weights = apps.get_model("reports", "Weights")
db_alias = schema_editor.connection.alias
for item in COUNTRY_WEIGHTS:
country = item['Country']
item.pop('Country')
for media_type, weight in item.iteritems():
w = Weights.objects.using(db_alias).create(
country=country,
media_type=media_type,
weight=weight)
w.save()
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('reports', '0003_indonesia-weights'),
]
operations = [
migrations.RunPython(
populate_weights,
backwards,
),
]
COUNTRY_WEIGHTS= [
{'Country': 'T1',
'Internet': '1',
'Print': '1',
'Radio': '1',
'Television': '1',
'Twitter': '1'}]
|
Add migration for transnational weights
|
Add migration for transnational weights
|
Python
|
apache-2.0
|
Code4SA/gmmp,Code4SA/gmmp,Code4SA/gmmp
|
Add migration for transnational weights
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django_countries import countries
def populate_weights(apps, schema_editor):
Weights = apps.get_model("reports", "Weights")
db_alias = schema_editor.connection.alias
for item in COUNTRY_WEIGHTS:
country = item['Country']
item.pop('Country')
for media_type, weight in item.iteritems():
w = Weights.objects.using(db_alias).create(
country=country,
media_type=media_type,
weight=weight)
w.save()
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('reports', '0003_indonesia-weights'),
]
operations = [
migrations.RunPython(
populate_weights,
backwards,
),
]
COUNTRY_WEIGHTS= [
{'Country': 'T1',
'Internet': '1',
'Print': '1',
'Radio': '1',
'Television': '1',
'Twitter': '1'}]
|
<commit_before><commit_msg>Add migration for transnational weights<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django_countries import countries
def populate_weights(apps, schema_editor):
Weights = apps.get_model("reports", "Weights")
db_alias = schema_editor.connection.alias
for item in COUNTRY_WEIGHTS:
country = item['Country']
item.pop('Country')
for media_type, weight in item.iteritems():
w = Weights.objects.using(db_alias).create(
country=country,
media_type=media_type,
weight=weight)
w.save()
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('reports', '0003_indonesia-weights'),
]
operations = [
migrations.RunPython(
populate_weights,
backwards,
),
]
COUNTRY_WEIGHTS= [
{'Country': 'T1',
'Internet': '1',
'Print': '1',
'Radio': '1',
'Television': '1',
'Twitter': '1'}]
|
Add migration for transnational weights# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django_countries import countries
def populate_weights(apps, schema_editor):
Weights = apps.get_model("reports", "Weights")
db_alias = schema_editor.connection.alias
for item in COUNTRY_WEIGHTS:
country = item['Country']
item.pop('Country')
for media_type, weight in item.iteritems():
w = Weights.objects.using(db_alias).create(
country=country,
media_type=media_type,
weight=weight)
w.save()
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('reports', '0003_indonesia-weights'),
]
operations = [
migrations.RunPython(
populate_weights,
backwards,
),
]
COUNTRY_WEIGHTS= [
{'Country': 'T1',
'Internet': '1',
'Print': '1',
'Radio': '1',
'Television': '1',
'Twitter': '1'}]
|
<commit_before><commit_msg>Add migration for transnational weights<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django_countries import countries
def populate_weights(apps, schema_editor):
Weights = apps.get_model("reports", "Weights")
db_alias = schema_editor.connection.alias
for item in COUNTRY_WEIGHTS:
country = item['Country']
item.pop('Country')
for media_type, weight in item.iteritems():
w = Weights.objects.using(db_alias).create(
country=country,
media_type=media_type,
weight=weight)
w.save()
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('reports', '0003_indonesia-weights'),
]
operations = [
migrations.RunPython(
populate_weights,
backwards,
),
]
COUNTRY_WEIGHTS= [
{'Country': 'T1',
'Internet': '1',
'Print': '1',
'Radio': '1',
'Television': '1',
'Twitter': '1'}]
|
|
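A hedged sketch of what the data migration leaves behind, meant for a Django shell session. The reports.models import path is an assumption inferred from apps.get_model("reports", "Weights"); 'T1' is the transnational pseudo-country the migration introduces.

from reports.models import Weights   # assumed import path

for w in Weights.objects.filter(country='T1'):
    print(w.media_type, w.weight)
# Expected: Internet, Print, Radio, Television, Twitter -- each with weight '1'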
b0577ce3b8b162ce3702430b189905f9beaae8d5
|
firecares/firestation/management/commands/cleanup_phonenumbers.py
|
firecares/firestation/management/commands/cleanup_phonenumbers.py
|
from django.core.management.base import BaseCommand
from firecares.firestation.models import FireDepartment
from phonenumber_field.modelfields import PhoneNumber
import re
"""
This command is for cleaning up every phone and fax number in the
database. It removes all non-numeric characters, such as parentheses,
hyphens, spaces, etc. It also removes prefixed 1s. These numbers should
be made human-readable on the client side.
"""
def cleanNumber(no1):
no2 = re.sub('[^0-9]','', no1)
if no2.startswith("1"):
no2 = no2[1:]
return no2
class Command(BaseCommand):
def handle(self, *args, **kwargs):
print("Don't worry, it always takes this long.")
for fd in FireDepartment.objects.all():
# If the FD has a phone number, clean it up
if fd.headquarters_phone and not fd.headquarters_phone.raw_input == "Invalid Input":
newPhone = cleanNumber(fd.headquarters_phone.raw_input)
print(newPhone)
fd.headquarters_phone = newPhone
# If the FD has a fax number, clean it up
if fd.headquarters_fax and not fd.headquarters_fax.raw_input == "Invalid Input":
newFax = cleanNumber(fd.headquarters_fax.raw_input)
print(newFax)
fd.headquarters_fax = newFax
# Save and continue to the next FD (if any)
fd.save()
print("Completed successfully!")
|
Add script to clean up all FD phone and fax numbers.
|
Add script to clean up all FD phone and fax numbers.
|
Python
|
mit
|
FireCARES/firecares,FireCARES/firecares,FireCARES/firecares,FireCARES/firecares,FireCARES/firecares
|
Add script to clean up all FD phone and fax numbers.
|
from django.core.management.base import BaseCommand
from firecares.firestation.models import FireDepartment
from phonenumber_field.modelfields import PhoneNumber
import re
"""
This command is for cleaning up every phone and fax number in the
database. It removes all non-numeric characters, such as parentheses,
hyphens, spaces, etc. It also removes prefixed 1s. These numbers should
be made human-readable on the client side.
"""
def cleanNumber(no1):
no2 = re.sub('[^0-9]','', no1)
if no2.startswith("1"):
no2 = no2[1:]
return no2
class Command(BaseCommand):
def handle(self, *args, **kwargs):
print("Don't worry, it always takes this long.")
for fd in FireDepartment.objects.all():
# If the FD has a phone number, clean it up
if fd.headquarters_phone and not fd.headquarters_phone.raw_input == "Invalid Input":
newPhone = cleanNumber(fd.headquarters_phone.raw_input)
print(newPhone)
fd.headquarters_phone = newPhone
# If the FD has a fax number, clean it up
if fd.headquarters_fax and not fd.headquarters_fax.raw_input == "Invalid Input":
newFax = cleanNumber(fd.headquarters_fax.raw_input)
print(newFax)
fd.headquarters_fax = newFax
# Save and continue to the next FD (if any)
fd.save()
print("Completed successfully!")
|
<commit_before><commit_msg>Add script to clean up all FD phone and fax numbers.<commit_after>
|
from django.core.management.base import BaseCommand
from firecares.firestation.models import FireDepartment
from phonenumber_field.modelfields import PhoneNumber
import re
"""
This command is for cleaning up every phone and fax number in the
database. It removes all non-numeric characters, such as parentheses,
hyphens, spaces, etc. It also removes prefixed 1s. These numbers should
be made human-readable on the client side.
"""
def cleanNumber(no1):
no2 = re.sub('[^0-9]','', no1)
if no2.startswith("1"):
no2 = no2[1:]
return no2
class Command(BaseCommand):
def handle(self, *args, **kwargs):
print("Don't worry, it always takes this long.")
for fd in FireDepartment.objects.all():
# If the FD has a phone number, clean it up
if fd.headquarters_phone and not fd.headquarters_phone.raw_input == "Invalid Input":
newPhone = cleanNumber(fd.headquarters_phone.raw_input)
print(newPhone)
fd.headquarters_phone = newPhone
# If the FD has a fax number, clean it up
if fd.headquarters_fax and not fd.headquarters_fax.raw_input == "Invalid Input":
newFax = cleanNumber(fd.headquarters_fax.raw_input)
print(newFax)
fd.headquarters_fax = newFax
# Save and continue to the next FD (if any)
fd.save()
print("Completed successfully!")
|
Add script to clean up all FD phone and fax numbers.from django.core.management.base import BaseCommand
from firecares.firestation.models import FireDepartment
from phonenumber_field.modelfields import PhoneNumber
import re
"""
This command is for cleaning up every phone and fax number in the
database. It removes all non-numeric characters, such as parentheses,
hyphens, spaces, etc. It also removes prefixed 1s. These numbers should
be made human-readable on the client side.
"""
def cleanNumber(no1):
no2 = re.sub('[^0-9]','', no1)
if no2.startswith("1"):
no2 = no2[1:]
return no2
class Command(BaseCommand):
def handle(self, *args, **kwargs):
print("Don't worry, it always takes this long.")
for fd in FireDepartment.objects.all():
# If the FD has a phone number, clean it up
if fd.headquarters_phone and not fd.headquarters_phone.raw_input == "Invalid Input":
newPhone = cleanNumber(fd.headquarters_phone.raw_input)
print(newPhone)
fd.headquarters_phone = newPhone
# If the FD has a fax number, clean it up
if fd.headquarters_fax and not fd.headquarters_fax.raw_input == "Invalid Input":
newFax = cleanNumber(fd.headquarters_fax.raw_input)
print(newFax)
fd.headquarters_fax = newFax
# Save and continue to the next FD (if any)
fd.save()
print("Completed successfully!")
|
<commit_before><commit_msg>Add script to clean up all FD phone and fax numbers.<commit_after>from django.core.management.base import BaseCommand
from firecares.firestation.models import FireDepartment
from phonenumber_field.modelfields import PhoneNumber
import re
"""
This command is for cleaning up every phone and fax number in the
database. It removes all non-numeric characters, such as parentheses,
hyphens, spaces, etc. It also removes prefixed 1s. These numbers should
be made human-readable on the client side.
"""
def cleanNumber(no1):
no2 = re.sub('[^0-9]','', no1)
if no2.startswith("1"):
no2 = no2[1:]
return no2
class Command(BaseCommand):
def handle(self, *args, **kwargs):
print("Don't worry, it always takes this long.")
for fd in FireDepartment.objects.all():
# If the FD has a phone number, clean it up
if fd.headquarters_phone and not fd.headquarters_phone.raw_input == "Invalid Input":
newPhone = cleanNumber(fd.headquarters_phone.raw_input)
print(newPhone)
fd.headquarters_phone = newPhone
# If the FD has a fax number, clean it up
if fd.headquarters_fax and not fd.headquarters_fax.raw_input == "Invalid Input":
newFax = cleanNumber(fd.headquarters_fax.raw_input)
print(newFax)
fd.headquarters_fax = newFax
# Save and continue to the next FD (if any)
fd.save()
print("Completed successfully!")
|
|
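Spot checks for cleanNumber (assuming the function from the command above is in scope). All three inputs normalize to the same ten-digit string; note the startswith("1") test treats a leading 1 as a NANP country-code prefix, which is safe because NANP area codes never begin with 1.

for raw in ('(555) 867-5309', '1-555-867-5309', '555.867.5309'):
    print('%s -> %s' % (raw, cleanNumber(raw)))   # all normalize to 5558675309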
d5d3fcfb331c1486acbfb004705b94b1923a0db8
|
Codes/SuperEdge/SuperEdge/dump_libsvm.py
|
Codes/SuperEdge/SuperEdge/dump_libsvm.py
|
import numpy as np
from datetime import datetime
from sklearn.datasets import dump_svmlight_file
import os.path as path
def main():
cache_path = 'largecache/'
feat_name = 'feat.dat'
lbl_name = 'lbl.dat'
feat_len = 4224 #1088
now = datetime.now()
lbl_memmap = np.memmap(path.join(cache_path, lbl_name), dtype='uint8', mode='r')
feat_memmap = np.memmap(path.join(cache_path, feat_name), dtype='float32', mode='r', shape=(lbl_memmap.shape[0], feat_len))
print 'loading dataset took ', (datetime.now() - now)
now = datetime.now()
print 'starting dumping feature files to libsvm format'
dump_svmlight_file(feat_memmap, lbl_memmap, 'largecache/data.train.txt')
if __name__ == '__main__':
main()
|
Add code to dump features into libsvm file format
|
[Code] Add code to dump features into libsvm file format
|
Python
|
mit
|
erfannoury/SuperEdge,erfannoury/SuperEdge,erfannoury/SuperEdge,erfannoury/SuperEdge,erfannoury/SuperEdge
|
[Code] Add code to dump features into libsvm file format
|
import numpy as np
from datetime import datetime
from sklearn.datasets import dump_svmlight_file
import os.path as path
def main():
cache_path = 'largecache/'
feat_name = 'feat.dat'
lbl_name = 'lbl.dat'
feat_len = 4224 #1088
now = datetime.now()
lbl_memmap = np.memmap(path.join(cache_path, lbl_name), dtype='uint8', mode='r')
feat_memmap = np.memmap(path.join(cache_path, feat_name), dtype='float32', mode='r', shape=(lbl_memmap.shape[0], feat_len))
print 'loading dataset took ', (datetime.now() - now)
now = datetime.now()
print 'starting dumping feature files to libsvm format'
dump_svmlight_file(feat_memmap, lbl_memmap, 'largecache/data.train.txt')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>[Code] Add code to dump features into libsvm file format<commit_after>
|
import numpy as np
from datetime import datetime
from sklearn.datasets import dump_svmlight_file
import os.path as path
def main():
cache_path = 'largecache/'
feat_name = 'feat.dat'
lbl_name = 'lbl.dat'
feat_len = 4224 #1088
now = datetime.now()
lbl_memmap = np.memmap(path.join(cache_path, lbl_name), dtype='uint8', mode='r')
feat_memmap = np.memmap(path.join(cache_path, feat_name), dtype='float32', mode='r', shape=(lbl_memmap.shape[0], feat_len))
print 'loading dataset took ', (datetime.now() - now)
now = datetime.now()
print 'starting dumping feature files to libsvm format'
dump_svmlight_file(feat_memmap, lbl_memmap, 'largecache/data.train.txt')
if __name__ == '__main__':
main()
|
[Code] Add code to dump features into libsvm file formatimport numpy as np
from datetime import datetime
from sklearn.datasets import dump_svmlight_file
import os.path as path
def main():
cache_path = 'largecache/'
feat_name = 'feat.dat'
lbl_name = 'lbl.dat'
feat_len = 4224 #1088
now = datetime.now()
lbl_memmap = np.memmap(path.join(cache_path, lbl_name), dtype='uint8', mode='r')
feat_memmap = np.memmap(path.join(cache_path, feat_name), dtype='float32', mode='r', shape=(lbl_memmap.shape[0], feat_len))
print 'loading dataset took ', (datetime.now() - now)
now = datetime.now()
print 'starting dumping feature files to libsvm format'
dump_svmlight_file(feat_memmap, lbl_memmap, 'largecache/data.train.txt')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>[Code] Add code to dump features into libsvm file format<commit_after>import numpy as np
from datetime import datetime
from sklearn.datasets import dump_svmlight_file
import os.path as path
def main():
cache_path = 'largecache/'
feat_name = 'feat.dat'
lbl_name = 'lbl.dat'
feat_len = 4224 #1088
now = datetime.now()
lbl_memmap = np.memmap(path.join(cache_path, lbl_name), dtype='uint8', mode='r')
feat_memmap = np.memmap(path.join(cache_path, feat_name), dtype='float32', mode='r', shape=(lbl_memmap.shape[0], feat_len))
print 'loading dataset took ', (datetime.now() - now)
now = datetime.now()
print 'starting dumping feature files to libsvm format'
dump_svmlight_file(feat_memmap, lbl_memmap, 'largecache/data.train.txt')
if __name__ == '__main__':
main()
|
|
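A hedged round-trip check for the dump above: read the file back with scikit-learn's loader. It assumes the dump completed and largecache/data.train.txt exists; labels come back as floats because that is how the svmlight loader returns them.

from sklearn.datasets import load_svmlight_file

X, y = load_svmlight_file('largecache/data.train.txt', n_features=4224)
print(X.shape)   # sparse CSR matrix of shape (n_samples, 4224)
print(y[:10])    # first ten labels, loaded as float64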
a2516d28c86fd23efcb893e59de42b33526bfe6f
|
swig/tkgui.py
|
swig/tkgui.py
|
#!/usr/bin/env python
import Tkinter
import sys
import mapper
def on_gui_change(x):
# print 'on_gui_change',x,x.__class__
sig_out.update_scalar(int(x))
def on_mapper_change(sig, x):
# print 'on_mapper_change', x, x.__class__
w.set(int(x))
dev = mapper.device("tkgui", 9000)
sig_in = mapper.signal(1, "/signal0", None, 'f', on_mapper_change)
dev.register_input(sig_in)
sig_out = mapper.signal(1, "/signal0", None, 'f', lambda x: x)
dev.register_output(sig_out)
master = Tkinter.Tk()
master.title("libmapper Python GUI demo")
w = Tkinter.Scale(master, from_=0, to=100, label='signal0',
orient=Tkinter.HORIZONTAL, length=300,
command=on_gui_change)
w.pack()
def do_poll():
dev.poll(20)
master.after(5, do_poll)
do_poll()
master.mainloop()
|
Add a Python Tkinter example showing how to map a scale widget.
|
swig: Add a Python Tkinter example showing how to map a scale widget.
|
Python
|
lgpl-2.1
|
davidhernon/libmapper,johnty/libmapper,davidhernon/libmapper,radarsat1/libmapper,malloch/libmapper,malloch/libmapper,radarsat1/libmapper,libmapper/libmapper,radarsat1/libmapper,davidhernon/libmapper-admin2,johnty/libmapper,johnty/libmapper,malloch/libmapper,johnty/libmapper,radarsat1/libmapper,radarsat1/libmapper,libmapper/libmapper,davidhernon/libmapper-admin2,davidhernon/libmapper,malloch/libmapper,davidhernon/libmapper,davidhernon/libmapper,johnty/libmapper,davidhernon/libmapper-admin2,malloch/libmapper,libmapper/libmapper,davidhernon/libmapper-admin2,malloch/libmapper,libmapper/libmapper,libmapper/libmapper,davidhernon/libmapper-admin2,libmapper/libmapper
|
swig: Add a Python Tkinter example showing how to map a scale widget.
|
#!/usr/bin/env python
import Tkinter
import sys
import mapper
def on_gui_change(x):
# print 'on_gui_change',x,x.__class__
sig_out.update_scalar(int(x))
def on_mapper_change(sig, x):
# print 'on_mapper_change', x, x.__class__
w.set(int(x))
dev = mapper.device("tkgui", 9000)
sig_in = mapper.signal(1, "/signal0", None, 'f', on_mapper_change)
dev.register_input(sig_in)
sig_out = mapper.signal(1, "/signal0", None, 'f', lambda x: x)
dev.register_output(sig_out)
master = Tkinter.Tk()
master.title("libmapper Python GUI demo")
w = Tkinter.Scale(master, from_=0, to=100, label='signal0',
orient=Tkinter.HORIZONTAL, length=300,
command=on_gui_change)
w.pack()
def do_poll():
dev.poll(20)
master.after(5, do_poll)
do_poll()
master.mainloop()
|
<commit_before><commit_msg>swig: Add a Python Tkinter example showing how to map a scale widget.<commit_after>
|
#!/usr/bin/env python
import Tkinter
import sys
import mapper
def on_gui_change(x):
# print 'on_gui_change',x,x.__class__
sig_out.update_scalar(int(x))
def on_mapper_change(sig, x):
# print 'on_mapper_change', x, x.__class__
w.set(int(x))
dev = mapper.device("tkgui", 9000)
sig_in = mapper.signal(1, "/signal0", None, 'f', on_mapper_change)
dev.register_input(sig_in)
sig_out = mapper.signal(1, "/signal0", None, 'f', lambda x: x)
dev.register_output(sig_out)
master = Tkinter.Tk()
master.title("libmapper Python GUI demo")
w = Tkinter.Scale(master, from_=0, to=100, label='signal0',
orient=Tkinter.HORIZONTAL, length=300,
command=on_gui_change)
w.pack()
def do_poll():
dev.poll(20)
master.after(5, do_poll)
do_poll()
master.mainloop()
|
swig: Add a Python Tkinter example showing how to map a scale widget.#!/usr/bin/env python
import Tkinter
import sys
import mapper
def on_gui_change(x):
# print 'on_gui_change',x,x.__class__
sig_out.update_scalar(int(x))
def on_mapper_change(sig, x):
# print 'on_mapper_change', x, x.__class__
w.set(int(x))
dev = mapper.device("tkgui", 9000)
sig_in = mapper.signal(1, "/signal0", None, 'f', on_mapper_change)
dev.register_input(sig_in)
sig_out = mapper.signal(1, "/signal0", None, 'f', lambda x: x)
dev.register_output(sig_out)
master = Tkinter.Tk()
master.title("libmapper Python GUI demo")
w = Tkinter.Scale(master, from_=0, to=100, label='signal0',
orient=Tkinter.HORIZONTAL, length=300,
command=on_gui_change)
w.pack()
def do_poll():
dev.poll(20)
master.after(5, do_poll)
do_poll()
master.mainloop()
|
<commit_before><commit_msg>swig: Add a Python Tkinter example showing how to map a scale widget.<commit_after>#!/usr/bin/env python
import Tkinter
import sys
import mapper
def on_gui_change(x):
# print 'on_gui_change',x,x.__class__
sig_out.update_scalar(int(x))
def on_mapper_change(sig, x):
# print 'on_mapper_change', x, x.__class__
w.set(int(x))
dev = mapper.device("tkgui", 9000)
sig_in = mapper.signal(1, "/signal0", None, 'f', on_mapper_change)
dev.register_input(sig_in)
sig_out = mapper.signal(1, "/signal0", None, 'f', lambda x: x)
dev.register_output(sig_out)
master = Tkinter.Tk()
master.title("libmapper Python GUI demo")
w = Tkinter.Scale(master, from_=0, to=100, label='signal0',
orient=Tkinter.HORIZONTAL, length=300,
command=on_gui_change)
w.pack()
def do_poll():
dev.poll(20)
master.after(5, do_poll)
do_poll()
master.mainloop()
|
|
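The record above interleaves libmapper polling with the Tk event loop instead of spawning a second thread. Stripped of libmapper, the idiom looks like this (Python 2 Tkinter, matching the record); dev.poll(20) would replace the placeholder comment.

import Tkinter

root = Tkinter.Tk()

def pump():
    # service the non-Tk event source here, e.g. dev.poll(20)
    root.after(5, pump)   # reschedule on Tk's loop; GUI stays responsive

pump()
root.mainloop()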
4c3c9c6929ebc3f439ccf3bb7d3696f484b154bc
|
karspexet/ticket/migrations/0017_positive_integers_20180322_2056.py
|
karspexet/ticket/migrations/0017_positive_integers_20180322_2056.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2018-03-22 19:56
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ticket', '0016_add_voucher_note_20180213_2307'),
]
operations = [
migrations.AlterField(
model_name='discount',
name='amount',
field=models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(100), django.core.validators.MaxValueValidator(5000)]),
),
migrations.AlterField(
model_name='reservation',
name='ticket_price',
field=models.PositiveIntegerField(),
),
migrations.AlterField(
model_name='reservation',
name='total',
field=models.PositiveIntegerField(),
),
migrations.AlterField(
model_name='ticket',
name='price',
field=models.PositiveIntegerField(),
),
migrations.AlterField(
model_name='voucher',
name='amount',
field=models.PositiveIntegerField(help_text='Rabatt i SEK'),
),
]
|
Add missing noop-migrations for PositiveIntegerField
|
Add missing noop-migrations for PositiveIntegerField
|
Python
|
mit
|
Karspexet/Karspexet,Karspexet/Karspexet,Karspexet/Karspexet,Karspexet/Karspexet,Karspexet/Karspexet
|
Add missing noop-migrations for PositiveIntegerField
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2018-03-22 19:56
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ticket', '0016_add_voucher_note_20180213_2307'),
]
operations = [
migrations.AlterField(
model_name='discount',
name='amount',
field=models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(100), django.core.validators.MaxValueValidator(5000)]),
),
migrations.AlterField(
model_name='reservation',
name='ticket_price',
field=models.PositiveIntegerField(),
),
migrations.AlterField(
model_name='reservation',
name='total',
field=models.PositiveIntegerField(),
),
migrations.AlterField(
model_name='ticket',
name='price',
field=models.PositiveIntegerField(),
),
migrations.AlterField(
model_name='voucher',
name='amount',
field=models.PositiveIntegerField(help_text='Rabatt i SEK'),
),
]
|
<commit_before><commit_msg>Add missing noop-migrations for PositiveIntegerField<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2018-03-22 19:56
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ticket', '0016_add_voucher_note_20180213_2307'),
]
operations = [
migrations.AlterField(
model_name='discount',
name='amount',
field=models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(100), django.core.validators.MaxValueValidator(5000)]),
),
migrations.AlterField(
model_name='reservation',
name='ticket_price',
field=models.PositiveIntegerField(),
),
migrations.AlterField(
model_name='reservation',
name='total',
field=models.PositiveIntegerField(),
),
migrations.AlterField(
model_name='ticket',
name='price',
field=models.PositiveIntegerField(),
),
migrations.AlterField(
model_name='voucher',
name='amount',
field=models.PositiveIntegerField(help_text='Rabatt i SEK'),
),
]
|
Add missing noop-migrations for PositiveIntegerField# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2018-03-22 19:56
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ticket', '0016_add_voucher_note_20180213_2307'),
]
operations = [
migrations.AlterField(
model_name='discount',
name='amount',
field=models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(100), django.core.validators.MaxValueValidator(5000)]),
),
migrations.AlterField(
model_name='reservation',
name='ticket_price',
field=models.PositiveIntegerField(),
),
migrations.AlterField(
model_name='reservation',
name='total',
field=models.PositiveIntegerField(),
),
migrations.AlterField(
model_name='ticket',
name='price',
field=models.PositiveIntegerField(),
),
migrations.AlterField(
model_name='voucher',
name='amount',
field=models.PositiveIntegerField(help_text='Rabatt i SEK'),
),
]
|
<commit_before><commit_msg>Add missing noop-migrations for PositiveIntegerField<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2018-03-22 19:56
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ticket', '0016_add_voucher_note_20180213_2307'),
]
operations = [
migrations.AlterField(
model_name='discount',
name='amount',
field=models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(100), django.core.validators.MaxValueValidator(5000)]),
),
migrations.AlterField(
model_name='reservation',
name='ticket_price',
field=models.PositiveIntegerField(),
),
migrations.AlterField(
model_name='reservation',
name='total',
field=models.PositiveIntegerField(),
),
migrations.AlterField(
model_name='ticket',
name='price',
field=models.PositiveIntegerField(),
),
migrations.AlterField(
model_name='voucher',
name='amount',
field=models.PositiveIntegerField(help_text='Rabatt i SEK'),
),
]
|
|
af508daaf016b824c7518a36f9b92f571f0f65af
|
nodeconductor/structure/management/commands/init_balance_history.py
|
nodeconductor/structure/management/commands/init_balance_history.py
|
from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from nodeconductor.structure.models import BalanceHistory
from nodeconductor.structure.models import Customer
class Command(BaseCommand):
help = """ Initialize demo records of balance history """
def handle(self, *args, **options):
self.stdout.write('Creating demo records of balance history for all customers')
for customer in Customer.objects.all():
for i in range(10):
BalanceHistory.objects.create(customer=customer,
created=timezone.now() - timedelta(days=i),
amount=100 + i * 10)
self.stdout.write('... Done')
|
Implement management command for creating demo records of balance history (NC-842)
|
Implement management command for creating demo records of balance history (NC-842)
|
Python
|
mit
|
opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor
|
Implement management command for creating demo records of balance history (NC-842)
|
from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from nodeconductor.structure.models import BalanceHistory
from nodeconductor.structure.models import Customer
class Command(BaseCommand):
help = """ Initialize demo records of balance history """
def handle(self, *args, **options):
self.stdout.write('Creating demo records of balance history for all customers')
for customer in Customer.objects.all():
for i in range(10):
BalanceHistory.objects.create(customer=customer,
created=timezone.now() - timedelta(days=i),
amount=100 + i * 10)
self.stdout.write('... Done')
|
<commit_before><commit_msg>Implement management command for creating demo records of balance history (NC-842)<commit_after>
|
from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from nodeconductor.structure.models import BalanceHistory
from nodeconductor.structure.models import Customer
class Command(BaseCommand):
help = """ Initialize demo records of balance history """
def handle(self, *args, **options):
self.stdout.write('Creating demo records of balance history for all customers')
for customer in Customer.objects.all():
for i in range(10):
BalanceHistory.objects.create(customer=customer,
created=timezone.now() - timedelta(days=i),
amount=100 + i * 10)
self.stdout.write('... Done')
|
Implement management command for creating demo records of balance history (NC-842)from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from nodeconductor.structure.models import BalanceHistory
from nodeconductor.structure.models import Customer
class Command(BaseCommand):
help = """ Initialize demo records of balance history """
def handle(self, *args, **options):
self.stdout.write('Creating demo records of balance history for all customers')
for customer in Customer.objects.all():
for i in range(10):
BalanceHistory.objects.create(customer=customer,
created=timezone.now() - timedelta(days=i),
amount=100 + i * 10)
self.stdout.write('... Done')
|
<commit_before><commit_msg>Implement management command for creating demo records of balance history (NC-842)<commit_after>from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from nodeconductor.structure.models import BalanceHistory
from nodeconductor.structure.models import Customer
class Command(BaseCommand):
help = """ Initialize demo records of balance history """
def handle(self, *args, **options):
self.stdout.write('Creating demo records of balance history for all customers')
for customer in Customer.objects.all():
for i in range(10):
BalanceHistory.objects.create(customer=customer,
created=timezone.now() - timedelta(days=i),
amount=100 + i * 10)
self.stdout.write('... Done')
|
|
1765ac3a12ea2a56b4e25e05cf1f1b531de5b2cf
|
pyexternal.py
|
pyexternal.py
|
#!/usr/bin/env python
# Get External Temperature from OpenWeatherMap
# External information includes:
# - temperature
# - humidity
# - pressure
# - precipitation volume (each 3h)
import urllib.request
import json
import pyowm
from datetime import datetime
from pyserial import pySerial
from imports.pyTemperature import pyTemperature
BASE_URL = "http://api.openweathermap.org/data/2.5/weather?q="
DEFAULT_CITY = "Meyreuil, France"
API_KEY = "4ca5e2bebb63f72d4cc5564300cf68d5"
class py_external(object):
def __init__(self):
super(py_external, self).__init__()
self.pyTemperature = None
def getDataAPI(self):
owm = pyowm.OWM(API_KEY)
#observation = owm.weather_at_place(DEFAULT_CITY,'accurate')
observation = owm.weather_at_id(2994068)
print(observation)
if observation is not None:
w = observation.get_weather()
w_temp = w.get_temperature(unit='celsius')
w_hum = w.get_humidity()
w_pres = w.get_pressure()
w_prec = w.get_rain()
#print(w_prec)
l = observation.get_location()
#print(l.get_ID())
#print(l.get_name())
#print(l.get_lat())
#print(l.get_lon())
            #pyTemperature Constructor (self, date = datetime.now(), temp=None,pressure=None,humidity=None,precipitationVol=None):
dateNow = datetime.now()
self.pyTemperature = pyTemperature(dateNow,w_temp['temp'],w_pres['press'],w_hum)
#print("Temperature at pyExternal")
#self.pyTemperature.printTemperature()
def getPyTemperature(self):
return self.pyTemperature
def setDate(self,newDate):
self.date = newDate
def setPressure(self,newPressure):
self.pressure = newPressure
def setHumidity(self,newHumidity):
self.humidity = newHumidity
|
Add External Temperature Probe from OpenWeather
|
Add External Temperature Probe from OpenWeather
|
Python
|
mit
|
mattcongy/piprobe
|
Add External Temperature Probe from OpenWeather
|
#!/usr/bin/env python
# Get External Temperature from OpenWeatherMap
# External information includes:
# - temperature
# - humidity
# - pressure
# - precipitation volume (each 3h)
import urllib.request
import json
import pyowm
from datetime import datetime
from pyserial import pySerial
from imports.pyTemperature import pyTemperature
BASE_URL = "http://api.openweathermap.org/data/2.5/weather?q="
DEFAULT_CITY = "Meyreuil, France"
API_KEY = "4ca5e2bebb63f72d4cc5564300cf68d5"
class py_external(object):
def __init__(self):
super(py_external, self).__init__()
self.pyTemperature = None
def getDataAPI(self):
owm = pyowm.OWM(API_KEY)
#observation = owm.weather_at_place(DEFAULT_CITY,'accurate')
observation = owm.weather_at_id(2994068)
print(observation)
if observation is not None:
w = observation.get_weather()
w_temp = w.get_temperature(unit='celsius')
w_hum = w.get_humidity()
w_pres = w.get_pressure()
w_prec = w.get_rain()
#print(w_prec)
l = observation.get_location()
#print(l.get_ID())
#print(l.get_name())
#print(l.get_lat())
#print(l.get_lon())
            #pyTemperature Constructor (self, date = datetime.now(), temp=None,pressure=None,humidity=None,precipitationVol=None):
dateNow = datetime.now()
self.pyTemperature = pyTemperature(dateNow,w_temp['temp'],w_pres['press'],w_hum)
#print("Temperature at pyExternal")
#self.pyTemperature.printTemperature()
def getPyTemperature(self):
return self.pyTemperature
def setDate(self,newDate):
self.date = newDate
def setPressure(self,newPressure):
self.pressure = newPressure
def setHumidity(self,newHumidity):
self.humidity = newHumidity
|
<commit_before><commit_msg>Add External Temperature Probe from OpenWeather<commit_after>
|
#!/usr/bin/env python
# Get External Temperature from OpenWeatherMap
# External information includes:
# - temperature
# - humidity
# - pressure
# - precipitation volume (each 3h)
import urllib.request
import json
import pyowm
from datetime import datetime
from pyserial import pySerial
from imports.pyTemperature import pyTemperature
BASE_URL = "http://api.openweathermap.org/data/2.5/weather?q="
DEFAULT_CITY = "Meyreuil, France"
API_KEY = "4ca5e2bebb63f72d4cc5564300cf68d5"
class py_external(object):
def __init__(self):
super(py_external, self).__init__()
self.pyTemperature = None
def getDataAPI(self):
owm = pyowm.OWM(API_KEY)
#observation = owm.weather_at_place(DEFAULT_CITY,'accurate')
observation = owm.weather_at_id(2994068)
print(observation)
if observation is not None:
w = observation.get_weather()
w_temp = w.get_temperature(unit='celsius')
w_hum = w.get_humidity()
w_pres = w.get_pressure()
w_prec = w.get_rain()
#print(w_prec)
l = observation.get_location()
#print(l.get_ID())
#print(l.get_name())
#print(l.get_lat())
#print(l.get_lon())
            #pyTemperature Constructor (self, date = datetime.now(), temp=None,pressure=None,humidity=None,precipitationVol=None):
dateNow = datetime.now()
self.pyTemperature = pyTemperature(dateNow,w_temp['temp'],w_pres['press'],w_hum)
#print("Temperature at pyExternal")
#self.pyTemperature.printTemperature()
def getPyTemperature(self):
return self.pyTemperature
def setDate(self,newDate):
self.date = newDate
def setPressure(self,newPressure):
self.pressure = newPressure
def setHumidity(self,newHumidity):
self.humidity = newHumidity
|
Add External Temperature Probe from OpenWeather#!/usr/bin/env python
# Get External Temperature from OpenWeatherMap
# External information includes:
# - temperature
# - humidity
# - pressure
# - precipitation volume (each 3h)
import urllib.request
import json
import pyowm
from datetime import datetime
from pyserial import pySerial
from imports.pyTemperature import pyTemperature
BASE_URL = "http://api.openweathermap.org/data/2.5/weather?q="
DEFAULT_CITY = "Meyreuil, France"
API_KEY = "4ca5e2bebb63f72d4cc5564300cf68d5"
class py_external(object):
def __init__(self):
super(py_external, self).__init__()
self.pyTemperature = None
def getDataAPI(self):
owm = pyowm.OWM(API_KEY)
#observation = owm.weather_at_place(DEFAULT_CITY,'accurate')
observation = owm.weather_at_id(2994068)
print(observation)
if observation is not None:
w = observation.get_weather()
w_temp = w.get_temperature(unit='celsius')
w_hum = w.get_humidity()
w_pres = w.get_pressure()
w_prec = w.get_rain()
#print(w_prec)
l = observation.get_location()
#print(l.get_ID())
#print(l.get_name())
#print(l.get_lat())
#print(l.get_lon())
            #pyTemperature Constructor (self, date = datetime.now(), temp=None,pressure=None,humidity=None,precipitationVol=None):
dateNow = datetime.now()
self.pyTemperature = pyTemperature(dateNow,w_temp['temp'],w_pres['press'],w_hum)
#print("Temperature at pyExternal")
#self.pyTemperature.printTemperature()
def getPyTemperature(self):
return self.pyTemperature
def setDate(self,newDate):
self.date = newDate
def setPressure(self,newPressure):
self.pressure = newPressure
def setHumidity(self,newHumidity):
self.humidity = newHumidity
|
<commit_before><commit_msg>Add External Temperature Probe from OpenWeather<commit_after>#!/usr/bin/env python
# Get External Temperature from OpenWeatherMap
# External information includes:
# - temperature
# - humidity
# - pressure
# - precipitation volume (each 3h)
import urllib.request
import json
import pyowm
from datetime import datetime
from pyserial import pySerial
from imports.pyTemperature import pyTemperature
BASE_URL = "http://api.openweathermap.org/data/2.5/weather?q="
DEFAULT_CITY = "Meyreuil, France"
API_KEY = "4ca5e2bebb63f72d4cc5564300cf68d5"
class py_external(object):
def __init__(self):
super(py_external, self).__init__()
self.pyTemperature = None
def getDataAPI(self):
owm = pyowm.OWM(API_KEY)
#observation = owm.weather_at_place(DEFAULT_CITY,'accurate')
observation = owm.weather_at_id(2994068)
print(observation)
if observation is not None:
w = observation.get_weather()
w_temp = w.get_temperature(unit='celsius')
w_hum = w.get_humidity()
w_pres = w.get_pressure()
w_prec = w.get_rain()
#print(w_prec)
l = observation.get_location()
#print(l.get_ID())
#print(l.get_name())
#print(l.get_lat())
#print(l.get_lon())
            #pyTemperature Constructor (self, date = datetime.now(), temp=None,pressure=None,humidity=None,precipitationVol=None):
dateNow = datetime.now()
self.pyTemperature = pyTemperature(dateNow,w_temp['temp'],w_pres['press'],w_hum)
#print("Temperature at pyExternal")
#self.pyTemperature.printTemperature()
def getPyTemperature(self):
return self.pyTemperature
def setDate(self,newDate):
self.date = newDate
def setPressure(self,newPressure):
self.pressure = newPressure
def setHumidity(self,newHumidity):
self.humidity = newHumidity
|
|
decf4b1916a421fe996a31feb131b7ed9e4e3c36
|
numpy-benchmark-one.py
|
numpy-benchmark-one.py
|
import timeit
normal_py_sec = timeit.timeit('sum (x*x for x in xrange(1000))',number = 10000)
naive_np_sec = timeit.timeit('sum(na*na)',setup='import numpy as np; na=np.arange(1000)', number = 10000)
good_np_sec = timeit.timeit('na.dot(na)',setup='import numpy as np; na=np.arange(1000)', number = 10000)
print("Normal Python: %f sec"%normal_py_sec)
print("Naive Numpy : %f sec"%naive_np_sec)
print("Good Numpy : %f sec"%good_np_sec)
|
Add a simple benchmark script
|
Add a simple benchmark script
|
Python
|
unlicense
|
abhimanyuma/ml-with-py
|
Add a simple benchmark script
|
import timeit
normal_py_sec = timeit.timeit('sum (x*x for x in xrange(1000))',number = 10000)
naive_np_sec = timeit.timeit('sum(na*na)',setup='import numpy as np; na=np.arange(1000)', number = 10000)
good_np_sec = timeit.timeit('na.dot(na)',setup='import numpy as np; na=np.arange(1000)', number = 10000)
print("Normal Python: %f sec"%normal_py_sec)
print("Naive Numpy : %f sec"%naive_np_sec)
print("Good Numpy : %f sec"%good_np_sec)
|
<commit_before><commit_msg>Add a simple benchmark script<commit_after>
|
import timeit
normal_py_sec = timeit.timeit('sum (x*x for x in xrange(1000))',number = 10000)
naive_np_sec = timeit.timeit('sum(na*na)',setup='import numpy as np; na=np.arange(1000)', number = 10000)
good_np_sec = timeit.timeit('na.dot(na)',setup='import numpy as np; na=np.arange(1000)', number = 10000)
print("Normal Python: %f sec"%normal_py_sec)
print("Naive Numpy : %f sec"%naive_np_sec)
print("Good Numpy : %f sec"%good_np_sec)
|
Add a simple benchmark scriptimport timeit
normal_py_sec = timeit.timeit('sum (x*x for x in xrange(1000))',number = 10000)
naive_np_sec = timeit.timeit('sum(na*na)',setup='import numpy as np; na=np.arange(1000)', number = 10000)
good_np_sec = timeit.timeit('na.dot(na)',setup='import numpy as np; na=np.arange(1000)', number = 10000)
print("Normal Python: %f sec"%normal_py_sec)
print("Naive Numpy : %f sec"%naive_np_sec)
print("Good Numpy : %f sec"%good_np_sec)
|
<commit_before><commit_msg>Add a simple benchmark script<commit_after>import timeit
normal_py_sec = timeit.timeit('sum (x*x for x in xrange(1000))',number = 10000)
naive_np_sec = timeit.timeit('sum(na*na)',setup='import numpy as np; na=np.arange(1000)', number = 10000)
good_np_sec = timeit.timeit('na.dot(na)',setup='import numpy as np; na=np.arange(1000)', number = 10000)
print("Normal Python: %f sec"%normal_py_sec)
print("Naive Numpy : %f sec"%naive_np_sec)
print("Good Numpy : %f sec"%good_np_sec)
|
|
98aee2af9aa3f7dcc75969f1ec3118c40539793e
|
pandoc-include-code.py
|
pandoc-include-code.py
|
#! /usr/bin/env python3
from sys import stdout, stderr, exit
import json
def walktransform(tree):
if isinstance(tree, list):
return [walktransform(subtree)
for subtree
in tree]
elif not isinstance(tree, dict):
        exit('Unsupported AST node: {}'.format(type(tree)))
    elif isinstance(tree, dict):
        if tree.get('t') == 'CodeBlock':
            (_, _, meta, *_), code = tree.get('c', [[None, None, None], ''])
            if code.strip():
                breakpoint()
                exit('Code in block:\n' + code)
            includes = [v for k, v in meta if k == 'include']
            if len(includes) > 1:
                exit('Too many includes: {}'.format(includes))
            elif not includes:
                exit('No file to include: {}'.format(meta))
else:
with open(includes[0]) as fp:
code = fp.read()
return {
't': 'CodeBlock',
'c': [
[
'',
[],
[
# TODO: file type
],
],
code
],
}
# TODO: https://github.com/owickstrom/pandoc-include-code#snippets
# TODO: https://github.com/owickstrom/pandoc-include-code#ranges
# TODO: https://github.com/owickstrom/pandoc-include-code#dedent
# TODO: https://github.com/owickstrom/pandoc-include-code#adding-base-url-for-all-codeblock-links # noqa
if __name__ == '__main__':
from argparse import ArgumentParser, FileType
argument_parser = ArgumentParser()
argument_parser.add_argument('ast', type=FileType('r'), default='-')
args = argument_parser.parse_args()
ast = json.load(args.ast)
    if ast['pandoc-api-version'] != [1, 22]:
print('Unsupported Pandoc API version',
'.'.join(map(str, ast['pandoc-api-version'])) + '.',
'Use at own risk.',
file=stderr)
json.dump(walktransform(ast['blocks']), stdout)
|
Add clone of Haskell version
|
pandoc-include-clone.py: Add clone of Haskell version
This at least doesn't require linking against anything, waiting for a
long compilation and installation with `cabal`, complaining about the
Pandoc API version in the latest GitHub release as of this writing, or
using up a gigabyte more disk space.
|
Python
|
isc
|
pilona/Utils,pilona/Utils,pilona/Utils
|
pandoc-include-clone.py: Add clone of Haskell version
This at least doesn't require linking against anything, waiting for a
long compilation and installation with `cabal`, complaining about the
Pandoc API version in the latest GitHub release as of this writing, or
using up a gigabyte more disk space.
|
#! /usr/bin/env python3
from sys import stdout, stderr, exit
import json
def walktransform(tree):
if isinstance(tree, list):
return [walktransform(subtree)
for subtree
in tree]
elif not isinstance(tree, dict):
        exit('Unsupported AST node: {}'.format(type(tree)))
    elif isinstance(tree, dict):
        if tree.get('t') == 'CodeBlock':
            (_, _, meta, *_), code = tree.get('c', [[None, None, None], ''])
            if code.strip():
                breakpoint()
                exit('Code in block:\n' + code)
            includes = [v for k, v in meta if k == 'include']
            if len(includes) > 1:
                exit('Too many includes: {}'.format(includes))
            elif not includes:
                exit('No file to include: {}'.format(meta))
else:
with open(includes[0]) as fp:
code = fp.read()
return {
't': 'CodeBlock',
'c': [
[
'',
[],
[
# TODO: file type
],
],
code
],
}
# TODO: https://github.com/owickstrom/pandoc-include-code#snippets
# TODO: https://github.com/owickstrom/pandoc-include-code#ranges
# TODO: https://github.com/owickstrom/pandoc-include-code#dedent
# TODO: https://github.com/owickstrom/pandoc-include-code#adding-base-url-for-all-codeblock-links # noqa
if __name__ == '__main__':
from argparse import ArgumentParser, FileType
argument_parser = ArgumentParser()
argument_parser.add_argument('ast', type=FileType('r'), default='-')
args = argument_parser.parse_args()
ast = json.load(args.ast)
    if ast['pandoc-api-version'] != [1, 22]:
print('Unsupported Pandoc API version',
'.'.join(map(str, ast['pandoc-api-version'])) + '.',
'Use at own risk.',
file=stderr)
json.dump(walktransform(ast['blocks']), stdout)
|
<commit_before><commit_msg>pandoc-include-clone.py: Add clone of Haskell version
This at least doesn't require linking against anything, waiting for a
long compilation and installation with `cabal`, complaining about the
Pandoc API version in the latest GitHub release as of this writing, or
using up a gigabyte more disk space.<commit_after>
|
#! /usr/bin/env python3
from sys import stdout, stderr, exit
import json
def walktransform(tree):
if isinstance(tree, list):
return [walktransform(subtree)
for subtree
in tree]
elif not isinstance(tree, dict):
        exit('Unsupported AST node: {}'.format(type(tree)))
    elif isinstance(tree, dict):
        if tree.get('t') == 'CodeBlock':
            (_, _, meta, *_), code = tree.get('c', [[None, None, None], ''])
            if code.strip():
                breakpoint()
                exit('Code in block:\n' + code)
            includes = [v for k, v in meta if k == 'include']
            if len(includes) > 1:
                exit('Too many includes: {}'.format(includes))
            elif not includes:
                exit('No file to include: {}'.format(meta))
else:
with open(includes[0]) as fp:
code = fp.read()
return {
't': 'CodeBlock',
'c': [
[
'',
[],
[
# TODO: file type
],
],
code
],
}
# TODO: https://github.com/owickstrom/pandoc-include-code#snippets
# TODO: https://github.com/owickstrom/pandoc-include-code#ranges
# TODO: https://github.com/owickstrom/pandoc-include-code#dedent
# TODO: https://github.com/owickstrom/pandoc-include-code#adding-base-url-for-all-codeblock-links # noqa
if __name__ == '__main__':
from argparse import ArgumentParser, FileType
argument_parser = ArgumentParser()
argument_parser.add_argument('ast', type=FileType('r'), default='-')
args = argument_parser.parse_args()
ast = json.load(args.ast)
    if ast['pandoc-api-version'] != [1, 22]:
print('Unsupported Pandoc API version',
'.'.join(map(str, ast['pandoc-api-version'])) + '.',
'Use at own risk.',
file=stderr)
json.dump(walktransform(ast['blocks']), stdout)
|
pandoc-include-clone.py: Add clone of Haskell version
This at least doesn't require linking against anything, waiting for a
long compilation and installation with `cabal`, complaining about the
Pandoc API version in the latest GitHub release as of this writing, or
using up a gigabyte more disk space.#! /usr/bin/env python3
from sys import stdout, stderr, exit
import json
def walktransform(tree):
if isinstance(tree, list):
return [walktransform(subtree)
for subtree
in tree]
elif not isinstance(tree, dict):
        exit('Unsupported AST node: {}'.format(type(tree)))
    elif isinstance(tree, dict):
        if tree.get('t') == 'CodeBlock':
            (_, _, meta, *_), code = tree.get('c', [[None, None, None], ''])
            if code.strip():
                breakpoint()
                exit('Code in block:\n' + code)
            includes = [v for k, v in meta if k == 'include']
            if len(includes) > 1:
                exit('Too many includes: {}'.format(includes))
            elif not includes:
                exit('No file to include: {}'.format(meta))
else:
with open(includes[0]) as fp:
code = fp.read()
return {
't': 'CodeBlock',
'c': [
[
'',
[],
[
# TODO: file type
],
],
code
],
}
# TODO: https://github.com/owickstrom/pandoc-include-code#snippets
# TODO: https://github.com/owickstrom/pandoc-include-code#ranges
# TODO: https://github.com/owickstrom/pandoc-include-code#dedent
# TODO: https://github.com/owickstrom/pandoc-include-code#adding-base-url-for-all-codeblock-links # noqa
if __name__ == '__main__':
from argparse import ArgumentParser, FileType
argument_parser = ArgumentParser()
argument_parser.add_argument('ast', type=FileType('r'), default='-')
args = argument_parser.parse_args()
ast = json.load(args.ast)
    if ast['pandoc-api-version'] != [1, 22]:
print('Unsupported Pandoc API version',
'.'.join(map(str, ast['pandoc-api-version'])) + '.',
'Use at own risk.',
file=stderr)
json.dump(walktransform(ast['blocks']), stdout)
|
<commit_before><commit_msg>pandoc-include-clone.py: Add clone of Haskell version
This at least doesn't require linking against anything, waiting for a
long compilation and installation with `cabal`, complaining about the
Pandoc API version in the latest GitHub release as of this writing, or
using up a gigabyte more disk space.<commit_after>#! /usr/bin/env python3
from sys import stdout, stderr, exit
import json
def walktransform(tree):
if isinstance(tree, list):
return [walktransform(subtree)
for subtree
in tree]
elif not isinstance(tree, dict):
        exit('Unsupported AST node: {}'.format(type(tree)))
    elif isinstance(tree, dict):
        if tree.get('t') == 'CodeBlock':
            (_, _, meta, *_), code = tree.get('c', [[None, None, None], ''])
            if code.strip():
                breakpoint()
                exit('Code in block:\n' + code)
            includes = [v for k, v in meta if k == 'include']
            if len(includes) > 1:
                exit('Too many includes: {}'.format(includes))
            elif not includes:
                exit('No file to include: {}'.format(meta))
else:
with open(includes[0]) as fp:
code = fp.read()
return {
't': 'CodeBlock',
'c': [
[
'',
[],
[
# TODO: file type
],
],
code
],
}
# TODO: https://github.com/owickstrom/pandoc-include-code#snippets
# TODO: https://github.com/owickstrom/pandoc-include-code#ranges
# TODO: https://github.com/owickstrom/pandoc-include-code#dedent
# TODO: https://github.com/owickstrom/pandoc-include-code#adding-base-url-for-all-codeblock-links # noqa
if __name__ == '__main__':
from argparse import ArgumentParser, FileType
argument_parser = ArgumentParser()
argument_parser.add_argument('ast', type=FileType('r'), default='-')
args = argument_parser.parse_args()
ast = json.load(args.ast)
    if ast['pandoc-api-version'] != [1, 22]:
print('Unsupported Pandoc API version',
'.'.join(map(str, ast['pandoc-api-version'])) + '.',
'Use at own risk.',
file=stderr)
json.dump(walktransform(ast['blocks']), stdout)
|
|
70a6553d9323b3522e492c414b67e76111519368
|
scripts/data_download/school_census/create_all_files.py
|
scripts/data_download/school_census/create_all_files.py
|
import os
import commands
import time
import logging
import sys
if len(sys.argv) != 3 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! Use:\n python scripts/data_download/school_census/create_files.py en/pt output_path\n"
exit()
logging.basicConfig(filename=os.path.abspath(os.path.join(sys.argv[2],str(sys.argv[0].split('/')[2]) + '-all-data-download.log' )),level=logging.DEBUG)
for year in range(2007, 2016):
logging.info("python scripts/data_download/higher_education/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year) + "\n")
ret = commands.getoutput("python scripts/data_download/school_census/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year))
logging.info(str(ret) + "\nYear: " + str(year) + " ok =D\n\n")
|
Add file to create all files to school census.
|
Add file to create all files to school census.
|
Python
|
mit
|
DataViva/dataviva-site,DataViva/dataviva-site,DataViva/dataviva-site,DataViva/dataviva-site
|
Add file to create all files to school census.
|
import os
import commands
import time
import logging
import sys
if len(sys.argv) != 3 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! Use:\n python scripts/data_download/school_census/create_files.py en/pt output_path\n"
exit()
logging.basicConfig(filename=os.path.abspath(os.path.join(sys.argv[2],str(sys.argv[0].split('/')[2]) + '-all-data-download.log' )),level=logging.DEBUG)
for year in range(2007, 2016):
logging.info("python scripts/data_download/higher_education/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year) + "\n")
ret = commands.getoutput("python scripts/data_download/school_census/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year))
logging.info(str(ret) + "\nYear: " + str(year) + " ok =D\n\n")
|
<commit_before><commit_msg>Add file to create all files to school census.<commit_after>
|
import os
import commands
import time
import logging
import sys
if len(sys.argv) != 3 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! Use:\n python scripts/data_download/school_census/create_files.py en/pt output_path\n"
exit()
logging.basicConfig(filename=os.path.abspath(os.path.join(sys.argv[2],str(sys.argv[0].split('/')[2]) + '-all-data-download.log' )),level=logging.DEBUG)
for year in range(2007, 2016):
logging.info("python scripts/data_download/higher_education/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year) + "\n")
ret = commands.getoutput("python scripts/data_download/school_census/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year))
logging.info(str(ret) + "\nYear: " + str(year) + " ok =D\n\n")
|
Add file to create all files to school census.import os
import commands
import time
import logging
import sys
if len(sys.argv) != 3 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! Use:\n python scripts/data_download/school_census/create_files.py en/pt output_path\n"
exit()
logging.basicConfig(filename=os.path.abspath(os.path.join(sys.argv[2],str(sys.argv[0].split('/')[2]) + '-all-data-download.log' )),level=logging.DEBUG)
for year in range(2007, 2016):
logging.info("python scripts/data_download/higher_education/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year) + "\n")
ret = commands.getoutput("python scripts/data_download/school_census/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year))
logging.info(str(ret) + "\nYear: " + str(year) + " ok =D\n\n")
|
<commit_before><commit_msg>Add file to create all files to school census.<commit_after>import os
import commands
import time
import logging
import sys
if len(sys.argv) != 3 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! Use:\n python scripts/data_download/school_census/create_files.py en/pt output_path\n"
exit()
logging.basicConfig(filename=os.path.abspath(os.path.join(sys.argv[2],str(sys.argv[0].split('/')[2]) + '-all-data-download.log' )),level=logging.DEBUG)
for year in range(2007, 2016):
logging.info("python scripts/data_download/higher_education/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year) + "\n")
ret = commands.getoutput("python scripts/data_download/school_census/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year))
logging.info(str(ret) + "\nYear: " + str(year) + " ok =D\n\n")
|
|
638ee09f0f2958a955fbad42368ffc6bb2a2688a
|
pipeline/scripts/bb_pipeline_api.py
|
pipeline/scripts/bb_pipeline_api.py
|
#!/usr/bin/env python3
from tempfile import NamedTemporaryFile
import json
from threading import Lock
import numpy as np
from flask import Flask, request
from scipy.misc import imread
from pipeline import Pipeline
from pipeline.objects import Image, Candidates, Saliencies, IDs
from pipeline.pipeline import get_auto_config
app = Flask(__name__)
def init_pipeline():
pipeline = Pipeline([Image],
[Candidates, Saliencies, IDs],
**get_auto_config())
return pipeline
pipeline = init_pipeline()
pipeline_lock = Lock()
def jsonify(instance):
if isinstance(instance, np.ndarray):
return instance.tolist()
return instance
def process_image(image):
with pipeline_lock:
results = pipeline([image])
return json.dumps(dict([(k.__name__, jsonify(v)) for k, v in
results.items()]), ensure_ascii=False)
@app.route('/process', methods=['POST'])
def api_message():
print('Retrieving process request')
if request.headers['Content-Type'] == 'application/octet-stream':
try:
with NamedTemporaryFile(delete=True) as f:
                f.write(request.data)
                # flush and rewind so imread() reads the bytes just written
                f.flush()
                f.seek(0)
                image = imread(f)
return process_image(image)
except Exception as err:
return '{}'.format(err)
else:
return "415 Unsupported Media Type"
if __name__ == '__main__':
app.run(host='0.0.0.0', port=10000)
|
Add minimal REST API script based on flask
|
Add minimal REST API script based on flask
|
Python
|
apache-2.0
|
BioroboticsLab/deeppipeline,BioroboticsLab/bb_pipeline,BioroboticsLab/deeppipeline
|
Add minimal REST API script based on flask
|
#!/usr/bin/env python3
from tempfile import NamedTemporaryFile
import json
from threading import Lock
import numpy as np
from flask import Flask, request
from scipy.misc import imread
from pipeline import Pipeline
from pipeline.objects import Image, Candidates, Saliencies, IDs
from pipeline.pipeline import get_auto_config
app = Flask(__name__)
def init_pipeline():
pipeline = Pipeline([Image],
[Candidates, Saliencies, IDs],
**get_auto_config())
return pipeline
pipeline = init_pipeline()
pipeline_lock = Lock()
def jsonify(instance):
if isinstance(instance, np.ndarray):
return instance.tolist()
return instance
def process_image(image):
with pipeline_lock:
results = pipeline([image])
return json.dumps(dict([(k.__name__, jsonify(v)) for k, v in
results.items()]), ensure_ascii=False)
@app.route('/process', methods=['POST'])
def api_message():
print('Retrieving process request')
if request.headers['Content-Type'] == 'application/octet-stream':
try:
with NamedTemporaryFile(delete=True) as f:
                f.write(request.data)
                # flush and rewind so imread() reads the bytes just written
                f.flush()
                f.seek(0)
                image = imread(f)
return process_image(image)
except Exception as err:
return '{}'.format(err)
else:
return "415 Unsupported Media Type"
if __name__ == '__main__':
app.run(host='0.0.0.0', port=10000)
|
<commit_before><commit_msg>Add minimal REST API script based on flask<commit_after>
|
#!/usr/bin/env python3
from tempfile import NamedTemporaryFile
import json
from threading import Lock
import numpy as np
from flask import Flask, request
from scipy.misc import imread
from pipeline import Pipeline
from pipeline.objects import Image, Candidates, Saliencies, IDs
from pipeline.pipeline import get_auto_config
app = Flask(__name__)
def init_pipeline():
pipeline = Pipeline([Image],
[Candidates, Saliencies, IDs],
**get_auto_config())
return pipeline
pipeline = init_pipeline()
pipeline_lock = Lock()
def jsonify(instance):
if isinstance(instance, np.ndarray):
return instance.tolist()
return instance
def process_image(image):
with pipeline_lock:
results = pipeline([image])
return json.dumps(dict([(k.__name__, jsonify(v)) for k, v in
results.items()]), ensure_ascii=False)
@app.route('/process', methods=['POST'])
def api_message():
print('Retrieving process request')
if request.headers['Content-Type'] == 'application/octet-stream':
try:
with NamedTemporaryFile(delete=True) as f:
                f.write(request.data)
                # flush and rewind so imread() reads the bytes just written
                f.flush()
                f.seek(0)
                image = imread(f)
return process_image(image)
except Exception as err:
return '{}'.format(err)
else:
return "415 Unsupported Media Type"
if __name__ == '__main__':
app.run(host='0.0.0.0', port=10000)
|
Add minimal REST API script based on flask#!/usr/bin/env python3
from tempfile import NamedTemporaryFile
import json
from threading import Lock
import numpy as np
from flask import Flask, request
from scipy.misc import imread
from pipeline import Pipeline
from pipeline.objects import Image, Candidates, Saliencies, IDs
from pipeline.pipeline import get_auto_config
app = Flask(__name__)
def init_pipeline():
pipeline = Pipeline([Image],
[Candidates, Saliencies, IDs],
**get_auto_config())
return pipeline
pipeline = init_pipeline()
pipeline_lock = Lock()
def jsonify(instance):
if isinstance(instance, np.ndarray):
return instance.tolist()
return instance
def process_image(image):
with pipeline_lock:
results = pipeline([image])
return json.dumps(dict([(k.__name__, jsonify(v)) for k, v in
results.items()]), ensure_ascii=False)
@app.route('/process', methods=['POST'])
def api_message():
print('Retrieving process request')
if request.headers['Content-Type'] == 'application/octet-stream':
try:
with NamedTemporaryFile(delete=True) as f:
                f.write(request.data)
                # flush and rewind so imread() reads the bytes just written
                f.flush()
                f.seek(0)
                image = imread(f)
return process_image(image)
except Exception as err:
return '{}'.format(err)
else:
return "415 Unsupported Media Type"
if __name__ == '__main__':
app.run(host='0.0.0.0', port=10000)
|
<commit_before><commit_msg>Add minimal REST API script based on flask<commit_after>#!/usr/bin/env python3
from tempfile import NamedTemporaryFile
import json
from threading import Lock
import numpy as np
from flask import Flask, request
from scipy.misc import imread
from pipeline import Pipeline
from pipeline.objects import Image, Candidates, Saliencies, IDs
from pipeline.pipeline import get_auto_config
app = Flask(__name__)
def init_pipeline():
pipeline = Pipeline([Image],
[Candidates, Saliencies, IDs],
**get_auto_config())
return pipeline
pipeline = init_pipeline()
pipeline_lock = Lock()
def jsonify(instance):
if isinstance(instance, np.ndarray):
return instance.tolist()
return instance
def process_image(image):
with pipeline_lock:
results = pipeline([image])
return json.dumps(dict([(k.__name__, jsonify(v)) for k, v in
results.items()]), ensure_ascii=False)
@app.route('/process', methods=['POST'])
def api_message():
print('Retrieving process request')
if request.headers['Content-Type'] == 'application/octet-stream':
try:
with NamedTemporaryFile(delete=True) as f:
                f.write(request.data)
                # flush and rewind so imread() reads the bytes just written
                f.flush()
                f.seek(0)
                image = imread(f)
return process_image(image)
except Exception as err:
return '{}'.format(err)
else:
return "415 Unsupported Media Type"
if __name__ == '__main__':
app.run(host='0.0.0.0', port=10000)
|
|
70e14187ecd2567894e5e8183341a63835d6839c
|
data/pldm_variables.py
|
data/pldm_variables.py
|
#!/usr/bin/python
r"""
Contains PLDM-related constants.
"""
PLDM_TYPE_BASE = '00'
PLDM_TYPE_PLATFORM = '02'
PLDM_TYPE_BIOS = '03'
PLDM_TYPE_OEM = '3F'
PLDM_BASE_CMD = {
'GET_TID': '2',
'GET_PLDM_VERSION': '3',
'GET_PLDM_TYPES': '4',
'GET_PLDM_COMMANDS': '5'}
PLDM_SUCCESS = '00'
PLDM_ERROR = '01'
PLDM_ERROR_INVALID_DATA = '02'
PLDM_ERROR_INVALID_LENGTH = '03'
PLDM_ERROR_NOT_READY = '04'
PLDM_ERROR_UNSUPPORTED_PLDM_CMD = '05'
PLDM_ERROR_INVALID_PLDM_TYPE = '20'
BIOS_TABLE_UNAVAILABLE = '83'
INVALID_BIOS_TABLE_DATA_INTEGRITY_CHECK = '84'
INVALID_BIOS_TABLE_TYPE = '85'
PLDM_BIOS_CMD = {
'GET_BIOS_TABLE': '01',
'SET_BIOS_ATTRIBUTE_CURRENT_VALUE': '07',
'GET_BIOS_ATTRIBUTE_CURRENT_VALUE_BY_HANDLE': '08',
'GET_DATE_TIME': '0c'}
PLDM_PLATFORM_CMD = {
'SET_STATE_EFFECTER_STATES': '39',
'GET_PDR': '51'}
PLDM_PDR_TYPES = {
'STATE_EFFECTER_PDR': '11'}
# PLDM OEM related variables.
PLDM_FILEIO_CMD = {
'GET_FILE_TABLE': '1',
'READ_FILE': '4',
'WRITE_FILE': '5',
'READ_FILE_INTO_MEMORY': '6',
'WRITE_FILE_FROM_MEMORY': '7'}
PLDM_FILEIO_COMPLETION_CODES = {
'INVALID_FILE_HANDLE': '80',
'DATA_OUT_OF_RANGE': '81',
'INVALID_READ_LENGTH': '82',
'INVALID_WRITE_LENGTH': '83',
'FILE_TABLE_UNAVAILABLE': '84',
'INVALID_FILE_TABLE_TYPE': '85'}
|
Create pldm related specific constants file.
|
Create pldm related specific constants file.
Change-Id: I3eda08d7ec1c0113931511f2d3539f2c658f7ad0
Signed-off-by: Sridevi Ramesh <cb5e1f81dd390dfc3a9afc08ab7298b7ab4296f5@in.ibm.com>
|
Python
|
apache-2.0
|
openbmc/openbmc-test-automation,openbmc/openbmc-test-automation
|
Create pldm related specific constants file.
Change-Id: I3eda08d7ec1c0113931511f2d3539f2c658f7ad0
Signed-off-by: Sridevi Ramesh <cb5e1f81dd390dfc3a9afc08ab7298b7ab4296f5@in.ibm.com>
|
#!/usr/bin/python
r"""
Contains PLDM-related constants.
"""
PLDM_TYPE_BASE = '00'
PLDM_TYPE_PLATFORM = '02'
PLDM_TYPE_BIOS = '03'
PLDM_TYPE_OEM = '3F'
PLDM_BASE_CMD = {
'GET_TID': '2',
'GET_PLDM_VERSION': '3',
'GET_PLDM_TYPES': '4',
'GET_PLDM_COMMANDS': '5'}
PLDM_SUCCESS = '00'
PLDM_ERROR = '01'
PLDM_ERROR_INVALID_DATA = '02'
PLDM_ERROR_INVALID_LENGTH = '03'
PLDM_ERROR_NOT_READY = '04'
PLDM_ERROR_UNSUPPORTED_PLDM_CMD = '05'
PLDM_ERROR_INVALID_PLDM_TYPE = '20'
BIOS_TABLE_UNAVAILABLE = '83'
INVALID_BIOS_TABLE_DATA_INTEGRITY_CHECK = '84'
INVALID_BIOS_TABLE_TYPE = '85'
PLDM_BIOS_CMD = {
'GET_BIOS_TABLE': '01',
'SET_BIOS_ATTRIBUTE_CURRENT_VALUE': '07',
'GET_BIOS_ATTRIBUTE_CURRENT_VALUE_BY_HANDLE': '08',
'GET_DATE_TIME': '0c'}
PLDM_PLATFORM_CMD = {
'SET_STATE_EFFECTER_STATES': '39',
'GET_PDR': '51'}
PLDM_PDR_TYPES = {
'STATE_EFFECTER_PDR': '11'}
# PLDM OEM related variables.
PLDM_FILEIO_CMD = {
'GET_FILE_TABLE': '1',
'READ_FILE': '4',
'WRITE_FILE': '5',
'READ_FILE_INTO_MEMORY': '6',
'WRITE_FILE_FROM_MEMORY': '7'}
PLDM_FILEIO_COMPLETION_CODES = {
'INVALID_FILE_HANDLE': '80',
'DATA_OUT_OF_RANGE': '81',
'INVALID_READ_LENGTH': '82',
'INVALID_WRITE_LENGTH': '83',
'FILE_TABLE_UNAVAILABLE': '84',
'INVALID_FILE_TABLE_TYPE': '85'}
|
<commit_before><commit_msg>Create pldm related specific constants file.
Change-Id: I3eda08d7ec1c0113931511f2d3539f2c658f7ad0
Signed-off-by: Sridevi Ramesh <cb5e1f81dd390dfc3a9afc08ab7298b7ab4296f5@in.ibm.com><commit_after>
|
#!/usr/bin/python
r"""
Contains PLDM-related constants.
"""
PLDM_TYPE_BASE = '00'
PLDM_TYPE_PLATFORM = '02'
PLDM_TYPE_BIOS = '03'
PLDM_TYPE_OEM = '3F'
PLDM_BASE_CMD = {
'GET_TID': '2',
'GET_PLDM_VERSION': '3',
'GET_PLDM_TYPES': '4',
'GET_PLDM_COMMANDS': '5'}
PLDM_SUCCESS = '00'
PLDM_ERROR = '01'
PLDM_ERROR_INVALID_DATA = '02'
PLDM_ERROR_INVALID_LENGTH = '03'
PLDM_ERROR_NOT_READY = '04'
PLDM_ERROR_UNSUPPORTED_PLDM_CMD = '05'
PLDM_ERROR_INVALID_PLDM_TYPE = '20'
BIOS_TABLE_UNAVAILABLE = '83'
INVALID_BIOS_TABLE_DATA_INTEGRITY_CHECK = '84'
INVALID_BIOS_TABLE_TYPE = '85'
PLDM_BIOS_CMD = {
'GET_BIOS_TABLE': '01',
'SET_BIOS_ATTRIBUTE_CURRENT_VALUE': '07',
'GET_BIOS_ATTRIBUTE_CURRENT_VALUE_BY_HANDLE': '08',
'GET_DATE_TIME': '0c'}
PLDM_PLATFORM_CMD = {
'SET_STATE_EFFECTER_STATES': '39',
'GET_PDR': '51'}
PLDM_PDR_TYPES = {
'STATE_EFFECTER_PDR': '11'}
# PLDM OEM related variables.
PLDM_FILEIO_CMD = {
'GET_FILE_TABLE': '1',
'READ_FILE': '4',
'WRITE_FILE': '5',
'READ_FILE_INTO_MEMORY': '6',
'WRITE_FILE_FROM_MEMORY': '7'}
PLDM_FILEIO_COMPLETION_CODES = {
'INVALID_FILE_HANDLE': '80',
'DATA_OUT_OF_RANGE': '81',
'INVALID_READ_LENGTH': '82',
'INVALID_WRITE_LENGTH': '83',
'FILE_TABLE_UNAVAILABLE': '84',
'INVALID_FILE_TABLE_TYPE': '85'}
|
Create pldm related specific constants file.
Change-Id: I3eda08d7ec1c0113931511f2d3539f2c658f7ad0
Signed-off-by: Sridevi Ramesh <cb5e1f81dd390dfc3a9afc08ab7298b7ab4296f5@in.ibm.com>#!/usr/bin/python
r"""
Contains PLDM-related constants.
"""
PLDM_TYPE_BASE = '00'
PLDM_TYPE_PLATFORM = '02'
PLDM_TYPE_BIOS = '03'
PLDM_TYPE_OEM = '3F'
PLDM_BASE_CMD = {
'GET_TID': '2',
'GET_PLDM_VERSION': '3',
'GET_PLDM_TYPES': '4',
'GET_PLDM_COMMANDS': '5'}
PLDM_SUCCESS = '00'
PLDM_ERROR = '01'
PLDM_ERROR_INVALID_DATA = '02'
PLDM_ERROR_INVALID_LENGTH = '03'
PLDM_ERROR_NOT_READY = '04'
PLDM_ERROR_UNSUPPORTED_PLDM_CMD = '05'
PLDM_ERROR_INVALID_PLDM_TYPE = '20'
BIOS_TABLE_UNAVAILABLE = '83'
INVALID_BIOS_TABLE_DATA_INTEGRITY_CHECK = '84'
INVALID_BIOS_TABLE_TYPE = '85'
PLDM_BIOS_CMD = {
'GET_BIOS_TABLE': '01',
'SET_BIOS_ATTRIBUTE_CURRENT_VALUE': '07',
'GET_BIOS_ATTRIBUTE_CURRENT_VALUE_BY_HANDLE': '08',
'GET_DATE_TIME': '0c'}
PLDM_PLATFORM_CMD = {
'SET_STATE_EFFECTER_STATES': '39',
'GET_PDR': '51'}
PLDM_PDR_TYPES = {
'STATE_EFFECTER_PDR': '11'}
# PLDM OEM related variables.
PLDM_FILEIO_CMD = {
'GET_FILE_TABLE': '1',
'READ_FILE': '4',
'WRITE_FILE': '5',
'READ_FILE_INTO_MEMORY': '6',
'WRITE_FILE_FROM_MEMORY': '7'}
PLDM_FILEIO_COMPLETION_CODES = {
'INVALID_FILE_HANDLE': '80',
'DATA_OUT_OF_RANGE': '81',
'INVALID_READ_LENGTH': '82',
'INVALID_WRITE_LENGTH': '83',
'FILE_TABLE_UNAVAILABLE': '84',
'INVALID_FILE_TABLE_TYPE': '85'}
|
<commit_before><commit_msg>Create pldm related specific constants file.
Change-Id: I3eda08d7ec1c0113931511f2d3539f2c658f7ad0
Signed-off-by: Sridevi Ramesh <cb5e1f81dd390dfc3a9afc08ab7298b7ab4296f5@in.ibm.com><commit_after>#!/usr/bin/python
r"""
Contains PLDM-related constants.
"""
PLDM_TYPE_BASE = '00'
PLDM_TYPE_PLATFORM = '02'
PLDM_TYPE_BIOS = '03'
PLDM_TYPE_OEM = '3F'
PLDM_BASE_CMD = {
'GET_TID': '2',
'GET_PLDM_VERSION': '3',
'GET_PLDM_TYPES': '4',
'GET_PLDM_COMMANDS': '5'}
PLDM_SUCCESS = '00'
PLDM_ERROR = '01'
PLDM_ERROR_INVALID_DATA = '02'
PLDM_ERROR_INVALID_LENGTH = '03'
PLDM_ERROR_NOT_READY = '04'
PLDM_ERROR_UNSUPPORTED_PLDM_CMD = '05'
PLDM_ERROR_INVALID_PLDM_TYPE = '20'
BIOS_TABLE_UNAVAILABLE = '83'
INVALID_BIOS_TABLE_DATA_INTEGRITY_CHECK = '84'
INVALID_BIOS_TABLE_TYPE = '85'
PLDM_BIOS_CMD = {
'GET_BIOS_TABLE': '01',
'SET_BIOS_ATTRIBUTE_CURRENT_VALUE': '07',
'GET_BIOS_ATTRIBUTE_CURRENT_VALUE_BY_HANDLE': '08',
'GET_DATE_TIME': '0c'}
PLDM_PLATFORM_CMD = {
'SET_STATE_EFFECTER_STATES': '39',
'GET_PDR': '51'}
PLDM_PDR_TYPES = {
'STATE_EFFECTER_PDR': '11'}
# PLDM OEM related variables.
PLDM_FILEIO_CMD = {
'GET_FILE_TABLE': '1',
'READ_FILE': '4',
'WRITE_FILE': '5',
'READ_FILE_INTO_MEMORY': '6',
'WRITE_FILE_FROM_MEMORY': '7'}
PLDM_FILEIO_COMPLETION_CODES = {
'INVALID_FILE_HANDLE': '80',
'DATA_OUT_OF_RANGE': '81',
'INVALID_READ_LENGTH': '82',
'INVALID_WRITE_LENGTH': '83',
'FILE_TABLE_UNAVAILABLE': '84',
'INVALID_FILE_TABLE_TYPE': '85'}
|
|
8b42b0825d5cbb6becef9669b43a2c8229ea8642
|
remove_unpaired_fasta_entries.py
|
remove_unpaired_fasta_entries.py
|
#!/usr/bin/env python
"""
Remove unpaired reads from a fasta file.
This script can be used when unpaired reads (e.g. reads whose mates
were removed during quality trimming) need to be removed from a pair
of fasta files produced by paired-end sequencing.
"""
import argparse
from Bio import SeqIO
from Bio.SeqIO.FastaIO import FastaWriter
parser = argparse.ArgumentParser()
parser.add_argument("fasta_file_to_filter")
parser.add_argument("reference_fasta_file")
parser.add_argument("--output_fasta", default="output.fa")
args = parser.parse_args()
# Read reference file header
reference_headers = {}
for seq_record in SeqIO.parse(args.reference_fasta_file, "fasta"):
reference_headers[seq_record.id.split()[0]] = 1
# Read fasta file to filter and write output
with open(args.output_fasta, 'w') as output_fh:
writer = FastaWriter(output_fh, wrap=0)
writer.write_file(
filter(lambda seq_record: seq_record.id.split()[0] in reference_headers,
SeqIO.parse(args.fasta_file_to_filter, "fasta")))
|
Add script to remove unpaired fasta entries.
|
Add script to remove unpaired fasta entries.
|
Python
|
isc
|
konrad/kuf_bio_scripts
|
Add script to remove unpaired fasta entries.
|
#!/usr/bin/env python
"""
Remove unpaired reads from a fasta file.
This script can be used when unpaired reads (e.g. reads whose mates
were removed during quality trimming) need to be removed from a pair
of fasta files produced by paired-end sequencing.
"""
import argparse
from Bio import SeqIO
from Bio.SeqIO.FastaIO import FastaWriter
parser = argparse.ArgumentParser()
parser.add_argument("fasta_file_to_filter")
parser.add_argument("reference_fasta_file")
parser.add_argument("--output_fasta", default="output.fa")
args = parser.parse_args()
# Read reference file header
reference_headers = {}
for seq_record in SeqIO.parse(args.reference_fasta_file, "fasta"):
reference_headers[seq_record.id.split()[0]] = 1
# Read fasta file to filter and write output
with open(args.output_fasta, 'w') as output_fh:
writer = FastaWriter(output_fh, wrap=0)
writer.write_file(
filter(lambda seq_record: seq_record.id.split()[0] in reference_headers,
SeqIO.parse(args.fasta_file_to_filter, "fasta")))
|
<commit_before><commit_msg>Add script to remove unpaired fasta entries.<commit_after>
|
#!/usr/bin/env python
"""
Remove unpaired reads from a fasta file.
This script can be used when unpaired reads (e.g. reads whose mates
were removed during quality trimming) need to be removed from a pair
of fasta files produced by paired-end sequencing.
"""
import argparse
from Bio import SeqIO
from Bio.SeqIO.FastaIO import FastaWriter
parser = argparse.ArgumentParser()
parser.add_argument("fasta_file_to_filter")
parser.add_argument("reference_fasta_file")
parser.add_argument("--output_fasta", default="output.fa")
args = parser.parse_args()
# Read reference file header
reference_headers = {}
for seq_record in SeqIO.parse(args.reference_fasta_file, "fasta"):
reference_headers[seq_record.id.split()[0]] = 1
# Read fasta file to filter and write output
with open(args.output_fasta, 'w') as output_fh:
writer = FastaWriter(output_fh, wrap=0)
writer.write_file(
filter(lambda seq_record: seq_record.id.split()[0] in reference_headers,
SeqIO.parse(args.fasta_file_to_filter, "fasta")))
|
Add script to remove unpaired fasta entries.#!/usr/bin/env python
"""
Remove unpaired reads from a fasta file.
This script can be used when unpaired reads (e.g. reads whose mates
were removed during quality trimming) need to be removed from a pair
of fasta files produced by paired-end sequencing.
"""
import argparse
from Bio import SeqIO
from Bio.SeqIO.FastaIO import FastaWriter
parser = argparse.ArgumentParser()
parser.add_argument("fasta_file_to_filter")
parser.add_argument("reference_fasta_file")
parser.add_argument("--output_fasta", default="output.fa")
args = parser.parse_args()
# Read reference file header
reference_headers = {}
for seq_record in SeqIO.parse(args.reference_fasta_file, "fasta"):
reference_headers[seq_record.id.split()[0]] = 1
# Read fasta file to filter and write output
with open(args.output_fasta, 'w') as output_fh:
writer = FastaWriter(output_fh, wrap=0)
writer.write_file(
filter(lambda seq_record: seq_record.id.split()[0] in reference_headers,
SeqIO.parse(args.fasta_file_to_filter, "fasta")))
|
<commit_before><commit_msg>Add script to remove unpaired fasta entries.<commit_after>#!/usr/bin/env python
"""
Remove unpaired reads from a fasta file.
This script can be used when unpaired reads (e.g. reads whose mates
were removed during quality trimming) need to be removed from a pair
of fasta files produced by paired-end sequencing.
"""
import argparse
from Bio import SeqIO
from Bio.SeqIO.FastaIO import FastaWriter
parser = argparse.ArgumentParser()
parser.add_argument("fasta_file_to_filter")
parser.add_argument("reference_fasta_file")
parser.add_argument("--output_fasta", default="output.fa")
args = parser.parse_args()
# Read reference file header
reference_headers = {}
for seq_record in SeqIO.parse(args.reference_fasta_file, "fasta"):
reference_headers[seq_record.id.split()[0]] = 1
# Read fasta file to filter and write output
with open(args.output_fasta, 'w') as output_fh:
writer = FastaWriter(output_fh, wrap=0)
writer.write_file(
filter(lambda seq_record: seq_record.id.split()[0] in reference_headers,
SeqIO.parse(args.fasta_file_to_filter, "fasta")))
|
|
ccbb7e11edc63a128b7006e015539fdabd8f3a7f
|
bitHopper/LongPoll.py
|
bitHopper/LongPoll.py
|
from gevent.event import AsyncResult
_event = AsyncResult()
def wait():
"""
Gets the New Block work unit to send to clients
"""
return _event.get()
def trigger(work):
"""
Call to trigger a LP
"""
    global _event
    old = _event
    _event = AsyncResult()
old.set(work)
|
Set up frontend for longpolling
|
Set up frontend for longpolling
|
Python
|
mit
|
c00w/bitHopper,c00w/bitHopper
|
Set up frontend for longpolling
|
from gevent.event import AsyncResult
_event = AsyncResult()
def wait():
"""
Gets the New Block work unit to send to clients
"""
return _event.get()
def trigger(work):
"""
Call to trigger a LP
"""
    global _event
    old = _event
    _event = AsyncResult()
old.set(work)
|
<commit_before><commit_msg>Set up frontend for longpolling<commit_after>
|
from gevent.event import AsyncResult
_event = AsyncResult()
def wait():
"""
Gets the New Block work unit to send to clients
"""
return _event.get()
def trigger(work):
"""
Call to trigger a LP
"""
    global _event
    old = _event
    _event = AsyncResult()
old.set(work)
|
Set up frontend for longpollingfrom gevent.event import AsyncResult
_event = AsyncResult()
def wait():
"""
Gets the New Block work unit to send to clients
"""
return _event.get()
def trigger(work):
"""
Call to trigger a LP
"""
    global _event
    old = _event
    _event = AsyncResult()
old.set(work)
|
<commit_before><commit_msg>Set up frontend for longpolling<commit_after>from gevent.event import AsyncResult
_event = AsyncResult()
def wait():
"""
Gets the New Block work unit to send to clients
"""
return _event.get()
def trigger(work):
"""
Call to trigger a LP
"""
    global _event
    old = _event
    _event = AsyncResult()
old.set(work)
|
|
324243dfd61afd8ce244a9a02ffc800c5c73ce55
|
charts/daniels_designing_great_beers/appendix_two_course_grind_potential_extract_modified.py
|
charts/daniels_designing_great_beers/appendix_two_course_grind_potential_extract_modified.py
|
from brew.utilities import sg_from_dry_basis
"""
Ray Daniels
Designing Great Beers
Appendix 2: Course Grind Potential Extract (modified)
Notes:
The chart appears to have been developed with the moisture content set
to zero (0.0) and the Brew House Efficiency set to 100% (1.0). This
is not typical and the book even states that you should expect moisture
content at around 4.0% and Brew House Efficiency at around 90.0%.
This version has been modified with more typical values.
"""
def get_chart():
mc = 4
bhe = 0.9
chart = []
for dbcg in range(5000, 7600, 100) + range(7600, 8025, 25):
gu = sg_from_dry_basis(
dbcg / 100.0,
moisture_content=mc,
brew_house_efficiency=bhe)
sg = 1 + (gu / 1000.0)
chart.append([round(dbcg / 100.0, 2), round(gu, 2), round(sg, 4)])
return chart
def print_chart():
chart = get_chart()
print("DBCG\tGU\t1 lb./gallon")
print("'As-Is'\t\tYields SG")
print("-------\t-----\t------------")
for dbcg, gu, sg in chart:
print("{0:0.2f}\t{1:0.2f}\t{2:0.4f}".format(dbcg, gu, sg))
def main():
print_chart()
if __name__ == "__main__":
main()
|
Add modified chart with better values
|
Add modified chart with better values
|
Python
|
mit
|
chrisgilmerproj/brewday,chrisgilmerproj/brewday
|
Add modified chart with better values
|
from brew.utilities import sg_from_dry_basis
"""
Ray Daniels
Designing Great Beers
Appendix 2: Course Grind Potential Extract (modified)
Notes:
The chart appears to have been developed with the moisture content set
to zero (0.0) and the Brew House Efficiency set to 100% (1.0). This
is not typical and the book even states that you should expect moisture
content at around 4.0% and Brew House Efficiency at around 90.0%.
This version has been modified with more typical values.
"""
def get_chart():
mc = 4
bhe = 0.9
chart = []
for dbcg in range(5000, 7600, 100) + range(7600, 8025, 25):
gu = sg_from_dry_basis(
dbcg / 100.0,
moisture_content=mc,
brew_house_efficiency=bhe)
sg = 1 + (gu / 1000.0)
chart.append([round(dbcg / 100.0, 2), round(gu, 2), round(sg, 4)])
return chart
def print_chart():
chart = get_chart()
print("DBCG\tGU\t1 lb./gallon")
print("'As-Is'\t\tYields SG")
print("-------\t-----\t------------")
for dbcg, gu, sg in chart:
print("{0:0.2f}\t{1:0.2f}\t{2:0.4f}".format(dbcg, gu, sg))
def main():
print_chart()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add modified chart with better values<commit_after>
|
from brew.utilities import sg_from_dry_basis
"""
Ray Daniels
Designing Great Beers
Appendix 2: Course Grind Potential Extract (modified)
Notes:
The chart appears to have been developed with the moisture content set
to zero (0.0) and the Brew House Efficiency set to 100% (1.0). This
is not typical and the book even states that you should expect moisture
content at around 4.0% and Brew House Efficiency at around 90.0%.
This version has been modified with more typical values.
"""
def get_chart():
mc = 4
bhe = 0.9
chart = []
for dbcg in range(5000, 7600, 100) + range(7600, 8025, 25):
gu = sg_from_dry_basis(
dbcg / 100.0,
moisture_content=mc,
brew_house_efficiency=bhe)
sg = 1 + (gu / 1000.0)
chart.append([round(dbcg / 100.0, 2), round(gu, 2), round(sg, 4)])
return chart
def print_chart():
chart = get_chart()
print("DBCG\tGU\t1 lb./gallon")
print("'As-Is'\t\tYields SG")
print("-------\t-----\t------------")
for dbcg, gu, sg in chart:
print("{0:0.2f}\t{1:0.2f}\t{2:0.4f}".format(dbcg, gu, sg))
def main():
print_chart()
if __name__ == "__main__":
main()
|
Add modified chart with better values
from brew.utilities import sg_from_dry_basis
"""
Ray Daniels
Designing Great Beers
Appendix 2: Course Grind Potential Extract (modified)
Notes:
The chart appears to have been developed with the moisture content set
to zero (0.0) and the Brew House Efficiency set to 100% (1.0). This
is not typical and the book even states that you should expect moisture
content at around 4.0% and Brew House Efficiency at around 90.0%.
This version has been modified with more typical values.
"""
def get_chart():
mc = 4
bhe = 0.9
chart = []
for dbcg in range(5000, 7600, 100) + range(7600, 8025, 25):
gu = sg_from_dry_basis(
dbcg / 100.0,
moisture_content=mc,
brew_house_efficiency=bhe)
sg = 1 + (gu / 1000.0)
chart.append([round(dbcg / 100.0, 2), round(gu, 2), round(sg, 4)])
return chart
def print_chart():
chart = get_chart()
print("DBCG\tGU\t1 lb./gallon")
print("'As-Is'\t\tYields SG")
print("-------\t-----\t------------")
for dbcg, gu, sg in chart:
print("{0:0.2f}\t{1:0.2f}\t{2:0.4f}".format(dbcg, gu, sg))
def main():
print_chart()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add modified chart with better values<commit_after>
from brew.utilities import sg_from_dry_basis
"""
Ray Daniels
Designing Great Beers
Appendix 2: Coarse Grind Potential Extract (modified)
Notes:
The chart appears to have been developed with the moisture content set
to zero (0.0) and the Brew House Efficiency set to 100% (1.0). This
is not typical and the book even states that you should expect moisture
content at around 4.0% and Brew House Efficiency at around 90.0%.
This version has been modified with more typical values.
"""
def get_chart():
mc = 4
bhe = 0.9
chart = []
    for dbcg in list(range(5000, 7600, 100)) + list(range(7600, 8025, 25)):
gu = sg_from_dry_basis(
dbcg / 100.0,
moisture_content=mc,
brew_house_efficiency=bhe)
sg = 1 + (gu / 1000.0)
chart.append([round(dbcg / 100.0, 2), round(gu, 2), round(sg, 4)])
return chart
def print_chart():
chart = get_chart()
print("DBCG\tGU\t1 lb./gallon")
print("'As-Is'\t\tYields SG")
print("-------\t-----\t------------")
for dbcg, gu, sg in chart:
print("{0:0.2f}\t{1:0.2f}\t{2:0.4f}".format(dbcg, gu, sg))
def main():
print_chart()
if __name__ == "__main__":
main()
|
|
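For a concrete sense of the conversion the chart above performs, here is a worked example. The helper below is an illustrative stand-in, not the actual brew.utilities.sg_from_dry_basis: it assumes the common approximation of discounting the dry-basis coarse grind (DBCG) figure for moisture, applying brew house efficiency, and multiplying by sucrose's ~46.21 gravity points per pound per gallon.
def approx_sg_from_dry_basis(dbcg, moisture_content=4.0,
                             brew_house_efficiency=0.90):
    # "As-is" extract: discount the dry-basis percentage for moisture,
    # then apply brew house efficiency and the sucrose reference value.
    as_is = (dbcg / 100.0) * (1.0 - moisture_content / 100.0)
    return as_is * brew_house_efficiency * 46.21
gu = approx_sg_from_dry_basis(80.0)  # a DBCG of 80.00
sg = 1 + gu / 1000.0
print("{0:0.2f} GU -> SG {1:0.4f}".format(gu, sg))  # ~31.94 GU -> SG ~1.0319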
cb454d310431700e5ac9883a32f0b36e2e50e0fe
|
sensu/plugins/check-keystone-expired-tokens.py
|
sensu/plugins/check-keystone-expired-tokens.py
|
#!/opt/openstack/current/keystone/bin/python
#
# Copyright 2015, Jesse Keating <jlk@bluebox.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'keystone',
'__init__.py')):
sys.path.insert(0, possible_topdir)
from keystone import cli
from keystone.common import environment
from keystone import token
from keystone.common import sql
from oslo.utils import timeutils
WATERMARK=1000
# Monkeypatch the sql Token class to add a method
from keystone.token.persistence.backends.sql import TokenModel
from keystone.token.persistence.backends.sql import Token
def monkeypatch_method(cls):
def decorator(func):
setattr(cls, func.__name__, func)
return func
return decorator
@monkeypatch_method(Token)
def list_tokens(self):
session = sql.get_session()
with session.begin():
now = timeutils.utcnow()
query = session.query(TokenModel)
query = query.filter(TokenModel.expires < now)
tokens = query.all()
if len(tokens) > WATERMARK:
print("Too many expired keystone tokens: %s" % len(tokens))
sys.exit(1)
# Create a class for listing the tokens and add it to the keystone-manage
# command list
class TokenList(cli.BaseApp):
"""List tokens in the DB"""
name = "token_list"
@classmethod
def main(cls):
token_manager = token.persistence.PersistenceManager()
token_manager.driver.list_tokens()
cli.CMDS.append(TokenList)
# Now do our thing
if __name__ == '__main__':
environment.use_stdlib()
dev_conf = os.path.join(possible_topdir,
'etc',
'keystone.conf')
config_files = None
if os.path.exists(dev_conf):
config_files = [dev_conf]
    # keystone-manage wants a command as an argv, so give it token_list
sys.argv.append('token_list')
cli.main(argv=sys.argv, config_files=config_files)
|
Add a check for keystone expired tokens buildup.
|
Add a check for keystone expired tokens buildup.
More than 1K is too many when cleaning every 5 minutes, so warn about
it. The admin may have to go in and more aggressively clean or figure
out why the cleaning isn't completing as expected.
This uses a lot of keystone-manage code to interact with the database,
and monkeypatches itself into that code, which is why it needs to be
executed from the keystone venv.
It also only works with the sql token backend, but that's all we
support.
|
Python
|
apache-2.0
|
aacole/ursula-monitoring,sivakom/ursula-monitoring,sivakom/ursula-monitoring,blueboxgroup/ursula-monitoring,blueboxgroup/ursula-monitoring,sivakom/ursula-monitoring,aacole/ursula-monitoring,aacole/ursula-monitoring,aacole/ursula-monitoring,sivakom/ursula-monitoring,blueboxgroup/ursula-monitoring,blueboxgroup/ursula-monitoring
|
Add a check for keystone expired tokens buildup.
More than 1K is too many when cleaning every 5 minutes, so warn about
it. The admin may have to go in and more aggressively clean or figure
out why the cleaning isn't completing as expected.
This uses a lot of keystone-manage code to interact with the database,
and monkeypatches itself into that code, which is why it needs to be
executed from the keystone venv.
It also only works with the sql token backend, but that's all we
support.
|
#!/opt/openstack/current/keystone/bin/python
#
# Copyright 2015, Jesse Keating <jlk@bluebox.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'keystone',
'__init__.py')):
sys.path.insert(0, possible_topdir)
from keystone import cli
from keystone.common import environment
from keystone import token
from keystone.common import sql
from oslo.utils import timeutils
WATERMARK=1000
# Monkeypatch the sql Token class to add a method
from keystone.token.persistence.backends.sql import TokenModel
from keystone.token.persistence.backends.sql import Token
def monkeypatch_method(cls):
def decorator(func):
setattr(cls, func.__name__, func)
return func
return decorator
@monkeypatch_method(Token)
def list_tokens(self):
session = sql.get_session()
with session.begin():
now = timeutils.utcnow()
query = session.query(TokenModel)
query = query.filter(TokenModel.expires < now)
tokens = query.all()
if len(tokens) > WATERMARK:
print("Too many expired keystone tokens: %s" % len(tokens))
sys.exit(1)
# Create a class for listing the tokens and add it to the keystone-manage
# command list
class TokenList(cli.BaseApp):
"""List tokens in the DB"""
name = "token_list"
@classmethod
def main(cls):
token_manager = token.persistence.PersistenceManager()
token_manager.driver.list_tokens()
cli.CMDS.append(TokenList)
# Now do our thing
if __name__ == '__main__':
environment.use_stdlib()
dev_conf = os.path.join(possible_topdir,
'etc',
'keystone.conf')
config_files = None
if os.path.exists(dev_conf):
config_files = [dev_conf]
    # keystone-manage wants a command as an argv, so give it token_list
sys.argv.append('token_list')
cli.main(argv=sys.argv, config_files=config_files)
|
<commit_before><commit_msg>Add a check for keystone expired tokens buildup.
More than 1K is too many when cleaning every 5 minutes, so warn about
it. The admin may have to go in and more aggressively clean or figure
out why the cleaning isn't completing as expected.
This uses a lot of keystone-manage code to interact with the database,
and monkeypatches itself into that code, which is why it needs to be
executed from the keystone venv.
It also only works with the sql token backend, but that's all we
support.<commit_after>
|
#!/opt/openstack/current/keystone/bin/python
#
# Copyright 2015, Jesse Keating <jlk@bluebox.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'keystone',
'__init__.py')):
sys.path.insert(0, possible_topdir)
from keystone import cli
from keystone.common import environment
from keystone import token
from keystone.common import sql
from oslo.utils import timeutils
WATERMARK=1000
# Monkeypatch the sql Token class to add a method
from keystone.token.persistence.backends.sql import TokenModel
from keystone.token.persistence.backends.sql import Token
def monkeypatch_method(cls):
def decorator(func):
setattr(cls, func.__name__, func)
return func
return decorator
@monkeypatch_method(Token)
def list_tokens(self):
session = sql.get_session()
with session.begin():
now = timeutils.utcnow()
query = session.query(TokenModel)
query = query.filter(TokenModel.expires < now)
tokens = query.all()
if len(tokens) > WATERMARK:
print("Too many expired keystone tokens: %s" % len(tokens))
sys.exit(1)
# Create a class for listing the tokens and add it to the keystone-manage
# command list
class TokenList(cli.BaseApp):
"""List tokens in the DB"""
name = "token_list"
@classmethod
def main(cls):
token_manager = token.persistence.PersistenceManager()
token_manager.driver.list_tokens()
cli.CMDS.append(TokenList)
# Now do our thing
if __name__ == '__main__':
environment.use_stdlib()
dev_conf = os.path.join(possible_topdir,
'etc',
'keystone.conf')
config_files = None
if os.path.exists(dev_conf):
config_files = [dev_conf]
    # keystone-manage wants a command as an argv, so give it token_list
sys.argv.append('token_list')
cli.main(argv=sys.argv, config_files=config_files)
|
Add a check for keystone expired tokens buildup.
More than 1K is too many when cleaning every 5 minutes, so warn about
it. The admin may have to go in and more aggressively clean or figure
out why the cleaning isn't completing as expected.
This uses a lot of keystone-manage code to interact with the database,
and monkeypatches itself into that code, which is why it needs to be
executed from the keystone venv.
It also only works with the sql token backend, but that's all we
support.#!/opt/openstack/current/keystone/bin/python
#
# Copyright 2015, Jesse Keating <jlk@bluebox.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'keystone',
'__init__.py')):
sys.path.insert(0, possible_topdir)
from keystone import cli
from keystone.common import environment
from keystone import token
from keystone.common import sql
from oslo.utils import timeutils
WATERMARK=1000
# Monkeypatch the sql Token class to add a method
from keystone.token.persistence.backends.sql import TokenModel
from keystone.token.persistence.backends.sql import Token
def monkeypatch_method(cls):
def decorator(func):
setattr(cls, func.__name__, func)
return func
return decorator
@monkeypatch_method(Token)
def list_tokens(self):
session = sql.get_session()
with session.begin():
now = timeutils.utcnow()
query = session.query(TokenModel)
query = query.filter(TokenModel.expires < now)
tokens = query.all()
if len(tokens) > WATERMARK:
print("Too many expired keystone tokens: %s" % len(tokens))
sys.exit(1)
# Create a class for listing the tokens and add it to the keystone-manage
# command list
class TokenList(cli.BaseApp):
"""List tokens in the DB"""
name = "token_list"
@classmethod
def main(cls):
token_manager = token.persistence.PersistenceManager()
token_manager.driver.list_tokens()
cli.CMDS.append(TokenList)
# Now do our thing
if __name__ == '__main__':
environment.use_stdlib()
dev_conf = os.path.join(possible_topdir,
'etc',
'keystone.conf')
config_files = None
if os.path.exists(dev_conf):
config_files = [dev_conf]
    # keystone-manage wants a command as an argv, so give it token_list
sys.argv.append('token_list')
cli.main(argv=sys.argv, config_files=config_files)
|
<commit_before><commit_msg>Add a check for keystone expired tokens buildup.
More than 1K is too many when cleaning every 5 minutes, so warn about
it. The admin may have to go in and more aggressively clean or figure
out why the cleaning isn't completing as expected.
This uses a lot of keystone-manage code to interact with the database,
and monkeypatches itself into that code, which is why it needs to be
executed from the keystone venv.
It also only works with the sql token backend, but that's all we
support.<commit_after>#!/opt/openstack/current/keystone/bin/python
#
# Copyright 2015, Jesse Keating <jlk@bluebox.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'keystone',
'__init__.py')):
sys.path.insert(0, possible_topdir)
from keystone import cli
from keystone.common import environment
from keystone import token
from keystone.common import sql
from oslo.utils import timeutils
WATERMARK=1000
# Monkeypatch the sql Token class to add a method
from keystone.token.persistence.backends.sql import TokenModel
from keystone.token.persistence.backends.sql import Token
def monkeypatch_method(cls):
def decorator(func):
setattr(cls, func.__name__, func)
return func
return decorator
@monkeypatch_method(Token)
def list_tokens(self):
session = sql.get_session()
with session.begin():
now = timeutils.utcnow()
query = session.query(TokenModel)
query = query.filter(TokenModel.expires < now)
tokens = query.all()
if len(tokens) > WATERMARK:
print("Too many expired keystone tokens: %s" % len(tokens))
sys.exit(1)
# Create a class for listing the tokens and add it to the keystone-manage
# command list
class TokenList(cli.BaseApp):
"""List tokens in the DB"""
name = "token_list"
@classmethod
def main(cls):
token_manager = token.persistence.PersistenceManager()
token_manager.driver.list_tokens()
cli.CMDS.append(TokenList)
# Now do our thing
if __name__ == '__main__':
environment.use_stdlib()
dev_conf = os.path.join(possible_topdir,
'etc',
'keystone.conf')
config_files = None
if os.path.exists(dev_conf):
config_files = [dev_conf]
    # keystone-manage wants a command as an argv, so give it token_list
sys.argv.append('token_list')
cli.main(argv=sys.argv, config_files=config_files)
|
|
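The check above leans on one reusable trick that is easy to miss in the surrounding boilerplate: monkeypatching a method onto an existing class at runtime. A minimal, standalone sketch of that pattern follows; Existing and hello are hypothetical stand-ins for keystone's sql Token backend and the list_tokens method.
def monkeypatch_method(cls):
    def decorator(func):
        setattr(cls, func.__name__, func)  # attach func to cls at runtime
        return func
    return decorator
class Existing(object):  # stand-in for keystone's sql Token backend
    pass
@monkeypatch_method(Existing)
def hello(self):
    return "patched in"
print(Existing().hello())  # -> "patched in"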
ac40e54d22717fbf1a2444a67198cdba66506df8
|
cea/tests/test_inputs_setup_workflow.py
|
cea/tests/test_inputs_setup_workflow.py
|
import os
import unittest
import cea.config
from cea.utilities import create_polygon
from cea.datamanagement import zone_helper, surroundings_helper, terrain_helper, streets_helper, data_initializer, \
archetypes_mapper
# Zug site coordinates
POLYGON_COORDINATES = [(8.513465734818856, 47.178027239429234), (8.515472027162078, 47.177895971877604),
(8.515214535096632, 47.175496635565885), (8.513139577193424, 47.175600066313542),
(8.513465734818856, 47.178027239429234)]
class TestInputSetupWorkflowCase(unittest.TestCase):
def setUp(self):
self.config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
self.config.project = os.path.expandvars("${TEMP}/reference-case-open")
def test_input_setup_workflow(self):
self.config.create_polygon.coordinates = POLYGON_COORDINATES
self.config.create_polygon.filename = 'site'
data_initializer.main(self.config)
create_polygon.main(self.config)
# TODO: Mock osmnx.create_footprints_download
zone_helper.main(self.config)
surroundings_helper.main(self.config)
terrain_helper.main(self.config)
streets_helper.main(self.config)
archetypes_mapper.main(self.config)
if __name__ == '__main__':
unittest.main()
|
Add test for input setup workflow
|
Add test for input setup workflow
|
Python
|
mit
|
architecture-building-systems/CityEnergyAnalyst,architecture-building-systems/CityEnergyAnalyst,architecture-building-systems/CityEnergyAnalyst
|
Add test for input setup workflow
|
import os
import unittest
import cea.config
from cea.utilities import create_polygon
from cea.datamanagement import zone_helper, surroundings_helper, terrain_helper, streets_helper, data_initializer, \
archetypes_mapper
# Zug site coordinates
POLYGON_COORDINATES = [(8.513465734818856, 47.178027239429234), (8.515472027162078, 47.177895971877604),
(8.515214535096632, 47.175496635565885), (8.513139577193424, 47.175600066313542),
(8.513465734818856, 47.178027239429234)]
class TestInputSetupWorkflowCase(unittest.TestCase):
def setUp(self):
self.config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
self.config.project = os.path.expandvars("${TEMP}/reference-case-open")
def test_input_setup_workflow(self):
self.config.create_polygon.coordinates = POLYGON_COORDINATES
self.config.create_polygon.filename = 'site'
data_initializer.main(self.config)
create_polygon.main(self.config)
# TODO: Mock osmnx.create_footprints_download
zone_helper.main(self.config)
surroundings_helper.main(self.config)
terrain_helper.main(self.config)
streets_helper.main(self.config)
archetypes_mapper.main(self.config)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for input setup workflow<commit_after>
|
import os
import unittest
import cea.config
from cea.utilities import create_polygon
from cea.datamanagement import zone_helper, surroundings_helper, terrain_helper, streets_helper, data_initializer, \
archetypes_mapper
# Zug site coordinates
POLYGON_COORDINATES = [(8.513465734818856, 47.178027239429234), (8.515472027162078, 47.177895971877604),
(8.515214535096632, 47.175496635565885), (8.513139577193424, 47.175600066313542),
(8.513465734818856, 47.178027239429234)]
class TestInputSetupWorkflowCase(unittest.TestCase):
def setUp(self):
self.config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
self.config.project = os.path.expandvars("${TEMP}/reference-case-open")
def test_input_setup_workflow(self):
self.config.create_polygon.coordinates = POLYGON_COORDINATES
self.config.create_polygon.filename = 'site'
data_initializer.main(self.config)
create_polygon.main(self.config)
# TODO: Mock osmnx.create_footprints_download
zone_helper.main(self.config)
surroundings_helper.main(self.config)
terrain_helper.main(self.config)
streets_helper.main(self.config)
archetypes_mapper.main(self.config)
if __name__ == '__main__':
unittest.main()
|
Add test for input setup workflowimport os
import unittest
import cea.config
from cea.utilities import create_polygon
from cea.datamanagement import zone_helper, surroundings_helper, terrain_helper, streets_helper, data_initializer, \
archetypes_mapper
# Zug site coordinates
POLYGON_COORDINATES = [(8.513465734818856, 47.178027239429234), (8.515472027162078, 47.177895971877604),
(8.515214535096632, 47.175496635565885), (8.513139577193424, 47.175600066313542),
(8.513465734818856, 47.178027239429234)]
class TestInputSetupWorkflowCase(unittest.TestCase):
def setUp(self):
self.config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
self.config.project = os.path.expandvars("${TEMP}/reference-case-open")
def test_input_setup_workflow(self):
self.config.create_polygon.coordinates = POLYGON_COORDINATES
self.config.create_polygon.filename = 'site'
data_initializer.main(self.config)
create_polygon.main(self.config)
# TODO: Mock osmnx.create_footprints_download
zone_helper.main(self.config)
surroundings_helper.main(self.config)
terrain_helper.main(self.config)
streets_helper.main(self.config)
archetypes_mapper.main(self.config)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for input setup workflow<commit_after>import os
import unittest
import cea.config
from cea.utilities import create_polygon
from cea.datamanagement import zone_helper, surroundings_helper, terrain_helper, streets_helper, data_initializer, \
archetypes_mapper
# Zug site coordinates
POLYGON_COORDINATES = [(8.513465734818856, 47.178027239429234), (8.515472027162078, 47.177895971877604),
(8.515214535096632, 47.175496635565885), (8.513139577193424, 47.175600066313542),
(8.513465734818856, 47.178027239429234)]
class TestInputSetupWorkflowCase(unittest.TestCase):
def setUp(self):
self.config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
self.config.project = os.path.expandvars("${TEMP}/reference-case-open")
def test_input_setup_workflow(self):
self.config.create_polygon.coordinates = POLYGON_COORDINATES
self.config.create_polygon.filename = 'site'
data_initializer.main(self.config)
create_polygon.main(self.config)
# TODO: Mock osmnx.create_footprints_download
zone_helper.main(self.config)
surroundings_helper.main(self.config)
terrain_helper.main(self.config)
streets_helper.main(self.config)
archetypes_mapper.main(self.config)
if __name__ == '__main__':
unittest.main()
|
|
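One property of the test fixture above worth noting: a footprint polygon ring must be closed, i.e. its first and last coordinate pairs are identical. A tiny self-contained check (the constant is copied verbatim from the test):
POLYGON_COORDINATES = [(8.513465734818856, 47.178027239429234), (8.515472027162078, 47.177895971877604),
                       (8.515214535096632, 47.175496635565885), (8.513139577193424, 47.175600066313542),
                       (8.513465734818856, 47.178027239429234)]
assert POLYGON_COORDINATES[0] == POLYGON_COORDINATES[-1]  # the ring is closed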
582b5c598da5b35032447f0eb7888051b84f844c
|
alembic/versions/20860ffde766_add_datetime_to_fastcache.py
|
alembic/versions/20860ffde766_add_datetime_to_fastcache.py
|
"""Add datetime to fastcache
Revision ID: 20860ffde766
Revises: 471e6f7722a7
Create Date: 2015-04-14 07:44:36.507406
"""
# revision identifiers, used by Alembic.
revision = '20860ffde766'
down_revision = '471e6f7722a7'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('TranslationFastCaches', sa.Column('datetime', sa.DateTime(), nullable=True))
op.create_index(u'ix_TranslationFastCaches_datetime', 'TranslationFastCaches', ['datetime'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(u'ix_TranslationFastCaches_datetime', table_name='TranslationFastCaches')
op.drop_column('TranslationFastCaches', 'datetime')
### end Alembic commands ###
|
Add datetime to fast cache
|
Add datetime to fast cache
|
Python
|
bsd-2-clause
|
porduna/appcomposer,morelab/appcomposer,porduna/appcomposer,morelab/appcomposer,morelab/appcomposer,go-lab/appcomposer,go-lab/appcomposer,porduna/appcomposer,morelab/appcomposer,porduna/appcomposer,go-lab/appcomposer,go-lab/appcomposer
|
Add datetime to fast cache
|
"""Add datetime to fastcache
Revision ID: 20860ffde766
Revises: 471e6f7722a7
Create Date: 2015-04-14 07:44:36.507406
"""
# revision identifiers, used by Alembic.
revision = '20860ffde766'
down_revision = '471e6f7722a7'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('TranslationFastCaches', sa.Column('datetime', sa.DateTime(), nullable=True))
op.create_index(u'ix_TranslationFastCaches_datetime', 'TranslationFastCaches', ['datetime'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(u'ix_TranslationFastCaches_datetime', table_name='TranslationFastCaches')
op.drop_column('TranslationFastCaches', 'datetime')
### end Alembic commands ###
|
<commit_before><commit_msg>Add datetime to fast cache<commit_after>
|
"""Add datetime to fastcache
Revision ID: 20860ffde766
Revises: 471e6f7722a7
Create Date: 2015-04-14 07:44:36.507406
"""
# revision identifiers, used by Alembic.
revision = '20860ffde766'
down_revision = '471e6f7722a7'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('TranslationFastCaches', sa.Column('datetime', sa.DateTime(), nullable=True))
op.create_index(u'ix_TranslationFastCaches_datetime', 'TranslationFastCaches', ['datetime'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(u'ix_TranslationFastCaches_datetime', table_name='TranslationFastCaches')
op.drop_column('TranslationFastCaches', 'datetime')
### end Alembic commands ###
|
Add datetime to fast cache"""Add datetime to fastcache
Revision ID: 20860ffde766
Revises: 471e6f7722a7
Create Date: 2015-04-14 07:44:36.507406
"""
# revision identifiers, used by Alembic.
revision = '20860ffde766'
down_revision = '471e6f7722a7'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('TranslationFastCaches', sa.Column('datetime', sa.DateTime(), nullable=True))
op.create_index(u'ix_TranslationFastCaches_datetime', 'TranslationFastCaches', ['datetime'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(u'ix_TranslationFastCaches_datetime', table_name='TranslationFastCaches')
op.drop_column('TranslationFastCaches', 'datetime')
### end Alembic commands ###
|
<commit_before><commit_msg>Add datetime to fast cache<commit_after>"""Add datetime to fastcache
Revision ID: 20860ffde766
Revises: 471e6f7722a7
Create Date: 2015-04-14 07:44:36.507406
"""
# revision identifiers, used by Alembic.
revision = '20860ffde766'
down_revision = '471e6f7722a7'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('TranslationFastCaches', sa.Column('datetime', sa.DateTime(), nullable=True))
op.create_index(u'ix_TranslationFastCaches_datetime', 'TranslationFastCaches', ['datetime'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(u'ix_TranslationFastCaches_datetime', table_name='TranslationFastCaches')
op.drop_column('TranslationFastCaches', 'datetime')
### end Alembic commands ###
|
|
65b362985d502440b12efc8a6a49ab0603354fd2
|
liwc_emotional_sentences.py
|
liwc_emotional_sentences.py
|
"""Count the numbers of annotated entities and emotional sentences in the
corpus that was manually annotated.
Usage: python annotation_statistics.py <dir containing the folia files with
EmbodiedEmotions annotations>
"""
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import sentence, note
import argparse
import os
from collections import Counter
import json
import codecs
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dir_name', help='the name of the dir containing the '
'FoLiA XML files that should be processed.')
args = parser.parse_args()
dir_name = args.dir_name
act_tag = '{http://ilk.uvt.nl/folia}div'
cur_dir = os.getcwd()
os.chdir(dir_name)
folia_counter = 0
num_sent = 0
num_emotional = 0
stats = Counter()
entity_words = {}
text_stats = {}
emotional_cats = ['liwc-Posemo', 'liwc-Negemo']
print 'Files'
for file_name in os.listdir(dir_name):
folia_counter += 1
print '{}'.format(file_name)
text_id = file_name[0:13]
text_stats[text_id] = Counter()
sents = set()
# load document
context = etree.iterparse(file_name,
events=('start', 'end'),
tag=act_tag,
huge_tree=True)
for event, elem in context:
if event == 'end' and elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
sentences = act_xml.find_all(sentence)
s = None
for sent in sentences:
if not note(sent.parent):
# some t elements appear to be empty (this is not
# allowed, but it happens). So, check whether there is
# a string to add before adding it.
if sent.t:
if sent.t.string:
s = sent.t.string
# calculate stats only for unique sentences in text
if s and s not in sents:
sents.add(s)
num_sent += 1
entities = sent.find_all('entity')
emotional = False
for entity in entities:
e = entity.attrs.get('class')
if e in emotional_cats:
emotional = True
if emotional:
num_emotional += 1
del context
# clear memory
# results in segmentation fault (for some reason)
#if delete:
# elem.clear()
# while elem.getprevious() is not None:
# del elem.getparent()[0]
# del context
# print stats
print '\nBasic stats'
print '{} sentences in {} files'.format(num_sent, folia_counter)
perc = float(num_emotional)/float(num_sent)*100.0
print '{} emotional sentences ({:.2f}%)'.format(num_emotional, perc)
|
Add script to count emotional sentences according to LIWC
|
Add script to count emotional sentences according to LIWC
Added a script that counts the number of emotional sentences in titles
in the corpus. A sentence is considered emotional if it contains at
least one Posemo or Negemo term. The statistical results are written to
standard out.
|
Python
|
apache-2.0
|
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
|
Add script to count emotional sentences according to LIWC
Added a script that counts the number of emotional sentences in titles
in the corpus. A sentence is considered emotional if it contains at
least one Posemo or Negemo term. The statistical results are written to
standard out.
|
"""Count the numbers of annotated entities and emotional sentences in the
corpus that was manually annotated.
Usage: python annotation_statistics.py <dir containing the folia files with
EmbodiedEmotions annotations>
"""
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import sentence, note
import argparse
import os
from collections import Counter
import json
import codecs
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dir_name', help='the name of the dir containing the '
'FoLiA XML files that should be processed.')
args = parser.parse_args()
dir_name = args.dir_name
act_tag = '{http://ilk.uvt.nl/folia}div'
cur_dir = os.getcwd()
os.chdir(dir_name)
folia_counter = 0
num_sent = 0
num_emotional = 0
stats = Counter()
entity_words = {}
text_stats = {}
emotional_cats = ['liwc-Posemo', 'liwc-Negemo']
print 'Files'
for file_name in os.listdir(dir_name):
folia_counter += 1
print '{}'.format(file_name)
text_id = file_name[0:13]
text_stats[text_id] = Counter()
sents = set()
# load document
context = etree.iterparse(file_name,
events=('start', 'end'),
tag=act_tag,
huge_tree=True)
for event, elem in context:
if event == 'end' and elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
sentences = act_xml.find_all(sentence)
s = None
for sent in sentences:
if not note(sent.parent):
# some t elements appear to be empty (this is not
# allowed, but it happens). So, check whether there is
# a string to add before adding it.
if sent.t:
if sent.t.string:
s = sent.t.string
# calculate stats only for unique sentences in text
if s and s not in sents:
sents.add(s)
num_sent += 1
entities = sent.find_all('entity')
emotional = False
for entity in entities:
e = entity.attrs.get('class')
if e in emotional_cats:
emotional = True
if emotional:
num_emotional += 1
del context
# clear memory
# results in segmentation fault (for some reason)
#if delete:
# elem.clear()
# while elem.getprevious() is not None:
# del elem.getparent()[0]
# del context
# print stats
print '\nBasic stats'
print '{} sentences in {} files'.format(num_sent, folia_counter)
perc = float(num_emotional)/float(num_sent)*100.0
print '{} emotional sentences ({:.2f}%)'.format(num_emotional, perc)
|
<commit_before><commit_msg>Add script to count emotional sentences according to LIWC
Added a script that counts the number of emotional sentences in titles
in the corpus. A sentence is considered emotional if it contains at
least one Posemo or Negemo term. The statistical results are written to
standard out.<commit_after>
|
"""Count the numbers of annotated entities and emotional sentences in the
corpus that was manually annotated.
Usage: python annotation_statistics.py <dir containing the folia files with
EmbodiedEmotions annotations>
"""
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import sentence, note
import argparse
import os
from collections import Counter
import json
import codecs
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dir_name', help='the name of the dir containing the '
'FoLiA XML files that should be processed.')
args = parser.parse_args()
dir_name = args.dir_name
act_tag = '{http://ilk.uvt.nl/folia}div'
cur_dir = os.getcwd()
os.chdir(dir_name)
folia_counter = 0
num_sent = 0
num_emotional = 0
stats = Counter()
entity_words = {}
text_stats = {}
emotional_cats = ['liwc-Posemo', 'liwc-Negemo']
print 'Files'
for file_name in os.listdir(dir_name):
folia_counter += 1
print '{}'.format(file_name)
text_id = file_name[0:13]
text_stats[text_id] = Counter()
sents = set()
# load document
context = etree.iterparse(file_name,
events=('start', 'end'),
tag=act_tag,
huge_tree=True)
for event, elem in context:
if event == 'end' and elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
sentences = act_xml.find_all(sentence)
s = None
for sent in sentences:
if not note(sent.parent):
# some t elements appear to be empty (this is not
# allowed, but it happens). So, check whether there is
# a string to add before adding it.
if sent.t:
if sent.t.string:
s = sent.t.string
# calculate stats only for unique sentences in text
if s and s not in sents:
sents.add(s)
num_sent += 1
entities = sent.find_all('entity')
emotional = False
for entity in entities:
e = entity.attrs.get('class')
if e in emotional_cats:
emotional = True
if emotional:
num_emotional += 1
del context
# clear memory
# results in segmentation fault (for some reason)
#if delete:
# elem.clear()
# while elem.getprevious() is not None:
# del elem.getparent()[0]
# del context
# print stats
print '\nBasic stats'
print '{} sentences in {} files'.format(num_sent, folia_counter)
perc = float(num_emotional)/float(num_sent)*100.0
print '{} emotional sentences ({:.2f}%)'.format(num_emotional, perc)
|
Add script to count emotional sentences according to LIWC
Added a script that counts the number of emotional sentences in titles
in the corpus. A sentence is considered emotional if it contains at
least one Posemo or Negemo term. The statistical results are written to
standard out."""Count the numbers of annotated entities and emotional sentences in the
corpus that was manually annotated.
Usage: python annotation_statistics.py <dir containing the folia files with
EmbodiedEmotions annotations>
"""
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import sentence, note
import argparse
import os
from collections import Counter
import json
import codecs
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dir_name', help='the name of the dir containing the '
'FoLiA XML files that should be processed.')
args = parser.parse_args()
dir_name = args.dir_name
act_tag = '{http://ilk.uvt.nl/folia}div'
cur_dir = os.getcwd()
os.chdir(dir_name)
folia_counter = 0
num_sent = 0
num_emotional = 0
stats = Counter()
entity_words = {}
text_stats = {}
emotional_cats = ['liwc-Posemo', 'liwc-Negemo']
print 'Files'
for file_name in os.listdir(dir_name):
folia_counter += 1
print '{}'.format(file_name)
text_id = file_name[0:13]
text_stats[text_id] = Counter()
sents = set()
# load document
context = etree.iterparse(file_name,
events=('start', 'end'),
tag=act_tag,
huge_tree=True)
for event, elem in context:
if event == 'end' and elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
sentences = act_xml.find_all(sentence)
s = None
for sent in sentences:
if not note(sent.parent):
# some t elements appear to be empty (this is not
# allowed, but it happens). So, check whether there is
# a string to add before adding it.
if sent.t:
if sent.t.string:
s = sent.t.string
# calculate stats only for unique sentences in text
if s and s not in sents:
sents.add(s)
num_sent += 1
entities = sent.find_all('entity')
emotional = False
for entity in entities:
e = entity.attrs.get('class')
if e in emotional_cats:
emotional = True
if emotional:
num_emotional += 1
del context
# clear memory
# results in segmentation fault (for some reason)
#if delete:
# elem.clear()
# while elem.getprevious() is not None:
# del elem.getparent()[0]
# del context
# print stats
print '\nBasic stats'
print '{} sentences in {} files'.format(num_sent, folia_counter)
perc = float(num_emotional)/float(num_sent)*100.0
print '{} emotional sentences ({:.2f}%)'.format(num_emotional, perc)
|
<commit_before><commit_msg>Add script to count emotional sentences according to LIWC
Added a script that counts the number of emotional sentences in titles
in the corpus. A sentence is considered emotional if it contains at
least one Posemo or Negemo term. The statistical results are written to
standard out.<commit_after>"""Count the number of emotional sentences (sentences containing at least
one LIWC Posemo or Negemo entity) in the corpus.
Usage: python liwc_emotional_sentences.py <dir containing the folia files with
LIWC annotations>
"""
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import sentence, note
import argparse
import os
from collections import Counter
import json
import codecs
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dir_name', help='the name of the dir containing the '
'FoLiA XML files that should be processed.')
args = parser.parse_args()
dir_name = args.dir_name
act_tag = '{http://ilk.uvt.nl/folia}div'
cur_dir = os.getcwd()
os.chdir(dir_name)
folia_counter = 0
num_sent = 0
num_emotional = 0
stats = Counter()
entity_words = {}
text_stats = {}
emotional_cats = ['liwc-Posemo', 'liwc-Negemo']
print 'Files'
for file_name in os.listdir(dir_name):
folia_counter += 1
print '{}'.format(file_name)
text_id = file_name[0:13]
text_stats[text_id] = Counter()
sents = set()
# load document
context = etree.iterparse(file_name,
events=('start', 'end'),
tag=act_tag,
huge_tree=True)
for event, elem in context:
if event == 'end' and elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
sentences = act_xml.find_all(sentence)
s = None
for sent in sentences:
if not note(sent.parent):
# some t elements appear to be empty (this is not
# allowed, but it happens). So, check whether there is
# a string to add before adding it.
if sent.t:
if sent.t.string:
s = sent.t.string
# calculate stats only for unique sentences in text
if s and s not in sents:
sents.add(s)
num_sent += 1
entities = sent.find_all('entity')
emotional = False
for entity in entities:
e = entity.attrs.get('class')
if e in emotional_cats:
emotional = True
if emotional:
num_emotional += 1
del context
# clear memory
# results in segmentation fault (for some reason)
#if delete:
# elem.clear()
# while elem.getprevious() is not None:
# del elem.getparent()[0]
# del context
# print stats
print '\nBasic stats'
print '{} sentences in {} files'.format(num_sent, folia_counter)
perc = float(num_emotional)/float(num_sent)*100.0
print '{} emotional sentences ({:.2f}%)'.format(num_emotional, perc)
|
|
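The classification rule described in the commit message distills to a single predicate. The sketch below restates it in isolation; entities is assumed to be the list of BeautifulSoup <entity> tags that sent.find_all('entity') yields in the script above.
EMOTIONAL_CATS = {'liwc-Posemo', 'liwc-Negemo'}
def is_emotional(entities):
    # A sentence counts as emotional as soon as one entity carries a
    # Posemo or Negemo class.
    return any(e.attrs.get('class') in EMOTIONAL_CATS for e in entities)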
cc89c5222ec7f6d6f95b5efdce3958b3ca33814e
|
mica/archive/tests/test_aca_dark_cal.py
|
mica/archive/tests/test_aca_dark_cal.py
|
"""
Basic functionality and regression tests for ACA dark cal module.
"""
import numpy as np
from ..aca_dark import dark_cal
def test_date_to_dark_id():
assert dark_cal.date_to_dark_id('2011-01-15T12:00:00') == '2011015'
def test_dark_id_to_date():
assert dark_cal.dark_id_to_date('2011015') == '2011:015'
def test_dark_temp_scale():
scale = dark_cal.dark_temp_scale(-10., -14)
assert np.allclose(scale, 0.70)
def test_get_dark_cal_id():
assert dark_cal.get_dark_cal_id('2007:008', 'nearest') == '2007006'
assert dark_cal.get_dark_cal_id('2007:008', 'before') == '2007006'
assert dark_cal.get_dark_cal_id('2007:008', 'after') == '2007069'
def test_get_dark_cal_image():
image = dark_cal.get_dark_cal_image('2007:008')
assert image.shape == (1024, 1024)
def test_get_dark_cal_props():
props = dark_cal.get_dark_cal_props('2007:008')
assert len(props['replicas']) == 5
assert props['start'] == '2007:006:01:56:46.817'
props = dark_cal.get_dark_cal_props('2007:008', include_image=True)
assert len(props['replicas']) == 5
assert props['start'] == '2007:006:01:56:46.817'
assert props['image'].shape == (1024, 1024)
def test_get_dark_cal_props_table():
props = dark_cal.get_dark_cal_props_table('2007:001', '2008:001')
assert np.allclose(props['eb'], [24.6, 25.89, 51.13, 1.9])
|
Add basic functionality and regression tests for ACA dark cal module
|
Add basic functionality and regression tests for ACA dark cal module
|
Python
|
bsd-3-clause
|
sot/mica,sot/mica
|
Add basic functionality and regression tests for ACA dark cal module
|
"""
Basic functionality and regression tests for ACA dark cal module.
"""
import numpy as np
from ..aca_dark import dark_cal
def test_date_to_dark_id():
assert dark_cal.date_to_dark_id('2011-01-15T12:00:00') == '2011015'
def test_dark_id_to_date():
assert dark_cal.dark_id_to_date('2011015') == '2011:015'
def test_dark_temp_scale():
scale = dark_cal.dark_temp_scale(-10., -14)
assert np.allclose(scale, 0.70)
def test_get_dark_cal_id():
assert dark_cal.get_dark_cal_id('2007:008', 'nearest') == '2007006'
assert dark_cal.get_dark_cal_id('2007:008', 'before') == '2007006'
assert dark_cal.get_dark_cal_id('2007:008', 'after') == '2007069'
def test_get_dark_cal_image():
image = dark_cal.get_dark_cal_image('2007:008')
assert image.shape == (1024, 1024)
def test_get_dark_cal_props():
props = dark_cal.get_dark_cal_props('2007:008')
assert len(props['replicas']) == 5
assert props['start'] == '2007:006:01:56:46.817'
props = dark_cal.get_dark_cal_props('2007:008', include_image=True)
assert len(props['replicas']) == 5
assert props['start'] == '2007:006:01:56:46.817'
assert props['image'].shape == (1024, 1024)
def test_get_dark_cal_props_table():
props = dark_cal.get_dark_cal_props_table('2007:001', '2008:001')
assert np.allclose(props['eb'], [24.6, 25.89, 51.13, 1.9])
|
<commit_before><commit_msg>Add basic functionality and regression tests for ACA dark cal module<commit_after>
|
"""
Basic functionality and regression tests for ACA dark cal module.
"""
import numpy as np
from ..aca_dark import dark_cal
def test_date_to_dark_id():
assert dark_cal.date_to_dark_id('2011-01-15T12:00:00') == '2011015'
def test_dark_id_to_date():
assert dark_cal.dark_id_to_date('2011015') == '2011:015'
def test_dark_temp_scale():
scale = dark_cal.dark_temp_scale(-10., -14)
assert np.allclose(scale, 0.70)
def test_get_dark_cal_id():
assert dark_cal.get_dark_cal_id('2007:008', 'nearest') == '2007006'
assert dark_cal.get_dark_cal_id('2007:008', 'before') == '2007006'
assert dark_cal.get_dark_cal_id('2007:008', 'after') == '2007069'
def test_get_dark_cal_image():
image = dark_cal.get_dark_cal_image('2007:008')
assert image.shape == (1024, 1024)
def test_get_dark_cal_props():
props = dark_cal.get_dark_cal_props('2007:008')
assert len(props['replicas']) == 5
assert props['start'] == '2007:006:01:56:46.817'
props = dark_cal.get_dark_cal_props('2007:008', include_image=True)
assert len(props['replicas']) == 5
assert props['start'] == '2007:006:01:56:46.817'
assert props['image'].shape == (1024, 1024)
def test_get_dark_cal_props_table():
props = dark_cal.get_dark_cal_props_table('2007:001', '2008:001')
assert np.allclose(props['eb'], [24.6, 25.89, 51.13, 1.9])
|
Add basic functionality and regression tests for ACA dark cal module"""
Basic functionality and regression tests for ACA dark cal module.
"""
import numpy as np
from ..aca_dark import dark_cal
def test_date_to_dark_id():
assert dark_cal.date_to_dark_id('2011-01-15T12:00:00') == '2011015'
def test_dark_id_to_date():
assert dark_cal.dark_id_to_date('2011015') == '2011:015'
def test_dark_temp_scale():
scale = dark_cal.dark_temp_scale(-10., -14)
assert np.allclose(scale, 0.70)
def test_get_dark_cal_id():
assert dark_cal.get_dark_cal_id('2007:008', 'nearest') == '2007006'
assert dark_cal.get_dark_cal_id('2007:008', 'before') == '2007006'
assert dark_cal.get_dark_cal_id('2007:008', 'after') == '2007069'
def test_get_dark_cal_image():
image = dark_cal.get_dark_cal_image('2007:008')
assert image.shape == (1024, 1024)
def test_get_dark_cal_props():
props = dark_cal.get_dark_cal_props('2007:008')
assert len(props['replicas']) == 5
assert props['start'] == '2007:006:01:56:46.817'
props = dark_cal.get_dark_cal_props('2007:008', include_image=True)
assert len(props['replicas']) == 5
assert props['start'] == '2007:006:01:56:46.817'
assert props['image'].shape == (1024, 1024)
def test_get_dark_cal_props_table():
props = dark_cal.get_dark_cal_props_table('2007:001', '2008:001')
assert np.allclose(props['eb'], [24.6, 25.89, 51.13, 1.9])
|
<commit_before><commit_msg>Add basic functionality and regression tests for ACA dark cal module<commit_after>"""
Basic functionality and regression tests for ACA dark cal module.
"""
import numpy as np
from ..aca_dark import dark_cal
def test_date_to_dark_id():
assert dark_cal.date_to_dark_id('2011-01-15T12:00:00') == '2011015'
def test_dark_id_to_date():
assert dark_cal.dark_id_to_date('2011015') == '2011:015'
def test_dark_temp_scale():
scale = dark_cal.dark_temp_scale(-10., -14)
assert np.allclose(scale, 0.70)
def test_get_dark_cal_id():
assert dark_cal.get_dark_cal_id('2007:008', 'nearest') == '2007006'
assert dark_cal.get_dark_cal_id('2007:008', 'before') == '2007006'
assert dark_cal.get_dark_cal_id('2007:008', 'after') == '2007069'
def test_get_dark_cal_image():
image = dark_cal.get_dark_cal_image('2007:008')
assert image.shape == (1024, 1024)
def test_get_dark_cal_props():
props = dark_cal.get_dark_cal_props('2007:008')
assert len(props['replicas']) == 5
assert props['start'] == '2007:006:01:56:46.817'
props = dark_cal.get_dark_cal_props('2007:008', include_image=True)
assert len(props['replicas']) == 5
assert props['start'] == '2007:006:01:56:46.817'
assert props['image'].shape == (1024, 1024)
def test_get_dark_cal_props_table():
props = dark_cal.get_dark_cal_props_table('2007:001', '2008:001')
assert np.allclose(props['eb'], [24.6, 25.89, 51.13, 1.9])
|
|
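The regression value in test_dark_temp_scale is consistent with the often-quoted ACA rule of thumb that dark current drops by a factor of ~0.70 for every 4 degC of cooling. The helper below is a hedged approximation of that rule, not mica's actual dark_temp_scale implementation:
import numpy as np
def approx_dark_temp_scale(t_from, t_to, scale_per_4c=0.70):
    # One factor of ~0.70 per 4 degC of cooling (illustrative assumption).
    return scale_per_4c ** ((t_from - t_to) / 4.0)
assert np.allclose(approx_dark_temp_scale(-10., -14.), 0.70)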
5eefc407b8f51c017a3f4193c88f6dc188a88601
|
src/CLAHE_dir.py
|
src/CLAHE_dir.py
|
from PIL import Image
import numpy as np
import h5py
import os
import sys
import cv2
# Maybe consider implementing more involved auto-balancing
# http://wiki.cmci.info/documents/120206pyip_cooking/python_imagej_cookbook#automatic_brightnesscontrast_button
def apply_clahe_to_H5(fn, clahe):
f = h5py.File(fn, "r+")
img = f["/img"]
# apply clahe
arr = clahe.apply(np.array(img))
# stretch distribution across 0-255 range
max_a = np.max(arr)
min_a = np.min(arr)
alpha = 255.0/(max_a - min_a)
beta = -alpha*min_a
arr = (alpha*arr + beta).astype(np.uint8)
# resave image
img[...] = arr
f.close()
def get_H5_array(fn):
f = h5py.File(fn, "r")
return np.array(f["/img"])
def main():
"""Make TIF images of all H5 matrices in directory
"""
dir = os.getcwd()
# file = sys.argv[1]
files = os.listdir(dir)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(63,63))
for file in files:
if file.endswith("1,1_prealigned.h5"):
print "Applying CLAHE to " + file
# if file == 'Tile_r1-c7_S2-W001_sec15.h5':
fn = os.path.join(dir, file)
apply_clahe_to_H5(fn, clahe)
# if __name__ == '__main__':
# main()
|
Include OpenCV based Python CLAHE script
|
Include OpenCV based Python CLAHE script
|
Python
|
mit
|
seung-lab/Julimaps,seung-lab/Julimaps
|
Include OpenCV based Python CLAHE script
|
from PIL import Image
import numpy as np
import h5py
import os
import sys
import cv2
# Maybe consider implementing more involved auto-balancing
# http://wiki.cmci.info/documents/120206pyip_cooking/python_imagej_cookbook#automatic_brightnesscontrast_button
def apply_clahe_to_H5(fn, clahe):
f = h5py.File(fn, "r+")
img = f["/img"]
# apply clahe
arr = clahe.apply(np.array(img))
# stretch distribution across 0-255 range
max_a = np.max(arr)
min_a = np.min(arr)
alpha = 255.0/(max_a - min_a)
beta = -alpha*min_a
arr = (alpha*arr + beta).astype(np.uint8)
# resave image
img[...] = arr
f.close()
def get_H5_array(fn):
f = h5py.File(fn, "r")
return np.array(f["/img"])
def main():
"""Make TIF images of all H5 matrices in directory
"""
dir = os.getcwd()
# file = sys.argv[1]
files = os.listdir(dir)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(63,63))
for file in files:
if file.endswith("1,1_prealigned.h5"):
print "Applying CLAHE to " + file
# if file == 'Tile_r1-c7_S2-W001_sec15.h5':
fn = os.path.join(dir, file)
apply_clahe_to_H5(fn, clahe)
# if __name__ == '__main__':
# main()
|
<commit_before><commit_msg>Include OpenCV based Python CLAHE script<commit_after>
|
from PIL import Image
import numpy as np
import h5py
import os
import sys
import cv2
# Maybe consider implementing more involved auto-balancing
# http://wiki.cmci.info/documents/120206pyip_cooking/python_imagej_cookbook#automatic_brightnesscontrast_button
def apply_clahe_to_H5(fn, clahe):
f = h5py.File(fn, "r+")
img = f["/img"]
# apply clahe
arr = clahe.apply(np.array(img))
# stretch distribution across 0-255 range
max_a = np.max(arr)
min_a = np.min(arr)
alpha = 255.0/(max_a - min_a)
beta = -alpha*min_a
arr = (alpha*arr + beta).astype(np.uint8)
# resave image
img[...] = arr
f.close()
def get_H5_array(fn):
f = h5py.File(fn, "r")
return np.array(f["/img"])
def main():
"""Make TIF images of all H5 matrices in directory
"""
dir = os.getcwd()
# file = sys.argv[1]
files = os.listdir(dir)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(63,63))
for file in files:
if file.endswith("1,1_prealigned.h5"):
print "Applying CLAHE to " + file
# if file == 'Tile_r1-c7_S2-W001_sec15.h5':
fn = os.path.join(dir, file)
apply_clahe_to_H5(fn, clahe)
# if __name__ == '__main__':
# main()
|
Include OpenCV based Python CLAHE scriptfrom PIL import Image
import numpy as np
import h5py
import os
import sys
import cv2
# Maybe consider implementing more involved auto-balancing
# http://wiki.cmci.info/documents/120206pyip_cooking/python_imagej_cookbook#automatic_brightnesscontrast_button
def apply_clahe_to_H5(fn, clahe):
f = h5py.File(fn, "r+")
img = f["/img"]
# apply clahe
arr = clahe.apply(np.array(img))
# stretch distribution across 0-255 range
max_a = np.max(arr)
min_a = np.min(arr)
alpha = 255.0/(max_a - min_a)
beta = -alpha*min_a
arr = (alpha*arr + beta).astype(np.uint8)
# resave image
img[...] = arr
f.close()
def get_H5_array(fn):
f = h5py.File(fn, "r")
return np.array(f["/img"])
def main():
"""Make TIF images of all H5 matrices in directory
"""
dir = os.getcwd()
# file = sys.argv[1]
files = os.listdir(dir)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(63,63))
for file in files:
if file.endswith("1,1_prealigned.h5"):
print "Applying CLAHE to " + file
# if file == 'Tile_r1-c7_S2-W001_sec15.h5':
fn = os.path.join(dir, file)
apply_clahe_to_H5(fn, clahe)
# if __name__ == '__main__':
# main()
|
<commit_before><commit_msg>Include OpenCV based Python CLAHE script<commit_after>from PIL import Image
import numpy as np
import h5py
import os
import sys
import cv2
# Maybe consider implementing more involved auto-balancing
# http://wiki.cmci.info/documents/120206pyip_cooking/python_imagej_cookbook#automatic_brightnesscontrast_button
def apply_clahe_to_H5(fn, clahe):
f = h5py.File(fn, "r+")
img = f["/img"]
# apply clahe
arr = clahe.apply(np.array(img))
# stretch distribution across 0-255 range
max_a = np.max(arr)
min_a = np.min(arr)
alpha = 255.0/(max_a - min_a)
beta = -alpha*min_a
arr = (alpha*arr + beta).astype(np.uint8)
# resave image
img[...] = arr
f.close()
def get_H5_array(fn):
f = h5py.File(fn, "r")
return np.array(f["/img"])
def main():
"""Make TIF images of all H5 matrices in directory
"""
dir = os.getcwd()
# file = sys.argv[1]
files = os.listdir(dir)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(63,63))
for file in files:
if file.endswith("1,1_prealigned.h5"):
print "Applying CLAHE to " + file
# if file == 'Tile_r1-c7_S2-W001_sec15.h5':
fn = os.path.join(dir, file)
apply_clahe_to_H5(fn, clahe)
# if __name__ == '__main__':
# main()
|
|
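After CLAHE, the script applies a plain linear contrast stretch: map the array's [min, max] onto the full [0, 255] range with arr' = alpha*arr + beta. A toy demonstration of that arithmetic on its own:
import numpy as np
arr = np.array([50, 100, 150], dtype=np.float64)
alpha = 255.0 / (arr.max() - arr.min())  # 2.55
beta = -alpha * arr.min()                # -127.5
print((alpha * arr + beta).astype(np.uint8))  # [  0 127 255]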
f0af14b8fcd420b63a47e18938664e14cf9ea968
|
subiquity/utils.py
|
subiquity/utils.py
|
# Copyright 2015 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import errno
import subprocess
import os
import codecs
import pty
from tornado.process import Subprocess
from subiquity.async import Async
import shlex
import logging
log = logging.getLogger("subiquity.utils")
STREAM = Subprocess.STREAM
def run_command_async(cmd, streaming_callback=None):
return Async.pool.submit(run_command, cmd, streaming_callback)
def run_command(cmd, streaming_callback=None):
""" Executes `cmd` sending its output to `streaming_callback`
"""
if isinstance(cmd, str):
cmd = shlex.split(cmd)
log.debug("Running command: {}".format(cmd))
stdoutm, stdouts = pty.openpty()
proc = subprocess.Popen(cmd,
stdout=stdouts,
stderr=subprocess.PIPE)
os.close(stdouts)
decoder = codecs.getincrementaldecoder('utf-8')()
def last_ten_lines(s):
chunk = s[-1500:]
lines = chunk.splitlines(True)
return ''.join(lines[-10:]).replace('\r', '')
decoded_output = ""
try:
while proc.poll() is None:
try:
b = os.read(stdoutm, 512)
except OSError as e:
if e.errno != errno.EIO:
raise
break
else:
final = False
if not b:
final = True
decoded_chars = decoder.decode(b, final)
if decoded_chars is None:
continue
decoded_output += decoded_chars
if streaming_callback:
ls = last_ten_lines(decoded_output)
streaming_callback(ls)
if final:
break
finally:
os.close(stdoutm)
if proc.poll() is None:
proc.kill()
proc.wait()
errors = [l.decode('utf-8') for l in proc.stderr.readlines()]
if streaming_callback:
streaming_callback(last_ten_lines(decoded_output))
errors = ''.join(errors)
if proc.returncode == 0:
return decoded_output.strip()
else:
log.debug("Error with command: "
"[Output] '{}' [Error] '{}'".format(
decoded_output.strip(),
errors.strip()))
raise Exception("Problem running command: [Error] '{}'".format(
errors.strip()))
|
Add generic asynchronous/synchronous run command
|
Add generic asynchronous/synchronous run command
Signed-off-by: Adam Stokes <0a364f4bf549cc82d725fa7fd7ed34404be64079@ubuntu.com>
|
Python
|
agpl-3.0
|
CanonicalLtd/subiquity,CanonicalLtd/subiquity
|
Add generic asynchronous/synchronous run command
Signed-off-by: Adam Stokes <0a364f4bf549cc82d725fa7fd7ed34404be64079@ubuntu.com>
|
# Copyright 2015 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import errno
import subprocess
import os
import codecs
import pty
from tornado.process import Subprocess
from subiquity.async import Async
import shlex
import logging
log = logging.getLogger("subiquity.utils")
STREAM = Subprocess.STREAM
def run_command_async(cmd, streaming_callback=None):
return Async.pool.submit(run_command, cmd, streaming_callback)
def run_command(cmd, streaming_callback=None):
""" Executes `cmd` sending its output to `streaming_callback`
"""
if isinstance(cmd, str):
cmd = shlex.split(cmd)
log.debug("Running command: {}".format(cmd))
stdoutm, stdouts = pty.openpty()
proc = subprocess.Popen(cmd,
stdout=stdouts,
stderr=subprocess.PIPE)
os.close(stdouts)
decoder = codecs.getincrementaldecoder('utf-8')()
def last_ten_lines(s):
chunk = s[-1500:]
lines = chunk.splitlines(True)
return ''.join(lines[-10:]).replace('\r', '')
decoded_output = ""
try:
while proc.poll() is None:
try:
b = os.read(stdoutm, 512)
except OSError as e:
if e.errno != errno.EIO:
raise
break
else:
final = False
if not b:
final = True
decoded_chars = decoder.decode(b, final)
if decoded_chars is None:
continue
decoded_output += decoded_chars
if streaming_callback:
ls = last_ten_lines(decoded_output)
streaming_callback(ls)
if final:
break
finally:
os.close(stdoutm)
if proc.poll() is None:
proc.kill()
proc.wait()
errors = [l.decode('utf-8') for l in proc.stderr.readlines()]
if streaming_callback:
streaming_callback(last_ten_lines(decoded_output))
errors = ''.join(errors)
if proc.returncode == 0:
return decoded_output.strip()
else:
log.debug("Error with command: "
"[Output] '{}' [Error] '{}'".format(
decoded_output.strip(),
errors.strip()))
raise Exception("Problem running command: [Error] '{}'".format(
errors.strip()))
|
<commit_before><commit_msg>Add generic asynchronous/synchronous run command
Signed-off-by: Adam Stokes <0a364f4bf549cc82d725fa7fd7ed34404be64079@ubuntu.com><commit_after>
|
# Copyright 2015 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import errno
import subprocess
import os
import codecs
import pty
from tornado.process import Subprocess
from subiquity.async import Async
import shlex
import logging
log = logging.getLogger("subiquity.utils")
STREAM = Subprocess.STREAM
def run_command_async(cmd, streaming_callback=None):
return Async.pool.submit(run_command, cmd, streaming_callback)
def run_command(cmd, streaming_callback=None):
""" Executes `cmd` sending its output to `streaming_callback`
"""
if isinstance(cmd, str):
cmd = shlex.split(cmd)
log.debug("Running command: {}".format(cmd))
stdoutm, stdouts = pty.openpty()
proc = subprocess.Popen(cmd,
stdout=stdouts,
stderr=subprocess.PIPE)
os.close(stdouts)
decoder = codecs.getincrementaldecoder('utf-8')()
def last_ten_lines(s):
chunk = s[-1500:]
lines = chunk.splitlines(True)
return ''.join(lines[-10:]).replace('\r', '')
decoded_output = ""
try:
while proc.poll() is None:
try:
b = os.read(stdoutm, 512)
except OSError as e:
if e.errno != errno.EIO:
raise
break
else:
final = False
if not b:
final = True
decoded_chars = decoder.decode(b, final)
if decoded_chars is None:
continue
decoded_output += decoded_chars
if streaming_callback:
ls = last_ten_lines(decoded_output)
streaming_callback(ls)
if final:
break
finally:
os.close(stdoutm)
if proc.poll() is None:
proc.kill()
proc.wait()
errors = [l.decode('utf-8') for l in proc.stderr.readlines()]
if streaming_callback:
streaming_callback(last_ten_lines(decoded_output))
errors = ''.join(errors)
if proc.returncode == 0:
return decoded_output.strip()
else:
log.debug("Error with command: "
"[Output] '{}' [Error] '{}'".format(
decoded_output.strip(),
errors.strip()))
raise Exception("Problem running command: [Error] '{}'".format(
errors.strip()))
|
Add generic asynchronous/synchronous run command
Signed-off-by: Adam Stokes <0a364f4bf549cc82d725fa7fd7ed34404be64079@ubuntu.com># Copyright 2015 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import errno
import subprocess
import os
import codecs
import pty
from tornado.process import Subprocess
from subiquity.async import Async
import shlex
import logging
log = logging.getLogger("subiquity.utils")
STREAM = Subprocess.STREAM
def run_command_async(cmd, streaming_callback=None):
return Async.pool.submit(run_command, cmd, streaming_callback)
def run_command(cmd, streaming_callback=None):
""" Executes `cmd` sending its output to `streaming_callback`
"""
if isinstance(cmd, str):
cmd = shlex.split(cmd)
log.debug("Running command: {}".format(cmd))
stdoutm, stdouts = pty.openpty()
proc = subprocess.Popen(cmd,
stdout=stdouts,
stderr=subprocess.PIPE)
os.close(stdouts)
decoder = codecs.getincrementaldecoder('utf-8')()
def last_ten_lines(s):
chunk = s[-1500:]
lines = chunk.splitlines(True)
return ''.join(lines[-10:]).replace('\r', '')
decoded_output = ""
try:
while proc.poll() is None:
try:
b = os.read(stdoutm, 512)
except OSError as e:
if e.errno != errno.EIO:
raise
break
else:
final = False
if not b:
final = True
decoded_chars = decoder.decode(b, final)
if decoded_chars is None:
continue
decoded_output += decoded_chars
if streaming_callback:
ls = last_ten_lines(decoded_output)
streaming_callback(ls)
if final:
break
finally:
os.close(stdoutm)
if proc.poll() is None:
proc.kill()
proc.wait()
errors = [l.decode('utf-8') for l in proc.stderr.readlines()]
if streaming_callback:
streaming_callback(last_ten_lines(decoded_output))
errors = ''.join(errors)
if proc.returncode == 0:
return decoded_output.strip()
else:
log.debug("Error with command: "
"[Output] '{}' [Error] '{}'".format(
decoded_output.strip(),
errors.strip()))
raise Exception("Problem running command: [Error] '{}'".format(
errors.strip()))
|
<commit_before><commit_msg>Add generic asynchronous/synchronous run command
Signed-off-by: Adam Stokes <0a364f4bf549cc82d725fa7fd7ed34404be64079@ubuntu.com><commit_after># Copyright 2015 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import errno
import subprocess
import os
import codecs
import pty
from tornado.process import Subprocess
from subiquity.async import Async
import shlex
import logging
log = logging.getLogger("subiquity.utils")
STREAM = Subprocess.STREAM
def run_command_async(cmd, streaming_callback=None):
return Async.pool.submit(run_command, cmd, streaming_callback)
def run_command(cmd, streaming_callback=None):
""" Executes `cmd` sending its output to `streaming_callback`
"""
if isinstance(cmd, str):
cmd = shlex.split(cmd)
log.debug("Running command: {}".format(cmd))
stdoutm, stdouts = pty.openpty()
proc = subprocess.Popen(cmd,
stdout=stdouts,
stderr=subprocess.PIPE)
os.close(stdouts)
decoder = codecs.getincrementaldecoder('utf-8')()
def last_ten_lines(s):
chunk = s[-1500:]
lines = chunk.splitlines(True)
return ''.join(lines[-10:]).replace('\r', '')
decoded_output = ""
try:
while proc.poll() is None:
try:
b = os.read(stdoutm, 512)
except OSError as e:
if e.errno != errno.EIO:
raise
break
else:
final = False
if not b:
final = True
decoded_chars = decoder.decode(b, final)
if decoded_chars is None:
continue
decoded_output += decoded_chars
if streaming_callback:
ls = last_ten_lines(decoded_output)
streaming_callback(ls)
if final:
break
finally:
os.close(stdoutm)
if proc.poll() is None:
proc.kill()
proc.wait()
errors = [l.decode('utf-8') for l in proc.stderr.readlines()]
if streaming_callback:
streaming_callback(last_ten_lines(decoded_output))
errors = ''.join(errors)
if proc.returncode == 0:
return decoded_output.strip()
else:
log.debug("Error with command: "
"[Output] '{}' [Error] '{}'".format(
decoded_output.strip(),
errors.strip()))
raise Exception("Problem running command: [Error] '{}'".format(
errors.strip()))
|
|
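A note on the record above: a minimal usage sketch for run_command (hypothetical caller, not part of the commit; it assumes subiquity.utils is importable and that ls exists on PATH). run_command returns the stripped stdout on a zero exit code and raises Exception otherwise, while the callback receives a rolling window of at most the last ten output lines:

from subiquity.utils import run_command

def on_progress(last_lines):
    # Called repeatedly with up to the last ten lines decoded so far.
    print("progress: {}".format(last_lines))

output = run_command("ls -la /tmp", streaming_callback=on_progress)
print(output)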
5a21b66f7ab77f419245d8c07d7473a6e1600fc4
|
comics/crawler/crawlers/harkavagrant.py
|
comics/crawler/crawlers/harkavagrant.py
|
from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Hark, A Vagrant!'
language = 'en'
url = 'http://www.harkavagrant.com/'
start_date = '2008-05-01'
history_capable_days = 120
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = -8
rights = 'Kate Beaton'
class ComicCrawler(BaseComicCrawler):
def _get_url(self):
self.parse_feed('http://www.rsspect.com/rss/vagrant.xml')
for entry in self.feed.entries:
if self.timestamp_to_date(entry.updated_parsed) == self.pub_date:
pieces = entry.summary.split('"')
for i, piece in enumerate(pieces):
if piece.count('src='):
self.url = pieces[i + 1]
if piece.count('title='):
self.title = pieces[i + 1]
if self.url and self.title:
return
|
Add crawler for 'Hark, A Vagrant'
|
Add crawler for 'Hark, A Vagrant'
|
Python
|
agpl-3.0
|
jodal/comics,jodal/comics,klette/comics,datagutten/comics,datagutten/comics,datagutten/comics,klette/comics,klette/comics,jodal/comics,jodal/comics,datagutten/comics
|
Add crawler for 'Hark, A Vagrant'
|
from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Hark, A Vagrant!'
language = 'en'
url = 'http://www.harkavagrant.com/'
start_date = '2008-05-01'
history_capable_days = 120
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = -8
rights = 'Kate Beaton'
class ComicCrawler(BaseComicCrawler):
def _get_url(self):
self.parse_feed('http://www.rsspect.com/rss/vagrant.xml')
for entry in self.feed.entries:
if self.timestamp_to_date(entry.updated_parsed) == self.pub_date:
pieces = entry.summary.split('"')
for i, piece in enumerate(pieces):
if piece.count('src='):
self.url = pieces[i + 1]
if piece.count('title='):
self.title = pieces[i + 1]
if self.url and self.title:
return
|
<commit_before><commit_msg>Add crawler for 'Hark, A Vagrant'<commit_after>
|
from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Hark, A Vagrant!'
language = 'en'
url = 'http://www.harkavagrant.com/'
start_date = '2008-05-01'
history_capable_days = 120
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = -8
rights = 'Kate Beaton'
class ComicCrawler(BaseComicCrawler):
def _get_url(self):
self.parse_feed('http://www.rsspect.com/rss/vagrant.xml')
for entry in self.feed.entries:
if self.timestamp_to_date(entry.updated_parsed) == self.pub_date:
pieces = entry.summary.split('"')
for i, piece in enumerate(pieces):
if piece.count('src='):
self.url = pieces[i + 1]
if piece.count('title='):
self.title = pieces[i + 1]
if self.url and self.title:
return
|
Add crawler for 'Hark, A Vagrant'from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Hark, A Vagrant!'
language = 'en'
url = 'http://www.harkavagrant.com/'
start_date = '2008-05-01'
history_capable_days = 120
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = -8
rights = 'Kate Beaton'
class ComicCrawler(BaseComicCrawler):
def _get_url(self):
self.parse_feed('http://www.rsspect.com/rss/vagrant.xml')
for entry in self.feed.entries:
if self.timestamp_to_date(entry.updated_parsed) == self.pub_date:
pieces = entry.summary.split('"')
for i, piece in enumerate(pieces):
if piece.count('src='):
self.url = pieces[i + 1]
if piece.count('title='):
self.title = pieces[i + 1]
if self.url and self.title:
return
|
<commit_before><commit_msg>Add crawler for 'Hark, A Vagrant'<commit_after>from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Hark, A Vagrant!'
language = 'en'
url = 'http://www.harkavagrant.com/'
start_date = '2008-05-01'
history_capable_days = 120
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = -8
rights = 'Kate Beaton'
class ComicCrawler(BaseComicCrawler):
def _get_url(self):
self.parse_feed('http://www.rsspect.com/rss/vagrant.xml')
for entry in self.feed.entries:
if self.timestamp_to_date(entry.updated_parsed) == self.pub_date:
pieces = entry.summary.split('"')
for i, piece in enumerate(pieces):
if piece.count('src='):
self.url = pieces[i + 1]
if piece.count('title='):
self.title = pieces[i + 1]
if self.url and self.title:
return
|
|
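A note on the record above: the crawler extracts attribute values by splitting the entry's HTML summary on double quotes, so whenever a piece contains src= or title=, the next piece is the quoted value. A standalone sketch of that trick (the sample HTML is made up for illustration):

# Split on double quotes: attribute values land right after the piece
# that names the attribute.
summary = '<img src="http://www.harkavagrant.com/history.png" title="Hark!">'
url = title = None
pieces = summary.split('"')
for i, piece in enumerate(pieces):
    if piece.count('src='):
        url = pieces[i + 1]
    if piece.count('title='):
        title = pieces[i + 1]
print(url, title)  # http://www.harkavagrant.com/history.png Hark!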
63143c94cef353d7bae13f7b13650801bb901c94
|
tests/unicode/unicode_pos.py
|
tests/unicode/unicode_pos.py
|
# str methods with explicit start/end pos
print("Привет".startswith("П"))
print("Привет".startswith("р", 1))
print("абвба".find("а", 1))
print("абвба".find("а", 1, -1))
|
Test for explicit start/end args to str methods for unicode.
|
tests: Test for explicit start/end args to str methods for unicode.
|
Python
|
mit
|
hiway/micropython,martinribelotta/micropython,tdautc19841202/micropython,supergis/micropython,torwag/micropython,pfalcon/micropython,galenhz/micropython,TDAbboud/micropython,kerneltask/micropython,heisewangluo/micropython,kerneltask/micropython,pozetroninc/micropython,ericsnowcurrently/micropython,ernesto-g/micropython,omtinez/micropython,misterdanb/micropython,Peetz0r/micropython-esp32,praemdonck/micropython,xyb/micropython,MrSurly/micropython-esp32,ryannathans/micropython,redbear/micropython,infinnovation/micropython,rubencabrera/micropython,EcmaXp/micropython,xhat/micropython,cwyark/micropython,xhat/micropython,micropython/micropython-esp32,swegener/micropython,firstval/micropython,xhat/micropython,emfcamp/micropython,torwag/micropython,ernesto-g/micropython,pramasoul/micropython,toolmacher/micropython,noahwilliamsson/micropython,lbattraw/micropython,infinnovation/micropython,tuc-osg/micropython,slzatz/micropython,blmorris/micropython,pramasoul/micropython,pramasoul/micropython,Peetz0r/micropython-esp32,alex-robbins/micropython,EcmaXp/micropython,ericsnowcurrently/micropython,noahwilliamsson/micropython,tralamazza/micropython,jimkmc/micropython,ryannathans/micropython,micropython/micropython-esp32,PappaPeppar/micropython,kerneltask/micropython,ruffy91/micropython,emfcamp/micropython,dxxb/micropython,ChuckM/micropython,TDAbboud/micropython,aethaniel/micropython,ganshun666/micropython,oopy/micropython,ChuckM/micropython,ruffy91/micropython,suda/micropython,neilh10/micropython,AriZuu/micropython,neilh10/micropython,henriknelson/micropython,ganshun666/micropython,dinau/micropython,pozetroninc/micropython,blmorris/micropython,puuu/micropython,ernesto-g/micropython,ahotam/micropython,cloudformdesign/micropython,mpalomer/micropython,dhylands/micropython,dxxb/micropython,matthewelse/micropython,tobbad/micropython,galenhz/micropython,praemdonck/micropython,EcmaXp/micropython,swegener/micropython,jmarcelino/pycom-micropython,misterdanb/micropython,tuc-osg/micropython,galenhz/micropython,mgyenik/micropython,oopy/micropython,ahotam/micropython,toolmacher/micropython,stonegithubs/micropython,lbattraw/micropython,emfcamp/micropython,drrk/micropython,heisewangluo/micropython,ganshun666/micropython,dmazzella/micropython,PappaPeppar/micropython,jmarcelino/pycom-micropython,tdautc19841202/micropython,dhylands/micropython,pramasoul/micropython,trezor/micropython,alex-robbins/micropython,mhoffma/micropython,stonegithubs/micropython,Peetz0r/micropython-esp32,misterdanb/micropython,cloudformdesign/micropython,ganshun666/micropython,suda/micropython,PappaPeppar/micropython,pramasoul/micropython,tobbad/micropython,noahwilliamsson/micropython,tdautc19841202/micropython,SHA2017-badge/micropython-esp32,kostyll/micropython,ericsnowcurrently/micropython,kostyll/micropython,supergis/micropython,Timmenem/micropython,pozetroninc/micropython,adafruit/circuitpython,hiway/micropython,micropython/micropython-esp32,mianos/micropython,slzatz/micropython,selste/micropython,tobbad/micropython,kostyll/micropython,warner83/micropython,alex-march/micropython,adafruit/micropython,dxxb/micropython,tobbad/micropython,vriera/micropython,omtinez/micropython,hiway/micropython,dhylands/micropython,ceramos/micropython,drrk/micropython,vriera/micropython,vitiral/micropython,lowRISC/micropython,methoxid/micropystat,utopiaprince/micropython,orionrobots/micropython,lbattraw/micropython,mpalomer/micropython,kostyll/micropython,cnoviello/micropython,chrisdearman/micropython,pozetroninc/micropython,martinribelotta/micropython,lbattraw/micropyth
on,noahwilliamsson/micropython,MrSurly/micropython-esp32,Peetz0r/micropython-esp32,MrSurly/micropython,xyb/micropython,neilh10/micropython,hiway/micropython,PappaPeppar/micropython,EcmaXp/micropython,cnoviello/micropython,martinribelotta/micropython,jimkmc/micropython,suda/micropython,cwyark/micropython,dxxb/micropython,martinribelotta/micropython,heisewangluo/micropython,orionrobots/micropython,paul-xxx/micropython,suda/micropython,SungEun-Steve-Kim/test-mp,jmarcelino/pycom-micropython,skybird6672/micropython,oopy/micropython,bvernoux/micropython,lowRISC/micropython,tdautc19841202/micropython,deshipu/micropython,galenhz/micropython,adafruit/circuitpython,alex-robbins/micropython,ericsnowcurrently/micropython,turbinenreiter/micropython,alex-march/micropython,SungEun-Steve-Kim/test-mp,matthewelse/micropython,matthewelse/micropython,Vogtinator/micropython,mgyenik/micropython,ceramos/micropython,paul-xxx/micropython,oopy/micropython,methoxid/micropystat,feilongfl/micropython,warner83/micropython,utopiaprince/micropython,cnoviello/micropython,chrisdearman/micropython,deshipu/micropython,adamkh/micropython,methoxid/micropystat,pfalcon/micropython,ceramos/micropython,Timmenem/micropython,HenrikSolver/micropython,xyb/micropython,skybird6672/micropython,mpalomer/micropython,vitiral/micropython,warner83/micropython,xuxiaoxin/micropython,emfcamp/micropython,henriknelson/micropython,swegener/micropython,mpalomer/micropython,noahwilliamsson/micropython,ruffy91/micropython,KISSMonX/micropython,aethaniel/micropython,ericsnowcurrently/micropython,Vogtinator/micropython,galenhz/micropython,methoxid/micropystat,KISSMonX/micropython,TDAbboud/micropython,deshipu/micropython,praemdonck/micropython,adamkh/micropython,Timmenem/micropython,AriZuu/micropython,cloudformdesign/micropython,skybird6672/micropython,hiway/micropython,henriknelson/micropython,xyb/micropython,jlillest/micropython,aethaniel/micropython,SHA2017-badge/micropython-esp32,drrk/micropython,dhylands/micropython,SHA2017-badge/micropython-esp32,methoxid/micropystat,kerneltask/micropython,tuc-osg/micropython,TDAbboud/micropython,matthewelse/micropython,bvernoux/micropython,mgyenik/micropython,alex-march/micropython,ceramos/micropython,ChuckM/micropython,trezor/micropython,micropython/micropython-esp32,adafruit/micropython,orionrobots/micropython,kerneltask/micropython,cnoviello/micropython,dmazzella/micropython,jlillest/micropython,Timmenem/micropython,ernesto-g/micropython,xuxiaoxin/micropython,vriera/micropython,MrSurly/micropython-esp32,martinribelotta/micropython,orionrobots/micropython,vitiral/micropython,toolmacher/micropython,Vogtinator/micropython,dhylands/micropython,tdautc19841202/micropython,MrSurly/micropython,adafruit/circuitpython,danicampora/micropython,alex-robbins/micropython,noahchense/micropython,ChuckM/micropython,dinau/micropython,feilongfl/micropython,omtinez/micropython,jlillest/micropython,puuu/micropython,adafruit/micropython,jlillest/micropython,adafruit/micropython,stonegithubs/micropython,hosaka/micropython,mhoffma/micropython,mpalomer/micropython,micropython/micropython-esp32,blmorris/micropython,SungEun-Steve-Kim/test-mp,noahchense/micropython,hosaka/micropython,chrisdearman/micropython,drrk/micropython,MrSurly/micropython,selste/micropython,feilongfl/micropython,selste/micropython,blazewicz/micropython,infinnovation/micropython,hosaka/micropython,swegener/micropython,warner83/micropython,tralamazza/micropython,MrSurly/micropython,MrSurly/micropython-esp32,torwag/micropython,neilh10/micropython,cloudformdesign/micropyth
on,pfalcon/micropython,xuxiaoxin/micropython,mhoffma/micropython,blmorris/micropython,turbinenreiter/micropython,ruffy91/micropython,ceramos/micropython,xhat/micropython,torwag/micropython,PappaPeppar/micropython,supergis/micropython,puuu/micropython,omtinez/micropython,xuxiaoxin/micropython,redbear/micropython,turbinenreiter/micropython,mhoffma/micropython,adamkh/micropython,dmazzella/micropython,chrisdearman/micropython,toolmacher/micropython,paul-xxx/micropython,slzatz/micropython,torwag/micropython,noahchense/micropython,adafruit/circuitpython,SHA2017-badge/micropython-esp32,suda/micropython,hosaka/micropython,blazewicz/micropython,adafruit/micropython,mianos/micropython,noahchense/micropython,xyb/micropython,ahotam/micropython,deshipu/micropython,ernesto-g/micropython,turbinenreiter/micropython,ryannathans/micropython,omtinez/micropython,dmazzella/micropython,lowRISC/micropython,vitiral/micropython,feilongfl/micropython,matthewelse/micropython,chrisdearman/micropython,noahchense/micropython,xhat/micropython,blmorris/micropython,tralamazza/micropython,bvernoux/micropython,puuu/micropython,ryannathans/micropython,alex-march/micropython,vriera/micropython,skybird6672/micropython,firstval/micropython,deshipu/micropython,paul-xxx/micropython,henriknelson/micropython,danicampora/micropython,supergis/micropython,lbattraw/micropython,utopiaprince/micropython,tuc-osg/micropython,AriZuu/micropython,Vogtinator/micropython,mianos/micropython,danicampora/micropython,feilongfl/micropython,matthewelse/micropython,dinau/micropython,warner83/micropython,lowRISC/micropython,tobbad/micropython,HenrikSolver/micropython,adamkh/micropython,MrSurly/micropython-esp32,praemdonck/micropython,oopy/micropython,jimkmc/micropython,adafruit/circuitpython,dinau/micropython,Timmenem/micropython,skybird6672/micropython,toolmacher/micropython,blazewicz/micropython,adamkh/micropython,redbear/micropython,rubencabrera/micropython,jmarcelino/pycom-micropython,rubencabrera/micropython,infinnovation/micropython,tralamazza/micropython,heisewangluo/micropython,KISSMonX/micropython,adafruit/circuitpython,pfalcon/micropython,ganshun666/micropython,slzatz/micropython,trezor/micropython,jmarcelino/pycom-micropython,HenrikSolver/micropython,ahotam/micropython,turbinenreiter/micropython,pozetroninc/micropython,ruffy91/micropython,heisewangluo/micropython,mianos/micropython,supergis/micropython,trezor/micropython,redbear/micropython,cwyark/micropython,hosaka/micropython,danicampora/micropython,aethaniel/micropython,cloudformdesign/micropython,orionrobots/micropython,firstval/micropython,utopiaprince/micropython,trezor/micropython,slzatz/micropython,vitiral/micropython,xuxiaoxin/micropython,emfcamp/micropython,firstval/micropython,selste/micropython,cwyark/micropython,pfalcon/micropython,alex-robbins/micropython,aethaniel/micropython,HenrikSolver/micropython,puuu/micropython,SungEun-Steve-Kim/test-mp,selste/micropython,SHA2017-badge/micropython-esp32,ryannathans/micropython,AriZuu/micropython,mianos/micropython,mgyenik/micropython,lowRISC/micropython,jimkmc/micropython,blazewicz/micropython,ChuckM/micropython,kostyll/micropython,Peetz0r/micropython-esp32,dinau/micropython,misterdanb/micropython,SungEun-Steve-Kim/test-mp,TDAbboud/micropython,alex-march/micropython,firstval/micropython,mgyenik/micropython,swegener/micropython,drrk/micropython,vriera/micropython,bvernoux/micropython,cnoviello/micropython,MrSurly/micropython,blazewicz/micropython,paul-xxx/micropython,utopiaprince/micropython,dxxb/micropython,praemdonck/micropython,ahotam/m
icropython,bvernoux/micropython,misterdanb/micropython,KISSMonX/micropython,Vogtinator/micropython,neilh10/micropython,henriknelson/micropython,cwyark/micropython,mhoffma/micropython,KISSMonX/micropython,rubencabrera/micropython,redbear/micropython,danicampora/micropython,jlillest/micropython,jimkmc/micropython,HenrikSolver/micropython,stonegithubs/micropython,EcmaXp/micropython,rubencabrera/micropython,tuc-osg/micropython,stonegithubs/micropython,AriZuu/micropython,infinnovation/micropython
|
tests: Test for explicit start/end args to str methods for unicode.
|
# str methods with explicit start/end pos
print("Привет".startswith("П"))
print("Привет".startswith("р", 1))
print("абвба".find("а", 1))
print("абвба".find("а", 1, -1))
|
<commit_before><commit_msg>tests: Test for explicit start/end args to str methods for unicode.<commit_after>
|
# str methods with explicit start/end pos
print("Привет".startswith("П"))
print("Привет".startswith("р", 1))
print("абвба".find("а", 1))
print("абвба".find("а", 1, -1))
|
tests: Test for explicit start/end args to str methods for unicode.# str methods with explicit start/end pos
print("Привет".startswith("П"))
print("Привет".startswith("р", 1))
print("абвба".find("а", 1))
print("абвба".find("а", 1, -1))
|
<commit_before><commit_msg>tests: Test for explicit start/end args to str methods for unicode.<commit_after># str methods with explicit start/end pos
print("Привет".startswith("П"))
print("Привет".startswith("р", 1))
print("абвба".find("а", 1))
print("абвба".find("а", 1, -1))
|
|
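A note on the record above: the start/end arguments count code points, not bytes, which is exactly what the test pins down for multi-byte Cyrillic strings. Expected output under CPython semantics, which MicroPython is meant to match:

print("Привет".startswith("П"))     # True
print("Привет".startswith("р", 1))  # True: index 1 is the code point "р"
print("абвба".find("а", 1))         # 4: the search skips the "а" at index 0
print("абвба".find("а", 1, -1))     # -1: the slice [1, 4) contains no "а"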
f6f75172b1b8a41fc5ae025416ea665258d4ff4c
|
favicon-update.py
|
favicon-update.py
|
from PIL import Image
import requests
from io import BytesIO
# This whole script was done using Google and StackOverflow
# How to generate ico files
# https://stackoverflow.com/a/36168447/1697953
# How to get GitHub avatar location from username
# https://stackoverflow.com/a/36380674/1697953
# How to read image data from URL
# https://stackoverflow.com/a/23489503/1697953
# How to follow redirects in requests
# https://stackoverflow.com/a/50606372/1697953
avatarUrl = 'https://github.com/sorashi.png'
if __name__ == "__main__":
r = requests.head(avatarUrl, allow_redirects=True)
print('Avatar located at ' + r.url)
response = requests.get(r.url)
img = Image.open(BytesIO(response.content))
img.save('favicon.ico', sizes=[(16, 16), (32, 32), (48, 48), (64, 64)])
|
Add script for updating favicon from gh avatar
|
Add script for updating favicon from gh avatar
|
Python
|
mit
|
Sorashi/sorashi.github.io
|
Add script for updating favicon from gh avatar
|
from PIL import Image
import requests
from io import BytesIO
# This whole script was done using Google and StackOverflow
# How to generate ico files
# https://stackoverflow.com/a/36168447/1697953
# How to get GitHub avatar location from username
# https://stackoverflow.com/a/36380674/1697953
# How to read image data from URL
# https://stackoverflow.com/a/23489503/1697953
# How to follow redirects in requests
# https://stackoverflow.com/a/50606372/1697953
avatarUrl = 'https://github.com/sorashi.png'
if __name__ == "__main__":
r = requests.head(avatarUrl, allow_redirects=True)
print('Avatar located at ' + r.url)
response = requests.get(r.url)
img = Image.open(BytesIO(response.content))
img.save('favicon.ico', sizes=[(16, 16), (32, 32), (48, 48), (64, 64)])
|
<commit_before><commit_msg>Add script for updating favicon from gh avatar<commit_after>
|
from PIL import Image
import requests
from io import BytesIO
# This whole script was done using Google and StackOverflow
# How to generate ico files
# https://stackoverflow.com/a/36168447/1697953
# How to get GitHub avatar location from username
# https://stackoverflow.com/a/36380674/1697953
# How to read image data from URL
# https://stackoverflow.com/a/23489503/1697953
# How to follow redirects in requests
# https://stackoverflow.com/a/50606372/1697953
avatarUrl = 'https://github.com/sorashi.png'
if __name__ == "__main__":
r = requests.head(avatarUrl, allow_redirects=True)
print('Avatar located at ' + r.url)
response = requests.get(r.url)
img = Image.open(BytesIO(response.content))
img.save('favicon.ico', sizes=[(16, 16), (32, 32), (48, 48), (64, 64)])
|
Add script for updating favicon from gh avatarfrom PIL import Image
import requests
from io import BytesIO
# This whole script was done using Google and StackOverflow
# How to generate ico files
# https://stackoverflow.com/a/36168447/1697953
# How to get GitHub avatar location from username
# https://stackoverflow.com/a/36380674/1697953
# How to read image data from URL
# https://stackoverflow.com/a/23489503/1697953
# How to follow redirects in requests
# https://stackoverflow.com/a/50606372/1697953
avatarUrl = 'https://github.com/sorashi.png'
if __name__ == "__main__":
r = requests.head(avatarUrl, allow_redirects=True)
print('Avatar located at ' + r.url)
response = requests.get(r.url)
img = Image.open(BytesIO(response.content))
img.save('favicon.ico', sizes=[(16, 16), (32, 32), (48, 48), (64, 64)])
|
<commit_before><commit_msg>Add script for updating favicon from gh avatar<commit_after>from PIL import Image
import requests
from io import BytesIO
# This whole script was done using Google and StackOverflow
# How to generate ico files
# https://stackoverflow.com/a/36168447/1697953
# How to get GitHub avatar location from username
# https://stackoverflow.com/a/36380674/1697953
# How to read image data from URL
# https://stackoverflow.com/a/23489503/1697953
# How to follow redirects in requests
# https://stackoverflow.com/a/50606372/1697953
avatarUrl = 'https://github.com/sorashi.png'
if __name__ == "__main__":
r = requests.head(avatarUrl, allow_redirects=True)
print('Avatar located at ' + r.url)
response = requests.get(r.url)
img = Image.open(BytesIO(response.content))
img.save('favicon.ico', sizes=[(16, 16), (32, 32), (48, 48), (64, 64)])
|
|
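A note on the record above: a hypothetical variation that takes the GitHub username as a parameter; same requests/Pillow flow, and favicon_for is a made-up name for illustration:

from io import BytesIO

import requests
from PIL import Image

def favicon_for(username, out='favicon.ico'):
    # GitHub redirects /<user>.png to the avatar CDN; requests follows it.
    response = requests.get('https://github.com/{}.png'.format(username))
    img = Image.open(BytesIO(response.content))
    img.save(out, sizes=[(16, 16), (32, 32), (48, 48), (64, 64)])

favicon_for('sorashi')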
70d912bfb1ccec03edfe92b9b2c87610346c8f42
|
corehq/doctypemigrations/migrations/0006_domain_migration_20151118.py
|
corehq/doctypemigrations/migrations/0006_domain_migration_20151118.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from corehq.doctypemigrations.djangomigrations import assert_initial_complete
from corehq.doctypemigrations.migrator_instances import domains_migration
class Migration(migrations.Migration):
dependencies = [
('doctypemigrations', '0005_auto_20151013_0819'),
]
operations = [
migrations.RunPython(assert_initial_complete(domains_migration))
]
|
Add blocking migration for new domain db
|
Add blocking migration for new domain db
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq
|
Add blocking migration for new domain db
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from corehq.doctypemigrations.djangomigrations import assert_initial_complete
from corehq.doctypemigrations.migrator_instances import domains_migration
class Migration(migrations.Migration):
dependencies = [
('doctypemigrations', '0005_auto_20151013_0819'),
]
operations = [
migrations.RunPython(assert_initial_complete(domains_migration))
]
|
<commit_before><commit_msg>Add blocking migration for new domain db<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from corehq.doctypemigrations.djangomigrations import assert_initial_complete
from corehq.doctypemigrations.migrator_instances import domains_migration
class Migration(migrations.Migration):
dependencies = [
('doctypemigrations', '0005_auto_20151013_0819'),
]
operations = [
migrations.RunPython(assert_initial_complete(domains_migration))
]
|
Add blocking migration for new domain db# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from corehq.doctypemigrations.djangomigrations import assert_initial_complete
from corehq.doctypemigrations.migrator_instances import domains_migration
class Migration(migrations.Migration):
dependencies = [
('doctypemigrations', '0005_auto_20151013_0819'),
]
operations = [
migrations.RunPython(assert_initial_complete(domains_migration))
]
|
<commit_before><commit_msg>Add blocking migration for new domain db<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from corehq.doctypemigrations.djangomigrations import assert_initial_complete
from corehq.doctypemigrations.migrator_instances import domains_migration
class Migration(migrations.Migration):
dependencies = [
('doctypemigrations', '0005_auto_20151013_0819'),
]
operations = [
migrations.RunPython(assert_initial_complete(domains_migration))
]
|
|
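A note on the record above: a RunPython callable receives (apps, schema_editor) and can abort the migration run by raising, which is how this migration blocks deploys until the doc type copy is done. A hypothetical sketch of a checker in that style (the real assert_initial_complete lives in corehq.doctypemigrations.djangomigrations and its internals are not shown in this record):

def assert_initial_complete(migrator):
    def check(apps, schema_editor):
        # is_migration_completed is a made-up attribute for illustration.
        if not migrator.is_migration_completed:
            raise RuntimeError(
                "Finish the {} doc type migration before applying this "
                "Django migration.".format(migrator))
    return check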
d0e5ea752912b10e473b2a05da9196800eb6ca86
|
examples/redis_lock.py
|
examples/redis_lock.py
|
import random
from diesel import fork, quickstop, quickstart, sleep
from diesel.protocols.redis import RedisClient, RedisTransactionError, RedisLock, LockNotAcquired
"""Implement the Redis INCR command using a lock. Obviously this is inefficient, but it's a good
example of how to use the RedisLock class"""
key = 'test-lock-key'
incr_key = 'test-incr-key'
counter = 0
"""If sleep_factor > lock_timeout you are exercising the timeout loop, otherwise, that loop should be a noop"""
lock_timeout = 3
sleep_factor = 1
def take_lock():
global counter
client = RedisClient('localhost', 6379)
try:
with RedisLock(client, key, timeout=lock_timeout) as lock:
v = client.get(incr_key)
sleep(random.random() * sleep_factor)
client.set(incr_key, int(v) + 1)
counter += 1
except LockNotAcquired:
pass
def main():
client = RedisClient('localhost', 6379)
client.delete(key)
client.set(incr_key, 0)
for _ in xrange(500):
fork(take_lock)
if random.random() > 0.1:
sleep(random.random() / 10)
sleep(2)
assert counter == int(client.get(incr_key)), 'Incr failed!'
quickstop()
quickstart(main)
|
Add an example for the RedisLock
|
Add an example for the RedisLock
|
Python
|
bsd-3-clause
|
dieseldev/diesel
|
Add an example for the RedisLock
|
import random
from diesel import fork, quickstop, quickstart, sleep
from diesel.protocols.redis import RedisClient, RedisTransactionError, RedisLock, LockNotAcquired
"""Implement the Redis INCR command using a lock. Obviously this is inefficient, but it's a good
example of how to use the RedisLock class"""
key = 'test-lock-key'
incr_key = 'test-incr-key'
counter = 0
"""If sleep_factor > lock_timeout you are exercising the timeout loop, otherwise, that loop should be a noop"""
lock_timeout = 3
sleep_factor = 1
def take_lock():
global counter
client = RedisClient('localhost', 6379)
try:
with RedisLock(client, key, timeout=lock_timeout) as lock:
v = client.get(incr_key)
sleep(random.random() * sleep_factor)
client.set(incr_key, int(v) + 1)
counter += 1
except LockNotAcquired:
pass
def main():
client = RedisClient('localhost', 6379)
client.delete(key)
client.set(incr_key, 0)
for _ in xrange(500):
fork(take_lock)
if random.random() > 0.1:
sleep(random.random() / 10)
sleep(2)
assert counter == int(client.get(incr_key)), 'Incr failed!'
quickstop()
quickstart(main)
|
<commit_before><commit_msg>Add an example for the RedisLock<commit_after>
|
import random
from diesel import fork, quickstop, quickstart, sleep
from diesel.protocols.redis import RedisClient, RedisTransactionError, RedisLock, LockNotAcquired
"""Implement the Redis INCR command using a lock. Obviously this is inefficient, but it's a good
example of how to use the RedisLock class"""
key = 'test-lock-key'
incr_key = 'test-incr-key'
counter = 0
"""If sleep_factor > lock_timeout you are exercising the timeout loop, otherwise, that loop should be a noop"""
lock_timeout = 3
sleep_factor = 1
def take_lock():
global counter
client = RedisClient('localhost', 6379)
try:
with RedisLock(client, key, timeout=lock_timeout) as lock:
v = client.get(incr_key)
sleep(random.random() * sleep_factor)
client.set(incr_key, int(v) + 1)
counter += 1
except LockNotAcquired:
pass
def main():
client = RedisClient('localhost', 6379)
client.delete(key)
client.set(incr_key, 0)
for _ in xrange(500):
fork(take_lock)
if random.random() > 0.1:
sleep(random.random() / 10)
sleep(2)
assert counter == int(client.get(incr_key)), 'Incr failed!'
quickstop()
quickstart(main)
|
Add an example for the RedisLockimport random
from diesel import fork, quickstop, quickstart, sleep
from diesel.protocols.redis import RedisClient, RedisTransactionError, RedisLock, LockNotAcquired
"""Implement the Redis INCR command using a lock. Obviously this is inefficient, but it's a good
example of how to use the RedisLock class"""
key = 'test-lock-key'
incr_key = 'test-incr-key'
counter = 0
"""If sleep_factor > lock_timeout you are exercising the timeout loop, otherwise, that loop should be a noop"""
lock_timeout = 3
sleep_factor = 1
def take_lock():
global counter
client = RedisClient('localhost', 6379)
try:
with RedisLock(client, key, timeout=lock_timeout) as lock:
v = client.get(incr_key)
sleep(random.random() * sleep_factor)
client.set(incr_key, int(v) + 1)
counter += 1
except LockNotAcquired:
pass
def main():
client = RedisClient('localhost', 6379)
client.delete(key)
client.set(incr_key, 0)
for _ in xrange(500):
fork(take_lock)
if random.random() > 0.1:
sleep(random.random() / 10)
sleep(2)
assert counter == int(client.get(incr_key)), 'Incr failed!'
quickstop()
quickstart(main)
|
<commit_before><commit_msg>Add an example for the RedisLock<commit_after>import random
from diesel import fork, quickstop, quickstart, sleep
from diesel.protocols.redis import RedisClient, RedisTransactionError, RedisLock, LockNotAcquired
"""Implement the Redis INCR command using a lock. Obviously this is inefficient, but it's a good
example of how to use the RedisLock class"""
key = 'test-lock-key'
incr_key = 'test-incr-key'
counter = 0
"""If sleep_factor > lock_timeout you are exercising the timeout loop, otherwise, that loop should be a noop"""
lock_timeout = 3
sleep_factor = 1
def take_lock():
global counter
client = RedisClient('localhost', 6379)
try:
with RedisLock(client, key, timeout=lock_timeout) as lock:
v = client.get(incr_key)
sleep(random.random() * sleep_factor)
client.set(incr_key, int(v) + 1)
counter += 1
except LockNotAcquired:
pass
def main():
client = RedisClient('localhost', 6379)
client.delete(key)
client.set(incr_key, 0)
for _ in xrange(500):
fork(take_lock)
if random.random() > 0.1:
sleep(random.random() / 10)
sleep(2)
assert counter == int(client.get(incr_key)), 'Incr failed!'
quickstop()
quickstart(main)
|
|
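A note on the record above: stripped to its core, the pattern being exercised is below (a minimal sketch assuming a local Redis on the default port). The assertion in main() holds because the lock serializes the get/sleep/set read-modify-write on incr_key:

from diesel import quickstart, quickstop
from diesel.protocols.redis import RedisClient, RedisLock, LockNotAcquired

def critical_section():
    client = RedisClient('localhost', 6379)
    try:
        with RedisLock(client, 'demo-lock', timeout=3):
            pass  # work that must not run concurrently goes here
    except LockNotAcquired:
        pass  # another green thread holds the lock; retry or give up
    quickstop()

quickstart(critical_section)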
d0b8c68ae3c8acbc3d5dfe13842e3c41a198b978
|
fix_notions_db.py
|
fix_notions_db.py
|
from alignements_backend.db import DB
from alignements_backend.notion import Notion
for notion in DB.scan_iter(match='notion:*'):
n = Notion(list(DB.sscan_iter(notion)))
|
Add script to fix all notions
|
Add script to fix all notions
|
Python
|
mit
|
l-vincent-l/alignements_backend
|
Add script to fix all notions
|
from alignements_backend.db import DB
from alignements_backend.notion import Notion
for notion in DB.scan_iter(match='notion:*'):
n = Notion(list(DB.sscan_iter(notion)))
|
<commit_before><commit_msg>Add script to fix all notions<commit_after>
|
from alignements_backend.db import DB
from alignements_backend.notion import Notion
for notion in DB.scan_iter(match='notion:*'):
n = Notion(list(DB.sscan_iter(notion)))
|
Add script to fix all notionsfrom alignements_backend.db import DB
from alignements_backend.notion import Notion
for notion in DB.scan_iter(match='notion:*'):
n = Notion(list(DB.sscan_iter(notion)))
|
<commit_before><commit_msg>Add script to fix all notions<commit_after>from alignements_backend.db import DB
from alignements_backend.notion import Notion
for notion in DB.scan_iter(match='notion:*'):
n = Notion(list(DB.sscan_iter(notion)))
|
|
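A note on the record above: constructing a Notion from the set's members is what performs the fix. The same loop with a progress counter, a small hypothetical extension (scan_iter and sscan_iter stream keys and members in chunks, so this does not block Redis the way KEYS would):

from alignements_backend.db import DB
from alignements_backend.notion import Notion

fixed = 0
for notion in DB.scan_iter(match='notion:*'):
    Notion(list(DB.sscan_iter(notion)))
    fixed += 1
print('rebuilt {} notions'.format(fixed))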
550469032843eb2af3b4a9faaed34d9754f00700
|
geotrek/common/management/commands/test_managers_emails.py
|
geotrek/common/management/commands/test_managers_emails.py
|
from django.core.mail import mail_managers
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Test if email settings are OK by sending mail to site managers"
def execute(self, *args, **options):
subject = u'Test email for managers'
message = u'If you receive this email, it seems that conf is OK !'
mail_managers(subject, message, fail_silently=False)
|
Add command to test managers emails
|
Add command to test managers emails
|
Python
|
bsd-2-clause
|
mabhub/Geotrek,camillemonchicourt/Geotrek,mabhub/Geotrek,Anaethelion/Geotrek,makinacorpus/Geotrek,mabhub/Geotrek,Anaethelion/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,Anaethelion/Geotrek,Anaethelion/Geotrek,camillemonchicourt/Geotrek,johan--/Geotrek,GeotrekCE/Geotrek-admin,johan--/Geotrek,johan--/Geotrek,johan--/Geotrek,makinacorpus/Geotrek,camillemonchicourt/Geotrek,GeotrekCE/Geotrek-admin,mabhub/Geotrek
|
Add command to test managers emails
|
from django.core.mail import mail_managers
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Test if email settings are OK by sending mail to site managers"
def execute(self, *args, **options):
subject = u'Test email for managers'
message = u'If you receive this email, it seems that conf is OK !'
mail_managers(subject, message, fail_silently=False)
|
<commit_before><commit_msg>Add command to test managers emails<commit_after>
|
from django.core.mail import mail_managers
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Test if email settings are OK by sending mail to site managers"
def execute(self, *args, **options):
subject = u'Test email for managers'
message = u'If you receive this email, it seems that conf is OK !'
mail_managers(subject, message, fail_silently=False)
|
Add command to test managers emailsfrom django.core.mail import mail_managers
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Test if email settings are OK by sending mail to site managers"
def execute(self, *args, **options):
subject = u'Test email for managers'
message = u'If you receive this email, it seems that conf is OK !'
mail_managers(subject, message, fail_silently=False)
|
<commit_before><commit_msg>Add command to test managers emails<commit_after>from django.core.mail import mail_managers
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Test if email settings are OK by sending mail to site managers"
def execute(self, *args, **options):
subject = u'Test email for managers'
message = u'If you receive this email, it seems that conf is OK !'
mail_managers(subject, message, fail_silently=False)
|
|
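A note on the record above: mail_managers() sends to every address in MANAGERS from SERVER_EMAIL, with EMAIL_SUBJECT_PREFIX prepended to the subject, so the command only proves anything if those settings are populated. A minimal settings sketch (all values are placeholders, not the project's):

MANAGERS = [('Ops', 'ops@example.com')]
SERVER_EMAIL = 'geotrek@example.com'   # From: address used by mail_managers
EMAIL_HOST = 'smtp.example.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'mailer'
EMAIL_HOST_PASSWORD = 'secret'
EMAIL_USE_TLS = True

With that in place the check is just: ./manage.py test_managers_emails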
edb9500824faffd9f1d0d1b59ca29966e3b18282
|
modules/formatter_record.py
|
modules/formatter_record.py
|
from behave.formatter.json import PrettyJSONFormatter
from pprint import pprint
class RecordFormatter(PrettyJSONFormatter):
name = "super"
description = "Formatter for adding REST calls to JSON output."
jsteps = {} # Contains an array of features, that contains array of steps in each feature
    # Overriding the Background function. This runs every time a Background is run.
    # This step also removes the background's steps from our jsteps collection.
def background(self, background):
# Let the parent run first
super(RecordFormatter, self).background(background)
# Check if the current feature has a name - Could be removed
if (self.isnotBackground()):
# Remove all the background steps from our jsteps, as they are not required
for step in background.steps:
self.jsteps[self.current_feature_element.name].pop(0)
    # Overriding the step function. This is called every time a step is found in the feature file. This happens before the feature/scenario is executed.
def step(self, step):
# Let the parent run first
super(RecordFormatter, self).step(step)
# Check if the current feature has a name - Could be removed
if (self.isnotBackground()):
# Append the step into our own collection of jsteps.
            self.jsteps[self.current_feature_element['name']].append(step)
    # Overriding End of Feature. This is run once the entire feature has completed running.
    def eof(self):
        # Iterate through each scenario
for scenario in self.current_feature_data['elements']:
# Check if Scenario valid
if (scenario['name'] != ''):
steps = scenario['steps']
jscenariosteps = self.jsteps[scenario['name']]
status = "passed" # Add Scenario status
                # Iterate through the jsteps and the corresponding step results
                for (j, jstep) in enumerate(jscenariosteps):
                    # Check if any of the steps failed; if so, mark the whole scenario as failed
if ('result' in steps[j]):
if steps[j]['result']['status'] == 'failed':
status = 'failed'
                    # Add configuration at the scenario level, generally used for sdk_language and sdk_version
if (hasattr(jstep, "details")):
scenario['details'] = jstep.details
if (hasattr(jstep, "date")):
steps[j]['date'] = jstep.date
# Check if jstep has attribute calls, where our custom data is stored - Could be generalized further
if (hasattr(jstep, "calls") and 'result' in steps[j]):
                        # Add the calls to our step object; they will later be added to the JSON output.
steps[j]['result']['calls'] = jstep.calls
# Add feature name and Status as a part of scenario
scenario['feature'] = self.current_feature.name
scenario['status'] = status
# Let the parent run last here
super(RecordFormatter, self).eof()
def isnotBackground(self):
if(self.current_feature_element['name'] != ''):
if(self.current_feature_element['name'] not in self.jsteps):
self.jsteps[self.current_feature_element['name']] = []
return True
return False
|
Customize behave formatter to output json
|
Customize behave formatter to output json
|
Python
|
mit
|
avidas/reliability-demo
|
Customize behave formatter to output json
|
from behave.formatter.json import PrettyJSONFormatter
from pprint import pprint
class RecordFormatter(PrettyJSONFormatter):
name = "super"
description = "Formatter for adding REST calls to JSON output."
jsteps = {} # Contains an array of features, that contains array of steps in each feature
    # Overriding the Background function. This runs every time a Background is run.
    # This step also removes the background's steps from our jsteps collection.
def background(self, background):
# Let the parent run first
super(RecordFormatter, self).background(background)
# Check if the current feature has a name - Could be removed
if (self.isnotBackground()):
# Remove all the background steps from our jsteps, as they are not required
for step in background.steps:
self.jsteps[self.current_feature_element.name].pop(0)
    # Overriding the step function. This is called every time a step is found in the feature file. This happens before the feature/scenario is executed.
def step(self, step):
# Let the parent run first
super(RecordFormatter, self).step(step)
# Check if the current feature has a name - Could be removed
if (self.isnotBackground()):
# Append the step into our own collection of jsteps.
            self.jsteps[self.current_feature_element['name']].append(step)
    # Overriding End of Feature. This is run once the entire feature has completed running.
    def eof(self):
        # Iterate through each scenario
for scenario in self.current_feature_data['elements']:
# Check if Scenario valid
if (scenario['name'] != ''):
steps = scenario['steps']
jscenariosteps = self.jsteps[scenario['name']]
status = "passed" # Add Scenario status
                # Iterate through the jsteps and the corresponding step results
                for (j, jstep) in enumerate(jscenariosteps):
                    # Check if any of the steps failed; if so, mark the whole scenario as failed
if ('result' in steps[j]):
if steps[j]['result']['status'] == 'failed':
status = 'failed'
                    # Add configuration at the scenario level, generally used for sdk_language and sdk_version
if (hasattr(jstep, "details")):
scenario['details'] = jstep.details
if (hasattr(jstep, "date")):
steps[j]['date'] = jstep.date
# Check if jstep has attribute calls, where our custom data is stored - Could be generalized further
if (hasattr(jstep, "calls") and 'result' in steps[j]):
                        # Add the calls to our step object; they will later be added to the JSON output.
steps[j]['result']['calls'] = jstep.calls
# Add feature name and Status as a part of scenario
scenario['feature'] = self.current_feature.name
scenario['status'] = status
# Let the parent run last here
super(RecordFormatter, self).eof()
def isnotBackground(self):
if(self.current_feature_element['name'] != ''):
if(self.current_feature_element['name'] not in self.jsteps):
self.jsteps[self.current_feature_element['name']] = []
return True
return False
|
<commit_before><commit_msg>Customize behave formatter to output json<commit_after>
|
from behave.formatter.json import PrettyJSONFormatter
from pprint import pprint
class RecordFormatter(PrettyJSONFormatter):
name = "super"
description = "Formatter for adding REST calls to JSON output."
jsteps = {} # Contains an array of features, that contains array of steps in each feature
    # Overriding the Background function. This runs every time a Background is run.
    # This step also removes the background's steps from our jsteps collection.
def background(self, background):
# Let the parent run first
super(RecordFormatter, self).background(background)
# Check if the current feature has a name - Could be removed
if (self.isnotBackground()):
# Remove all the background steps from our jsteps, as they are not required
for step in background.steps:
self.jsteps[self.current_feature_element.name].pop(0)
    # Overriding the step function. This is called every time a step is found in the feature file. This happens before the feature/scenario is executed.
def step(self, step):
# Let the parent run first
super(RecordFormatter, self).step(step)
# Check if the current feature has a name - Could be removed
if (self.isnotBackground()):
# Append the step into our own collection of jsteps.
            self.jsteps[self.current_feature_element['name']].append(step)
    # Overriding End of Feature. This is run once the entire feature has completed running.
    def eof(self):
        # Iterate through each scenario
for scenario in self.current_feature_data['elements']:
# Check if Scenario valid
if (scenario['name'] != ''):
steps = scenario['steps']
jscenariosteps = self.jsteps[scenario['name']]
status = "passed" # Add Scenario status
                # Iterate through the jsteps and the corresponding step results
                for (j, jstep) in enumerate(jscenariosteps):
                    # Check if any of the steps failed; if so, mark the whole scenario as failed
if ('result' in steps[j]):
if steps[j]['result']['status'] == 'failed':
status = 'failed'
                    # Add configuration at the scenario level, generally used for sdk_language and sdk_version
if (hasattr(jstep, "details")):
scenario['details'] = jstep.details
if (hasattr(jstep, "date")):
steps[j]['date'] = jstep.date
# Check if jstep has attribute calls, where our custom data is stored - Could be generalized further
if (hasattr(jstep, "calls") and 'result' in steps[j]):
                        # Add the calls to our step object; they will later be added to the JSON output.
steps[j]['result']['calls'] = jstep.calls
# Add feature name and Status as a part of scenario
scenario['feature'] = self.current_feature.name
scenario['status'] = status
# Let the parent run last here
super(RecordFormatter, self).eof()
def isnotBackground(self):
if(self.current_feature_element['name'] != ''):
if(self.current_feature_element['name'] not in self.jsteps):
self.jsteps[self.current_feature_element['name']] = []
return True
return False
|
Customize behave formatter to output jsonfrom behave.formatter.json import PrettyJSONFormatter
from pprint import pprint
class RecordFormatter(PrettyJSONFormatter):
name = "super"
description = "Formatter for adding REST calls to JSON output."
jsteps = {} # Contains an array of features, that contains array of steps in each feature
    # Overriding the Background function. This runs every time a Background is run.
    # This step also removes the background's steps from our jsteps collection.
def background(self, background):
# Let the parent run first
super(RecordFormatter, self).background(background)
# Check if the current feature has a name - Could be removed
if (self.isnotBackground()):
# Remove all the background steps from our jsteps, as they are not required
for step in background.steps:
self.jsteps[self.current_feature_element.name].pop(0)
    # Overriding the step function. This is called every time a step is found in the feature file. This happens before the feature/scenario is executed.
def step(self, step):
# Let the parent run first
super(RecordFormatter, self).step(step)
# Check if the current feature has a name - Could be removed
if (self.isnotBackground()):
# Append the step into our own collection of jsteps.
            self.jsteps[self.current_feature_element['name']].append(step)
    # Overriding End of Feature. This is run once the entire feature has completed running.
    def eof(self):
        # Iterate through each scenario
for scenario in self.current_feature_data['elements']:
# Check if Scenario valid
if (scenario['name'] != ''):
steps = scenario['steps']
jscenariosteps = self.jsteps[scenario['name']]
status = "passed" # Add Scenario status
                # Iterate through the jsteps and the corresponding step results
                for (j, jstep) in enumerate(jscenariosteps):
                    # Check if any of the steps failed; if so, mark the whole scenario as failed
if ('result' in steps[j]):
if steps[j]['result']['status'] == 'failed':
status = 'failed'
                    # Add configuration at the scenario level, generally used for sdk_language and sdk_version
if (hasattr(jstep, "details")):
scenario['details'] = jstep.details
if (hasattr(jstep, "date")):
steps[j]['date'] = jstep.date
# Check if jstep has attribute calls, where our custom data is stored - Could be generalized further
if (hasattr(jstep, "calls") and 'result' in steps[j]):
                        # Add the calls to our step object; they will later be added to the JSON output.
steps[j]['result']['calls'] = jstep.calls
# Add feature name and Status as a part of scenario
scenario['feature'] = self.current_feature.name
scenario['status'] = status
# Let the parent run last here
super(RecordFormatter, self).eof()
def isnotBackground(self):
if(self.current_feature_element['name'] != ''):
if(self.current_feature_element['name'] not in self.jsteps):
self.jsteps[self.current_feature_element['name']] = []
return True
return False
|
<commit_before><commit_msg>Customize behave formatter to output json<commit_after>from behave.formatter.json import PrettyJSONFormatter
from pprint import pprint
class RecordFormatter(PrettyJSONFormatter):
name = "super"
description = "Formatter for adding REST calls to JSON output."
jsteps = {} # Contains an array of features, that contains array of steps in each feature
    # Overriding the Background function. This runs every time a Background is run.
    # This step also removes the background's steps from our jsteps collection.
def background(self, background):
# Let the parent run first
super(RecordFormatter, self).background(background)
# Check if the current feature has a name - Could be removed
if (self.isnotBackground()):
# Remove all the background steps from our jsteps, as they are not required
for step in background.steps:
self.jsteps[self.current_feature_element.name].pop(0)
    # Overriding the step function. This is called every time a step is found in the feature file. This happens before the feature/scenario is executed.
def step(self, step):
# Let the parent run first
super(RecordFormatter, self).step(step)
# Check if the current feature has a name - Could be removed
if (self.isnotBackground()):
# Append the step into our own collection of jsteps.
            self.jsteps[self.current_feature_element['name']].append(step)
    # Overriding End of Feature. This is run once the entire feature has completed running.
    def eof(self):
        # Iterate through each scenario
for scenario in self.current_feature_data['elements']:
# Check if Scenario valid
if (scenario['name'] != ''):
steps = scenario['steps']
jscenariosteps = self.jsteps[scenario['name']]
status = "passed" # Add Scenario status
                # Iterate through the jsteps and the corresponding step results
                for (j, jstep) in enumerate(jscenariosteps):
                    # Check if any of the steps failed; if so, mark the whole scenario as failed
if ('result' in steps[j]):
if steps[j]['result']['status'] == 'failed':
status = 'failed'
                    # Add configuration at the scenario level, generally used for sdk_language and sdk_version
if (hasattr(jstep, "details")):
scenario['details'] = jstep.details
if (hasattr(jstep, "date")):
steps[j]['date'] = jstep.date
# Check if jstep has attribute calls, where our custom data is stored - Could be generalized further
if (hasattr(jstep, "calls") and 'result' in steps[j]):
# add the calls to our step object, that would be later added to json output.
steps[j]['result']['calls'] = jstep.calls
# Add feature name and Status as a part of scenario
scenario['feature'] = self.current_feature.name
scenario['status'] = status
# Let the parent run last here
super(RecordFormatter, self).eof()
def isnotBackground(self):
if(self.current_feature_element['name'] != ''):
if(self.current_feature_element['name'] not in self.jsteps):
self.jsteps[self.current_feature_element['name']] = []
return True
return False
|
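The override pattern above generalizes cleanly: subclass the bundled JSON formatter, hook a lifecycle method, mutate the scenario dicts, and let the parent do the serialization. A minimal sketch under that assumption (module, class, and field names below are illustrative, not part of the commit):
from behave.formatter.json import PrettyJSONFormatter
class TimingFormatter(PrettyJSONFormatter):
    name = "timing"
    description = "Adds a total step duration to every scenario."
    def eof(self):
        # Annotate each scenario dict before the parent writes the JSON.
        for scenario in self.current_feature_data['elements']:
            durations = [step['result']['duration']
                         for step in scenario.get('steps', [])
                         if 'result' in step]
            scenario['total_duration'] = sum(durations)
        super(TimingFormatter, self).eof()
Assuming the module is importable, behave should accept a scoped class name on the command line, e.g. behave -f my_formatters:TimingFormatter features/.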
|
1f48fee7ffcef3eefa6aaedb5ca963c10bb7c58c
|
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/users/test_forms.py
|
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/users/test_forms.py
|
from django.test import TestCase
from users.forms import ZionsUserCreationForm
from users.models import User
class {{cookiecutter.project_camel_name}}UserCreationTestCase(TestCase):
def setUp(self):
self.test_user = User.objects.create(
username='testuser',
email='test@test.com',
password='password'
)
self.bad_form = ZionsUserCreationForm({
'username': 'testuser',
'password1': 'password',
'password2': 'password',
})
self.good_form = ZionsUserCreationForm({
'username': 'testuser2',
'password1': 'password',
'password2': 'password',
})
def test_username_good(self):
self.assertTrue(self.good_form.is_valid())
def test_clean_username_bad(self):
self.assertFalse(self.bad_form.is_valid())
|
Add test case for user creation form
|
Add test case for user creation form
|
Python
|
bsd-3-clause
|
wldcordeiro/cookiecutter-django-essentials,wldcordeiro/cookiecutter-django-essentials,wldcordeiro/cookiecutter-django-essentials
|
Add test case for user creation form
|
from django.test import TestCase
from users.forms import ZionsUserCreationForm
from users.models import User
class {{cookiecutter.project_camel_name}}UserCreationTestCase(TestCase):
def setUp(self):
self.test_user = User.objects.create(
username='testuser',
email='test@test.com',
password='password'
)
self.bad_form = ZionsUserCreationForm({
'username': 'testuser',
'password1': 'password',
'password2': 'password',
})
self.good_form = ZionsUserCreationForm({
'username': 'testuser2',
'password1': 'password',
'password2': 'password',
})
def test_username_good(self):
self.assertTrue(self.good_form.is_valid())
def test_clean_username_bad(self):
self.assertFalse(self.bad_form.is_valid())
|
<commit_before><commit_msg>Add test case for user creation form<commit_after>
|
from django.test import TestCase
from users.forms import ZionsUserCreationForm
from users.models import User
class {{cookiecutter.project_camel_name}}UserCreationTestCase(TestCase):
def setUp(self):
self.test_user = User.objects.create(
username='testuser',
email='test@test.com',
password='password'
)
self.bad_form = ZionsUserCreationForm({
'username': 'testuser',
'password1': 'password',
'password2': 'password',
})
self.good_form = ZionsUserCreationForm({
'username': 'testuser2',
'password1': 'password',
'password2': 'password',
})
def test_username_good(self):
self.assertTrue(self.good_form.is_valid())
def test_clean_username_bad(self):
self.assertFalse(self.bad_form.is_valid())
|
Add test case for user creation formfrom django.test import TestCase
from users.forms import ZionsUserCreationForm
from users.models import User
class {{cookiecutter.project_camel_name}}UserCreationTestCase(TestCase):
def setUp(self):
self.test_user = User.objects.create(
username='testuser',
email='test@test.com',
password='password'
)
self.bad_form = ZionsUserCreationForm({
'username': 'testuser',
'password1': 'password',
'password2': 'password',
})
self.good_form = ZionsUserCreationForm({
'username': 'testuser2',
'password1': 'password',
'password2': 'password',
})
def test_username_good(self):
self.assertTrue(self.good_form.is_valid())
def test_clean_username_bad(self):
self.assertFalse(self.bad_form.is_valid())
|
<commit_before><commit_msg>Add test case for user creation form<commit_after>from django.test import TestCase
from users.forms import ZionsUserCreationForm
from users.models import User
class {{cookiecutter.project_camel_name}}UserCreationTestCase(TestCase):
def setUp(self):
self.test_user = User.objects.create(
username='testuser',
email='test@test.com',
password='password'
)
self.bad_form = ZionsUserCreationForm({
'username': 'testuser',
'password1': 'password',
'password2': 'password',
})
self.good_form = ZionsUserCreationForm({
'username': 'testuser2',
'password1': 'password',
'password2': 'password',
})
def test_username_good(self):
self.assertTrue(self.good_form.is_valid())
def test_clean_username_bad(self):
self.assertFalse(self.bad_form.is_valid())
|
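The commit adds only the tests; ZionsUserCreationForm itself is referenced but not shown. A hypothetical sketch of a form that would satisfy both assertions (duplicate usernames rejected, fresh ones accepted); the field list and error message are assumptions:
from django import forms
from django.contrib.auth.forms import UserCreationForm
from users.models import User
class ZionsUserCreationForm(UserCreationForm):
    class Meta:
        model = User
        fields = ('username',)
    def clean_username(self):
        # Reject usernames that already exist, which is what makes
        # bad_form invalid in the tests above.
        username = self.cleaned_data['username']
        if User.objects.filter(username=username).exists():
            raise forms.ValidationError('A user with that username already exists.')
        return username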
|
4d85702561c000824083544de98693e244c8aab7
|
tests/test_decoding_stack.py
|
tests/test_decoding_stack.py
|
#! /usr/bin/env python
from __future__ import division
from timeside.decoder import FileDecoder
from timeside.analyzer import AubioPitch
from timeside.core import ProcessPipe
import numpy as np
from unit_timeside import *
import os.path
#from glib import GError as GST_IOError
# HINT : to use later with Gnonlin only
class TestDecodingFromStack(unittest.TestCase):
"Test decoder stack"
def setUp(self):
self.samplerate, self.channels, self.blocksize = None, None, None
self.start = 0
self.duration = None
self.expected_samplerate = 44100
self.expected_channels = 2
self.expected_totalframes = 352800
self.test_exact_duration = True
self.source_duration = 8
self.expected_mime_type = 'audio/x-wav'
self.source = os.path.join(os.path.dirname(__file__),
"samples/sweep.wav")
def testProcess(self):
"Test decoder stack: test process"
decoder = FileDecoder(uri=self.source,
start=self.start,
duration=self.duration,
stack=True)
self.assertTrue(decoder.stack)
self.assertFalse(decoder.from_stack)
pipe = ProcessPipe(decoder)
pipe.run()
self.assertFalse(decoder.stack)
self.assertTrue(decoder.from_stack)
self.assertEqual(len(pipe.frames_stack), 44)
pipe.run()
def testResults(self):
"Test decoder stack: test frames content"
decoder = FileDecoder(uri=self.source,
start=self.start,
duration=self.duration,
stack=True)
pitch_on_file = AubioPitch()
pipe = (decoder | pitch_on_file)
pipe.run()
self.assertIsInstance(pipe.frames_stack, list)
pitch_results_on_file = pipe.results['aubio_pitch.pitch'].data.copy()
# If the pipe is used for a second run, the processed frames stored
# in the stack are passed to the other processors
# without decoding the audio source again.
        # Let's define a second analyzer equivalent to the previous one:
pitch_on_stack = AubioPitch()
pipe |= pitch_on_stack
pipe.run()
# to assert that the frames passed to the two analyzers are the same,
# we check that the results of these analyzers are equivalent:
pitch_results_on_stack = pipe.results['aubio_pitch.pitch'].data
self.assertTrue(np.array_equal(pitch_results_on_stack,
pitch_results_on_file))
if __name__ == '__main__':
unittest.main(testRunner=TestRunner())
|
Add test for decoder stack
|
Add test for decoder stack
|
Python
|
agpl-3.0
|
Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide
|
Add test for decoder stack
|
#! /usr/bin/env python
from __future__ import division
from timeside.decoder import FileDecoder
from timeside.analyzer import AubioPitch
from timeside.core import ProcessPipe
import numpy as np
from unit_timeside import *
import os.path
#from glib import GError as GST_IOError
# HINT : to use later with Gnonlin only
class TestDecodingFromStack(unittest.TestCase):
"Test decoder stack"
def setUp(self):
self.samplerate, self.channels, self.blocksize = None, None, None
self.start = 0
self.duration = None
self.expected_samplerate = 44100
self.expected_channels = 2
self.expected_totalframes = 352800
self.test_exact_duration = True
self.source_duration = 8
self.expected_mime_type = 'audio/x-wav'
self.source = os.path.join(os.path.dirname(__file__),
"samples/sweep.wav")
def testProcess(self):
"Test decoder stack: test process"
decoder = FileDecoder(uri=self.source,
start=self.start,
duration=self.duration,
stack=True)
self.assertTrue(decoder.stack)
self.assertFalse(decoder.from_stack)
pipe = ProcessPipe(decoder)
pipe.run()
self.assertFalse(decoder.stack)
self.assertTrue(decoder.from_stack)
self.assertEqual(len(pipe.frames_stack), 44)
pipe.run()
def testResults(self):
"Test decoder stack: test frames content"
decoder = FileDecoder(uri=self.source,
start=self.start,
duration=self.duration,
stack=True)
pitch_on_file = AubioPitch()
pipe = (decoder | pitch_on_file)
pipe.run()
self.assertIsInstance(pipe.frames_stack, list)
pitch_results_on_file = pipe.results['aubio_pitch.pitch'].data.copy()
# If the pipe is used for a second run, the processed frames stored
# in the stack are passed to the other processors
# without decoding the audio source again.
        # Let's define a second analyzer equivalent to the previous one:
pitch_on_stack = AubioPitch()
pipe |= pitch_on_stack
pipe.run()
# to assert that the frames passed to the two analyzers are the same,
# we check that the results of these analyzers are equivalent:
pitch_results_on_stack = pipe.results['aubio_pitch.pitch'].data
self.assertTrue(np.array_equal(pitch_results_on_stack,
pitch_results_on_file))
if __name__ == '__main__':
unittest.main(testRunner=TestRunner())
|
<commit_before><commit_msg>Add test for decoder stack<commit_after>
|
#! /usr/bin/env python
from __future__ import division
from timeside.decoder import FileDecoder
from timeside.analyzer import AubioPitch
from timeside.core import ProcessPipe
import numpy as np
from unit_timeside import *
import os.path
#from glib import GError as GST_IOError
# HINT : to use later with Gnonlin only
class TestDecodingFromStack(unittest.TestCase):
"Test decoder stack"
def setUp(self):
self.samplerate, self.channels, self.blocksize = None, None, None
self.start = 0
self.duration = None
self.expected_samplerate = 44100
self.expected_channels = 2
self.expected_totalframes = 352800
self.test_exact_duration = True
self.source_duration = 8
self.expected_mime_type = 'audio/x-wav'
self.source = os.path.join(os.path.dirname(__file__),
"samples/sweep.wav")
def testProcess(self):
"Test decoder stack: test process"
decoder = FileDecoder(uri=self.source,
start=self.start,
duration=self.duration,
stack=True)
self.assertTrue(decoder.stack)
self.assertFalse(decoder.from_stack)
pipe = ProcessPipe(decoder)
pipe.run()
self.assertFalse(decoder.stack)
self.assertTrue(decoder.from_stack)
self.assertEqual(len(pipe.frames_stack), 44)
pipe.run()
def testResults(self):
"Test decoder stack: test frames content"
decoder = FileDecoder(uri=self.source,
start=self.start,
duration=self.duration,
stack=True)
pitch_on_file = AubioPitch()
pipe = (decoder | pitch_on_file)
pipe.run()
self.assertIsInstance(pipe.frames_stack, list)
pitch_results_on_file = pipe.results['aubio_pitch.pitch'].data.copy()
# If the pipe is used for a second run, the processed frames stored
# in the stack are passed to the other processors
# without decoding the audio source again.
        # Let's define a second analyzer equivalent to the previous one:
pitch_on_stack = AubioPitch()
pipe |= pitch_on_stack
pipe.run()
# to assert that the frames passed to the two analyzers are the same,
# we check that the results of these analyzers are equivalent:
pitch_results_on_stack = pipe.results['aubio_pitch.pitch'].data
self.assertTrue(np.array_equal(pitch_results_on_stack,
pitch_results_on_file))
if __name__ == '__main__':
unittest.main(testRunner=TestRunner())
|
Add test for decoder stack#! /usr/bin/env python
from __future__ import division
from timeside.decoder import FileDecoder
from timeside.analyzer import AubioPitch
from timeside.core import ProcessPipe
import numpy as np
from unit_timeside import *
import os.path
#from glib import GError as GST_IOError
# HINT : to use later with Gnonlin only
class TestDecodingFromStack(unittest.TestCase):
"Test decoder stack"
def setUp(self):
self.samplerate, self.channels, self.blocksize = None, None, None
self.start = 0
self.duration = None
self.expected_samplerate = 44100
self.expected_channels = 2
self.expected_totalframes = 352800
self.test_exact_duration = True
self.source_duration = 8
self.expected_mime_type = 'audio/x-wav'
self.source = os.path.join(os.path.dirname(__file__),
"samples/sweep.wav")
def testProcess(self):
"Test decoder stack: test process"
decoder = FileDecoder(uri=self.source,
start=self.start,
duration=self.duration,
stack=True)
self.assertTrue(decoder.stack)
self.assertFalse(decoder.from_stack)
pipe = ProcessPipe(decoder)
pipe.run()
self.assertFalse(decoder.stack)
self.assertTrue(decoder.from_stack)
self.assertEqual(len(pipe.frames_stack), 44)
pipe.run()
def testResults(self):
"Test decoder stack: test frames content"
decoder = FileDecoder(uri=self.source,
start=self.start,
duration=self.duration,
stack=True)
pitch_on_file = AubioPitch()
pipe = (decoder | pitch_on_file)
pipe.run()
self.assertIsInstance(pipe.frames_stack, list)
pitch_results_on_file = pipe.results['aubio_pitch.pitch'].data.copy()
# If the pipe is used for a second run, the processed frames stored
# in the stack are passed to the other processors
# without decoding the audio source again.
        # Let's define a second analyzer equivalent to the previous one:
pitch_on_stack = AubioPitch()
pipe |= pitch_on_stack
pipe.run()
# to assert that the frames passed to the two analyzers are the same,
# we check that the results of these analyzers are equivalent:
pitch_results_on_stack = pipe.results['aubio_pitch.pitch'].data
self.assertTrue(np.array_equal(pitch_results_on_stack,
pitch_results_on_file))
if __name__ == '__main__':
unittest.main(testRunner=TestRunner())
|
<commit_before><commit_msg>Add test for decoder stack<commit_after>#! /usr/bin/env python
from __future__ import division
from timeside.decoder import FileDecoder
from timeside.analyzer import AubioPitch
from timeside.core import ProcessPipe
import numpy as np
from unit_timeside import *
import os.path
#from glib import GError as GST_IOError
# HINT : to use later with Gnonlin only
class TestDecodingFromStack(unittest.TestCase):
"Test decoder stack"
def setUp(self):
self.samplerate, self.channels, self.blocksize = None, None, None
self.start = 0
self.duration = None
self.expected_samplerate = 44100
self.expected_channels = 2
self.expected_totalframes = 352800
self.test_exact_duration = True
self.source_duration = 8
self.expected_mime_type = 'audio/x-wav'
self.source = os.path.join(os.path.dirname(__file__),
"samples/sweep.wav")
def testProcess(self):
"Test decoder stack: test process"
decoder = FileDecoder(uri=self.source,
start=self.start,
duration=self.duration,
stack=True)
self.assertTrue(decoder.stack)
self.assertFalse(decoder.from_stack)
pipe = ProcessPipe(decoder)
pipe.run()
self.assertFalse(decoder.stack)
self.assertTrue(decoder.from_stack)
self.assertEqual(len(pipe.frames_stack), 44)
pipe.run()
def testResults(self):
"Test decoder stack: test frames content"
decoder = FileDecoder(uri=self.source,
start=self.start,
duration=self.duration,
stack=True)
pitch_on_file = AubioPitch()
pipe = (decoder | pitch_on_file)
pipe.run()
self.assertIsInstance(pipe.frames_stack, list)
pitch_results_on_file = pipe.results['aubio_pitch.pitch'].data.copy()
# If the pipe is used for a second run, the processed frames stored
# in the stack are passed to the other processors
# without decoding the audio source again.
        # Let's define a second analyzer equivalent to the previous one:
pitch_on_stack = AubioPitch()
pipe |= pitch_on_stack
pipe.run()
# to assert that the frames passed to the two analyzers are the same,
# we check that the results of these analyzers are equivalent:
pitch_results_on_stack = pipe.results['aubio_pitch.pitch'].data
self.assertTrue(np.array_equal(pitch_results_on_stack,
pitch_results_on_file))
if __name__ == '__main__':
unittest.main(testRunner=TestRunner())
|
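Outside the test harness, the stack feature reduces to a decode-once, analyze-many pipeline. A minimal usage sketch built only from the API calls exercised above (the file path is illustrative):
from timeside.decoder import FileDecoder
from timeside.analyzer import AubioPitch
decoder = FileDecoder(uri='samples/sweep.wav', stack=True)
pipe = decoder | AubioPitch()
pipe.run()            # decodes the file and fills pipe.frames_stack
pipe |= AubioPitch()  # appends a second analyzer to the same pipe
pipe.run()            # replays frames_stack instead of decoding again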
|
84153b0be78998ab8ec6914df8623c99255457b5
|
locust/test/mock_locustfile.py
|
locust/test/mock_locustfile.py
|
import os
import random
import time
from contextlib import contextmanager
MOCK_LOUCSTFILE_CONTENT = '''
"""This is a mock locust file for unit testing"""
from locust import HttpLocust, TaskSet, task, between
def index(l):
l.client.get("/")
def stats(l):
l.client.get("/stats/requests")
class UserTasks(TaskSet):
# one can specify tasks like this
tasks = [index, stats]
class LocustSubclass(HttpLocust):
host = "http://127.0.0.1:8089"
wait_time = between(2, 5)
task_set = UserTasks
class NotLocustSubclass():
host = "http://localhost:8000"
'''
class MockedLocustfile:
__slots__ = ["filename", "directory", "file_path"]
@contextmanager
def mock_locustfile(filename_prefix="mock_locustfile", content=MOCK_LOUCSTFILE_CONTENT):
mocked = MockedLocustfile()
mocked.directory = os.path.dirname(os.path.abspath(__file__))
mocked.filename = "%s_%s_%i.py" % (
filename_prefix,
str(time.time()).replace(".", "_"),
random.randint(0,100000),
)
mocked.file_path = os.path.join(mocked.directory, mocked.filename)
with open(mocked.file_path, 'w') as file:
file.write(content)
yield mocked
os.remove(mocked.file_path)
|
Improve code for creating temporary locustfiles that can be used in tests
|
Improve code for creating temporary locustfiles that can be used in tests
|
Python
|
mit
|
mbeacom/locust,locustio/locust,locustio/locust,mbeacom/locust,mbeacom/locust,locustio/locust,mbeacom/locust,locustio/locust
|
Improve code for creating temporary locustfiles that can be used in tests
|
import os
import random
import time
from contextlib import contextmanager
MOCK_LOUCSTFILE_CONTENT = '''
"""This is a mock locust file for unit testing"""
from locust import HttpLocust, TaskSet, task, between
def index(l):
l.client.get("/")
def stats(l):
l.client.get("/stats/requests")
class UserTasks(TaskSet):
# one can specify tasks like this
tasks = [index, stats]
class LocustSubclass(HttpLocust):
host = "http://127.0.0.1:8089"
wait_time = between(2, 5)
task_set = UserTasks
class NotLocustSubclass():
host = "http://localhost:8000"
'''
class MockedLocustfile:
__slots__ = ["filename", "directory", "file_path"]
@contextmanager
def mock_locustfile(filename_prefix="mock_locustfile", content=MOCK_LOUCSTFILE_CONTENT):
mocked = MockedLocustfile()
mocked.directory = os.path.dirname(os.path.abspath(__file__))
mocked.filename = "%s_%s_%i.py" % (
filename_prefix,
str(time.time()).replace(".", "_"),
random.randint(0,100000),
)
mocked.file_path = os.path.join(mocked.directory, mocked.filename)
with open(mocked.file_path, 'w') as file:
file.write(content)
yield mocked
os.remove(mocked.file_path)
|
<commit_before><commit_msg>Improve code for creating temporary locustfiles that can be used in tests<commit_after>
|
import os
import random
import time
from contextlib import contextmanager
MOCK_LOUCSTFILE_CONTENT = '''
"""This is a mock locust file for unit testing"""
from locust import HttpLocust, TaskSet, task, between
def index(l):
l.client.get("/")
def stats(l):
l.client.get("/stats/requests")
class UserTasks(TaskSet):
# one can specify tasks like this
tasks = [index, stats]
class LocustSubclass(HttpLocust):
host = "http://127.0.0.1:8089"
wait_time = between(2, 5)
task_set = UserTasks
class NotLocustSubclass():
host = "http://localhost:8000"
'''
class MockedLocustfile:
__slots__ = ["filename", "directory", "file_path"]
@contextmanager
def mock_locustfile(filename_prefix="mock_locustfile", content=MOCK_LOUCSTFILE_CONTENT):
mocked = MockedLocustfile()
mocked.directory = os.path.dirname(os.path.abspath(__file__))
mocked.filename = "%s_%s_%i.py" % (
filename_prefix,
str(time.time()).replace(".", "_"),
random.randint(0,100000),
)
mocked.file_path = os.path.join(mocked.directory, mocked.filename)
with open(mocked.file_path, 'w') as file:
file.write(content)
yield mocked
os.remove(mocked.file_path)
|
Improve code for creating temporary locustfiles that can be used in testsimport os
import random
import time
from contextlib import contextmanager
MOCK_LOUCSTFILE_CONTENT = '''
"""This is a mock locust file for unit testing"""
from locust import HttpLocust, TaskSet, task, between
def index(l):
l.client.get("/")
def stats(l):
l.client.get("/stats/requests")
class UserTasks(TaskSet):
# one can specify tasks like this
tasks = [index, stats]
class LocustSubclass(HttpLocust):
host = "http://127.0.0.1:8089"
wait_time = between(2, 5)
task_set = UserTasks
class NotLocustSubclass():
host = "http://localhost:8000"
'''
class MockedLocustfile:
__slots__ = ["filename", "directory", "file_path"]
@contextmanager
def mock_locustfile(filename_prefix="mock_locustfile", content=MOCK_LOUCSTFILE_CONTENT):
mocked = MockedLocustfile()
mocked.directory = os.path.dirname(os.path.abspath(__file__))
mocked.filename = "%s_%s_%i.py" % (
filename_prefix,
str(time.time()).replace(".", "_"),
random.randint(0,100000),
)
mocked.file_path = os.path.join(mocked.directory, mocked.filename)
with open(mocked.file_path, 'w') as file:
file.write(content)
yield mocked
os.remove(mocked.file_path)
|
<commit_before><commit_msg>Improve code for creating temporary locustfiles that can be used in tests<commit_after>import os
import random
import time
from contextlib import contextmanager
MOCK_LOUCSTFILE_CONTENT = '''
"""This is a mock locust file for unit testing"""
from locust import HttpLocust, TaskSet, task, between
def index(l):
l.client.get("/")
def stats(l):
l.client.get("/stats/requests")
class UserTasks(TaskSet):
# one can specify tasks like this
tasks = [index, stats]
class LocustSubclass(HttpLocust):
host = "http://127.0.0.1:8089"
wait_time = between(2, 5)
task_set = UserTasks
class NotLocustSubclass():
host = "http://localhost:8000"
'''
class MockedLocustfile:
__slots__ = ["filename", "directory", "file_path"]
@contextmanager
def mock_locustfile(filename_prefix="mock_locustfile", content=MOCK_LOUCSTFILE_CONTENT):
mocked = MockedLocustfile()
mocked.directory = os.path.dirname(os.path.abspath(__file__))
mocked.filename = "%s_%s_%i.py" % (
filename_prefix,
str(time.time()).replace(".", "_"),
random.randint(0,100000),
)
mocked.file_path = os.path.join(mocked.directory, mocked.filename)
with open(mocked.file_path, 'w') as file:
file.write(content)
yield mocked
os.remove(mocked.file_path)
|
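A sketch of the intended consumption in a test, assuming the helper is importable as locust.test.mock_locustfile: the temporary locustfile exists only inside the with block and is removed on exit.
import os
from locust.test.mock_locustfile import mock_locustfile
def test_mock_locustfile_lifecycle():
    with mock_locustfile() as mocked:
        # The generated name carries the prefix plus a timestamp and a random suffix.
        assert mocked.filename.startswith('mock_locustfile')
        assert os.path.exists(mocked.file_path)
    # The context manager deletes the file once the block exits.
    assert not os.path.exists(mocked.file_path)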
|
595c8fad76696240f96e61d9a2299de3d6cda16a
|
skcode/utility/walketree.py
|
skcode/utility/walketree.py
|
"""
SkCode utility for walking across a document tree.
"""
def walk_tree_for_cls(tree_node, opts_cls):
"""
Walk the tree and yield any tree node matching the given options class.
:param tree_node: The current tree node instance.
:param opts_cls: The options class to search for.
"""
# Check the current tree node first
if isinstance(tree_node.opts, opts_cls):
yield tree_node
# Check all children nodes
for child in tree_node.children:
for node in walk_tree_for_cls(child, opts_cls):
yield node
|
Add utility for walking etree and yielding nodes if options class type match.
|
Add utility for walking etree and yielding nodes if options class type match.
|
Python
|
agpl-3.0
|
TamiaLab/PySkCode
|
Add utility for walking etree and yielding nodes if options class type match.
|
"""
SkCode utility for walking across a document tree.
"""
def walk_tree_for_cls(tree_node, opts_cls):
"""
Walk the tree and yield any tree node matching the given options class.
:param tree_node: The current tree node instance.
:param opts_cls: The options class to search for.
"""
# Check the current tree node first
if isinstance(tree_node.opts, opts_cls):
yield tree_node
# Check all children nodes
for child in tree_node.children:
for node in walk_tree_for_cls(child, opts_cls):
yield node
|
<commit_before><commit_msg>Add utility for walking etree and yielding nodes if options class type match.<commit_after>
|
"""
SkCode utility for walking across a document tree.
"""
def walk_tree_for_cls(tree_node, opts_cls):
"""
Walk the tree and yield any tree node matching the given options class.
:param tree_node: The current tree node instance.
:param opts_cls: The options class to search for.
"""
# Check the current tree node first
if isinstance(tree_node.opts, opts_cls):
yield tree_node
# Check all children nodes
for child in tree_node.children:
for node in walk_tree_for_cls(child, opts_cls):
yield node
|
Add utility for walking etree and yielding nodes if options class type match."""
SkCode utility for walking across a document tree.
"""
def walk_tree_for_cls(tree_node, opts_cls):
"""
Walk the tree and yield any tree node matching the given options class.
:param tree_node: The current tree node instance.
:param opts_cls: The options class to search for.
"""
# Check the current tree node first
if isinstance(tree_node.opts, opts_cls):
yield tree_node
# Check all children nodes
for child in tree_node.children:
for node in walk_tree_for_cls(child, opts_cls):
yield node
|
<commit_before><commit_msg>Add utility for walking etree and yielding nodes if options class type match.<commit_after>"""
SkCode utility for walking across a document tree.
"""
def walk_tree_for_cls(tree_node, opts_cls):
"""
Walk the tree and yield any tree node matching the given options class.
:param tree_node: The current tree node instance.
:param opts_cls: The options class to search for.
"""
# Check the current tree node first
if isinstance(tree_node.opts, opts_cls):
yield tree_node
# Check all children nodes
for child in tree_node.children:
for node in walk_tree_for_cls(child, opts_cls):
yield node
|
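Since the walker only relies on an opts attribute and a children list, a toy tree is enough to demonstrate the traversal; the stand-in node and options classes below are illustrative:
from skcode.utility.walketree import walk_tree_for_cls
class DummyOpts:
    pass
class OtherOpts:
    pass
class DummyNode:
    def __init__(self, opts, children=()):
        self.opts = opts
        self.children = list(children)
tree = DummyNode(OtherOpts(), [
    DummyNode(DummyOpts()),
    DummyNode(OtherOpts(), [DummyNode(DummyOpts())]),
])
# The current node is tested first, then each subtree is walked depth-first.
assert len(list(walk_tree_for_cls(tree, DummyOpts))) == 2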
|
427a95f0c56facc138448cde7e7b9da1bcdc8ea4
|
add_example.py
|
add_example.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Unit Tests
def test_add_zero():
assert 0 + 1 == 1 + 0
def test_add_single_digits():
assert 1 + 2 == 2 + 1
def test_add_double_digits():
assert 10 + 12 == 12 + 10
# Property-based Test
from hypothesis import given
import hypothesis.strategies as st
@given(st.integers(), st.integers())
def test_add(x, y):
assert x + y == y + x
|
Add super basic Hypothesis example
|
Add super basic Hypothesis example
|
Python
|
mit
|
dkua/pyconca16-talk
|
Add super basic Hypothesis example
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Unit Tests
def test_add_zero():
assert 0 + 1 == 1 + 0
def test_add_single_digits():
assert 1 + 2 == 2 + 1
def test_add_double_digits():
assert 10 + 12 == 12 + 10
# Property-based Test
from hypothesis import given
import hypothesis.strategies as st
@given(st.integers(), st.integers())
def test_add(x, y):
assert x + y == y + x
|
<commit_before><commit_msg>Add super basic Hypothesis example<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Unit Tests
def test_add_zero():
assert 0 + 1 == 1 + 0
def test_add_single_digits():
assert 1 + 2 == 2 + 1
def test_add_double_digits():
assert 10 + 12 == 12 + 10
# Property-based Test
from hypothesis import given
import hypothesis.strategies as st
@given(st.integers(), st.integers())
def test_add(x, y):
assert x + y == y + x
|
Add super basic Hypothesis example#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Unit Tests
def test_add_zero():
assert 0 + 1 == 1 + 0
def test_add_single_digits():
assert 1 + 2 == 2 + 1
def test_add_double_digits():
assert 10 + 12 == 12 + 10
# Property-based Test
from hypothesis import given
import hypothesis.strategies as st
@given(st.integers(), st.integers())
def test_add(x, y):
assert x + y == y + x
|
<commit_before><commit_msg>Add super basic Hypothesis example<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Unit Tests
def test_add_zero():
assert 0 + 1 == 1 + 0
def test_add_single_digits():
assert 1 + 2 == 2 + 1
def test_add_double_digits():
assert 10 + 12 == 12 + 10
# Property-based Test
from hypothesis import given
import hypothesis.strategies as st
@given(st.integers(), st.integers())
def test_add(x, y):
assert x + y == y + x
|
|
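For contrast, a property that Hypothesis can actually falsify: float addition is commutative but not associative, so the sketch below is expected to fail with a small, shrunken counterexample (the strategies use the standard floats arguments):
from hypothesis import given
import hypothesis.strategies as st
@given(st.floats(allow_nan=False, allow_infinity=False),
       st.floats(allow_nan=False, allow_infinity=False),
       st.floats(allow_nan=False, allow_infinity=False))
def test_add_associative(x, y, z):
    assert (x + y) + z == x + (y + z)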
21a504dce25a1b22bda27cd74a443af98b24ad14
|
filters/extract_urls.py
|
filters/extract_urls.py
|
import io
import pypandoc
import panflute
def prepare(doc):
doc.images = []
doc.links = []
def action(elem, doc):
if isinstance(elem, panflute.Image):
doc.images.append(elem)
elif isinstance(elem, panflute.Link):
doc.links.append(elem)
if __name__ == '__main__':
data = pypandoc.convert_file('example.md', 'json')
f = io.StringIO(data)
doc = panflute.load(f)
doc = panflute.run_filter(action, prepare=prepare, doc=doc)
print("\nImages:")
for image in doc.images:
print(image.url)
print("\nLinks:")
for link in doc.links:
print(link.url)
|
Add pseudo filter combining pypandoc and panflute
|
Add pseudo filter combining pypandoc and panflute
This is a prototype (filename is hardcoded) but should be easy to extend
|
Python
|
bsd-3-clause
|
sergiocorreia/panflute-filters
|
Add pseudo filter combining pypandoc and panflute
This is a prototype (filename is hardcoded) but should be easy to extend
|
import io
import pypandoc
import panflute
def prepare(doc):
doc.images = []
doc.links = []
def action(elem, doc):
if isinstance(elem, panflute.Image):
doc.images.append(elem)
elif isinstance(elem, panflute.Link):
doc.links.append(elem)
if __name__ == '__main__':
data = pypandoc.convert_file('example.md', 'json')
f = io.StringIO(data)
doc = panflute.load(f)
doc = panflute.run_filter(action, prepare=prepare, doc=doc)
print("\nImages:")
for image in doc.images:
print(image.url)
print("\nLinks:")
for link in doc.links:
print(link.url)
|
<commit_before><commit_msg>Add pseudo filter combining pypandoc and panflute
This is a prototype (filename is hardcoded) but should be easy to extend<commit_after>
|
import io
import pypandoc
import panflute
def prepare(doc):
doc.images = []
doc.links = []
def action(elem, doc):
if isinstance(elem, panflute.Image):
doc.images.append(elem)
elif isinstance(elem, panflute.Link):
doc.links.append(elem)
if __name__ == '__main__':
data = pypandoc.convert_file('example.md', 'json')
f = io.StringIO(data)
doc = panflute.load(f)
doc = panflute.run_filter(action, prepare=prepare, doc=doc)
print("\nImages:")
for image in doc.images:
print(image.url)
print("\nLinks:")
for link in doc.links:
print(link.url)
|
Add pseudo filter combining pypandoc and panflute
This is a prototype (filename is hardcoded) but should be easy to extendimport io
import pypandoc
import panflute
def prepare(doc):
doc.images = []
doc.links = []
def action(elem, doc):
if isinstance(elem, panflute.Image):
doc.images.append(elem)
elif isinstance(elem, panflute.Link):
doc.links.append(elem)
if __name__ == '__main__':
data = pypandoc.convert_file('example.md', 'json')
f = io.StringIO(data)
doc = panflute.load(f)
doc = panflute.run_filter(action, prepare=prepare, doc=doc)
print("\nImages:")
for image in doc.images:
print(image.url)
print("\nLinks:")
for link in doc.links:
print(link.url)
|
<commit_before><commit_msg>Add pseudo filter combining pypandoc and panflute
This is a prototype (filename is hardcoded) but should be easy to extend<commit_after>import io
import pypandoc
import panflute
def prepare(doc):
doc.images = []
doc.links = []
def action(elem, doc):
if isinstance(elem, panflute.Image):
doc.images.append(elem)
elif isinstance(elem, panflute.Link):
doc.links.append(elem)
if __name__ == '__main__':
data = pypandoc.convert_file('example.md', 'json')
f = io.StringIO(data)
doc = panflute.load(f)
doc = panflute.run_filter(action, prepare=prepare, doc=doc)
print("\nImages:")
for image in doc.images:
print(image.url)
print("\nLinks:")
for link in doc.links:
print(link.url)
|
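The hardcoded filename is straightforward to lift into a parameter; a sketch of the same pipeline as a reusable function, using only the pypandoc and panflute calls already shown:
import io
import sys
import pypandoc
import panflute
def extract_urls(path):
    doc = panflute.load(io.StringIO(pypandoc.convert_file(path, 'json')))
    urls = []
    def action(elem, doc):
        # Image and Link elements both expose a url attribute.
        if isinstance(elem, (panflute.Image, panflute.Link)):
            urls.append(elem.url)
    panflute.run_filter(action, doc=doc)
    return urls
if __name__ == '__main__':
    for url in extract_urls(sys.argv[1]):
        print(url)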
|
04287120372a6fdb906ed9f27ead4c5f91d5690e
|
tota/heroes/lenovo.py
|
tota/heroes/lenovo.py
|
from tota.utils import closest, distance, sort_by_distance, possible_moves
from tota import settings
__author__ = "angvp"
def create():
def lenovo_hero_logic(self, things, t):
# some useful data about the enemies I can see in the map
enemy_team = settings.ENEMY_TEAMS[self.team]
enemies = [thing for thing in things.values()
if thing.team == enemy_team]
closest_enemy = closest(self, enemies)
closest_enemy_distance = distance(self, closest_enemy)
real_life = (self.life / self.max_life) * 100
        # now let's decide what to do
if int(real_life) < 85 and self.can('heal', t):
# if I'm hurt and can heal, heal
if closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('fireball', t):
return 'fireball', closest_enemy.position
elif closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('stun', t):
return 'stun', closest_enemy.position
elif closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('attack', t):
return 'attack', closest_enemy.position
else:
return 'heal', self.position
else:
# else, try to attack
if closest_enemy:
# there is an enemy
if closest_enemy_distance <= settings.STUN_DISTANCE and self.can('stun', t):
# try to stun him
return 'stun', closest_enemy.position
if closest_enemy_distance <= settings.FIREBALL_DISTANCE and self.can('fireball', t) and closest_enemy_distance > settings.FIREBALL_RADIUS:
# else try to fireball him, but only if I'm not in range
return 'fireball', closest_enemy.position
elif closest_enemy_distance <= settings.HERO_ATTACK_DISTANCE:
# else try to attack him
return 'attack', closest_enemy.position
else:
                    # or finally, just move toward him (if I have life > 50)
moves = sort_by_distance(closest_enemy,
possible_moves(self, things))
if len(moves) > 0:
back_moves = moves[len(moves)-1]
else:
back_moves = self.position
if moves and int(real_life) > 50:
return 'move', moves[0]
else:
return 'move', back_moves
# can't do the things I want. Do nothing.
return None
return lenovo_hero_logic
|
Add a modified version of simple bot
|
Add a modified version of simple bot
|
Python
|
mit
|
fisadev/tota
|
Add a modified version of simple bot
|
from tota.utils import closest, distance, sort_by_distance, possible_moves
from tota import settings
__author__ = "angvp"
def create():
def lenovo_hero_logic(self, things, t):
# some useful data about the enemies I can see in the map
enemy_team = settings.ENEMY_TEAMS[self.team]
enemies = [thing for thing in things.values()
if thing.team == enemy_team]
closest_enemy = closest(self, enemies)
closest_enemy_distance = distance(self, closest_enemy)
real_life = (self.life / self.max_life) * 100
        # now let's decide what to do
if int(real_life) < 85 and self.can('heal', t):
# if I'm hurt and can heal, heal
if closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('fireball', t):
return 'fireball', closest_enemy.position
elif closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('stun', t):
return 'stun', closest_enemy.position
elif closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('attack', t):
return 'attack', closest_enemy.position
else:
return 'heal', self.position
else:
# else, try to attack
if closest_enemy:
# there is an enemy
if closest_enemy_distance <= settings.STUN_DISTANCE and self.can('stun', t):
# try to stun him
return 'stun', closest_enemy.position
if closest_enemy_distance <= settings.FIREBALL_DISTANCE and self.can('fireball', t) and closest_enemy_distance > settings.FIREBALL_RADIUS:
# else try to fireball him, but only if I'm not in range
return 'fireball', closest_enemy.position
elif closest_enemy_distance <= settings.HERO_ATTACK_DISTANCE:
# else try to attack him
return 'attack', closest_enemy.position
else:
                    # or finally, just move toward him (if I have life > 50)
moves = sort_by_distance(closest_enemy,
possible_moves(self, things))
if len(moves) > 0:
back_moves = moves[len(moves)-1]
else:
back_moves = self.position
if moves and int(real_life) > 50:
return 'move', moves[0]
else:
return 'move', back_moves
# can't do the things I want. Do nothing.
return None
return lenovo_hero_logic
|
<commit_before><commit_msg>Add a modified version of simple bot<commit_after>
|
from tota.utils import closest, distance, sort_by_distance, possible_moves
from tota import settings
__author__ = "angvp"
def create():
def lenovo_hero_logic(self, things, t):
# some useful data about the enemies I can see in the map
enemy_team = settings.ENEMY_TEAMS[self.team]
enemies = [thing for thing in things.values()
if thing.team == enemy_team]
closest_enemy = closest(self, enemies)
closest_enemy_distance = distance(self, closest_enemy)
real_life = (self.life / self.max_life) * 100
        # now let's decide what to do
if int(real_life) < 85 and self.can('heal', t):
# if I'm hurt and can heal, heal
if closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('fireball', t):
return 'fireball', closest_enemy.position
elif closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('stun', t):
return 'stun', closest_enemy.position
elif closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('attack', t):
return 'attack', closest_enemy.position
else:
return 'heal', self.position
else:
# else, try to attack
if closest_enemy:
# there is an enemy
if closest_enemy_distance <= settings.STUN_DISTANCE and self.can('stun', t):
# try to stun him
return 'stun', closest_enemy.position
if closest_enemy_distance <= settings.FIREBALL_DISTANCE and self.can('fireball', t) and closest_enemy_distance > settings.FIREBALL_RADIUS:
# else try to fireball him, but only if I'm not in range
return 'fireball', closest_enemy.position
elif closest_enemy_distance <= settings.HERO_ATTACK_DISTANCE:
# else try to attack him
return 'attack', closest_enemy.position
else:
                    # or finally, just move toward him (if I have life > 50)
moves = sort_by_distance(closest_enemy,
possible_moves(self, things))
if len(moves) > 0:
back_moves = moves[len(moves)-1]
else:
back_moves = self.position
if moves and int(real_life) > 50:
return 'move', moves[0]
else:
return 'move', back_moves
# can't do the things I want. Do nothing.
return None
return lenovo_hero_logic
|
Add a modified version of simple botfrom tota.utils import closest, distance, sort_by_distance, possible_moves
from tota import settings
__author__ = "angvp"
def create():
def lenovo_hero_logic(self, things, t):
# some useful data about the enemies I can see in the map
enemy_team = settings.ENEMY_TEAMS[self.team]
enemies = [thing for thing in things.values()
if thing.team == enemy_team]
closest_enemy = closest(self, enemies)
closest_enemy_distance = distance(self, closest_enemy)
real_life = (self.life / self.max_life) * 100
        # now let's decide what to do
if int(real_life) < 85 and self.can('heal', t):
# if I'm hurt and can heal, heal
if closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('fireball', t):
return 'fireball', closest_enemy.position
elif closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('stun', t):
return 'stun', closest_enemy.position
elif closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('attack', t):
return 'attack', closest_enemy.position
else:
return 'heal', self.position
else:
# else, try to attack
if closest_enemy:
# there is an enemy
if closest_enemy_distance <= settings.STUN_DISTANCE and self.can('stun', t):
# try to stun him
return 'stun', closest_enemy.position
if closest_enemy_distance <= settings.FIREBALL_DISTANCE and self.can('fireball', t) and closest_enemy_distance > settings.FIREBALL_RADIUS:
# else try to fireball him, but only if I'm not in range
return 'fireball', closest_enemy.position
elif closest_enemy_distance <= settings.HERO_ATTACK_DISTANCE:
# else try to attack him
return 'attack', closest_enemy.position
else:
                    # or finally, just move toward him (if I have life > 50)
moves = sort_by_distance(closest_enemy,
possible_moves(self, things))
if len(moves) > 0:
back_moves = moves[len(moves)-1]
else:
back_moves = self.position
if moves and int(real_life) > 50:
return 'move', moves[0]
else:
return 'move', back_moves
# can't do the things I want. Do nothing.
return None
return lenovo_hero_logic
|
<commit_before><commit_msg>Add a modified version of simple bot<commit_after>from tota.utils import closest, distance, sort_by_distance, possible_moves
from tota import settings
__author__ = "angvp"
def create():
def lenovo_hero_logic(self, things, t):
# some useful data about the enemies I can see in the map
enemy_team = settings.ENEMY_TEAMS[self.team]
enemies = [thing for thing in things.values()
if thing.team == enemy_team]
closest_enemy = closest(self, enemies)
closest_enemy_distance = distance(self, closest_enemy)
real_life = (self.life / self.max_life) * 100
        # now let's decide what to do
if int(real_life) < 85 and self.can('heal', t):
# if I'm hurt and can heal, heal
if closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('fireball', t):
return 'fireball', closest_enemy.position
elif closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('stun', t):
return 'stun', closest_enemy.position
elif closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('attack', t):
return 'attack', closest_enemy.position
else:
return 'heal', self.position
else:
# else, try to attack
if closest_enemy:
# there is an enemy
if closest_enemy_distance <= settings.STUN_DISTANCE and self.can('stun', t):
# try to stun him
return 'stun', closest_enemy.position
if closest_enemy_distance <= settings.FIREBALL_DISTANCE and self.can('fireball', t) and closest_enemy_distance > settings.FIREBALL_RADIUS:
# else try to fireball him, but only if I'm not in range
return 'fireball', closest_enemy.position
elif closest_enemy_distance <= settings.HERO_ATTACK_DISTANCE:
# else try to attack him
return 'attack', closest_enemy.position
else:
                    # or finally, just move toward him (if I have life > 50)
moves = sort_by_distance(closest_enemy,
possible_moves(self, things))
if len(moves) > 0:
back_moves = moves[len(moves)-1]
else:
back_moves = self.position
if moves and int(real_life) > 50:
return 'move', moves[0]
else:
return 'move', back_moves
# can't do the things I want. Do nothing.
return None
return lenovo_hero_logic
|
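The three in-range-and-off-cooldown branches share one shape, so they can be table-driven. A sketch of that refactor, reusing the settings names from the snippet and deliberately ignoring the extra FIREBALL_RADIUS guard for brevity:
from tota import settings
ABILITIES = (
    ('stun', settings.STUN_DISTANCE),
    ('fireball', settings.FIREBALL_DISTANCE),
    ('attack', settings.HERO_ATTACK_DISTANCE),
)
def first_available_ability(hero, enemy, enemy_distance, t):
    # Return the first ability that is both in range and off cooldown.
    for name, max_range in ABILITIES:
        if enemy_distance <= max_range and hero.can(name, t):
            return name, enemy.position
    return None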
|
6b4733c213046c7a16bf255cfbc92408e2f01423
|
tests/models/test_authenticated_registry_model.py
|
tests/models/test_authenticated_registry_model.py
|
import pytest
from dockci.models.auth import AuthenticatedRegistry
BASE_AUTHENTICATED_REGISTRY = dict(
id=1,
display_name='Display name',
base_name='Base name',
username='Username',
password='Password',
email='Email',
insecure=False,
)
class TestHash(object):
""" Test ``AuthenticatedRegistry.__hash__`` """
def test_hash_eq(self):
""" Test when hash should be equal """
left = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
right = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
assert hash(left) == hash(right)
@pytest.mark.parametrize('attr_name,attr_value', [
('id', 7),
('display_name', 'different'),
('base_name', 'different'),
('username', 'different'),
('password', 'different'),
('email', 'different'),
('insecure', True),
])
def test_hash_ne(self, attr_name, attr_value):
""" Test when hash should be not equal """
left = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
right = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
setattr(right, attr_name, attr_value)
assert hash(left) != hash(right)
|
Add test for registry model hash
|
Add test for registry model hash
|
Python
|
isc
|
RickyCook/DockCI,sprucedev/DockCI-Agent,RickyCook/DockCI,sprucedev/DockCI,sprucedev/DockCI,sprucedev/DockCI,sprucedev/DockCI,sprucedev/DockCI-Agent,RickyCook/DockCI,RickyCook/DockCI
|
Add test for registry model hash
|
import pytest
from dockci.models.auth import AuthenticatedRegistry
BASE_AUTHENTICATED_REGISTRY = dict(
id=1,
display_name='Display name',
base_name='Base name',
username='Username',
password='Password',
email='Email',
insecure=False,
)
class TestHash(object):
""" Test ``AuthenticatedRegistry.__hash__`` """
def test_hash_eq(self):
""" Test when hash should be equal """
left = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
right = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
assert hash(left) == hash(right)
@pytest.mark.parametrize('attr_name,attr_value', [
('id', 7),
('display_name', 'different'),
('base_name', 'different'),
('username', 'different'),
('password', 'different'),
('email', 'different'),
('insecure', True),
])
def test_hash_ne(self, attr_name, attr_value):
""" Test when hash should be not equal """
left = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
right = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
setattr(right, attr_name, attr_value)
assert hash(left) != hash(right)
|
<commit_before><commit_msg>Add test for registry model hash<commit_after>
|
import pytest
from dockci.models.auth import AuthenticatedRegistry
BASE_AUTHENTICATED_REGISTRY = dict(
id=1,
display_name='Display name',
base_name='Base name',
username='Username',
password='Password',
email='Email',
insecure=False,
)
class TestHash(object):
""" Test ``AuthenticatedRegistry.__hash__`` """
def test_hash_eq(self):
""" Test when hash should be equal """
left = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
right = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
assert hash(left) == hash(right)
@pytest.mark.parametrize('attr_name,attr_value', [
('id', 7),
('display_name', 'different'),
('base_name', 'different'),
('username', 'different'),
('password', 'different'),
('email', 'different'),
('insecure', True),
])
def test_hash_ne(self, attr_name, attr_value):
""" Test when hash should be not equal """
left = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
right = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
setattr(right, attr_name, attr_value)
assert hash(left) != hash(right)
|
Add test for registry model hashimport pytest
from dockci.models.auth import AuthenticatedRegistry
BASE_AUTHENTICATED_REGISTRY = dict(
id=1,
display_name='Display name',
base_name='Base name',
username='Username',
password='Password',
email='Email',
insecure=False,
)
class TestHash(object):
""" Test ``AuthenticatedRegistry.__hash__`` """
def test_hash_eq(self):
""" Test when hash should be equal """
left = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
right = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
assert hash(left) == hash(right)
@pytest.mark.parametrize('attr_name,attr_value', [
('id', 7),
('display_name', 'different'),
('base_name', 'different'),
('username', 'different'),
('password', 'different'),
('email', 'different'),
('insecure', True),
])
def test_hash_ne(self, attr_name, attr_value):
""" Test when hash should be not equal """
left = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
right = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
setattr(right, attr_name, attr_value)
assert hash(left) != hash(right)
|
<commit_before><commit_msg>Add test for registry model hash<commit_after>import pytest
from dockci.models.auth import AuthenticatedRegistry
BASE_AUTHENTICATED_REGISTRY = dict(
id=1,
display_name='Display name',
base_name='Base name',
username='Username',
password='Password',
email='Email',
insecure=False,
)
class TestHash(object):
""" Test ``AuthenticatedRegistry.__hash__`` """
def test_hash_eq(self):
""" Test when hash should be equal """
left = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
right = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
assert hash(left) == hash(right)
@pytest.mark.parametrize('attr_name,attr_value', [
('id', 7),
('display_name', 'different'),
('base_name', 'different'),
('username', 'different'),
('password', 'different'),
('email', 'different'),
('insecure', True),
])
def test_hash_ne(self, attr_name, attr_value):
""" Test when hash should be not equal """
left = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
right = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
setattr(right, attr_name, attr_value)
assert hash(left) != hash(right)
|
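The parametrized cases pin down exactly which attributes must feed the hash; a sketch of a __hash__ consistent with them (the real implementation in dockci.models.auth may differ):
def __hash__(self):
    return hash((self.id, self.display_name, self.base_name,
                 self.username, self.password, self.email,
                 self.insecure))
Hashing mutable, user-editable fields is unusual, but it is precisely the behaviour test_hash_ne asserts.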
|
d3d6a6018d55581bf081c93386f6676c8bb105ce
|
simulate.py
|
simulate.py
|
import genetic
import sys
output = sys.stdout
def setOutput(out):
    global output  # rebind the module-level handle instead of a function-local
    output = out
    genetic.setOutput(output)
# Test data for an XOR gate
testData = (
(0.1, 0.1, 0.9),
(0.1, 0.9, 0.9),
(0.9, 0.1, 0.9),
(0.9, 0.9, 0.1)
)
def simulate():
sim = genetic.Simulation(2, 1, testData, 100)
sim.simulate(100)
|
Add module for running the main simulation
|
Add module for running the main simulation
|
Python
|
mit
|
JoshuaBrockschmidt/ideal_ANN
|
Add module for running the main simulation
|
import genetic
import sys
output = sys.stdout
def setOutput(out):
    global output  # rebind the module-level handle instead of a function-local
    output = out
    genetic.setOutput(output)
# Test data for an XOR gate
testData = (
(0.1, 0.1, 0.9),
(0.1, 0.9, 0.9),
(0.9, 0.1, 0.9),
(0.9, 0.9, 0.1)
)
def simulate():
sim = genetic.Simulation(2, 1, testData, 100)
sim.simulate(100)
|
<commit_before><commit_msg>Add module for running the main simulation<commit_after>
|
import genetic
import sys
output = sys.stdout
def setOutput(out):
    global output  # rebind the module-level handle instead of a function-local
    output = out
    genetic.setOutput(output)
# Test data for an XOR gate
testData = (
(0.1, 0.1, 0.9),
(0.1, 0.9, 0.9),
(0.9, 0.1, 0.9),
(0.9, 0.9, 0.1)
)
def simulate():
sim = genetic.Simulation(2, 1, testData, 100)
sim.simulate(100)
|
Add module for running the main simulationimport genetic
import sys
output = sys.stdout
def setOutput(out):
    global output  # rebind the module-level handle instead of a function-local
    output = out
    genetic.setOutput(output)
# Test data for an XOR gate
testData = (
(0.1, 0.1, 0.9),
(0.1, 0.9, 0.9),
(0.9, 0.1, 0.9),
(0.9, 0.9, 0.1)
)
def simulate():
sim = genetic.Simulation(2, 1, testData, 100)
sim.simulate(100)
|
<commit_before><commit_msg>Add module for running the main simulation<commit_after>import genetic
import sys
output = sys.stdout
def setOutput(out):
    global output  # rebind the module-level handle instead of a function-local
    output = out
    genetic.setOutput(output)
# Test data for a XOR gate
testData = (
(0.1, 0.1, 0.9),
(0.1, 0.9, 0.9),
(0.9, 0.1, 0.9),
(0.9, 0.9, 0.1)
)
def simulate():
sim = genetic.Simulation(2, 1, testData, 100)
sim.simulate(100)
|
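A usage sketch: redirect the simulation's logging to a file before running it. This assumes genetic.setOutput accepts any file-like object, as the pass-through in setOutput suggests:
import simulate
with open('run.log', 'w') as log:
    simulate.setOutput(log)
    simulate.simulate()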
|
2cd1e7fcdf53c312c3db8e6f1d257084a87cccbb
|
recipe-server/normandy/recipes/migrations/0045_update_action_hashes.py
|
recipe-server/normandy/recipes/migrations/0045_update_action_hashes.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import hashlib
from base64 import urlsafe_b64encode
from django.db import migrations
def make_hashes_urlsafe_sri(apps, schema_editor):
Action = apps.get_model('recipes', 'Action')
for action in Action.objects.all():
data = action.implementation.encode()
digest = hashlib.sha384(data).digest()
data_hash = urlsafe_b64encode(digest)
action.implementation_hash = 'sha384-' + data_hash.decode()
action.save()
def make_hashes_sha1(apps, schema_editor):
Action = apps.get_model('recipes', 'Action')
for action in Action.objects.all():
data = action.implementation.encode()
data_hash = hashlib.sha1(data).hexdigest()
action.implementation_hash = data_hash
action.save()
class Migration(migrations.Migration):
dependencies = [
('recipes', '0044_auto_20170801_0010'),
]
operations = [
migrations.RunPython(make_hashes_urlsafe_sri, make_hashes_sha1),
]
|
Add migration to update action implementation hashes.
|
Add migration to update action implementation hashes.
|
Python
|
mpl-2.0
|
mozilla/normandy,mozilla/normandy,mozilla/normandy,mozilla/normandy
|
Add migration to update action implementation hashes.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import hashlib
from base64 import urlsafe_b64encode
from django.db import migrations
def make_hashes_urlsafe_sri(apps, schema_editor):
Action = apps.get_model('recipes', 'Action')
for action in Action.objects.all():
data = action.implementation.encode()
digest = hashlib.sha384(data).digest()
data_hash = urlsafe_b64encode(digest)
action.implementation_hash = 'sha384-' + data_hash.decode()
action.save()
def make_hashes_sha1(apps, schema_editor):
Action = apps.get_model('recipes', 'Action')
for action in Action.objects.all():
data = action.implementation.encode()
data_hash = hashlib.sha1(data).hexdigest()
action.implementation_hash = data_hash
action.save()
class Migration(migrations.Migration):
dependencies = [
('recipes', '0044_auto_20170801_0010'),
]
operations = [
migrations.RunPython(make_hashes_urlsafe_sri, make_hashes_sha1),
]
|
<commit_before><commit_msg>Add migration to update action implementation hashes.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import hashlib
from base64 import urlsafe_b64encode
from django.db import migrations
def make_hashes_urlsafe_sri(apps, schema_editor):
Action = apps.get_model('recipes', 'Action')
for action in Action.objects.all():
data = action.implementation.encode()
digest = hashlib.sha384(data).digest()
data_hash = urlsafe_b64encode(digest)
action.implementation_hash = 'sha384-' + data_hash.decode()
action.save()
def make_hashes_sha1(apps, schema_editor):
Action = apps.get_model('recipes', 'Action')
for action in Action.objects.all():
data = action.implementation.encode()
data_hash = hashlib.sha1(data).hexdigest()
action.implementation_hash = data_hash
action.save()
class Migration(migrations.Migration):
dependencies = [
('recipes', '0044_auto_20170801_0010'),
]
operations = [
migrations.RunPython(make_hashes_urlsafe_sri, make_hashes_sha1),
]
|
Add migration to update action implementation hashes.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import hashlib
from base64 import urlsafe_b64encode
from django.db import migrations
def make_hashes_urlsafe_sri(apps, schema_editor):
Action = apps.get_model('recipes', 'Action')
for action in Action.objects.all():
data = action.implementation.encode()
digest = hashlib.sha384(data).digest()
data_hash = urlsafe_b64encode(digest)
action.implementation_hash = 'sha384-' + data_hash.decode()
action.save()
def make_hashes_sha1(apps, schema_editor):
Action = apps.get_model('recipes', 'Action')
for action in Action.objects.all():
data = action.implementation.encode()
data_hash = hashlib.sha1(data).hexdigest()
action.implementation_hash = data_hash
action.save()
class Migration(migrations.Migration):
dependencies = [
('recipes', '0044_auto_20170801_0010'),
]
operations = [
migrations.RunPython(make_hashes_urlsafe_sri, make_hashes_sha1),
]
|
<commit_before><commit_msg>Add migration to update action implementation hashes.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
import hashlib
from base64 import urlsafe_b64encode
from django.db import migrations
def make_hashes_urlsafe_sri(apps, schema_editor):
Action = apps.get_model('recipes', 'Action')
for action in Action.objects.all():
data = action.implementation.encode()
digest = hashlib.sha384(data).digest()
data_hash = urlsafe_b64encode(digest)
action.implementation_hash = 'sha384-' + data_hash.decode()
action.save()
def make_hashes_sha1(apps, schema_editor):
Action = apps.get_model('recipes', 'Action')
for action in Action.objects.all():
data = action.implementation.encode()
data_hash = hashlib.sha1(data).hexdigest()
action.implementation_hash = data_hash
action.save()
class Migration(migrations.Migration):
dependencies = [
('recipes', '0044_auto_20170801_0010'),
]
operations = [
migrations.RunPython(make_hashes_urlsafe_sri, make_hashes_sha1),
]
|
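The forward and reverse functions produce very different shapes for the same implementation string; a quick illustration of both encodings for one input, using only the standard library:
import hashlib
from base64 import urlsafe_b64encode
data = 'console.log("hi");'.encode()
sri = 'sha384-' + urlsafe_b64encode(hashlib.sha384(data).digest()).decode()
legacy = hashlib.sha1(data).hexdigest()
print(sri)     # 'sha384-' plus 64 urlsafe base64 characters
print(legacy)  # 40 hex characters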
|
8fa7120606e206d08acbad198e253ea428eef584
|
tests/compiler/test_inline_list_compilation.py
|
tests/compiler/test_inline_list_compilation.py
|
import pytest
from tests.compiler import compile_snippet, internal_call, STATIC_START, LOCAL_START
from thinglang.compiler.errors import NoMatchingOverload, InvalidReference
from thinglang.compiler.opcodes import OpcodePopLocal, OpcodePushStatic
def test_inline_list_compilation():
assert compile_snippet('list<number> numbers = [1, 2, 3]') == [
OpcodePushStatic(STATIC_START), # Push the values
OpcodePushStatic(STATIC_START + 1),
OpcodePushStatic(STATIC_START + 2),
internal_call('list.__constructor__'), # Create the list
internal_call('list.append'), # Compile 3 append calls
internal_call('list.append'),
internal_call('list.append'),
OpcodePopLocal(LOCAL_START)
]
def test_inline_list_type_homogeneity():
with pytest.raises(NoMatchingOverload):
assert compile_snippet('list<number> numbers = [1, Container(), 3]')
def test_inline_list_declaration_type_match():
with pytest.raises(InvalidReference):
assert compile_snippet('list<number> numbers = [Container(), Container(), Container()]')
|
Add tests for inline list compilation
|
Add tests for inline list compilation
|
Python
|
mit
|
ytanay/thinglang,ytanay/thinglang,ytanay/thinglang,ytanay/thinglang
|
Add tests for inline list compilation
|
import pytest
from tests.compiler import compile_snippet, internal_call, STATIC_START, LOCAL_START
from thinglang.compiler.errors import NoMatchingOverload, InvalidReference
from thinglang.compiler.opcodes import OpcodePopLocal, OpcodePushStatic
def test_inline_list_compilation():
assert compile_snippet('list<number> numbers = [1, 2, 3]') == [
OpcodePushStatic(STATIC_START), # Push the values
OpcodePushStatic(STATIC_START + 1),
OpcodePushStatic(STATIC_START + 2),
internal_call('list.__constructor__'), # Create the list
internal_call('list.append'), # Compile 3 append calls
internal_call('list.append'),
internal_call('list.append'),
OpcodePopLocal(LOCAL_START)
]
def test_inline_list_type_homogeneity():
with pytest.raises(NoMatchingOverload):
assert compile_snippet('list<number> numbers = [1, Container(), 3]')
def test_inline_list_declaration_type_match():
with pytest.raises(InvalidReference):
assert compile_snippet('list<number> numbers = [Container(), Container(), Container()]')
|
<commit_before><commit_msg>Add tests for inline list compilation<commit_after>
|
import pytest
from tests.compiler import compile_snippet, internal_call, STATIC_START, LOCAL_START
from thinglang.compiler.errors import NoMatchingOverload, InvalidReference
from thinglang.compiler.opcodes import OpcodePopLocal, OpcodePushStatic
def test_inline_list_compilation():
assert compile_snippet('list<number> numbers = [1, 2, 3]') == [
OpcodePushStatic(STATIC_START), # Push the values
OpcodePushStatic(STATIC_START + 1),
OpcodePushStatic(STATIC_START + 2),
internal_call('list.__constructor__'), # Create the list
internal_call('list.append'), # Compile 3 append calls
internal_call('list.append'),
internal_call('list.append'),
OpcodePopLocal(LOCAL_START)
]
def test_inline_list_type_homogeneity():
with pytest.raises(NoMatchingOverload):
assert compile_snippet('list<number> numbers = [1, Container(), 3]')
def test_inline_list_declaration_type_match():
with pytest.raises(InvalidReference):
assert compile_snippet('list<number> numbers = [Container(), Container(), Container()]')
|
Add tests for inline list compilationimport pytest
from tests.compiler import compile_snippet, internal_call, STATIC_START, LOCAL_START
from thinglang.compiler.errors import NoMatchingOverload, InvalidReference
from thinglang.compiler.opcodes import OpcodePopLocal, OpcodePushStatic
def test_inline_list_compilation():
assert compile_snippet('list<number> numbers = [1, 2, 3]') == [
OpcodePushStatic(STATIC_START), # Push the values
OpcodePushStatic(STATIC_START + 1),
OpcodePushStatic(STATIC_START + 2),
internal_call('list.__constructor__'), # Create the list
internal_call('list.append'), # Compile 3 append calls
internal_call('list.append'),
internal_call('list.append'),
OpcodePopLocal(LOCAL_START)
]
def test_inline_list_type_homogeneity():
with pytest.raises(NoMatchingOverload):
assert compile_snippet('list<number> numbers = [1, Container(), 3]')
def test_inline_list_declaration_type_match():
with pytest.raises(InvalidReference):
assert compile_snippet('list<number> numbers = [Container(), Container(), Container()]')
|
<commit_before><commit_msg>Add tests for inline list compilation<commit_after>import pytest
from tests.compiler import compile_snippet, internal_call, STATIC_START, LOCAL_START
from thinglang.compiler.errors import NoMatchingOverload, InvalidReference
from thinglang.compiler.opcodes import OpcodePopLocal, OpcodePushStatic
def test_inline_list_compilation():
assert compile_snippet('list<number> numbers = [1, 2, 3]') == [
OpcodePushStatic(STATIC_START), # Push the values
OpcodePushStatic(STATIC_START + 1),
OpcodePushStatic(STATIC_START + 2),
internal_call('list.__constructor__'), # Create the list
internal_call('list.append'), # Compile 3 append calls
internal_call('list.append'),
internal_call('list.append'),
OpcodePopLocal(LOCAL_START)
]
def test_inline_list_type_homogeneity():
with pytest.raises(NoMatchingOverload):
assert compile_snippet('list<number> numbers = [1, Container(), 3]')
def test_inline_list_declaration_type_match():
with pytest.raises(InvalidReference):
assert compile_snippet('list<number> numbers = [Container(), Container(), Container()]')
|
|
3ddf0f0fead6018b5c313253a0df2165452cfb6e
|
src/eduid_common/api/translation.py
|
src/eduid_common/api/translation.py
|
# -*- coding: utf-8 -*-
from flask import request
from flask_babel import Babel
__author__ = 'lundberg'
def init_babel(app):
babel = Babel(app)
app.babel = babel
@babel.localeselector
def get_locale():
# if a user is logged in, use the locale from the user settings
# XXX: TODO
# otherwise try to guess the language from the user accept
# header the browser transmits. The best match wins.
return request.accept_languages.best_match(app.config.get('SUPPORTED_LANGUAGES'))
return app
|
Add shared babel init code
|
Add shared babel init code
|
Python
|
bsd-3-clause
|
SUNET/eduid-common
|
Add shared babel init code
|
# -*- coding: utf-8 -*-
from flask import request
from flask_babel import Babel
__author__ = 'lundberg'
def init_babel(app):
babel = Babel(app)
app.babel = babel
@babel.localeselector
def get_locale():
# if a user is logged in, use the locale from the user settings
# XXX: TODO
# otherwise try to guess the language from the user accept
# header the browser transmits. The best match wins.
return request.accept_languages.best_match(app.config.get('SUPPORTED_LANGUAGES'))
return app
|
<commit_before><commit_msg>Add shared babel init code<commit_after>
|
# -*- coding: utf-8 -*-
from flask import request
from flask_babel import Babel
__author__ = 'lundberg'
def init_babel(app):
babel = Babel(app)
app.babel = babel
@babel.localeselector
def get_locale():
# if a user is logged in, use the locale from the user settings
# XXX: TODO
# otherwise try to guess the language from the user accept
# header the browser transmits. The best match wins.
return request.accept_languages.best_match(app.config.get('SUPPORTED_LANGUAGES'))
return app
|
Add shared babel init code# -*- coding: utf-8 -*-
from flask import request
from flask_babel import Babel
__author__ = 'lundberg'
def init_babel(app):
babel = Babel(app)
app.babel = babel
@babel.localeselector
def get_locale():
# if a user is logged in, use the locale from the user settings
# XXX: TODO
# otherwise try to guess the language from the user accept
# header the browser transmits. The best match wins.
return request.accept_languages.best_match(app.config.get('SUPPORTED_LANGUAGES'))
return app
|
<commit_before><commit_msg>Add shared babel init code<commit_after># -*- coding: utf-8 -*-
from flask import request
from flask_babel import Babel
__author__ = 'lundberg'
def init_babel(app):
babel = Babel(app)
app.babel = babel
@babel.localeselector
def get_locale():
# if a user is logged in, use the locale from the user settings
# XXX: TODO
# otherwise try to guess the language from the user accept
# header the browser transmits. The best match wins.
return request.accept_languages.best_match(app.config.get('SUPPORTED_LANGUAGES'))
return app
|
|
30bca45e1ac9fc6953728950695135b491403215
|
tests/basics/logic_constfolding.py
|
tests/basics/logic_constfolding.py
|
# tests logical constant folding in parser
def f_true():
print('f_true')
return True
def f_false():
print('f_false')
return False
print(0 or False)
print(1 or foo)
print(f_false() or 1 or foo)
print(f_false() or 1 or f_true())
print(0 and foo)
print(1 and True)
print(f_true() and 0 and foo)
print(f_true() and 1 and f_false())
print(not 0)
print(not False)
print(not 1)
print(not True)
print(not not 0)
print(not not 1)
|
Add test for logical constant folding.
|
tests/basics: Add test for logical constant folding.
|
Python
|
mit
|
mhoffma/micropython,blazewicz/micropython,tobbad/micropython,oopy/micropython,pozetroninc/micropython,swegener/micropython,tralamazza/micropython,tobbad/micropython,mhoffma/micropython,kerneltask/micropython,MrSurly/micropython,Peetz0r/micropython-esp32,tobbad/micropython,dmazzella/micropython,HenrikSolver/micropython,pramasoul/micropython,micropython/micropython-esp32,pozetroninc/micropython,henriknelson/micropython,SHA2017-badge/micropython-esp32,adafruit/circuitpython,MrSurly/micropython-esp32,tuc-osg/micropython,trezor/micropython,trezor/micropython,micropython/micropython-esp32,pozetroninc/micropython,chrisdearman/micropython,deshipu/micropython,HenrikSolver/micropython,lowRISC/micropython,micropython/micropython-esp32,deshipu/micropython,HenrikSolver/micropython,henriknelson/micropython,Peetz0r/micropython-esp32,hiway/micropython,swegener/micropython,adafruit/circuitpython,trezor/micropython,pramasoul/micropython,trezor/micropython,cwyark/micropython,lowRISC/micropython,adafruit/micropython,adafruit/micropython,SHA2017-badge/micropython-esp32,MrSurly/micropython,henriknelson/micropython,SHA2017-badge/micropython-esp32,MrSurly/micropython-esp32,MrSurly/micropython-esp32,toolmacher/micropython,chrisdearman/micropython,adafruit/circuitpython,blazewicz/micropython,torwag/micropython,tuc-osg/micropython,hiway/micropython,micropython/micropython-esp32,deshipu/micropython,selste/micropython,selste/micropython,micropython/micropython-esp32,alex-robbins/micropython,blazewicz/micropython,Peetz0r/micropython-esp32,PappaPeppar/micropython,HenrikSolver/micropython,mhoffma/micropython,infinnovation/micropython,toolmacher/micropython,dmazzella/micropython,cwyark/micropython,adafruit/micropython,ryannathans/micropython,SHA2017-badge/micropython-esp32,pozetroninc/micropython,pramasoul/micropython,adafruit/circuitpython,blazewicz/micropython,torwag/micropython,PappaPeppar/micropython,cwyark/micropython,henriknelson/micropython,swegener/micropython,bvernoux/micropython,TDAbboud/micropython,deshipu/micropython,tralamazza/micropython,adafruit/micropython,AriZuu/micropython,pramasoul/micropython,AriZuu/micropython,MrSurly/micropython,oopy/micropython,torwag/micropython,chrisdearman/micropython,kerneltask/micropython,tralamazza/micropython,dmazzella/micropython,PappaPeppar/micropython,TDAbboud/micropython,PappaPeppar/micropython,cwyark/micropython,micropython/micropython-esp32,lowRISC/micropython,ryannathans/micropython,swegener/micropython,dmazzella/micropython,blazewicz/micropython,henriknelson/micropython,tobbad/micropython,MrSurly/micropython,Timmenem/micropython,tralamazza/micropython,toolmacher/micropython,tuc-osg/micropython,tuc-osg/micropython,adafruit/micropython,alex-robbins/micropython,puuu/micropython,Peetz0r/micropython-esp32,mhoffma/micropython,pramasoul/micropython,selste/micropython,torwag/micropython,Timmenem/micropython,alex-robbins/micropython,Peetz0r/micropython-esp32,HenrikSolver/micropython,PappaPeppar/micropython,infinnovation/micropython,infinnovation/micropython,MrSurly/micropython,lowRISC/micropython,ryannathans/micropython,kerneltask/micropython,puuu/micropython,pozetroninc/micropython,lowRISC/micropython,kerneltask/micropython,chrisdearman/micropython,alex-robbins/micropython,MrSurly/micropython-esp32,hiway/micropython,Timmenem/micropython,bvernoux/micropython,oopy/micropython,alex-robbins/micropython,swegener/micropython,blazewicz/micropython,AriZuu/micropython,TDAbboud/micropython,puuu/micropython,bvernoux/micropython,puuu/micropython,hiway/micropython,infinnovation/micropython,adafruit/circuitpython,bvernoux/micropython,Timmenem/micropython,puuu/micropython,chrisdearman/micropython,tuc-osg/micropython,AriZuu/micropython,tobbad/micropython,SHA2017-badge/micropython-esp32,deshipu/micropython,toolmacher/micropython,trezor/micropython,adafruit/circuitpython,pfalcon/micropython,TDAbboud/micropython,Timmenem/micropython,torwag/micropython,cwyark/micropython,pfalcon/micropython,selste/micropython,kerneltask/micropython,pfalcon/micropython
|
tests/basics: Add test for logical constant folding.
|
# tests logical constant folding in parser
def f_true():
print('f_true')
return True
def f_false():
print('f_false')
return False
print(0 or False)
print(1 or foo)
print(f_false() or 1 or foo)
print(f_false() or 1 or f_true())
print(0 and foo)
print(1 and True)
print(f_true() and 0 and foo)
print(f_true() and 1 and f_false())
print(not 0)
print(not False)
print(not 1)
print(not True)
print(not not 0)
print(not not 1)
|
<commit_before><commit_msg>tests/basics: Add test for logical constant folding.<commit_after>
|
# tests logical constant folding in parser
def f_true():
print('f_true')
return True
def f_false():
print('f_false')
return False
print(0 or False)
print(1 or foo)
print(f_false() or 1 or foo)
print(f_false() or 1 or f_true())
print(0 and foo)
print(1 and True)
print(f_true() and 0 and foo)
print(f_true() and 1 and f_false())
print(not 0)
print(not False)
print(not 1)
print(not True)
print(not not 0)
print(not not 1)
|
tests/basics: Add test for logical constant folding.# tests logical constant folding in parser
def f_true():
print('f_true')
return True
def f_false():
print('f_false')
return False
print(0 or False)
print(1 or foo)
print(f_false() or 1 or foo)
print(f_false() or 1 or f_true())
print(0 and foo)
print(1 and True)
print(f_true() and 0 and foo)
print(f_true() and 1 and f_false())
print(not 0)
print(not False)
print(not 1)
print(not True)
print(not not 0)
print(not not 1)
|
<commit_before><commit_msg>tests/basics: Add test for logical constant folding.<commit_after># tests logical constant folding in parser
def f_true():
print('f_true')
return True
def f_false():
print('f_false')
return False
print(0 or False)
print(1 or foo)
print(f_false() or 1 or foo)
print(f_false() or 1 or f_true())
print(0 and foo)
print(1 and True)
print(f_true() and 0 and foo)
print(f_true() and 1 and f_false())
print(not 0)
print(not False)
print(not 1)
print(not True)
print(not not 0)
print(not not 1)
|
|
be17cf90b06a118d579c0211dd3bc2d45433fb2d
|
tests/test_handle_long_response.py
|
tests/test_handle_long_response.py
|
import context
class TestHandleLongResponse(context.slouch.testing.CommandTestCase):
bot_class = context.TimerBot
config = {'start_fmt': '{:%Y}', 'stop_fmt': '{.days}'}
normal_text = "@genericmention: this is generic mention message contains a URL <http://foo.com/>\n@genericmention: this generic mention message contains a :fast_parrot: and :nyancat_big:\n"
over_limit_text = normal_text * 50 # 8550 chars
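    # Chunk lengths asserted below (3932 + 3933 + 685 = 8550) suggest splitting just under Slack's ~4000-character message limit.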
def test_handle_long_message_api(self):
_res = {
'type': 'message',
'text': self.normal_text,
'channel': None,
}
responses = self.bot._handle_long_response(_res)
self.assertEqual(len(responses), 1)
self.assertEqual(responses, [{
'type': 'message',
'text': self.normal_text,
'channel': None
}])
def test_handle_long_message_over_limit_api(self):
_res = {
'type': 'message',
'text': self.over_limit_text,
'channel': None,
}
responses = self.bot._handle_long_response(_res)
self.assertEqual([len(r['text']) for r in responses], [3932, 3933, 685])
self.assertEqual(len(responses), 3)
def test_handle_long_message_rtm(self):
responses = self.bot._handle_long_response(self.normal_text)
self.assertEqual(responses, [self.normal_text])
self.assertEqual(len(responses), 1)
def test_handle_long_message_over_limit_rtm(self):
responses = self.bot._handle_long_response(self.over_limit_text)
self.assertEqual([len(r) for r in responses], [3932, 3933, 685])
self.assertEqual(len(responses), 3)
|
Write unit tests for _handle_long_response
|
Write unit tests for _handle_long_response
|
Python
|
mit
|
venmo/slouch
|
Write unit tests for _handle_long_response
|
import context
class TestHandleLongResponse(context.slouch.testing.CommandTestCase):
bot_class = context.TimerBot
config = {'start_fmt': '{:%Y}', 'stop_fmt': '{.days}'}
normal_text = "@genericmention: this is generic mention message contains a URL <http://foo.com/>\n@genericmention: this generic mention message contains a :fast_parrot: and :nyancat_big:\n"
over_limit_text = normal_text * 50 # 8550 chars
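    # Chunk lengths asserted below (3932 + 3933 + 685 = 8550) suggest splitting just under Slack's ~4000-character message limit.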
def test_handle_long_message_api(self):
_res = {
'type': 'message',
'text': self.normal_text,
'channel': None,
}
responses = self.bot._handle_long_response(_res)
self.assertEqual(len(responses), 1)
self.assertEqual(responses, [{
'type': 'message',
'text': self.normal_text,
'channel': None
}])
def test_handle_long_message_over_limit_api(self):
_res = {
'type': 'message',
'text': self.over_limit_text,
'channel': None,
}
responses = self.bot._handle_long_response(_res)
self.assertEqual([len(r['text']) for r in responses], [3932, 3933, 685])
self.assertEqual(len(responses), 3)
def test_handle_long_message_rtm(self):
responses = self.bot._handle_long_response(self.normal_text)
self.assertEqual(responses, [self.normal_text])
self.assertEqual(len(responses), 1)
def test_handle_long_message_over_limit_rtm(self):
responses = self.bot._handle_long_response(self.over_limit_text)
self.assertEqual([len(r) for r in responses], [3932, 3933, 685])
self.assertEqual(len(responses), 3)
|
<commit_before><commit_msg>Write unit tests for _handle_long_response<commit_after>
|
import context
class TestHandleLongResponse(context.slouch.testing.CommandTestCase):
bot_class = context.TimerBot
config = {'start_fmt': '{:%Y}', 'stop_fmt': '{.days}'}
normal_text = "@genericmention: this is generic mention message contains a URL <http://foo.com/>\n@genericmention: this generic mention message contains a :fast_parrot: and :nyancat_big:\n"
over_limit_text = normal_text * 50 # 8550 chars
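    # Chunk lengths asserted below (3932 + 3933 + 685 = 8550) suggest splitting just under Slack's ~4000-character message limit.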
def test_handle_long_message_api(self):
_res = {
'type': 'message',
'text': self.normal_text,
'channel': None,
}
responses = self.bot._handle_long_response(_res)
self.assertEqual(len(responses), 1)
self.assertEqual(responses, [{
'type': 'message',
'text': self.normal_text,
'channel': None
}])
def test_handle_long_message_over_limit_api(self):
_res = {
'type': 'message',
'text': self.over_limit_text,
'channel': None,
}
responses = self.bot._handle_long_response(_res)
self.assertEqual([len(r['text']) for r in responses], [3932, 3933, 685])
self.assertEqual(len(responses), 3)
def test_handle_long_message_rtm(self):
responses = self.bot._handle_long_response(self.normal_text)
self.assertEqual(responses, [self.normal_text])
self.assertEqual(len(responses), 1)
def test_handle_long_message_over_limit_rtm(self):
responses = self.bot._handle_long_response(self.over_limit_text)
self.assertEqual([len(r) for r in responses], [3932, 3933, 685])
self.assertEqual(len(responses), 3)
|
Write unit tests for _handle_long_responseimport context
class TestHandleLongResponse(context.slouch.testing.CommandTestCase):
bot_class = context.TimerBot
config = {'start_fmt': '{:%Y}', 'stop_fmt': '{.days}'}
normal_text = "@genericmention: this is generic mention message contains a URL <http://foo.com/>\n@genericmention: this generic mention message contains a :fast_parrot: and :nyancat_big:\n"
over_limit_text = normal_text * 50 # 8550 chars
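    # Chunk lengths asserted below (3932 + 3933 + 685 = 8550) suggest splitting just under Slack's ~4000-character message limit.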
def test_handle_long_message_api(self):
_res = {
'type': 'message',
'text': self.normal_text,
'channel': None,
}
responses = self.bot._handle_long_response(_res)
self.assertEqual(len(responses), 1)
self.assertEqual(responses, [{
'type': 'message',
'text': self.normal_text,
'channel': None
}])
def test_handle_long_message_over_limit_api(self):
_res = {
'type': 'message',
'text': self.over_limit_text,
'channel': None,
}
responses = self.bot._handle_long_response(_res)
self.assertEqual([len(r['text']) for r in responses], [3932, 3933, 685])
self.assertEqual(len(responses), 3)
def test_handle_long_message_rtm(self):
responses = self.bot._handle_long_response(self.normal_text)
self.assertEqual(responses, [self.normal_text])
self.assertEqual(len(responses), 1)
def test_handle_long_message_over_limit_rtm(self):
responses = self.bot._handle_long_response(self.over_limit_text)
self.assertEqual([len(r) for r in responses], [3932, 3933, 685])
self.assertEqual(len(responses), 3)
|
<commit_before><commit_msg>Write unit tests for _handle_long_response<commit_after>import context
class TestHandleLongResponse(context.slouch.testing.CommandTestCase):
bot_class = context.TimerBot
config = {'start_fmt': '{:%Y}', 'stop_fmt': '{.days}'}
normal_text = "@genericmention: this is generic mention message contains a URL <http://foo.com/>\n@genericmention: this generic mention message contains a :fast_parrot: and :nyancat_big:\n"
over_limit_text = normal_text * 50 # 8550 chars
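    # Chunk lengths asserted below (3932 + 3933 + 685 = 8550) suggest splitting just under Slack's ~4000-character message limit.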
def test_handle_long_message_api(self):
_res = {
'type': 'message',
'text': self.normal_text,
'channel': None,
}
responses = self.bot._handle_long_response(_res)
self.assertEqual(len(responses), 1)
self.assertEqual(responses, [{
'type': 'message',
'text': self.normal_text,
'channel': None
}])
def test_handle_long_message_over_limit_api(self):
_res = {
'type': 'message',
'text': self.over_limit_text,
'channel': None,
}
responses = self.bot._handle_long_response(_res)
self.assertEqual([len(r['text']) for r in responses], [3932, 3933, 685])
self.assertEqual(len(responses), 3)
def test_handle_long_message_rtm(self):
responses = self.bot._handle_long_response(self.normal_text)
self.assertEqual(responses, [self.normal_text])
self.assertEqual(len(responses), 1)
def test_handle_long_message_over_limit_rtm(self):
responses = self.bot._handle_long_response(self.over_limit_text)
self.assertEqual([len(r) for r in responses], [3932, 3933, 685])
self.assertEqual(len(responses), 3)
|
|
063899021158fe872745b335595b3094db9834d8
|
pycket/test/test_version.py
|
pycket/test/test_version.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Test the version here.
#
import pytest
from pycket.test.testhelper import check_equal
EXPECTED_VERSION='6.1.1.8'
def test_version():
check_equal('(version)', '"%s"' % EXPECTED_VERSION)
# EOF
|
Add a test for 'version.
|
Add a test for 'version.
! needs updating with every nightly, currentl
|
Python
|
mit
|
samth/pycket,krono/pycket,vishesh/pycket,magnusmorton/pycket,vishesh/pycket,vishesh/pycket,magnusmorton/pycket,cderici/pycket,pycket/pycket,pycket/pycket,cderici/pycket,krono/pycket,magnusmorton/pycket,krono/pycket,pycket/pycket,cderici/pycket,samth/pycket,samth/pycket
|
Add a test for 'version.
! needs updating with every nightly, currentl
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Test the version here.
#
import pytest
from pycket.test.testhelper import check_equal
EXPECTED_VERSION='6.1.1.8'
def test_version():
check_equal('(version)', '"%s"' % EXPECTED_VERSION)
# EOF
|
<commit_before><commit_msg>Add a test for 'version.
! needs updating with every nightly, currentl<commit_after>
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Test the version here.
#
import pytest
from pycket.test.testhelper import check_equal
EXPECTED_VERSION='6.1.1.8'
def test_version():
check_equal('(version)', '"%s"' % EXPECTED_VERSION)
# EOF
|
Add a test for 'version.
! needs updating with every nightly, currentl#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Test the version here.
#
import pytest
from pycket.test.testhelper import check_equal
EXPECTED_VERSION='6.1.1.8'
def test_version():
check_equal('(version)', '"%s"' % EXPECTED_VERSION)
# EOF
|
<commit_before><commit_msg>Add a test for 'version.
! needs updating with every nightly, currentl<commit_after>#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Test the version here.
#
import pytest
from pycket.test.testhelper import check_equal
EXPECTED_VERSION='6.1.1.8'
def test_version():
check_equal('(version)', '"%s"' % EXPECTED_VERSION)
# EOF
|
|
fe37335645993ad10c9902aaaaf0ca2c53912d49
|
movies_avg_etl.py
|
movies_avg_etl.py
|
import pyspark
spark = (
pyspark.sql.SparkSession.builder.appName("FromDatabase")
.config("spark.driver.extraClassPath", "<driver_location>/postgresql-42.2.18.jar")
.getOrCreate()
)
# Read table from db using Spark JDBC
def extract_movies_to_df():
movies_df = (
spark.read.format("jdbc")
.option("url", "jdbc:postgresql://localhost:5432/etl_pipeline")
.option("dbtable", "movies")
.option("user", "<username")
.option("password", "<password>")
.option("driver", "org.postgresql.Driver")
.load()
)
return movies_df
# Read users table from db using Spark JDBC
def extract_users_to_df():
users_df = (
spark.read.format("jdbc")
.option("url", "jdbc:postgresql://localhost:5432/etl_pipeline")
.option("dbtable", "users")
.option("user", "<username")
.option("password", "<password>")
.option("driver", "org.postgresql.Driver")
.load()
)
return users_df
# transforming tables
def transform_avg_ratings(movies_df, users_df):
avg_rating = users_df.groupby("movie_id").mean("rating")
# join movies_df and avg_rating table on id
    df = movies_df.join(avg_rating, movies_df.id == avg_rating.movie_id)
df = df.drop("movie_id")
return df
# Write the result into avg_ratings table in db
def load_df_to_db(df):
mode = "overwrite"
url = "jdbc:postgresql://localhost:5432/etl_pipeline"
properties = {
"user": "<username>",
"password": "<password>",
"driver": "org.postgresql.Driver",
}
df.write.jdbc(url=url, table="avg_ratings", mode=mode, properties=properties)
if __name__ == "__main__":
movies_df = extract_movies_to_df()
users_df = extract_users_to_df()
ratings_df = transform_avg_ratings(movies_df, users_df)
load_df_to_db(ratings_df)
|
Create Average Movies rating etl
|
Feat: Create Average Movies rating etl
Extracts data from 2 tables in the database, transforms the data and writes the result into another table in the same database
|
Python
|
mit
|
searchs/bigdatabox,searchs/bigdatabox
|
Feat: Create Average Movies rating etl
Extracts data from 2 tables in the database, transforms the data and writes the result into another table in the same database
|
import pyspark
spark = (
pyspark.sql.SparkSession.builder.appName("FromDatabase")
.config("spark.driver.extraClassPath", "<driver_location>/postgresql-42.2.18.jar")
.getOrCreate()
)
# Read table from db using Spark JDBC
def extract_movies_to_df():
movies_df = (
spark.read.format("jdbc")
.option("url", "jdbc:postgresql://localhost:5432/etl_pipeline")
.option("dbtable", "movies")
.option("user", "<username")
.option("password", "<password>")
.option("driver", "org.postgresql.Driver")
.load()
)
return movies_df
# Read users table from db using Spark JDBC
def extract_users_to_df():
users_df = (
spark.read.format("jdbc")
.option("url", "jdbc:postgresql://localhost:5432/etl_pipeline")
.option("dbtable", "users")
.option("user", "<username")
.option("password", "<password>")
.option("driver", "org.postgresql.Driver")
.load()
)
return users_df
# transforming tables
def transform_avg_ratings(movies_df, users_df):
avg_rating = users_df.groupby("movie_id").mean("rating")
# join movies_df and avg_rating table on id
    df = movies_df.join(avg_rating, movies_df.id == avg_rating.movie_id)
df = df.drop("movie_id")
return df
# Write the result into avg_ratings table in db
def load_df_to_db(df):
mode = "overwrite"
url = "jdbc:postgresql://localhost:5432/etl_pipeline"
properties = {
"user": "<username>",
"password": "<password>",
"driver": "org.postgresql.Driver",
}
df.write.jdbc(url=url, table="avg_ratings", mode=mode, properties=properties)
if __name__ == "__main__":
movies_df = extract_movies_to_df()
users_df = extract_users_to_df()
ratings_df = transform_avg_ratings(movies_df, users_df)
load_df_to_db(ratings_df)
|
<commit_before><commit_msg>Feat: Create Average Movies rating etl
Extracts data from 2 tables in the database, transforms the data and writes the result into another table in the same database<commit_after>
|
import pyspark
spark = (
pyspark.sql.SparkSession.builder.appName("FromDatabase")
.config("spark.driver.extraClassPath", "<driver_location>/postgresql-42.2.18.jar")
.getOrCreate()
)
# Read table from db using Spark JDBC
def extract_movies_to_df():
movies_df = (
spark.read.format("jdbc")
.option("url", "jdbc:postgresql://localhost:5432/etl_pipeline")
.option("dbtable", "movies")
.option("user", "<username")
.option("password", "<password>")
.option("driver", "org.postgresql.Driver")
.load()
)
return movies_df
# Read users table from db using Spark JDBC
def extract_users_to_df():
users_df = (
spark.read.format("jdbc")
.option("url", "jdbc:postgresql://localhost:5432/etl_pipeline")
.option("dbtable", "users")
.option("user", "<username")
.option("password", "<password>")
.option("driver", "org.postgresql.Driver")
.load()
)
return users_df
# transforming tables
def transform_avg_ratings(movies_df, users_df):
avg_rating = users_df.groupby("movie_id").mean("rating")
# join movies_df and avg_rating table on id
    df = movies_df.join(avg_rating, movies_df.id == avg_rating.movie_id)
df = df.drop("movie_id")
return df
# Write the result into avg_ratings table in db
def load_df_to_db(df):
mode = "overwrite"
url = "jdbc:postgresql://localhost:5432/etl_pipeline"
properties = {
"user": "<username>",
"password": "<password>",
"driver": "org.postgresql.Driver",
}
df.write.jdbc(url=url, table="avg_ratings", mode=mode, properties=properties)
if __name__ == "__main__":
movies_df = extract_movies_to_df()
users_df = extract_users_to_df()
ratings_df = transform_avg_ratings(movies_df, users_df)
load_df_to_db(ratings_df)
|
Feat: Create Average Movies rating etl
Extracts data from 2 tables in the database, transforms the data and writes the result into another table in the same databaseimport pyspark
spark = (
pyspark.sql.SparkSession.builder.appName("FromDatabase")
.config("spark.driver.extraClassPath", "<driver_location>/postgresql-42.2.18.jar")
.getOrCreate()
)
# Read table from db using Spark JDBC
def extract_movies_to_df():
movies_df = (
spark.read.format("jdbc")
.option("url", "jdbc:postgresql://localhost:5432/etl_pipeline")
.option("dbtable", "movies")
.option("user", "<username")
.option("password", "<password>")
.option("driver", "org.postgresql.Driver")
.load()
)
return movies_df
# Read users table from db using Spark JDBC
def extract_users_to_df():
users_df = (
spark.read.format("jdbc")
.option("url", "jdbc:postgresql://localhost:5432/etl_pipeline")
.option("dbtable", "users")
.option("user", "<username")
.option("password", "<password>")
.option("driver", "org.postgresql.Driver")
.load()
)
return users_df
# transforming tables
def transform_avg_ratings(movies_df, users_df):
avg_rating = users_df.groupby("movie_id").mean("rating")
# join movies_df and avg_rating table on id
    df = movies_df.join(avg_rating, movies_df.id == avg_rating.movie_id)
df = df.drop("movie_id")
return df
# Write the result into avg_ratings table in db
def load_df_to_db(df):
mode = "overwrite"
url = "jdbc:postgresql://localhost:5432/etl_pipeline"
properties = {
"user": "<username>",
"password": "<password>",
"driver": "org.postgresql.Driver",
}
df.write.jdbc(url=url, table="avg_ratings", mode=mode, properties=properties)
if __name__ == "__main__":
movies_df = extract_movies_to_df()
users_df = extract_users_to_df()
ratings_df = transform_avg_ratings(movies_df, users_df)
load_df_to_db(ratings_df)
|
<commit_before><commit_msg>Feat: Create Average Movies rating etl
Extracts data from 2 tables in the database, transforms the data and writes the result into another table in the same database<commit_after>import pyspark
spark = (
pyspark.sql.SparkSession.builder.appName("FromDatabase")
.config("spark.driver.extraClassPath", "<driver_location>/postgresql-42.2.18.jar")
.getOrCreate()
)
# Read table from db using Spark JDBC
def extract_movies_to_df():
movies_df = (
spark.read.format("jdbc")
.option("url", "jdbc:postgresql://localhost:5432/etl_pipeline")
.option("dbtable", "movies")
.option("user", "<username")
.option("password", "<password>")
.option("driver", "org.postgresql.Driver")
.load()
)
return movies_df
# Read users table from db using Spark JDBC
def extract_users_to_df():
users_df = (
spark.read.format("jdbc")
.option("url", "jdbc:postgresql://localhost:5432/etl_pipeline")
.option("dbtable", "users")
.option("user", "<username")
.option("password", "<password>")
.option("driver", "org.postgresql.Driver")
.load()
)
return users_df
# transforming tables
def transform_avg_ratings(movies_df, users_df):
avg_rating = users_df.groupby("movie_id").mean("rating")
# join movies_df and avg_rating table on id
    df = movies_df.join(avg_rating, movies_df.id == avg_rating.movie_id)
df = df.drop("movie_id")
return df
# Write the result into avg_ratings table in db
def load_df_to_db(df):
mode = "overwrite"
url = "jdbc:postgresql://localhost:5432/etl_pipeline"
properties = {
"user": "<username>",
"password": "<password>",
"driver": "org.postgresql.Driver",
}
df.write.jdbc(url=url, table="avg_ratings", mode=mode, properties=properties)
if __name__ == "__main__":
movies_df = extract_movies_to_df()
users_df = extract_users_to_df()
ratings_df = transform_avg_ratings(movies_df, users_df)
load_df_to_db(ratings_df)
|
|
7d198f3eaca6a91b731b3e25c0285cd46e72935a
|
swh/web/common/migrations/0005_remove_duplicated_authorized_origins.py
|
swh/web/common/migrations/0005_remove_duplicated_authorized_origins.py
|
# Copyright (C) 2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
from __future__ import unicode_literals
from django.db import migrations
from swh.web.common.models import SaveAuthorizedOrigin
def _remove_duplicated_urls_in_authorized_list(apps, schema_editor):
sao = SaveAuthorizedOrigin.objects
for url in sao.values_list('url', flat=True).distinct():
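        # Keep the first row for each URL and delete the remaining duplicates.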
sao.filter(pk__in=sao.filter(
url=url).values_list('id', flat=True)[1:]).delete()
class Migration(migrations.Migration):
dependencies = [
('swh.web.common', '0004_auto_20190204_1324'),
]
operations = [
migrations.RunPython(_remove_duplicated_urls_in_authorized_list)
]
|
Remove duplicates in authorized origins table
|
common/migrations: Remove duplicates in authorized origins table
|
Python
|
agpl-3.0
|
SoftwareHeritage/swh-web-ui,SoftwareHeritage/swh-web-ui,SoftwareHeritage/swh-web-ui
|
common/migrations: Remove duplicates in authorized origins table
|
# Copyright (C) 2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
from __future__ import unicode_literals
from django.db import migrations
from swh.web.common.models import SaveAuthorizedOrigin
def _remove_duplicated_urls_in_authorized_list(apps, schema_editor):
sao = SaveAuthorizedOrigin.objects
for url in sao.values_list('url', flat=True).distinct():
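        # Keep the first row for each URL and delete the remaining duplicates.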
sao.filter(pk__in=sao.filter(
url=url).values_list('id', flat=True)[1:]).delete()
class Migration(migrations.Migration):
dependencies = [
('swh.web.common', '0004_auto_20190204_1324'),
]
operations = [
migrations.RunPython(_remove_duplicated_urls_in_authorized_list)
]
|
<commit_before><commit_msg>common/migrations: Remove duplicates in authorized origins table<commit_after>
|
# Copyright (C) 2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
from __future__ import unicode_literals
from django.db import migrations
from swh.web.common.models import SaveAuthorizedOrigin
def _remove_duplicated_urls_in_authorized_list(apps, schema_editor):
sao = SaveAuthorizedOrigin.objects
for url in sao.values_list('url', flat=True).distinct():
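        # Keep the first row for each URL and delete the remaining duplicates.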
sao.filter(pk__in=sao.filter(
url=url).values_list('id', flat=True)[1:]).delete()
class Migration(migrations.Migration):
dependencies = [
('swh.web.common', '0004_auto_20190204_1324'),
]
operations = [
migrations.RunPython(_remove_duplicated_urls_in_authorized_list)
]
|
common/migrations: Remove duplicates in authorized origins table# Copyright (C) 2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
from __future__ import unicode_literals
from django.db import migrations
from swh.web.common.models import SaveAuthorizedOrigin
def _remove_duplicated_urls_in_authorized_list(apps, schema_editor):
sao = SaveAuthorizedOrigin.objects
for url in sao.values_list('url', flat=True).distinct():
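        # Keep the first row for each URL and delete the remaining duplicates.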
sao.filter(pk__in=sao.filter(
url=url).values_list('id', flat=True)[1:]).delete()
class Migration(migrations.Migration):
dependencies = [
('swh.web.common', '0004_auto_20190204_1324'),
]
operations = [
migrations.RunPython(_remove_duplicated_urls_in_authorized_list)
]
|
<commit_before><commit_msg>common/migrations: Remove duplicates in authorized origins table<commit_after># Copyright (C) 2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
from __future__ import unicode_literals
from django.db import migrations
from swh.web.common.models import SaveAuthorizedOrigin
def _remove_duplicated_urls_in_authorized_list(apps, schema_editor):
sao = SaveAuthorizedOrigin.objects
for url in sao.values_list('url', flat=True).distinct():
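        # Keep the first row for each URL and delete the remaining duplicates.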
sao.filter(pk__in=sao.filter(
url=url).values_list('id', flat=True)[1:]).delete()
class Migration(migrations.Migration):
dependencies = [
('swh.web.common', '0004_auto_20190204_1324'),
]
operations = [
migrations.RunPython(_remove_duplicated_urls_in_authorized_list)
]
|
|
91541cf82f435cb261d9debc85a2a8ae6dd74ab1
|
xutils/init_logging.py
|
xutils/init_logging.py
|
# encoding: utf-8
from __future__ import print_function, absolute_import, unicode_literals, division
import logging
import logging.config
def init_logging(logger=None, level="DEBUG", log_file="", file_config=None, dict_config=None):
# Initialize the argument logger with the arguments, level and log_file.
if logger:
fmt = "%(asctime)s - %(pathname)s - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s"
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
level = getattr(logging, level.upper())
logger.setLevel(level)
if log_file:
from logging.handlers import TimedRotatingFileHandler
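            # Rotate the log file at midnight each day and keep the last 30 backups.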
handler = TimedRotatingFileHandler(log_file, when="midnight", interval=1, backupCount=30)
else:
handler = logging.StreamHandler()
handler.setLevel(level)
handler.setFormatter(formatter)
logger.addHandler(handler)
# Initialize logging by the configuration file, file_config.
if file_config:
logging.config.fileConfig(file_config, disable_existing_loggers=False)
# Initialize logging by the dict configuration, dict_config.
if dict_config and hasattr(logging.config, "dictConfig"):
logging.config.dictConfig(dict_config)
|
Add a function to initialize the logging.
|
Add a function to initialize the logging.
|
Python
|
mit
|
xgfone/xutils,xgfone/pycom
|
Add a function to initialize the logging.
|
# encoding: utf-8
from __future__ import print_function, absolute_import, unicode_literals, division
import logging
import logging.config
def init_logging(logger=None, level="DEBUG", log_file="", file_config=None, dict_config=None):
# Initialize the argument logger with the arguments, level and log_file.
if logger:
fmt = "%(asctime)s - %(pathname)s - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s"
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
level = getattr(logging, level.upper())
logger.setLevel(level)
if log_file:
from logging.handlers import TimedRotatingFileHandler
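            # Rotate the log file at midnight each day and keep the last 30 backups.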
handler = TimedRotatingFileHandler(log_file, when="midnight", interval=1, backupCount=30)
else:
handler = logging.StreamHandler()
handler.setLevel(level)
handler.setFormatter(formatter)
logger.addHandler(handler)
# Initialize logging by the configuration file, file_config.
if file_config:
logging.config.fileConfig(file_config, disable_existing_loggers=False)
# Initialize logging by the dict configuration, dict_config.
if dict_config and hasattr(logging.config, "dictConfig"):
logging.config.dictConfig(dict_config)
|
<commit_before><commit_msg>Add a function to initialize the logging.<commit_after>
|
# encoding: utf-8
from __future__ import print_function, absolute_import, unicode_literals, division
import logging
import logging.config
def init_logging(logger=None, level="DEBUG", log_file="", file_config=None, dict_config=None):
# Initialize the argument logger with the arguments, level and log_file.
if logger:
fmt = "%(asctime)s - %(pathname)s - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s"
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
level = getattr(logging, level.upper())
logger.setLevel(level)
if log_file:
from logging.handlers import TimedRotatingFileHandler
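            # Rotate the log file at midnight each day and keep the last 30 backups.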
handler = TimedRotatingFileHandler(log_file, when="midnight", interval=1, backupCount=30)
else:
handler = logging.StreamHandler()
handler.setLevel(level)
handler.setFormatter(formatter)
logger.addHandler(handler)
# Initialize logging by the configuration file, file_config.
if file_config:
logging.config.fileConfig(file_config, disable_existing_loggers=False)
# Initialize logging by the dict configuration, dict_config.
if dict_config and hasattr(logging.config, "dictConfig"):
logging.config.dictConfig(dict_config)
|
Add a function to initialize the logging.# encoding: utf-8
from __future__ import print_function, absolute_import, unicode_literals, division
import logging
import logging.config
def init_logging(logger=None, level="DEBUG", log_file="", file_config=None, dict_config=None):
# Initialize the argument logger with the arguments, level and log_file.
if logger:
fmt = "%(asctime)s - %(pathname)s - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s"
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
level = getattr(logging, level.upper())
logger.setLevel(level)
if log_file:
from logging.handlers import TimedRotatingFileHandler
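            # Rotate the log file at midnight each day and keep the last 30 backups.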
handler = TimedRotatingFileHandler(log_file, when="midnight", interval=1, backupCount=30)
else:
handler = logging.StreamHandler()
handler.setLevel(level)
handler.setFormatter(formatter)
logger.addHandler(handler)
# Initialize logging by the configuration file, file_config.
if file_config:
logging.config.fileConfig(file_config, disable_existing_loggers=False)
# Initialize logging by the dict configuration, dict_config.
if dict_config and hasattr(logging.config, "dictConfig"):
logging.config.dictConfig(dict_config)
|
<commit_before><commit_msg>Add a function to initialize the logging.<commit_after># encoding: utf-8
from __future__ import print_function, absolute_import, unicode_literals, division
import logging
import logging.config
def init_logging(logger=None, level="DEBUG", log_file="", file_config=None, dict_config=None):
# Initialize the argument logger with the arguments, level and log_file.
if logger:
fmt = "%(asctime)s - %(pathname)s - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s"
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
level = getattr(logging, level.upper())
logger.setLevel(level)
if log_file:
from logging.handlers import TimedRotatingFileHandler
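            # Rotate the log file at midnight each day and keep the last 30 backups.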
handler = TimedRotatingFileHandler(log_file, when="midnight", interval=1, backupCount=30)
else:
handler = logging.StreamHandler()
handler.setLevel(level)
handler.setFormatter(formatter)
logger.addHandler(handler)
# Initialize logging by the configuration file, file_config.
if file_config:
logging.config.fileConfig(file_config, disable_existing_loggers=False)
# Initialize logging by the dict configuration, dict_config.
if dict_config and hasattr(logging.config, "dictConfig"):
logging.config.dictConfig(dict_config)
|
|
507e3bad4e877330eea29675dafb8210ab6bada5
|
tests/test_agent.py
|
tests/test_agent.py
|
"""
Tests for an agent.
"""
import io
import os
import pytest
from onirim import action
from onirim import agent
from onirim import component
def file_agent(in_str):
return agent.File(io.StringIO(in_str), open(os.devnull, "w"))
def content():
return component.Content([])
@pytest.mark.parametrize(
"in_str, expected",
[
("play\n0\n", (action.Phase1.play, 0)),
("discard\n4\n", (action.Phase1.discard, 4)),
]
)
def test_file_phase_1_action(in_str, expected):
"""
Test input parsing of phase_1_action.
"""
assert file_agent(in_str).phase_1_action(content()) == expected
@pytest.mark.parametrize(
"in_str, expected",
[
("key\n2\n", (action.Nightmare.by_key, {"idx": 2})),
("door\n3\n", (action.Nightmare.by_door, {"idx": 3})),
("hand\n", (action.Nightmare.by_hand, {})),
("deck\n", (action.Nightmare.by_deck, {})),
]
)
def test_file_nightmare_action(in_str, expected):
"""
Test input parsing of nightmare action.
"""
assert file_agent(in_str).nightmare_action(content()) == expected
@pytest.mark.parametrize(
"in_str, expected",
[
("yes\n", True),
("no\n", False),
]
)
def test_file_open_door(in_str, expected):
"""
Test input parsing of open door.
"""
assert file_agent(in_str).open_door(content(), None) == expected
#def test_file_key_discard_react(in_str, expected):
#TODO
|
Add tests for file agent
|
Add tests for file agent
|
Python
|
mit
|
cwahbong/onirim-py
|
Add tests for file agent
|
"""
Tests for an agent.
"""
import io
import os
import pytest
from onirim import action
from onirim import agent
from onirim import component
def file_agent(in_str):
return agent.File(io.StringIO(in_str), open(os.devnull, "w"))
def content():
return component.Content([])
@pytest.mark.parametrize(
"in_str, expected",
[
("play\n0\n", (action.Phase1.play, 0)),
("discard\n4\n", (action.Phase1.discard, 4)),
]
)
def test_file_phase_1_action(in_str, expected):
"""
Test input parsing of phase_1_action.
"""
assert file_agent(in_str).phase_1_action(content()) == expected
@pytest.mark.parametrize(
"in_str, expected",
[
("key\n2\n", (action.Nightmare.by_key, {"idx": 2})),
("door\n3\n", (action.Nightmare.by_door, {"idx": 3})),
("hand\n", (action.Nightmare.by_hand, {})),
("deck\n", (action.Nightmare.by_deck, {})),
]
)
def test_file_nightmare_action(in_str, expected):
"""
Test input parsing of nightmare action.
"""
assert file_agent(in_str).nightmare_action(content()) == expected
@pytest.mark.parametrize(
"in_str, expected",
[
("yes\n", True),
("no\n", False),
]
)
def test_file_open_door(in_str, expected):
"""
Test input parsing of open door.
"""
assert file_agent(in_str).open_door(content(), None) == expected
#def test_file_key_discard_react(in_str, expected):
#TODO
|
<commit_before><commit_msg>Add tests for file agent<commit_after>
|
"""
Tests for an agent.
"""
import io
import os
import pytest
from onirim import action
from onirim import agent
from onirim import component
def file_agent(in_str):
return agent.File(io.StringIO(in_str), open(os.devnull, "w"))
def content():
return component.Content([])
@pytest.mark.parametrize(
"in_str, expected",
[
("play\n0\n", (action.Phase1.play, 0)),
("discard\n4\n", (action.Phase1.discard, 4)),
]
)
def test_file_phase_1_action(in_str, expected):
"""
Test input parsing of phase_1_action.
"""
assert file_agent(in_str).phase_1_action(content()) == expected
@pytest.mark.parametrize(
"in_str, expected",
[
("key\n2\n", (action.Nightmare.by_key, {"idx": 2})),
("door\n3\n", (action.Nightmare.by_door, {"idx": 3})),
("hand\n", (action.Nightmare.by_hand, {})),
("deck\n", (action.Nightmare.by_deck, {})),
]
)
def test_file_nightmare_action(in_str, expected):
"""
Test input parsing of nightmare action.
"""
assert file_agent(in_str).nightmare_action(content()) == expected
@pytest.mark.parametrize(
"in_str, expected",
[
("yes\n", True),
("no\n", False),
]
)
def test_file_open_door(in_str, expected):
"""
Test input parsing of open door.
"""
assert file_agent(in_str).open_door(content(), None) == expected
#def test_file_key_discard_react(in_str, expected):
#TODO
|
Add tests for file agent"""
Tests for an agent.
"""
import io
import os
import pytest
from onirim import action
from onirim import agent
from onirim import component
def file_agent(in_str):
return agent.File(io.StringIO(in_str), open(os.devnull, "w"))
def content():
return component.Content([])
@pytest.mark.parametrize(
"in_str, expected",
[
("play\n0\n", (action.Phase1.play, 0)),
("discard\n4\n", (action.Phase1.discard, 4)),
]
)
def test_file_phase_1_action(in_str, expected):
"""
Test input parsing of phase_1_action.
"""
assert file_agent(in_str).phase_1_action(content()) == expected
@pytest.mark.parametrize(
"in_str, expected",
[
("key\n2\n", (action.Nightmare.by_key, {"idx": 2})),
("door\n3\n", (action.Nightmare.by_door, {"idx": 3})),
("hand\n", (action.Nightmare.by_hand, {})),
("deck\n", (action.Nightmare.by_deck, {})),
]
)
def test_file_nightmare_action(in_str, expected):
"""
Test input parsing of nightmare action.
"""
assert file_agent(in_str).nightmare_action(content()) == expected
@pytest.mark.parametrize(
"in_str, expected",
[
("yes\n", True),
("no\n", False),
]
)
def test_file_open_door(in_str, expected):
"""
Test input parsing of open door.
"""
assert file_agent(in_str).open_door(content(), None) == expected
#def test_file_key_discard_react(in_str, expected):
#TODO
|
<commit_before><commit_msg>Add tests for file agent<commit_after>"""
Tests for an agent.
"""
import io
import os
import pytest
from onirim import action
from onirim import agent
from onirim import component
def file_agent(in_str):
return agent.File(io.StringIO(in_str), open(os.devnull, "w"))
def content():
return component.Content([])
@pytest.mark.parametrize(
"in_str, expected",
[
("play\n0\n", (action.Phase1.play, 0)),
("discard\n4\n", (action.Phase1.discard, 4)),
]
)
def test_file_phase_1_action(in_str, expected):
"""
Test input parsing of phase_1_action.
"""
assert file_agent(in_str).phase_1_action(content()) == expected
@pytest.mark.parametrize(
"in_str, expected",
[
("key\n2\n", (action.Nightmare.by_key, {"idx": 2})),
("door\n3\n", (action.Nightmare.by_door, {"idx": 3})),
("hand\n", (action.Nightmare.by_hand, {})),
("deck\n", (action.Nightmare.by_deck, {})),
]
)
def test_file_nightmare_action(in_str, expected):
"""
Test input parsing of nightmare action.
"""
assert file_agent(in_str).nightmare_action(content()) == expected
@pytest.mark.parametrize(
"in_str, expected",
[
("yes\n", True),
("no\n", False),
]
)
def test_file_open_door(in_str, expected):
"""
Test input parsing of open door.
"""
assert file_agent(in_str).open_door(content(), None) == expected
#def test_file_key_discard_react(in_str, expected):
#TODO
|
|
c67e1af4f765f143cb1b8420e053c1a9f00edd05
|
course_discovery/apps/course_metadata/migrations/0168_auto_20190404_1733.py
|
course_discovery/apps/course_metadata/migrations/0168_auto_20190404_1733.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-04-04 17:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.manager
import djchoices.choices
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0167_auto_20190403_1606'),
]
operations = [
migrations.AlterModelManagers(
name='course',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterModelManagers(
name='courseentitlement',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterModelManagers(
name='courserun',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterModelManagers(
name='seat',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterField(
model_name='courserun',
name='status',
field=models.CharField(choices=[('published', 'Published'), ('unpublished', 'Unpublished'), ('reviewed', 'Reviewed'), ('review_by_legal', 'Awaiting Review from Legal'), ('review_by_internal', 'Awaiting Internal Review')], db_index=True, default='unpublished', max_length=255, validators=[djchoices.choices.ChoicesValidator({'published': 'Published', 'review_by_internal': 'Awaiting Internal Review', 'review_by_legal': 'Awaiting Review from Legal', 'reviewed': 'Reviewed', 'unpublished': 'Unpublished'})]),
),
]
|
Add migrations for new statuses.
|
Add migrations for new statuses.
|
Python
|
agpl-3.0
|
edx/course-discovery,edx/course-discovery,edx/course-discovery,edx/course-discovery
|
Add migrations for new statuses.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-04-04 17:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.manager
import djchoices.choices
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0167_auto_20190403_1606'),
]
operations = [
migrations.AlterModelManagers(
name='course',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterModelManagers(
name='courseentitlement',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterModelManagers(
name='courserun',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterModelManagers(
name='seat',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterField(
model_name='courserun',
name='status',
field=models.CharField(choices=[('published', 'Published'), ('unpublished', 'Unpublished'), ('reviewed', 'Reviewed'), ('review_by_legal', 'Awaiting Review from Legal'), ('review_by_internal', 'Awaiting Internal Review')], db_index=True, default='unpublished', max_length=255, validators=[djchoices.choices.ChoicesValidator({'published': 'Published', 'review_by_internal': 'Awaiting Internal Review', 'review_by_legal': 'Awaiting Review from Legal', 'reviewed': 'Reviewed', 'unpublished': 'Unpublished'})]),
),
]
|
<commit_before><commit_msg>Add migrations for new statuses.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-04-04 17:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.manager
import djchoices.choices
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0167_auto_20190403_1606'),
]
operations = [
migrations.AlterModelManagers(
name='course',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterModelManagers(
name='courseentitlement',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterModelManagers(
name='courserun',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterModelManagers(
name='seat',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterField(
model_name='courserun',
name='status',
field=models.CharField(choices=[('published', 'Published'), ('unpublished', 'Unpublished'), ('reviewed', 'Reviewed'), ('review_by_legal', 'Awaiting Review from Legal'), ('review_by_internal', 'Awaiting Internal Review')], db_index=True, default='unpublished', max_length=255, validators=[djchoices.choices.ChoicesValidator({'published': 'Published', 'review_by_internal': 'Awaiting Internal Review', 'review_by_legal': 'Awaiting Review from Legal', 'reviewed': 'Reviewed', 'unpublished': 'Unpublished'})]),
),
]
|
Add migrations for new statuses.# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-04-04 17:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.manager
import djchoices.choices
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0167_auto_20190403_1606'),
]
operations = [
migrations.AlterModelManagers(
name='course',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterModelManagers(
name='courseentitlement',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterModelManagers(
name='courserun',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterModelManagers(
name='seat',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterField(
model_name='courserun',
name='status',
field=models.CharField(choices=[('published', 'Published'), ('unpublished', 'Unpublished'), ('reviewed', 'Reviewed'), ('review_by_legal', 'Awaiting Review from Legal'), ('review_by_internal', 'Awaiting Internal Review')], db_index=True, default='unpublished', max_length=255, validators=[djchoices.choices.ChoicesValidator({'published': 'Published', 'review_by_internal': 'Awaiting Internal Review', 'review_by_legal': 'Awaiting Review from Legal', 'reviewed': 'Reviewed', 'unpublished': 'Unpublished'})]),
),
]
|
<commit_before><commit_msg>Add migrations for new statuses.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-04-04 17:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.manager
import djchoices.choices
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0167_auto_20190403_1606'),
]
operations = [
migrations.AlterModelManagers(
name='course',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterModelManagers(
name='courseentitlement',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterModelManagers(
name='courserun',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterModelManagers(
name='seat',
managers=[
('everything', django.db.models.manager.Manager()),
],
),
migrations.AlterField(
model_name='courserun',
name='status',
field=models.CharField(choices=[('published', 'Published'), ('unpublished', 'Unpublished'), ('reviewed', 'Reviewed'), ('review_by_legal', 'Awaiting Review from Legal'), ('review_by_internal', 'Awaiting Internal Review')], db_index=True, default='unpublished', max_length=255, validators=[djchoices.choices.ChoicesValidator({'published': 'Published', 'review_by_internal': 'Awaiting Internal Review', 'review_by_legal': 'Awaiting Review from Legal', 'reviewed': 'Reviewed', 'unpublished': 'Unpublished'})]),
),
]
|
|
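For context, a minimal sketch (not taken from the course-discovery source; the class and member names are hypothetical) of the djchoices enumeration that an AlterField like the one above typically corresponds to:

from djchoices import ChoiceItem, DjangoChoices

class CourseRunStatus(DjangoChoices):
    # Hypothetical reconstruction of the choices listed in the migration.
    Published = ChoiceItem('published', 'Published')
    Unpublished = ChoiceItem('unpublished', 'Unpublished')
    Reviewed = ChoiceItem('reviewed', 'Reviewed')
    LegalReview = ChoiceItem('review_by_legal', 'Awaiting Review from Legal')
    InternalReview = ChoiceItem('review_by_internal', 'Awaiting Internal Review')

# A CharField built from CourseRunStatus.choices and CourseRunStatus.validator
# would yield the choices/validators pair shown in the AlterField operation.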
d308874989667f36da1638f22d6b2d7e823b5ebd
|
extract-barcode.py
|
extract-barcode.py
|
"""
code to extract a single cell from a set of alignments or reads marked via Valentine's umis
repository:
https://github.com/vals/umis
"""
import regex as re
import sys
from argparse import ArgumentParser
from pysam import AlignmentFile
def extract_barcode_sam(sam, barcode):
parser_re = re.compile('.*:CELL_(?P<CB>.*):UMI_(?P<MB>.*)')
sam_file = AlignmentFile(sam, mode='r')
filter_file = AlignmentFile("-", mode='wh', template=sam_file)
track = sam_file.fetch(until_eof=True)
for i, aln in enumerate(track):
if aln.is_unmapped:
continue
match = parser_re.match(aln.qname)
CB = match.group('CB')
if CB == barcode:
filter_file.write(aln)
def stream_fastq(file_handler):
    ''' Generator which gives all four lines of a fastq read as one string
'''
next_element = ''
for i, line in enumerate(file_handler):
next_element += line
if i % 4 == 3:
yield next_element
next_element = ''
def extract_barcode_fastq(fastq, barcode):
parser_re = re.compile('.*:CELL_(?P<CB>.*):UMI_(?P<MB>.*)')
fastq_file = stream_fastq(open(fastq))
for read in fastq_file:
match = parser_re.match(read)
CB = match.group('CB')
if CB == barcode:
sys.stdout.write(read)
if __name__ == "__main__":
parser = ArgumentParser("extract reads/alignments from a single cell")
parser.add_argument("file", help="A SAM or FASTQ file")
parser.add_argument("barcode", help="barcode of the cell to extract")
args = parser.parse_args()
extract_fn = extract_barcode_sam if args.file.endswith(".sam") else extract_barcode_fastq
extract_fn(args.file, args.barcode)
|
Add script to extract reads or alignments matching a barcode.
|
Add script to extract reads or alignments matching a barcode.
|
Python
|
mit
|
roryk/junkdrawer,roryk/junkdrawer
|
Add script to extract reads or alignments matching a barcode.
|
"""
code to extract a single cell from a set of alignments or reads marked via Valentine's umis
repository:
https://github.com/vals/umis
"""
import regex as re
import sys
from argparse import ArgumentParser
from pysam import AlignmentFile
def extract_barcode_sam(sam, barcode):
parser_re = re.compile('.*:CELL_(?P<CB>.*):UMI_(?P<MB>.*)')
sam_file = AlignmentFile(sam, mode='r')
filter_file = AlignmentFile("-", mode='wh', template=sam_file)
track = sam_file.fetch(until_eof=True)
for i, aln in enumerate(track):
if aln.is_unmapped:
continue
match = parser_re.match(aln.qname)
CB = match.group('CB')
if CB == barcode:
filter_file.write(aln)
def stream_fastq(file_handler):
    ''' Generator which gives all four lines of a fastq read as one string
'''
next_element = ''
for i, line in enumerate(file_handler):
next_element += line
if i % 4 == 3:
yield next_element
next_element = ''
def extract_barcode_fastq(fastq, barcode):
parser_re = re.compile('.*:CELL_(?P<CB>.*):UMI_(?P<MB>.*)')
fastq_file = stream_fastq(open(fastq))
for read in fastq_file:
match = parser_re.match(read)
CB = match.group('CB')
if CB == barcode:
sys.stdout.write(read)
if __name__ == "__main__":
parser = ArgumentParser("extract reads/alignments from a single cell")
parser.add_argument("file", help="A SAM or FASTQ file")
parser.add_argument("barcode", help="barcode of the cell to extract")
args = parser.parse_args()
extract_fn = extract_barcode_sam if args.file.endswith(".sam") else extract_barcode_fastq
extract_fn(args.file, args.barcode)
|
<commit_before><commit_msg>Add script to extract reads or alignments matching a barcode.<commit_after>
|
"""
code to extract a single cell from a set of alignments or reads marked via Valentine's umis
repository:
https://github.com/vals/umis
"""
import regex as re
import sys
from argparse import ArgumentParser
from pysam import AlignmentFile
def extract_barcode_sam(sam, barcode):
parser_re = re.compile('.*:CELL_(?P<CB>.*):UMI_(?P<MB>.*)')
sam_file = AlignmentFile(sam, mode='r')
filter_file = AlignmentFile("-", mode='wh', template=sam_file)
track = sam_file.fetch(until_eof=True)
for i, aln in enumerate(track):
if aln.is_unmapped:
continue
match = parser_re.match(aln.qname)
CB = match.group('CB')
if CB == barcode:
filter_file.write(aln)
def stream_fastq(file_handler):
    ''' Generator which gives all four lines of a fastq read as one string
'''
next_element = ''
for i, line in enumerate(file_handler):
next_element += line
if i % 4 == 3:
yield next_element
next_element = ''
def extract_barcode_fastq(fastq, barcode):
parser_re = re.compile('.*:CELL_(?P<CB>.*):UMI_(?P<MB>.*)')
fastq_file = stream_fastq(open(fastq))
for read in fastq_file:
match = parser_re.match(read)
CB = match.group('CB')
if CB == barcode:
sys.stdout.write(read)
if __name__ == "__main__":
parser = ArgumentParser("extract reads/alignments from a single cell")
parser.add_argument("file", help="A SAM or FASTQ file")
parser.add_argument("barcode", help="barcode of the cell to extract")
args = parser.parse_args()
extract_fn = extract_barcode_sam if args.file.endswith(".sam") else extract_barcode_fastq
extract_fn(args.file, args.barcode)
|
Add script to extract reads or alignments matching a barcode."""
code to extract a single cell from a set of alignments or reads marked via Valentine's umis
repository:
https://github.com/vals/umis
"""
import regex as re
import sys
from argparse import ArgumentParser
from pysam import AlignmentFile
def extract_barcode_sam(sam, barcode):
parser_re = re.compile('.*:CELL_(?P<CB>.*):UMI_(?P<MB>.*)')
sam_file = AlignmentFile(sam, mode='r')
filter_file = AlignmentFile("-", mode='wh', template=sam_file)
track = sam_file.fetch(until_eof=True)
for i, aln in enumerate(track):
if aln.is_unmapped:
continue
match = parser_re.match(aln.qname)
CB = match.group('CB')
if CB == barcode:
filter_file.write(aln)
def stream_fastq(file_handler):
    ''' Generator which gives all four lines of a fastq read as one string
'''
next_element = ''
for i, line in enumerate(file_handler):
next_element += line
if i % 4 == 3:
yield next_element
next_element = ''
def extract_barcode_fastq(fastq, barcode):
parser_re = re.compile('.*:CELL_(?P<CB>.*):UMI_(?P<MB>.*)')
fastq_file = stream_fastq(open(fastq))
for read in fastq_file:
match = parser_re.match(read)
CB = match.group('CB')
if CB == barcode:
sys.stdout.write(read)
if __name__ == "__main__":
parser = ArgumentParser("extract reads/alignments from a single cell")
parser.add_argument("file", help="A SAM or FASTQ file")
parser.add_argument("barcode", help="barcode of the cell to extract")
args = parser.parse_args()
extract_fn = extract_barcode_sam if args.file.endswith(".sam") else extract_barcode_fastq
extract_fn(args.file, args.barcode)
|
<commit_before><commit_msg>Add script to extract reads or alignments matching a barcode.<commit_after>"""
code to extract a single cell from a set of alignments or reads marked via Valentine's umis
repository:
https://github.com/vals/umis
"""
import regex as re
import sys
from argparse import ArgumentParser
from pysam import AlignmentFile
def extract_barcode_sam(sam, barcode):
parser_re = re.compile('.*:CELL_(?P<CB>.*):UMI_(?P<MB>.*)')
sam_file = AlignmentFile(sam, mode='r')
filter_file = AlignmentFile("-", mode='wh', template=sam_file)
track = sam_file.fetch(until_eof=True)
for i, aln in enumerate(track):
if aln.is_unmapped:
continue
match = parser_re.match(aln.qname)
CB = match.group('CB')
if CB == barcode:
filter_file.write(aln)
def stream_fastq(file_handler):
    ''' Generator which gives all four lines of a fastq read as one string
'''
next_element = ''
for i, line in enumerate(file_handler):
next_element += line
if i % 4 == 3:
yield next_element
next_element = ''
def extract_barcode_fastq(fastq, barcode):
parser_re = re.compile('.*:CELL_(?P<CB>.*):UMI_(?P<MB>.*)')
fastq_file = stream_fastq(open(fastq))
for read in fastq_file:
match = parser_re.match(read)
CB = match.group('CB')
if CB == barcode:
sys.stdout.write(read)
if __name__ == "__main__":
parser = ArgumentParser("extract reads/alignments from a single cell")
parser.add_argument("file", help="A SAM or FASTQ file")
parser.add_argument("barcode", help="barcode of the cell to extract")
args = parser.parse_args()
extract_fn = extract_barcode_sam if args.file.endswith(".sam") else extract_barcode_fastq
extract_fn(args.file, args.barcode)
|
|
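A minimal usage sketch for the script above (file names and barcode are hypothetical); the dispatch keys on the .sam extension and matching records are written to stdout:

import subprocess

# Equivalent command-line invocations:
#   python extract-barcode.py cells.sam   ACGTACGT > one_cell.sam
#   python extract-barcode.py reads.fastq ACGTACGT > one_cell.fastq
subprocess.run(['python', 'extract-barcode.py', 'reads.fastq', 'ACGTACGT'])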
048d0d7ce30b66af8bf48bcb0cb7f8bfb90fff0c
|
tests/test_iters.py
|
tests/test_iters.py
|
import pytest
from skidl import *
from .setup_teardown import *
def test_iters_1():
"""Test bus iterator."""
b_size = 4
b = Bus('chplx', b_size)
for hi in b:
for lo in b:
if hi != lo:
led = Part('device','LED')
hi += led['A']
lo += led['K']
for l in b:
assert(len(l) == 2 * (b_size-1))
def test_iters_2():
"""Test pin iterator."""
q = Part('device','Q_NPN_CEB')
s = 0
for p1 in q:
for p2 in q:
if p1 != p2:
s += 1
assert(s == len(q) * (len(q)-1))
def test_iters_3():
"""Test net iterator."""
b = Net()
for hi in b:
for lo in b:
if hi != lo:
led = Part('device','LED')
hi += led['A']
lo += led['K']
for l in b:
assert(len(l) == 0)
|
Add tests for Part, Pin, Bus and Net iterators.
|
Add tests for Part, Pin, Bus and Net iterators.
|
Python
|
mit
|
xesscorp/skidl,xesscorp/skidl
|
Add tests for Part, Pin, Bus and Net iterators.
|
import pytest
from skidl import *
from .setup_teardown import *
def test_iters_1():
"""Test bus iterator."""
b_size = 4
b = Bus('chplx', b_size)
for hi in b:
for lo in b:
if hi != lo:
led = Part('device','LED')
hi += led['A']
lo += led['K']
for l in b:
assert(len(l) == 2 * (b_size-1))
def test_iters_2():
"""Test pin iterator."""
q = Part('device','Q_NPN_CEB')
s = 0
for p1 in q:
for p2 in q:
if p1 != p2:
s += 1
assert(s == len(q) * (len(q)-1))
def test_iters_3():
"""Test net iterator."""
b = Net()
for hi in b:
for lo in b:
if hi != lo:
led = Part('device','LED')
hi += led['A']
lo += led['K']
for l in b:
assert(len(l) == 0)
|
<commit_before><commit_msg>Add tests for Part, Pin, Bus and Net iterators.<commit_after>
|
import pytest
from skidl import *
from .setup_teardown import *
def test_iters_1():
"""Test bus iterator."""
b_size = 4
b = Bus('chplx', b_size)
for hi in b:
for lo in b:
if hi != lo:
led = Part('device','LED')
hi += led['A']
lo += led['K']
for l in b:
assert(len(l) == 2 * (b_size-1))
def test_iters_2():
"""Test pin iterator."""
q = Part('device','Q_NPN_CEB')
s = 0
for p1 in q:
for p2 in q:
if p1 != p2:
s += 1
assert(s == len(q) * (len(q)-1))
def test_iters_3():
"""Test net iterator."""
b = Net()
for hi in b:
for lo in b:
if hi != lo:
led = Part('device','LED')
hi += led['A']
lo += led['K']
for l in b:
assert(len(l) == 0)
|
Add tests for Part, Pin, Bus and Net iterators.import pytest
from skidl import *
from .setup_teardown import *
def test_iters_1():
"""Test bus iterator."""
b_size = 4
b = Bus('chplx', b_size)
for hi in b:
for lo in b:
if hi != lo:
led = Part('device','LED')
hi += led['A']
lo += led['K']
for l in b:
assert(len(l) == 2 * (b_size-1))
def test_iters_2():
"""Test pin iterator."""
q = Part('device','Q_NPN_CEB')
s = 0
for p1 in q:
for p2 in q:
if p1 != p2:
s += 1
assert(s == len(q) * (len(q)-1))
def test_iters_3():
"""Test net iterator."""
b = Net()
for hi in b:
for lo in b:
if hi != lo:
led = Part('device','LED')
hi += led['A']
lo += led['K']
for l in b:
assert(len(l) == 0)
|
<commit_before><commit_msg>Add tests for Part, Pin, Bus and Net iterators.<commit_after>import pytest
from skidl import *
from .setup_teardown import *
def test_iters_1():
"""Test bus iterator."""
b_size = 4
b = Bus('chplx', b_size)
for hi in b:
for lo in b:
if hi != lo:
led = Part('device','LED')
hi += led['A']
lo += led['K']
for l in b:
assert(len(l) == 2 * (b_size-1))
def test_iters_2():
"""Test pin iterator."""
q = Part('device','Q_NPN_CEB')
s = 0
for p1 in q:
for p2 in q:
if p1 != p2:
s += 1
assert(s == len(q) * (len(q)-1))
def test_iters_3():
"""Test net iterator."""
b = Net()
for hi in b:
for lo in b:
if hi != lo:
led = Part('device','LED')
hi += led['A']
lo += led['K']
for l in b:
assert(len(l) == 0)
|
|
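The assertions above hinge on the count of ordered pairs of distinct elements, n * (n - 1); a skidl-free sketch of the same iteration pattern using itertools:

from itertools import permutations

pins = ['base', 'collector', 'emitter']
pairs = list(permutations(pins, 2))  # all ordered pairs of distinct pins
assert len(pairs) == len(pins) * (len(pins) - 1)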
3d027df005725cbc5dfbba0262b0c52c5392d7f0
|
app/resources/check_token.py
|
app/resources/check_token.py
|
from flask import make_response, jsonify
from flask_restful import Resource, reqparse, marshal, fields
from app.models import User
from app.common.auth.token import JWT
user_fields = {
"id": fields.Integer,
"username": fields.String,
"created_at": fields.DateTime
}
class WhoAmIResource(Resource):
""" This class takes a token from the Authorization header
and then returns the user info for the token if its valid
"""
def __init__(self):
self.parser = reqparse.RequestParser()
self.parser.add_argument("Authorization",
location="headers",
required=True)
def get(self):
""" get method """
args = self.parser.parse_args()
token = args["Authorization"] # get token from header
try:
user_id = int(JWT.decode_token(token))
user = User.query.get(user_id)
return marshal(user, user_fields), 200
except ValueError:
return make_response(jsonify({
"status": "failed",
"message": "Invalid token, please login again"
}), 401)
|
Add whoami resource which decodes token and returns user info from token
|
[CHORE] Add whoami resource which decodes token and returns user info from token
|
Python
|
mit
|
brayoh/bucket-list-api
|
[CHORE] Add whoami resource which decodes token and returns user info from token
|
from flask import make_response, jsonify
from flask_restful import Resource, reqparse, marshal, fields
from app.models import User
from app.common.auth.token import JWT
user_fields = {
"id": fields.Integer,
"username": fields.String,
"created_at": fields.DateTime
}
class WhoAmIResource(Resource):
""" This class takes a token from the Authorization header
    and then returns the user info for the token if it's valid
"""
def __init__(self):
self.parser = reqparse.RequestParser()
self.parser.add_argument("Authorization",
location="headers",
required=True)
def get(self):
""" get method """
args = self.parser.parse_args()
token = args["Authorization"] # get token from header
try:
user_id = int(JWT.decode_token(token))
user = User.query.get(user_id)
return marshal(user, user_fields), 200
except ValueError:
return make_response(jsonify({
"status": "failed",
"message": "Invalid token, please login again"
}), 401)
|
<commit_before><commit_msg>[CHORE] Add whoami resource which decodes token and returns user info from token<commit_after>
|
from flask import make_response, jsonify
from flask_restful import Resource, reqparse, marshal, fields
from app.models import User
from app.common.auth.token import JWT
user_fields = {
"id": fields.Integer,
"username": fields.String,
"created_at": fields.DateTime
}
class WhoAmIResource(Resource):
""" This class takes a token from the Authorization header
    and then returns the user info for the token if it's valid
"""
def __init__(self):
self.parser = reqparse.RequestParser()
self.parser.add_argument("Authorization",
location="headers",
required=True)
def get(self):
""" get method """
args = self.parser.parse_args()
token = args["Authorization"] # get token from header
try:
user_id = int(JWT.decode_token(token))
user = User.query.get(user_id)
return marshal(user, user_fields), 200
except ValueError:
return make_response(jsonify({
"status": "failed",
"message": "Invalid token, please login again"
}), 401)
|
[CHORE] Add whoami resource which decodes token and returns user info from tokenfrom flask import make_response, jsonify
from flask_restful import Resource, reqparse, marshal, fields
from app.models import User
from app.common.auth.token import JWT
user_fields = {
"id": fields.Integer,
"username": fields.String,
"created_at": fields.DateTime
}
class WhoAmIResource(Resource):
""" This class takes a token from the Authorization header
    and then returns the user info for the token if it's valid
"""
def __init__(self):
self.parser = reqparse.RequestParser()
self.parser.add_argument("Authorization",
location="headers",
required=True)
def get(self):
""" get method """
args = self.parser.parse_args()
token = args["Authorization"] # get token from header
try:
user_id = int(JWT.decode_token(token))
user = User.query.get(user_id)
return marshal(user, user_fields), 200
except ValueError:
return make_response(jsonify({
"status": "failed",
"message": "Invalid token, please login again"
}), 401)
|
<commit_before><commit_msg>[CHORE] Add whoami resource which decodes token and returns user info from token<commit_after>from flask import make_response, jsonify
from flask_restful import Resource, reqparse, marshal, fields
from app.models import User
from app.common.auth.token import JWT
user_fields = {
"id": fields.Integer,
"username": fields.String,
"created_at": fields.DateTime
}
class WhoAmIResource(Resource):
""" This class takes a token from the Authorization header
    and then returns the user info for the token if it's valid
"""
def __init__(self):
self.parser = reqparse.RequestParser()
self.parser.add_argument("Authorization",
location="headers",
required=True)
def get(self):
""" get method """
args = self.parser.parse_args()
token = args["Authorization"] # get token from header
try:
user_id = int(JWT.decode_token(token))
user = User.query.get(user_id)
return marshal(user, user_fields), 200
except ValueError:
return make_response(jsonify({
"status": "failed",
"message": "Invalid token, please login again"
}), 401)
|
|
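A hedged client-side sketch for exercising the resource (the URL and token are placeholders, not from the source); the parser reads the raw Authorization header, so the token is sent unprefixed:

import requests

resp = requests.get('http://localhost:5000/whoami',
                    headers={'Authorization': '<jwt-token>'})
print(resp.status_code, resp.json())  # 200 with user fields, or 401 on a bad token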
f34dabd23faa7d50e507b829e576c1968bdc2d52
|
src/iterations/exercise3.py
|
src/iterations/exercise3.py
|
# Print The Message "Happy new Year" followed by the name of a person
# taken from a list for all people mentioned in the list.
def print_Happy_New_Year_to( listOfPeople ):
for user in listOfPeople:
print 'Happy New Year, ', user
print 'Done!'
def main( ):
listOfPeople=['John', 'Mary', 'Luke']
print_Happy_New_Year_to( listOfPeople )
quit(0)
main( )
|
Print The Message Happy New Year
|
Print The Message Happy New Year
#Print The Message "Happy new Year" followed by the name of a person taken from a list for all people mentioned in the list.
|
Python
|
mit
|
let42/python-course
|
Print The Message Happy New Year
#Print The Message "Happy new Year" followed by the name of a person taken from a list for all people mentioned in the list.
|
# Print The Message "Happy new Year" followed by the name of a person
# taken from a list for all people mentioned in the list.
def print_Happy_New_Year_to( listOfPeople ):
for user in listOfPeople:
print 'Happy New Year, ', user
print 'Done!'
def main( ):
listOfPeople=['John', 'Mary', 'Luke']
print_Happy_New_Year_to( listOfPeople )
quit(0)
main( )
|
<commit_before><commit_msg>Print The Message Happy New Year
#Print The Message "Happy new Year" followed by the name of a person taken from a list for all people mentioned in the list.<commit_after>
|
# Print The Message "Happy new Year" followed by the name of a person
# taken from a list for all people mentioned in the list.
def print_Happy_New_Year_to( listOfPeople ):
for user in listOfPeople:
print 'Happy New Year, ', user
print 'Done!'
def main( ):
listOfPeople=['John', 'Mary', 'Luke']
print_Happy_New_Year_to( listOfPeople )
quit(0)
main( )
|
Print The Message Happy New Year
#Print The Message "Happy new Year" followed by the name of a person taken from a list for all people mentioned in the list.# Print The Message "Happy new Year" followed by the name of a person
# taken from a list for all people mentioned in the list.
def print_Happy_New_Year_to( listOfPeople ):
for user in listOfPeople:
print 'Happy New Year, ', user
print 'Done!'
def main( ):
listOfPeople=['John', 'Mary', 'Luke']
print_Happy_New_Year_to( listOfPeople )
quit(0)
main( )
|
<commit_before><commit_msg>Print The Message Happy New Year
#Print The Message "Happy new Year" followed by the name of a person taken from a list for all people mentioned in the list.<commit_after># Print The Message "Happy new Year" followed by the name of a person
# taken from a list for all people mentioned in the list.
def print_Happy_New_Year_to( listOfPeople ):
for user in listOfPeople:
print 'Happy New Year, ', user
print 'Done!'
def main( ):
listOfPeople=['John', 'Mary', 'Luke']
print_Happy_New_Year_to( listOfPeople )
quit(0)
main( )
|
|
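The exercise above is Python 2; the same behaviour in Python 3 syntax, for readers on a current interpreter:

def print_happy_new_year_to(list_of_people):
    for user in list_of_people:
        print('Happy New Year,', user)
    print('Done!')

print_happy_new_year_to(['John', 'Mary', 'Luke'])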
ce28c5642c3ab543fc48e2f4f1f0b2f2a62890a2
|
src/misc/parse_tool_playbook_yaml.py
|
src/misc/parse_tool_playbook_yaml.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import argparse
import re
import yaml
def get_revision_number(yaml_content, tool_name):
for tool in yaml_content['tools']:
if tool["name"] == tool_name:
if tool.has_key("revision"):
print tool["revision"][0]
def get_owner(yaml_content, tool_name):
for tool in yaml_content['tools']:
if tool["name"] == tool_name:
print tool['owner']
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--file', required=True)
parser.add_argument('--tool_name', required=True)
parser.add_argument('--tool_function', required=True)
args = parser.parse_args()
with open(args.file,'r') as yaml_file:
yaml_content = yaml.load(yaml_file)
functions = {
'get_revision_number': get_revision_number,
'get_owner': get_owner
}
functions[args.tool_function](yaml_content, args.tool_name)
|
Add script to extract information for playbook files
|
Add script to extract information for playbook files
|
Python
|
apache-2.0
|
ASaiM/framework,ASaiM/framework
|
Add script to extract information for playbook files
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import argparse
import re
import yaml
def get_revision_number(yaml_content, tool_name):
for tool in yaml_content['tools']:
if tool["name"] == tool_name:
if tool.has_key("revision"):
print tool["revision"][0]
def get_owner(yaml_content, tool_name):
for tool in yaml_content['tools']:
if tool["name"] == tool_name:
print tool['owner']
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--file', required=True)
parser.add_argument('--tool_name', required=True)
parser.add_argument('--tool_function', required=True)
args = parser.parse_args()
with open(args.file,'r') as yaml_file:
yaml_content = yaml.load(yaml_file)
functions = {
'get_revision_number': get_revision_number,
'get_owner': get_owner
}
functions[args.tool_function](yaml_content, args.tool_name)
|
<commit_before><commit_msg>Add script to extract information for playbook files<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import argparse
import re
import yaml
def get_revision_number(yaml_content, tool_name):
for tool in yaml_content['tools']:
if tool["name"] == tool_name:
if tool.has_key("revision"):
print tool["revision"][0]
def get_owner(yaml_content, tool_name):
for tool in yaml_content['tools']:
if tool["name"] == tool_name:
print tool['owner']
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--file', required=True)
parser.add_argument('--tool_name', required=True)
parser.add_argument('--tool_function', required=True)
args = parser.parse_args()
with open(args.file,'r') as yaml_file:
yaml_content = yaml.load(yaml_file)
functions = {
'get_revision_number': get_revision_number,
'get_owner': get_owner
}
functions[args.tool_function](yaml_content, args.tool_name)
|
Add script to extract information for playbook files#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import argparse
import re
import yaml
def get_revision_number(yaml_content, tool_name):
for tool in yaml_content['tools']:
if tool["name"] == tool_name:
if tool.has_key("revision"):
print tool["revision"][0]
def get_owner(yaml_content, tool_name):
for tool in yaml_content['tools']:
if tool["name"] == tool_name:
print tool['owner']
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--file', required=True)
parser.add_argument('--tool_name', required=True)
parser.add_argument('--tool_function', required=True)
args = parser.parse_args()
with open(args.file,'r') as yaml_file:
yaml_content = yaml.load(yaml_file)
functions = {
'get_revision_number': get_revision_number,
'get_owner': get_owner
}
functions[args.tool_function](yaml_content, args.tool_name)
|
<commit_before><commit_msg>Add script to extract information for playbook files<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import argparse
import re
import yaml
def get_revision_number(yaml_content, tool_name):
for tool in yaml_content['tools']:
if tool["name"] == tool_name:
if tool.has_key("revision"):
print tool["revision"][0]
def get_owner(yaml_content, tool_name):
for tool in yaml_content['tools']:
if tool["name"] == tool_name:
print tool['owner']
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--file', required=True)
parser.add_argument('--tool_name', required=True)
parser.add_argument('--tool_function', required=True)
args = parser.parse_args()
with open(args.file,'r') as yaml_file:
yaml_content = yaml.load(yaml_file)
functions = {
'get_revision_number': get_revision_number,
'get_owner': get_owner
}
functions[args.tool_function](yaml_content, args.tool_name)
|
|
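A self-contained sketch of the lookup the script performs (the tool entry is invented for illustration); yaml.safe_load is used here in place of the bare yaml.load:

import yaml

doc = yaml.safe_load("""
tools:
  - name: fastqc
    owner: devteam
    revision: [abc123]
""")
for tool in doc['tools']:
    if tool['name'] == 'fastqc':
        print(tool['owner'], tool.get('revision', ['<none>'])[0])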
24c763ead7af8a669ff1055b3f352f513274a47f
|
all-domains/data-structures/linked-lists/insert-a-node-at-a-specific-positin-in-a-linked-list/solution.py
|
all-domains/data-structures/linked-lists/insert-a-node-at-a-specific-positin-in-a-linked-list/solution.py
|
# https://www.hackerrank.com/challenges/insert-a-node-at-a-specific-position-in-a-linked-list
# Python 2
"""
Insert Node at a specific position in a linked list
head input could be None as well for empty list
Node is defined as
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
return back the head of the linked list in the below method.
"""
# This is a "method-only" submission.
# You only need to complete this method.
def InsertNth(head, data, position):
if head is None:
return Node(data=data)
else:
current = head
if position == 0:
node_to_insert = Node(data=data, next_node=current)
return node_to_insert
else:
prev = None
for i in xrange(position):
prev = current
current = current.next
new_node = Node(data=data)
prev.next = new_node
new_node.next = current
return head
# def display_linked_list(head):
# s = ''
# while True:
# s += '{}->'.format(head.data)
# if head.next == None:
# break
# else:
# head = head.next
# s += 'NULL'
# print(s)
#
#
# # LL = Node(1)
# c = Node(3)
# b = Node(2, c)
# head = Node(1, b)
#
# head = InsertNth(head, 'x', 1)
#
# display_linked_list(head)
|
Insert a node at a specific position in a linked list
|
Insert a node at a specific position in a linked list
|
Python
|
mit
|
arvinsim/hackerrank-solutions
|
Insert a node at a specific position in a linked list
|
# https://www.hackerrank.com/challenges/insert-a-node-at-a-specific-position-in-a-linked-list
# Python 2
"""
Insert Node at a specific position in a linked list
head input could be None as well for empty list
Node is defined as
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
return back the head of the linked list in the below method.
"""
# This is a "method-only" submission.
# You only need to complete this method.
def InsertNth(head, data, position):
if head is None:
return Node(data=data)
else:
current = head
if position == 0:
node_to_insert = Node(data=data, next_node=current)
return node_to_insert
else:
prev = None
for i in xrange(position):
prev = current
current = current.next
new_node = Node(data=data)
prev.next = new_node
new_node.next = current
return head
# def display_linked_list(head):
# s = ''
# while True:
# s += '{}->'.format(head.data)
# if head.next == None:
# break
# else:
# head = head.next
# s += 'NULL'
# print(s)
#
#
# # LL = Node(1)
# c = Node(3)
# b = Node(2, c)
# head = Node(1, b)
#
# head = InsertNth(head, 'x', 1)
#
# display_linked_list(head)
|
<commit_before><commit_msg>Insert a node at a specific position in a linked list<commit_after>
|
# https://www.hackerrank.com/challenges/insert-a-node-at-a-specific-position-in-a-linked-list
# Python 2
"""
Insert Node at a specific position in a linked list
head input could be None as well for empty list
Node is defined as
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
return back the head of the linked list in the below method.
"""
# This is a "method-only" submission.
# You only need to complete this method.
def InsertNth(head, data, position):
if head is None:
return Node(data=data)
else:
current = head
if position == 0:
node_to_insert = Node(data=data, next_node=current)
return node_to_insert
else:
prev = None
for i in xrange(position):
prev = current
current = current.next
new_node = Node(data=data)
prev.next = new_node
new_node.next = current
return head
# def display_linked_list(head):
# s = ''
# while True:
# s += '{}->'.format(head.data)
# if head.next == None:
# break
# else:
# head = head.next
# s += 'NULL'
# print(s)
#
#
# # LL = Node(1)
# c = Node(3)
# b = Node(2, c)
# head = Node(1, b)
#
# head = InsertNth(head, 'x', 1)
#
# display_linked_list(head)
|
Insert a node at a specific position in a linked list# https://www.hackerrank.com/challenges/insert-a-node-at-a-specific-position-in-a-linked-list
# Python 2
"""
Insert Node at a specific position in a linked list
head input could be None as well for empty list
Node is defined as
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
return back the head of the linked list in the below method.
"""
# This is a "method-only" submission.
# You only need to complete this method.
def InsertNth(head, data, position):
if head is None:
return Node(data=data)
else:
current = head
if position == 0:
node_to_insert = Node(data=data, next_node=current)
return node_to_insert
else:
prev = None
for i in xrange(position):
prev = current
current = current.next
new_node = Node(data=data)
prev.next = new_node
new_node.next = current
return head
# def display_linked_list(head):
# s = ''
# while True:
# s += '{}->'.format(head.data)
# if head.next == None:
# break
# else:
# head = head.next
# s += 'NULL'
# print(s)
#
#
# # LL = Node(1)
# c = Node(3)
# b = Node(2, c)
# head = Node(1, b)
#
# head = InsertNth(head, 'x', 1)
#
# display_linked_list(head)
|
<commit_before><commit_msg>Insert a node at a specific position in a linked list<commit_after># https://www.hackerrank.com/challenges/insert-a-node-at-a-specific-position-in-a-linked-list
# Python 2
"""
Insert Node at a specific position in a linked list
head input could be None as well for empty list
Node is defined as
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
return back the head of the linked list in the below method.
"""
# This is a "method-only" submission.
# You only need to complete this method.
def InsertNth(head, data, position):
if head is None:
return Node(data=data)
else:
current = head
if position == 0:
node_to_insert = Node(data=data, next_node=current)
return node_to_insert
else:
prev = None
for i in xrange(position):
prev = current
current = current.next
new_node = Node(data=data)
prev.next = new_node
new_node.next = current
return head
# def display_linked_list(head):
# s = ''
# while True:
# s += '{}->'.format(head.data)
# if head.next == None:
# break
# else:
# head = head.next
# s += 'NULL'
# print(s)
#
#
# # LL = Node(1)
# c = Node(3)
# b = Node(2, c)
# head = Node(1, b)
#
# head = InsertNth(head, 'x', 1)
#
# display_linked_list(head)
|
|
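Node is only described in the submission docstring; a self-contained Python 3 sketch of the harness (range replacing xrange) that exercises the same insertion logic:

class Node(object):
    def __init__(self, data=None, next_node=None):
        self.data = data
        self.next = next_node

def insert_nth(head, data, position):
    # Same algorithm as InsertNth above, without the Python 2 xrange.
    if head is None or position == 0:
        return Node(data, head)
    prev, current = None, head
    for _ in range(position):
        prev, current = current, current.next
    prev.next = Node(data, current)
    return head

head = insert_nth(insert_nth(insert_nth(None, 3, 0), 1, 0), 2, 1)
out = []
while head:
    out.append(str(head.data))
    head = head.next
print('->'.join(out))  # 1->2->3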
c9e90ef5413bd560422e915d213df73ad88dffd7
|
tests/integration/test_apigateway.py
|
tests/integration/test_apigateway.py
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
import botocore.session
class TestApigateway(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('apigateway', 'us-east-1')
        # Create a resource to use with this client.
self.api_name = 'mytestapi'
self.api_id = self.client.create_rest_api(name=self.api_name)['id']
def tearDown(self):
self.client.delete_rest_api(restApiId=self.api_id)
def test_put_integration(self):
# The only resource on a brand new api is the path. So use that ID.
path_resource_id = self.client.get_resources(
restApiId=self.api_id)['items'][0]['id']
# Create a method for the resource.
self.client.put_method(
restApiId=self.api_id,
resourceId=path_resource_id,
httpMethod='GET',
authorizationType='None'
)
# Put an integration on the method.
response = self.client.put_integration(
restApiId=self.api_id,
resourceId=path_resource_id,
httpMethod='GET',
type='HTTP',
integrationHttpMethod='GET',
uri='https://api.endpoint.com'
)
# Assert the response was successful by checking the integration type
self.assertEqual(response['type'], 'HTTP')
|
Add apigateway integration test for PutIntegration
|
Add apigateway integration test for PutIntegration
|
Python
|
apache-2.0
|
boto/botocore,pplu/botocore
|
Add apigateway integration test for PutIntegration
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
import botocore.session
class TestApigateway(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('apigateway', 'us-east-1')
        # Create a resource to use with this client.
self.api_name = 'mytestapi'
self.api_id = self.client.create_rest_api(name=self.api_name)['id']
def tearDown(self):
self.client.delete_rest_api(restApiId=self.api_id)
def test_put_integration(self):
# The only resource on a brand new api is the path. So use that ID.
path_resource_id = self.client.get_resources(
restApiId=self.api_id)['items'][0]['id']
# Create a method for the resource.
self.client.put_method(
restApiId=self.api_id,
resourceId=path_resource_id,
httpMethod='GET',
authorizationType='None'
)
# Put an integration on the method.
response = self.client.put_integration(
restApiId=self.api_id,
resourceId=path_resource_id,
httpMethod='GET',
type='HTTP',
integrationHttpMethod='GET',
uri='https://api.endpoint.com'
)
# Assert the response was successful by checking the integration type
self.assertEqual(response['type'], 'HTTP')
|
<commit_before><commit_msg>Add apigateway integration test for PutIntegration<commit_after>
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
import botocore.session
class TestApigateway(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('apigateway', 'us-east-1')
        # Create a resource to use with this client.
self.api_name = 'mytestapi'
self.api_id = self.client.create_rest_api(name=self.api_name)['id']
def tearDown(self):
self.client.delete_rest_api(restApiId=self.api_id)
def test_put_integration(self):
# The only resource on a brand new api is the path. So use that ID.
path_resource_id = self.client.get_resources(
restApiId=self.api_id)['items'][0]['id']
# Create a method for the resource.
self.client.put_method(
restApiId=self.api_id,
resourceId=path_resource_id,
httpMethod='GET',
authorizationType='None'
)
# Put an integration on the method.
response = self.client.put_integration(
restApiId=self.api_id,
resourceId=path_resource_id,
httpMethod='GET',
type='HTTP',
integrationHttpMethod='GET',
uri='https://api.endpoint.com'
)
# Assert the response was successful by checking the integration type
self.assertEqual(response['type'], 'HTTP')
|
Add apigateway integration test for PutIntegration# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
import botocore.session
class TestApigateway(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('apigateway', 'us-east-1')
        # Create a resource to use with this client.
self.api_name = 'mytestapi'
self.api_id = self.client.create_rest_api(name=self.api_name)['id']
def tearDown(self):
self.client.delete_rest_api(restApiId=self.api_id)
def test_put_integration(self):
# The only resource on a brand new api is the path. So use that ID.
path_resource_id = self.client.get_resources(
restApiId=self.api_id)['items'][0]['id']
# Create a method for the resource.
self.client.put_method(
restApiId=self.api_id,
resourceId=path_resource_id,
httpMethod='GET',
authorizationType='None'
)
# Put an integration on the method.
response = self.client.put_integration(
restApiId=self.api_id,
resourceId=path_resource_id,
httpMethod='GET',
type='HTTP',
integrationHttpMethod='GET',
uri='https://api.endpoint.com'
)
# Assert the response was successful by checking the integration type
self.assertEqual(response['type'], 'HTTP')
|
<commit_before><commit_msg>Add apigateway integration test for PutIntegration<commit_after># Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
import botocore.session
class TestApigateway(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('apigateway', 'us-east-1')
        # Create a resource to use with this client.
self.api_name = 'mytestapi'
self.api_id = self.client.create_rest_api(name=self.api_name)['id']
def tearDown(self):
self.client.delete_rest_api(restApiId=self.api_id)
def test_put_integration(self):
# The only resource on a brand new api is the path. So use that ID.
path_resource_id = self.client.get_resources(
restApiId=self.api_id)['items'][0]['id']
# Create a method for the resource.
self.client.put_method(
restApiId=self.api_id,
resourceId=path_resource_id,
httpMethod='GET',
authorizationType='None'
)
# Put an integration on the method.
response = self.client.put_integration(
restApiId=self.api_id,
resourceId=path_resource_id,
httpMethod='GET',
type='HTTP',
integrationHttpMethod='GET',
uri='https://api.endpoint.com'
)
# Assert the response was successful by checking the integration type
self.assertEqual(response['type'], 'HTTP')
|
|
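The test above creates a real API in us-east-1 on every run; a hedged offline alternative uses botocore's Stubber, which queues canned responses per operation (the stubbed body and dummy credentials here are minimal illustrations, not the full CreateRestApi shape):

import botocore.session
from botocore.stub import Stubber

client = botocore.session.get_session().create_client(
    'apigateway', 'us-east-1',
    aws_access_key_id='testing', aws_secret_access_key='testing')
stubber = Stubber(client)
stubber.add_response('create_rest_api', {'id': 'abc123', 'name': 'mytestapi'})
with stubber:
    api = client.create_rest_api(name='mytestapi')
    assert api['id'] == 'abc123'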
98c1ff71d57749168f0ca35d97dbe77a8a67e082
|
mltils/xgboost/utils.py
|
mltils/xgboost/utils.py
|
xgb_to_sklearn = {
'eta': 'learning_rate',
'num_boost_round': 'n_estimators',
'alpha': 'reg_alpha',
'lambda': 'reg_lambda',
'seed': 'random_state',
}
def to_sklearn_api(params):
return {
xgb_to_sklearn.get(key, key): value
for key, value in params.items()
}
|
Add module for utilities related to xgboost
|
Add module for utilities related to xgboost
|
Python
|
mit
|
rladeira/mltils
|
Add module for utilities related to xgboost
|
xgb_to_sklearn = {
'eta': 'learning_rate',
'num_boost_round': 'n_estimators',
'alpha': 'reg_alpha',
'lambda': 'reg_lambda',
'seed': 'random_state',
}
def to_sklearn_api(params):
return {
xgb_to_sklearn.get(key, key): value
for key, value in params.items()
}
|
<commit_before><commit_msg>Add module for utilities related to xgboost<commit_after>
|
xgb_to_sklearn = {
'eta': 'learning_rate',
'num_boost_round': 'n_estimators',
'alpha': 'reg_alpha',
'lambda': 'reg_lambda',
'seed': 'random_state',
}
def to_sklearn_api(params):
return {
xgb_to_sklearn.get(key, key): value
for key, value in params.items()
}
|
Add module for utilities related to xgboost
xgb_to_sklearn = {
'eta': 'learning_rate',
'num_boost_round': 'n_estimators',
'alpha': 'reg_alpha',
'lambda': 'reg_lambda',
'seed': 'random_state',
}
def to_sklearn_api(params):
return {
xgb_to_sklearn.get(key, key): value
for key, value in params.items()
}
|
<commit_before><commit_msg>Add module for utilities related to xgboost<commit_after>
xgb_to_sklearn = {
'eta': 'learning_rate',
'num_boost_round': 'n_estimators',
'alpha': 'reg_alpha',
'lambda': 'reg_lambda',
'seed': 'random_state',
}
def to_sklearn_api(params):
return {
xgb_to_sklearn.get(key, key): value
for key, value in params.items()
}
|
|
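Usage sketch, assuming to_sklearn_api from the listing above is importable from its module path; unmapped keys pass through unchanged:

from mltils.xgboost.utils import to_sklearn_api

params = {'eta': 0.1, 'alpha': 1.0, 'max_depth': 6, 'seed': 42}
print(to_sklearn_api(params))
# {'learning_rate': 0.1, 'reg_alpha': 1.0, 'max_depth': 6, 'random_state': 42}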
ecc8a93ddda784102311ebfd4c3c93624f356778
|
cnxarchive/sql/migrations/20160723123620_add_sql_function_strip_html.py
|
cnxarchive/sql/migrations/20160723123620_add_sql_function_strip_html.py
|
# -*- coding: utf-8 -*-
def up(cursor):
cursor.execute("""\
CREATE OR REPLACE FUNCTION strip_html(html_text TEXT)
RETURNS text
AS $$
import re
    return re.sub('<[^>]*?>', '', html_text, flags=re.MULTILINE)
$$ LANGUAGE plpythonu IMMUTABLE;
""")
def down(cursor):
cursor.execute("DROP FUNCTION IF EXISTS strip_html(TEXT)")
|
Add migration to add strip_html sql function
|
Add migration to add strip_html sql function
Used for stripping html from modules.name (title)
|
Python
|
agpl-3.0
|
Connexions/cnx-archive,Connexions/cnx-archive
|
Add migration to add strip_html sql function
Used for stripping html from modules.name (title)
|
# -*- coding: utf-8 -*-
def up(cursor):
cursor.execute("""\
CREATE OR REPLACE FUNCTION strip_html(html_text TEXT)
RETURNS text
AS $$
import re
    return re.sub('<[^>]*?>', '', html_text, flags=re.MULTILINE)
$$ LANGUAGE plpythonu IMMUTABLE;
""")
def down(cursor):
cursor.execute("DROP FUNCTION IF EXISTS strip_html(TEXT)")
|
<commit_before><commit_msg>Add migration to add strip_html sql function
Used for stripping html from modules.name (title)<commit_after>
|
# -*- coding: utf-8 -*-
def up(cursor):
cursor.execute("""\
CREATE OR REPLACE FUNCTION strip_html(html_text TEXT)
RETURNS text
AS $$
import re
    return re.sub('<[^>]*?>', '', html_text, flags=re.MULTILINE)
$$ LANGUAGE plpythonu IMMUTABLE;
""")
def down(cursor):
cursor.execute("DROP FUNCTION IF EXISTS strip_html(TEXT)")
|
Add migration to add strip_html sql function
Used for stripping html from modules.name (title)# -*- coding: utf-8 -*-
def up(cursor):
cursor.execute("""\
CREATE OR REPLACE FUNCTION strip_html(html_text TEXT)
RETURNS text
AS $$
import re
    return re.sub('<[^>]*?>', '', html_text, flags=re.MULTILINE)
$$ LANGUAGE plpythonu IMMUTABLE;
""")
def down(cursor):
cursor.execute("DROP FUNCTION IF EXISTS strip_html(TEXT)")
|
<commit_before><commit_msg>Add migration to add strip_html sql function
Used for stripping html from modules.name (title)<commit_after># -*- coding: utf-8 -*-
def up(cursor):
cursor.execute("""\
CREATE OR REPLACE FUNCTION strip_html(html_text TEXT)
RETURNS text
AS $$
import re
    return re.sub('<[^>]*?>', '', html_text, flags=re.MULTILINE)
$$ LANGUAGE plpythonu IMMUTABLE;
""")
def down(cursor):
cursor.execute("DROP FUNCTION IF EXISTS strip_html(TEXT)")
|
|
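A quick check of the regex the migration installs, runnable in plain Python. Note that re.sub's fourth positional argument is count, not flags, which is why the listings above pass flags= explicitly:

import re

html = '<p>Hello <b>world</b></p>'
print(re.sub('<[^>]*?>', '', html, flags=re.MULTILINE))  # Hello world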
62c70b301ffc1e178c3bd54bd81291876b3883ea
|
analysis/03-fill-dropouts-linear.py
|
analysis/03-fill-dropouts-linear.py
|
#!/usr/bin/env python
from __future__ import division
import climate
import lmj.cubes
import lmj.cubes.fill
import numpy as np
import pandas as pd
logging = climate.get_logger('fill')
def fill(dfs, window):
'''Complete missing marker data using linear interpolation.
This method alters the given `dfs` in-place.
Parameters
----------
dfs : list of pd.DataFrame
Frames of source data. The frames will be stacked into a single large
frame to use during SVT. This stacked frame will then be split and
returned.
window : int
Model windows of this many consecutive frames.
'''
df = lmj.cubes.fill.stack(dfs, window)
centers = lmj.cubes.fill.center(df)
pos, _, _ = lmj.cubes.fill.window(df, window, interpolate=True)
lmj.cubes.fill.update(df, pos, window)
lmj.cubes.fill.restore(df, centers)
lmj.cubes.fill.unstack(df, dfs)
def main(args):
lmj.cubes.fill.main(args, lambda ts: fill([t.df for t in ts], args.window))
if __name__ == '__main__':
climate.call(main)
|
Add simple linear interpolation filling.
|
Add simple linear interpolation filling.
|
Python
|
mit
|
lmjohns3/cube-experiment,lmjohns3/cube-experiment,lmjohns3/cube-experiment
|
Add simple linear interpolation filling.
|
#!/usr/bin/env python
from __future__ import division
import climate
import lmj.cubes
import lmj.cubes.fill
import numpy as np
import pandas as pd
logging = climate.get_logger('fill')
def fill(dfs, window):
'''Complete missing marker data using linear interpolation.
This method alters the given `dfs` in-place.
Parameters
----------
dfs : list of pd.DataFrame
Frames of source data. The frames will be stacked into a single large
frame to use during SVT. This stacked frame will then be split and
returned.
window : int
Model windows of this many consecutive frames.
'''
df = lmj.cubes.fill.stack(dfs, window)
centers = lmj.cubes.fill.center(df)
pos, _, _ = lmj.cubes.fill.window(df, window, interpolate=True)
lmj.cubes.fill.update(df, pos, window)
lmj.cubes.fill.restore(df, centers)
lmj.cubes.fill.unstack(df, dfs)
def main(args):
lmj.cubes.fill.main(args, lambda ts: fill([t.df for t in ts], args.window))
if __name__ == '__main__':
climate.call(main)
|
<commit_before><commit_msg>Add simple linear interpolation filling.<commit_after>
|
#!/usr/bin/env python
from __future__ import division
import climate
import lmj.cubes
import lmj.cubes.fill
import numpy as np
import pandas as pd
logging = climate.get_logger('fill')
def fill(dfs, window):
'''Complete missing marker data using linear interpolation.
This method alters the given `dfs` in-place.
Parameters
----------
dfs : list of pd.DataFrame
Frames of source data. The frames will be stacked into a single large
frame to use during SVT. This stacked frame will then be split and
returned.
window : int
Model windows of this many consecutive frames.
'''
df = lmj.cubes.fill.stack(dfs, window)
centers = lmj.cubes.fill.center(df)
pos, _, _ = lmj.cubes.fill.window(df, window, interpolate=True)
lmj.cubes.fill.update(df, pos, window)
lmj.cubes.fill.restore(df, centers)
lmj.cubes.fill.unstack(df, dfs)
def main(args):
lmj.cubes.fill.main(args, lambda ts: fill([t.df for t in ts], args.window))
if __name__ == '__main__':
climate.call(main)
|
Add simple linear interpolation filling.#!/usr/bin/env python
from __future__ import division
import climate
import lmj.cubes
import lmj.cubes.fill
import numpy as np
import pandas as pd
logging = climate.get_logger('fill')
def fill(dfs, window):
'''Complete missing marker data using linear interpolation.
This method alters the given `dfs` in-place.
Parameters
----------
dfs : list of pd.DataFrame
Frames of source data. The frames will be stacked into a single large
frame to use during SVT. This stacked frame will then be split and
returned.
window : int
Model windows of this many consecutive frames.
'''
df = lmj.cubes.fill.stack(dfs, window)
centers = lmj.cubes.fill.center(df)
pos, _, _ = lmj.cubes.fill.window(df, window, interpolate=True)
lmj.cubes.fill.update(df, pos, window)
lmj.cubes.fill.restore(df, centers)
lmj.cubes.fill.unstack(df, dfs)
def main(args):
lmj.cubes.fill.main(args, lambda ts: fill([t.df for t in ts], args.window))
if __name__ == '__main__':
climate.call(main)
|
<commit_before><commit_msg>Add simple linear interpolation filling.<commit_after>#!/usr/bin/env python
from __future__ import division
import climate
import lmj.cubes
import lmj.cubes.fill
import numpy as np
import pandas as pd
logging = climate.get_logger('fill')
def fill(dfs, window):
'''Complete missing marker data using linear interpolation.
This method alters the given `dfs` in-place.
Parameters
----------
dfs : list of pd.DataFrame
Frames of source data. The frames will be stacked into a single large
frame to use during SVT. This stacked frame will then be split and
returned.
window : int
Model windows of this many consecutive frames.
'''
df = lmj.cubes.fill.stack(dfs, window)
centers = lmj.cubes.fill.center(df)
pos, _, _ = lmj.cubes.fill.window(df, window, interpolate=True)
lmj.cubes.fill.update(df, pos, window)
lmj.cubes.fill.restore(df, centers)
lmj.cubes.fill.unstack(df, dfs)
def main(args):
lmj.cubes.fill.main(args, lambda ts: fill([t.df for t in ts], args.window))
if __name__ == '__main__':
climate.call(main)
|
|
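The lmj.cubes helpers are project-specific; the core operation, filling dropout frames by linear interpolation along time, can be illustrated with plain pandas:

import numpy as np
import pandas as pd

marker = pd.Series([1.0, np.nan, np.nan, 4.0, 5.0])  # two dropped-out frames
print(marker.interpolate(method='linear').tolist())  # [1.0, 2.0, 3.0, 4.0, 5.0]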
55f2325354724cfe8b90324038daf2c1acaa916a
|
teuthology/openstack/test/test_config.py
|
teuthology/openstack/test/test_config.py
|
from teuthology.config import config
class TestOpenStack(object):
def setup(self):
self.openstack_config = config['openstack']
def test_config_clone(self):
assert 'clone' in self.openstack_config
def test_config_user_data(self):
os_type = 'rhel'
os_version = '7.0'
template_path = self.openstack_config['user-data'].format(
os_type=os_type,
os_version=os_version)
assert os_type in template_path
assert os_version in template_path
def test_config_ip(self):
assert 'ip' in self.openstack_config
def test_config_machine(self):
assert 'machine' in self.openstack_config
machine_config = self.openstack_config['machine']
assert 'disk' in machine_config
assert 'ram' in machine_config
assert 'cpus' in machine_config
def test_config_volumes(self):
assert 'volumes' in self.openstack_config
volumes_config = self.openstack_config['volumes']
assert 'count' in volumes_config
assert 'size' in volumes_config
|
Add unit tests for OpenStack config defaults
|
Add unit tests for OpenStack config defaults
Signed-off-by: Zack Cerza <d7cdf09fc0f0426e98c9978ee42da5d61fa54986@redhat.com>
|
Python
|
mit
|
SUSE/teuthology,dmick/teuthology,ceph/teuthology,SUSE/teuthology,dmick/teuthology,robbat2/teuthology,ktdreyer/teuthology,dmick/teuthology,caibo2014/teuthology,robbat2/teuthology,ktdreyer/teuthology,dreamhost/teuthology,SUSE/teuthology,caibo2014/teuthology,ceph/teuthology,dreamhost/teuthology
|
Add unit tests for OpenStack config defaults
Signed-off-by: Zack Cerza <d7cdf09fc0f0426e98c9978ee42da5d61fa54986@redhat.com>
|
from teuthology.config import config
class TestOpenStack(object):
def setup(self):
self.openstack_config = config['openstack']
def test_config_clone(self):
assert 'clone' in self.openstack_config
def test_config_user_data(self):
os_type = 'rhel'
os_version = '7.0'
template_path = self.openstack_config['user-data'].format(
os_type=os_type,
os_version=os_version)
assert os_type in template_path
assert os_version in template_path
def test_config_ip(self):
assert 'ip' in self.openstack_config
def test_config_machine(self):
assert 'machine' in self.openstack_config
machine_config = self.openstack_config['machine']
assert 'disk' in machine_config
assert 'ram' in machine_config
assert 'cpus' in machine_config
def test_config_volumes(self):
assert 'volumes' in self.openstack_config
volumes_config = self.openstack_config['volumes']
assert 'count' in volumes_config
assert 'size' in volumes_config
|
<commit_before><commit_msg>Add unit tests for OpenStack config defaults
Signed-off-by: Zack Cerza <d7cdf09fc0f0426e98c9978ee42da5d61fa54986@redhat.com><commit_after>
|
from teuthology.config import config
class TestOpenStack(object):
def setup(self):
self.openstack_config = config['openstack']
def test_config_clone(self):
assert 'clone' in self.openstack_config
def test_config_user_data(self):
os_type = 'rhel'
os_version = '7.0'
template_path = self.openstack_config['user-data'].format(
os_type=os_type,
os_version=os_version)
assert os_type in template_path
assert os_version in template_path
def test_config_ip(self):
assert 'ip' in self.openstack_config
def test_config_machine(self):
assert 'machine' in self.openstack_config
machine_config = self.openstack_config['machine']
assert 'disk' in machine_config
assert 'ram' in machine_config
assert 'cpus' in machine_config
def test_config_volumes(self):
assert 'volumes' in self.openstack_config
volumes_config = self.openstack_config['volumes']
assert 'count' in volumes_config
assert 'size' in volumes_config
|
Add unit tests for OpenStack config defaults
Signed-off-by: Zack Cerza <d7cdf09fc0f0426e98c9978ee42da5d61fa54986@redhat.com>from teuthology.config import config
class TestOpenStack(object):
def setup(self):
self.openstack_config = config['openstack']
def test_config_clone(self):
assert 'clone' in self.openstack_config
def test_config_user_data(self):
os_type = 'rhel'
os_version = '7.0'
template_path = self.openstack_config['user-data'].format(
os_type=os_type,
os_version=os_version)
assert os_type in template_path
assert os_version in template_path
def test_config_ip(self):
assert 'ip' in self.openstack_config
def test_config_machine(self):
assert 'machine' in self.openstack_config
machine_config = self.openstack_config['machine']
assert 'disk' in machine_config
assert 'ram' in machine_config
assert 'cpus' in machine_config
def test_config_volumes(self):
assert 'volumes' in self.openstack_config
volumes_config = self.openstack_config['volumes']
assert 'count' in volumes_config
assert 'size' in volumes_config
|
<commit_before><commit_msg>Add unit tests for OpenStack config defaults
Signed-off-by: Zack Cerza <d7cdf09fc0f0426e98c9978ee42da5d61fa54986@redhat.com><commit_after>from teuthology.config import config
class TestOpenStack(object):
def setup(self):
self.openstack_config = config['openstack']
def test_config_clone(self):
assert 'clone' in self.openstack_config
def test_config_user_data(self):
os_type = 'rhel'
os_version = '7.0'
template_path = self.openstack_config['user-data'].format(
os_type=os_type,
os_version=os_version)
assert os_type in template_path
assert os_version in template_path
def test_config_ip(self):
assert 'ip' in self.openstack_config
def test_config_machine(self):
assert 'machine' in self.openstack_config
machine_config = self.openstack_config['machine']
assert 'disk' in machine_config
assert 'ram' in machine_config
assert 'cpus' in machine_config
def test_config_volumes(self):
assert 'volumes' in self.openstack_config
volumes_config = self.openstack_config['volumes']
assert 'count' in volumes_config
assert 'size' in volumes_config
|
|
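A hedged refactor sketch of the top-level key checks above using pytest.mark.parametrize; it assumes the same teuthology config import works in the test environment:

import pytest
from teuthology.config import config

@pytest.mark.parametrize('key', ['clone', 'user-data', 'ip', 'machine', 'volumes'])
def test_openstack_has_key(key):
    assert key in config['openstack']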
67f5e754a5f90903e09a6a876d858d002c513f8a
|
abcpy/posteriors.py
|
abcpy/posteriors.py
|
import numpy as np
import scipy as sp
import scipy.stats
from .utils import stochastic_optimization
class BolfiPosterior():
def __init__(self, model, threshold, priors=None):
self.threshold = threshold
self.model = model
        self.priors = priors if priors is not None else [None] * model.n_var
self.ML, ML_val = stochastic_optimization(self._neg_unnormalized_loglikelihood_density, self.model.bounds, 10000)
print("ML parameters: %s" % (self.ML))
self.MAP, MAP_val = stochastic_optimization(self._neg_unnormalized_logposterior_density, self.model.bounds, 10000)
print("MAP parameters: %s" % (self.MAP))
def _unnormalized_loglikelihood_density(self, x):
mean, var, std = self.model.evaluate(x)
return sp.stats.norm.logcdf(self.threshold, mean, std)
def _unnormalized_likelihood_density(self, x):
return np.exp(self._unnormalized_loglikelihood_density(x))
def _neg_unnormalized_loglikelihood_density(self, x):
return -1 * self._unnormalized_loglikelihood_density(x)
def _unnormalized_logposterior_density(self, x):
return self._unnormalized_loglikelihood_density(x) + self._logprior_density(x)
def _unnormalized_posterior_density(self, x):
return np.exp(self._unnormalized_logposterior_density(x))
def _neg_unnormalized_logposterior_density(self, x):
return -1 * self._unnormalized_logposterior_density(x)
def _logprior_density(self, x):
logprior_density = 0.0
for xv, prior in zip(x, self.priors):
if prior is not None:
logprior_density += prior.getLogProbDensity(xv)
return logprior_density
def _prior_density(self, x):
return np.exp(self._logprior_density(x))
def _neg_logprior_density(self, x):
return -1 * self._logprior_density(x)
def sample(self):
return tuple([[v] for v in self.MAP])
|
Add initial draft of posterior models
|
Add initial draft of posterior models
|
Python
|
bsd-3-clause
|
lintusj1/elfi,lintusj1/elfi,elfi-dev/elfi,HIIT/elfi,elfi-dev/elfi
|
Add initial draft of posterior models
|
import scipy as sp
from .utils import stochastic_optimization
class BolfiPosterior():
def __init__(self, model, threshold, priors=None):
self.threshold = threshold
self.model = model
self.priors = [None] * model.n_var
self.ML, ML_val = stochastic_optimization(self._neg_unnormalized_loglikelihood_density, self.model.bounds, 10000)
print("ML parameters: %s" % (self.ML))
self.MAP, MAP_val = stochastic_optimization(self._neg_unnormalized_logposterior_density, self.model.bounds, 10000)
print("MAP parameters: %s" % (self.MAP))
def _unnormalized_loglikelihood_density(self, x):
mean, var, std = self.model.evaluate(x)
return sp.stats.norm.logcdf(self.threshold, mean, std)
def _unnormalized_likelihood_density(self, x):
return np.exp(self._unnormalized_loglikelihood_density(x))
def _neg_unnormalized_loglikelihood_density(self, x):
return -1 * self._unnormalized_loglikelihood_density(x)
def _unnormalized_logposterior_density(self, x):
return self._unnormalized_loglikelihood_density(x) + self._logprior_density(x)
def _unnormalized_posterior_density(self, x):
return np.exp(self._unnormalized_logposterior_density(x))
def _neg_unnormalized_logposterior_density(self, x):
return -1 * self._unnormalized_logposterior_density(x)
def _logprior_density(self, x):
logprior_density = 0.0
for xv, prior in zip(x, self.priors):
if prior is not None:
logprior_density += prior.getLogProbDensity(xv)
return logprior_density
def _prior_density(self, x):
return np.exp(self._logprior_density(x))
def _neg_logprior_density(self, x):
return -1 * self._logprior_density(x)
def sample(self):
return tuple([[v] for v in self.MAP])
|
<commit_before><commit_msg>Add initial draft of posterior models<commit_after>
|
import scipy as sp
from .utils import stochastic_optimization
class BolfiPosterior():
def __init__(self, model, threshold, priors=None):
self.threshold = threshold
self.model = model
self.priors = [None] * model.n_var
self.ML, ML_val = stochastic_optimization(self._neg_unnormalized_loglikelihood_density, self.model.bounds, 10000)
print("ML parameters: %s" % (self.ML))
self.MAP, MAP_val = stochastic_optimization(self._neg_unnormalized_logposterior_density, self.model.bounds, 10000)
print("MAP parameters: %s" % (self.MAP))
def _unnormalized_loglikelihood_density(self, x):
mean, var, std = self.model.evaluate(x)
return sp.stats.norm.logcdf(self.threshold, mean, std)
def _unnormalized_likelihood_density(self, x):
return np.exp(self._unnormalized_loglikelihood_density(x))
def _neg_unnormalized_loglikelihood_density(self, x):
return -1 * self._unnormalized_loglikelihood_density(x)
def _unnormalized_logposterior_density(self, x):
return self._unnormalized_loglikelihood_density(x) + self._logprior_density(x)
def _unnormalized_posterior_density(self, x):
return np.exp(self._unnormalized_logposterior_density(x))
def _neg_unnormalized_logposterior_density(self, x):
return -1 * self._unnormalized_logposterior_density(x)
def _logprior_density(self, x):
logprior_density = 0.0
for xv, prior in zip(x, self.priors):
if prior is not None:
logprior_density += prior.getLogProbDensity(xv)
return logprior_density
def _prior_density(self, x):
return np.exp(self._logprior_density(x))
def _neg_logprior_density(self, x):
return -1 * self._logprior_density(x)
def sample(self):
return tuple([[v] for v in self.MAP])
|
Add initial draft of posterior models
import scipy as sp
from .utils import stochastic_optimization
class BolfiPosterior():
def __init__(self, model, threshold, priors=None):
self.threshold = threshold
self.model = model
self.priors = [None] * model.n_var
self.ML, ML_val = stochastic_optimization(self._neg_unnormalized_loglikelihood_density, self.model.bounds, 10000)
print("ML parameters: %s" % (self.ML))
self.MAP, MAP_val = stochastic_optimization(self._neg_unnormalized_logposterior_density, self.model.bounds, 10000)
print("MAP parameters: %s" % (self.MAP))
def _unnormalized_loglikelihood_density(self, x):
mean, var, std = self.model.evaluate(x)
return sp.stats.norm.logcdf(self.threshold, mean, std)
def _unnormalized_likelihood_density(self, x):
return np.exp(self._unnormalized_loglikelihood_density(x))
def _neg_unnormalized_loglikelihood_density(self, x):
return -1 * self._unnormalized_loglikelihood_density(x)
def _unnormalized_logposterior_density(self, x):
return self._unnormalized_loglikelihood_density(x) + self._logprior_density(x)
def _unnormalized_posterior_density(self, x):
return np.exp(self._unnormalized_logposterior_density(x))
def _neg_unnormalized_logposterior_density(self, x):
return -1 * self._unnormalized_logposterior_density(x)
def _logprior_density(self, x):
logprior_density = 0.0
for xv, prior in zip(x, self.priors):
if prior is not None:
logprior_density += prior.getLogProbDensity(xv)
return logprior_density
def _prior_density(self, x):
return np.exp(self._logprior_density(x))
def _neg_logprior_density(self, x):
return -1 * self._logprior_density(x)
def sample(self):
return tuple([[v] for v in self.MAP])
|
<commit_before><commit_msg>Add initial draft of posterior models<commit_after>import scipy as sp
from .utils import stochastic_optimization
class BolfiPosterior():
def __init__(self, model, threshold, priors=None):
self.threshold = threshold
self.model = model
self.priors = [None] * model.n_var
self.ML, ML_val = stochastic_optimization(self._neg_unnormalized_loglikelihood_density, self.model.bounds, 10000)
print("ML parameters: %s" % (self.ML))
self.MAP, MAP_val = stochastic_optimization(self._neg_unnormalized_logposterior_density, self.model.bounds, 10000)
print("MAP parameters: %s" % (self.MAP))
def _unnormalized_loglikelihood_density(self, x):
mean, var, std = self.model.evaluate(x)
return sp.stats.norm.logcdf(self.threshold, mean, std)
def _unnormalized_likelihood_density(self, x):
return np.exp(self._unnormalized_loglikelihood_density(x))
def _neg_unnormalized_loglikelihood_density(self, x):
return -1 * self._unnormalized_loglikelihood_density(x)
def _unnormalized_logposterior_density(self, x):
return self._unnormalized_loglikelihood_density(x) + self._logprior_density(x)
def _unnormalized_posterior_density(self, x):
return np.exp(self._unnormalized_logposterior_density(x))
def _neg_unnormalized_logposterior_density(self, x):
return -1 * self._unnormalized_logposterior_density(x)
def _logprior_density(self, x):
logprior_density = 0.0
for xv, prior in zip(x, self.priors):
if prior is not None:
logprior_density += prior.getLogProbDensity(xv)
return logprior_density
def _prior_density(self, x):
return np.exp(self._logprior_density(x))
def _neg_logprior_density(self, x):
return -1 * self._logprior_density(x)
def sample(self):
return tuple([[v] for v in self.MAP])
|
|
b7dd7f75f655f4fbcb34d8f9ec260a6f18e8f617
|
backend/scripts/adminuser.py
|
backend/scripts/adminuser.py
|
#!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
import sys
def create_group(conn):
group = {}
group['name'] = "Admin Group"
group['description'] = "Administration Group for Materials Commons"
group['id'] = 'admin'
group['owner'] = 'admin@materialscommons.org'
group['users'] = []
group['birthtime'] = r.now()
group['mtime'] = r.now()
r.table('usergroups').insert(group).run(conn)
admin_group = r.table('usergroups').get('admin')\
.run(conn, time_format='raw')
return admin_group
def add_user(user, group, conn):
for u in group['users']:
if u == user:
return
group['users'].append(user)
r.table('usergroups').get('admin').update(group).run(conn)
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", type="int", dest="port",
help="rethinkdb port")
parser.add_option("-u", "--user", type="string", dest="user",
help="user to add to admin group")
(options, args) = parser.parse_args()
if options.port is None:
print "You must specify the rethinkdb port"
sys.exit(1)
if options.user is None:
print "You must specify a user to add"
sys.exit(1)
conn = r.connect('localhost', options.port, db='materialscommons')
admin_group = r.table('usergroups').get('admin')\
.run(conn, time_format='raw')
if admin_group is None:
admin_group = create_group(conn)
add_user(options.user, admin_group, conn)
|
Add utility to create administrative users.
|
Add utility to create administrative users.
|
Python
|
mit
|
materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org
|
Add utility to create administrative users.
|
#!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
import sys
def create_group(conn):
group = {}
group['name'] = "Admin Group"
group['description'] = "Administration Group for Materials Commons"
group['id'] = 'admin'
group['owner'] = 'admin@materialscommons.org'
group['users'] = []
group['birthtime'] = r.now()
group['mtime'] = r.now()
r.table('usergroups').insert(group).run(conn)
admin_group = r.table('usergroups').get('admin')\
.run(conn, time_format='raw')
return admin_group
def add_user(user, group, conn):
for u in group['users']:
if u == user:
return
group['users'].append(user)
r.table('usergroups').get('admin').update(group).run(conn)
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", type="int", dest="port",
help="rethinkdb port")
parser.add_option("-u", "--user", type="string", dest="user",
help="user to add to admin group")
(options, args) = parser.parse_args()
if options.port is None:
print "You must specify the rethinkdb port"
sys.exit(1)
if options.user is None:
print "You must specify a user to add"
sys.exit(1)
conn = r.connect('localhost', options.port, db='materialscommons')
admin_group = r.table('usergroups').get('admin')\
.run(conn, time_format='raw')
if admin_group is None:
admin_group = create_group(conn)
add_user(options.user, admin_group, conn)
|
<commit_before><commit_msg>Add utility to create administrative users.<commit_after>
|
#!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
import sys
def create_group(conn):
group = {}
group['name'] = "Admin Group"
group['description'] = "Administration Group for Materials Commons"
group['id'] = 'admin'
group['owner'] = 'admin@materialscommons.org'
group['users'] = []
group['birthtime'] = r.now()
group['mtime'] = r.now()
r.table('usergroups').insert(group).run(conn)
admin_group = r.table('usergroups').get('admin')\
.run(conn, time_format='raw')
return admin_group
def add_user(user, group, conn):
for u in group['users']:
if u == user:
return
group['users'].append(user)
r.table('usergroups').get('admin').update(group).run(conn)
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", type="int", dest="port",
help="rethinkdb port")
parser.add_option("-u", "--user", type="string", dest="user",
help="user to add to admin group")
(options, args) = parser.parse_args()
if options.port is None:
print "You must specify the rethinkdb port"
sys.exit(1)
if options.user is None:
print "You must specify a user to add"
sys.exit(1)
conn = r.connect('localhost', options.port, db='materialscommons')
admin_group = r.table('usergroups').get('admin')\
.run(conn, time_format='raw')
if admin_group is None:
admin_group = create_group(conn)
add_user(options.user, admin_group, conn)
|
Add utility to create administrative users.
#!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
import sys
def create_group(conn):
group = {}
group['name'] = "Admin Group"
group['description'] = "Administration Group for Materials Commons"
group['id'] = 'admin'
group['owner'] = 'admin@materialscommons.org'
group['users'] = []
group['birthtime'] = r.now()
group['mtime'] = r.now()
r.table('usergroups').insert(group).run(conn)
admin_group = r.table('usergroups').get('admin')\
.run(conn, time_format='raw')
return admin_group
def add_user(user, group, conn):
for u in group['users']:
if u == user:
return
group['users'].append(user)
r.table('usergroups').get('admin').update(group).run(conn)
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", type="int", dest="port",
help="rethinkdb port")
parser.add_option("-u", "--user", type="string", dest="user",
help="user to add to admin group")
(options, args) = parser.parse_args()
if options.port is None:
print "You must specify the rethinkdb port"
sys.exit(1)
if options.user is None:
print "You must specify a user to add"
sys.exit(1)
conn = r.connect('localhost', options.port, db='materialscommons')
admin_group = r.table('usergroups').get('admin')\
.run(conn, time_format='raw')
if admin_group is None:
admin_group = create_group(conn)
add_user(options.user, admin_group, conn)
|
<commit_before><commit_msg>Add utility to create administrative users.<commit_after>#!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
import sys
def create_group(conn):
group = {}
group['name'] = "Admin Group"
group['description'] = "Administration Group for Materials Commons"
group['id'] = 'admin'
group['owner'] = 'admin@materialscommons.org'
group['users'] = []
group['birthtime'] = r.now()
group['mtime'] = r.now()
r.table('usergroups').insert(group).run(conn)
admin_group = r.table('usergroups').get('admin')\
.run(conn, time_format='raw')
return admin_group
def add_user(user, group, conn):
for u in group['users']:
if u == user:
return
group['users'].append(user)
r.table('usergroups').get('admin').update(group).run(conn)
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", type="int", dest="port",
help="rethinkdb port")
parser.add_option("-u", "--user", type="string", dest="user",
help="user to add to admin group")
(options, args) = parser.parse_args()
if options.port is None:
print "You must specify the rethinkdb port"
sys.exit(1)
if options.user is None:
print "You must specify a user to add"
sys.exit(1)
conn = r.connect('localhost', options.port, db='materialscommons')
admin_group = r.table('usergroups').get('admin')\
.run(conn, time_format='raw')
if admin_group is None:
admin_group = create_group(conn)
add_user(options.user, admin_group, conn)
|
|
0c17398f68597eae175ad6a37945cf37e95e1809
|
nodeconductor/structure/migrations/0050_reset_cloud_spl_quota_limits.py
|
nodeconductor/structure/migrations/0050_reset_cloud_spl_quota_limits.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.contenttypes import models as ct_models
from django.db import migrations, models
from nodeconductor.quotas.models import Quota
from nodeconductor.structure.models import CloudServiceProjectLink
def reset_cloud_spl_quota_limits(apps, schema_editor):
old_limits = {
'vcpu': 100,
'ram': 256000,
'storage': 5120000,
}
for model in CloudServiceProjectLink.get_all_models():
content_type = ct_models.ContentType.objects.get_for_model(model)
for quota, limit in old_limits.items():
Quota.objects.filter(content_type=content_type, name=quota, limit=limit).update(limit=-1)
class Migration(migrations.Migration):
dependencies = [
('structure', '0049_extend_abbreviation'),
]
operations = [
migrations.RunPython(reset_cloud_spl_quota_limits),
]
|
Reset invalid default quotas for CloudServiceProjectLink
|
Reset invalid default quotas for CloudServiceProjectLink [WAL-814]
|
Python
|
mit
|
opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor
|
Reset invalid default quotas for CloudServiceProjectLink [WAL-814]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.contenttypes import models as ct_models
from django.db import migrations, models
from nodeconductor.quotas.models import Quota
from nodeconductor.structure.models import CloudServiceProjectLink
def reset_cloud_spl_quota_limits(apps, schema_editor):
old_limits = {
'vcpu': 100,
'ram': 256000,
'storage': 5120000,
}
for model in CloudServiceProjectLink.get_all_models():
content_type = ct_models.ContentType.objects.get_for_model(model)
for quota, limit in old_limits.items():
Quota.objects.filter(content_type=content_type, name=quota, limit=limit).update(limit=-1)
class Migration(migrations.Migration):
dependencies = [
('structure', '0049_extend_abbreviation'),
]
operations = [
migrations.RunPython(reset_cloud_spl_quota_limits),
]
|
<commit_before><commit_msg>Reset invalid default quotas for CloudServiceProjectLink [WAL-814]<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.contenttypes import models as ct_models
from django.db import migrations, models
from nodeconductor.quotas.models import Quota
from nodeconductor.structure.models import CloudServiceProjectLink
def reset_cloud_spl_quota_limits(apps, schema_editor):
old_limits = {
'vcpu': 100,
'ram': 256000,
'storage': 5120000,
}
for model in CloudServiceProjectLink.get_all_models():
content_type = ct_models.ContentType.objects.get_for_model(model)
for quota, limit in old_limits.items():
Quota.objects.filter(content_type=content_type, name=quota, limit=limit).update(limit=-1)
class Migration(migrations.Migration):
dependencies = [
('structure', '0049_extend_abbreviation'),
]
operations = [
migrations.RunPython(reset_cloud_spl_quota_limits),
]
|
Reset invalid default quotas for CloudServiceProjectLink [WAL-814]
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.contenttypes import models as ct_models
from django.db import migrations, models
from nodeconductor.quotas.models import Quota
from nodeconductor.structure.models import CloudServiceProjectLink
def reset_cloud_spl_quota_limits(apps, schema_editor):
old_limits = {
'vcpu': 100,
'ram': 256000,
'storage': 5120000,
}
for model in CloudServiceProjectLink.get_all_models():
content_type = ct_models.ContentType.objects.get_for_model(model)
for quota, limit in old_limits.items():
Quota.objects.filter(content_type=content_type, name=quota, limit=limit).update(limit=-1)
class Migration(migrations.Migration):
dependencies = [
('structure', '0049_extend_abbreviation'),
]
operations = [
migrations.RunPython(reset_cloud_spl_quota_limits),
]
|
<commit_before><commit_msg>Reset invalid default quotas for CloudServiceProjectLink [WAL-814]<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.contenttypes import models as ct_models
from django.db import migrations, models
from nodeconductor.quotas.models import Quota
from nodeconductor.structure.models import CloudServiceProjectLink
def reset_cloud_spl_quota_limits(apps, schema_editor):
old_limits = {
'vcpu': 100,
'ram': 256000,
'storage': 5120000,
}
for model in CloudServiceProjectLink.get_all_models():
content_type = ct_models.ContentType.objects.get_for_model(model)
for quota, limit in old_limits.items():
Quota.objects.filter(content_type=content_type, name=quota, limit=limit).update(limit=-1)
class Migration(migrations.Migration):
dependencies = [
('structure', '0049_extend_abbreviation'),
]
operations = [
migrations.RunPython(reset_cloud_spl_quota_limits),
]
|
|
15d3692aee84432b6b7f8306505b3f59649fd6f9
|
cnxarchive/sql/migrations/20160128111115_mimetype_removal_from_module_files.py
|
cnxarchive/sql/migrations/20160128111115_mimetype_removal_from_module_files.py
|
# -*- coding: utf-8 -*-
"""\
- Move the mimetype value from ``module_files`` to ``files``.
- Remove the ``mimetype`` column from the ``module_files`` table.
"""
from __future__ import print_function
import sys
def up(cursor):
# Move the mimetype value from ``module_files`` to ``files``.
cursor.execute("UPDATE files AS f SET media_type = mf.mimetype "
"FROM module_files AS mf "
"WHERE mf.fileid = f.fileid")
# Warn about missing mimetype.
cursor.execute("SELECT fileid, sha1 "
"FROM files AS f "
"WHERE f.fileid NOT IN (SELECT fileid FROM module_files)")
rows = '\n'.join(['{}, {}'.format(fid, sha1)
for fid, sha1 in cursor.fetchall()])
print("These files (fileid, sha1) do not have a corresponding "
"module_files entry:\n{}\n".format(rows),
file=sys.stderr)
# Remove the ``mimetype`` column from the ``module_files`` table.
cursor.execute("ALTER TABLE module_files DROP COLUMN mimetype")
def down(cursor):
# Add a ``mimetype`` column to the ``module_files`` table.
cursor.execute("ALTER TABLE module_files ADD COLUMN mimetype TEXT")
# Move the mimetype value from ``files`` to ``module_files``.
print("Rollback cannot accurately replace mimetype values that "
"were in the ``modules_files`` table.",
file=sys.stderr)
cursor.execute("UPDATE module_files AS mf SET mimetype = f.media_type "
"FROM files AS f "
"WHERE f.fileid = mf.fileid")
|
Remove mimetype from the module_files table
|
Remove mimetype from the module_files table
|
Python
|
agpl-3.0
|
Connexions/cnx-archive,Connexions/cnx-archive
|
Remove mimetype from the module_files table
|
# -*- coding: utf-8 -*-
"""\
- Move the mimetype value from ``module_files`` to ``files``.
- Remove the ``mimetype`` column from the ``module_files`` table.
"""
from __future__ import print_function
import sys
def up(cursor):
# Move the mimetype value from ``module_files`` to ``files``.
cursor.execute("UPDATE files AS f SET media_type = mf.mimetype "
"FROM module_files AS mf "
"WHERE mf.fileid = f.fileid")
# Warn about missing mimetype.
cursor.execute("SELECT fileid, sha1 "
"FROM files AS f "
"WHERE f.fileid NOT IN (SELECT fileid FROM module_files)")
rows = '\n'.join(['{}, {}'.format(fid, sha1)
for fid, sha1 in cursor.fetchall()])
print("These files (fileid, sha1) do not have a corresponding "
"module_files entry:\n{}\n".format(rows),
file=sys.stderr)
# Remove the ``mimetype`` column from the ``module_files`` table.
cursor.execute("ALTER TABLE module_files DROP COLUMN mimetype")
def down(cursor):
# Add a ``mimetype`` column to the ``module_files`` table.
cursor.execute("ALTER TABLE module_files ADD COLUMN mimetype TEXT")
# Move the mimetype value from ``files`` to ``module_files``.
print("Rollback cannot accurately replace mimetype values that "
"were in the ``modules_files`` table.",
file=sys.stderr)
cursor.execute("UPDATE module_files AS mf SET mimetype = f.media_type "
"FROM files AS f "
"WHERE f.fileid = mf.fileid")
|
<commit_before><commit_msg>Remove mimetype from the module_files table<commit_after>
|
# -*- coding: utf-8 -*-
"""\
- Move the mimetype value from ``module_files`` to ``files``.
- Remove the ``mimetype`` column from the ``module_files`` table.
"""
from __future__ import print_function
import sys
def up(cursor):
# Move the mimetype value from ``module_files`` to ``files``.
cursor.execute("UPDATE files AS f SET media_type = mf.mimetype "
"FROM module_files AS mf "
"WHERE mf.fileid = f.fileid")
# Warn about missing mimetype.
cursor.execute("SELECT fileid, sha1 "
"FROM files AS f "
"WHERE f.fileid NOT IN (SELECT fileid FROM module_files)")
rows = '\n'.join(['{}, {}'.format(fid, sha1)
for fid, sha1 in cursor.fetchall()])
print("These files (fileid, sha1) do not have a corresponding "
"module_files entry:\n{}\n".format(rows),
file=sys.stderr)
# Remove the ``mimetype`` column from the ``module_files`` table.
cursor.execute("ALTER TABLE module_files DROP COLUMN mimetype")
def down(cursor):
# Add a ``mimetype`` column to the ``module_files`` table.
cursor.execute("ALTER TABLE module_files ADD COLUMN mimetype TEXT")
# Move the mimetype value from ``files`` to ``module_files``.
print("Rollback cannot accurately replace mimetype values that "
"were in the ``modules_files`` table.",
file=sys.stderr)
cursor.execute("UPDATE module_files AS mf SET mimetype = f.media_type "
"FROM files AS f "
"WHERE f.fileid = mf.fileid")
|
Remove mimetype from the module_files table
# -*- coding: utf-8 -*-
"""\
- Move the mimetype value from ``module_files`` to ``files``.
- Remove the ``mimetype`` column from the ``module_files`` table.
"""
from __future__ import print_function
import sys
def up(cursor):
# Move the mimetype value from ``module_files`` to ``files``.
cursor.execute("UPDATE files AS f SET media_type = mf.mimetype "
"FROM module_files AS mf "
"WHERE mf.fileid = f.fileid")
# Warn about missing mimetype.
cursor.execute("SELECT fileid, sha1 "
"FROM files AS f "
"WHERE f.fileid NOT IN (SELECT fileid FROM module_files)")
rows = '\n'.join(['{}, {}'.format(fid, sha1)
for fid, sha1 in cursor.fetchall()])
print("These files (fileid, sha1) do not have a corresponding "
"module_files entry:\n{}\n".format(rows),
file=sys.stderr)
# Remove the ``mimetype`` column from the ``module_files`` table.
cursor.execute("ALTER TABLE module_files DROP COLUMN mimetype")
def down(cursor):
# Add a ``mimetype`` column to the ``module_files`` table.
cursor.execute("ALTER TABLE module_files ADD COLUMN mimetype TEXT")
# Move the mimetype value from ``files`` to ``module_files``.
print("Rollback cannot accurately replace mimetype values that "
"were in the ``modules_files`` table.",
file=sys.stderr)
cursor.execute("UPDATE module_files AS mf SET mimetype = f.media_type "
"FROM files AS f "
"WHERE f.fileid = mf.fileid")
|
<commit_before><commit_msg>Remove mimetype from the module_files table<commit_after># -*- coding: utf-8 -*-
"""\
- Move the mimetype value from ``module_files`` to ``files``.
- Remove the ``mimetype`` column from the ``module_files`` table.
"""
from __future__ import print_function
import sys
def up(cursor):
# Move the mimetype value from ``module_files`` to ``files``.
cursor.execute("UPDATE files AS f SET media_type = mf.mimetype "
"FROM module_files AS mf "
"WHERE mf.fileid = f.fileid")
# Warn about missing mimetype.
cursor.execute("SELECT fileid, sha1 "
"FROM files AS f "
"WHERE f.fileid NOT IN (SELECT fileid FROM module_files)")
rows = '\n'.join(['{}, {}'.format(fid, sha1)
for fid, sha1 in cursor.fetchall()])
print("These files (fileid, sha1) do not have a corresponding "
"module_files entry:\n{}\n".format(rows),
file=sys.stderr)
# Remove the ``mimetype`` column from the ``module_files`` table.
cursor.execute("ALTER TABLE module_files DROP COLUMN mimetype")
def down(cursor):
# Add a ``mimetype`` column to the ``module_files`` table.
cursor.execute("ALTER TABLE module_files ADD COLUMN mimetype TEXT")
# Move the mimetype value from ``files`` to ``module_files``.
print("Rollback cannot accurately replace mimetype values that "
"were in the ``modules_files`` table.",
file=sys.stderr)
cursor.execute("UPDATE module_files AS mf SET mimetype = f.media_type "
"FROM files AS f "
"WHERE f.fileid = mf.fileid")
|
|
abe40e3c82ef1f351275a59b2e537f43530caa0c
|
app/cleanup_stories.py
|
app/cleanup_stories.py
|
from pymongo import MongoClient
from fetch_stories import get_mongo_client, close_mongo_client
from bson import ObjectId
from datetime import datetime, timedelta
def remove_old_stories():
client = get_mongo_client()
db = client.get_default_database()
article_collection = db['articles']
two_days_ago = datetime.utcnow() - timedelta(days=2)
two_days_ago = ObjectId.from_datetime(two_days_ago)
query = {
'_id' : { '$lt' : two_days_ago}
}
article_collection.remove(query)
close_mongo_client(client)
def main():
remove_old_stories()
if __name__ == '__main__':
main()
|
Clean up db script (remove articles older than two days).
|
Clean up db script (remove articles older than two days).
|
Python
|
mit
|
hw3jung/Gucci,hw3jung/Gucci
|
Clean up db script (remove articles older than two days).
|
from pymongo import MongoClient
from fetch_stories import get_mongo_client, close_mongo_client
from bson import ObjectId
from datetime import datetime, timedelta
def remove_old_stories():
client = get_mongo_client()
db = client.get_default_database()
article_collection = db['articles']
two_days_ago = datetime.utcnow() - timedelta(days=2)
two_days_ago = ObjectId.from_datetime(two_days_ago)
query = {
'_id' : { '$lt' : two_days_ago}
}
article_collection.remove(query)
close_mongo_client(client)
def main():
remove_old_stories()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Clean up db script (remove articles older than two days).<commit_after>
|
from pymongo import MongoClient
from fetch_stories import get_mongo_client, close_mongo_client
from bson import ObjectId
from datetime import datetime, timedelta
def remove_old_stories():
client = get_mongo_client()
db = client.get_default_database()
article_collection = db['articles']
two_days_ago = datetime.utcnow() - timedelta(days=2)
two_days_ago = ObjectId.from_datetime(two_days_ago)
query = {
'_id' : { '$lt' : two_days_ago}
}
article_collection.remove(query)
close_mongo_client(client)
def main():
remove_old_stories()
if __name__ == '__main__':
main()
|
Clean up db script (remove articles older than two days).
from pymongo import MongoClient
from fetch_stories import get_mongo_client, close_mongo_client
from bson import ObjectId
from datetime import datetime, timedelta
def remove_old_stories():
client = get_mongo_client()
db = client.get_default_database()
article_collection = db['articles']
two_days_ago = datetime.utcnow() - timedelta(days=2)
two_days_ago = ObjectId.from_datetime(two_days_ago)
query = {
'_id' : { '$lt' : two_days_ago}
}
article_collection.remove(query)
close_mongo_client(client)
def main():
remove_old_stories()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Clean up db script (remove articles older than two days).<commit_after>from pymongo import MongoClient
from fetch_stories import get_mongo_client, close_mongo_client
from bson import ObjectId
from datetime import datetime, timedelta
def remove_old_stories():
client = get_mongo_client()
db = client.get_default_database()
article_collection = db['articles']
two_days_ago = datetime.utcnow() - timedelta(days=2)
two_days_ago = ObjectId.from_datetime(two_days_ago)
query = {
'_id' : { '$lt' : two_days_ago}
}
article_collection.remove(query)
close_mongo_client(client)
def main():
remove_old_stories()
if __name__ == '__main__':
main()
|
|
372f4a988411e48a0c50cdc74fb2a7f4e5abf052
|
tests/server-identity.py
|
tests/server-identity.py
|
import nose
import requests
import fixture
@nose.with_setup(fixture.start_tangelo, fixture.stop_tangelo)
def test_server_identity():
response = requests.get(fixture.url("/"))
assert response.headers["server"] == "Tangelo"
|
Add a server identity test
|
Add a server identity test
|
Python
|
apache-2.0
|
Kitware/tangelo,Kitware/tangelo,Kitware/tangelo
|
Add a server identity test
|
import nose
import requests
import fixture
@nose.with_setup(fixture.start_tangelo, fixture.stop_tangelo)
def test_server_identity():
response = requests.get(fixture.url("/"))
assert response.headers["server"] == "Tangelo"
|
<commit_before><commit_msg>Add a server identity test<commit_after>
|
import nose
import requests
import fixture
@nose.with_setup(fixture.start_tangelo, fixture.stop_tangelo)
def test_server_identity():
response = requests.get(fixture.url("/"))
assert response.headers["server"] == "Tangelo"
|
Add a server identity test
import nose
import requests
import fixture
@nose.with_setup(fixture.start_tangelo, fixture.stop_tangelo)
def test_server_identity():
response = requests.get(fixture.url("/"))
assert response.headers["server"] == "Tangelo"
|
<commit_before><commit_msg>Add a server identity test<commit_after>import nose
import requests
import fixture
@nose.with_setup(fixture.start_tangelo, fixture.stop_tangelo)
def test_server_identity():
response = requests.get(fixture.url("/"))
assert response.headers["server"] == "Tangelo"
|
|
19db4647257617992e9b195828baf39907cc5db1
|
tests/test_exit_codes.py
|
tests/test_exit_codes.py
|
"""Check that the CLI returns the appropriate exit code."""
import subprocess
def test_exit_code_demo():
"""Ensure that linting the demo returns an exit code of 1."""
try:
subprocess.check_output("proselint --demo", shell=True)
except subprocess.CalledProcessError as grepexc:
assert(grepexc.returncode == 1)
def test_exit_code_version():
"""Ensure that getting the version returns an exit code of 0."""
try:
subprocess.check_output("proselint --version", shell=True)
except subprocess.CalledProcessError:
assert(False)
|
Add tests for exit codes
|
Add tests for exit codes
|
Python
|
bsd-3-clause
|
amperser/proselint,amperser/proselint,amperser/proselint,amperser/proselint,amperser/proselint
|
Add tests for exit codes
|
"""Check that the CLI returns the appropriate exit code."""
import subprocess
def test_exit_code_demo():
"""Ensure that linting the demo returns an exit code of 1."""
try:
subprocess.check_output("proselint --demo", shell=True)
except subprocess.CalledProcessError as grepexc:
assert(grepexc.returncode == 1)
def test_exit_code_version():
"""Ensure that getting the version returns an exit code of 0."""
try:
subprocess.check_output("proselint --version", shell=True)
except subprocess.CalledProcessError:
assert(False)
|
<commit_before><commit_msg>Add tests for exit codes<commit_after>
|
"""Check that the CLI returns the appropriate exit code."""
import subprocess
def test_exit_code_demo():
"""Ensure that linting the demo returns an exit code of 1."""
try:
subprocess.check_output("proselint --demo", shell=True)
except subprocess.CalledProcessError as grepexc:
assert(grepexc.returncode == 1)
def test_exit_code_version():
"""Ensure that getting the version returns an exit code of 0."""
try:
subprocess.check_output("proselint --version", shell=True)
except subprocess.CalledProcessError:
assert(False)
|
Add tests for exit codes"""Check that the CLI returns the appropriate exit code."""
import subprocess
def test_exit_code_demo():
"""Ensure that linting the demo returns an exit code of 1."""
try:
subprocess.check_output("proselint --demo", shell=True)
except subprocess.CalledProcessError as grepexc:
assert(grepexc.returncode == 1)
def test_exit_code_version():
"""Ensure that getting the version returns an exit code of 0."""
try:
subprocess.check_output("proselint --version", shell=True)
except subprocess.CalledProcessError:
assert(False)
|
<commit_before><commit_msg>Add tests for exit codes<commit_after>"""Check that the CLI returns the appropriate exit code."""
import subprocess
def test_exit_code_demo():
"""Ensure that linting the demo returns an exit code of 1."""
try:
subprocess.check_output("proselint --demo", shell=True)
except subprocess.CalledProcessError as grepexc:
assert(grepexc.returncode == 1)
def test_exit_code_version():
"""Ensure that getting the version returns an exit code of 0."""
try:
subprocess.check_output("proselint --version", shell=True)
except subprocess.CalledProcessError:
assert(False)
|